/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Fibre Channel SCSI ULP Mapping driver
 */

#include <sys/scsi/scsi.h>
#include <sys/types.h>
#include <sys/varargs.h>
#include <sys/devctl.h>
#include <sys/thread.h>
#include <sys/open.h>
#include <sys/file.h>
#include <sys/sunndi.h>
#include <sys/console.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/utsname.h>
#include <sys/scsi/impl/scsi_reset_notify.h>
#include <sys/ndi_impldefs.h>
#include <sys/byteorder.h>
#include <sys/fs/dv_node.h>
#include <sys/ctype.h>
#include <sys/sunmdi.h>

#include <sys/fibre-channel/fc.h>
#include <sys/fibre-channel/impl/fc_ulpif.h>
#include <sys/fibre-channel/ulp/fcpvar.h>

/*
 * Discovery Process
 * =================
 *
 * The discovery process is a major function of FCP. In order to help
 * understand that function a flow diagram is given here.
This diagram 57 * doesn't claim to cover all the cases and the events that can occur during 58 * the discovery process nor the subtleties of the code. The code paths shown 59 * are simplified. Its purpose is to help the reader (and potentially bug 60 * fixer) have an overall view of the logic of the code. For that reason the 61 * diagram covers the simple case of the line coming up cleanly or of a new 62 * port attaching to FCP the link being up. The reader must keep in mind 63 * that: 64 * 65 * - There are special cases where bringing devices online and offline 66 * is driven by Ioctl. 67 * 68 * - The behavior of the discovery process can be modified through the 69 * .conf file. 70 * 71 * - The line can go down and come back up at any time during the 72 * discovery process which explains some of the complexity of the code. 73 * 74 * ............................................................................ 75 * 76 * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP. 77 * 78 * 79 * +-------------------------+ 80 * fp/fctl module --->| fcp_port_attach | 81 * +-------------------------+ 82 * | | 83 * | | 84 * | v 85 * | +-------------------------+ 86 * | | fcp_handle_port_attach | 87 * | +-------------------------+ 88 * | | 89 * | | 90 * +--------------------+ | 91 * | | 92 * v v 93 * +-------------------------+ 94 * | fcp_statec_callback | 95 * +-------------------------+ 96 * | 97 * | 98 * v 99 * +-------------------------+ 100 * | fcp_handle_devices | 101 * +-------------------------+ 102 * | 103 * | 104 * v 105 * +-------------------------+ 106 * | fcp_handle_mapflags | 107 * +-------------------------+ 108 * | 109 * | 110 * v 111 * +-------------------------+ 112 * | fcp_send_els | 113 * | | 114 * | PLOGI or PRLI To all the| 115 * | reachable devices. | 116 * +-------------------------+ 117 * 118 * 119 * ............................................................................ 
120 * 121 * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during 122 * STEP 1 are called (it is actually the same function). 123 * 124 * 125 * +-------------------------+ 126 * | fcp_icmd_callback | 127 * fp/fctl module --->| | 128 * | callback for PLOGI and | 129 * | PRLI. | 130 * +-------------------------+ 131 * | 132 * | 133 * Received PLOGI Accept /-\ Received PRLI Accept 134 * _ _ _ _ _ _ / \_ _ _ _ _ _ 135 * | \ / | 136 * | \-/ | 137 * | | 138 * v v 139 * +-------------------------+ +-------------------------+ 140 * | fcp_send_els | | fcp_send_scsi | 141 * | | | | 142 * | PRLI | | REPORT_LUN | 143 * +-------------------------+ +-------------------------+ 144 * 145 * ............................................................................ 146 * 147 * STEP 3: The callback functions of the SCSI commands issued by FCP are called 148 * (It is actually the same function). 149 * 150 * 151 * +-------------------------+ 152 * fp/fctl module ------->| fcp_scsi_callback | 153 * +-------------------------+ 154 * | 155 * | 156 * | 157 * Receive REPORT_LUN reply /-\ Receive INQUIRY PAGE83 reply 158 * _ _ _ _ _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ _ _ _ _ 159 * | \ / | 160 * | \-/ | 161 * | | | 162 * | Receive INQUIRY reply| | 163 * | | | 164 * v v v 165 * +------------------------+ +----------------------+ +----------------------+ 166 * | fcp_handle_reportlun | | fcp_handle_inquiry | | fcp_handle_page83 | 167 * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) | 168 * +------------------------+ +----------------------+ +----------------------+ 169 * | | | 170 * | | | 171 * | | | 172 * v v | 173 * +-----------------+ +-----------------+ | 174 * | fcp_send_scsi | | fcp_send_scsi | | 175 * | | | | | 176 * | INQUIRY | | INQUIRY PAGE83 | | 177 * | (To each LUN) | +-----------------+ | 178 * +-----------------+ | 179 * | 180 * v 181 * +------------------------+ 182 * | fcp_call_finish_init | 183 * +------------------------+ 184 * | 
185 * v 186 * +-----------------------------+ 187 * | fcp_call_finish_init_held | 188 * +-----------------------------+ 189 * | 190 * | 191 * All LUNs scanned /-\ 192 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ / \ 193 * | \ / 194 * | \-/ 195 * v | 196 * +------------------+ | 197 * | fcp_finish_tgt | | 198 * +------------------+ | 199 * | Target Not Offline and | 200 * Target Not Offline and | not marked and tgt_node_state | 201 * marked /-\ not FCP_TGT_NODE_ON_DEMAND | 202 * _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ | 203 * | \ / | | 204 * | \-/ | | 205 * v v | 206 * +----------------------------+ +-------------------+ | 207 * | fcp_offline_target | | fcp_create_luns | | 208 * | | +-------------------+ | 209 * | A structure fcp_tgt_elem | | | 210 * | is created and queued in | v | 211 * | the FCP port list | +-------------------+ | 212 * | port_offline_tgts. It | | fcp_pass_to_hp | | 213 * | will be unqueued by the | | | | 214 * | watchdog timer. | | Called for each | | 215 * +----------------------------+ | LUN. Dispatches | | 216 * | | fcp_hp_task | | 217 * | +-------------------+ | 218 * | | | 219 * | | | 220 * | | | 221 * | +---------------->| 222 * | | 223 * +---------------------------------------------->| 224 * | 225 * | 226 * All the targets (devices) have been scanned /-\ 227 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ / \ 228 * | \ / 229 * | \-/ 230 * +-------------------------------------+ | 231 * | fcp_finish_init | | 232 * | | | 233 * | Signal broadcasts the condition | | 234 * | variable port_config_cv of the FCP | | 235 * | port. One potential code sequence | | 236 * | waiting on the condition variable | | 237 * | the code sequence handling | | 238 * | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER| | 239 * | The other is in the function | | 240 * | fcp_reconfig_wait which is called | | 241 * | in the transmit path preventing IOs | | 242 * | from going through till the disco- | | 243 * | very process is over. 
| | 244 * +-------------------------------------+ | 245 * | | 246 * | | 247 * +--------------------------------->| 248 * | 249 * v 250 * Return 251 * 252 * ............................................................................ 253 * 254 * STEP 4: The hot plug task is called (for each fcp_hp_elem). 255 * 256 * 257 * +-------------------------+ 258 * | fcp_hp_task | 259 * +-------------------------+ 260 * | 261 * | 262 * v 263 * +-------------------------+ 264 * | fcp_trigger_lun | 265 * +-------------------------+ 266 * | 267 * | 268 * v 269 * Bring offline /-\ Bring online 270 * _ _ _ _ _ _ _ _ _/ \_ _ _ _ _ _ _ _ _ _ 271 * | \ / | 272 * | \-/ | 273 * v v 274 * +---------------------+ +-----------------------+ 275 * | fcp_offline_child | | fcp_get_cip | 276 * +---------------------+ | | 277 * | Creates a dev_info_t | 278 * | or a mdi_pathinfo_t | 279 * | depending on whether | 280 * | mpxio is on or off. | 281 * +-----------------------+ 282 * | 283 * | 284 * v 285 * +-----------------------+ 286 * | fcp_online_child | 287 * | | 288 * | Set device online | 289 * | using NDI or MDI. | 290 * +-----------------------+ 291 * 292 * ............................................................................ 293 * 294 * STEP 5: The watchdog timer expires. The watch dog timer does much more that 295 * what is described here. We only show the target offline path. 
296 * 297 * 298 * +--------------------------+ 299 * | fcp_watch | 300 * +--------------------------+ 301 * | 302 * | 303 * v 304 * +--------------------------+ 305 * | fcp_scan_offline_tgts | 306 * +--------------------------+ 307 * | 308 * | 309 * v 310 * +--------------------------+ 311 * | fcp_offline_target_now | 312 * +--------------------------+ 313 * | 314 * | 315 * v 316 * +--------------------------+ 317 * | fcp_offline_tgt_luns | 318 * +--------------------------+ 319 * | 320 * | 321 * v 322 * +--------------------------+ 323 * | fcp_offline_lun | 324 * +--------------------------+ 325 * | 326 * | 327 * v 328 * +----------------------------------+ 329 * | fcp_offline_lun_now | 330 * | | 331 * | A request (or two if mpxio) is | 332 * | sent to the hot plug task using | 333 * | a fcp_hp_elem structure. | 334 * +----------------------------------+ 335 */ 336 337 /* 338 * Functions registered with DDI framework 339 */ 340 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd); 341 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd); 342 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp); 343 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp); 344 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, 345 cred_t *credp, int *rval); 346 347 /* 348 * Functions registered with FC Transport framework 349 */ 350 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo, 351 fc_attach_cmd_t cmd, uint32_t s_id); 352 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info, 353 fc_detach_cmd_t cmd); 354 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, 355 int cmd, intptr_t data, int mode, cred_t *credp, int *rval, 356 uint32_t claimed); 357 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle, 358 fc_unsol_buf_t *buf, uint32_t claimed); 359 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle, 360 fc_unsol_buf_t *buf, uint32_t claimed); 
361 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle, 362 uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist, 363 uint32_t dev_cnt, uint32_t port_sid); 364 365 /* 366 * Functions registered with SCSA framework 367 */ 368 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 369 scsi_hba_tran_t *hba_tran, struct scsi_device *sd); 370 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 371 scsi_hba_tran_t *hba_tran, struct scsi_device *sd); 372 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip, 373 scsi_hba_tran_t *hba_tran, struct scsi_device *sd); 374 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt); 375 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt); 376 static int fcp_scsi_reset(struct scsi_address *ap, int level); 377 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom); 378 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, 379 int whom); 380 static void fcp_pkt_teardown(struct scsi_pkt *pkt); 381 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag, 382 void (*callback)(caddr_t), caddr_t arg); 383 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, 384 char *name, ddi_eventcookie_t *event_cookiep); 385 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip, 386 ddi_eventcookie_t eventid, void (*callback)(), void *arg, 387 ddi_callback_id_t *cb_id); 388 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi, 389 ddi_callback_id_t cb_id); 390 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip, 391 ddi_eventcookie_t eventid, void *impldata); 392 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag, 393 ddi_bus_config_op_t op, void *arg, dev_info_t **childp); 394 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag, 395 ddi_bus_config_op_t op, void *arg); 396 397 /* 398 * Internal functions 
399 */ 400 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, 401 int mode, int *rval); 402 403 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi, 404 int mode, int *rval); 405 static int fcp_copyin_scsi_cmd(caddr_t base_addr, 406 struct fcp_scsi_cmd *fscsi, int mode); 407 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, 408 caddr_t base_addr, int mode); 409 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi); 410 411 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr, 412 la_wwn_t *pwwn, int *ret_val, int *fc_status, int *fc_pkt_state, 413 int *fc_pkt_reason, int *fc_pkt_action); 414 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, 415 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action); 416 static int fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, 417 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action); 418 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd); 419 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd); 420 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt); 421 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd); 422 423 static void fcp_handle_devices(struct fcp_port *pptr, 424 fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt, 425 fcp_map_tag_t *map_tag, int cause); 426 static int fcp_handle_mapflags(struct fcp_port *pptr, 427 struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt, 428 int tgt_cnt, int cause); 429 static int fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause); 430 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt, 431 struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause); 432 static void fcp_update_state(struct fcp_port *pptr, uint32_t state, 433 int cause); 434 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, 435 uint32_t state); 436 static struct fcp_port *fcp_get_port(opaque_t port_handle); 437 static void fcp_unsol_callback(fc_packet_t 
*fpkt); 438 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf, 439 uchar_t r_ctl, uchar_t type); 440 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf); 441 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr, 442 struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len, 443 int nodma, int lcount, int tcount, int cause, uint32_t rscn_count); 444 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd); 445 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd, 446 int nodma, int flags); 447 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd); 448 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr, 449 uchar_t *wwn); 450 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr, 451 uint32_t d_id); 452 static void fcp_icmd_callback(fc_packet_t *fpkt); 453 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, 454 int len, int lcount, int tcount, int cause, uint32_t rscn_count); 455 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt); 456 static void fcp_scsi_callback(fc_packet_t *fpkt); 457 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt); 458 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd); 459 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd); 460 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt, 461 uint16_t lun_num); 462 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt, 463 int link_cnt, int tgt_cnt, int cause); 464 static void fcp_finish_init(struct fcp_port *pptr); 465 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, 466 int tgt_cnt, int cause); 467 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, 468 int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags); 469 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt, 470 int link_cnt, int tgt_cnt, int nowait, int flags); 471 
static void fcp_offline_target_now(struct fcp_port *pptr, 472 struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags); 473 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, 474 int tgt_cnt, int flags); 475 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt, 476 int nowait, int flags); 477 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, 478 int tgt_cnt); 479 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, 480 int tgt_cnt, int flags); 481 static void fcp_scan_offline_luns(struct fcp_port *pptr); 482 static void fcp_scan_offline_tgts(struct fcp_port *pptr); 483 static void fcp_update_offline_flags(struct fcp_lun *plun); 484 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun); 485 static void fcp_abort_commands(struct fcp_pkt *head, struct 486 fcp_port *pptr); 487 static void fcp_cmd_callback(fc_packet_t *fpkt); 488 static void fcp_complete_pkt(fc_packet_t *fpkt); 489 static int fcp_validate_fcp_response(struct fcp_rsp *rsp, 490 struct fcp_port *pptr); 491 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt, 492 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause); 493 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt); 494 static void fcp_dealloc_lun(struct fcp_lun *plun); 495 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr, 496 fc_portmap_t *map_entry, int link_cnt); 497 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt); 498 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt); 499 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, 500 int internal); 501 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...); 502 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo, 503 uint32_t s_id, int instance); 504 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag, 505 int instance); 506 static void fcp_cleanup_port(struct fcp_port *pptr, int 
instance); 507 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *, 508 int); 509 static void fcp_kmem_cache_destructor(struct scsi_pkt *, scsi_hba_tran_t *); 510 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t); 511 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, 512 int flags); 513 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt); 514 static int fcp_reset_target(struct scsi_address *ap, int level); 515 static int fcp_commoncap(struct scsi_address *ap, char *cap, 516 int val, int tgtonly, int doset); 517 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len); 518 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len); 519 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, 520 int sleep); 521 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo, 522 uint32_t s_id, fc_attach_cmd_t cmd, int instance); 523 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo); 524 static void fcp_process_elem(struct fcp_hp_elem *elem, int result); 525 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, 526 int lcount, int tcount); 527 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip); 528 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip); 529 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt, 530 int tgt_cnt); 531 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun, 532 dev_info_t *pdip, caddr_t name); 533 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip, 534 int lcount, int tcount, int flags, int *circ); 535 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, 536 int lcount, int tcount, int flags, int *circ); 537 static void fcp_remove_child(struct fcp_lun *plun); 538 static void fcp_watch(void *arg); 539 static void fcp_check_reset_delay(struct fcp_port *pptr); 540 
static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt, 541 struct fcp_lun *rlun, int tgt_cnt); 542 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr); 543 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr, 544 uchar_t *wwn, uint16_t lun); 545 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd, 546 struct fcp_lun *plun); 547 static void fcp_post_callback(struct fcp_pkt *cmd); 548 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd); 549 static struct fcp_port *fcp_dip2port(dev_info_t *dip); 550 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr, 551 child_info_t *cip); 552 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr, 553 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt, 554 int tgt_cnt, int flags); 555 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr, 556 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt, 557 int tgt_cnt, int flags, int wait); 558 static void fcp_retransport_cmd(struct fcp_port *pptr, 559 struct fcp_pkt *cmd); 560 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, 561 uint_t statistics); 562 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd); 563 static void fcp_update_targets(struct fcp_port *pptr, 564 fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause); 565 static int fcp_call_finish_init(struct fcp_port *pptr, 566 struct fcp_tgt *ptgt, int lcount, int tcount, int cause); 567 static int fcp_call_finish_init_held(struct fcp_port *pptr, 568 struct fcp_tgt *ptgt, int lcount, int tcount, int cause); 569 static void fcp_reconfigure_luns(void * tgt_handle); 570 static void fcp_free_targets(struct fcp_port *pptr); 571 static void fcp_free_target(struct fcp_tgt *ptgt); 572 static int fcp_is_retryable(struct fcp_ipkt *icmd); 573 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn); 574 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int); 
575 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string); 576 static void fcp_print_error(fc_packet_t *fpkt); 577 static int fcp_handle_ipkt_errors(struct fcp_port *pptr, 578 struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op); 579 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt); 580 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr, 581 uint32_t *dev_cnt); 582 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause); 583 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval); 584 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *, 585 struct fcp_ioctl *, struct fcp_port **); 586 static char *fcp_get_lun_path(struct fcp_lun *plun); 587 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode, 588 int *rval); 589 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id); 590 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id); 591 static char *fcp_get_lun_path(struct fcp_lun *plun); 592 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode, 593 int *rval); 594 static void fcp_reconfig_wait(struct fcp_port *pptr); 595 596 /* 597 * New functions added for mpxio support 598 */ 599 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 600 scsi_hba_tran_t *hba_tran, struct scsi_device *sd); 601 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount, 602 int tcount); 603 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun, 604 dev_info_t *pdip); 605 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip); 606 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int); 607 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr); 608 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp); 609 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, 610 int what); 611 static int 
fcp_is_reconfig_needed(struct fcp_tgt *ptgt, 612 fc_packet_t *fpkt); 613 static int fcp_symmetric_device_probe(struct fcp_lun *plun); 614 615 /* 616 * New functions added for lun masking support 617 */ 618 static void fcp_read_blacklist(dev_info_t *dip, 619 struct fcp_black_list_entry **pplun_blacklist); 620 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun, 621 struct fcp_black_list_entry **pplun_blacklist); 622 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id, 623 struct fcp_black_list_entry **pplun_blacklist); 624 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id); 625 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist); 626 627 /* 628 * New functions to support software FCA (like fcoei) 629 */ 630 static struct scsi_pkt *fcp_pseudo_init_pkt( 631 struct scsi_address *ap, struct scsi_pkt *pkt, 632 struct buf *bp, int cmdlen, int statuslen, 633 int tgtlen, int flags, int (*callback)(), caddr_t arg); 634 static void fcp_pseudo_destroy_pkt( 635 struct scsi_address *ap, struct scsi_pkt *pkt); 636 static void fcp_pseudo_sync_pkt( 637 struct scsi_address *ap, struct scsi_pkt *pkt); 638 static int fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt); 639 static void fcp_pseudo_dmafree( 640 struct scsi_address *ap, struct scsi_pkt *pkt); 641 642 extern struct mod_ops mod_driverops; 643 /* 644 * This variable is defined in modctl.c and set to '1' after the root driver 645 * and fs are loaded. It serves as an indication that the root filesystem can 646 * be used. 647 */ 648 extern int modrootloaded; 649 /* 650 * This table contains strings associated with the SCSI sense key codes. It 651 * is used by FCP to print a clear explanation of the code returned in the 652 * sense information by a device. 653 */ 654 extern char *sense_keys[]; 655 /* 656 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). 
It is 657 * under this device that the paths to a physical device are created when 658 * MPxIO is used. 659 */ 660 extern dev_info_t *scsi_vhci_dip; 661 662 /* 663 * Report lun processing 664 */ 665 #define FCP_LUN_ADDRESSING 0x80 666 #define FCP_PD_ADDRESSING 0x00 667 #define FCP_VOLUME_ADDRESSING 0x40 668 669 #define FCP_SVE_THROTTLE 0x28 /* Vicom */ 670 #define MAX_INT_DMA 0x7fffffff 671 /* 672 * Property definitions 673 */ 674 #define NODE_WWN_PROP (char *)fcp_node_wwn_prop 675 #define PORT_WWN_PROP (char *)fcp_port_wwn_prop 676 #define TARGET_PROP (char *)fcp_target_prop 677 #define LUN_PROP (char *)fcp_lun_prop 678 #define SAM_LUN_PROP (char *)fcp_sam_lun_prop 679 #define CONF_WWN_PROP (char *)fcp_conf_wwn_prop 680 #define OBP_BOOT_WWN (char *)fcp_obp_boot_wwn 681 #define MANUAL_CFG_ONLY (char *)fcp_manual_config_only 682 #define INIT_PORT_PROP (char *)fcp_init_port_prop 683 #define TGT_PORT_PROP (char *)fcp_tgt_port_prop 684 #define LUN_BLACKLIST_PROP (char *)fcp_lun_blacklist_prop 685 /* 686 * Short hand macros. 687 */ 688 #define LUN_PORT (plun->lun_tgt->tgt_port) 689 #define LUN_TGT (plun->lun_tgt) 690 691 /* 692 * Driver private macros 693 */ 694 #define FCP_ATOB(x) (((x) >= '0' && (x) <= '9') ? ((x) - '0') : \ 695 ((x) >= 'a' && (x) <= 'f') ? \ 696 ((x) - 'a' + 10) : ((x) - 'A' + 10)) 697 698 #define FCP_MAX(a, b) ((a) > (b) ? 
(a) : (b))

#define FCP_N_NDI_EVENTS \
	(sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))

#define FCP_LINK_STATE_CHANGED(p, c) \
	((p)->port_link_cnt != (c)->ipkt_link_cnt)

#define FCP_TGT_STATE_CHANGED(t, c) \
	((t)->tgt_change_cnt != (c)->ipkt_change_cnt)

#define FCP_STATE_CHANGED(p, t, c) \
	(FCP_TGT_STATE_CHANGED(t, c))

/* Transient transport/fabric conditions for which a packet is retried. */
#define FCP_MUST_RETRY(fpkt) \
	((fpkt)->pkt_state == FC_PKT_LOCAL_BSY || \
	(fpkt)->pkt_state == FC_PKT_LOCAL_RJT || \
	(fpkt)->pkt_state == FC_PKT_TRAN_BSY || \
	(fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS || \
	(fpkt)->pkt_state == FC_PKT_NPORT_BSY || \
	(fpkt)->pkt_state == FC_PKT_FABRIC_BSY || \
	(fpkt)->pkt_state == FC_PKT_PORT_OFFLINE || \
	(fpkt)->pkt_reason == FC_REASON_OFFLINE)

/* UNIT ATTENTION, ASC/ASCQ 3F/0E: REPORTED LUNS DATA HAS CHANGED */
#define FCP_SENSE_REPORTLUN_CHANGED(es) \
	((es)->es_key == KEY_UNIT_ATTENTION && \
	(es)->es_add_code == 0x3f && \
	(es)->es_qual_code == 0x0e)

/* ILLEGAL REQUEST, ASC/ASCQ 25/00: LOGICAL UNIT NOT SUPPORTED */
#define FCP_SENSE_NO_LUN(es) \
	((es)->es_key == KEY_ILLEGAL_REQUEST && \
	(es)->es_add_code == 0x25 && \
	(es)->es_qual_code == 0x0)

#define FCP_VERSION "20091208-1.192"
#define FCP_NAME_VERSION "SunFC FCP v" FCP_VERSION

#define FCP_NUM_ELEMENTS(array) \
	(sizeof (array) / sizeof ((array)[0]))

/*
 * Debugging, Error reporting, and tracing
 */
/*
 * FIX: the expansion is now parenthesized.  The previous definition
 * (1024 * 1024 without parentheses) mis-associates when the macro is
 * used inside a larger expression, e.g. `x % FCP_LOG_SIZE' would have
 * expanded to `(x % 1024) * 1024'.
 */
#define FCP_LOG_SIZE (1024 * 1024)

#define FCP_LEVEL_1 0x00001 /* attach/detach PM CPR */
#define FCP_LEVEL_2 0x00002 /* failures/Invalid data */
#define FCP_LEVEL_3 0x00004 /* state change, discovery */
#define FCP_LEVEL_4 0x00008 /* ULP messages */
#define FCP_LEVEL_5 0x00010 /* ELS/SCSI cmds */
#define FCP_LEVEL_6 0x00020 /* Transport failures */
#define FCP_LEVEL_7 0x00040 /* NOTE(review): purpose undocumented here */
#define FCP_LEVEL_8 0x00080 /* I/O tracing */
#define FCP_LEVEL_9 0x00100 /* I/O tracing */



/*
 * Log contents to system messages file
 */
#define FCP_MSG_LEVEL_1 (FCP_LEVEL_1 |
FC_TRACE_LOG_MSG) 759 #define FCP_MSG_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_MSG) 760 #define FCP_MSG_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_MSG) 761 #define FCP_MSG_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_MSG) 762 #define FCP_MSG_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_MSG) 763 #define FCP_MSG_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_MSG) 764 #define FCP_MSG_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_MSG) 765 #define FCP_MSG_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_MSG) 766 #define FCP_MSG_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_MSG) 767 768 769 /* 770 * Log contents to trace buffer 771 */ 772 #define FCP_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF) 773 #define FCP_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF) 774 #define FCP_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF) 775 #define FCP_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF) 776 #define FCP_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF) 777 #define FCP_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF) 778 #define FCP_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF) 779 #define FCP_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF) 780 #define FCP_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF) 781 782 783 /* 784 * Log contents to both system messages file and trace buffer 785 */ 786 #define FCP_MSG_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF | \ 787 FC_TRACE_LOG_MSG) 788 #define FCP_MSG_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF | \ 789 FC_TRACE_LOG_MSG) 790 #define FCP_MSG_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF | \ 791 FC_TRACE_LOG_MSG) 792 #define FCP_MSG_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF | \ 793 FC_TRACE_LOG_MSG) 794 #define FCP_MSG_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF | \ 795 FC_TRACE_LOG_MSG) 796 #define FCP_MSG_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF | \ 797 FC_TRACE_LOG_MSG) 798 #define FCP_MSG_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF | \ 799 FC_TRACE_LOG_MSG) 800 #define FCP_MSG_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF | \ 801 FC_TRACE_LOG_MSG) 802 #define FCP_MSG_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF | \ 803 
				FC_TRACE_LOG_MSG)

/*
 * FCP_DTRACE compiles away entirely in non-DEBUG builds;
 * FCP_TRACE is always live.
 */
#ifdef DEBUG
#define	FCP_DTRACE	fc_trace_debug
#else
#define	FCP_DTRACE
#endif

#define	FCP_TRACE	fc_trace_debug

/* Character-driver entry points for the FCP pseudo device node. */
static struct cb_ops fcp_cb_ops = {
	fcp_open,			/* open */
	fcp_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	fcp_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};


static struct dev_ops fcp_ops = {
	DEVO_REV,
	0,
	ddi_getinfo_1to1,
	nulldev,		/* identify */
	nulldev,		/* probe */
	fcp_attach,		/* attach and detach are mandatory */
	fcp_detach,
	nodev,			/* reset */
	&fcp_cb_ops,		/* cb_ops */
	NULL,			/* bus_ops */
	NULL,			/* power */
};


char *fcp_version = FCP_NAME_VERSION;

static struct modldrv modldrv = {
	&mod_driverops,
	FCP_NAME_VERSION,
	&fcp_ops
};


static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};


/* ULP registration handed to the FC transport (fctl) via fc_ulp_add(). */
static fc_ulp_modinfo_t fcp_modinfo = {
	&fcp_modinfo,		/* ulp_handle */
	FCTL_ULP_MODREV_4,	/* ulp_rev */
	FC4_SCSI_FCP,		/* ulp_type */
	"fcp",			/* ulp_name */
	FCP_STATEC_MASK,	/* ulp_statec_mask */
	fcp_port_attach,	/* ulp_port_attach */
	fcp_port_detach,	/* ulp_port_detach */
	fcp_port_ioctl,		/* ulp_port_ioctl */
	fcp_els_callback,	/* ulp_els_callback */
	fcp_data_callback,	/* ulp_data_callback */
	fcp_statec_callback	/* ulp_statec_callback */
};

/*
 * Default trace mask; identical in both build flavors today, but kept
 * under #ifdef so DEBUG builds can widen it independently.
 */
#ifdef DEBUG
#define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
				FCP_LEVEL_2 | FCP_LEVEL_3 | \
				FCP_LEVEL_4 | FCP_LEVEL_5 | \
				FCP_LEVEL_6 | FCP_LEVEL_7)
#else
#define	FCP_TRACE_DEFAULT	(FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
				FCP_LEVEL_2 | FCP_LEVEL_3 | \
				FCP_LEVEL_4 | FCP_LEVEL_5 | \
				FCP_LEVEL_6 | FCP_LEVEL_7)
#endif

/* FCP global variables */
int fcp_bus_config_debug = 0;
static int fcp_log_size = FCP_LOG_SIZE;
static int fcp_trace = FCP_TRACE_DEFAULT;
static fc_trace_logq_t *fcp_logq = NULL;
static struct fcp_black_list_entry *fcp_lun_blacklist = NULL;
/*
 * The auto-configuration is set by default. The only way of disabling it is
 * through the property MANUAL_CFG_ONLY in the fcp.conf file.
 */
static int fcp_enable_auto_configuration = 1;
static int fcp_max_bus_config_retries = 4;
static int fcp_lun_ready_retry = 300;
/*
 * The value assigned to the following variable has changed several times due
 * to a problem with the data underruns reporting of some firmware(s). The
 * current value of 50 gives a timeout value of 25 seconds for a max number
 * of 256 LUNs.
 */
static int fcp_max_target_retries = 50;
/*
 * Watchdog variables
 * ------------------
 *
 * fcp_watchdog_init
 *
 *	Indicates if the watchdog timer is running or not.  This is actually
 *	a counter of the number of Fibre Channel ports that attached.  When
 *	the first port attaches the watchdog is started.  When the last port
 *	detaches the watchdog timer is stopped.
 *
 * fcp_watchdog_time
 *
 *	This is the watchdog clock counter.  It is incremented by
 *	fcp_watchdog_time each time the watchdog timer expires.
 *
 * fcp_watchdog_timeout
 *
 *	Increment value of the variable fcp_watchdog_time as well as the
 *	the timeout value of the watchdog timer.  The unit is 1 second.  It
 *	is strange that this is not a #define but a variable since the code
 *	never changes this value.
 *	The reason why it can be said that the
 *	unit is 1 second is because the number of ticks for the watchdog
 *	timer is determined like this:
 *
 *		fcp_watchdog_tick = fcp_watchdog_timeout *
 *		    drv_usectohz(1000000);
 *
 *	The value 1000000 is hard coded in the code.
 *
 * fcp_watchdog_tick
 *
 *	Watchdog timer value in ticks.
 */
static int fcp_watchdog_init = 0;
static int fcp_watchdog_time = 0;
static int fcp_watchdog_timeout = 1;
static int fcp_watchdog_tick;

/*
 * fcp_offline_delay is a global variable to enable customisation of
 * the timeout on link offlines or RSCNs. The default value is set
 * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
 * specified in FCP4 Chapter 11 (see www.t10.org).
 *
 * The variable fcp_offline_delay is specified in SECONDS.
 *
 * If we made this a static var then the user would not be able to
 * change it. This variable is set in fcp_attach().
 */
unsigned int fcp_offline_delay = FCP_OFFLINE_DELAY;

static void *fcp_softstate = NULL; /* for soft state */
static uchar_t fcp_oflag = FCP_IDLE; /* open flag */
static kmutex_t fcp_global_mutex;
static kmutex_t fcp_ioctl_mutex;
static dev_info_t *fcp_global_dip = NULL;
static timeout_id_t fcp_watchdog_id;
const char *fcp_lun_prop = "lun";
const char *fcp_sam_lun_prop = "sam-lun";
const char *fcp_target_prop = "target";
/*
 * NOTE: consumers of "node-wwn" property include stmsboot in ON
 * consolidation.
 */
const char *fcp_node_wwn_prop = "node-wwn";
const char *fcp_port_wwn_prop = "port-wwn";
const char *fcp_conf_wwn_prop = "fc-port-wwn";
const char *fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
const char *fcp_manual_config_only = "manual_configuration_only";
const char *fcp_init_port_prop = "initiator-port";
const char *fcp_tgt_port_prop = "target-port";
const char *fcp_lun_blacklist_prop = "pwwn-lun-blacklist";

/* Head of the attached-port list; protected by fcp_global_mutex. */
static struct fcp_port *fcp_port_head = NULL;
static ddi_eventcookie_t fcp_insert_eid;
static ddi_eventcookie_t fcp_remove_eid;

static ndi_event_definition_t fcp_ndi_event_defs[] = {
	{ FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
	{ FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
};

/*
 * List of valid commands for the scsi_ioctl call
 */
static uint8_t scsi_ioctl_list[] = {
	SCMD_INQUIRY,
	SCMD_REPORT_LUN,
	SCMD_READ_CAPACITY
};

/*
 * this is used to dummy up a report lun response for cases
 * where the target doesn't support it
 */
static uchar_t fcp_dummy_lun[] = {
	0x00,			/* MSB length (length = no of luns * 8) */
	0x00,
	0x00,
	0x08,			/* LSB length */
	0x00,			/* MSB reserved */
	0x00,
	0x00,
	0x00,			/* LSB reserved */
	FCP_PD_ADDRESSING,
	0x00,			/* LUN is ZERO at the first level */
	0x00,
	0x00,			/* second level is zero */
	0x00,
	0x00,			/* third level is zero */
	0x00,
	0x00			/* fourth level is zero */
};

/*
 * AL_PA to loop-index translation table; entries of 0x00 mark AL_PA
 * values that are not valid arbitrated-loop addresses.
 */
static uchar_t fcp_alpa_to_switch[] = {
	0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
	0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
	0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
	0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
	0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
	0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
	0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
	0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
	0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
	0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
	0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
	0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
	0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
	0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

static caddr_t pid = "SESS01 ";

#if	!defined(lint)

_NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
    fcp_port::fcp_next fcp_watchdog_id))

_NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))

_NOTE(SCHEME_PROTECTS_DATA("Unshared",
    fcp_insert_eid
    fcp_remove_eid
    fcp_watchdog_time))

_NOTE(SCHEME_PROTECTS_DATA("Unshared",
    fcp_cb_ops
    fcp_ops
    callb_cpr))

#endif /* lint */

/*
 * This table is used to determine whether or not it's safe to copy in
 * the target node name for a lun. Since all luns behind the same target
 * have the same wwnn, only tagets that do not support multiple luns are
 * eligible to be enumerated under mpxio if they aren't page83 compliant.
 */

char *fcp_symmetric_disk_table[] = {
	"SEAGATE ST",
	"IBM DDYFT",
	"SUNW SUNWGS",	/* Daktari enclosure */
	"SUN SENA",	/* SES device */
	"SUN SESS01"	/* VICOM SVE box */
};

int fcp_symmetric_disk_table_size =
	sizeof (fcp_symmetric_disk_table)/sizeof (char *);

/*
 * This structure is bogus. scsi_hba_attach_setup() requires, as in the kernel
 * will panic if you don't pass this in to the routine, this information.
 * Need to determine what the actual impact to the system is by providing
 * this information if any. Since dma allocation is done in pkt_init it may
 * not have any impact. These values are straight from the Writing Device
 * Driver manual.
 */
static ddi_dma_attr_t pseudo_fca_dma_attr = {
	DMA_ATTR_V0,	/* ddi_dma_attr version */
	0,		/* low address */
	0xffffffff,	/* high address */
	0x00ffffff,	/* counter upper bound */
	1,		/* alignment requirements */
	0x3f,		/* burst sizes */
	1,		/* minimum DMA access */
	0xffffffff,	/* maximum DMA access */
	(1 << 24) - 1,	/* segment boundary restrictions */
	1,		/* scater/gather list length */
	512,		/* device granularity */
	0		/* DMA flags */
};

/*
 * The _init(9e) return value should be that of mod_install(9f). Under
 * some circumstances, a failure may not be related mod_install(9f) and
 * one would then require a return value to indicate the failure. Looking
 * at mod_install(9f), it is expected to return 0 for success and non-zero
 * for failure. mod_install(9f) for device drivers, further goes down the
 * calling chain and ends up in ddi_installdrv(), whose return values are
 * DDI_SUCCESS and DDI_FAILURE - There are also other functions in the
 * calling chain of mod_install(9f) which return values like EINVAL and
 * in some even return -1.
 *
 * To work around the vagaries of the mod_install() calling chain, return
 * either 0 or ENODEV depending on the success or failure of mod_install()
 */
int
_init(void)
{
	int rval;

	/*
	 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
	 * before registering with the transport first.
	 */
	if (ddi_soft_state_init(&fcp_softstate,
	    sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
		return (EINVAL);
	}

	mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);

	/* Register with the FC transport before exposing the driver. */
	if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
		cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
		mutex_destroy(&fcp_global_mutex);
		mutex_destroy(&fcp_ioctl_mutex);
		ddi_soft_state_fini(&fcp_softstate);
		return (ENODEV);
	}

	fcp_logq = fc_trace_alloc_logq(fcp_log_size);

	if ((rval = mod_install(&modlinkage)) != 0) {
		/* Undo everything done above, in reverse order. */
		fc_trace_free_logq(fcp_logq);
		(void) fc_ulp_remove(&fcp_modinfo);
		mutex_destroy(&fcp_global_mutex);
		mutex_destroy(&fcp_ioctl_mutex);
		ddi_soft_state_fini(&fcp_softstate);
		rval = ENODEV;
	}

	return (rval);
}


/*
 * the system is done with us as a driver, so clean up
 */
int
_fini(void)
{
	int rval;

	/*
	 * don't start cleaning up until we know that the module remove
	 * has worked  -- if this works, then we know that each instance
	 * has successfully been DDI_DETACHed
	 */
	if ((rval = mod_remove(&modlinkage)) != 0) {
		return (rval);
	}

	(void) fc_ulp_remove(&fcp_modinfo);

	ddi_soft_state_fini(&fcp_softstate);
	mutex_destroy(&fcp_global_mutex);
	mutex_destroy(&fcp_ioctl_mutex);
	fc_trace_free_logq(fcp_logq);

	return (rval);
}


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}


/*
 * attach the module
 */
static int
fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int rval = DDI_SUCCESS;

	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);

	if (cmd == DDI_ATTACH) {
		/* The FCP pseudo device is created here. */
		mutex_enter(&fcp_global_mutex);
		fcp_global_dip = devi;
		mutex_exit(&fcp_global_mutex);

		if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
		    0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
			ddi_report_dev(fcp_global_dip);
		} else {
			cmn_err(CE_WARN, "FCP: Cannot create minor node");
			mutex_enter(&fcp_global_mutex);
			fcp_global_dip = NULL;
			mutex_exit(&fcp_global_mutex);

			rval = DDI_FAILURE;
		}
		/*
		 * We check the fcp_offline_delay property at this
		 * point. This variable is global for the driver,
		 * not specific to an instance.
		 *
		 * We do not recommend setting the value to less
		 * than 10 seconds (RA_TOV_els), or greater than
		 * 60 seconds.
		 */
		fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
		    devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
		    "fcp_offline_delay", FCP_OFFLINE_DELAY);
		if ((fcp_offline_delay < 10) ||
		    (fcp_offline_delay > 60)) {
			cmn_err(CE_WARN, "Setting fcp_offline_delay "
			    "to %d second(s). This is outside the "
			    "recommended range of 10..60 seconds.",
			    fcp_offline_delay);
		}
	}

	return (rval);
}


/*ARGSUSED*/
static int
fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	int res = DDI_SUCCESS;

	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_8, 0, "module detach: cmd=0x%x", cmd);

	if (cmd == DDI_DETACH) {
		/*
		 * Check if there are active ports/threads.  If there
		 * are any, we will fail, else we will succeed (there
		 * should not be much to clean up)
		 */
		mutex_enter(&fcp_global_mutex);
		FCP_DTRACE(fcp_logq, "fcp",
		    fcp_trace, FCP_BUF_LEVEL_8, 0, "port_head=%p",
		    (void *) fcp_port_head);

		if (fcp_port_head == NULL) {
			ddi_remove_minor_node(fcp_global_dip, NULL);
			fcp_global_dip = NULL;
			mutex_exit(&fcp_global_mutex);
		} else {
			mutex_exit(&fcp_global_mutex);
			res = DDI_FAILURE;
		}
	}
	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_8, 0, "module detach returning %d", res);

	return (res);
}


/* ARGSUSED */
static int
fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
{
	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	/*
	 * Allow only root to talk;
	 */
	if (drv_priv(credp)) {
		return (EPERM);
	}

	mutex_enter(&fcp_global_mutex);
	/* An existing exclusive open rejects every new open. */
	if (fcp_oflag & FCP_EXCL) {
		mutex_exit(&fcp_global_mutex);
		return (EBUSY);
	}

	if (flag & FEXCL) {
		/* Exclusive open requires that nobody else has it open. */
		if (fcp_oflag & FCP_OPEN) {
			mutex_exit(&fcp_global_mutex);
			return (EBUSY);
		}
		fcp_oflag |= FCP_EXCL;
	}
	fcp_oflag |= FCP_OPEN;
	mutex_exit(&fcp_global_mutex);

	return (0);
}


/* ARGSUSED */
static int
fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
{
	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	mutex_enter(&fcp_global_mutex);
	if (!(fcp_oflag & FCP_OPEN)) {
		mutex_exit(&fcp_global_mutex);
		return (ENODEV);
	}
	/* Clears FCP_OPEN and FCP_EXCL in one assignment. */
	fcp_oflag = FCP_IDLE;
	mutex_exit(&fcp_global_mutex);

	return (0);
}


/*
 * fcp_ioctl
 *	Entry point for the FCP ioctls
 *
 * Input:
 *	See ioctl(9E)
 *
 * Output:
 *	See ioctl(9E)
 *
 * Returns:
 *	See ioctl(9E)
 *
 * Context:
 *	Kernel context.
 */
/* ARGSUSED */
static int
fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
    int *rval)
{
	int ret = 0;

	mutex_enter(&fcp_global_mutex);
	if (!(fcp_oflag & FCP_OPEN)) {
		mutex_exit(&fcp_global_mutex);
		return (ENXIO);
	}
	mutex_exit(&fcp_global_mutex);

	switch (cmd) {
	case FCP_TGT_INQUIRY:
	case FCP_TGT_CREATE:
	case FCP_TGT_DELETE:
		ret = fcp_setup_device_data_ioctl(cmd,
		    (struct fcp_ioctl *)data, mode, rval);
		break;

	case FCP_TGT_SEND_SCSI:
		/* Serialize pass-through commands. */
		mutex_enter(&fcp_ioctl_mutex);
		ret = fcp_setup_scsi_ioctl(
		    (struct fcp_scsi_cmd *)data, mode, rval);
		mutex_exit(&fcp_ioctl_mutex);
		break;

	case FCP_STATE_COUNT:
		ret = fcp_get_statec_count((struct fcp_ioctl *)data,
		    mode, rval);
		break;
	case FCP_GET_TARGET_MAPPINGS:
		ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
		    mode, rval);
		break;
	default:
		fcp_log(CE_WARN, NULL,
		    "!Invalid ioctl opcode = 0x%x", cmd);
		ret = EINVAL;
	}

	return (ret);
}


/*
 * fcp_setup_device_data_ioctl
 *	Setup handler for the "device data" style of
 *	ioctl for FCP.  See "fcp_util.h" for data structure
 *	definition.
 *
 * Input:
 *	cmd	= FCP ioctl command
 *	data	= ioctl data
 *	mode	= See ioctl(9E)
 *
 * Output:
 *	data	= ioctl data
 *	rval	= return value - see ioctl(9E)
 *
 * Returns:
 *	See ioctl(9E)
 *
 * Context:
 *	Kernel context.
1431 */ 1432 /* ARGSUSED */ 1433 static int 1434 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode, 1435 int *rval) 1436 { 1437 struct fcp_port *pptr; 1438 struct device_data *dev_data; 1439 uint32_t link_cnt; 1440 la_wwn_t *wwn_ptr = NULL; 1441 struct fcp_tgt *ptgt = NULL; 1442 struct fcp_lun *plun = NULL; 1443 int i, error; 1444 struct fcp_ioctl fioctl; 1445 1446 #ifdef _MULTI_DATAMODEL 1447 switch (ddi_model_convert_from(mode & FMODELS)) { 1448 case DDI_MODEL_ILP32: { 1449 struct fcp32_ioctl f32_ioctl; 1450 1451 if (ddi_copyin((void *)data, (void *)&f32_ioctl, 1452 sizeof (struct fcp32_ioctl), mode)) { 1453 return (EFAULT); 1454 } 1455 fioctl.fp_minor = f32_ioctl.fp_minor; 1456 fioctl.listlen = f32_ioctl.listlen; 1457 fioctl.list = (caddr_t)(long)f32_ioctl.list; 1458 break; 1459 } 1460 case DDI_MODEL_NONE: 1461 if (ddi_copyin((void *)data, (void *)&fioctl, 1462 sizeof (struct fcp_ioctl), mode)) { 1463 return (EFAULT); 1464 } 1465 break; 1466 } 1467 1468 #else /* _MULTI_DATAMODEL */ 1469 if (ddi_copyin((void *)data, (void *)&fioctl, 1470 sizeof (struct fcp_ioctl), mode)) { 1471 return (EFAULT); 1472 } 1473 #endif /* _MULTI_DATAMODEL */ 1474 1475 /* 1476 * Right now we can assume that the minor number matches with 1477 * this instance of fp. If this changes we will need to 1478 * revisit this logic. 
1479 */ 1480 mutex_enter(&fcp_global_mutex); 1481 pptr = fcp_port_head; 1482 while (pptr) { 1483 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) { 1484 break; 1485 } else { 1486 pptr = pptr->port_next; 1487 } 1488 } 1489 mutex_exit(&fcp_global_mutex); 1490 if (pptr == NULL) { 1491 return (ENXIO); 1492 } 1493 mutex_enter(&pptr->port_mutex); 1494 1495 1496 if ((dev_data = kmem_zalloc((sizeof (struct device_data)) * 1497 fioctl.listlen, KM_NOSLEEP)) == NULL) { 1498 mutex_exit(&pptr->port_mutex); 1499 return (ENOMEM); 1500 } 1501 1502 if (ddi_copyin(fioctl.list, dev_data, 1503 (sizeof (struct device_data)) * fioctl.listlen, mode)) { 1504 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen); 1505 mutex_exit(&pptr->port_mutex); 1506 return (EFAULT); 1507 } 1508 link_cnt = pptr->port_link_cnt; 1509 1510 if (cmd == FCP_TGT_INQUIRY) { 1511 wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn); 1512 if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn, 1513 sizeof (wwn_ptr->raw_wwn)) == 0) { 1514 /* This ioctl is requesting INQ info of local HBA */ 1515 mutex_exit(&pptr->port_mutex); 1516 dev_data[0].dev0_type = DTYPE_UNKNOWN; 1517 dev_data[0].dev_status = 0; 1518 if (ddi_copyout(dev_data, fioctl.list, 1519 (sizeof (struct device_data)) * fioctl.listlen, 1520 mode)) { 1521 kmem_free(dev_data, 1522 sizeof (*dev_data) * fioctl.listlen); 1523 return (EFAULT); 1524 } 1525 kmem_free(dev_data, 1526 sizeof (*dev_data) * fioctl.listlen); 1527 #ifdef _MULTI_DATAMODEL 1528 switch (ddi_model_convert_from(mode & FMODELS)) { 1529 case DDI_MODEL_ILP32: { 1530 struct fcp32_ioctl f32_ioctl; 1531 f32_ioctl.fp_minor = fioctl.fp_minor; 1532 f32_ioctl.listlen = fioctl.listlen; 1533 f32_ioctl.list = (caddr32_t)(long)fioctl.list; 1534 if (ddi_copyout((void *)&f32_ioctl, 1535 (void *)data, 1536 sizeof (struct fcp32_ioctl), mode)) { 1537 return (EFAULT); 1538 } 1539 break; 1540 } 1541 case DDI_MODEL_NONE: 1542 if (ddi_copyout((void *)&fioctl, (void *)data, 1543 sizeof (struct fcp_ioctl), mode)) { 
1544 return (EFAULT); 1545 } 1546 break; 1547 } 1548 #else /* _MULTI_DATAMODEL */ 1549 if (ddi_copyout((void *)&fioctl, (void *)data, 1550 sizeof (struct fcp_ioctl), mode)) { 1551 return (EFAULT); 1552 } 1553 #endif /* _MULTI_DATAMODEL */ 1554 return (0); 1555 } 1556 } 1557 1558 if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) { 1559 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen); 1560 mutex_exit(&pptr->port_mutex); 1561 return (ENXIO); 1562 } 1563 1564 for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt); 1565 i++) { 1566 wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn); 1567 1568 dev_data[i].dev0_type = DTYPE_UNKNOWN; 1569 1570 1571 dev_data[i].dev_status = ENXIO; 1572 1573 if ((ptgt = fcp_lookup_target(pptr, 1574 (uchar_t *)wwn_ptr)) == NULL) { 1575 mutex_exit(&pptr->port_mutex); 1576 if (fc_ulp_get_remote_port(pptr->port_fp_handle, 1577 wwn_ptr, &error, 0) == NULL) { 1578 dev_data[i].dev_status = ENODEV; 1579 mutex_enter(&pptr->port_mutex); 1580 continue; 1581 } else { 1582 1583 dev_data[i].dev_status = EAGAIN; 1584 1585 mutex_enter(&pptr->port_mutex); 1586 continue; 1587 } 1588 } else { 1589 mutex_enter(&ptgt->tgt_mutex); 1590 if (ptgt->tgt_state & (FCP_TGT_MARK | 1591 FCP_TGT_BUSY)) { 1592 dev_data[i].dev_status = EAGAIN; 1593 mutex_exit(&ptgt->tgt_mutex); 1594 continue; 1595 } 1596 1597 if (ptgt->tgt_state & FCP_TGT_OFFLINE) { 1598 if (ptgt->tgt_icap && !ptgt->tgt_tcap) { 1599 dev_data[i].dev_status = ENOTSUP; 1600 } else { 1601 dev_data[i].dev_status = ENXIO; 1602 } 1603 mutex_exit(&ptgt->tgt_mutex); 1604 continue; 1605 } 1606 1607 switch (cmd) { 1608 case FCP_TGT_INQUIRY: 1609 /* 1610 * The reason we give device type of 1611 * lun 0 only even though in some 1612 * cases(like maxstrat) lun 0 device 1613 * type may be 0x3f(invalid) is that 1614 * for bridge boxes target will appear 1615 * as luns and the first lun could be 1616 * a device that utility may not care 1617 * about (like a tape device). 
1618 */ 1619 dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt; 1620 dev_data[i].dev_status = 0; 1621 mutex_exit(&ptgt->tgt_mutex); 1622 1623 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) { 1624 dev_data[i].dev0_type = DTYPE_UNKNOWN; 1625 } else { 1626 dev_data[i].dev0_type = plun->lun_type; 1627 } 1628 mutex_enter(&ptgt->tgt_mutex); 1629 break; 1630 1631 case FCP_TGT_CREATE: 1632 mutex_exit(&ptgt->tgt_mutex); 1633 mutex_exit(&pptr->port_mutex); 1634 1635 /* 1636 * serialize state change call backs. 1637 * only one call back will be handled 1638 * at a time. 1639 */ 1640 mutex_enter(&fcp_global_mutex); 1641 if (fcp_oflag & FCP_BUSY) { 1642 mutex_exit(&fcp_global_mutex); 1643 if (dev_data) { 1644 kmem_free(dev_data, 1645 sizeof (*dev_data) * 1646 fioctl.listlen); 1647 } 1648 return (EBUSY); 1649 } 1650 fcp_oflag |= FCP_BUSY; 1651 mutex_exit(&fcp_global_mutex); 1652 1653 dev_data[i].dev_status = 1654 fcp_create_on_demand(pptr, 1655 wwn_ptr->raw_wwn); 1656 1657 if (dev_data[i].dev_status != 0) { 1658 char buf[25]; 1659 1660 for (i = 0; i < FC_WWN_SIZE; i++) { 1661 (void) sprintf(&buf[i << 1], 1662 "%02x", 1663 wwn_ptr->raw_wwn[i]); 1664 } 1665 1666 fcp_log(CE_WARN, pptr->port_dip, 1667 "!Failed to create nodes for" 1668 " pwwn=%s; error=%x", buf, 1669 dev_data[i].dev_status); 1670 } 1671 1672 /* allow state change call backs again */ 1673 mutex_enter(&fcp_global_mutex); 1674 fcp_oflag &= ~FCP_BUSY; 1675 mutex_exit(&fcp_global_mutex); 1676 1677 mutex_enter(&pptr->port_mutex); 1678 mutex_enter(&ptgt->tgt_mutex); 1679 1680 break; 1681 1682 case FCP_TGT_DELETE: 1683 break; 1684 1685 default: 1686 fcp_log(CE_WARN, pptr->port_dip, 1687 "!Invalid device data ioctl " 1688 "opcode = 0x%x", cmd); 1689 } 1690 mutex_exit(&ptgt->tgt_mutex); 1691 } 1692 } 1693 mutex_exit(&pptr->port_mutex); 1694 1695 if (ddi_copyout(dev_data, fioctl.list, 1696 (sizeof (struct device_data)) * fioctl.listlen, mode)) { 1697 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen); 1698 return (EFAULT); 1699 } 
1700 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen); 1701 1702 #ifdef _MULTI_DATAMODEL 1703 switch (ddi_model_convert_from(mode & FMODELS)) { 1704 case DDI_MODEL_ILP32: { 1705 struct fcp32_ioctl f32_ioctl; 1706 1707 f32_ioctl.fp_minor = fioctl.fp_minor; 1708 f32_ioctl.listlen = fioctl.listlen; 1709 f32_ioctl.list = (caddr32_t)(long)fioctl.list; 1710 if (ddi_copyout((void *)&f32_ioctl, (void *)data, 1711 sizeof (struct fcp32_ioctl), mode)) { 1712 return (EFAULT); 1713 } 1714 break; 1715 } 1716 case DDI_MODEL_NONE: 1717 if (ddi_copyout((void *)&fioctl, (void *)data, 1718 sizeof (struct fcp_ioctl), mode)) { 1719 return (EFAULT); 1720 } 1721 break; 1722 } 1723 #else /* _MULTI_DATAMODEL */ 1724 1725 if (ddi_copyout((void *)&fioctl, (void *)data, 1726 sizeof (struct fcp_ioctl), mode)) { 1727 return (EFAULT); 1728 } 1729 #endif /* _MULTI_DATAMODEL */ 1730 1731 return (0); 1732 } 1733 1734 /* 1735 * Fetch the target mappings (path, etc.) for all LUNs 1736 * on this port. 1737 */ 1738 /* ARGSUSED */ 1739 static int 1740 fcp_get_target_mappings(struct fcp_ioctl *data, 1741 int mode, int *rval) 1742 { 1743 struct fcp_port *pptr; 1744 fc_hba_target_mappings_t *mappings; 1745 fc_hba_mapping_entry_t *map; 1746 struct fcp_tgt *ptgt = NULL; 1747 struct fcp_lun *plun = NULL; 1748 int i, mapIndex, mappingSize; 1749 int listlen; 1750 struct fcp_ioctl fioctl; 1751 char *path; 1752 fcp_ent_addr_t sam_lun_addr; 1753 1754 #ifdef _MULTI_DATAMODEL 1755 switch (ddi_model_convert_from(mode & FMODELS)) { 1756 case DDI_MODEL_ILP32: { 1757 struct fcp32_ioctl f32_ioctl; 1758 1759 if (ddi_copyin((void *)data, (void *)&f32_ioctl, 1760 sizeof (struct fcp32_ioctl), mode)) { 1761 return (EFAULT); 1762 } 1763 fioctl.fp_minor = f32_ioctl.fp_minor; 1764 fioctl.listlen = f32_ioctl.listlen; 1765 fioctl.list = (caddr_t)(long)f32_ioctl.list; 1766 break; 1767 } 1768 case DDI_MODEL_NONE: 1769 if (ddi_copyin((void *)data, (void *)&fioctl, 1770 sizeof (struct fcp_ioctl), mode)) { 1771 return (EFAULT); 
		}
		break;
	}

#else	/* _MULTI_DATAMODEL */
	if (ddi_copyin((void *)data, (void *)&fioctl,
	    sizeof (struct fcp_ioctl), mode)) {
		return (EFAULT);
	}
#endif	/* _MULTI_DATAMODEL */

	/*
	 * Right now we can assume that the minor number matches with
	 * this instance of fp. If this changes we will need to
	 * revisit this logic.
	 */
	mutex_enter(&fcp_global_mutex);
	pptr = fcp_port_head;
	while (pptr) {
		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
			break;
		} else {
			pptr = pptr->port_next;
		}
	}
	mutex_exit(&fcp_global_mutex);
	if (pptr == NULL) {
		cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
		    fioctl.fp_minor);
		return (ENXIO);
	}


	/* We use listlen to show the total buffer size */
	mappingSize = fioctl.listlen;

	/* Now calculate how many mapping entries will fit */
	listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
	    - sizeof (fc_hba_target_mappings_t);
	if (listlen <= 0) {
		cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
		return (ENXIO);
	}
	listlen = listlen / sizeof (fc_hba_mapping_entry_t);

	/*
	 * NOTE(review): KM_SLEEP allocations never return NULL, so this
	 * check is dead code — kept as-is; confirm before removing.
	 */
	if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
		return (ENOMEM);
	}
	mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;

	/* Now get to work */
	mapIndex = 0;

	mutex_enter(&pptr->port_mutex);
	/* Loop through all targets on this port */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {

			mutex_enter(&ptgt->tgt_mutex);

			/* Loop through all LUNs on this target */
			for (plun = ptgt->tgt_lun; plun != NULL;
			    plun = plun->lun_next) {
				if (plun->lun_state & FCP_LUN_OFFLINE) {
					continue;
				}

				path = fcp_get_lun_path(plun);
				if (path == NULL) {
					continue;
				}

				/*
				 * Buffer full: keep counting so numLuns
				 * tells the caller how much space is
				 * really needed.
				 */
				if (mapIndex >= listlen) {
					mapIndex ++;
					kmem_free(path, MAXPATHLEN);
					continue;
				}
				map = &mappings->entries[mapIndex++];
				bcopy(path, map->targetDriver,
				    sizeof (map->targetDriver));
				map->d_id = ptgt->tgt_d_id;
				map->busNumber = 0;
				map->targetNumber = ptgt->tgt_d_id;
				map->osLUN = plun->lun_num;

				/*
				 * We had swapped lun when we stored it in
				 * lun_addr. We need to swap it back before
				 * returning it to user land
				 */

				sam_lun_addr.ent_addr_0 =
				    BE_16(plun->lun_addr.ent_addr_0);
				sam_lun_addr.ent_addr_1 =
				    BE_16(plun->lun_addr.ent_addr_1);
				sam_lun_addr.ent_addr_2 =
				    BE_16(plun->lun_addr.ent_addr_2);
				sam_lun_addr.ent_addr_3 =
				    BE_16(plun->lun_addr.ent_addr_3);

				bcopy(&sam_lun_addr, &map->samLUN,
				    FCP_LUN_SIZE);
				bcopy(ptgt->tgt_node_wwn.raw_wwn,
				    map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
				bcopy(ptgt->tgt_port_wwn.raw_wwn,
				    map->PortWWN.raw_wwn, sizeof (la_wwn_t));

				if (plun->lun_guid) {

					/* convert ascii wwn to bytes */
					fcp_ascii_to_wwn(plun->lun_guid,
					    map->guid, sizeof (map->guid));

					if ((sizeof (map->guid)) <
					    plun->lun_guid_size / 2) {
						cmn_err(CE_WARN,
						    "fcp_get_target_mappings:"
						    "guid copy space "
						    "insufficient."
						    "Copy Truncation - "
						    "available %d; need %d",
						    (int)sizeof (map->guid),
						    (int)
						    plun->lun_guid_size / 2);
					}
				}
				kmem_free(path, MAXPATHLEN);
			}
			mutex_exit(&ptgt->tgt_mutex);
		}
	}
	mutex_exit(&pptr->port_mutex);
	mappings->numLuns = mapIndex;

	if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
		kmem_free(mappings, mappingSize);
		return (EFAULT);
	}
	kmem_free(mappings, mappingSize);

#ifdef	_MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct fcp32_ioctl f32_ioctl;

		f32_ioctl.fp_minor = fioctl.fp_minor;
		f32_ioctl.listlen = fioctl.listlen;
		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
		    sizeof (struct fcp32_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyout((void *)&fioctl, (void *)data,
		    sizeof (struct fcp_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
#else	/* _MULTI_DATAMODEL */

	if (ddi_copyout((void *)&fioctl, (void *)data,
	    sizeof (struct fcp_ioctl), mode)) {
		return (EFAULT);
	}
#endif	/* _MULTI_DATAMODEL */

	return (0);
}

/*
 * fcp_setup_scsi_ioctl
 *	Setup handler for the "scsi passthru" style of
 *	ioctl for FCP.  See "fcp_util.h" for data structure
 *	definition.
 *
 * Input:
 *	u_fscsi	= ioctl data (user address space)
 *	mode	= See ioctl(9E)
 *
 * Output:
 *	u_fscsi	= ioctl data (user address space)
 *	rval	= return value - see ioctl(9E)
 *
 * Returns:
 *	0	= OK
 *	EAGAIN	= See errno.h
 *	EBUSY	= See errno.h
 *	EFAULT	= See errno.h
 *	EINTR	= See errno.h
 *	EINVAL	= See errno.h
 *	EIO	= See errno.h
 *	ENOMEM	= See errno.h
 *	ENXIO	= See errno.h
 *
 * Context:
 *	Kernel context.
 */
/* ARGSUSED */
static int
fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
    int mode, int *rval)
{
	int			ret		= 0;
	int			temp_ret;
	caddr_t			k_cdbbufaddr	= NULL;
	caddr_t			k_bufaddr	= NULL;
	caddr_t			k_rqbufaddr	= NULL;
	caddr_t			u_cdbbufaddr;
	caddr_t			u_bufaddr;
	caddr_t			u_rqbufaddr;
	struct fcp_scsi_cmd	k_fscsi;

	/*
	 * Get fcp_scsi_cmd array element from user address space
	 */
	if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
	    != 0) {
		return (ret);
	}


	/*
	 * Even though kmem_alloc() checks the validity of the
	 * buffer length, this check is needed when the
	 * kmem_flags set and the zero buffer length is passed.
	 */
	if ((k_fscsi.scsi_cdblen <= 0) ||
	    (k_fscsi.scsi_buflen <= 0) ||
	    (k_fscsi.scsi_rqlen <= 0)) {
		return (EINVAL);
	}

	/*
	 * Allocate data for fcp_scsi_cmd pointer fields.
	 *
	 * KM_NOSLEEP is used so an absurd user-supplied length simply
	 * fails the allocation instead of blocking; the lengths were
	 * verified positive above.
	 */
	if (ret == 0) {
		k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
		k_bufaddr = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
		k_rqbufaddr = kmem_alloc(k_fscsi.scsi_rqlen, KM_NOSLEEP);

		if (k_cdbbufaddr == NULL ||
		    k_bufaddr == NULL ||
		    k_rqbufaddr == NULL) {
			/*
			 * Partial allocations are released by the
			 * unconditional kmem_free() calls below.
			 */
			ret = ENOMEM;
		}
	}

	/*
	 * Get fcp_scsi_cmd pointer fields from user
	 * address space.  The user-space addresses are remembered in
	 * u_* so they can be restored into k_fscsi before the final
	 * copyout of the command structure.
	 */
	if (ret == 0) {
		u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
		u_bufaddr = k_fscsi.scsi_bufaddr;
		u_rqbufaddr = k_fscsi.scsi_rqbufaddr;

		if (ddi_copyin(u_cdbbufaddr,
		    k_cdbbufaddr,
		    k_fscsi.scsi_cdblen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyin(u_bufaddr,
		    k_bufaddr,
		    k_fscsi.scsi_buflen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyin(u_rqbufaddr,
		    k_rqbufaddr,
		    k_fscsi.scsi_rqlen,
		    mode)) {
			ret = EFAULT;
		}
	}

	/*
	 * Send scsi command (blocking)
	 */
	if (ret == 0) {
		/*
		 * Prior to sending the scsi command, the
		 * fcp_scsi_cmd data structure must contain kernel,
		 * not user, addresses.
		 */
		k_fscsi.scsi_cdbbufaddr = k_cdbbufaddr;
		k_fscsi.scsi_bufaddr = k_bufaddr;
		k_fscsi.scsi_rqbufaddr = k_rqbufaddr;

		ret = fcp_send_scsi_ioctl(&k_fscsi);

		/*
		 * After sending the scsi command, the
		 * fcp_scsi_cmd data structure must contain user,
		 * not kernel, addresses.  This is done even when the
		 * send failed, so the final fcp_copyout_scsi_cmd()
		 * never exposes kernel addresses to user land.
		 */
		k_fscsi.scsi_cdbbufaddr = u_cdbbufaddr;
		k_fscsi.scsi_bufaddr = u_bufaddr;
		k_fscsi.scsi_rqbufaddr = u_rqbufaddr;
	}

	/*
	 * Put fcp_scsi_cmd pointer fields to user address space
	 */
	if (ret == 0) {
		if (ddi_copyout(k_cdbbufaddr,
		    u_cdbbufaddr,
		    k_fscsi.scsi_cdblen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyout(k_bufaddr,
		    u_bufaddr,
		    k_fscsi.scsi_buflen,
		    mode)) {
			ret = EFAULT;
		} else if (ddi_copyout(k_rqbufaddr,
		    u_rqbufaddr,
		    k_fscsi.scsi_rqlen,
		    mode)) {
			ret = EFAULT;
		}
	}

	/*
	 * Free data for fcp_scsi_cmd pointer fields.  Runs on every
	 * path, including partial-allocation failure above.
	 */
	if (k_cdbbufaddr != NULL) {
		kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
	}
	if (k_bufaddr != NULL) {
		kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
	}
	if (k_rqbufaddr != NULL) {
		kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
	}

	/*
	 * Put fcp_scsi_cmd array element to user address space.
	 * A copyout failure here overrides any earlier status.
	 */
	temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
	if (temp_ret != 0) {
		ret = temp_ret;
	}

	/*
	 * Return status
	 */
	return (ret);
}


/*
 * fcp_copyin_scsi_cmd
 *	Copy in fcp_scsi_cmd data structure from user address space.
 *	The data may be in 32 bit or 64 bit modes.
2129 * 2130 * Input: 2131 * base_addr = from address (user address space) 2132 * mode = See ioctl(9E) and ddi_copyin(9F) 2133 * 2134 * Output: 2135 * fscsi = to address (kernel address space) 2136 * 2137 * Returns: 2138 * 0 = OK 2139 * EFAULT = Error 2140 * 2141 * Context: 2142 * Kernel context. 2143 */ 2144 static int 2145 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode) 2146 { 2147 #ifdef _MULTI_DATAMODEL 2148 struct fcp32_scsi_cmd f32scsi; 2149 2150 switch (ddi_model_convert_from(mode & FMODELS)) { 2151 case DDI_MODEL_ILP32: 2152 /* 2153 * Copy data from user address space 2154 */ 2155 if (ddi_copyin((void *)base_addr, 2156 &f32scsi, 2157 sizeof (struct fcp32_scsi_cmd), 2158 mode)) { 2159 return (EFAULT); 2160 } 2161 /* 2162 * Convert from 32 bit to 64 bit 2163 */ 2164 FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi); 2165 break; 2166 case DDI_MODEL_NONE: 2167 /* 2168 * Copy data from user address space 2169 */ 2170 if (ddi_copyin((void *)base_addr, 2171 fscsi, 2172 sizeof (struct fcp_scsi_cmd), 2173 mode)) { 2174 return (EFAULT); 2175 } 2176 break; 2177 } 2178 #else /* _MULTI_DATAMODEL */ 2179 /* 2180 * Copy data from user address space 2181 */ 2182 if (ddi_copyin((void *)base_addr, 2183 fscsi, 2184 sizeof (struct fcp_scsi_cmd), 2185 mode)) { 2186 return (EFAULT); 2187 } 2188 #endif /* _MULTI_DATAMODEL */ 2189 2190 return (0); 2191 } 2192 2193 2194 /* 2195 * fcp_copyout_scsi_cmd 2196 * Copy out fcp_scsi_cmd data structure to user address space. 2197 * The data may be in 32 bit or 64 bit modes. 2198 * 2199 * Input: 2200 * fscsi = to address (kernel address space) 2201 * mode = See ioctl(9E) and ddi_copyin(9F) 2202 * 2203 * Output: 2204 * base_addr = from address (user address space) 2205 * 2206 * Returns: 2207 * 0 = OK 2208 * EFAULT = Error 2209 * 2210 * Context: 2211 * Kernel context. 
2212 */ 2213 static int 2214 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode) 2215 { 2216 #ifdef _MULTI_DATAMODEL 2217 struct fcp32_scsi_cmd f32scsi; 2218 2219 switch (ddi_model_convert_from(mode & FMODELS)) { 2220 case DDI_MODEL_ILP32: 2221 /* 2222 * Convert from 64 bit to 32 bit 2223 */ 2224 FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi); 2225 /* 2226 * Copy data to user address space 2227 */ 2228 if (ddi_copyout(&f32scsi, 2229 (void *)base_addr, 2230 sizeof (struct fcp32_scsi_cmd), 2231 mode)) { 2232 return (EFAULT); 2233 } 2234 break; 2235 case DDI_MODEL_NONE: 2236 /* 2237 * Copy data to user address space 2238 */ 2239 if (ddi_copyout(fscsi, 2240 (void *)base_addr, 2241 sizeof (struct fcp_scsi_cmd), 2242 mode)) { 2243 return (EFAULT); 2244 } 2245 break; 2246 } 2247 #else /* _MULTI_DATAMODEL */ 2248 /* 2249 * Copy data to user address space 2250 */ 2251 if (ddi_copyout(fscsi, 2252 (void *)base_addr, 2253 sizeof (struct fcp_scsi_cmd), 2254 mode)) { 2255 return (EFAULT); 2256 } 2257 #endif /* _MULTI_DATAMODEL */ 2258 2259 return (0); 2260 } 2261 2262 2263 /* 2264 * fcp_send_scsi_ioctl 2265 * Sends the SCSI command in blocking mode. 2266 * 2267 * Input: 2268 * fscsi = SCSI command data structure 2269 * 2270 * Output: 2271 * fscsi = SCSI command data structure 2272 * 2273 * Returns: 2274 * 0 = OK 2275 * EAGAIN = See errno.h 2276 * EBUSY = See errno.h 2277 * EINTR = See errno.h 2278 * EINVAL = See errno.h 2279 * EIO = See errno.h 2280 * ENOMEM = See errno.h 2281 * ENXIO = See errno.h 2282 * 2283 * Context: 2284 * Kernel context. 
 */
static int
fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
{
	struct fcp_lun	*plun		= NULL;
	struct fcp_port	*pptr		= NULL;
	struct fcp_tgt	*ptgt		= NULL;
	fc_packet_t	*fpkt		= NULL;
	struct fcp_ipkt	*icmd		= NULL;
	int		target_created	= FALSE;
	fc_frame_hdr_t	*hp;
	struct fcp_cmd	fcp_cmd;
	struct fcp_cmd	*fcmd;
	union scsi_cdb	*scsi_cdb;
	la_wwn_t	*wwn_ptr;
	int		nodma;
	struct fcp_rsp	*rsp;
	struct fcp_rsp_info	*rsp_info;
	caddr_t		rsp_sense;
	int		buf_len;
	int		info_len;
	int		sense_len;
	struct scsi_extended_sense	*sense_to = NULL;
	timeout_id_t	tid;
	uint8_t		reconfig_lun = FALSE;
	uint8_t		reconfig_pending = FALSE;
	uint8_t		scsi_cmd;
	int		rsp_len;
	int		cmd_index;
	int		fc_status;
	int		pkt_state;
	int		pkt_action;
	int		pkt_reason;
	/*
	 * xport_retval starts as "not FC_SUCCESS" so the sema wait and
	 * response parsing below only run after fc_ulp_transport() has
	 * actually been called and succeeded.
	 */
	int		ret, xport_retval = ~FC_SUCCESS;
	int		lcount;
	int		tcount;
	int		reconfig_status;
	int		port_busy = FALSE;
	uchar_t		*lun_string;

	/*
	 * Check valid SCSI command: the first CDB byte must appear in
	 * the scsi_ioctl_list[] white-list, otherwise ret stays EINVAL.
	 */
	scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
	ret = EINVAL;
	for (cmd_index = 0;
	    cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
	    ret != 0;
	    cmd_index++) {
		/*
		 * First byte of CDB is the SCSI command
		 */
		if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
			ret = 0;
		}
	}

	/*
	 * Check inputs: only the read direction is supported and the
	 * CDB must fit in the FCP command payload.
	 */
	if (fscsi->scsi_flags != FCP_SCSI_READ) {
		ret = EINVAL;
	} else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
		/* no larger than */
		ret = EINVAL;
	}


	/*
	 * Find FC port by instance number.
	 */
	if (ret == 0) {
		/*
		 * Acquire global mutex
		 */
		mutex_enter(&fcp_global_mutex);

		pptr = fcp_port_head;
		while (pptr) {
			if (pptr->port_instance ==
			    (uint32_t)fscsi->scsi_fc_port_num) {
				break;
			} else {
				pptr = pptr->port_next;
			}
		}

		if (pptr == NULL) {
			ret = ENXIO;
		} else {
			/*
			 * fc_ulp_busy_port can raise power
			 * so, we must not hold any mutexes involved in PM
			 */
			mutex_exit(&fcp_global_mutex);
			ret = fc_ulp_busy_port(pptr->port_fp_handle);
		}

		if (ret == 0) {

			/* remember port is busy, so we will release later */
			port_busy = TRUE;

			/*
			 * If there is a reconfiguration in progress, wait
			 * for it to complete.
			 */

			fcp_reconfig_wait(pptr);

			/* reacquire mutexes in order */
			mutex_enter(&fcp_global_mutex);
			mutex_enter(&pptr->port_mutex);

			/*
			 * Will port accept DMA?
			 */
			nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
			    ? 1 : 0;

			/*
			 * If init or offline, device not known
			 *
			 * If we are discovering (onlining), we can
			 * NOT obviously provide reliable data about
			 * devices until it is complete
			 */
			if (pptr->port_state & (FCP_STATE_INIT |
			    FCP_STATE_OFFLINE)) {
				ret = ENXIO;
			} else if (pptr->port_state & FCP_STATE_ONLINING) {
				ret = EBUSY;
			} else {
				/*
				 * Find target from pwwn
				 *
				 * The wwn must be put into a local
				 * variable to ensure alignment.
				 */
				wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
				ptgt = fcp_lookup_target(pptr,
				    (uchar_t *)wwn_ptr);

				/*
				 * If target not found, create it on the
				 * caller's behalf (PLOGI/PRLI are issued
				 * inside fcp_port_create_tgt).
				 */
				if (ptgt == NULL) {
					/*
					 * Note: Still have global &
					 * port mutexes
					 */
					mutex_exit(&pptr->port_mutex);
					ptgt = fcp_port_create_tgt(pptr,
					    wwn_ptr, &ret, &fc_status,
					    &pkt_state, &pkt_action,
					    &pkt_reason);
					mutex_enter(&pptr->port_mutex);

					fscsi->scsi_fc_status = fc_status;
					fscsi->scsi_pkt_state =
					    (uchar_t)pkt_state;
					fscsi->scsi_pkt_reason = pkt_reason;
					fscsi->scsi_pkt_action =
					    (uchar_t)pkt_action;

					if (ptgt != NULL) {
						target_created = TRUE;
					} else if (ret == 0) {
						ret = ENOMEM;
					}
				}

				if (ret == 0) {
					/*
					 * Acquire target
					 */
					mutex_enter(&ptgt->tgt_mutex);

					/*
					 * If target is mark or busy,
					 * then target can not be used
					 */
					if (ptgt->tgt_state &
					    (FCP_TGT_MARK |
					    FCP_TGT_BUSY)) {
						ret = EBUSY;
					} else {
						/*
						 * Mark target as busy
						 */
						ptgt->tgt_state |=
						    FCP_TGT_BUSY;
					}

					/*
					 * Release target.  lcount/tcount
					 * snapshot the link/change counts
					 * for the later icmd allocation.
					 */
					lcount = pptr->port_link_cnt;
					tcount = ptgt->tgt_change_cnt;
					mutex_exit(&ptgt->tgt_mutex);
				}
			}

			/*
			 * Release port
			 */
			mutex_exit(&pptr->port_mutex);
		}

		/*
		 * Release global mutex
		 */
		mutex_exit(&fcp_global_mutex);
	}

	if (ret == 0) {
		uint64_t belun = BE_64(fscsi->scsi_lun);

		/*
		 * If it's a target device, find lun from pwwn
		 * The wwn must be put into a local
		 * variable to ensure alignment.
		 */
		mutex_enter(&pptr->port_mutex);
		wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
		if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
			/* this is not a target */
			fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
			ret = ENXIO;
		} else if ((belun << 16) != 0) {
			/*
			 * Since fcp only support PD and LU addressing method
			 * so far, the last 6 bytes of a valid LUN are expected
			 * to be filled with 00h.
			 */
			fscsi->scsi_fc_status = FC_INVALID_LUN;
			cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
			    " method 0x%02x with LUN number 0x%016" PRIx64,
			    (uint8_t)(belun >> 62), belun);
			ret = ENXIO;
		} else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
		    (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
			/*
			 * This is a SCSI target, but no LUN at this
			 * address.
			 *
			 * In the future, we may want to send this to
			 * the target, and let it respond
			 * appropriately
			 */
			ret = ENXIO;
		}
		mutex_exit(&pptr->port_mutex);
	}

	/*
	 * Finished grabbing external resources
	 * Allocate internal packet (icmd)
	 */
	if (ret == 0) {
		/*
		 * Calc rsp len assuming rsp info included
		 */
		rsp_len = sizeof (struct fcp_rsp) +
		    sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;

		icmd = fcp_icmd_alloc(pptr, ptgt,
		    sizeof (struct fcp_cmd),
		    rsp_len,
		    fscsi->scsi_buflen,
		    nodma,
		    lcount,			/* ipkt_link_cnt */
		    tcount,			/* ipkt_change_cnt */
		    0,				/* cause */
		    FC_INVALID_RSCN_COUNT);	/* invalidate the count */

		if (icmd == NULL) {
			ret = ENOMEM;
		} else {
			/*
			 * Setup internal packet as sema sync
			 */
			fcp_ipkt_sema_init(icmd);
		}
	}

	if (ret == 0) {
		/*
		 * Init fpkt pointer for use.
		 */

		fpkt = icmd->ipkt_fpkt;

		fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
		fpkt->pkt_tran_type = FC_PKT_FCP_READ;	/* only rd for now */
		fpkt->pkt_timeout = fscsi->scsi_timeout;

		/*
		 * Init fcmd pointer for use by SCSI command.  When the
		 * FCA has no DMA space the command is built directly in
		 * pkt_cmd, otherwise in a local staging copy that is
		 * pushed out via FCP_CP_OUT below.
		 */

		if (nodma) {
			fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
		} else {
			fcmd = &fcp_cmd;
		}
		bzero(fcmd, sizeof (struct fcp_cmd));
		ptgt = plun->lun_tgt;

		lun_string = (uchar_t *)&fscsi->scsi_lun;

		fcmd->fcp_ent_addr.ent_addr_0 =
		    BE_16(*(uint16_t *)&(lun_string[0]));
		fcmd->fcp_ent_addr.ent_addr_1 =
		    BE_16(*(uint16_t *)&(lun_string[2]));
		fcmd->fcp_ent_addr.ent_addr_2 =
		    BE_16(*(uint16_t *)&(lun_string[4]));
		fcmd->fcp_ent_addr.ent_addr_3 =
		    BE_16(*(uint16_t *)&(lun_string[6]));

		/*
		 * Setup internal packet(icmd)
		 */
		icmd->ipkt_lun		= plun;
		icmd->ipkt_restart	= 0;
		icmd->ipkt_retries	= 0;
		icmd->ipkt_opcode	= 0;

		/*
		 * Init the frame HEADER Pointer for use
		 */
		hp = &fpkt->pkt_cmd_fhdr;

		hp->s_id	= pptr->port_id;
		hp->d_id	= ptgt->tgt_d_id;
		hp->r_ctl	= R_CTL_COMMAND;
		hp->type	= FC_TYPE_SCSI_FCP;
		hp->f_ctl	= F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->rsvd	= 0;
		hp->seq_id	= 0;
		hp->seq_cnt	= 0;
		hp->ox_id	= 0xffff;
		hp->rx_id	= 0xffff;
		hp->ro		= 0;

		fcmd->fcp_cntl.cntl_qtype	= FCP_QTYPE_SIMPLE;
		fcmd->fcp_cntl.cntl_read_data	= 1;	/* only rd for now */
		fcmd->fcp_cntl.cntl_write_data	= 0;
		fcmd->fcp_data_len	= fscsi->scsi_buflen;

		scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
		bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
		    fscsi->scsi_cdblen);

		if (!nodma) {
			FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
			    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
		}

		/*
		 * Send SCSI command to FC transport, unless the target
		 * state changed since the counts were snapshotted.
		 */

		if (ret == 0) {
			mutex_enter(&ptgt->tgt_mutex);

			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				mutex_exit(&ptgt->tgt_mutex);
				fscsi->scsi_fc_status = xport_retval =
				    fc_ulp_transport(pptr->port_fp_handle,
				    fpkt);
				if (fscsi->scsi_fc_status != FC_SUCCESS) {
					ret = EIO;
				}
			} else {
				mutex_exit(&ptgt->tgt_mutex);
				ret = EBUSY;
			}
		}
	}

	/*
	 * Wait for completion only if fc_ulp_transport was called and it
	 * returned a success. This is the only time callback will happen.
	 * Otherwise, there is no point in waiting
	 */
	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
		ret = fcp_ipkt_sema_wait(icmd);
	}

	/*
	 * Copy data to IOCTL data structures
	 */
	rsp = NULL;
	if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
		rsp = (struct fcp_rsp *)fpkt->pkt_resp;

		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!SCSI command to d_id=0x%x lun=0x%x"
			    " failed, Bad FCP response values:"
			    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
			    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
			    ptgt->tgt_d_id, plun->lun_num,
			    rsp->reserved_0, rsp->reserved_1,
			    rsp->fcp_u.fcp_status.reserved_0,
			    rsp->fcp_u.fcp_status.reserved_1,
			    rsp->fcp_response_len, rsp->fcp_sense_len);

			ret = EIO;
		}
	}

	if ((ret == 0) && (rsp != NULL)) {
		/*
		 * Calc response lengths
		 */
		sense_len = 0;
		info_len = 0;

		if (rsp->fcp_u.fcp_status.rsp_len_set) {
			info_len = rsp->fcp_response_len;
		}

		rsp_info = (struct fcp_rsp_info *)
		    ((uint8_t *)rsp + sizeof (struct fcp_rsp));

		/*
		 * Get SCSI status
		 */
		fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
		/*
		 * If a lun was just added or removed and the next command
		 * comes through this interface, we need to capture the check
		 * condition so we can discover the new topology.
		 */
		if (fscsi->scsi_bufstatus != STATUS_GOOD &&
		    rsp->fcp_u.fcp_status.sense_len_set) {
			sense_len = rsp->fcp_sense_len;
			rsp_sense = (caddr_t)((uint8_t *)rsp_info + info_len);
			sense_to = (struct scsi_extended_sense *)rsp_sense;
			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
			    (FCP_SENSE_NO_LUN(sense_to))) {
				reconfig_lun = TRUE;
			}
		}

		if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
		    (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
			if (reconfig_lun == FALSE) {
				reconfig_status =
				    fcp_is_reconfig_needed(ptgt, fpkt);
			}

			/*
			 * NOTE: when reconfig_lun is TRUE the || below
			 * short-circuits, so reconfig_status is only read
			 * on the path where it was assigned above.
			 */
			if ((reconfig_lun == TRUE) ||
			    (reconfig_status == TRUE)) {
				mutex_enter(&ptgt->tgt_mutex);
				if (ptgt->tgt_tid == NULL) {
					/*
					 * Either we've been notified the
					 * REPORT_LUN data has changed, or
					 * we've determined on our own that
					 * we're out of date. Kick off
					 * rediscovery.
					 */
					tid = timeout(fcp_reconfigure_luns,
					    (caddr_t)ptgt, drv_usectohz(1));

					ptgt->tgt_tid = tid;
					ptgt->tgt_state |= FCP_TGT_BUSY;
					ret = EBUSY;
					reconfig_pending = TRUE;
				}
				mutex_exit(&ptgt->tgt_mutex);
			}
		}

		/*
		 * Calc residuals and buffer lengths
		 */

		if (ret == 0) {
			buf_len = fscsi->scsi_buflen;
			fscsi->scsi_bufresid = 0;
			if (rsp->fcp_u.fcp_status.resid_under) {
				if (rsp->fcp_resid <= fscsi->scsi_buflen) {
					fscsi->scsi_bufresid = rsp->fcp_resid;
				} else {
					cmn_err(CE_WARN, "fcp: bad residue %x "
					    "for txfer len %x", rsp->fcp_resid,
					    fscsi->scsi_buflen);
					fscsi->scsi_bufresid =
					    fscsi->scsi_buflen;
				}
				buf_len -= fscsi->scsi_bufresid;
			}
			if (rsp->fcp_u.fcp_status.resid_over) {
				fscsi->scsi_bufresid = -rsp->fcp_resid;
			}

			/*
			 * NOTE(review): scsi_rqresid is computed before
			 * sense_len is clamped, so it goes negative when
			 * the device returns more sense than the caller's
			 * buffer holds — presumably intentional as a
			 * truncation indicator; confirm against consumers.
			 */
			fscsi->scsi_rqresid	= fscsi->scsi_rqlen - sense_len;
			if (fscsi->scsi_rqlen < sense_len) {
				sense_len = fscsi->scsi_rqlen;
			}

			fscsi->scsi_fc_rspcode	= 0;
			if (rsp->fcp_u.fcp_status.rsp_len_set) {
				fscsi->scsi_fc_rspcode	= rsp_info->rsp_code;
			}
			fscsi->scsi_pkt_state	= fpkt->pkt_state;
			fscsi->scsi_pkt_action	= fpkt->pkt_action;
			fscsi->scsi_pkt_reason	= fpkt->pkt_reason;

			/*
			 * Copy data and request sense
			 *
			 * Data must be copied by using the FCP_CP_IN macro.
			 * This will ensure the proper byte order since the data
			 * is being copied directly from the memory mapped
			 * device register.
			 *
			 * The response (and request sense) will be in the
			 * correct byte order. No special copy is necessary.
			 */

			if (buf_len) {
				FCP_CP_IN(fpkt->pkt_data,
				    fscsi->scsi_bufaddr,
				    fpkt->pkt_data_acc,
				    buf_len);
			}
			/*
			 * NOTE(review): when sense_len is 0, rsp_sense was
			 * never assigned; the bcopy length is 0 so nothing
			 * is copied, but the pointer read is lint-worthy.
			 */
			bcopy((void *)rsp_sense,
			    (void *)fscsi->scsi_rqbufaddr,
			    sense_len);
		}
	}

	/*
	 * Cleanup transport data structures if icmd was alloc-ed
	 * So, cleanup happens in the same thread that icmd was alloc-ed
	 */
	if (icmd != NULL) {
		fcp_ipkt_sema_cleanup(icmd);
	}

	/* restore pm busy/idle status */
	if (port_busy) {
		fc_ulp_idle_port(pptr->port_fp_handle);
	}

	/*
	 * Cleanup target.  if a reconfig is pending, don't clear the BUSY
	 * flag, it'll be cleared when the reconfig is complete.
	 */
	if ((ptgt != NULL) && !reconfig_pending) {
		/*
		 * If target was created,
		 * (both branches currently perform the identical
		 * de-busy operation; kept separate in the original)
		 */
		if (target_created) {
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_state &= ~FCP_TGT_BUSY;
			mutex_exit(&ptgt->tgt_mutex);
		} else {
			/*
			 * De-mark target as busy
			 */
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_state &= ~FCP_TGT_BUSY;
			mutex_exit(&ptgt->tgt_mutex);
		}
	}
	return (ret);
}


/*
 * fcp_is_reconfig_needed
 *	Decide, from a REPORT_LUN response held in fpkt, whether the set
 *	of LUNs known for ptgt is stale and rediscovery should run.
 *	Returns TRUE/FALSE.
 */
static int
fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
    fc_packet_t	*fpkt)
{
	uchar_t			*lun_string;
	uint16_t		lun_num, i;
	int			num_luns;
	int			actual_luns;
	int			num_masked_luns;
	int			lun_buflen;
	struct fcp_lun		*plun	= NULL;
	struct fcp_reportlun_resp	*report_lun;
	uint8_t			reconfig_needed = FALSE;
	uint8_t			lun_exists = FALSE;
	fcp_port_t		*pptr	= ptgt->tgt_port;

	report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);

	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
	    fpkt->pkt_datalen);

	/* get number of luns (which is supplied as LUNS * 8) */
	num_luns = BE_32(report_lun->num_lun) >> 3;

	/*
	 * Figure out exactly how many lun strings our response buffer
	 * can hold.
	 */
	lun_buflen = (fpkt->pkt_datalen -
	    2 * sizeof (uint32_t)) / sizeof (longlong_t);

	/*
	 * Is our response buffer full or not? We don't want to
	 * potentially walk beyond the number of luns we have.
	 */
	if (num_luns <= lun_buflen) {
		actual_luns = num_luns;
	} else {
		actual_luns = lun_buflen;
	}

	mutex_enter(&ptgt->tgt_mutex);

	/* Scan each lun to see if we have masked it. */
	num_masked_luns = 0;
	if (fcp_lun_blacklist != NULL) {
		for (i = 0; i < actual_luns; i++) {
			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
			switch (lun_string[0] & 0xC0) {
			case FCP_LUN_ADDRESSING:
			case FCP_PD_ADDRESSING:
			case FCP_VOLUME_ADDRESSING:
				lun_num = ((lun_string[0] & 0x3F) << 8)
				    | lun_string[1];
				if (fcp_should_mask(&ptgt->tgt_port_wwn,
				    lun_num) == TRUE) {
					num_masked_luns++;
				}
				break;
			default:
				break;
			}
		}
	}

	/*
	 * The quick and easy check.  If the number of LUNs reported
	 * doesn't match the number we currently know about, we need
	 * to reconfigure.
	 */
	if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
		mutex_exit(&ptgt->tgt_mutex);
		kmem_free(report_lun, fpkt->pkt_datalen);
		return (TRUE);
	}

	/*
	 * If the quick and easy check doesn't turn up anything, we walk
	 * the list of luns from the REPORT_LUN response and look for
	 * any luns we don't know about.  If we find one, we know we need
	 * to reconfigure. We will skip LUNs that are masked because of the
	 * blacklist.
	 */
	for (i = 0; i < actual_luns; i++) {
		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
		lun_exists = FALSE;
		switch (lun_string[0] & 0xC0) {
		case FCP_LUN_ADDRESSING:
		case FCP_PD_ADDRESSING:
		case FCP_VOLUME_ADDRESSING:
			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];

			if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
			    &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
				lun_exists = TRUE;
				break;
			}

			for (plun = ptgt->tgt_lun; plun;
			    plun = plun->lun_next) {
				if (plun->lun_num == lun_num) {
					lun_exists = TRUE;
					break;
				}
			}
			break;
		default:
			break;
		}

		if (lun_exists == FALSE) {
			reconfig_needed = TRUE;
			break;
		}
	}

	mutex_exit(&ptgt->tgt_mutex);
	kmem_free(report_lun, fpkt->pkt_datalen);

	return (reconfig_needed);
}

/*
 * This function is called by fcp_handle_page83 and uses inquiry response data
 * stored in plun->lun_inq to determine whether or not a device is a member of
 * the table fcp_symmetric_disk_table_size. We return 0 if it is in the table,
 * otherwise 1.
 */
static int
fcp_symmetric_device_probe(struct fcp_lun *plun)
{
	struct scsi_inquiry	*stdinq = &plun->lun_inq;
	char			*devidptr;
	int			i, len;

	/*
	 * Compare the device's INQUIRY vendor id against each table
	 * entry; a prefix match (entry length) means "symmetric".
	 */
	for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
		devidptr = fcp_symmetric_disk_table[i];
		len = (int)strlen(devidptr);

		if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
			return (0);
		}
	}
	return (1);
}


/*
 * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl
 * It basically returns the current count of # of state change callbacks
 * i.e the value of tgt_change_cnt.
3011 * 3012 * INPUT: 3013 * fcp_ioctl.fp_minor -> The minor # of the fp port 3014 * fcp_ioctl.listlen -> 1 3015 * fcp_ioctl.list -> Pointer to a 32 bit integer 3016 */ 3017 /*ARGSUSED2*/ 3018 static int 3019 fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval) 3020 { 3021 int ret; 3022 uint32_t link_cnt; 3023 struct fcp_ioctl fioctl; 3024 struct fcp_port *pptr = NULL; 3025 3026 if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl, 3027 &pptr)) != 0) { 3028 return (ret); 3029 } 3030 3031 ASSERT(pptr != NULL); 3032 3033 if (fioctl.listlen != 1) { 3034 return (EINVAL); 3035 } 3036 3037 mutex_enter(&pptr->port_mutex); 3038 if (pptr->port_state & FCP_STATE_OFFLINE) { 3039 mutex_exit(&pptr->port_mutex); 3040 return (ENXIO); 3041 } 3042 3043 /* 3044 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded): 3045 * When the fcp initially attaches to the port and there are nothing 3046 * hanging out of the port or if there was a repeat offline state change 3047 * callback (refer fcp_statec_callback() FC_STATE_OFFLINE case). 3048 * In the latter case, port_tmp_cnt will be non-zero and that is how we 3049 * will differentiate the 2 cases. 
3050 */ 3051 if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) { 3052 mutex_exit(&pptr->port_mutex); 3053 return (ENXIO); 3054 } 3055 3056 link_cnt = pptr->port_link_cnt; 3057 mutex_exit(&pptr->port_mutex); 3058 3059 if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) { 3060 return (EFAULT); 3061 } 3062 3063 #ifdef _MULTI_DATAMODEL 3064 switch (ddi_model_convert_from(mode & FMODELS)) { 3065 case DDI_MODEL_ILP32: { 3066 struct fcp32_ioctl f32_ioctl; 3067 3068 f32_ioctl.fp_minor = fioctl.fp_minor; 3069 f32_ioctl.listlen = fioctl.listlen; 3070 f32_ioctl.list = (caddr32_t)(long)fioctl.list; 3071 if (ddi_copyout((void *)&f32_ioctl, (void *)data, 3072 sizeof (struct fcp32_ioctl), mode)) { 3073 return (EFAULT); 3074 } 3075 break; 3076 } 3077 case DDI_MODEL_NONE: 3078 if (ddi_copyout((void *)&fioctl, (void *)data, 3079 sizeof (struct fcp_ioctl), mode)) { 3080 return (EFAULT); 3081 } 3082 break; 3083 } 3084 #else /* _MULTI_DATAMODEL */ 3085 3086 if (ddi_copyout((void *)&fioctl, (void *)data, 3087 sizeof (struct fcp_ioctl), mode)) { 3088 return (EFAULT); 3089 } 3090 #endif /* _MULTI_DATAMODEL */ 3091 3092 return (0); 3093 } 3094 3095 /* 3096 * This function copies the fcp_ioctl structure passed in from user land 3097 * into kernel land. Handles 32 bit applications. 
3098 */ 3099 /*ARGSUSED*/ 3100 static int 3101 fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval, 3102 struct fcp_ioctl *fioctl, struct fcp_port **pptr) 3103 { 3104 struct fcp_port *t_pptr; 3105 3106 #ifdef _MULTI_DATAMODEL 3107 switch (ddi_model_convert_from(mode & FMODELS)) { 3108 case DDI_MODEL_ILP32: { 3109 struct fcp32_ioctl f32_ioctl; 3110 3111 if (ddi_copyin((void *)data, (void *)&f32_ioctl, 3112 sizeof (struct fcp32_ioctl), mode)) { 3113 return (EFAULT); 3114 } 3115 fioctl->fp_minor = f32_ioctl.fp_minor; 3116 fioctl->listlen = f32_ioctl.listlen; 3117 fioctl->list = (caddr_t)(long)f32_ioctl.list; 3118 break; 3119 } 3120 case DDI_MODEL_NONE: 3121 if (ddi_copyin((void *)data, (void *)fioctl, 3122 sizeof (struct fcp_ioctl), mode)) { 3123 return (EFAULT); 3124 } 3125 break; 3126 } 3127 3128 #else /* _MULTI_DATAMODEL */ 3129 if (ddi_copyin((void *)data, (void *)fioctl, 3130 sizeof (struct fcp_ioctl), mode)) { 3131 return (EFAULT); 3132 } 3133 #endif /* _MULTI_DATAMODEL */ 3134 3135 /* 3136 * Right now we can assume that the minor number matches with 3137 * this instance of fp. If this changes we will need to 3138 * revisit this logic. 3139 */ 3140 mutex_enter(&fcp_global_mutex); 3141 t_pptr = fcp_port_head; 3142 while (t_pptr) { 3143 if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) { 3144 break; 3145 } else { 3146 t_pptr = t_pptr->port_next; 3147 } 3148 } 3149 *pptr = t_pptr; 3150 mutex_exit(&fcp_global_mutex); 3151 if (t_pptr == NULL) { 3152 return (ENXIO); 3153 } 3154 3155 return (0); 3156 } 3157 3158 /* 3159 * Function: fcp_port_create_tgt 3160 * 3161 * Description: As the name suggest this function creates the target context 3162 * specified by the the WWN provided by the caller. If the 3163 * creation goes well and the target is known by fp/fctl a PLOGI 3164 * followed by a PRLI are issued. 3165 * 3166 * Argument: pptr fcp port structure 3167 * pwwn WWN of the target 3168 * ret_val Address of the return code. 
It could be:
 *				EIO, ENOMEM or 0.
 *		fc_status	PLOGI or PRLI status completion
 *		fc_pkt_state	PLOGI or PRLI state completion
 *		fc_pkt_reason	PLOGI or PRLI reason completion
 *		fc_pkt_action	PLOGI or PRLI action completion
 *
 * Return Value: NULL if it failed
 *		 Target structure address if it succeeds
 */
static struct fcp_tgt *
fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
    int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
{
	struct fcp_tgt	*ptgt = NULL;
	fc_portmap_t	devlist;
	int		lcount;
	int		error;

	/*
	 * NOTE(review): this function releases and re-acquires
	 * fcp_global_mutex around the PLOGI/PRLI (see below), so the
	 * caller is presumably expected to hold it on entry — confirm
	 * against callers (e.g. fcp_send_scsi_ioctl).
	 */
	*ret_val = 0;

	/*
	 * Check FC port device & get port map
	 */
	if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
	    &error, 1) == NULL) {
		*ret_val = EIO;
	} else {
		if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
		    &devlist) != FC_SUCCESS) {
			*ret_val = EIO;
		}
	}

	/* Set port map flags */
	devlist.map_type = PORT_DEVICE_USER_CREATE;

	/* Allocate target */
	if (*ret_val == 0) {
		lcount = pptr->port_link_cnt;
		ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			*ret_val = ENOMEM;
		} else {
			/* Setup target from the fp/fctl port map entry */
			mutex_enter(&ptgt->tgt_mutex);

			ptgt->tgt_statec_cause	= FCP_CAUSE_TGT_CHANGE;
			ptgt->tgt_tmp_cnt	= 1;
			ptgt->tgt_d_id		= devlist.map_did.port_id;
			ptgt->tgt_hard_addr	=
			    devlist.map_hard_addr.hard_addr;
			ptgt->tgt_pd_handle	= devlist.map_pd;
			ptgt->tgt_fca_dev	= NULL;

			bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
			    FC_WWN_SIZE);
			bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
			    FC_WWN_SIZE);

			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	/*
	 * Release global mutex for PLOGI and PRLI: both block waiting
	 * for completion and must not be issued under the global lock.
	 */
	mutex_exit(&fcp_global_mutex);

	/* Send PLOGI (If necessary) */
	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}

	/* Send PRLI (If necessary) */
	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_prli(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}

	/* Re-acquire the global mutex before returning to the caller */
	mutex_enter(&fcp_global_mutex);

	return (ptgt);
}

/*
 * Function: fcp_tgt_send_plogi
 *
 * Description: This function sends a PLOGI to the target specified by the
 *		caller and waits till it completes.
 *
 * Argument:	ptgt		Target to send the plogi to.
 *		fc_status	Status returned by fp/fctl in the PLOGI request.
 *		fc_pkt_state	State returned by fp/fctl in the PLOGI request.
 *		fc_pkt_reason	Reason returned by fp/fctl in the PLOGI request.
 *		fc_pkt_action	Action returned by fp/fctl in the PLOGI request.
 *
 * Return Value: 0
 *		ENOMEM
 *		EIO
 *
 * Context:	User context.
 */
static int
fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
    int *fc_pkt_reason, int *fc_pkt_action)
{
    struct fcp_port	*pptr;
    struct fcp_ipkt	*icmd;
    struct fc_packet	*fpkt;
    fc_frame_hdr_t	*hp;
    struct la_els_logi	logi;
    int	tcount;
    int	lcount;
    int	ret, login_retval = ~FC_SUCCESS;

    ret = 0;

    pptr = ptgt->tgt_port;

    /* Snapshot link/target change counters for the icmd allocation. */
    lcount = pptr->port_link_cnt;
    tcount = ptgt->tgt_change_cnt;

    /* Alloc internal packet (cmd and resp both sized for an ELS LOGI) */
    icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
        sizeof (la_els_logi_t), 0,
        pptr->port_state & FCP_STATE_FCA_IS_NODMA,
        lcount, tcount, 0, FC_INVALID_RSCN_COUNT);

    if (icmd == NULL) {
        ret = ENOMEM;
    } else {
        /*
         * Setup internal packet as sema sync: the completion callback
         * will post icmd->ipkt_sema, on which we block below.
         */
        fcp_ipkt_sema_init(icmd);

        /*
         * Setup internal packet (icmd)
         */
        icmd->ipkt_lun = NULL;
        icmd->ipkt_restart = 0;
        icmd->ipkt_retries = 0;
        icmd->ipkt_opcode = LA_ELS_PLOGI;

        /*
         * Setup fc_packet
         */
        fpkt = icmd->ipkt_fpkt;

        fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
        fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
        fpkt->pkt_timeout = FCP_ELS_TIMEOUT;

        /*
         * Setup FC frame header
         */
        hp = &fpkt->pkt_cmd_fhdr;

        hp->s_id = pptr->port_id;	/* source ID */
        hp->d_id = ptgt->tgt_d_id;	/* dest ID */
        hp->r_ctl = R_CTL_ELS_REQ;
        hp->type = FC_TYPE_EXTENDED_LS;
        hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
        hp->seq_id = 0;
        hp->rsvd = 0;
        hp->df_ctl = 0;
        hp->seq_cnt = 0;
        hp->ox_id = 0xffff;	/* i.e. none */
        hp->rx_id = 0xffff;	/* i.e. none */
        hp->ro = 0;

        /*
         * Setup PLOGI payload and copy it into the (possibly DMA)
         * command buffer.
         */
        bzero(&logi, sizeof (struct la_els_logi));
        logi.ls_code.ls_code = LA_ELS_PLOGI;

        FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
            fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));

        /*
         * Send PLOGI
         */
        *fc_status = login_retval =
            fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
        if (*fc_status != FC_SUCCESS) {
            ret = EIO;
        }
    }

    /*
     * Wait for completion.  Only reached when icmd was allocated AND
     * fc_ulp_login() succeeded, so fpkt is guaranteed initialized here.
     */
    if ((ret == 0) && (login_retval == FC_SUCCESS)) {
        ret = fcp_ipkt_sema_wait(icmd);

        *fc_pkt_state = fpkt->pkt_state;
        *fc_pkt_reason = fpkt->pkt_reason;
        *fc_pkt_action = fpkt->pkt_action;
    }

    /*
     * Cleanup transport data structures if icmd was alloc-ed AND if there
     * is going to be no callback (i.e if fc_ulp_login() failed).
     * Otherwise, cleanup happens in callback routine.
     *
     * NOTE(review): the code actually cleans up whenever icmd != NULL --
     * including the success path, where the callback has already fired
     * and sema_wait() has returned, so the cleanup is safe; the comment
     * above appears stale rather than the code wrong.
     */
    if (icmd != NULL) {
        fcp_ipkt_sema_cleanup(icmd);
    }

    return (ret);
}

/*
 * Function: fcp_tgt_send_prli
 *
 * Description: Does nothing as of today.
 *
 * Argument:	ptgt		Target to send the prli to.
 *		fc_status	Status returned by fp/fctl in the PRLI request.
 *		fc_pkt_state	State returned by fp/fctl in the PRLI request.
 *		fc_pkt_reason	Reason returned by fp/fctl in the PRLI request.
 *		fc_pkt_action	Action returned by fp/fctl in the PRLI request.
 *
 * Return Value: 0
 */
/*ARGSUSED*/
static int
fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
    int *fc_pkt_reason, int *fc_pkt_action)
{
    /* Placeholder: always reports success without sending anything. */
    return (0);
}

/*
 * Function: fcp_ipkt_sema_init
 *
 * Description: Initializes the semaphore contained in the internal packet.
 *
 * Argument:	icmd	Internal packet the semaphore of which must be
 *			initialized.
 *
 * Return Value: None
 *
 * Context:	User context only.
3415 */ 3416 static void 3417 fcp_ipkt_sema_init(struct fcp_ipkt *icmd) 3418 { 3419 struct fc_packet *fpkt; 3420 3421 fpkt = icmd->ipkt_fpkt; 3422 3423 /* Create semaphore for sync */ 3424 sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL); 3425 3426 /* Setup the completion callback */ 3427 fpkt->pkt_comp = fcp_ipkt_sema_callback; 3428 } 3429 3430 /* 3431 * Function: fcp_ipkt_sema_wait 3432 * 3433 * Description: Wait on the semaphore embedded in the internal packet. The 3434 * semaphore is released in the callback. 3435 * 3436 * Argument: icmd Internal packet to wait on for completion. 3437 * 3438 * Return Value: 0 3439 * EIO 3440 * EBUSY 3441 * EAGAIN 3442 * 3443 * Context: User context only. 3444 * 3445 * This function does a conversion between the field pkt_state of the fc_packet 3446 * embedded in the internal packet (icmd) and the code it returns. 3447 */ 3448 static int 3449 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd) 3450 { 3451 struct fc_packet *fpkt; 3452 int ret; 3453 3454 ret = EIO; 3455 fpkt = icmd->ipkt_fpkt; 3456 3457 /* 3458 * Wait on semaphore 3459 */ 3460 sema_p(&(icmd->ipkt_sema)); 3461 3462 /* 3463 * Check the status of the FC packet 3464 */ 3465 switch (fpkt->pkt_state) { 3466 case FC_PKT_SUCCESS: 3467 ret = 0; 3468 break; 3469 case FC_PKT_LOCAL_RJT: 3470 switch (fpkt->pkt_reason) { 3471 case FC_REASON_SEQ_TIMEOUT: 3472 case FC_REASON_RX_BUF_TIMEOUT: 3473 ret = EAGAIN; 3474 break; 3475 case FC_REASON_PKT_BUSY: 3476 ret = EBUSY; 3477 break; 3478 } 3479 break; 3480 case FC_PKT_TIMEOUT: 3481 ret = EAGAIN; 3482 break; 3483 case FC_PKT_LOCAL_BSY: 3484 case FC_PKT_TRAN_BSY: 3485 case FC_PKT_NPORT_BSY: 3486 case FC_PKT_FABRIC_BSY: 3487 ret = EBUSY; 3488 break; 3489 case FC_PKT_LS_RJT: 3490 case FC_PKT_BA_RJT: 3491 switch (fpkt->pkt_reason) { 3492 case FC_REASON_LOGICAL_BSY: 3493 ret = EBUSY; 3494 break; 3495 } 3496 break; 3497 case FC_PKT_FS_RJT: 3498 switch (fpkt->pkt_reason) { 3499 case FC_REASON_FS_LOGICAL_BUSY: 3500 ret = EBUSY; 3501 break; 
3502 } 3503 break; 3504 } 3505 3506 return (ret); 3507 } 3508 3509 /* 3510 * Function: fcp_ipkt_sema_callback 3511 * 3512 * Description: Registered as the completion callback function for the FC 3513 * transport when the ipkt semaphore is used for sync. This will 3514 * cleanup the used data structures, if necessary and wake up 3515 * the user thread to complete the transaction. 3516 * 3517 * Argument: fpkt FC packet (points to the icmd) 3518 * 3519 * Return Value: None 3520 * 3521 * Context: User context only 3522 */ 3523 static void 3524 fcp_ipkt_sema_callback(struct fc_packet *fpkt) 3525 { 3526 struct fcp_ipkt *icmd; 3527 3528 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private; 3529 3530 /* 3531 * Wake up user thread 3532 */ 3533 sema_v(&(icmd->ipkt_sema)); 3534 } 3535 3536 /* 3537 * Function: fcp_ipkt_sema_cleanup 3538 * 3539 * Description: Called to cleanup (if necessary) the data structures used 3540 * when ipkt sema is used for sync. This function will detect 3541 * whether the caller is the last thread (via counter) and 3542 * cleanup only if necessary. 3543 * 3544 * Argument: icmd Internal command packet 3545 * 3546 * Return Value: None 3547 * 3548 * Context: User context only 3549 */ 3550 static void 3551 fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd) 3552 { 3553 struct fcp_tgt *ptgt; 3554 struct fcp_port *pptr; 3555 3556 ptgt = icmd->ipkt_tgt; 3557 pptr = icmd->ipkt_port; 3558 3559 /* 3560 * Acquire data structure 3561 */ 3562 mutex_enter(&ptgt->tgt_mutex); 3563 3564 /* 3565 * Destroy semaphore 3566 */ 3567 sema_destroy(&(icmd->ipkt_sema)); 3568 3569 /* 3570 * Cleanup internal packet 3571 */ 3572 mutex_exit(&ptgt->tgt_mutex); 3573 fcp_icmd_free(pptr, icmd); 3574 } 3575 3576 /* 3577 * Function: fcp_port_attach 3578 * 3579 * Description: Called by the transport framework to resume, suspend or 3580 * attach a new port. 
3581 * 3582 * Argument: ulph Port handle 3583 * *pinfo Port information 3584 * cmd Command 3585 * s_id Port ID 3586 * 3587 * Return Value: FC_FAILURE or FC_SUCCESS 3588 */ 3589 /*ARGSUSED*/ 3590 static int 3591 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo, 3592 fc_attach_cmd_t cmd, uint32_t s_id) 3593 { 3594 int instance; 3595 int res = FC_FAILURE; /* default result */ 3596 3597 ASSERT(pinfo != NULL); 3598 3599 instance = ddi_get_instance(pinfo->port_dip); 3600 3601 switch (cmd) { 3602 case FC_CMD_ATTACH: 3603 /* 3604 * this port instance attaching for the first time (or after 3605 * being detached before) 3606 */ 3607 if (fcp_handle_port_attach(ulph, pinfo, s_id, 3608 instance) == DDI_SUCCESS) { 3609 res = FC_SUCCESS; 3610 } else { 3611 ASSERT(ddi_get_soft_state(fcp_softstate, 3612 instance) == NULL); 3613 } 3614 break; 3615 3616 case FC_CMD_RESUME: 3617 case FC_CMD_POWER_UP: 3618 /* 3619 * this port instance was attached and the suspended and 3620 * will now be resumed 3621 */ 3622 if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd, 3623 instance) == DDI_SUCCESS) { 3624 res = FC_SUCCESS; 3625 } 3626 break; 3627 3628 default: 3629 /* shouldn't happen */ 3630 FCP_TRACE(fcp_logq, "fcp", 3631 fcp_trace, FCP_BUF_LEVEL_2, 0, 3632 "port_attach: unknown cmdcommand: %d", cmd); 3633 break; 3634 } 3635 3636 /* return result */ 3637 FCP_DTRACE(fcp_logq, "fcp", fcp_trace, 3638 FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res); 3639 3640 return (res); 3641 } 3642 3643 3644 /* 3645 * detach or suspend this port instance 3646 * 3647 * acquires and releases the global mutex 3648 * 3649 * acquires and releases the mutex for this port 3650 * 3651 * acquires and releases the hotplug mutex for this port 3652 */ 3653 /*ARGSUSED*/ 3654 static int 3655 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info, 3656 fc_detach_cmd_t cmd) 3657 { 3658 int flag; 3659 int instance; 3660 struct fcp_port *pptr; 3661 3662 instance = ddi_get_instance(info->port_dip); 3663 pptr = 
ddi_get_soft_state(fcp_softstate, instance); 3664 3665 switch (cmd) { 3666 case FC_CMD_SUSPEND: 3667 FCP_DTRACE(fcp_logq, "fcp", 3668 fcp_trace, FCP_BUF_LEVEL_8, 0, 3669 "port suspend called for port %d", instance); 3670 flag = FCP_STATE_SUSPENDED; 3671 break; 3672 3673 case FC_CMD_POWER_DOWN: 3674 FCP_DTRACE(fcp_logq, "fcp", 3675 fcp_trace, FCP_BUF_LEVEL_8, 0, 3676 "port power down called for port %d", instance); 3677 flag = FCP_STATE_POWER_DOWN; 3678 break; 3679 3680 case FC_CMD_DETACH: 3681 FCP_DTRACE(fcp_logq, "fcp", 3682 fcp_trace, FCP_BUF_LEVEL_8, 0, 3683 "port detach called for port %d", instance); 3684 flag = FCP_STATE_DETACHING; 3685 break; 3686 3687 default: 3688 /* shouldn't happen */ 3689 return (FC_FAILURE); 3690 } 3691 FCP_DTRACE(fcp_logq, "fcp", fcp_trace, 3692 FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning"); 3693 3694 return (fcp_handle_port_detach(pptr, flag, instance)); 3695 } 3696 3697 3698 /* 3699 * called for ioctls on the transport's devctl interface, and the transport 3700 * has passed it to us 3701 * 3702 * this will only be called for device control ioctls (i.e. 
 * hotplugging stuff)
 *
 * return FC_SUCCESS if we decide to claim the ioctl,
 * else return FC_UNCLAIMED
 *
 * *rval is set iff we decide to claim the ioctl
 */
/*ARGSUSED*/
static int
fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
    intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
{
    int			retval = FC_UNCLAIMED;	/* return value */
    struct fcp_port	*pptr = NULL;		/* our soft state */
    struct devctl_iocdata	*dcp = NULL;	/* for devctl */
    dev_info_t		*cdip;
    mdi_pathinfo_t	*pip = NULL;
    char		*ndi_nm;		/* NDI name */
    char		*ndi_addr;		/* NDI addr */
    int			is_mpxio, circ;
    int			devi_entered = 0;	/* holding devi/mdi enter? */
    clock_t		end_time;

    ASSERT(rval != NULL);

    FCP_DTRACE(fcp_logq, "fcp",
        fcp_trace, FCP_BUF_LEVEL_8, 0,
        "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);

    /* if already claimed then forget it */
    if (claimed) {
        /*
         * for now, if this ioctl has already been claimed, then
         * we just ignore it
         */
        return (retval);
    }

    /* get our port info */
    if ((pptr = fcp_get_port(port_handle)) == NULL) {
        fcp_log(CE_WARN, NULL,
            "!fcp:Invalid port handle handle in ioctl");
        *rval = ENXIO;
        return (retval);
    }
    is_mpxio = pptr->port_mpxio;

    /*
     * First switch: validate the command and set up per-command state.
     * cdip/pip are only initialized for the DEVCTL_DEVICE_* cases; the
     * second switch below relies on that pairing.
     */
    switch (cmd) {
    case DEVCTL_BUS_GETSTATE:
    case DEVCTL_BUS_QUIESCE:
    case DEVCTL_BUS_UNQUIESCE:
    case DEVCTL_BUS_RESET:
    case DEVCTL_BUS_RESETALL:

    case DEVCTL_BUS_DEV_CREATE:
        /* bus-level commands only need the devctl handle */
        if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
            return (retval);
        }
        break;

    case DEVCTL_DEVICE_GETSTATE:
    case DEVCTL_DEVICE_OFFLINE:
    case DEVCTL_DEVICE_ONLINE:
    case DEVCTL_DEVICE_REMOVE:
    case DEVCTL_DEVICE_RESET:
        if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
            return (retval);
        }

        ASSERT(dcp != NULL);

        /* ensure we have a name and address */
        if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
            ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_2, 0,
                "ioctl: can't get name (%s) or addr (%s)",
                ndi_nm ? ndi_nm : "<null ptr>",
                ndi_addr ? ndi_addr : "<null ptr>");
            ndi_dc_freehdl(dcp);
            return (retval);
        }


        /* get our child's DIP */
        ASSERT(pptr != NULL);
        if (is_mpxio) {
            mdi_devi_enter(pptr->port_dip, &circ);
        } else {
            ndi_devi_enter(pptr->port_dip, &circ);
        }
        /* devi_entered tracks the enter so "out:" can undo it */
        devi_entered = 1;

        if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
            ndi_addr)) == NULL) {
            /* Look for virtually enumerated devices. */
            pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
            if (pip == NULL ||
                ((cdip = mdi_pi_get_client(pip)) == NULL)) {
                *rval = ENXIO;
                goto out;
            }
        }
        break;

    default:
        *rval = ENOTTY;
        return (retval);
    }

    /* this ioctl is ours -- process it */

    retval = FC_SUCCESS;		/* just means we claim the ioctl */

    /* we assume it will be a success; else we'll set error value */
    *rval = 0;


    FCP_DTRACE(fcp_logq, pptr->port_instbuf,
        fcp_trace, FCP_BUF_LEVEL_8, 0,
        "ioctl: claiming this one");

    /* handle ioctls now */
    switch (cmd) {
    case DEVCTL_DEVICE_GETSTATE:
        ASSERT(cdip != NULL);
        ASSERT(dcp != NULL);
        if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
            *rval = EFAULT;
        }
        break;

    case DEVCTL_DEVICE_REMOVE:
    case DEVCTL_DEVICE_OFFLINE: {
        int			flag = 0;
        int			lcount;
        int			tcount;
        struct fcp_pkt		*head = NULL;
        struct fcp_lun		*plun;
        child_info_t		*cip = CIP(cdip);
        int			all = 1;
        struct fcp_lun		*tplun;
        struct fcp_tgt		*ptgt;

        ASSERT(pptr != NULL);
        ASSERT(cdip != NULL);

        mutex_enter(&pptr->port_mutex);
        /* prefer the pathinfo child if this was an mpxio lookup */
        if (pip != NULL) {
            cip = CIP(pip);
        }
        if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
            mutex_exit(&pptr->port_mutex);
            *rval = ENXIO;
            break;
        }

        /* abort anything still queued against this LUN */
        head = fcp_scan_commands(plun);
        if (head != NULL) {
            fcp_abort_commands(head, LUN_PORT);
        }
        lcount = pptr->port_link_cnt;
        tcount = plun->lun_tgt->tgt_change_cnt;
        mutex_exit(&pptr->port_mutex);

        if (cmd == DEVCTL_DEVICE_REMOVE) {
            flag = NDI_DEVI_REMOVE;
        }

        /*
         * Drop the devi/mdi enter before handing off to the
         * hotplug thread; it takes these locks itself.
         */
        if (is_mpxio) {
            mdi_devi_exit(pptr->port_dip, circ);
        } else {
            ndi_devi_exit(pptr->port_dip, circ);
        }
        devi_entered = 0;

        *rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
            FCP_OFFLINE, lcount, tcount, flag);

        if (*rval != NDI_SUCCESS) {
            *rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
            break;
        }

        fcp_update_offline_flags(plun);

        /* check whether every LUN on this target is now offline */
        ptgt = plun->lun_tgt;
        mutex_enter(&ptgt->tgt_mutex);
        for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
            tplun->lun_next) {
            mutex_enter(&tplun->lun_mutex);
            if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
                all = 0;
            }
            mutex_exit(&tplun->lun_mutex);
        }

        if (all) {
            ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
            /*
             * The user is unconfiguring/offlining the device.
             * If fabric and the auto configuration is set
             * then make sure the user is the only one who
             * can reconfigure the device.
             */
            if (FC_TOP_EXTERNAL(pptr->port_topology) &&
                fcp_enable_auto_configuration) {
                ptgt->tgt_manual_config_only = 1;
            }
        }
        mutex_exit(&ptgt->tgt_mutex);
        break;
    }

    case DEVCTL_DEVICE_ONLINE: {
        int			lcount;
        int			tcount;
        struct fcp_lun		*plun;
        child_info_t		*cip = CIP(cdip);

        ASSERT(cdip != NULL);
        ASSERT(pptr != NULL);

        mutex_enter(&pptr->port_mutex);
        if (pip != NULL) {
            cip = CIP(pip);
        }
        if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
            mutex_exit(&pptr->port_mutex);
            *rval = ENXIO;
            break;
        }
        lcount = pptr->port_link_cnt;
        tcount = plun->lun_tgt->tgt_change_cnt;
        mutex_exit(&pptr->port_mutex);

        /*
         * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
         * to allow the device attach to occur when the device is
         * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
         * from the scsi_probe()).
         */
        mutex_enter(&LUN_TGT->tgt_mutex);
        plun->lun_state |= FCP_LUN_ONLINING;
        mutex_exit(&LUN_TGT->tgt_mutex);

        if (is_mpxio) {
            mdi_devi_exit(pptr->port_dip, circ);
        } else {
            ndi_devi_exit(pptr->port_dip, circ);
        }
        devi_entered = 0;

        *rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
            FCP_ONLINE, lcount, tcount, 0);

        if (*rval != NDI_SUCCESS) {
            /* Reset the FCP_LUN_ONLINING bit */
            mutex_enter(&LUN_TGT->tgt_mutex);
            plun->lun_state &= ~FCP_LUN_ONLINING;
            mutex_exit(&LUN_TGT->tgt_mutex);
            *rval = EIO;
            break;
        }
        mutex_enter(&LUN_TGT->tgt_mutex);
        plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
            FCP_LUN_ONLINING);
        mutex_exit(&LUN_TGT->tgt_mutex);
        break;
    }

    case DEVCTL_BUS_DEV_CREATE: {
        uchar_t		*bytes = NULL;
        uint_t		nbytes;
        struct fcp_tgt	*ptgt = NULL;
        struct fcp_lun	*plun = NULL;
        dev_info_t	*useless_dip = NULL;

        /*
         * The devinfo node created here is only a carrier for the
         * port-WWN property; it is freed at the end of this case.
         */
        *rval = ndi_dc_devi_create(dcp, pptr->port_dip,
            DEVCTL_CONSTRUCT, &useless_dip);
        if (*rval != 0 || useless_dip == NULL) {
            break;
        }

        if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
            DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
            &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
            *rval = EINVAL;
            (void) ndi_devi_free(useless_dip);
            if (bytes != NULL) {
                ddi_prop_free(bytes);
            }
            break;
        }

        *rval = fcp_create_on_demand(pptr, bytes);
        if (*rval == 0) {
            mutex_enter(&pptr->port_mutex);
            ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
            if (ptgt) {
                /*
                 * We now have a pointer to the target that
                 * was created. Lets point to the first LUN on
                 * this new target.
                 */
                mutex_enter(&ptgt->tgt_mutex);

                plun = ptgt->tgt_lun;
                /*
                 * There may be stale/offline LUN entries on
                 * this list (this is by design) and so we have
                 * to make sure we point to the first online
                 * LUN
                 */
                while (plun &&
                    plun->lun_state & FCP_LUN_OFFLINE) {
                    plun = plun->lun_next;
                }

                mutex_exit(&ptgt->tgt_mutex);
            }
            mutex_exit(&pptr->port_mutex);
        }

        if (*rval == 0 && ptgt && plun) {
            mutex_enter(&plun->lun_mutex);
            /*
             * Allow up to fcp_lun_ready_retry seconds to
             * configure all the luns behind the target.
             *
             * The intent here is to allow targets with long
             * reboot/reset-recovery times to become available
             * while limiting the maximum wait time for an
             * unresponsive target.
             */
            end_time = ddi_get_lbolt() +
                SEC_TO_TICK(fcp_lun_ready_retry);

            /*
             * Poll (1s at a time) until every online LUN has
             * attached or the deadline expires.  plun == NULL
             * after the loop means all LUNs were ready.
             */
            while (ddi_get_lbolt() < end_time) {
                retval = FC_SUCCESS;

                /*
                 * The new ndi interfaces for on-demand creation
                 * are inflexible, Do some more work to pass on
                 * a path name of some LUN (design is broken !)
                 */
                if (plun->lun_cip) {
                    if (plun->lun_mpxio == 0) {
                        cdip = DIP(plun->lun_cip);
                    } else {
                        cdip = mdi_pi_get_client(
                            PIP(plun->lun_cip));
                    }
                    if (cdip == NULL) {
                        *rval = ENXIO;
                        break;
                    }

                    if (!i_ddi_devi_attached(cdip)) {
                        /* not ready: sleep 1s and re-check */
                        mutex_exit(&plun->lun_mutex);
                        delay(drv_usectohz(1000000));
                        mutex_enter(&plun->lun_mutex);
                    } else {
                        /*
                         * This Lun is ready, lets
                         * check the next one.
                         */
                        mutex_exit(&plun->lun_mutex);
                        plun = plun->lun_next;
                        while (plun && (plun->lun_state
                            & FCP_LUN_OFFLINE)) {
                            plun = plun->lun_next;
                        }
                        if (!plun) {
                            break;
                        }
                        mutex_enter(&plun->lun_mutex);
                    }
                } else {
                    /*
                     * lun_cip field for a valid lun
                     * should never be NULL. Fail the
                     * command.
                     */
                    *rval = ENXIO;
                    break;
                }
            }
            if (plun) {
                mutex_exit(&plun->lun_mutex);
            } else {
                /* all LUNs ready: hand the dev path back to user */
                char devnm[MAXNAMELEN];
                int nmlen;

                nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
                    ddi_node_name(cdip),
                    ddi_get_name_addr(cdip));

                if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
                    0) {
                    *rval = EFAULT;
                }
            }
        } else {
            int	i;
            char	buf[25];

            /* render the WWN as hex for the failure message */
            for (i = 0; i < FC_WWN_SIZE; i++) {
                (void) sprintf(&buf[i << 1], "%02x", bytes[i]);
            }

            fcp_log(CE_WARN, pptr->port_dip,
                "!Failed to create nodes for pwwn=%s; error=%x",
                buf, *rval);
        }

        (void) ndi_devi_free(useless_dip);
        ddi_prop_free(bytes);
        break;
    }

    case DEVCTL_DEVICE_RESET: {
        struct fcp_lun	*plun;
        child_info_t	*cip = CIP(cdip);

        ASSERT(cdip != NULL);
        ASSERT(pptr != NULL);
        mutex_enter(&pptr->port_mutex);
        if (pip != NULL) {
            cip = CIP(pip);
        }
        if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
            mutex_exit(&pptr->port_mutex);
            *rval = ENXIO;
            break;
        }
        mutex_exit(&pptr->port_mutex);

        /* the LUN must be bound to a SCSI device to be resettable */
        mutex_enter(&plun->lun_tgt->tgt_mutex);
        if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
            mutex_exit(&plun->lun_tgt->tgt_mutex);

            *rval = ENXIO;
            break;
        }

        if (plun->lun_sd == NULL) {
            mutex_exit(&plun->lun_tgt->tgt_mutex);

            *rval = ENXIO;
            break;
        }
        mutex_exit(&plun->lun_tgt->tgt_mutex);

        /*
         * set up ap so that fcp_reset can figure out
         * which target to reset
         */
        if (fcp_scsi_reset(&plun->lun_sd->sd_address,
            RESET_TARGET) == FALSE) {
            *rval = EIO;
        }
        break;
    }

    case DEVCTL_BUS_GETSTATE:
        ASSERT(dcp != NULL);
        ASSERT(pptr != NULL);
        ASSERT(pptr->port_dip != NULL);
        if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
            NDI_SUCCESS) {
            *rval = EFAULT;
        }
        break;

    case DEVCTL_BUS_QUIESCE:
    case DEVCTL_BUS_UNQUIESCE:
        *rval = ENOTSUP;
        break;

    case DEVCTL_BUS_RESET:
    case DEVCTL_BUS_RESETALL:
        ASSERT(pptr != NULL);
        (void) fcp_linkreset(pptr, NULL, KM_SLEEP);
        break;

    default:
        ASSERT(dcp != NULL);
        *rval = ENOTTY;
        break;
    }

    /* all done -- clean up and return */
out:	if (devi_entered) {
        if (is_mpxio) {
            mdi_devi_exit(pptr->port_dip, circ);
        } else {
            ndi_devi_exit(pptr->port_dip, circ);
        }
    }

    if (dcp != NULL) {
        ndi_dc_freehdl(dcp);
    }

    return (retval);
}


/*
 * Unsolicited-ELS callback from fp/fctl.  Claims only unsolicited PRLI
 * requests (and only while the port is not detaching/suspended); all
 * other frames are left for someone else (FC_UNCLAIMED).
 */
/*ARGSUSED*/
static int
fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
    uint32_t claimed)
{
    uchar_t		r_ctl;
    uchar_t		ls_code;
    struct fcp_port	*pptr;

    if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
        return (FC_UNCLAIMED);
    }

    mutex_enter(&pptr->port_mutex);
    if (pptr->port_state & (FCP_STATE_DETACHING |
        FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
        mutex_exit(&pptr->port_mutex);
        return (FC_UNCLAIMED);
    }
    mutex_exit(&pptr->port_mutex);

    r_ctl = buf->ub_frame.r_ctl;

    switch (r_ctl & R_CTL_ROUTING) {
    case R_CTL_EXTENDED_SVC:
        if (r_ctl == R_CTL_ELS_REQ) {
            ls_code = buf->ub_buffer[0];

            switch (ls_code) {
            case LA_ELS_PRLI:
                /*
                 * We really don't care if something fails.
                 * If the PRLI was not sent out, then the
                 * other end will time it out.
                 */
                if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
                    return (FC_SUCCESS);
                }
                return (FC_UNCLAIMED);
                /* NOTREACHED */

            default:
                break;
            }
        }
        /* FALLTHROUGH */

    default:
        return (FC_UNCLAIMED);
    }
}


/*
 * Unsolicited-data callback from fp/fctl: FCP never claims raw data
 * frames here.
 */
/*ARGSUSED*/
static int
fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
    uint32_t claimed)
{
    return (FC_UNCLAIMED);
}

/*
 * Function: fcp_statec_callback
 *
 * Description: The purpose of this function is to handle a port state change.
 *		It is called from fp/fctl and, in a few instances, internally.
 *
 * Argument:	ulph		fp/fctl port handle
 *		port_handle	fcp_port structure
 *		port_state	Physical state of the port
 *		port_top	Topology
 *		*devlist	Pointer to the first entry of a table
 *				containing the remote ports that can be
 *				reached.
 *		dev_cnt		Number of entries pointed by devlist.
 *		port_sid	Port ID of the local port.
 *
 * Return Value: None
 */
/*ARGSUSED*/
static void
fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
    uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
    uint32_t dev_cnt, uint32_t port_sid)
{
    uint32_t		link_count;
    int			map_len = 0;
    struct fcp_port	*pptr;
    fcp_map_tag_t	*map_tag = NULL;

    if ((pptr = fcp_get_port(port_handle)) == NULL) {
        fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
        return;			/* nothing to work with! */
    }

    FCP_TRACE(fcp_logq, pptr->port_instbuf,
        fcp_trace, FCP_BUF_LEVEL_2, 0,
        "fcp_statec_callback: port state/dev_cnt/top ="
        "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
        dev_cnt, port_top);

    mutex_enter(&pptr->port_mutex);

    /*
     * If a thread is in detach, don't do anything.
     */
    if (pptr->port_state & (FCP_STATE_DETACHING |
        FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
        mutex_exit(&pptr->port_mutex);
        return;
    }

    /*
     * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
     * init_pkt is called, it knows whether or not the target's status
     * (or pd) might be changing.  Every exit path taken after this point
     * in the DEVICE_CHANGE branches must clear the flag again.
     */

    if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
        pptr->port_state |= FCP_STATE_IN_CB_DEVC;
    }

    /*
     * the transport doesn't allocate or probe unless being
     * asked to by either the applications or ULPs
     *
     * in cases where the port is OFFLINE at the time of port
     * attach callback and the link comes ONLINE later, for
     * easier automatic node creation (i.e. without you having to
     * go out and run the utility to perform LOGINs) the
     * following conditional is helpful
     */
    pptr->port_phys_state = port_state;

    if (dev_cnt) {
        /*
         * port_mutex is dropped around the KM_NOSLEEP allocation;
         * anything read before this point may change in the window.
         */
        mutex_exit(&pptr->port_mutex);

        map_len = sizeof (*map_tag) * dev_cnt;
        map_tag = kmem_alloc(map_len, KM_NOSLEEP);
        if (map_tag == NULL) {
            fcp_log(CE_WARN, pptr->port_dip,
                "!fcp%d: failed to allocate for map tags; "
                " state change will not be processed",
                pptr->port_instance);

            mutex_enter(&pptr->port_mutex);
            pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
            mutex_exit(&pptr->port_mutex);

            return;
        }

        mutex_enter(&pptr->port_mutex);
    }

    if (pptr->port_id != port_sid) {
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_3, 0,
            "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
            port_sid);
        /*
         * The local port changed ID. It is the first time a port ID
         * is assigned or something drastic happened.  We might have
         * been unplugged and replugged on another loop or fabric port
         * or somebody grabbed the AL_PA we had or somebody rezoned
         * the fabric we were plugged into.
         */
        pptr->port_id = port_sid;
    }

    /*
     * NOTE: each case below is responsible for dropping port_mutex on
     * every one of its own exit paths.
     */
    switch (FC_PORT_STATE_MASK(port_state)) {
    case FC_STATE_OFFLINE:
    case FC_STATE_RESET_REQUESTED:
        /*
         * link has gone from online to offline -- just update the
         * state of this port to BUSY and MARKed to go offline
         */
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_3, 0,
            "link went offline");
        if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
            /*
             * We were offline a while ago and this one
             * seems to indicate that the loop has gone
             * dead forever.
             */
            pptr->port_tmp_cnt += dev_cnt;
            pptr->port_state &= ~FCP_STATE_OFFLINE;
            pptr->port_state |= FCP_STATE_INIT;
            link_count = pptr->port_link_cnt;
            fcp_handle_devices(pptr, devlist, dev_cnt,
                link_count, map_tag, FCP_CAUSE_LINK_DOWN);
        } else {
            pptr->port_link_cnt++;
            ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
            fcp_update_state(pptr, (FCP_LUN_BUSY |
                FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
            if (pptr->port_mpxio) {
                fcp_update_mpxio_path_verifybusy(pptr);
            }
            pptr->port_state |= FCP_STATE_OFFLINE;
            pptr->port_state &=
                ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
            pptr->port_tmp_cnt = 0;
        }
        mutex_exit(&pptr->port_mutex);
        break;

    case FC_STATE_ONLINE:
    case FC_STATE_LIP:
    case FC_STATE_LIP_LBIT_SET:
        /*
         * link has gone from offline to online
         */
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_3, 0,
            "link went online");

        pptr->port_link_cnt++;

        /*
         * Busy-wait (1s polls, dropping port_mutex each time) for
         * outstanding internal packets to drain before proceeding.
         */
        while (pptr->port_ipkt_cnt) {
            mutex_exit(&pptr->port_mutex);
            delay(drv_usectohz(1000000));
            mutex_enter(&pptr->port_mutex);
        }

        pptr->port_topology = port_top;

        /*
         * The state of the targets and luns accessible through this
         * port is updated.
         */
        fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
            FCP_CAUSE_LINK_CHANGE);

        pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
        pptr->port_state |= FCP_STATE_ONLINING;
        pptr->port_tmp_cnt = dev_cnt;
        link_count = pptr->port_link_cnt;

        pptr->port_deadline = fcp_watchdog_time +
            FCP_ICMD_DEADLINE;

        if (!dev_cnt) {
            /*
             * We go directly to the online state if no remote
             * ports were discovered.
             */
            FCP_TRACE(fcp_logq, pptr->port_instbuf,
                fcp_trace, FCP_BUF_LEVEL_3, 0,
                "No remote ports discovered");

            pptr->port_state &= ~FCP_STATE_ONLINING;
            pptr->port_state |= FCP_STATE_ONLINE;
        }

        switch (port_top) {
        case FC_TOP_FABRIC:
        case FC_TOP_PUBLIC_LOOP:
        case FC_TOP_PRIVATE_LOOP:
        case FC_TOP_PT_PT:

            if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
                fcp_retry_ns_registry(pptr, port_sid);
            }

            fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
                map_tag, FCP_CAUSE_LINK_CHANGE);
            break;

        default:
            /*
             * We got here because we were provided with an unknown
             * topology.
             */
            if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
                pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
            }

            pptr->port_tmp_cnt -= dev_cnt;
            fcp_log(CE_WARN, pptr->port_dip,
                "!unknown/unsupported topology (0x%x)", port_top);
            break;
        }
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_3, 0,
            "Notify ssd of the reset to reinstate the reservations");

        scsi_hba_reset_notify_callback(&pptr->port_mutex,
            &pptr->port_reset_notify_listf);

        mutex_exit(&pptr->port_mutex);

        break;

    case FC_STATE_RESET:
        ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
        FCP_TRACE(fcp_logq, pptr->port_instbuf,
            fcp_trace, FCP_BUF_LEVEL_3, 0,
            "RESET state, waiting for Offline/Online state_cb");
        mutex_exit(&pptr->port_mutex);
        break;

    case FC_STATE_DEVICE_CHANGE:
        /*
         * We come here when an application has requested
         * Dynamic node creation/deletion in Fabric connectivity.
         */
        if (pptr->port_state & (FCP_STATE_OFFLINE |
            FCP_STATE_INIT)) {
            /*
             * This case can happen when the FCTL is in the
             * process of giving us on online and the host on
             * the other side issues a PLOGI/PLOGO. Ideally
             * the state changes should be serialized unless
             * they are opposite (online-offline).
             * The transport will give us a final state change
             * so we can ignore this for the time being.
             */
            pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
            mutex_exit(&pptr->port_mutex);
            break;
        }

        if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
            fcp_retry_ns_registry(pptr, port_sid);
        }

        /*
         * Extend the deadline under steady state conditions
         * to provide more time for the device-change-commands
         */
        if (!pptr->port_ipkt_cnt) {
            pptr->port_deadline = fcp_watchdog_time +
                FCP_ICMD_DEADLINE;
        }

        /*
         * There is another race condition here, where if we were
         * in ONLINEING state and a devices in the map logs out,
         * fp will give another state change as DEVICE_CHANGE
         * and OLD. This will result in that target being offlined.
         * The pd_handle is freed. If from the first statec callback
         * we were going to fire a PLOGI/PRLI, the system will
         * panic in fc_ulp_transport with invalid pd_handle.
         * The fix is to check for the link_cnt before issuing
         * any command down.
         */
        fcp_update_targets(pptr, devlist, dev_cnt,
            FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);

        link_count = pptr->port_link_cnt;

        fcp_handle_devices(pptr, devlist, dev_cnt,
            link_count, map_tag, FCP_CAUSE_TGT_CHANGE);

        pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;

        mutex_exit(&pptr->port_mutex);
        break;

    case FC_STATE_TARGET_PORT_RESET:
        if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
            fcp_retry_ns_registry(pptr, port_sid);
        }

        /* Do nothing else */
        mutex_exit(&pptr->port_mutex);
        break;

    default:
        fcp_log(CE_WARN, pptr->port_dip,
            "!Invalid state change=0x%x", port_state);
        mutex_exit(&pptr->port_mutex);
        break;
    }

    if (map_tag) {
        kmem_free(map_tag, map_len);
    }
}

/*
 * Function: fcp_handle_devices
 *
 * Description: This function updates the devices currently known by
 *		walking the list provided by the caller.  The list passed
 *		by the caller is supposed to be the list of reachable
 *		devices.
 *
 * Argument:	*pptr		Fcp port structure.
 *		*devlist	Pointer to the first entry of a table
 *				containing the remote ports that can be
 *				reached.
 *		dev_cnt		Number of entries pointed by devlist.
 *		link_cnt	Link state count.
 *		*map_tag	Array of fcp_map_tag_t structures.
 *		cause		What caused this function to be called.
 *
 * Return Value: None
 *
 * Notes:	The pptr->port_mutex must be held.
4613 */ 4614 static void 4615 fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[], 4616 uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause) 4617 { 4618 int i; 4619 int check_finish_init = 0; 4620 fc_portmap_t *map_entry; 4621 struct fcp_tgt *ptgt = NULL; 4622 4623 FCP_TRACE(fcp_logq, pptr->port_instbuf, 4624 fcp_trace, FCP_BUF_LEVEL_3, 0, 4625 "fcp_handle_devices: called for %d dev(s)", dev_cnt); 4626 4627 if (dev_cnt) { 4628 ASSERT(map_tag != NULL); 4629 } 4630 4631 /* 4632 * The following code goes through the list of remote ports that are 4633 * accessible through this (pptr) local port (The list walked is the 4634 * one provided by the caller which is the list of the remote ports 4635 * currently reachable). It checks if any of them was already 4636 * known by looking for the corresponding target structure based on 4637 * the world wide name. If a target is part of the list it is tagged 4638 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED). 4639 * 4640 * Old comment 4641 * ----------- 4642 * Before we drop port mutex; we MUST get the tags updated; This 4643 * two step process is somewhat slow, but more reliable. 4644 */ 4645 for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) { 4646 map_entry = &(devlist[i]); 4647 4648 /* 4649 * get ptr to this map entry in our port's 4650 * list (if any) 4651 */ 4652 ptgt = fcp_lookup_target(pptr, 4653 (uchar_t *)&(map_entry->map_pwwn)); 4654 4655 if (ptgt) { 4656 map_tag[i] = ptgt->tgt_change_cnt; 4657 if (cause == FCP_CAUSE_LINK_CHANGE) { 4658 ptgt->tgt_aux_state = FCP_TGT_TAGGED; 4659 } 4660 } 4661 } 4662 4663 /* 4664 * At this point we know which devices of the new list were already 4665 * known (The field tgt_aux_state of the target structure has been 4666 * set to FCP_TGT_TAGGED). 4667 * 4668 * The following code goes through the list of targets currently known 4669 * by the local port (the list is actually a hashing table). 
If a 4670 * target is found and is not tagged, it means the target cannot 4671 * be reached anymore through the local port (pptr). It is offlined. 4672 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE. 4673 */ 4674 for (i = 0; i < FCP_NUM_HASH; i++) { 4675 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL; 4676 ptgt = ptgt->tgt_next) { 4677 mutex_enter(&ptgt->tgt_mutex); 4678 if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) && 4679 (cause == FCP_CAUSE_LINK_CHANGE) && 4680 !(ptgt->tgt_state & FCP_TGT_OFFLINE)) { 4681 fcp_offline_target_now(pptr, ptgt, 4682 link_cnt, ptgt->tgt_change_cnt, 0); 4683 } 4684 mutex_exit(&ptgt->tgt_mutex); 4685 } 4686 } 4687 4688 /* 4689 * At this point, the devices that were known but cannot be reached 4690 * anymore, have most likely been offlined. 4691 * 4692 * The following section of code seems to go through the list of 4693 * remote ports that can now be reached. For every single one it 4694 * checks if it is already known or if it is a new port. 4695 */ 4696 for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) { 4697 4698 if (check_finish_init) { 4699 ASSERT(i > 0); 4700 (void) fcp_call_finish_init_held(pptr, ptgt, link_cnt, 4701 map_tag[i - 1], cause); 4702 check_finish_init = 0; 4703 } 4704 4705 /* get a pointer to this map entry */ 4706 map_entry = &(devlist[i]); 4707 4708 /* 4709 * Check for the duplicate map entry flag. If we have marked 4710 * this entry as a duplicate we skip it since the correct 4711 * (perhaps even same) state change will be encountered 4712 * later in the list. 4713 */ 4714 if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) { 4715 continue; 4716 } 4717 4718 /* get ptr to this map entry in our port's list (if any) */ 4719 ptgt = fcp_lookup_target(pptr, 4720 (uchar_t *)&(map_entry->map_pwwn)); 4721 4722 if (ptgt) { 4723 /* 4724 * This device was already known. 
The field 4725 * tgt_aux_state is reset (was probably set to 4726 * FCP_TGT_TAGGED previously in this routine). 4727 */ 4728 ptgt->tgt_aux_state = 0; 4729 FCP_TRACE(fcp_logq, pptr->port_instbuf, 4730 fcp_trace, FCP_BUF_LEVEL_3, 0, 4731 "handle_devices: map did/state/type/flags = " 4732 "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, " 4733 "tgt_state=%d", 4734 map_entry->map_did.port_id, map_entry->map_state, 4735 map_entry->map_type, map_entry->map_flags, 4736 ptgt->tgt_d_id, ptgt->tgt_state); 4737 } 4738 4739 if (map_entry->map_type == PORT_DEVICE_OLD || 4740 map_entry->map_type == PORT_DEVICE_NEW || 4741 map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED || 4742 map_entry->map_type == PORT_DEVICE_CHANGED) { 4743 FCP_TRACE(fcp_logq, pptr->port_instbuf, 4744 fcp_trace, FCP_BUF_LEVEL_2, 0, 4745 "map_type=%x, did = %x", 4746 map_entry->map_type, 4747 map_entry->map_did.port_id); 4748 } 4749 4750 switch (map_entry->map_type) { 4751 case PORT_DEVICE_NOCHANGE: 4752 case PORT_DEVICE_USER_CREATE: 4753 case PORT_DEVICE_USER_LOGIN: 4754 case PORT_DEVICE_NEW: 4755 case PORT_DEVICE_REPORTLUN_CHANGED: 4756 FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1); 4757 4758 if (fcp_handle_mapflags(pptr, ptgt, map_entry, 4759 link_cnt, (ptgt) ? 
map_tag[i] : 0, 4760 cause) == TRUE) { 4761 4762 FCP_TGT_TRACE(ptgt, map_tag[i], 4763 FCP_TGT_TRACE_2); 4764 check_finish_init++; 4765 } 4766 break; 4767 4768 case PORT_DEVICE_OLD: 4769 if (ptgt != NULL) { 4770 FCP_TGT_TRACE(ptgt, map_tag[i], 4771 FCP_TGT_TRACE_3); 4772 4773 mutex_enter(&ptgt->tgt_mutex); 4774 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) { 4775 /* 4776 * Must do an in-line wait for I/Os 4777 * to get drained 4778 */ 4779 mutex_exit(&ptgt->tgt_mutex); 4780 mutex_exit(&pptr->port_mutex); 4781 4782 mutex_enter(&ptgt->tgt_mutex); 4783 while (ptgt->tgt_ipkt_cnt || 4784 fcp_outstanding_lun_cmds(ptgt) 4785 == FC_SUCCESS) { 4786 mutex_exit(&ptgt->tgt_mutex); 4787 delay(drv_usectohz(1000000)); 4788 mutex_enter(&ptgt->tgt_mutex); 4789 } 4790 mutex_exit(&ptgt->tgt_mutex); 4791 4792 mutex_enter(&pptr->port_mutex); 4793 mutex_enter(&ptgt->tgt_mutex); 4794 4795 (void) fcp_offline_target(pptr, ptgt, 4796 link_cnt, map_tag[i], 0, 0); 4797 } 4798 mutex_exit(&ptgt->tgt_mutex); 4799 } 4800 check_finish_init++; 4801 break; 4802 4803 case PORT_DEVICE_USER_DELETE: 4804 case PORT_DEVICE_USER_LOGOUT: 4805 if (ptgt != NULL) { 4806 FCP_TGT_TRACE(ptgt, map_tag[i], 4807 FCP_TGT_TRACE_4); 4808 4809 mutex_enter(&ptgt->tgt_mutex); 4810 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) { 4811 (void) fcp_offline_target(pptr, ptgt, 4812 link_cnt, map_tag[i], 1, 0); 4813 } 4814 mutex_exit(&ptgt->tgt_mutex); 4815 } 4816 check_finish_init++; 4817 break; 4818 4819 case PORT_DEVICE_CHANGED: 4820 if (ptgt != NULL) { 4821 FCP_TGT_TRACE(ptgt, map_tag[i], 4822 FCP_TGT_TRACE_5); 4823 4824 if (fcp_device_changed(pptr, ptgt, 4825 map_entry, link_cnt, map_tag[i], 4826 cause) == TRUE) { 4827 check_finish_init++; 4828 } 4829 } else { 4830 if (fcp_handle_mapflags(pptr, ptgt, 4831 map_entry, link_cnt, 0, cause) == TRUE) { 4832 check_finish_init++; 4833 } 4834 } 4835 break; 4836 4837 default: 4838 fcp_log(CE_WARN, pptr->port_dip, 4839 "!Invalid map_type=0x%x", map_entry->map_type); 4840 check_finish_init++; 
4841 break; 4842 } 4843 } 4844 4845 if (check_finish_init && pptr->port_link_cnt == link_cnt) { 4846 ASSERT(i > 0); 4847 (void) fcp_call_finish_init_held(pptr, ptgt, link_cnt, 4848 map_tag[i-1], cause); 4849 } else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) { 4850 fcp_offline_all(pptr, link_cnt, cause); 4851 } 4852 } 4853 4854 static int 4855 fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause) 4856 { 4857 struct fcp_lun *plun; 4858 struct fcp_port *pptr; 4859 int rscn_count; 4860 int lun0_newalloc; 4861 int ret = TRUE; 4862 4863 ASSERT(ptgt); 4864 pptr = ptgt->tgt_port; 4865 lun0_newalloc = 0; 4866 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) { 4867 /* 4868 * no LUN struct for LUN 0 yet exists, 4869 * so create one 4870 */ 4871 plun = fcp_alloc_lun(ptgt); 4872 if (plun == NULL) { 4873 fcp_log(CE_WARN, pptr->port_dip, 4874 "!Failed to allocate lun 0 for" 4875 " D_ID=%x", ptgt->tgt_d_id); 4876 return (ret); 4877 } 4878 lun0_newalloc = 1; 4879 } 4880 4881 mutex_enter(&ptgt->tgt_mutex); 4882 /* 4883 * consider lun 0 as device not connected if it is 4884 * offlined or newly allocated 4885 */ 4886 if ((plun->lun_state & FCP_LUN_OFFLINE) || lun0_newalloc) { 4887 plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED; 4888 } 4889 plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK); 4890 plun->lun_state &= ~FCP_LUN_OFFLINE; 4891 ptgt->tgt_lun_cnt = 1; 4892 ptgt->tgt_report_lun_cnt = 0; 4893 mutex_exit(&ptgt->tgt_mutex); 4894 4895 rscn_count = fc_ulp_get_rscn_count(pptr->port_fp_handle); 4896 if (fcp_send_scsi(plun, SCMD_REPORT_LUN, 4897 sizeof (struct fcp_reportlun_resp), pptr->port_link_cnt, 4898 ptgt->tgt_change_cnt, cause, rscn_count) != DDI_SUCCESS) { 4899 FCP_TRACE(fcp_logq, pptr->port_instbuf, 4900 fcp_trace, FCP_BUF_LEVEL_3, 0, "!Failed to send REPORTLUN " 4901 "to D_ID=%x", ptgt->tgt_d_id); 4902 } else { 4903 ret = FALSE; 4904 } 4905 4906 return (ret); 4907 } 4908 4909 /* 4910 * Function: fcp_handle_mapflags 4911 * 4912 * Description: This function creates a 
target structure if the ptgt passed
 *		is NULL. It also kicks off the PLOGI if we are not logged
 *		into the target yet or the PRLI if we are logged into the
 *		target already. The rest of the treatment is done in the
 *		callbacks of the PLOGI or PRLI.
 *
 * Argument:	*pptr		FCP Port structure.
 *		*ptgt		Target structure.
 *		*map_entry	Array of fc_portmap_t structures.
 *		link_cnt	Link state count.
 *		tgt_cnt		Target state count.
 *		cause		What caused this function to be called.
 *
 * Return Value: TRUE	Failed
 *		 FALSE	Succeeded
 *
 * Notes:	pptr->port_mutex must be owned.
 */
static int
fcp_handle_mapflags(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
{
	int		lcount;
	int		tcount;
	int		ret = TRUE;
	int		alloc;
	struct fcp_ipkt	*icmd;
	struct fcp_lun	*pseq_lun = NULL;
	uchar_t		opcode;
	int		valid_ptgt_was_passed = FALSE;

	ASSERT(mutex_owned(&pptr->port_mutex));

	/*
	 * This case is possible where the FCTL has come up and done discovery
	 * before FCP was loaded and attached. FCTL would have discovered the
	 * devices and later the ULP came online. In this case ULP's would get
	 * PORT_DEVICE_NOCHANGE but target would be NULL.
	 */
	if (ptgt == NULL) {
		/* don't already have a target */
		mutex_exit(&pptr->port_mutex);
		ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
		mutex_enter(&pptr->port_mutex);

		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			return (ret);
		}
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_statec_cause = cause;
		ptgt->tgt_tmp_cnt = 1;
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		valid_ptgt_was_passed = TRUE;
	}

	/*
	 * Copy in the target parameters
	 */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_d_id = map_entry->map_did.port_id;
	ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
	ptgt->tgt_pd_handle = map_entry->map_pd;
	ptgt->tgt_fca_dev = NULL;

	/* Copy port and node WWNs */
	bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
	    FC_WWN_SIZE);
	bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
	    FC_WWN_SIZE);

	if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
	    (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
	    (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
	    valid_ptgt_was_passed) {
		/*
		 * determine if there are any tape LUNs on this target
		 */
		for (pseq_lun = ptgt->tgt_lun;
		    pseq_lun != NULL;
		    pseq_lun = pseq_lun->lun_next) {
			if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
			    !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
				/*
				 * Tape LUN present and online: clear the
				 * MARK bit and skip rediscovery (TRUE).
				 */
				fcp_update_tgt_state(ptgt, FCP_RESET,
				    FCP_LUN_MARK);
				mutex_exit(&ptgt->tgt_mutex);
				return (ret);
			}
		}
	}

	/*
	 * if UA'REPORT_LUN_CHANGED received,
	 * send out REPORT LUN promptly, skip PLOGI/PRLI process
	 */
	if (map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED) {
		ptgt->tgt_state &= ~(FCP_TGT_OFFLINE | FCP_TGT_MARK);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		ret = fcp_handle_reportlun_changed(ptgt, cause);

		/* Caller expects port_mutex held on return. */
		mutex_enter(&pptr->port_mutex);
		return (ret);
	}

	/*
	 * If ptgt was NULL when this function was entered, then tgt_node_state
	 * was never specifically initialized but zeroed out which means
	 * FCP_TGT_NODE_NONE.
	 */
	switch (ptgt->tgt_node_state) {
	case FCP_TGT_NODE_NONE:
	case FCP_TGT_NODE_ON_DEMAND:
		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    !fcp_enable_auto_configuration &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    fcp_enable_auto_configuration &&
		    (ptgt->tgt_manual_config_only == 1) &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			/*
			 * If auto configuration is set and
			 * the tgt_manual_config_only flag is set then
			 * we only want the user to be able to change
			 * the state through create_on_demand.
			 */
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
		break;

	case FCP_TGT_NODE_PRESENT:
		break;
	}
	/*
	 * If we are booting from a fabric device, make sure we
	 * mark the node state appropriately for this target to be
	 * enumerated
	 */
	if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
		if (bcmp((caddr_t)pptr->port_boot_wwn,
		    (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
		    sizeof (ptgt->tgt_port_wwn)) == 0) {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
	}
	mutex_exit(&ptgt->tgt_mutex);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
	    map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
	    map_entry->map_rscn_info.ulp_rscn_count);

	mutex_enter(&ptgt->tgt_mutex);

	/*
	 * Reset target OFFLINE state and mark the target BUSY
	 */
	ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
	ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);

	/* tgt_cnt == 0 means "use the target's current change count". */
	tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
	lcount = link_cnt;

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/*
	 * if we are already logged in, then we do a PRLI, else
	 * we do a PLOGI first (to get logged in)
	 *
	 * We will not check if we are the PLOGI initiator
	 */
	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;

	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));

	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA, lcount, tcount,
	    cause, map_entry->map_rscn_info.ulp_rscn_count);

	if (icmd == NULL) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
		/*
		 * We've exited port_mutex before calling fcp_icmd_alloc,
		 * we need to make sure we reacquire it before returning.
		 */
		mutex_enter(&pptr->port_mutex);
		return (FALSE);
	}

	/* TRUE is only returned while target is intended skipped */
	ret = FALSE;
	/* discover info about this target */
	if ((fcp_send_els(pptr, ptgt, icmd, opcode,
	    lcount, tcount, cause)) == DDI_SUCCESS) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
	} else {
		fcp_icmd_free(pptr, icmd);
		ret = TRUE;
	}
	mutex_enter(&pptr->port_mutex);

	return (ret);
}

/*
 * Function: fcp_send_els
 *
 * Description: Sends an ELS to the target specified by the caller. Supports
 *		PLOGI and PRLI.
 *
 * Argument:	*pptr		Fcp port.
 *		*ptgt		Target to send the ELS to.
 *		*icmd		Internal packet
 *		opcode		ELS opcode
 *		lcount		Link state change counter
 *		tcount		Target state change counter
 *		cause		What caused the call
 *
 * Return Value:	DDI_SUCCESS
 *			Others
 */
static int
fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
{
	fc_packet_t	*fpkt;
	fc_frame_hdr_t	*hp;
	int		internal = 0;	/* nonzero: icmd allocated here */
	int		alloc;
	int		cmd_len;
	int		resp_len;
	int		res = DDI_FAILURE; /* default result */
	int		rval = DDI_FAILURE;

	ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
	ASSERT(ptgt->tgt_port == pptr);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
	    (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");

	if (opcode == LA_ELS_PLOGI) {
		cmd_len = sizeof (la_els_logi_t);
		resp_len = sizeof (la_els_logi_t);
	} else {
		ASSERT(opcode == LA_ELS_PRLI);
		cmd_len = sizeof (la_els_prli_t);
		resp_len = sizeof (la_els_prli_t);
	}

	if (icmd == NULL) {
		/* No packet supplied by the caller: allocate our own. */
		alloc = FCP_MAX(sizeof (la_els_logi_t),
		    sizeof (la_els_prli_t));
		icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
		    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
		    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
		if (icmd == NULL) {
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
			return (res);
		}
		internal++;
	}
	fpkt = icmd->ipkt_fpkt;

	fpkt->pkt_cmdlen = cmd_len;
	fpkt->pkt_rsplen = resp_len;
	fpkt->pkt_datalen = 0;
	icmd->ipkt_retries = 0;

	/* fill in fpkt info */
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;

	/* get ptr to frame hdr in fpkt */
	hp = &fpkt->pkt_cmd_fhdr;

	/*
	 * fill in frame hdr
	 */
	hp->r_ctl = R_CTL_ELS_REQ;
	hp->s_id = pptr->port_id;	/* source ID */
	hp->d_id = ptgt->tgt_d_id;	/* dest ID */
	hp->type = FC_TYPE_EXTENDED_LS;
	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
	hp->seq_id = 0;
	hp->rsvd = 0;
	hp->df_ctl = 0;
	hp->seq_cnt = 0;
	hp->ox_id = 0xffff;		/* i.e. none */
	hp->rx_id = 0xffff;		/* i.e. none */
	hp->ro = 0;

	/*
	 * at this point we have a filled in cmd pkt
	 *
	 * fill in the respective info, then use the transport to send
	 * the packet
	 *
	 * for a PLOGI call fc_ulp_login(), and
	 * for a PRLI call fc_ulp_issue_els()
	 */
	switch (opcode) {
	case LA_ELS_PLOGI: {
		struct la_els_logi logi;

		bzero(&logi, sizeof (struct la_els_logi));

		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;
		logi.ls_code.ls_code = LA_ELS_PLOGI;
		logi.ls_code.mbz = 0;

		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));

		icmd->ipkt_opcode = LA_ELS_PLOGI;

		/*
		 * Only send if the link/target state this packet was built
		 * against is still current.
		 */
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

			mutex_exit(&pptr->port_mutex);

			rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}

			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);

			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PLOGI");
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "fcp_send_els1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
		}
		break;
	}

	case LA_ELS_PRLI: {
		struct la_els_prli	prli;
		struct fcp_prli		*fprli;

		bzero(&prli, sizeof (struct la_els_prli));

		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;

		/* fill in PRLI cmd ELS fields */
		prli.ls_code = LA_ELS_PRLI;
		prli.page_length = 0x10;	/* huh? */
		prli.payload_length = sizeof (struct la_els_prli);

		icmd->ipkt_opcode = LA_ELS_PRLI;

		/* get ptr to PRLI service params */
		fprli = (struct fcp_prli *)prli.service_params;

		/* fill in service params */
		fprli->type = 0x08;
		fprli->resvd1 = 0;
		fprli->orig_process_assoc_valid = 0;
		fprli->resp_process_assoc_valid = 0;
		fprli->establish_image_pair = 1;
		fprli->resvd2 = 0;
		fprli->resvd3 = 0;
		fprli->obsolete_1 = 0;
		fprli->obsolete_2 = 0;
		fprli->data_overlay_allowed = 0;
		fprli->initiator_fn = 1;
		fprli->confirmed_compl_allowed = 1;

		/* advertise target function only if the ltct ULP is loaded */
		if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
			fprli->target_fn = 1;
		} else {
			fprli->target_fn = 0;
		}

		fprli->retry = 1;
		fprli->read_xfer_rdy_disabled = 1;
		fprli->write_xfer_rdy_disabled = 0;

		FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));

		/* issue the PRLI request */

		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

			mutex_exit(&pptr->port_mutex);

			rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}

			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);

			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PRLI");
		} else {
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
		}
		break;
	}

	default:
		fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
		break;
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: returning %d", res);

	/* only free the packet on failure if we allocated it ourselves */
	if (res != DDI_SUCCESS) {
		if (internal) {
			fcp_icmd_free(pptr, icmd);
		}
	}

	return (res);
}


/*
 * called internally update the state of all of the tgts and each LUN
 * for this port (i.e. each target known to be attached to this port)
 * if they are not already offline
 *
 * must be called with the port mutex owned
 *
 * acquires and releases the target mutexes for each target attached
 * to this port
 */
void
fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
{
	int		i;
	struct fcp_tgt	*ptgt;

	ASSERT(mutex_owned(&pptr->port_mutex));

	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			mutex_enter(&ptgt->tgt_mutex);
			fcp_update_tgt_state(ptgt, FCP_SET, state);
			ptgt->tgt_change_cnt++;
			ptgt->tgt_statec_cause = cause;
			ptgt->tgt_tmp_cnt = 1;
			ptgt->tgt_done = 0;
			mutex_exit(&ptgt->tgt_mutex);
		}
	}
}


/*
 * Counts the targets attached to the port and, if any, runs the
 * finish-init step for each of them. Must be called with port_mutex owned.
 */
static void
fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
{
	int		i;
	int		ndevs;
	struct fcp_tgt	*ptgt;

	ASSERT(mutex_owned(&pptr->port_mutex));

	/* first pass: count targets; nothing to do if there are none */
	for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			ndevs++;
		}
	}

	if (ndevs == 0) {
		return;
	}
	pptr->port_tmp_cnt = ndevs;

	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			(void) fcp_call_finish_init_held(pptr, ptgt,
			    lcount, ptgt->tgt_change_cnt, cause);
		}
	}
}

/*
 * Function: fcp_update_tgt_state
 *
 * Description: This function updates the field tgt_state of a target. That
 *		field is a bitmap and which bit can be set or reset
 *		individually. The action applied to the target state is also
The action applied to the target state is also 5424 * applied to all the LUNs belonging to the target (provided the 5425 * LUN is not offline). A side effect of applying the state 5426 * modification to the target and the LUNs is the field tgt_trace 5427 * of the target and lun_trace of the LUNs is set to zero. 5428 * 5429 * 5430 * Argument: *ptgt Target structure. 5431 * flag Flag indication what action to apply (set/reset). 5432 * state State bits to update. 5433 * 5434 * Return Value: None 5435 * 5436 * Context: Interrupt, Kernel or User context. 5437 * The mutex of the target (ptgt->tgt_mutex) must be owned when 5438 * calling this function. 5439 */ 5440 void 5441 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state) 5442 { 5443 struct fcp_lun *plun; 5444 5445 ASSERT(mutex_owned(&ptgt->tgt_mutex)); 5446 5447 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) { 5448 /* The target is not offline. */ 5449 if (flag == FCP_SET) { 5450 ptgt->tgt_state |= state; 5451 ptgt->tgt_trace = 0; 5452 } else { 5453 ptgt->tgt_state &= ~state; 5454 } 5455 5456 for (plun = ptgt->tgt_lun; plun != NULL; 5457 plun = plun->lun_next) { 5458 if (!(plun->lun_state & FCP_LUN_OFFLINE)) { 5459 /* The LUN is not offline. */ 5460 if (flag == FCP_SET) { 5461 plun->lun_state |= state; 5462 plun->lun_trace = 0; 5463 } else { 5464 plun->lun_state &= ~state; 5465 } 5466 } 5467 } 5468 } 5469 } 5470 5471 /* 5472 * Function: fcp_update_tgt_state 5473 * 5474 * Description: This function updates the field lun_state of a LUN. That 5475 * field is a bitmap and which bit can be set or reset 5476 * individually. 5477 * 5478 * Argument: *plun LUN structure. 5479 * flag Flag indication what action to apply (set/reset). 5480 * state State bits to update. 5481 * 5482 * Return Value: None 5483 * 5484 * Context: Interrupt, Kernel or User context. 5485 * The mutex of the target (ptgt->tgt_mutex) must be owned when 5486 * calling this function. 
5487 */ 5488 void 5489 fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state) 5490 { 5491 struct fcp_tgt *ptgt = plun->lun_tgt; 5492 5493 ASSERT(mutex_owned(&ptgt->tgt_mutex)); 5494 5495 if (!(plun->lun_state & FCP_TGT_OFFLINE)) { 5496 if (flag == FCP_SET) { 5497 plun->lun_state |= state; 5498 } else { 5499 plun->lun_state &= ~state; 5500 } 5501 } 5502 } 5503 5504 /* 5505 * Function: fcp_get_port 5506 * 5507 * Description: This function returns the fcp_port structure from the opaque 5508 * handle passed by the caller. That opaque handle is the handle 5509 * used by fp/fctl to identify a particular local port. That 5510 * handle has been stored in the corresponding fcp_port 5511 * structure. This function is going to walk the global list of 5512 * fcp_port structures till one has a port_fp_handle that matches 5513 * the handle passed by the caller. This function enters the 5514 * mutex fcp_global_mutex while walking the global list and then 5515 * releases it. 5516 * 5517 * Argument: port_handle Opaque handle that fp/fctl uses to identify a 5518 * particular port. 5519 * 5520 * Return Value: NULL Not found. 5521 * Not NULL Pointer to the fcp_port structure. 5522 * 5523 * Context: Interrupt, Kernel or User context. 
5524 */ 5525 static struct fcp_port * 5526 fcp_get_port(opaque_t port_handle) 5527 { 5528 struct fcp_port *pptr; 5529 5530 ASSERT(port_handle != NULL); 5531 5532 mutex_enter(&fcp_global_mutex); 5533 for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) { 5534 if (pptr->port_fp_handle == port_handle) { 5535 break; 5536 } 5537 } 5538 mutex_exit(&fcp_global_mutex); 5539 5540 return (pptr); 5541 } 5542 5543 5544 static void 5545 fcp_unsol_callback(fc_packet_t *fpkt) 5546 { 5547 struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private; 5548 struct fcp_port *pptr = icmd->ipkt_port; 5549 5550 if (fpkt->pkt_state != FC_PKT_SUCCESS) { 5551 caddr_t state, reason, action, expln; 5552 5553 (void) fc_ulp_pkt_error(fpkt, &state, &reason, 5554 &action, &expln); 5555 5556 fcp_log(CE_WARN, pptr->port_dip, 5557 "!couldn't post response to unsolicited request: " 5558 " state=%s reason=%s rx_id=%x ox_id=%x", 5559 state, reason, fpkt->pkt_cmd_fhdr.ox_id, 5560 fpkt->pkt_cmd_fhdr.rx_id); 5561 } 5562 fcp_icmd_free(pptr, icmd); 5563 } 5564 5565 5566 /* 5567 * Perform general purpose preparation of a response to an unsolicited request 5568 */ 5569 static void 5570 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf, 5571 uchar_t r_ctl, uchar_t type) 5572 { 5573 pkt->pkt_cmd_fhdr.r_ctl = r_ctl; 5574 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id; 5575 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id; 5576 pkt->pkt_cmd_fhdr.type = type; 5577 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT; 5578 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id; 5579 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl; 5580 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt; 5581 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id; 5582 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id; 5583 pkt->pkt_cmd_fhdr.ro = 0; 5584 pkt->pkt_cmd_fhdr.rsvd = 0; 5585 pkt->pkt_comp = fcp_unsol_callback; 5586 pkt->pkt_pd = NULL; 5587 pkt->pkt_ub_resp_token = (opaque_t)buf; 5588 } 5589 5590 5591 /*ARGSUSED*/ 
5592 static int 5593 fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf) 5594 { 5595 fc_packet_t *fpkt; 5596 struct la_els_prli prli; 5597 struct fcp_prli *fprli; 5598 struct fcp_ipkt *icmd; 5599 struct la_els_prli *from; 5600 struct fcp_prli *orig; 5601 struct fcp_tgt *ptgt; 5602 int tcount = 0; 5603 int lcount; 5604 5605 from = (struct la_els_prli *)buf->ub_buffer; 5606 orig = (struct fcp_prli *)from->service_params; 5607 if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) != 5608 NULL) { 5609 mutex_enter(&ptgt->tgt_mutex); 5610 tcount = ptgt->tgt_change_cnt; 5611 mutex_exit(&ptgt->tgt_mutex); 5612 } 5613 5614 mutex_enter(&pptr->port_mutex); 5615 lcount = pptr->port_link_cnt; 5616 mutex_exit(&pptr->port_mutex); 5617 5618 if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t), 5619 sizeof (la_els_prli_t), 0, 5620 pptr->port_state & FCP_STATE_FCA_IS_NODMA, 5621 lcount, tcount, 0, FC_INVALID_RSCN_COUNT)) == NULL) { 5622 return (FC_FAILURE); 5623 } 5624 5625 fpkt = icmd->ipkt_fpkt; 5626 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR; 5627 fpkt->pkt_tran_type = FC_PKT_OUTBOUND; 5628 fpkt->pkt_timeout = FCP_ELS_TIMEOUT; 5629 fpkt->pkt_cmdlen = sizeof (la_els_prli_t); 5630 fpkt->pkt_rsplen = 0; 5631 fpkt->pkt_datalen = 0; 5632 5633 icmd->ipkt_opcode = LA_ELS_PRLI; 5634 5635 bzero(&prli, sizeof (struct la_els_prli)); 5636 fprli = (struct fcp_prli *)prli.service_params; 5637 prli.ls_code = LA_ELS_ACC; 5638 prli.page_length = 0x10; 5639 prli.payload_length = sizeof (struct la_els_prli); 5640 5641 /* fill in service params */ 5642 fprli->type = 0x08; 5643 fprli->resvd1 = 0; 5644 fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid; 5645 fprli->orig_process_associator = orig->orig_process_associator; 5646 fprli->resp_process_assoc_valid = 0; 5647 fprli->establish_image_pair = 1; 5648 fprli->resvd2 = 0; 5649 fprli->resvd3 = 0; 5650 fprli->obsolete_1 = 0; 5651 fprli->obsolete_2 = 0; 5652 fprli->data_overlay_allowed = 0; 5653 
fprli->initiator_fn = 1; 5654 fprli->confirmed_compl_allowed = 1; 5655 5656 if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) { 5657 fprli->target_fn = 1; 5658 } else { 5659 fprli->target_fn = 0; 5660 } 5661 5662 fprli->retry = 1; 5663 fprli->read_xfer_rdy_disabled = 1; 5664 fprli->write_xfer_rdy_disabled = 0; 5665 5666 /* save the unsol prli payload first */ 5667 FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp, 5668 fpkt->pkt_resp_acc, sizeof (struct la_els_prli)); 5669 5670 FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd, 5671 fpkt->pkt_cmd_acc, sizeof (struct la_els_prli)); 5672 5673 fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 5674 5675 mutex_enter(&pptr->port_mutex); 5676 if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) { 5677 int rval; 5678 mutex_exit(&pptr->port_mutex); 5679 5680 if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) != 5681 FC_SUCCESS) { 5682 if ((rval == FC_STATEC_BUSY || rval == FC_OFFLINE) && 5683 ptgt != NULL) { 5684 fcp_queue_ipkt(pptr, fpkt); 5685 return (FC_SUCCESS); 5686 } 5687 /* Let it timeout */ 5688 fcp_icmd_free(pptr, icmd); 5689 return (FC_FAILURE); 5690 } 5691 } else { 5692 mutex_exit(&pptr->port_mutex); 5693 fcp_icmd_free(pptr, icmd); 5694 return (FC_FAILURE); 5695 } 5696 5697 (void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token); 5698 5699 return (FC_SUCCESS); 5700 } 5701 5702 /* 5703 * Function: fcp_icmd_alloc 5704 * 5705 * Description: This function allocated a fcp_ipkt structure. The pkt_comp 5706 * field is initialized to fcp_icmd_callback. Sometimes it is 5707 * modified by the caller (such as fcp_send_scsi). The 5708 * structure is also tied to the state of the line and of the 5709 * target at a particular time. That link is established by 5710 * setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount 5711 * and tcount which came respectively from pptr->link_cnt and 5712 * ptgt->tgt_change_cnt. 5713 * 5714 * Argument: *pptr Fcp port. 5715 * *ptgt Target (destination of the command). 
5716 * cmd_len Length of the command. 5717 * resp_len Length of the expected response. 5718 * data_len Length of the data. 5719 * nodma Indicates weither the command and response. 5720 * will be transfer through DMA or not. 5721 * lcount Link state change counter. 5722 * tcount Target state change counter. 5723 * cause Reason that lead to this call. 5724 * 5725 * Return Value: NULL Failed. 5726 * Not NULL Internal packet address. 5727 */ 5728 static struct fcp_ipkt * 5729 fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len, 5730 int resp_len, int data_len, int nodma, int lcount, int tcount, int cause, 5731 uint32_t rscn_count) 5732 { 5733 int dma_setup = 0; 5734 fc_packet_t *fpkt; 5735 struct fcp_ipkt *icmd = NULL; 5736 5737 icmd = kmem_zalloc(sizeof (struct fcp_ipkt) + 5738 pptr->port_dmacookie_sz + pptr->port_priv_pkt_len, 5739 KM_NOSLEEP); 5740 if (icmd == NULL) { 5741 fcp_log(CE_WARN, pptr->port_dip, 5742 "!internal packet allocation failed"); 5743 return (NULL); 5744 } 5745 5746 /* 5747 * initialize the allocated packet 5748 */ 5749 icmd->ipkt_nodma = nodma; 5750 icmd->ipkt_next = icmd->ipkt_prev = NULL; 5751 icmd->ipkt_lun = NULL; 5752 5753 icmd->ipkt_link_cnt = lcount; 5754 icmd->ipkt_change_cnt = tcount; 5755 icmd->ipkt_cause = cause; 5756 5757 mutex_enter(&pptr->port_mutex); 5758 icmd->ipkt_port = pptr; 5759 mutex_exit(&pptr->port_mutex); 5760 5761 /* keep track of amt of data to be sent in pkt */ 5762 icmd->ipkt_cmdlen = cmd_len; 5763 icmd->ipkt_resplen = resp_len; 5764 icmd->ipkt_datalen = data_len; 5765 5766 /* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */ 5767 icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet); 5768 5769 /* set pkt's private ptr to point to cmd pkt */ 5770 icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd; 5771 5772 /* set FCA private ptr to memory just beyond */ 5773 icmd->ipkt_fpkt->pkt_fca_private = (opaque_t) 5774 ((char *)icmd + sizeof (struct fcp_ipkt) + 5775 
pptr->port_dmacookie_sz); 5776 5777 /* get ptr to fpkt substruct and fill it in */ 5778 fpkt = icmd->ipkt_fpkt; 5779 fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd + 5780 sizeof (struct fcp_ipkt)); 5781 5782 if (ptgt != NULL) { 5783 icmd->ipkt_tgt = ptgt; 5784 fpkt->pkt_fca_device = ptgt->tgt_fca_dev; 5785 } 5786 5787 fpkt->pkt_comp = fcp_icmd_callback; 5788 fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR); 5789 fpkt->pkt_cmdlen = cmd_len; 5790 fpkt->pkt_rsplen = resp_len; 5791 fpkt->pkt_datalen = data_len; 5792 5793 /* 5794 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the 5795 * rscn_count as fcp knows down to the transport. If a valid count was 5796 * passed into this function, we allocate memory to actually pass down 5797 * this info. 5798 * 5799 * BTW, if the kmem_zalloc fails, we won't try too hard. This will 5800 * basically mean that fcp will not be able to help transport 5801 * distinguish if a new RSCN has come after fcp was last informed about 5802 * it. In such cases, it might lead to the problem mentioned in CR/bug # 5803 * 5068068 where the device might end up going offline in case of RSCN 5804 * storms. 
5805 */ 5806 fpkt->pkt_ulp_rscn_infop = NULL; 5807 if (rscn_count != FC_INVALID_RSCN_COUNT) { 5808 fpkt->pkt_ulp_rscn_infop = kmem_zalloc( 5809 sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP); 5810 if (fpkt->pkt_ulp_rscn_infop == NULL) { 5811 FCP_TRACE(fcp_logq, pptr->port_instbuf, 5812 fcp_trace, FCP_BUF_LEVEL_6, 0, 5813 "Failed to alloc memory to pass rscn info"); 5814 } 5815 } 5816 5817 if (fpkt->pkt_ulp_rscn_infop != NULL) { 5818 fc_ulp_rscn_info_t *rscnp; 5819 5820 rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop; 5821 rscnp->ulp_rscn_count = rscn_count; 5822 } 5823 5824 if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) { 5825 goto fail; 5826 } 5827 dma_setup++; 5828 5829 /* 5830 * Must hold target mutex across setting of pkt_pd and call to 5831 * fc_ulp_init_packet to ensure the handle to the target doesn't go 5832 * away while we're not looking. 5833 */ 5834 if (ptgt != NULL) { 5835 mutex_enter(&ptgt->tgt_mutex); 5836 fpkt->pkt_pd = ptgt->tgt_pd_handle; 5837 5838 /* ask transport to do its initialization on this pkt */ 5839 if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP) 5840 != FC_SUCCESS) { 5841 FCP_TRACE(fcp_logq, pptr->port_instbuf, 5842 fcp_trace, FCP_BUF_LEVEL_6, 0, 5843 "fc_ulp_init_packet failed"); 5844 mutex_exit(&ptgt->tgt_mutex); 5845 goto fail; 5846 } 5847 mutex_exit(&ptgt->tgt_mutex); 5848 } else { 5849 if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP) 5850 != FC_SUCCESS) { 5851 FCP_TRACE(fcp_logq, pptr->port_instbuf, 5852 fcp_trace, FCP_BUF_LEVEL_6, 0, 5853 "fc_ulp_init_packet failed"); 5854 goto fail; 5855 } 5856 } 5857 5858 mutex_enter(&pptr->port_mutex); 5859 if (pptr->port_state & (FCP_STATE_DETACHING | 5860 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) { 5861 int rval; 5862 5863 mutex_exit(&pptr->port_mutex); 5864 5865 rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt); 5866 ASSERT(rval == FC_SUCCESS); 5867 5868 goto fail; 5869 } 5870 5871 if (ptgt != NULL) { 5872 mutex_enter(&ptgt->tgt_mutex); 
5873 ptgt->tgt_ipkt_cnt++; 5874 mutex_exit(&ptgt->tgt_mutex); 5875 } 5876 5877 pptr->port_ipkt_cnt++; 5878 5879 mutex_exit(&pptr->port_mutex); 5880 5881 return (icmd); 5882 5883 fail: 5884 if (fpkt->pkt_ulp_rscn_infop != NULL) { 5885 kmem_free(fpkt->pkt_ulp_rscn_infop, 5886 sizeof (fc_ulp_rscn_info_t)); 5887 fpkt->pkt_ulp_rscn_infop = NULL; 5888 } 5889 5890 if (dma_setup) { 5891 fcp_free_dma(pptr, icmd); 5892 } 5893 kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len + 5894 (size_t)pptr->port_dmacookie_sz); 5895 5896 return (NULL); 5897 } 5898 5899 /* 5900 * Function: fcp_icmd_free 5901 * 5902 * Description: Frees the internal command passed by the caller. 5903 * 5904 * Argument: *pptr Fcp port. 5905 * *icmd Internal packet to free. 5906 * 5907 * Return Value: None 5908 */ 5909 static void 5910 fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd) 5911 { 5912 struct fcp_tgt *ptgt = icmd->ipkt_tgt; 5913 5914 /* Let the underlying layers do their cleanup. */ 5915 (void) fc_ulp_uninit_packet(pptr->port_fp_handle, 5916 icmd->ipkt_fpkt); 5917 5918 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) { 5919 kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop, 5920 sizeof (fc_ulp_rscn_info_t)); 5921 } 5922 5923 fcp_free_dma(pptr, icmd); 5924 5925 kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len + 5926 (size_t)pptr->port_dmacookie_sz); 5927 5928 mutex_enter(&pptr->port_mutex); 5929 5930 if (ptgt) { 5931 mutex_enter(&ptgt->tgt_mutex); 5932 ptgt->tgt_ipkt_cnt--; 5933 mutex_exit(&ptgt->tgt_mutex); 5934 } 5935 5936 pptr->port_ipkt_cnt--; 5937 mutex_exit(&pptr->port_mutex); 5938 } 5939 5940 /* 5941 * Function: fcp_alloc_dma 5942 * 5943 * Description: Allocated the DMA resources required for the internal 5944 * packet. 5945 * 5946 * Argument: *pptr FCP port. 5947 * *icmd Internal FCP packet. 5948 * nodma Indicates if the Cmd and Resp will be DMAed. 5949 * flags Allocation flags (Sleep or NoSleep). 
5950 * 5951 * Return Value: FC_SUCCESS 5952 * FC_NOMEM 5953 */ 5954 static int 5955 fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd, 5956 int nodma, int flags) 5957 { 5958 int rval; 5959 size_t real_size; 5960 uint_t ccount; 5961 int bound = 0; 5962 int cmd_resp = 0; 5963 fc_packet_t *fpkt; 5964 ddi_dma_cookie_t pkt_data_cookie; 5965 ddi_dma_cookie_t *cp; 5966 uint32_t cnt; 5967 5968 fpkt = &icmd->ipkt_fc_packet; 5969 5970 ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL && 5971 fpkt->pkt_resp_dma == NULL); 5972 5973 icmd->ipkt_nodma = nodma; 5974 5975 if (nodma) { 5976 fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags); 5977 if (fpkt->pkt_cmd == NULL) { 5978 goto fail; 5979 } 5980 5981 fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags); 5982 if (fpkt->pkt_resp == NULL) { 5983 goto fail; 5984 } 5985 } else { 5986 ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen); 5987 5988 rval = fcp_alloc_cmd_resp(pptr, fpkt, flags); 5989 if (rval == FC_FAILURE) { 5990 ASSERT(fpkt->pkt_cmd_dma == NULL && 5991 fpkt->pkt_resp_dma == NULL); 5992 goto fail; 5993 } 5994 cmd_resp++; 5995 } 5996 5997 if ((fpkt->pkt_datalen != 0) && 5998 !(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) { 5999 /* 6000 * set up DMA handle and memory for the data in this packet 6001 */ 6002 if (ddi_dma_alloc_handle(pptr->port_dip, 6003 &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT, 6004 NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) { 6005 goto fail; 6006 } 6007 6008 if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen, 6009 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, 6010 DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data, 6011 &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) { 6012 goto fail; 6013 } 6014 6015 /* was DMA mem size gotten < size asked for/needed ?? 
*/ 6016 if (real_size < fpkt->pkt_datalen) { 6017 goto fail; 6018 } 6019 6020 /* bind DMA address and handle together */ 6021 if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma, 6022 NULL, fpkt->pkt_data, real_size, DDI_DMA_READ | 6023 DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL, 6024 &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) { 6025 goto fail; 6026 } 6027 bound++; 6028 6029 if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) { 6030 goto fail; 6031 } 6032 6033 fpkt->pkt_data_cookie_cnt = ccount; 6034 6035 cp = fpkt->pkt_data_cookie; 6036 *cp = pkt_data_cookie; 6037 cp++; 6038 6039 for (cnt = 1; cnt < ccount; cnt++, cp++) { 6040 ddi_dma_nextcookie(fpkt->pkt_data_dma, 6041 &pkt_data_cookie); 6042 *cp = pkt_data_cookie; 6043 } 6044 6045 } else if (fpkt->pkt_datalen != 0) { 6046 /* 6047 * If it's a pseudo FCA, then it can't support DMA even in 6048 * SCSI data phase. 6049 */ 6050 fpkt->pkt_data = kmem_alloc(fpkt->pkt_datalen, flags); 6051 if (fpkt->pkt_data == NULL) { 6052 goto fail; 6053 } 6054 6055 } 6056 6057 return (FC_SUCCESS); 6058 6059 fail: 6060 if (bound) { 6061 (void) ddi_dma_unbind_handle(fpkt->pkt_data_dma); 6062 } 6063 6064 if (fpkt->pkt_data_dma) { 6065 if (fpkt->pkt_data) { 6066 ddi_dma_mem_free(&fpkt->pkt_data_acc); 6067 } 6068 ddi_dma_free_handle(&fpkt->pkt_data_dma); 6069 } else { 6070 if (fpkt->pkt_data) { 6071 kmem_free(fpkt->pkt_data, fpkt->pkt_datalen); 6072 } 6073 } 6074 6075 if (nodma) { 6076 if (fpkt->pkt_cmd) { 6077 kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen); 6078 } 6079 if (fpkt->pkt_resp) { 6080 kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen); 6081 } 6082 } else { 6083 if (cmd_resp) { 6084 fcp_free_cmd_resp(pptr, fpkt); 6085 } 6086 } 6087 6088 return (FC_NOMEM); 6089 } 6090 6091 6092 static void 6093 fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd) 6094 { 6095 fc_packet_t *fpkt = icmd->ipkt_fpkt; 6096 6097 if (fpkt->pkt_data_dma) { 6098 (void) ddi_dma_unbind_handle(fpkt->pkt_data_dma); 6099 if (fpkt->pkt_data) { 6100 
ddi_dma_mem_free(&fpkt->pkt_data_acc); 6101 } 6102 ddi_dma_free_handle(&fpkt->pkt_data_dma); 6103 } else { 6104 if (fpkt->pkt_data) { 6105 kmem_free(fpkt->pkt_data, fpkt->pkt_datalen); 6106 } 6107 /* 6108 * Need we reset pkt_* to zero??? 6109 */ 6110 } 6111 6112 if (icmd->ipkt_nodma) { 6113 if (fpkt->pkt_cmd) { 6114 kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen); 6115 } 6116 if (fpkt->pkt_resp) { 6117 kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen); 6118 } 6119 } else { 6120 ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL); 6121 6122 fcp_free_cmd_resp(pptr, fpkt); 6123 } 6124 } 6125 6126 /* 6127 * Function: fcp_lookup_target 6128 * 6129 * Description: Finds a target given a WWN. 6130 * 6131 * Argument: *pptr FCP port. 6132 * *wwn World Wide Name of the device to look for. 6133 * 6134 * Return Value: NULL No target found 6135 * Not NULL Target structure 6136 * 6137 * Context: Interrupt context. 6138 * The mutex pptr->port_mutex must be owned. 6139 */ 6140 /* ARGSUSED */ 6141 static struct fcp_tgt * 6142 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn) 6143 { 6144 int hash; 6145 struct fcp_tgt *ptgt; 6146 6147 ASSERT(mutex_owned(&pptr->port_mutex)); 6148 6149 hash = FCP_HASH(wwn); 6150 6151 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL; 6152 ptgt = ptgt->tgt_next) { 6153 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) && 6154 bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0], 6155 sizeof (ptgt->tgt_port_wwn)) == 0) { 6156 break; 6157 } 6158 } 6159 6160 return (ptgt); 6161 } 6162 6163 6164 /* 6165 * Find target structure given a port identifier 6166 */ 6167 static struct fcp_tgt * 6168 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id) 6169 { 6170 fc_portid_t port_id; 6171 la_wwn_t pwwn; 6172 struct fcp_tgt *ptgt = NULL; 6173 6174 port_id.priv_lilp_posit = 0; 6175 port_id.port_id = d_id; 6176 if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id, 6177 &pwwn) == FC_SUCCESS) { 6178 mutex_enter(&pptr->port_mutex); 6179 ptgt = 
fcp_lookup_target(pptr, pwwn.raw_wwn); 6180 mutex_exit(&pptr->port_mutex); 6181 } 6182 6183 return (ptgt); 6184 } 6185 6186 6187 /* 6188 * the packet completion callback routine for info cmd pkts 6189 * 6190 * this means fpkt pts to a response to either a PLOGI or a PRLI 6191 * 6192 * if there is an error an attempt is made to call a routine to resend 6193 * the command that failed 6194 */ 6195 static void 6196 fcp_icmd_callback(fc_packet_t *fpkt) 6197 { 6198 struct fcp_ipkt *icmd; 6199 struct fcp_port *pptr; 6200 struct fcp_tgt *ptgt; 6201 struct la_els_prli *prli; 6202 struct la_els_prli prli_s; 6203 struct fcp_prli *fprli; 6204 struct fcp_lun *plun; 6205 int free_pkt = 1; 6206 int rval; 6207 ls_code_t resp; 6208 uchar_t prli_acc = 0; 6209 uint32_t rscn_count = FC_INVALID_RSCN_COUNT; 6210 int lun0_newalloc; 6211 6212 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private; 6213 6214 /* get ptrs to the port and target structs for the cmd */ 6215 pptr = icmd->ipkt_port; 6216 ptgt = icmd->ipkt_tgt; 6217 6218 FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp)); 6219 6220 if (icmd->ipkt_opcode == LA_ELS_PRLI) { 6221 FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc, 6222 sizeof (prli_s)); 6223 prli_acc = (prli_s.ls_code == LA_ELS_ACC); 6224 } 6225 6226 FCP_TRACE(fcp_logq, pptr->port_instbuf, 6227 fcp_trace, FCP_BUF_LEVEL_2, 0, 6228 "ELS (%x) callback state=0x%x reason=0x%x for %x", 6229 icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason, 6230 ptgt->tgt_d_id); 6231 6232 if ((fpkt->pkt_state == FC_PKT_SUCCESS) && 6233 ((resp.ls_code == LA_ELS_ACC) || prli_acc)) { 6234 6235 mutex_enter(&ptgt->tgt_mutex); 6236 if (ptgt->tgt_pd_handle == NULL) { 6237 /* 6238 * in a fabric environment the port device handles 6239 * get created only after successful LOGIN into the 6240 * transport, so the transport makes this port 6241 * device (pd) handle available in this packet, so 6242 * save it now 6243 */ 6244 ASSERT(fpkt->pkt_pd != NULL); 6245 ptgt->tgt_pd_handle = 
fpkt->pkt_pd; 6246 } 6247 mutex_exit(&ptgt->tgt_mutex); 6248 6249 /* which ELS cmd is this response for ?? */ 6250 switch (icmd->ipkt_opcode) { 6251 case LA_ELS_PLOGI: 6252 FCP_TRACE(fcp_logq, pptr->port_instbuf, 6253 fcp_trace, FCP_BUF_LEVEL_5, 0, 6254 "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x", 6255 ptgt->tgt_d_id, 6256 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]), 6257 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4])); 6258 6259 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, 6260 FCP_TGT_TRACE_15); 6261 6262 /* Note that we are not allocating a new icmd */ 6263 if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI, 6264 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt, 6265 icmd->ipkt_cause) != DDI_SUCCESS) { 6266 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, 6267 FCP_TGT_TRACE_16); 6268 goto fail; 6269 } 6270 break; 6271 6272 case LA_ELS_PRLI: 6273 FCP_TRACE(fcp_logq, pptr->port_instbuf, 6274 fcp_trace, FCP_BUF_LEVEL_5, 0, 6275 "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id); 6276 6277 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, 6278 FCP_TGT_TRACE_17); 6279 6280 prli = &prli_s; 6281 6282 FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc, 6283 sizeof (prli_s)); 6284 6285 fprli = (struct fcp_prli *)prli->service_params; 6286 6287 mutex_enter(&ptgt->tgt_mutex); 6288 ptgt->tgt_icap = fprli->initiator_fn; 6289 ptgt->tgt_tcap = fprli->target_fn; 6290 mutex_exit(&ptgt->tgt_mutex); 6291 6292 if ((fprli->type != 0x08) || (fprli->target_fn != 1)) { 6293 /* 6294 * this FCP device does not support target mode 6295 */ 6296 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, 6297 FCP_TGT_TRACE_18); 6298 goto fail; 6299 } 6300 if (fprli->retry == 1) { 6301 fc_ulp_disable_relogin(pptr->port_fp_handle, 6302 &ptgt->tgt_port_wwn); 6303 } 6304 6305 /* target is no longer offline */ 6306 mutex_enter(&pptr->port_mutex); 6307 mutex_enter(&ptgt->tgt_mutex); 6308 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) { 6309 ptgt->tgt_state &= ~(FCP_TGT_OFFLINE | 6310 FCP_TGT_MARK); 6311 } else { 6312 FCP_TRACE(fcp_logq, pptr->port_instbuf, 
6313 fcp_trace, FCP_BUF_LEVEL_2, 0, 6314 "fcp_icmd_callback,1: state change " 6315 " occured for D_ID=0x%x", ptgt->tgt_d_id); 6316 mutex_exit(&ptgt->tgt_mutex); 6317 mutex_exit(&pptr->port_mutex); 6318 goto fail; 6319 } 6320 mutex_exit(&ptgt->tgt_mutex); 6321 mutex_exit(&pptr->port_mutex); 6322 6323 /* 6324 * lun 0 should always respond to inquiry, so 6325 * get the LUN struct for LUN 0 6326 * 6327 * Currently we deal with first level of addressing. 6328 * If / when we start supporting 0x device types 6329 * (DTYPE_ARRAY_CTRL, i.e. array controllers) 6330 * this logic will need revisiting. 6331 */ 6332 lun0_newalloc = 0; 6333 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) { 6334 /* 6335 * no LUN struct for LUN 0 yet exists, 6336 * so create one 6337 */ 6338 plun = fcp_alloc_lun(ptgt); 6339 if (plun == NULL) { 6340 fcp_log(CE_WARN, pptr->port_dip, 6341 "!Failed to allocate lun 0 for" 6342 " D_ID=%x", ptgt->tgt_d_id); 6343 goto fail; 6344 } 6345 lun0_newalloc = 1; 6346 } 6347 6348 /* fill in LUN info */ 6349 mutex_enter(&ptgt->tgt_mutex); 6350 /* 6351 * consider lun 0 as device not connected if it is 6352 * offlined or newly allocated 6353 */ 6354 if ((plun->lun_state & FCP_LUN_OFFLINE) || 6355 lun0_newalloc) { 6356 plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED; 6357 } 6358 plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK); 6359 plun->lun_state &= ~FCP_LUN_OFFLINE; 6360 ptgt->tgt_lun_cnt = 1; 6361 ptgt->tgt_report_lun_cnt = 0; 6362 mutex_exit(&ptgt->tgt_mutex); 6363 6364 /* Retrieve the rscn count (if a valid one exists) */ 6365 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) { 6366 rscn_count = ((fc_ulp_rscn_info_t *) 6367 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop)) 6368 ->ulp_rscn_count; 6369 } else { 6370 rscn_count = FC_INVALID_RSCN_COUNT; 6371 } 6372 6373 /* send Report Lun request to target */ 6374 if (fcp_send_scsi(plun, SCMD_REPORT_LUN, 6375 sizeof (struct fcp_reportlun_resp), 6376 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt, 6377 icmd->ipkt_cause, rscn_count) != 
DDI_SUCCESS) { 6378 mutex_enter(&pptr->port_mutex); 6379 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) { 6380 fcp_log(CE_WARN, pptr->port_dip, 6381 "!Failed to send REPORT LUN to" 6382 " D_ID=%x", ptgt->tgt_d_id); 6383 } else { 6384 FCP_TRACE(fcp_logq, 6385 pptr->port_instbuf, fcp_trace, 6386 FCP_BUF_LEVEL_5, 0, 6387 "fcp_icmd_callback,2:state change" 6388 " occured for D_ID=0x%x", 6389 ptgt->tgt_d_id); 6390 } 6391 mutex_exit(&pptr->port_mutex); 6392 6393 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, 6394 FCP_TGT_TRACE_19); 6395 6396 goto fail; 6397 } else { 6398 free_pkt = 0; 6399 fcp_icmd_free(pptr, icmd); 6400 } 6401 break; 6402 6403 default: 6404 fcp_log(CE_WARN, pptr->port_dip, 6405 "!fcp_icmd_callback Invalid opcode"); 6406 goto fail; 6407 } 6408 6409 return; 6410 } 6411 6412 6413 /* 6414 * Other PLOGI failures are not retried as the 6415 * transport does it already 6416 */ 6417 if (icmd->ipkt_opcode != LA_ELS_PLOGI) { 6418 if (fcp_is_retryable(icmd) && 6419 icmd->ipkt_retries++ < FCP_MAX_RETRIES) { 6420 6421 if (FCP_MUST_RETRY(fpkt)) { 6422 fcp_queue_ipkt(pptr, fpkt); 6423 return; 6424 } 6425 6426 FCP_TRACE(fcp_logq, pptr->port_instbuf, 6427 fcp_trace, FCP_BUF_LEVEL_2, 0, 6428 "ELS PRLI is retried for d_id=0x%x, state=%x," 6429 " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state, 6430 fpkt->pkt_reason); 6431 6432 /* 6433 * Retry by recalling the routine that 6434 * originally queued this packet 6435 */ 6436 mutex_enter(&pptr->port_mutex); 6437 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) { 6438 caddr_t msg; 6439 6440 mutex_exit(&pptr->port_mutex); 6441 6442 ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI); 6443 6444 if (fpkt->pkt_state == FC_PKT_TIMEOUT) { 6445 fpkt->pkt_timeout += 6446 FCP_TIMEOUT_DELTA; 6447 } 6448 6449 rval = fc_ulp_issue_els(pptr->port_fp_handle, 6450 fpkt); 6451 if (rval == FC_SUCCESS) { 6452 return; 6453 } 6454 6455 if (rval == FC_STATEC_BUSY || 6456 rval == FC_OFFLINE) { 6457 fcp_queue_ipkt(pptr, fpkt); 6458 return; 6459 } 6460 (void) fc_ulp_error(rval, &msg); 
6461 6462 fcp_log(CE_NOTE, pptr->port_dip, 6463 "!ELS 0x%x failed to d_id=0x%x;" 6464 " %s", icmd->ipkt_opcode, 6465 ptgt->tgt_d_id, msg); 6466 } else { 6467 FCP_TRACE(fcp_logq, pptr->port_instbuf, 6468 fcp_trace, FCP_BUF_LEVEL_2, 0, 6469 "fcp_icmd_callback,3: state change " 6470 " occured for D_ID=0x%x", ptgt->tgt_d_id); 6471 mutex_exit(&pptr->port_mutex); 6472 } 6473 } 6474 } else { 6475 if (fcp_is_retryable(icmd) && 6476 icmd->ipkt_retries++ < FCP_MAX_RETRIES) { 6477 if (FCP_MUST_RETRY(fpkt)) { 6478 fcp_queue_ipkt(pptr, fpkt); 6479 return; 6480 } 6481 } 6482 mutex_enter(&pptr->port_mutex); 6483 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) && 6484 fpkt->pkt_state != FC_PKT_PORT_OFFLINE) { 6485 mutex_exit(&pptr->port_mutex); 6486 fcp_print_error(fpkt); 6487 } else { 6488 FCP_TRACE(fcp_logq, pptr->port_instbuf, 6489 fcp_trace, FCP_BUF_LEVEL_2, 0, 6490 "fcp_icmd_callback,4: state change occured" 6491 " for D_ID=0x%x", ptgt->tgt_d_id); 6492 mutex_exit(&pptr->port_mutex); 6493 } 6494 } 6495 6496 fail: 6497 if (free_pkt) { 6498 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt, 6499 icmd->ipkt_change_cnt, icmd->ipkt_cause); 6500 fcp_icmd_free(pptr, icmd); 6501 } 6502 } 6503 6504 6505 /* 6506 * called internally to send an info cmd using the transport 6507 * 6508 * sends either an INQ or a REPORT_LUN 6509 * 6510 * when the packet is completed fcp_scsi_callback is called 6511 */ 6512 static int 6513 fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len, 6514 int lcount, int tcount, int cause, uint32_t rscn_count) 6515 { 6516 int nodma; 6517 struct fcp_ipkt *icmd; 6518 struct fcp_tgt *ptgt; 6519 struct fcp_port *pptr; 6520 fc_frame_hdr_t *hp; 6521 fc_packet_t *fpkt; 6522 struct fcp_cmd fcp_cmd; 6523 struct fcp_cmd *fcmd; 6524 union scsi_cdb *scsi_cdb; 6525 6526 ASSERT(plun != NULL); 6527 6528 ptgt = plun->lun_tgt; 6529 ASSERT(ptgt != NULL); 6530 6531 pptr = ptgt->tgt_port; 6532 ASSERT(pptr != NULL); 6533 6534 FCP_TRACE(fcp_logq, pptr->port_instbuf, 6535 
fcp_trace, FCP_BUF_LEVEL_5, 0, 6536 "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode); 6537 6538 nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0; 6539 icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd), 6540 FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause, 6541 rscn_count); 6542 6543 if (icmd == NULL) { 6544 return (DDI_FAILURE); 6545 } 6546 6547 fpkt = icmd->ipkt_fpkt; 6548 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR; 6549 icmd->ipkt_retries = 0; 6550 icmd->ipkt_opcode = opcode; 6551 icmd->ipkt_lun = plun; 6552 6553 if (nodma) { 6554 fcmd = (struct fcp_cmd *)fpkt->pkt_cmd; 6555 } else { 6556 fcmd = &fcp_cmd; 6557 } 6558 bzero(fcmd, sizeof (struct fcp_cmd)); 6559 6560 fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT; 6561 6562 hp = &fpkt->pkt_cmd_fhdr; 6563 6564 hp->s_id = pptr->port_id; 6565 hp->d_id = ptgt->tgt_d_id; 6566 hp->r_ctl = R_CTL_COMMAND; 6567 hp->type = FC_TYPE_SCSI_FCP; 6568 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ; 6569 hp->rsvd = 0; 6570 hp->seq_id = 0; 6571 hp->seq_cnt = 0; 6572 hp->ox_id = 0xffff; 6573 hp->rx_id = 0xffff; 6574 hp->ro = 0; 6575 6576 bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE); 6577 6578 /* 6579 * Request SCSI target for expedited processing 6580 */ 6581 6582 /* 6583 * Set up for untagged queuing because we do not 6584 * know if the fibre device supports queuing. 
6585 */ 6586 fcmd->fcp_cntl.cntl_reserved_0 = 0; 6587 fcmd->fcp_cntl.cntl_reserved_1 = 0; 6588 fcmd->fcp_cntl.cntl_reserved_2 = 0; 6589 fcmd->fcp_cntl.cntl_reserved_3 = 0; 6590 fcmd->fcp_cntl.cntl_reserved_4 = 0; 6591 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED; 6592 scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb; 6593 6594 switch (opcode) { 6595 case SCMD_INQUIRY_PAGE83: 6596 /* 6597 * Prepare to get the Inquiry VPD page 83 information 6598 */ 6599 fcmd->fcp_cntl.cntl_read_data = 1; 6600 fcmd->fcp_cntl.cntl_write_data = 0; 6601 fcmd->fcp_data_len = alloc_len; 6602 6603 fpkt->pkt_tran_type = FC_PKT_FCP_READ; 6604 fpkt->pkt_comp = fcp_scsi_callback; 6605 6606 scsi_cdb->scc_cmd = SCMD_INQUIRY; 6607 scsi_cdb->g0_addr2 = 0x01; 6608 scsi_cdb->g0_addr1 = 0x83; 6609 scsi_cdb->g0_count0 = (uchar_t)alloc_len; 6610 break; 6611 6612 case SCMD_INQUIRY: 6613 fcmd->fcp_cntl.cntl_read_data = 1; 6614 fcmd->fcp_cntl.cntl_write_data = 0; 6615 fcmd->fcp_data_len = alloc_len; 6616 6617 fpkt->pkt_tran_type = FC_PKT_FCP_READ; 6618 fpkt->pkt_comp = fcp_scsi_callback; 6619 6620 scsi_cdb->scc_cmd = SCMD_INQUIRY; 6621 scsi_cdb->g0_count0 = SUN_INQSIZE; 6622 break; 6623 6624 case SCMD_REPORT_LUN: { 6625 fc_portid_t d_id; 6626 opaque_t fca_dev; 6627 6628 ASSERT(alloc_len >= 16); 6629 6630 d_id.priv_lilp_posit = 0; 6631 d_id.port_id = ptgt->tgt_d_id; 6632 6633 fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id); 6634 6635 mutex_enter(&ptgt->tgt_mutex); 6636 ptgt->tgt_fca_dev = fca_dev; 6637 mutex_exit(&ptgt->tgt_mutex); 6638 6639 fcmd->fcp_cntl.cntl_read_data = 1; 6640 fcmd->fcp_cntl.cntl_write_data = 0; 6641 fcmd->fcp_data_len = alloc_len; 6642 6643 fpkt->pkt_tran_type = FC_PKT_FCP_READ; 6644 fpkt->pkt_comp = fcp_scsi_callback; 6645 6646 scsi_cdb->scc_cmd = SCMD_REPORT_LUN; 6647 scsi_cdb->scc5_count0 = alloc_len & 0xff; 6648 scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff; 6649 scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff; 6650 scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff; 6651 
			break;
		}

	default:
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp_send_scsi Invalid opcode");
		break;
	}

	/*
	 * For DMA-mapped FCAs, copy the just-built CDB out into the
	 * packet's command buffer (nodma FCAs built it in place).
	 */
	if (!nodma) {
		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
	}

	/*
	 * Only hand the packet to the transport if no link or target
	 * state change raced with us while the command was being built;
	 * otherwise drop the internal packet and fail the send.
	 */
	mutex_enter(&pptr->port_mutex);
	if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

		mutex_exit(&pptr->port_mutex);
		if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
		    FC_SUCCESS) {
			fcp_icmd_free(pptr, icmd);
			return (DDI_FAILURE);
		}
		return (DDI_SUCCESS);
	} else {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_send_scsi,1: state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&pptr->port_mutex);
		fcp_icmd_free(pptr, icmd);
		return (DDI_FAILURE);
	}
}


/*
 * called by fcp_scsi_callback to check to handle the case where
 * REPORT_LUN returns ILLEGAL REQUEST or a UNIT ATTENTION
 *
 * For a handful of known-broken device behaviors this routine rewrites
 * the FCP response in the packet to STATUS_GOOD and substitutes the
 * canned single-LUN response (fcp_dummy_lun) as the REPORT_LUN data,
 * so discovery can proceed with LUN 0 only.
 *
 * Returns DDI_SUCCESS when the response was faked up (or a LUN
 * reconfiguration was kicked off); DDI_FAILURE otherwise.
 */
static int
fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
{
	uchar_t				rqlen;
	int				rval = DDI_FAILURE;
	struct scsi_extended_sense	sense_info, *sense;
	struct fcp_ipkt			*icmd = (struct fcp_ipkt *)
	    fpkt->pkt_ulp_private;
	struct fcp_tgt			*ptgt = icmd->ipkt_tgt;
	struct fcp_port			*pptr = ptgt->tgt_port;

	ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);

	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
		/*
		 * SCSI-II Reserve Release support. Some older FC drives return
		 * Reservation conflict for Report Luns command.
		 */
		if (icmd->ipkt_nodma) {
			/* nodma: the response is directly addressable */
			rsp->fcp_u.fcp_status.rsp_len_set = 0;
			rsp->fcp_u.fcp_status.sense_len_set = 0;
			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
		} else {
			/* DMA: copy in, patch, copy back out */
			fcp_rsp_t new_resp;

			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
			    fpkt->pkt_resp_acc, sizeof (new_resp));

			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
			new_resp.fcp_u.fcp_status.sense_len_set = 0;
			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;

			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
			    fpkt->pkt_resp_acc, sizeof (new_resp));
		}

		/* Substitute the canned one-LUN REPORT_LUN payload. */
		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));

		return (DDI_SUCCESS);
	}

	sense = &sense_info;
	if (!rsp->fcp_u.fcp_status.sense_len_set) {
		/* no need to continue if sense length is not set */
		return (rval);
	}

	/* casting 64-bit integer to 8-bit */
	rqlen = (uchar_t)min(rsp->fcp_sense_len,
	    sizeof (struct scsi_extended_sense));

	if (rqlen < 14) {
		/* no need to continue if request length isn't long enough */
		return (rval);
	}

	if (icmd->ipkt_nodma) {
		/*
		 * We can safely use fcp_response_len here since the
		 * only path that calls fcp_check_reportlun,
		 * fcp_scsi_callback, has already called
		 * fcp_validate_fcp_response.
		 */
		sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
		    sizeof (struct fcp_rsp) + rsp->fcp_response_len);
	} else {
		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
		    rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
		    sizeof (struct scsi_extended_sense));
	}

	if (!FCP_SENSE_NO_LUN(sense)) {
		mutex_enter(&ptgt->tgt_mutex);
		/* clear the flag if any */
		ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
		mutex_exit(&ptgt->tgt_mutex);
	}

	/*
	 * ILLEGAL REQUEST / ASC 0x20 (invalid command operation code):
	 * the device does not implement REPORT_LUN at all.  Fake a good
	 * status and the canned single-LUN payload.
	 */
	if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
	    (sense->es_add_code == 0x20)) {
		if (icmd->ipkt_nodma) {
			rsp->fcp_u.fcp_status.rsp_len_set = 0;
			rsp->fcp_u.fcp_status.sense_len_set = 0;
			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
		} else {
			fcp_rsp_t new_resp;

			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
			    fpkt->pkt_resp_acc, sizeof (new_resp));

			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
			new_resp.fcp_u.fcp_status.sense_len_set = 0;
			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;

			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
			    fpkt->pkt_resp_acc, sizeof (new_resp));
		}

		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));

		return (DDI_SUCCESS);
	}

	/*
	 * This is for the STK library which returns a check condition,
	 * to indicate device is not ready, manual assistance needed.
	 * This is to a report lun command when the door is open.
	 */
	if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
		if (icmd->ipkt_nodma) {
			rsp->fcp_u.fcp_status.rsp_len_set = 0;
			rsp->fcp_u.fcp_status.sense_len_set = 0;
			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
		} else {
			fcp_rsp_t new_resp;

			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
			    fpkt->pkt_resp_acc, sizeof (new_resp));

			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
			new_resp.fcp_u.fcp_status.sense_len_set = 0;
			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;

			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
			    fpkt->pkt_resp_acc, sizeof (new_resp));
		}

		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));

		return (DDI_SUCCESS);
	}

	/*
	 * REPORT_LUN data changed, or "logical unit not supported":
	 * schedule a LUN reconfiguration (unless one is already pending,
	 * i.e. tgt_tid is set, or an ILLEGAL REQUEST retriggered itself).
	 */
	if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
	    (FCP_SENSE_NO_LUN(sense))) {
		mutex_enter(&ptgt->tgt_mutex);
		if ((FCP_SENSE_NO_LUN(sense)) &&
		    (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
			mutex_exit(&ptgt->tgt_mutex);
			/*
			 * reconfig was triggered by ILLEGAL REQUEST but
			 * got ILLEGAL REQUEST again
			 */
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "!FCP: Unable to obtain Report Lun data"
			    " target=%x", ptgt->tgt_d_id);
		} else {
			if (ptgt->tgt_tid == NULL) {
				timeout_id_t	tid;
				/*
				 * REPORT LUN data has changed. Kick off
				 * rediscovery
				 */
				tid = timeout(fcp_reconfigure_luns,
				    (caddr_t)ptgt, (clock_t)drv_usectohz(1));

				ptgt->tgt_tid = tid;
				ptgt->tgt_state |= FCP_TGT_BUSY;
			}
			if (FCP_SENSE_NO_LUN(sense)) {
				/* remember reconfig was due to ILLEGAL REQ */
				ptgt->tgt_state |= FCP_TGT_ILLREQ;
			}
			mutex_exit(&ptgt->tgt_mutex);
			if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_3, 0,
				    "!FCP:Report Lun Has Changed"
				    " target=%x", ptgt->tgt_d_id);
			} else if (FCP_SENSE_NO_LUN(sense)) {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_3, 0,
				    "!FCP:LU Not Supported"
				    " target=%x", ptgt->tgt_d_id);
			}
		}
		rval = DDI_SUCCESS;
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "D_ID=%x, sense=%x, status=%x",
	    fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
	    rsp->fcp_u.fcp_status.scsi_status);

	return (rval);
}

/*
 * Function: fcp_scsi_callback
 *
 * Description: This is the callback routine set by fcp_send_scsi() after
 *		it calls fcp_icmd_alloc(). The SCSI command completed here
 *		and autogenerated by FCP are: REPORT_LUN, INQUIRY and
 *		INQUIRY_PAGE83.
 *
 * Argument:	*fpkt	FC packet used to convey the command
 *
 * Return Value: None
 */
static void
fcp_scsi_callback(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
	    fpkt->pkt_ulp_private;
	struct fcp_rsp_info	fcp_rsp_err, *bep;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	struct fcp_rsp	response, *rsp;

	ptgt = icmd->ipkt_tgt;
	pptr = ptgt->tgt_port;
	plun = icmd->ipkt_lun;

	/* Point rsp at the FCP response, copying it in for DMA FCAs. */
	if (icmd->ipkt_nodma) {
		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
	} else {
		rsp = &response;
		FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
		    sizeof (struct fcp_rsp));
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "SCSI callback state=0x%x for %x, op_code=0x%x, "
	    "status=%x, lun num=%x",
	    fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
	    rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);

	/*
	 * Pre-init LUN GUID with NWWN if it is not a device that
	 * supports multiple luns and we know it's not page83
	 * compliant. Although using a NWWN is not lun unique,
	 * we will be fine since there is only one lun behind the taget
	 * in this case.
	 */
	if ((plun->lun_guid_size == 0) &&
	    (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
	    (fcp_symmetric_device_probe(plun) == 0)) {

		char ascii_wwn[FC_WWN_SIZE*2+1];
		fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
		(void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
	}

	/*
	 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
	 * when thay have more data than what is asked in CDB. An overrun
	 * is really when FCP_DL is smaller than the data length in CDB.
	 * In the case here we know that REPORT LUN command we formed within
	 * this binary has correct FCP_DL. So this OVERRUN is due to bad device
	 * behavior. In reality this is FC_SUCCESS.
	 */
	if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
	    (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
	    (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
		fpkt->pkt_state = FC_PKT_SUCCESS;
	}

	/* Transport-level failure: retry, page83 CRC workaround, or fail. */
	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "icmd failed with state=0x%x for %x", fpkt->pkt_state,
		    ptgt->tgt_d_id);

		if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
			/*
			 * Inquiry VPD page command on A5K SES devices would
			 * result in data CRC errors.
			 */
			if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
				(void) fcp_handle_page83(fpkt, icmd, 1);
				return;
			}
		}
		if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
		    FCP_MUST_RETRY(fpkt)) {
			/* give the next attempt a little longer to complete */
			fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
			fcp_retry_scsi_cmd(fpkt);
			return;
		}

		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
		    FCP_TGT_TRACE_20);

		/* lock order: port_mutex before tgt_mutex */
		mutex_enter(&pptr->port_mutex);
		mutex_enter(&ptgt->tgt_mutex);
		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
			fcp_print_error(fpkt);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_scsi_callback,1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
		}
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);

	/* Abandon the completion if the link or target changed under us. */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_scsi_callback,2: state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}
	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/* FCP response info block follows the fcp_rsp in the resp buffer. */
	if (icmd->ipkt_nodma) {
		bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
		    sizeof (struct fcp_rsp));
	} else {
		bep = &fcp_rsp_err;
		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
		    fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
	}

	if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
		fcp_retry_scsi_cmd(fpkt);
		return;
	}

	if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
	    FCP_NO_FAILURE) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "rsp_code=0x%x, rsp_len_set=0x%x",
		    bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
		fcp_retry_scsi_cmd(fpkt);
		return;
	}

	/* Target is temporarily saturated: park the packet for later. */
	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
	    rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
		fcp_queue_ipkt(pptr, fpkt);
		return;
	}

	/*
	 * Devices that do not support INQUIRY_PAGE83, return check condition
	 * with illegal request as per SCSI spec.
	 * Crossbridge is one such device and Daktari's SES node is another.
	 * We want to ideally enumerate these devices as a non-mpxio devices.
	 * SES nodes (Daktari only currently) are an exception to this.
	 */
	if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
	    (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
		    "check condition. May enumerate as non-mpxio device",
		    ptgt->tgt_d_id, plun->lun_type);

		/*
		 * If we let Daktari's SES be enumerated as a non-mpxio
		 * device, there will be a discrepancy in that the other
		 * internal FC disks will get enumerated as mpxio devices.
		 * Applications like luxadm expect this to be consistent.
		 *
		 * So, we put in a hack here to check if this is an SES device
		 * and handle it here.
		 */
		if (plun->lun_type == DTYPE_ESI) {
			/*
			 * Since, pkt_state is actually FC_PKT_SUCCESS
			 * at this stage, we fake a failure here so that
			 * fcp_handle_page83 will create a device path using
			 * the WWN instead of the GUID which is not there anyway
			 */
			fpkt->pkt_state = FC_PKT_LOCAL_RJT;
			(void) fcp_handle_page83(fpkt, icmd, 1);
			return;
		}

		mutex_enter(&ptgt->tgt_mutex);
		plun->lun_state &= ~(FCP_LUN_OFFLINE |
		    FCP_LUN_MARK | FCP_LUN_BUSY);
		mutex_exit(&ptgt->tgt_mutex);

		(void) fcp_call_finish_init(pptr, ptgt,
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
		int rval = DDI_FAILURE;

		/*
		 * handle cases where report lun isn't supported
		 * by faking up our own REPORT_LUN response or
		 * UNIT ATTENTION
		 */
		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
			rval = fcp_check_reportlun(rsp, fpkt);

			/*
			 * fcp_check_reportlun might have modified the
			 * FCP response. Copy it in again to get an updated
			 * FCP response
			 */
			if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
				rsp = &response;

				FCP_CP_IN(fpkt->pkt_resp, rsp,
				    fpkt->pkt_resp_acc,
				    sizeof (struct fcp_rsp));
			}
		}

		if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
			if (rval == DDI_SUCCESS) {
				(void) fcp_call_finish_init(pptr, ptgt,
				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
				    icmd->ipkt_cause);
				fcp_icmd_free(pptr, icmd);
			} else {
				fcp_retry_scsi_cmd(fpkt);
			}

			return;
		}
	} else {
		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
			/* REPORT_LUN succeeded: clear any pending ILLREQ */
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
	/* Make the DMA'd data buffer visible to the CPU before parsing it. */
	if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
		(void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}

	/* Dispatch on the command that completed. */
	switch (icmd->ipkt_opcode) {
	case SCMD_INQUIRY:
		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
		fcp_handle_inquiry(fpkt, icmd);
		break;

	case SCMD_REPORT_LUN:
		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
		    FCP_TGT_TRACE_22);
		fcp_handle_reportlun(fpkt, icmd);
		break;

	case SCMD_INQUIRY_PAGE83:
		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
		(void) fcp_handle_page83(fpkt, icmd, 0);
		break;

	default:
		fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		break;
	}
}


/*
 * Retry (really: requeue) a failed internal SCSI command, or give up and
 * release it once FCP_MAX_RETRIES is exhausted or the failure is not
 * retryable.  On every terminal path the icmd is freed and
 * fcp_call_finish_init() is invoked to keep discovery accounting right.
 */
static void
fcp_retry_scsi_cmd(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
	    fpkt->pkt_ulp_private;
	struct fcp_tgt	*ptgt = icmd->ipkt_tgt;
	struct fcp_port	*pptr = ptgt->tgt_port;

	if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
	    fcp_is_retryable(icmd)) {
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
			mutex_exit(&pptr->port_mutex);
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "Retrying %s to %x; state=%x, reason=%x",
			    (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
			    "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
			    fpkt->pkt_state, fpkt->pkt_reason);

			fcp_queue_ipkt(pptr, fpkt);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_retry_scsi_cmd,1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
			(void) fcp_call_finish_init(pptr, ptgt,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause);
			fcp_icmd_free(pptr, icmd);
		}
	} else {
		fcp_print_error(fpkt);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
	}
}

/*
 * Function: fcp_handle_page83
 *
 * Description: Treats the response to INQUIRY_PAGE83.
 *
 * Argument:	*fpkt	FC packet used to convey the command.
 *		*icmd	Original fcp_ipkt structure.
7225 * ignore_page83_data 7226 * if it's 1, that means it's a special devices's 7227 * page83 response, it should be enumerated under mpxio 7228 * 7229 * Return Value: None 7230 */ 7231 static void 7232 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd, 7233 int ignore_page83_data) 7234 { 7235 struct fcp_port *pptr; 7236 struct fcp_lun *plun; 7237 struct fcp_tgt *ptgt; 7238 uchar_t dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE]; 7239 int fail = 0; 7240 ddi_devid_t devid; 7241 char *guid = NULL; 7242 int ret; 7243 7244 ASSERT(icmd != NULL && fpkt != NULL); 7245 7246 pptr = icmd->ipkt_port; 7247 ptgt = icmd->ipkt_tgt; 7248 plun = icmd->ipkt_lun; 7249 7250 if (fpkt->pkt_state == FC_PKT_SUCCESS) { 7251 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7); 7252 7253 FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc, 7254 SCMD_MAX_INQUIRY_PAGE83_SIZE); 7255 7256 FCP_TRACE(fcp_logq, pptr->port_instbuf, 7257 fcp_trace, FCP_BUF_LEVEL_5, 0, 7258 "fcp_handle_page83: port=%d, tgt D_ID=0x%x, " 7259 "dtype=0x%x, lun num=%x", 7260 pptr->port_instance, ptgt->tgt_d_id, 7261 dev_id_page[0], plun->lun_num); 7262 7263 ret = ddi_devid_scsi_encode( 7264 DEVID_SCSI_ENCODE_VERSION_LATEST, 7265 NULL, /* driver name */ 7266 (unsigned char *) &plun->lun_inq, /* standard inquiry */ 7267 sizeof (plun->lun_inq), /* size of standard inquiry */ 7268 NULL, /* page 80 data */ 7269 0, /* page 80 len */ 7270 dev_id_page, /* page 83 data */ 7271 SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */ 7272 &devid); 7273 7274 if (ret == DDI_SUCCESS) { 7275 7276 guid = ddi_devid_to_guid(devid); 7277 7278 if (guid) { 7279 /* 7280 * Check our current guid. If it's non null 7281 * and it has changed, we need to copy it into 7282 * lun_old_guid since we might still need it. 7283 */ 7284 if (plun->lun_guid && 7285 strcmp(guid, plun->lun_guid)) { 7286 unsigned int len; 7287 7288 /* 7289 * If the guid of the LUN changes, 7290 * reconfiguration should be triggered 7291 * to reflect the changes. 7292 * i.e. 
we should offline the LUN with 7293 * the old guid, and online the LUN with 7294 * the new guid. 7295 */ 7296 plun->lun_state |= FCP_LUN_CHANGED; 7297 7298 if (plun->lun_old_guid) { 7299 kmem_free(plun->lun_old_guid, 7300 plun->lun_old_guid_size); 7301 } 7302 7303 len = plun->lun_guid_size; 7304 plun->lun_old_guid_size = len; 7305 7306 plun->lun_old_guid = kmem_zalloc(len, 7307 KM_NOSLEEP); 7308 7309 if (plun->lun_old_guid) { 7310 /* 7311 * The alloc was successful then 7312 * let's do the copy. 7313 */ 7314 bcopy(plun->lun_guid, 7315 plun->lun_old_guid, len); 7316 } else { 7317 fail = 1; 7318 plun->lun_old_guid_size = 0; 7319 } 7320 } 7321 if (!fail) { 7322 if (fcp_copy_guid_2_lun_block( 7323 plun, guid)) { 7324 fail = 1; 7325 } 7326 } 7327 ddi_devid_free_guid(guid); 7328 7329 } else { 7330 FCP_TRACE(fcp_logq, pptr->port_instbuf, 7331 fcp_trace, FCP_BUF_LEVEL_2, 0, 7332 "fcp_handle_page83: unable to create " 7333 "GUID"); 7334 7335 /* couldn't create good guid from devid */ 7336 fail = 1; 7337 } 7338 ddi_devid_free(devid); 7339 7340 } else if (ret == DDI_NOT_WELL_FORMED) { 7341 /* NULL filled data for page 83 */ 7342 FCP_TRACE(fcp_logq, pptr->port_instbuf, 7343 fcp_trace, FCP_BUF_LEVEL_2, 0, 7344 "fcp_handle_page83: retry GUID"); 7345 7346 icmd->ipkt_retries = 0; 7347 fcp_retry_scsi_cmd(fpkt); 7348 return; 7349 } else { 7350 FCP_TRACE(fcp_logq, pptr->port_instbuf, 7351 fcp_trace, FCP_BUF_LEVEL_2, 0, 7352 "fcp_handle_page83: bad ddi_devid_scsi_encode %x", 7353 ret); 7354 /* 7355 * Since the page83 validation 7356 * introduced late, we are being 7357 * tolerant to the existing devices 7358 * that already found to be working 7359 * under mpxio, like A5200's SES device, 7360 * its page83 response will not be standard-compliant, 7361 * but we still want it to be enumerated under mpxio. 
7362 */ 7363 if (fcp_symmetric_device_probe(plun) != 0) { 7364 fail = 1; 7365 } 7366 } 7367 7368 } else { 7369 /* bad packet state */ 7370 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8); 7371 7372 /* 7373 * For some special devices (A5K SES and Daktari's SES devices), 7374 * they should be enumerated under mpxio 7375 * or "luxadm dis" will fail 7376 */ 7377 if (ignore_page83_data) { 7378 fail = 0; 7379 } else { 7380 fail = 1; 7381 } 7382 FCP_TRACE(fcp_logq, pptr->port_instbuf, 7383 fcp_trace, FCP_BUF_LEVEL_2, 0, 7384 "!Devid page cmd failed. " 7385 "fpkt_state: %x fpkt_reason: %x", 7386 "ignore_page83: %d", 7387 fpkt->pkt_state, fpkt->pkt_reason, 7388 ignore_page83_data); 7389 } 7390 7391 mutex_enter(&pptr->port_mutex); 7392 mutex_enter(&plun->lun_mutex); 7393 /* 7394 * If lun_cip is not NULL, then we needn't update lun_mpxio to avoid 7395 * mismatch between lun_cip and lun_mpxio. 7396 */ 7397 if (plun->lun_cip == NULL) { 7398 /* 7399 * If we don't have a guid for this lun it's because we were 7400 * unable to glean one from the page 83 response. Set the 7401 * control flag to 0 here to make sure that we don't attempt to 7402 * enumerate it under mpxio. 7403 */ 7404 if (fail || pptr->port_mpxio == 0) { 7405 plun->lun_mpxio = 0; 7406 } else { 7407 plun->lun_mpxio = 1; 7408 } 7409 } 7410 mutex_exit(&plun->lun_mutex); 7411 mutex_exit(&pptr->port_mutex); 7412 7413 mutex_enter(&ptgt->tgt_mutex); 7414 plun->lun_state &= 7415 ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY); 7416 mutex_exit(&ptgt->tgt_mutex); 7417 7418 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt, 7419 icmd->ipkt_change_cnt, icmd->ipkt_cause); 7420 7421 fcp_icmd_free(pptr, icmd); 7422 } 7423 7424 /* 7425 * Function: fcp_handle_inquiry 7426 * 7427 * Description: Called by fcp_scsi_callback to handle the response to an 7428 * INQUIRY request. 7429 * 7430 * Argument: *fpkt FC packet used to convey the command. 7431 * *icmd Original fcp_ipkt structure. 
 *
 * Return Value: None
 */
static void
fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
{
	struct fcp_port	*pptr;
	struct fcp_lun	*plun;
	struct fcp_tgt	*ptgt;
	uchar_t		dtype;
	uchar_t		pqual;
	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;

	ASSERT(icmd != NULL && fpkt != NULL);

	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;
	plun = icmd->ipkt_lun;

	/* Snapshot the standard INQUIRY data into the LUN structure. */
	FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
	    sizeof (struct scsi_inquiry));

	/* device type is the low 5 bits, peripheral qualifier the top 3 */
	dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
	pqual = plun->lun_inq.inq_dtype >> 5;

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
	    "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
	    plun->lun_num, dtype, pqual);

	if (pqual != 0) {
		/*
		 * Non-zero peripheral qualifier: the LUN is not a directly
		 * accessible device; log it and abandon this LUN.
		 */
		fcp_log(CE_CONT, pptr->port_dip,
		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
		    "Device type=0x%x Peripheral qual=0x%x\n",
		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
		    "Device type=0x%x Peripheral qual=0x%x\n",
		    ptgt->tgt_d_id, plun->lun_num, dtype, pqual);

		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);

		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/*
	 * If the device is already initialized, check the dtype
	 * for a change. If it has changed then update the flags
	 * so the create_luns will offline the old device and
	 * create the new device. Refer to bug: 4764752
	 */
	if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
		plun->lun_state |= FCP_LUN_CHANGED;
	}
	plun->lun_type = plun->lun_inq.inq_dtype;

	/*
	 * This code is setting/initializing the throttling in the FCA
	 * driver.
	 *
	 * NOTE(review): because cmd starts at 0, the expression below
	 * reduces to FC_NOTIFY_THROTTLE | (FCP_SVE_THROTTLE << 8); the
	 * masks as written have surprising operator precedence
	 * (">>" binds tighter than "&") but are harmless here.
	 */
	mutex_enter(&pptr->port_mutex);
	if (!pptr->port_notify) {
		if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
			uint32_t cmd = 0;
			cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
			    ((cmd & 0xFFFFFF00 >> 8) |
			    FCP_SVE_THROTTLE << 8));
			pptr->port_notify = 1;
			mutex_exit(&pptr->port_mutex);
			(void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
			mutex_enter(&pptr->port_mutex);
		}
	}

	if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_handle_inquiry,1:state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&pptr->port_mutex);

		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
		(void) fcp_call_finish_init(pptr, ptgt,
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}
	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
	mutex_exit(&pptr->port_mutex);

	/* Retrieve the rscn count (if a valid one exists) */
	if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
		rscn_count = ((fc_ulp_rscn_info_t *)
		    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
	} else {
		rscn_count = FC_INVALID_RSCN_COUNT;
	}

	/*
	 * Read Inquiry VPD Page 0x83 to uniquely
	 * identify this logical unit.
	 */
	if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
	    SCMD_MAX_INQUIRY_PAGE83_SIZE,
	    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
	    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
		fcp_log(CE_WARN, NULL, "!failed to send page 83");
		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
		(void) fcp_call_finish_init(pptr, ptgt,
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause);
	}

	fcp_icmd_free(pptr, icmd);
}

/*
 * Function: fcp_handle_reportlun
 *
 * Description: Called by fcp_scsi_callback to handle the response to a
 *		REPORT_LUN request.
 *
 * Argument:	*fpkt	FC packet used to convey the command.
 *		*icmd	Original fcp_ipkt structure.
 *
 * Return Value: None
 */
static void
fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
{
	int				i;
	int				nluns_claimed;
	int				nluns_bufmax;
	int				len;
	uint16_t			lun_num;
	uint32_t			rscn_count = FC_INVALID_RSCN_COUNT;
	struct fcp_port			*pptr;
	struct fcp_tgt			*ptgt;
	struct fcp_lun			*plun;
	struct fcp_reportlun_resp	*report_lun;

	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;
	len = fpkt->pkt_datalen;

	/* bail out on a malformed (short) response or allocation failure */
	if ((len < FCP_LUN_HEADER) ||
	    ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
	    fpkt->pkt_datalen);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
	    pptr->port_instance, ptgt->tgt_d_id);

	/*
	 * Get the number of luns (which is supplied as LUNS * 8) the
	 * device claims it has.
	 */
	nluns_claimed = BE_32(report_lun->num_lun) >> 3;

	/*
	 * Get the maximum number of luns the buffer submitted can hold.
	 */
	nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;

	/*
	 * Due to limitations of certain hardware, we support only 16 bit LUNs
	 */
	if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
		kmem_free(report_lun, len);

		fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
		    " 0x%x number of LUNs for target=%x", nluns_claimed,
		    ptgt->tgt_d_id);

		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/*
	 * If there are more LUNs than we have allocated memory for,
	 * allocate more space and send down yet another report lun if
	 * the maximum number of attempts hasn't been reached.
	 */
	mutex_enter(&ptgt->tgt_mutex);

	if ((nluns_claimed > nluns_bufmax) &&
	    (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {

		struct fcp_lun *plun;

		ptgt->tgt_report_lun_cnt++;
		plun = ptgt->tgt_lun;
		ASSERT(plun != NULL);
		mutex_exit(&ptgt->tgt_mutex);

		kmem_free(report_lun, len);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "!Dynamically discovered %d LUNs for D_ID=%x",
		    nluns_claimed, ptgt->tgt_d_id);

		/* Retrieve the rscn count (if a valid one exists) */
		if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
			rscn_count = ((fc_ulp_rscn_info_t *)
			    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
			    ulp_rscn_count;
		} else {
			rscn_count = FC_INVALID_RSCN_COUNT;
		}

		/* resend REPORT_LUN sized for everything the target claims */
		if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
		    FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
			(void) fcp_call_finish_init(pptr, ptgt,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause);
		}

		fcp_icmd_free(pptr, icmd);
		return;
	}

	if (nluns_claimed > nluns_bufmax) {
		/* retry budget exhausted: process what fits, warn about rest */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "Target=%x:%x:%x:%x:%x:%x:%x:%x"
		    " Number of LUNs lost=%x",
		    ptgt->tgt_port_wwn.raw_wwn[0],
		    ptgt->tgt_port_wwn.raw_wwn[1],
		    ptgt->tgt_port_wwn.raw_wwn[2],
		    ptgt->tgt_port_wwn.raw_wwn[3],
		    ptgt->tgt_port_wwn.raw_wwn[4],
		    ptgt->tgt_port_wwn.raw_wwn[5],
		    ptgt->tgt_port_wwn.raw_wwn[6],
		    ptgt->tgt_port_wwn.raw_wwn[7],
		    nluns_claimed - nluns_bufmax);

		nluns_claimed = nluns_bufmax;
	}
	ptgt->tgt_lun_cnt = nluns_claimed;

	/*
	 * Identify missing LUNs and print warning messages
	 */
	for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
		int offline;
		int exists = 0;

		offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;

		/* look for this known LUN in the freshly reported list */
		for (i = 0; i < nluns_claimed && exists == 0; i++) {
			uchar_t *lun_string;

			lun_string = (uchar_t *)&(report_lun->lun_string[i]);

			switch (lun_string[0] & 0xC0) {
			case FCP_LUN_ADDRESSING:
			case FCP_PD_ADDRESSING:
			case FCP_VOLUME_ADDRESSING:
				lun_num = ((lun_string[0] & 0x3F) << 8) |
				    lun_string[1];
				if (plun->lun_num == lun_num) {
					exists++;
					break;
				}
				break;

			default:
				break;
			}
		}

		if (!exists && !offline) {
			/* drop tgt_mutex to honor port->tgt lock order */
			mutex_exit(&ptgt->tgt_mutex);

			mutex_enter(&pptr->port_mutex);
			mutex_enter(&ptgt->tgt_mutex);
			if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
				/*
				 * set disappear flag when device was connected
				 */
				if (!(plun->lun_state &
				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
					plun->lun_state |= FCP_LUN_DISAPPEARED;
				}
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				if (!(plun->lun_state &
				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun=%x for target=%x disappeared",
					    plun->lun_num, ptgt->tgt_d_id);
				}
				mutex_enter(&ptgt->tgt_mutex);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_5, 0,
				    "fcp_handle_reportlun,1: state change"
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				kmem_free(report_lun, len);
				(void) fcp_call_finish_init(pptr, ptgt,
				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
				    icmd->ipkt_cause);
				fcp_icmd_free(pptr, icmd);
				return;
			}
		} else if (exists) {
			/*
			 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
			 * actually exists in REPORT_LUN response
			 */
			if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
				plun->lun_state &=
				    ~FCP_LUN_DEVICE_NOT_CONNECTED;
			}
			if (offline || plun->lun_num == 0) {
				if (plun->lun_state & FCP_LUN_DISAPPEARED) {
					plun->lun_state &= ~FCP_LUN_DISAPPEARED;
					mutex_exit(&ptgt->tgt_mutex);
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun=%x for target=%x reappeared",
					    plun->lun_num, ptgt->tgt_d_id);
					mutex_enter(&ptgt->tgt_mutex);
				}
			}
		}
	}

	/* tmp_cnt is a countdown over the per-LUN INQUIRYs sent below */
	ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
	mutex_exit(&ptgt->tgt_mutex);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
	    pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);

	/* scan each lun */
	for (i = 0; i < nluns_claimed; i++) {
		uchar_t	*lun_string;

		lun_string = (uchar_t *)&(report_lun->lun_string[i]);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
		    " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
		    lun_string[0]);

		switch (lun_string[0] & 0xC0) {
		case FCP_LUN_ADDRESSING:
		case FCP_PD_ADDRESSING:
		case FCP_VOLUME_ADDRESSING:
			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];

			/* We will skip masked LUNs because of the blacklist. */
			if (fcp_lun_blacklist != NULL) {
				mutex_enter(&ptgt->tgt_mutex);
				if (fcp_should_mask(&ptgt->tgt_port_wwn,
				    lun_num) == TRUE) {
					ptgt->tgt_lun_cnt--;
					mutex_exit(&ptgt->tgt_mutex);
					break;
				}
				mutex_exit(&ptgt->tgt_mutex);
			}

			/* see if this LUN is already allocated */
			if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
				plun = fcp_alloc_lun(ptgt);
				if (plun == NULL) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun allocation failed"
					    " target=%x lun=%x",
					    ptgt->tgt_d_id, lun_num);
					break;
				}
			}

			mutex_enter(&plun->lun_tgt->tgt_mutex);
			/* convert to LUN */
			plun->lun_addr.ent_addr_0 =
			    BE_16(*(uint16_t *)&(lun_string[0]));
			plun->lun_addr.ent_addr_1 =
			    BE_16(*(uint16_t *)&(lun_string[2]));
			plun->lun_addr.ent_addr_2 =
			    BE_16(*(uint16_t *)&(lun_string[4]));
			plun->lun_addr.ent_addr_3 =
			    BE_16(*(uint16_t *)&(lun_string[6]));

			plun->lun_num = lun_num;
			plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
			plun->lun_state &= ~FCP_LUN_OFFLINE;
			mutex_exit(&plun->lun_tgt->tgt_mutex);

			/* Retrieve the rscn count (if a valid one exists) */
			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
				rscn_count = ((fc_ulp_rscn_info_t *)
				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
				    ulp_rscn_count;
			} else {
				rscn_count = FC_INVALID_RSCN_COUNT;
			}

			if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
				mutex_enter(&pptr->port_mutex);
				mutex_enter(&plun->lun_tgt->tgt_mutex);
				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!failed to send INQUIRY"
					    " target=%x lun=%x",
					    ptgt->tgt_d_id, plun->lun_num);
				} else {
					FCP_TRACE(fcp_logq,
					    pptr->port_instbuf, fcp_trace,
					    FCP_BUF_LEVEL_5, 0,
					    "fcp_handle_reportlun,2: state"
					    " change occured for D_ID=0x%x",
					    ptgt->tgt_d_id);
				}
				mutex_exit(&plun->lun_tgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
			} else {
				/* INQUIRY in flight; its callback finishes */
				continue;
			}
			break;

		default:
			fcp_log(CE_WARN, NULL,
			    "!Unsupported LUN Addressing method %x "
			    "in response to REPORT_LUN", lun_string[0]);
			break;
		}

		/*
		 * each time through this loop we should decrement
		 * the tmp_cnt by one -- since we go through this loop
		 * one time for each LUN, the tmp_cnt should never be <=0
		 */
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
	}

	if (i == 0) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
	}

	kmem_free(report_lun, len);
	fcp_icmd_free(pptr, icmd);
}


/*
 * called internally to return a LUN given a target and a LUN number
 */
static
struct fcp_lun *
fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
{
	struct fcp_lun	*plun;

	/* walk the target's LUN list under tgt_mutex */
	mutex_enter(&ptgt->tgt_mutex);
	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
		if (plun->lun_num == lun_num) {
			mutex_exit(&ptgt->tgt_mutex);
			/*
			 * NOTE(review): the mutex is dropped before return,
			 * so the caller relies on the LUN not being freed
			 * concurrently -- confirm callers hold an implicit
			 * reference (e.g. link/change counts).
			 */
			return (plun);
		}
	}
	mutex_exit(&ptgt->tgt_mutex);

	return (NULL);
}


/*
 * handle finishing one target for fcp_finish_init
 *
 * return true (non-zero) if we want finish_init to continue with the
 * next target
 *
 * called with the port mutex held
 */
/*ARGSUSED*/
static int
fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int cause)
{
	int	rval = 1;

	ASSERT(pptr != NULL);
	ASSERT(ptgt != NULL);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
	    ptgt->tgt_state);

	ASSERT(mutex_owned(&pptr->port_mutex));

	if ((pptr->port_link_cnt != link_cnt) ||
	    (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
		/*
		 * oh oh -- another link reset or target change
		 * must have occurred while we are in here
		 */
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);

		return (0);
	} else {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
	}

	mutex_enter(&ptgt->tgt_mutex);

	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
		/*
		 * tgt is not offline -- is it marked (i.e. needs
		 * to be offlined) ??
		 */
		if (ptgt->tgt_state & FCP_TGT_MARK) {
			/*
			 * this target not offline *and*
			 * marked
			 */
			ptgt->tgt_state &= ~FCP_TGT_MARK;
			rval = fcp_offline_target(pptr, ptgt, link_cnt,
			    tgt_cnt, 0, 0);
		} else {
			ptgt->tgt_state &= ~FCP_TGT_BUSY;

			/* create the LUNs */
			if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
				ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
				fcp_create_luns(ptgt, link_cnt, tgt_cnt,
				    cause);
				ptgt->tgt_device_created = 1;
			} else {
				fcp_update_tgt_state(ptgt, FCP_RESET,
				    FCP_LUN_BUSY);
			}
		}
	}

	mutex_exit(&ptgt->tgt_mutex);

	return (rval);
}


/*
 * this routine is called to finish port initialization
 *
 * Each port has a "temp" counter -- when a state change happens (e.g.
 * port online), the temp count is set to the number of devices in the map.
 * Then, as each device gets "discovered", the temp counter is decremented
 * by one. When this count reaches zero we know that all of the devices
 * in the map have been discovered (or an error has occurred), so we can
 * then finish initialization -- which is done by this routine (well, this
 * and fcp-finish_tgt())
 *
 * acquires and releases the global mutex
 *
 * called with the port mutex owned
 */
static void
fcp_finish_init(struct fcp_port *pptr)
{
#ifdef	DEBUG
	/* record the call stack of whoever finished init, for debugging */
	bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
	pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
	    FCP_STACK_DEPTH);
#endif /* DEBUG */

	ASSERT(mutex_owned(&pptr->port_mutex));

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
	    " entering; ipkt count=%d", pptr->port_ipkt_cnt);

	/*
	 * Transition ONLINING -> ONLINE unless the port is being
	 * suspended, detached or powered down in the meantime.
	 */
	if ((pptr->port_state & FCP_STATE_ONLINING) &&
	    !(pptr->port_state & (FCP_STATE_SUSPENDED |
	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
		pptr->port_state &= ~FCP_STATE_ONLINING;
		pptr->port_state |= FCP_STATE_ONLINE;
	}

	/* Wake up threads waiting on config done */
	cv_broadcast(&pptr->port_config_cv);
}


/*
 * called from fcp_finish_init to create the LUNs for a target
 *
 * called with the port mutex owned
 */
static void
fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
{
	struct fcp_lun	*plun;
	struct fcp_port	*pptr;
	child_info_t	*cip = NULL;

	ASSERT(ptgt != NULL);
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	pptr = ptgt->tgt_port;

	ASSERT(pptr != NULL);

	/* scan all LUNs for this target */
	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
		if (plun->lun_state & FCP_LUN_OFFLINE) {
			continue;
		}

		if (plun->lun_state & FCP_LUN_MARK) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_create_luns: offlining marked LUN!");
			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
			continue;
		}

		plun->lun_state &= ~FCP_LUN_BUSY;

		/*
		 * There are conditions in which FCP_LUN_INIT flag is cleared
		 * but we have a valid plun->lun_cip. To cover this case also
		 * CLEAR_BUSY whenever we have a valid lun_cip.
		 */
		if (plun->lun_mpxio && plun->lun_cip &&
		    (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
		    0, 0))) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_create_luns: enable lun %p failed!",
			    plun);
		}

		/* already initialized and unchanged -- nothing to do */
		if (plun->lun_state & FCP_LUN_INIT &&
		    !(plun->lun_state & FCP_LUN_CHANGED)) {
			continue;
		}

		if (cause == FCP_CAUSE_USER_CREATE) {
			continue;
		}

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_6, 0,
		    "create_luns: passing ONLINE elem to HP thread");

		/*
		 * If lun has changed, prepare for offlining the old path.
		 * Do not offline the old path right now, since it may be
		 * still opened.
		 */
		if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
			fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
		}

		/* pass an ONLINE element to the hotplug thread */
		if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
		    link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {

			/*
			 * We can not synchronous attach (i.e pass
			 * NDI_ONLINE_ATTACH) here as we might be
			 * coming from an interrupt or callback
			 * thread.
			 */
			if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
			    link_cnt, tgt_cnt, 0, 0)) {
				fcp_log(CE_CONT, pptr->port_dip,
				    "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
				    plun->lun_tgt->tgt_d_id, plun->lun_num);
			}
		}
	}
}


/*
 * function to online/offline devices
 */
static int
fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
    int online, int lcount, int tcount, int flags)
{
	int	rval = NDI_FAILURE;
	int	circ;
	child_info_t	*ccip;
	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
	int	is_mpxio = pptr->port_mpxio;
	dev_info_t	*cdip, *pdip;
	char	*devname;

	if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
		/*
		 * When this event gets serviced, lun_cip and lun_mpxio
		 * has changed, so it should be invalidated now.
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
		    "plun: %p, cip: %p, what:%d", plun, cip, online);
		return (rval);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
	    "flags=%x mpxio=%x\n",
	    plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
	    plun->lun_mpxio);

	/*
	 * lun_mpxio needs checking here because we can end up in a race
	 * condition where this task has been dispatched while lun_mpxio is
	 * set, but an earlier FCP_ONLINE task for the same LUN tried to
	 * enable MPXIO for the LUN, but was unable to, and hence cleared
	 * the flag. We rely on the serialization of the tasks here. We return
	 * NDI_SUCCESS so any callers continue without reporting spurious
	 * errors, and the still think we're an MPXIO LUN.
	 */

	if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
	    online == FCP_MPXIO_PATH_SET_BUSY) {
		if (plun->lun_mpxio) {
			rval = fcp_update_mpxio_path(plun, cip, online);
		} else {
			rval = NDI_SUCCESS;
		}
		return (rval);
	}

	/*
	 * Explicit devfs_clean() due to ndi_devi_offline() not
	 * executing devfs_clean() if parent lock is held.
	 */
	ASSERT(!servicing_interrupt());
	if (online == FCP_OFFLINE) {
		if (plun->lun_mpxio == 0) {
			if (plun->lun_cip == cip) {
				cdip = DIP(plun->lun_cip);
			} else {
				cdip = DIP(cip);
			}
		} else if ((plun->lun_cip == cip) && plun->lun_cip) {
			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
		} else if ((plun->lun_cip != cip) && cip) {
			/*
			 * This means a DTYPE/GUID change, we shall get the
			 * dip of the old cip instead of the current lun_cip.
			 */
			cdip = mdi_pi_get_client(PIP(cip));
		}
		if (cdip) {
			if (i_ddi_devi_attached(cdip)) {
				pdip = ddi_get_parent(cdip);
				devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
				ndi_devi_enter(pdip, &circ);
				(void) ddi_deviname(cdip, devname);
				/*
				 * Release parent lock before calling
				 * devfs_clean().
				 */
				ndi_devi_exit(pdip, circ);
				/* devname has a leading '/'; skip it */
				(void) devfs_clean(pdip, devname + 1,
				    DV_CLEAN_FORCE);
				kmem_free(devname, MAXNAMELEN + 1);
			}
		}
	}

	if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
		return (NDI_FAILURE);
	}

	if (is_mpxio) {
		mdi_devi_enter(pptr->port_dip, &circ);
	} else {
		ndi_devi_enter(pptr->port_dip, &circ);
	}

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);

	if (online == FCP_ONLINE) {
		ccip = fcp_get_cip(plun, cip, lcount, tcount);
		if (ccip == NULL) {
			goto fail;
		}
	} else {
		if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
			goto fail;
		}
		ccip = cip;
	}

	if (online == FCP_ONLINE) {
		rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
		    &circ);
		fc_ulp_log_device_event(pptr->port_fp_handle,
		    FC_ULP_DEVICE_ONLINE);
	} else {
		rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
		    &circ);
		fc_ulp_log_device_event(pptr->port_fp_handle,
		    FC_ULP_DEVICE_OFFLINE);
	}

fail:	mutex_exit(&plun->lun_mutex);
	mutex_exit(&pptr->port_mutex);

	if (is_mpxio) {
		mdi_devi_exit(pptr->port_dip, circ);
	} else {
		ndi_devi_exit(pptr->port_dip, circ);
	}

	fc_ulp_idle_port(pptr->port_fp_handle);

	return (rval);
}


/*
 * take a target offline by taking all of its LUNs offline
 */
/*ARGSUSED*/
static int
fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int nowait, int flags)
{
	struct fcp_tgt_elem *elem;

	ASSERT(mutex_owned(&pptr->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));

	if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
	    ptgt->tgt_change_cnt)) {
		/* state changed under us -- nothing to do */
		mutex_exit(&ptgt->tgt_mutex);
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
		mutex_enter(&ptgt->tgt_mutex);

		return (0);
	}

	ptgt->tgt_pd_handle = NULL;
	mutex_exit(&ptgt->tgt_mutex);
	FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
	mutex_enter(&ptgt->tgt_mutex);

	tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;

	/*
	 * Queue the offline for the watchdog (possibly delayed by
	 * fcp_offline_delay); fall back to an immediate offline if
	 * the element can't be allocated (KM_NOSLEEP).
	 */
	if (ptgt->tgt_tcap &&
	    (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
		elem->flags = flags;
		elem->time = fcp_watchdog_time;
		if (nowait == 0) {
			elem->time += fcp_offline_delay;
		}
		elem->ptgt = ptgt;
		elem->link_cnt = link_cnt;
		elem->tgt_cnt = tgt_cnt;
		elem->next = pptr->port_offline_tgts;
		pptr->port_offline_tgts = elem;
	} else {
		fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
	}

	return (1);
}


/*
 * immediately mark the target offline and offline all of its LUNs
 *
 * called with the port and target mutexes held
 */
static void
fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int flags)
{
	ASSERT(mutex_owned(&pptr->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
	ptgt->tgt_state = FCP_TGT_OFFLINE;
	ptgt->tgt_pd_handle = NULL;
	fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
}


/*
 * offline every LUN of the target that is not already offline
 */
static void
fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
    int flags)
{
	struct fcp_lun	*plun;

	ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
		if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
		}
	}
}


/*
 * take a LUN offline
 *
 * enters and leaves with the target mutex held, releasing it in the process
 *
 * allocates memory in non-sleep mode
 */
static void
fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
    int nowait, int flags)
{
	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
	struct fcp_lun_elem	*elem;

	ASSERT(plun != NULL);
	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	if (nowait) {
		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
		return;
	}

	/*
	 * Queue the offline for the watchdog thread; offline
	 * immediately if the element allocation fails.
	 */
	if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
		elem->flags = flags;
		elem->time = fcp_watchdog_time;
		if (nowait == 0) {
			elem->time += fcp_offline_delay;
		}
		elem->plun = plun;
		elem->link_cnt = link_cnt;
		elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
		elem->next = pptr->port_offline_luns;
		pptr->port_offline_luns = elem;
	} else {
		fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
	}
}


/*
 * abort outstanding commands on the LUN and tell MPxIO the path is
 * about to go away
 *
 * called (and returns) with the target mutex held; drops and reacquires
 * it internally
 */
static void
fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
{
	struct fcp_pkt	*head = NULL;

	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	mutex_exit(&LUN_TGT->tgt_mutex);

	/* pull this LUN's queued commands off the port list and abort them */
	head = fcp_scan_commands(plun);
	if (head != NULL) {
		fcp_abort_commands(head, LUN_PORT);
	}

	mutex_enter(&LUN_TGT->tgt_mutex);

	if (plun->lun_cip && plun->lun_mpxio) {
		/*
		 * Intimate MPxIO lun busy is cleared
		 */
		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
		    0, 0)) {
			fcp_log(CE_NOTE, LUN_PORT->port_dip,
			    "Can not ENABLE LUN; D_ID=%x, LUN=%x",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
		/*
		 * Intimate MPxIO that the lun is now marked for offline
		 */
		mutex_exit(&LUN_TGT->tgt_mutex);
		(void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
		mutex_enter(&LUN_TGT->tgt_mutex);
	}
}

/*
 * immediately offline a LUN: update its state flags, abort its
 * commands, and hand an OFFLINE element to the hotplug thread
 *
 * called with the target mutex held
 */
static void
fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
    int flags)
{
	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	mutex_exit(&LUN_TGT->tgt_mutex);
	fcp_update_offline_flags(plun);
	mutex_enter(&LUN_TGT->tgt_mutex);

	fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);

	FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_4, 0,
	    "offline_lun: passing OFFLINE elem to HP thread");

	if (plun->lun_cip) {
		fcp_log(CE_NOTE, LUN_PORT->port_dip,
		    "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
		    plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
		    LUN_TGT->tgt_trace);

		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
		    link_cnt, tgt_cnt, flags, 0)) {
			fcp_log(CE_CONT, LUN_PORT->port_dip,
			    "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
	}
}

/*
 * watchdog helper: walk the port's delayed-offline LUN list and
 * offline each element whose time has expired
 *
 * called with the port mutex held
 */
static void
fcp_scan_offline_luns(struct fcp_port *pptr)
{
	struct fcp_lun_elem	*elem;
	struct fcp_lun_elem	*prev;
	struct fcp_lun_elem	*next;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	prev = NULL;
	elem = pptr->port_offline_luns;
	while (elem) {
		next = elem->next;
		if (elem->time <= fcp_watchdog_time) {
			int changed = 1;
			struct fcp_tgt	*ptgt = elem->plun->lun_tgt;

			mutex_enter(&ptgt->tgt_mutex);
			if (pptr->port_link_cnt == elem->link_cnt &&
			    ptgt->tgt_change_cnt == elem->tgt_cnt) {
				changed = 0;
			}

			/*
			 * NOTE(review): FCP_TGT_OFFLINE is tested against
			 * lun_state here; FCP_LUN_OFFLINE looks intended --
			 * confirm the flag values before changing.
			 */
			if (!changed &&
			    !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
				fcp_offline_lun_now(elem->plun,
				    elem->link_cnt, elem->tgt_cnt, elem->flags);
			}
			mutex_exit(&ptgt->tgt_mutex);

			kmem_free(elem, sizeof (*elem));

			/* unlink the processed element from the list */
			if (prev) {
				prev->next = next;
			} else {
				pptr->port_offline_luns = next;
			}
		} else {
			prev = elem;
		}
		elem = next;
	}
}


/*
 * watchdog helper: walk the port's delayed-offline target list and
 * offline each element whose time has expired
 *
 * called with the port mutex held
 */
static void
fcp_scan_offline_tgts(struct fcp_port *pptr)
{
	struct fcp_tgt_elem	*elem;
	struct fcp_tgt_elem	*prev;
	struct fcp_tgt_elem	*next;
	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	prev = NULL;
	elem = pptr->port_offline_tgts;
	while (elem) {
		next = elem->next;
		if (elem->time <= fcp_watchdog_time) {
			int outdated = 1;
			struct fcp_tgt	*ptgt = elem->ptgt;

			mutex_enter(&ptgt->tgt_mutex);

			if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
				/* No change on tgt since elem was created. */
				outdated = 0;
			} else if (ptgt->tgt_change_cnt == elem->tgt_cnt + 1 &&
			    pptr->port_link_cnt == elem->link_cnt + 1 &&
			    ptgt->tgt_statec_cause == FCP_CAUSE_LINK_DOWN) {
				/*
				 * Exactly one thing happened to the target
				 * inbetween: the local port went offline.
				 * For fp the remote port is already gone so
				 * it will not tell us again to offline the
				 * target. We must offline it now.
				 */
				outdated = 0;
			}

			if (!outdated && !(ptgt->tgt_state &
			    FCP_TGT_OFFLINE)) {
				fcp_offline_target_now(pptr,
				    ptgt, elem->link_cnt, elem->tgt_cnt,
				    elem->flags);
			}

			mutex_exit(&ptgt->tgt_mutex);

			kmem_free(elem, sizeof (*elem));

			/* unlink the processed element from the list */
			if (prev) {
				prev->next = next;
			} else {
				pptr->port_offline_tgts = next;
			}
		} else {
			prev = elem;
		}
		elem = next;
	}
}


/*
 * mark a LUN offline in its state flags and, if it had been exposed
 * to the framework, post an FCAL_REMOVE_EVENT on its devinfo node
 *
 * acquires and releases the target and lun mutexes
 */
static void
fcp_update_offline_flags(struct fcp_lun *plun)
{
	struct fcp_port	*pptr = LUN_PORT;

	ASSERT(plun != NULL);

	mutex_enter(&LUN_TGT->tgt_mutex);
	plun->lun_state |= FCP_LUN_OFFLINE;
	plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);

	mutex_enter(&plun->lun_mutex);
	if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
		dev_info_t	*cdip = NULL;

		mutex_exit(&LUN_TGT->tgt_mutex);

		/* resolve the devinfo node: direct child vs. MPxIO client */
		if (plun->lun_mpxio == 0) {
			cdip = DIP(plun->lun_cip);
		} else if (plun->lun_cip) {
			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
		}

		mutex_exit(&plun->lun_mutex);
		if (cdip) {
			(void) ndi_event_retrieve_cookie(
			    pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
			    &fcp_remove_eid, NDI_EVENT_NOPASS);
			(void) ndi_event_run_callbacks(
			    pptr->port_ndi_event_hdl, cdip,
			    fcp_remove_eid, NULL);
		}
	} else {
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&LUN_TGT->tgt_mutex);
	}
}


/*
 * Scan all of the command pkts for this port, moving pkts that
 * match our LUN onto our own list (headed by "head")
 */
static struct fcp_pkt *
fcp_scan_commands(struct fcp_lun *plun)
{
	struct fcp_port	*pptr = LUN_PORT;

	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
	struct fcp_pkt	*ncmd = NULL;	/* next pkt ptr */
	struct fcp_pkt	*pcmd = NULL;	/* the previous command */

	struct fcp_pkt	*head = NULL;	/* head of our list */
	struct fcp_pkt	*tail = NULL;	/* tail of our list */

	int	cmds_found = 0;

	mutex_enter(&pptr->port_pkt_mutex);
	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
		struct fcp_lun *tlun =
		    ADDR2LUN(&cmd->cmd_pkt->pkt_address);

		ncmd = cmd->cmd_next;	/* set next command */

		/*
		 * if this pkt is for a different LUN or the
		 * command is sent down, skip it.
		 */
		if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
		    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
			pcmd = cmd;
			continue;
		}
		cmds_found++;
		/* unlink cmd from the port queue */
		if (pcmd != NULL) {
			ASSERT(pptr->port_pkt_head != cmd);
			pcmd->cmd_next = cmd->cmd_next;
		} else {
			ASSERT(cmd == pptr->port_pkt_head);
			pptr->port_pkt_head = cmd->cmd_next;
		}

		if (cmd == pptr->port_pkt_tail) {
			pptr->port_pkt_tail = pcmd;
			if (pcmd) {
				pcmd->cmd_next = NULL;
			}
		}

		/* append cmd to our private list */
		if (head == NULL) {
			head = tail = cmd;
		} else {
			ASSERT(tail != NULL);

			tail->cmd_next = cmd;
			tail = cmd;
		}
		cmd->cmd_next = NULL;
	}
	mutex_exit(&pptr->port_pkt_mutex);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "scan commands: %d cmd(s) found", cmds_found);

	return (head);
}


/*
 * Abort all the commands in the command queue
 */
static void
fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
{
	struct fcp_pkt	*cmd = NULL;	/* pkt cmd ptr */
	struct fcp_pkt	*ncmd = NULL;	/* next pkt ptr */

	ASSERT(mutex_owned(&pptr->port_mutex));

	/* scan through the pkts and invalidate them */
	for (cmd = head; cmd != NULL; cmd = ncmd) {
		struct scsi_pkt *pkt = cmd->cmd_pkt;

		ncmd = cmd->cmd_next;
		ASSERT(pkt != NULL);

		/*
		 * The lun is going to be marked offline. Indicate
		 * the target driver not to requeue or retry this command
		 * as the device is going to be offlined pretty soon.
		 */
		pkt->pkt_reason = CMD_DEV_GONE;
		pkt->pkt_statistics = 0;
		pkt->pkt_state = 0;

		/* reset cmd flags/state */
		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
		cmd->cmd_state = FCP_PKT_IDLE;

		/*
		 * ensure we have a packet completion routine,
		 * then call it.
		 */
		ASSERT(pkt->pkt_comp != NULL);

		/* drop the port mutex across the completion callback */
		mutex_exit(&pptr->port_mutex);
		fcp_post_callback(cmd);
		mutex_enter(&pptr->port_mutex);
	}
}


/*
 * the pkt_comp callback for command packets
 */
static void
fcp_cmd_callback(fc_packet_t *fpkt)
{
	struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
	struct scsi_pkt *pkt = cmd->cmd_pkt;
	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);

	ASSERT(cmd->cmd_state != FCP_PKT_IDLE);

	/* runtime check for non-DEBUG kernels where ASSERT compiles out */
	if (cmd->cmd_state == FCP_PKT_IDLE) {
		cmn_err(CE_PANIC, "Packet already completed %p",
		    (void *)cmd);
	}

	/*
	 * Watch thread should be freeing the packet, ignore the pkt.
	 */
	if (cmd->cmd_state == FCP_PKT_ABORTING) {
		fcp_log(CE_CONT, pptr->port_dip,
		    "!FCP: Pkt completed while aborting\n");
		return;
	}
	cmd->cmd_state = FCP_PKT_IDLE;

	fcp_complete_pkt(fpkt);

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts--;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	fcp_post_callback(cmd);
}


/*
 * translate the FC transport completion status of fpkt into SCSI
 * pkt_state/pkt_reason/pkt_resid (and sense data) that target drivers
 * understand
 */
static void
fcp_complete_pkt(fc_packet_t *fpkt)
{
	int error = 0;
	struct fcp_pkt *cmd = (struct fcp_pkt *)
	    fpkt->pkt_ulp_private;
	struct scsi_pkt *pkt = cmd->cmd_pkt;
	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
	struct fcp_lun	*plun;
	struct fcp_tgt	*ptgt;
	struct fcp_rsp	*rsp;
	struct scsi_address save;

#ifdef	DEBUG
	/* remember the address so we can verify it was not clobbered */
	save = pkt->pkt_address;
#endif /* DEBUG */

	rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;

	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
		if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));
		}

		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS;

		pkt->pkt_resid = 0;
		if (fpkt->pkt_datalen) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
			if (fpkt->pkt_data_resid) {
				error++;
			}
		}

		if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
		    rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
			/*
			 * The next two checks make sure that if there
			 * is no sense data or a valid response and
			 * the command came back with check condition,
			 * the command should be retried.
			 */
			if (!rsp->fcp_u.fcp_status.rsp_len_set &&
			    !rsp->fcp_u.fcp_status.sense_len_set) {
				pkt->pkt_state &= ~STATE_XFERRED_DATA;
				pkt->pkt_resid = cmd->cmd_dmacount;
			}
		}

		/* fully clean completion -- nothing more to translate */
		if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
			return;
		}

		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		/*
		 * Update the transfer resid, if appropriate
		 */
		if (rsp->fcp_u.fcp_status.resid_over ||
		    rsp->fcp_u.fcp_status.resid_under) {
			pkt->pkt_resid = rsp->fcp_resid;
		}

		/*
		 * First see if we got a FCP protocol error.
		 */
		if (rsp->fcp_u.fcp_status.rsp_len_set) {
			struct fcp_rsp_info *bep;

			bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
			    sizeof (struct fcp_rsp));

			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len, rsp->fcp_sense_len);

				return;
			}

			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp), bep,
				    fpkt->pkt_resp_acc,
				    sizeof (struct fcp_rsp_info));
			}

			if (bep->rsp_code != FCP_NO_FAILURE) {
				child_info_t	*cip;

				pkt->pkt_reason = CMD_TRAN_ERR;

				mutex_enter(&plun->lun_mutex);
				cip = plun->lun_cip;
				mutex_exit(&plun->lun_mutex);

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "FCP response error on cmd=%p"
				    " target=0x%x, cip=%p", cmd,
				    ptgt->tgt_d_id, cip);
			}
		}

		/*
		 * See if we got a SCSI error with sense data
		 */
		if (rsp->fcp_u.fcp_status.sense_len_set) {
			uchar_t	rqlen;
			caddr_t	sense_from;
			child_info_t	*cip;
			timeout_id_t	tid;
			struct scsi_arq_status	*arq;
			struct scsi_extended_sense	*sense_to;

			arq = (struct scsi_arq_status *)pkt->pkt_scbp;
			sense_to = &arq->sts_sensedata;

			/* clamp to what the ARQ sense buffer can hold */
			rqlen = (uchar_t)min(rsp->fcp_sense_len,
			    sizeof (struct scsi_extended_sense));

			/* sense data follows the rsp info in the payload */
			sense_from = (caddr_t)fpkt->pkt_resp +
			    sizeof (struct fcp_rsp) + rsp->fcp_response_len;

			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len, rsp->fcp_sense_len);

				return;
			}

			/*
			 * copy in sense information
			 */
			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(sense_from, sense_to,
				    fpkt->pkt_resp_acc, rqlen);
			} else {
				bcopy(sense_from, sense_to, rqlen);
			}

			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
			    (FCP_SENSE_NO_LUN(sense_to))) {
				mutex_enter(&ptgt->tgt_mutex);
				if (ptgt->tgt_tid == NULL) {
					/*
					 * Kick off rediscovery
					 */
					tid = timeout(fcp_reconfigure_luns,
					    (caddr_t)ptgt, drv_usectohz(1));

					ptgt->tgt_tid = tid;
					ptgt->tgt_state |= FCP_TGT_BUSY;
				}
				mutex_exit(&ptgt->tgt_mutex);
				if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
					FCP_TRACE(fcp_logq, pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: Report Lun Has Changed"
					    " target=%x", ptgt->tgt_d_id);
				} else if (FCP_SENSE_NO_LUN(sense_to)) {
					FCP_TRACE(fcp_logq, pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: LU Not Supported"
					    " target=%x", ptgt->tgt_d_id);
				}
			}
			ASSERT(pkt->pkt_scbp != NULL);

			/* fill in the auto-request-sense status area */
			pkt->pkt_state |= STATE_ARQ_DONE;

			arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;

			*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
			arq->sts_rqpkt_reason = 0;
			arq->sts_rqpkt_statistics = 0;

			arq->sts_rqpkt_state = STATE_GOT_BUS |
			    STATE_GOT_TARGET | STATE_SENT_CMD |
			    STATE_GOT_STATUS | STATE_ARQ_DONE |
			    STATE_XFERRED_DATA;

			mutex_enter(&plun->lun_mutex);
			cip = plun->lun_cip;
			mutex_exit(&plun->lun_mutex);

			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_8, 0,
			    "SCSI Check condition on cmd=%p target=0x%x"
			    " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
			    " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
			    cmd->cmd_fcp_cmd.fcp_cdb[0],
			    rsp->fcp_u.fcp_status.scsi_status,
			    sense_to->es_key, sense_to->es_add_code,
			    sense_to->es_qual_code);
		}
	} else {
		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		/*
		 * Work harder to translate errors into target driver
		 * understandable ones. Note with despair that the target
		 * drivers don't decode pkt_state and pkt_reason exhaustively
		 * They resort to using the big hammer most often, which
		 * may not get fixed in the life time of this driver.
		 */
		pkt->pkt_state = 0;
		pkt->pkt_statistics = 0;

		switch (fpkt->pkt_state) {
		case FC_PKT_TRAN_ERROR:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OVERRUN:
				pkt->pkt_reason = CMD_CMD_OVR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_XCHG_BSY: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT;	/* Lie */

				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_ABORTED:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_ABORT_FAILED:
				pkt->pkt_reason = CMD_ABORT_FAIL;
				break;

			case FC_REASON_NO_SEQ_INIT:
			case FC_REASON_CRC_ERROR:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;
			default:
				pkt->pkt_reason = CMD_TRAN_ERR;
				break;
			}
			break;

		case FC_PKT_PORT_OFFLINE: {
			dev_info_t	*cdip = NULL;
			caddr_t		ptr;

			if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_8, 0,
				    "SCSI cmd; LOGIN REQUIRED from FCA for %x",
				    ptgt->tgt_d_id);
			}

			/* resolve devinfo node: direct child vs MPxIO client */
			mutex_enter(&plun->lun_mutex);
			if (plun->lun_mpxio == 0) {
				cdip = DIP(plun->lun_cip);
			} else if (plun->lun_cip) {
				cdip = mdi_pi_get_client(PIP(plun->lun_cip));
			}

			mutex_exit(&plun->lun_mutex);

			if (cdip) {
				(void) ndi_event_retrieve_cookie(
				    pptr->port_ndi_event_hdl, cdip,
				    FCAL_REMOVE_EVENT, &fcp_remove_eid,
				    NDI_EVENT_NOPASS);
				(void) ndi_event_run_callbacks(
				    pptr->port_ndi_event_hdl, cdip,
				    fcp_remove_eid, NULL);
			}

			/*
			 * If the link goes off-line for a lip,
			 * this will cause a error to the ST SG
			 * SGEN drivers. By setting BUSY we will
			 * give the drivers the chance to retry
			 * before it blows of the job. ST will
			 * remember how many times it has retried.
			 */

			if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
			    (plun->lun_type == DTYPE_CHANGER)) {
				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
			} else {
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;
			}
			break;
		}

		case FC_PKT_TRAN_BSY:
			/*
			 * Use the ssd Qfull handling here.
			 */
			*pkt->pkt_scbp = STATUS_INTERMEDIATE;
			pkt->pkt_state = STATE_GOT_BUS;
			break;

		case FC_PKT_TIMEOUT:
			pkt->pkt_reason = CMD_TIMEOUT;
			if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
				pkt->pkt_statistics |= STAT_TIMEOUT;
			} else {
				pkt->pkt_statistics |= STAT_ABORTED;
			}
			break;

		case FC_PKT_LOCAL_RJT:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OFFLINE: {
				dev_info_t	*cdip = NULL;

				mutex_enter(&plun->lun_mutex);
				if (plun->lun_mpxio == 0) {
					cdip = DIP(plun->lun_cip);
				} else if (plun->lun_cip) {
					cdip = mdi_pi_get_client(
					    PIP(plun->lun_cip));
				}
				mutex_exit(&plun->lun_mutex);

				if (cdip) {
					(void) ndi_event_retrieve_cookie(
					    pptr->port_ndi_event_hdl, cdip,
					    FCAL_REMOVE_EVENT,
					    &fcp_remove_eid,
					    NDI_EVENT_NOPASS);
					(void) ndi_event_run_callbacks(
					    pptr->port_ndi_event_hdl,
					    cdip, fcp_remove_eid, NULL);
				}

				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;

				break;
			}

			case FC_REASON_NOMEM:
			case FC_REASON_QFULL: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_DMA_ERROR:
				pkt->pkt_reason = CMD_DMA_DERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_CRC_ERROR:
			case FC_REASON_UNDERRUN: {
				uchar_t	status;
				/*
				 * Work around for Bugid: 4240945.
				 * IB on A5k doesn't set the Underrun bit
				 * in the fcp status, when it is transferring
				 * less than requested amount of data. Work
				 * around the ses problem to keep luxadm
				 * happy till ibfirmware is fixed.
			 */
			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(fpkt->pkt_resp, rsp,
				    fpkt->pkt_resp_acc,
				    sizeof (struct fcp_rsp));
			}
			status = rsp->fcp_u.fcp_status.scsi_status;
			if (((plun->lun_type & DTYPE_MASK) ==
			    DTYPE_ESI) && (status == STATUS_GOOD)) {
				pkt->pkt_reason = CMD_CMPLT;
				*pkt->pkt_scbp = status;
				pkt->pkt_resid = 0;
			} else {
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
			}
			break;
		}

		case FC_REASON_NO_CONNECTION:
		case FC_REASON_UNSUPPORTED:
		case FC_REASON_ILLEGAL_REQ:
		case FC_REASON_BAD_SID:
		case FC_REASON_DIAG_BUSY:
		case FC_REASON_FCAL_OPN_FAIL:
		case FC_REASON_BAD_XID:
		default:
			pkt->pkt_reason = CMD_TRAN_ERR;
			pkt->pkt_statistics |= STAT_ABORTED;
			break;

		}
		break;

	case FC_PKT_NPORT_RJT:
	case FC_PKT_FABRIC_RJT:
	case FC_PKT_NPORT_BSY:
	case FC_PKT_FABRIC_BSY:
	default:
		/* Any other FC transport state maps to a generic error. */
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "FC Status 0x%x, reason 0x%x",
		    fpkt->pkt_state, fpkt->pkt_reason);
		pkt->pkt_reason = CMD_TRAN_ERR;
		pkt->pkt_statistics |= STAT_ABORTED;
		break;
	}

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_9, 0,
	    "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
	    " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
	    fpkt->pkt_reason);
	}

	ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
}


/*
 * Function:	fcp_validate_fcp_response
 *
 * Description:	Sanity-checks an FCP response IU.  Non-zero reserved
 *		fields are tolerated (logged only); response/sense length
 *		fields that would overflow FCP_MAX_RSP_IU_SIZE are rejected.
 *
 * Return Value:	FC_SUCCESS	response is usable
 *			FC_FAILURE	length fields are inconsistent
 */
static int
fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
{
	if (rsp->reserved_0 || rsp->reserved_1 ||
	    rsp->fcp_u.fcp_status.reserved_0 ||
	    rsp->fcp_u.fcp_status.reserved_1) {
		/*
		 * These reserved fields should ideally be zero. FCP-2 does say
		 * that the recipient need not check for reserved fields to be
		 * zero. If they are not zero, we will not make a fuss about it
		 * - just log it (in debug to both trace buffer and messages
		 * file and to trace buffer only in non-debug) and move on.
		 *
		 * Non-zero reserved fields were seen with minnows.
		 *
		 * qlc takes care of some of this but we cannot assume that all
		 * FCAs will do so.
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_5, 0,
		    "Got fcp response packet with non-zero reserved fields "
		    "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
		    "status.reserved_0:0x%x, status.reserved_1:0x%x",
		    rsp->reserved_0, rsp->reserved_1,
		    rsp->fcp_u.fcp_status.reserved_0,
		    rsp->fcp_u.fcp_status.reserved_1);
	}

	/* Reject a response-data length larger than the IU can carry. */
	if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
	    (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
		return (FC_FAILURE);
	}

	/* Reject a sense length that overflows what remains of the IU. */
	if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
	    (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
	    sizeof (struct fcp_rsp))) {
		return (FC_FAILURE);
	}

	return (FC_SUCCESS);
}


/*
 * This is called when there is a change in the device state. The case we're
 * handling here is: if the d_id does not match, offline this tgt and online
 * a new tgt with the new d_id. Called from fcp_handle_devices with
 * port_mutex held.
 */
static int
fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
{
	ASSERT(mutex_owned(&pptr->port_mutex));

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "Starting fcp_device_changed...");

	/*
	 * The two cases where the port_device_changed is called is
	 * either it changes its d_id or its hard address.
	 */
	if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
	    (FC_TOP_EXTERNAL(pptr->port_topology) &&
	    (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {

		/* offline this target */
		mutex_enter(&ptgt->tgt_mutex);
		if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
			(void) fcp_offline_target(pptr, ptgt, link_cnt,
			    0, 1, NDI_DEVI_REMOVE);
		}
		mutex_exit(&ptgt->tgt_mutex);

		fcp_log(CE_NOTE, pptr->port_dip,
		    "Change in target properties: Old D_ID=%x New D_ID=%x"
		    " Old HA=%x New HA=%x", ptgt->tgt_d_id,
		    map_entry->map_did.port_id, ptgt->tgt_hard_addr,
		    map_entry->map_hard_addr.hard_addr);
	}

	/* Online the target under its new identity. */
	return (fcp_handle_mapflags(pptr, ptgt, map_entry,
	    link_cnt, tgt_cnt, cause));
}

/*
 * Function:	fcp_alloc_lun
 *
 * Description:	Creates a new lun structure and adds it to the list
 *		of luns of the target.  Allocation is KM_NOSLEEP, so
 *		this can fail and return NULL under memory pressure.
 *
 * Argument:	ptgt		Target the lun will belong to.
 *
 * Return Value:	NULL		Failed
 *			Not NULL	Succeeded
 *
 * Context:	Kernel context
 */
static struct fcp_lun *
fcp_alloc_lun(struct fcp_tgt *ptgt)
{
	struct fcp_lun *plun;

	plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
	if (plun != NULL) {
		/*
		 * Initialize the mutex before putting in the target list
		 * especially before releasing the target mutex.
		 */
		mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
		plun->lun_tgt = ptgt;

		/* Insert at the head of the target's LUN list. */
		mutex_enter(&ptgt->tgt_mutex);
		plun->lun_next = ptgt->tgt_lun;
		ptgt->tgt_lun = plun;
		plun->lun_old_guid = NULL;
		plun->lun_old_guid_size = 0;
		mutex_exit(&ptgt->tgt_mutex);
	}

	return (plun);
}

/*
 * Function:	fcp_dealloc_lun
 *
 * Description:	Frees the LUN structure passed by the caller.
 *
 * Argument:	plun		LUN structure to free.
 *
 * Return Value:	None
 *
 * Context:		Kernel context.
 */
static void
fcp_dealloc_lun(struct fcp_lun *plun)
{
	/* Detach the child/pathinfo node (if any) before tearing down. */
	mutex_enter(&plun->lun_mutex);
	if (plun->lun_cip) {
		fcp_remove_child(plun);
	}
	mutex_exit(&plun->lun_mutex);

	mutex_destroy(&plun->lun_mutex);
	if (plun->lun_guid) {
		kmem_free(plun->lun_guid, plun->lun_guid_size);
	}
	if (plun->lun_old_guid) {
		kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
	}
	kmem_free(plun, sizeof (*plun));
}

/*
 * Function:	fcp_alloc_tgt
 *
 * Description:	Creates a new target structure and adds it to the port
 *		hash list.  Allocation is KM_NOSLEEP; NULL is also
 *		returned if another link reset happened since the caller
 *		sampled link_cnt.
 *
 * Argument:	pptr		fcp port structure
 *		*map_entry	entry describing the target to create
 *		link_cnt	Link state change counter
 *
 * Return Value:	NULL		Failed
 *			Not NULL	Succeeded
 *
 * Context:	Kernel context.
 */
static struct fcp_tgt *
fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
{
	int hash;
	uchar_t *wwn;
	struct fcp_tgt *ptgt;

	ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
	if (ptgt != NULL) {
		mutex_enter(&pptr->port_mutex);
		if (link_cnt != pptr->port_link_cnt) {
			/*
			 * oh oh -- another link reset
			 * in progress -- give up
			 */
			mutex_exit(&pptr->port_mutex);
			kmem_free(ptgt, sizeof (*ptgt));
			ptgt = NULL;
		} else {
			/*
			 * initialize the mutex before putting in the port
			 * wwn list, especially before releasing the port
			 * mutex.
			 */
			mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);

			/* add new target entry to the port's hash list */
			wwn = (uchar_t *)&map_entry->map_pwwn;
			hash = FCP_HASH(wwn);

			ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
			pptr->port_tgt_hash_table[hash] = ptgt;

			/* save cross-ptr */
			ptgt->tgt_port = pptr;

			ptgt->tgt_change_cnt = 1;

			/* initialize the target manual_config_only flag */
			if (fcp_enable_auto_configuration) {
				ptgt->tgt_manual_config_only = 0;
			} else {
				ptgt->tgt_manual_config_only = 1;
			}

			mutex_exit(&pptr->port_mutex);
		}
	}

	return (ptgt);
}

/*
 * Function:	fcp_dealloc_tgt
 *
 * Description:	Frees the target structure passed by the caller.
 *
 * Argument:	ptgt		Target structure to free.
 *
 * Return Value:	None
 *
 * Context:	Kernel context.
 */
static void
fcp_dealloc_tgt(struct fcp_tgt *ptgt)
{
	mutex_destroy(&ptgt->tgt_mutex);
	kmem_free(ptgt, sizeof (*ptgt));
}


/*
 * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry
 *
 * Device discovery commands will not be retried for-ever as
 * this will have repercussions on other devices that need to
 * be submitted to the hotplug thread. After a quick glance
 * at the SCSI-3 spec, it was found that the spec doesn't
 * mandate a forever retry, rather recommends a delayed retry.
 *
 * Since Photon IB is single threaded, STATUS_BUSY is common
 * in a 4+initiator environment.
Make sure the total time
 * spent on retries (including command timeout) does not
 * exceed 60 seconds.
 */
static void
fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
{
	struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
	struct fcp_tgt *ptgt = icmd->ipkt_tgt;

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	/*
	 * If the link/target state changed since this internal packet was
	 * built, retrying is pointless: finish the discovery accounting
	 * and free the packet instead of queueing it.
	 */
	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_queue_ipkt,1:state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}
	mutex_exit(&ptgt->tgt_mutex);

	/* Back off a little longer on each successive retry. */
	icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;

	if (pptr->port_ipkt_list != NULL) {
		/* add pkt to front of doubly-linked list */
		pptr->port_ipkt_list->ipkt_prev = icmd;
		icmd->ipkt_next = pptr->port_ipkt_list;
		pptr->port_ipkt_list = icmd;
		icmd->ipkt_prev = NULL;
	} else {
		/* this is the first/only pkt on the list */
		pptr->port_ipkt_list = icmd;
		icmd->ipkt_next = NULL;
		icmd->ipkt_prev = NULL;
	}
	mutex_exit(&pptr->port_mutex);
}

/*
 * Function:	fcp_transport
 *
 * Description:	This function submits the Fibre Channel packet to the
 *		transport layer by calling fc_ulp_transport(). If
 *		fc_ulp_transport() fails the submission, the treatment
 *		depends on the value of the variable internal.
 *
 * Argument:	port_handle	fp/fctl port handle.
 *		*fpkt		Packet to submit to the transport layer.
 *		internal	Not zero when it's an internal packet.
 *
 * Return Value:	FC_TRAN_BUSY
 *			FC_STATEC_BUSY
 *			FC_OFFLINE
 *			FC_LOGINREQ
 *			FC_DEVICE_BUSY
 *			FC_SUCCESS
 */
static int
fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
{
	int rval;

	rval = fc_ulp_transport(port_handle, fpkt);
	if (rval == FC_SUCCESS) {
		return (rval);
	}

	/*
	 * LUN isn't marked BUSY or OFFLINE, so we got here to transport
	 * a command, if the underlying modules see that there is a state
	 * change, or if a port is OFFLINE, that means, that state change
	 * hasn't reached FCP yet, so re-queue the command for deferred
	 * submission.
	 */
	if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
	    (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
	    (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
		/*
		 * Defer packet re-submission. Life hang is possible on
		 * internal commands if the port driver sends FC_STATEC_BUSY
		 * for ever, but that shouldn't happen in a good environment.
		 * Limiting re-transport for internal commands is probably a
		 * good idea..
		 * A race condition can happen when a port sees barrage of
		 * link transitions offline to online. If the FCTL has
		 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
		 * internal commands should be queued to do the discovery.
		 * The race condition is when an online comes and FCP starts
		 * its internal discovery and the link goes offline. It is
		 * possible that the statec_callback has not reached FCP
		 * and FCP is carrying on with its internal discovery.
		 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
		 * that the link has gone offline. At this point FCP should
		 * drop all the internal commands and wait for the
		 * statec_callback. It will be facilitated by incrementing
		 * port_link_cnt.
9585 * 9586 * For external commands, the (FC)pkt_timeout is decremented 9587 * by the QUEUE Delay added by our driver, Care is taken to 9588 * ensure that it doesn't become zero (zero means no timeout) 9589 * If the time expires right inside driver queue itself, 9590 * the watch thread will return it to the original caller 9591 * indicating that the command has timed-out. 9592 */ 9593 if (internal) { 9594 char *op; 9595 struct fcp_ipkt *icmd; 9596 9597 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private; 9598 switch (icmd->ipkt_opcode) { 9599 case SCMD_REPORT_LUN: 9600 op = "REPORT LUN"; 9601 break; 9602 9603 case SCMD_INQUIRY: 9604 op = "INQUIRY"; 9605 break; 9606 9607 case SCMD_INQUIRY_PAGE83: 9608 op = "INQUIRY-83"; 9609 break; 9610 9611 default: 9612 op = "Internal SCSI COMMAND"; 9613 break; 9614 } 9615 9616 if (fcp_handle_ipkt_errors(icmd->ipkt_port, 9617 icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) { 9618 rval = FC_SUCCESS; 9619 } 9620 } else { 9621 struct fcp_pkt *cmd; 9622 struct fcp_port *pptr; 9623 9624 cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private; 9625 cmd->cmd_state = FCP_PKT_IDLE; 9626 pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address); 9627 9628 if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) { 9629 FCP_DTRACE(fcp_logq, pptr->port_instbuf, 9630 fcp_trace, FCP_BUF_LEVEL_9, 0, 9631 "fcp_transport: xport busy for pkt %p", 9632 cmd->cmd_pkt); 9633 rval = FC_TRAN_BUSY; 9634 } else { 9635 fcp_queue_pkt(pptr, cmd); 9636 rval = FC_SUCCESS; 9637 } 9638 } 9639 } 9640 9641 return (rval); 9642 } 9643 9644 /*VARARGS3*/ 9645 static void 9646 fcp_log(int level, dev_info_t *dip, const char *fmt, ...) 9647 { 9648 char buf[256]; 9649 va_list ap; 9650 9651 if (dip == NULL) { 9652 dip = fcp_global_dip; 9653 } 9654 9655 va_start(ap, fmt); 9656 (void) vsprintf(buf, fmt, ap); 9657 va_end(ap); 9658 9659 scsi_log(dip, "fcp", level, buf); 9660 } 9661 9662 /* 9663 * This function retries NS registry of FC4 type. 9664 * It assumes that fcp_mutex is held. 
 * The function does nothing if topology is not fabric
 * So, the topology has to be set before this function can be called
 */
static void
fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
{
	int rval;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	/*
	 * Nothing to retry unless a previous registration failed and the
	 * topology is one that has a name server (fabric/public loop).
	 * In the non-fabric case, just clear any stale failure flag.
	 */
	if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
	    ((pptr->port_topology != FC_TOP_FABRIC) &&
	    (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
			pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
		}
		return;
	}

	/*
	 * port_mutex is dropped across the synchronous name server call;
	 * port_state may therefore change underneath us during the call.
	 */
	mutex_exit(&pptr->port_mutex);
	rval = fcp_do_ns_registry(pptr, s_id);
	mutex_enter(&pptr->port_mutex);

	if (rval == 0) {
		/* Registry successful. Reset flag */
		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
	}
}

/*
 * This function registers the ULP with the switch by calling transport i/f
 */
static int
fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
{
	fc_ns_cmd_t ns_cmd;
	ns_rfc_type_t rfc;
	uint32_t types[8];

	/*
	 * Prepare the Name server structure to
	 * register with the transport in case of
	 * Fabric configuration.
	 */
	bzero(&rfc, sizeof (rfc));
	bzero(types, sizeof (types));

	/* Set the bit advertising the SCSI-FCP FC-4 type. */
	types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
	    (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));

	rfc.rfc_port_id.port_id = s_id;
	bcopy(types, rfc.rfc_types, sizeof (types));

	ns_cmd.ns_flags = 0;
	ns_cmd.ns_cmd = NS_RFT_ID;
	ns_cmd.ns_req_len = sizeof (rfc);
	ns_cmd.ns_req_payload = (caddr_t)&rfc;
	ns_cmd.ns_resp_len = 0;
	ns_cmd.ns_resp_payload = NULL;

	/*
	 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
	 */
	if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!ns_registry: failed name server registration");
		return (1);
	}

	return (0);
}

/*
 * Function:	fcp_handle_port_attach
 *
 * Description:	This function is called from fcp_port_attach() to attach a
 *		new port. This routine does the following:
 *
 *		1) Allocates an fcp_port structure and initializes it.
 *		2) Tries to register the new FC-4 (FCP) capability with the
 *		   name server.
 *		3) Kicks off the enumeration of the targets/luns visible
 *		   through this new port.  That is done by calling
 *		   fcp_statec_callback() if the port is online.
 *
 * Argument:	ulph		fp/fctl port handle.
 *		*pinfo		Port information.
 *		s_id		Port ID.
 *		instance	Device instance number for the local port
 *				(returned by ddi_get_instance()).
 *
 * Return Value:	DDI_SUCCESS
 *			DDI_FAILURE
 *
 * Context:	User and Kernel context.
9759 */ 9760 /*ARGSUSED*/ 9761 int 9762 fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo, 9763 uint32_t s_id, int instance) 9764 { 9765 int res = DDI_FAILURE; 9766 scsi_hba_tran_t *tran; 9767 int mutex_initted = FALSE; 9768 int hba_attached = FALSE; 9769 int soft_state_linked = FALSE; 9770 int event_bind = FALSE; 9771 struct fcp_port *pptr; 9772 fc_portmap_t *tmp_list = NULL; 9773 uint32_t max_cnt, alloc_cnt; 9774 uchar_t *boot_wwn = NULL; 9775 uint_t nbytes; 9776 int manual_cfg; 9777 9778 /* 9779 * this port instance attaching for the first time (or after 9780 * being detached before) 9781 */ 9782 FCP_TRACE(fcp_logq, "fcp", fcp_trace, 9783 FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance); 9784 9785 if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) { 9786 cmn_err(CE_WARN, "fcp: Softstate struct alloc failed" 9787 "parent dip: %p; instance: %d", (void *)pinfo->port_dip, 9788 instance); 9789 return (res); 9790 } 9791 9792 if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) { 9793 /* this shouldn't happen */ 9794 ddi_soft_state_free(fcp_softstate, instance); 9795 cmn_err(CE_WARN, "fcp: bad soft state"); 9796 return (res); 9797 } 9798 9799 (void) sprintf(pptr->port_instbuf, "fcp(%d)", instance); 9800 9801 /* 9802 * Make a copy of ulp_port_info as fctl allocates 9803 * a temp struct. 9804 */ 9805 (void) fcp_cp_pinfo(pptr, pinfo); 9806 9807 /* 9808 * Check for manual_configuration_only property. 9809 * Enable manual configurtion if the property is 9810 * set to 1, otherwise disable manual configuration. 
9811 */ 9812 if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip, 9813 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 9814 MANUAL_CFG_ONLY, 9815 -1)) != -1) { 9816 if (manual_cfg == 1) { 9817 char *pathname; 9818 pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 9819 (void) ddi_pathname(pptr->port_dip, pathname); 9820 cmn_err(CE_NOTE, 9821 "%s (%s%d) %s is enabled via %s.conf.", 9822 pathname, 9823 ddi_driver_name(pptr->port_dip), 9824 ddi_get_instance(pptr->port_dip), 9825 MANUAL_CFG_ONLY, 9826 ddi_driver_name(pptr->port_dip)); 9827 fcp_enable_auto_configuration = 0; 9828 kmem_free(pathname, MAXPATHLEN); 9829 } 9830 } 9831 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt)); 9832 pptr->port_link_cnt = 1; 9833 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt)); 9834 pptr->port_id = s_id; 9835 pptr->port_instance = instance; 9836 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state)); 9837 pptr->port_state = FCP_STATE_INIT; 9838 if (pinfo->port_acc_attr == NULL) { 9839 /* 9840 * The corresponding FCA doesn't support DMA at all 9841 */ 9842 pptr->port_state |= FCP_STATE_FCA_IS_NODMA; 9843 } 9844 9845 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state)); 9846 9847 if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) { 9848 /* 9849 * If FCA supports DMA in SCSI data phase, we need preallocate 9850 * dma cookie, so stash the cookie size 9851 */ 9852 pptr->port_dmacookie_sz = sizeof (ddi_dma_cookie_t) * 9853 pptr->port_data_dma_attr.dma_attr_sgllen; 9854 } 9855 9856 /* 9857 * The two mutexes of fcp_port are initialized. The variable 9858 * mutex_initted is incremented to remember that fact. That variable 9859 * is checked when the routine fails and the mutexes have to be 9860 * destroyed. 9861 */ 9862 mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL); 9863 mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL); 9864 mutex_initted++; 9865 9866 /* 9867 * The SCSI tran structure is allocate and initialized now. 
9868 */ 9869 if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) { 9870 fcp_log(CE_WARN, pptr->port_dip, 9871 "!fcp%d: scsi_hba_tran_alloc failed", instance); 9872 goto fail; 9873 } 9874 9875 /* link in the transport structure then fill it in */ 9876 pptr->port_tran = tran; 9877 tran->tran_hba_private = pptr; 9878 tran->tran_tgt_init = fcp_scsi_tgt_init; 9879 tran->tran_tgt_probe = NULL; 9880 tran->tran_tgt_free = fcp_scsi_tgt_free; 9881 tran->tran_start = fcp_scsi_start; 9882 tran->tran_reset = fcp_scsi_reset; 9883 tran->tran_abort = fcp_scsi_abort; 9884 tran->tran_getcap = fcp_scsi_getcap; 9885 tran->tran_setcap = fcp_scsi_setcap; 9886 tran->tran_init_pkt = NULL; 9887 tran->tran_destroy_pkt = NULL; 9888 tran->tran_dmafree = NULL; 9889 tran->tran_sync_pkt = NULL; 9890 tran->tran_reset_notify = fcp_scsi_reset_notify; 9891 tran->tran_get_bus_addr = fcp_scsi_get_bus_addr; 9892 tran->tran_get_name = fcp_scsi_get_name; 9893 tran->tran_clear_aca = NULL; 9894 tran->tran_clear_task_set = NULL; 9895 tran->tran_terminate_task = NULL; 9896 tran->tran_get_eventcookie = fcp_scsi_bus_get_eventcookie; 9897 tran->tran_add_eventcall = fcp_scsi_bus_add_eventcall; 9898 tran->tran_remove_eventcall = fcp_scsi_bus_remove_eventcall; 9899 tran->tran_post_event = fcp_scsi_bus_post_event; 9900 tran->tran_quiesce = NULL; 9901 tran->tran_unquiesce = NULL; 9902 tran->tran_bus_reset = NULL; 9903 tran->tran_bus_config = fcp_scsi_bus_config; 9904 tran->tran_bus_unconfig = fcp_scsi_bus_unconfig; 9905 tran->tran_bus_power = NULL; 9906 tran->tran_interconnect_type = INTERCONNECT_FABRIC; 9907 9908 tran->tran_pkt_constructor = fcp_kmem_cache_constructor; 9909 tran->tran_pkt_destructor = fcp_kmem_cache_destructor; 9910 tran->tran_setup_pkt = fcp_pkt_setup; 9911 tran->tran_teardown_pkt = fcp_pkt_teardown; 9912 tran->tran_hba_len = pptr->port_priv_pkt_len + 9913 sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz; 9914 if (pptr->port_state & FCP_STATE_FCA_IS_NODMA) { 9915 /* 9916 * If FCA don't 
support DMA, then we use different vectors to 9917 * minimize the effects on DMA code flow path 9918 */ 9919 tran->tran_start = fcp_pseudo_start; 9920 tran->tran_init_pkt = fcp_pseudo_init_pkt; 9921 tran->tran_destroy_pkt = fcp_pseudo_destroy_pkt; 9922 tran->tran_sync_pkt = fcp_pseudo_sync_pkt; 9923 tran->tran_dmafree = fcp_pseudo_dmafree; 9924 tran->tran_setup_pkt = NULL; 9925 tran->tran_teardown_pkt = NULL; 9926 tran->tran_pkt_constructor = NULL; 9927 tran->tran_pkt_destructor = NULL; 9928 pptr->port_data_dma_attr = pseudo_fca_dma_attr; 9929 } 9930 9931 /* 9932 * Allocate an ndi event handle 9933 */ 9934 pptr->port_ndi_event_defs = (ndi_event_definition_t *) 9935 kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP); 9936 9937 bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs, 9938 sizeof (fcp_ndi_event_defs)); 9939 9940 (void) ndi_event_alloc_hdl(pptr->port_dip, NULL, 9941 &pptr->port_ndi_event_hdl, NDI_SLEEP); 9942 9943 pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1; 9944 pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS; 9945 pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs; 9946 9947 if (DEVI_IS_ATTACHING(pptr->port_dip) && 9948 (ndi_event_bind_set(pptr->port_ndi_event_hdl, 9949 &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) { 9950 goto fail; 9951 } 9952 event_bind++; /* Checked in fail case */ 9953 9954 if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr, 9955 tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB) 9956 != DDI_SUCCESS) { 9957 fcp_log(CE_WARN, pptr->port_dip, 9958 "!fcp%d: scsi_hba_attach_setup failed", instance); 9959 goto fail; 9960 } 9961 hba_attached++; /* Checked in fail case */ 9962 9963 pptr->port_mpxio = 0; 9964 if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) == 9965 MDI_SUCCESS) { 9966 pptr->port_mpxio++; 9967 } 9968 9969 /* 9970 * The following code is putting the new port structure in the global 9971 * list of ports and, if it is the first port to attach, it start the 9972 * 
fcp_watchdog_tick. 9973 * 9974 * Why put this new port in the global before we are done attaching it? 9975 * We are actually making the structure globally known before we are 9976 * done attaching it. The reason for that is: because of the code that 9977 * follows. At this point the resources to handle the port are 9978 * allocated. This function is now going to do the following: 9979 * 9980 * 1) It is going to try to register with the name server advertizing 9981 * the new FCP capability of the port. 9982 * 2) It is going to play the role of the fp/fctl layer by building 9983 * a list of worlwide names reachable through this port and call 9984 * itself on fcp_statec_callback(). That requires the port to 9985 * be part of the global list. 9986 */ 9987 mutex_enter(&fcp_global_mutex); 9988 if (fcp_port_head == NULL) { 9989 fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist); 9990 } 9991 pptr->port_next = fcp_port_head; 9992 fcp_port_head = pptr; 9993 soft_state_linked++; 9994 9995 if (fcp_watchdog_init++ == 0) { 9996 fcp_watchdog_tick = fcp_watchdog_timeout * 9997 drv_usectohz(1000000); 9998 fcp_watchdog_id = timeout(fcp_watch, NULL, 9999 fcp_watchdog_tick); 10000 } 10001 mutex_exit(&fcp_global_mutex); 10002 10003 /* 10004 * Here an attempt is made to register with the name server, the new 10005 * FCP capability. That is done using an RTF_ID to the name server. 10006 * It is done synchronously. The function fcp_do_ns_registry() 10007 * doesn't return till the name server responded. 10008 * On failures, just ignore it for now and it will get retried during 10009 * state change callbacks. 
We'll set a flag to show this failure 10010 */ 10011 if (fcp_do_ns_registry(pptr, s_id)) { 10012 mutex_enter(&pptr->port_mutex); 10013 pptr->port_state |= FCP_STATE_NS_REG_FAILED; 10014 mutex_exit(&pptr->port_mutex); 10015 } else { 10016 mutex_enter(&pptr->port_mutex); 10017 pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED); 10018 mutex_exit(&pptr->port_mutex); 10019 } 10020 10021 /* 10022 * Lookup for boot WWN property 10023 */ 10024 if (modrootloaded != 1) { 10025 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, 10026 ddi_get_parent(pinfo->port_dip), 10027 DDI_PROP_DONTPASS, OBP_BOOT_WWN, 10028 &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) && 10029 (nbytes == FC_WWN_SIZE)) { 10030 bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE); 10031 } 10032 if (boot_wwn) { 10033 ddi_prop_free(boot_wwn); 10034 } 10035 } 10036 10037 /* 10038 * Handle various topologies and link states. 10039 */ 10040 switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) { 10041 case FC_STATE_OFFLINE: 10042 10043 /* 10044 * we're attaching a port where the link is offline 10045 * 10046 * Wait for ONLINE, at which time a state 10047 * change will cause a statec_callback 10048 * 10049 * in the mean time, do not do anything 10050 */ 10051 res = DDI_SUCCESS; 10052 pptr->port_state |= FCP_STATE_OFFLINE; 10053 break; 10054 10055 case FC_STATE_ONLINE: { 10056 if (pptr->port_topology == FC_TOP_UNKNOWN) { 10057 (void) fcp_linkreset(pptr, NULL, KM_NOSLEEP); 10058 res = DDI_SUCCESS; 10059 break; 10060 } 10061 /* 10062 * discover devices and create nodes (a private 10063 * loop or point-to-point) 10064 */ 10065 ASSERT(pptr->port_topology != FC_TOP_UNKNOWN); 10066 10067 /* 10068 * At this point we are going to build a list of all the ports 10069 * that can be reached through this local port. It looks like 10070 * we cannot handle more than FCP_MAX_DEVICES per local port 10071 * (128). 
10072 */ 10073 if ((tmp_list = (fc_portmap_t *)kmem_zalloc( 10074 sizeof (fc_portmap_t) * FCP_MAX_DEVICES, 10075 KM_NOSLEEP)) == NULL) { 10076 fcp_log(CE_WARN, pptr->port_dip, 10077 "!fcp%d: failed to allocate portmap", 10078 instance); 10079 goto fail; 10080 } 10081 10082 /* 10083 * fc_ulp_getportmap() is going to provide us with the list of 10084 * remote ports in the buffer we just allocated. The way the 10085 * list is going to be retrieved depends on the topology. 10086 * However, if we are connected to a Fabric, a name server 10087 * request may be sent to get the list of FCP capable ports. 10088 * It should be noted that is the case the request is 10089 * synchronous. This means we are stuck here till the name 10090 * server replies. A lot of things can change during that time 10091 * and including, may be, being called on 10092 * fcp_statec_callback() for different reasons. I'm not sure 10093 * the code can handle that. 10094 */ 10095 max_cnt = FCP_MAX_DEVICES; 10096 alloc_cnt = FCP_MAX_DEVICES; 10097 if ((res = fc_ulp_getportmap(pptr->port_fp_handle, 10098 &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) != 10099 FC_SUCCESS) { 10100 caddr_t msg; 10101 10102 (void) fc_ulp_error(res, &msg); 10103 10104 /* 10105 * this just means the transport is 10106 * busy perhaps building a portmap so, 10107 * for now, succeed this port attach 10108 * when the transport has a new map, 10109 * it'll send us a state change then 10110 */ 10111 fcp_log(CE_WARN, pptr->port_dip, 10112 "!failed to get port map : %s", msg); 10113 10114 res = DDI_SUCCESS; 10115 break; /* go return result */ 10116 } 10117 if (max_cnt > alloc_cnt) { 10118 alloc_cnt = max_cnt; 10119 } 10120 10121 /* 10122 * We are now going to call fcp_statec_callback() ourselves. 10123 * By issuing this call we are trying to kick off the enumera- 10124 * tion process. 
10125 */ 10126 /* 10127 * let the state change callback do the SCSI device 10128 * discovery and create the devinfos 10129 */ 10130 fcp_statec_callback(ulph, pptr->port_fp_handle, 10131 pptr->port_phys_state, pptr->port_topology, tmp_list, 10132 max_cnt, pptr->port_id); 10133 10134 res = DDI_SUCCESS; 10135 break; 10136 } 10137 10138 default: 10139 /* unknown port state */ 10140 fcp_log(CE_WARN, pptr->port_dip, 10141 "!fcp%d: invalid port state at attach=0x%x", 10142 instance, pptr->port_phys_state); 10143 10144 mutex_enter(&pptr->port_mutex); 10145 pptr->port_phys_state = FCP_STATE_OFFLINE; 10146 mutex_exit(&pptr->port_mutex); 10147 10148 res = DDI_SUCCESS; 10149 break; 10150 } 10151 10152 /* free temp list if used */ 10153 if (tmp_list != NULL) { 10154 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt); 10155 } 10156 10157 /* note the attach time */ 10158 pptr->port_attach_time = ddi_get_lbolt64(); 10159 10160 /* all done */ 10161 return (res); 10162 10163 /* a failure we have to clean up after */ 10164 fail: 10165 fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port"); 10166 10167 if (soft_state_linked) { 10168 /* remove this fcp_port from the linked list */ 10169 (void) fcp_soft_state_unlink(pptr); 10170 } 10171 10172 /* unbind and free event set */ 10173 if (pptr->port_ndi_event_hdl) { 10174 if (event_bind) { 10175 (void) ndi_event_unbind_set(pptr->port_ndi_event_hdl, 10176 &pptr->port_ndi_events, NDI_SLEEP); 10177 } 10178 (void) ndi_event_free_hdl(pptr->port_ndi_event_hdl); 10179 } 10180 10181 if (pptr->port_ndi_event_defs) { 10182 (void) kmem_free(pptr->port_ndi_event_defs, 10183 sizeof (fcp_ndi_event_defs)); 10184 } 10185 10186 /* 10187 * Clean up mpxio stuff 10188 */ 10189 if (pptr->port_mpxio) { 10190 (void) mdi_phci_unregister(pptr->port_dip, 0); 10191 pptr->port_mpxio--; 10192 } 10193 10194 /* undo SCSI HBA setup */ 10195 if (hba_attached) { 10196 (void) scsi_hba_detach(pptr->port_dip); 10197 } 10198 if (pptr->port_tran != NULL) { 10199 
		/* release the SCSA transport handle on the failure path */
		scsi_hba_tran_free(pptr->port_tran);
	}

	mutex_enter(&fcp_global_mutex);

	/*
	 * We check soft_state_linked, because it is incremented right before
	 * we call increment fcp_watchdog_init. Therefore, we know if
	 * soft_state_linked is still FALSE, we do not want to decrement
	 * fcp_watchdog_init or possibly call untimeout.
	 */

	if (soft_state_linked) {
		if (--fcp_watchdog_init == 0) {
			timeout_id_t tid = fcp_watchdog_id;

			/* drop the global lock before cancelling the timeout */
			mutex_exit(&fcp_global_mutex);
			(void) untimeout(tid);
		} else {
			mutex_exit(&fcp_global_mutex);
		}
	} else {
		mutex_exit(&fcp_global_mutex);
	}

	if (mutex_initted) {
		mutex_destroy(&pptr->port_mutex);
		mutex_destroy(&pptr->port_pkt_mutex);
	}

	if (tmp_list != NULL) {
		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
	}

	/* this makes pptr invalid */
	ddi_soft_state_free(fcp_softstate, instance);

	return (DDI_FAILURE);
}


/*
 * Function: fcp_handle_port_detach
 *
 * Description: Quiesces an FCP port and, when a real detach is requested,
 *		unlinks and tears it down.  This path is shared by detach,
 *		suspend and power-down; the caller's intent is conveyed via
 *		'flag' (FCP_STATE_DETACHING for a detach, otherwise a
 *		suspend/power-down state bit that is set on the port).
 *
 * Argument:	*pptr		FCP port to quiesce/detach.
 *		flag		FCP_STATE_DETACHING or a suspend/power-down
 *				state bit.
 *		instance	Soft state instance number of the port.
 *
 * Return Value: FC_SUCCESS	Port quiesced (and torn down if detaching).
 *		 FC_FAILURE	Port still busy or owned by MDI.
 */
static int
fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
{
	int count = 0;

	mutex_enter(&pptr->port_mutex);

	/*
	 * if the port is powered down or suspended, nothing else
	 * to do; just return.
	 */
	if (flag != FCP_STATE_DETACHING) {
		if (pptr->port_state & (FCP_STATE_POWER_DOWN |
		    FCP_STATE_SUSPENDED)) {
			pptr->port_state |= flag;
			mutex_exit(&pptr->port_mutex);
			return (FC_SUCCESS);
		}
	}

	/* port is currently being manipulated through MDI; refuse */
	if (pptr->port_state & FCP_STATE_IN_MDI) {
		mutex_exit(&pptr->port_mutex);
		return (FC_FAILURE);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "fcp_handle_port_detach: port is detaching");

	pptr->port_state |= flag;

	/*
	 * Wait for any ongoing reconfig/ipkt to complete, that
	 * ensures the freeing to targets/luns is safe.
	 * No more ref to this port should happen from statec/ioctl
	 * after that as it was removed from the global port list.
	 */
	while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
		/*
		 * Let's give sufficient time for reconfig/ipkt
		 * to complete.  Each iteration sleeps ~1 second
		 * (drv_usectohz(1000000)), bounded by FCP_ICMD_DEADLINE
		 * iterations.
		 */
		if (count++ >= FCP_ICMD_DEADLINE) {
			break;
		}
		mutex_exit(&pptr->port_mutex);
		delay(drv_usectohz(1000000));
		mutex_enter(&pptr->port_mutex);
	}

	/*
	 * if the driver is still busy then fail to
	 * suspend/power down.
	 */
	if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
		/* undo the state bit we set above and give up */
		pptr->port_state &= ~flag;
		mutex_exit(&pptr->port_mutex);
		return (FC_FAILURE);
	}

	if (flag == FCP_STATE_DETACHING) {
		/* remove this port from the global soft-state list */
		pptr = fcp_soft_state_unlink(pptr);
		ASSERT(pptr != NULL);
	}

	pptr->port_link_cnt++;
	pptr->port_state |= FCP_STATE_OFFLINE;
	pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);

	fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
	    FCP_CAUSE_LINK_DOWN);
	mutex_exit(&pptr->port_mutex);

	/* kill watch dog timer if we're the last */
	mutex_enter(&fcp_global_mutex);
	if (--fcp_watchdog_init == 0) {
		timeout_id_t tid = fcp_watchdog_id;
		mutex_exit(&fcp_global_mutex);
		(void) untimeout(tid);
	} else {
		mutex_exit(&fcp_global_mutex);
	}

	/* clean up the port structures */
	if (flag == FCP_STATE_DETACHING) {
		fcp_cleanup_port(pptr, instance);
	}

	return (FC_SUCCESS);
}


/*
 * Function: fcp_cleanup_port
 *
 * Description: Releases all the resources held by an FCP port (NDI event
 *		set, target/lun structures, mpxio registration, SCSA
 *		transport, mutexes) and frees its soft state.  Called from
 *		fcp_handle_port_detach() once the port has been quiesced
 *		and unlinked; 'pptr' is invalid on return.
 *
 * Argument:	*pptr		FCP port to dismantle.
 *		instance	Soft state instance number to free.
 *
 * Return Value: None
 */
static void
fcp_cleanup_port(struct fcp_port *pptr, int instance)
{
	ASSERT(pptr != NULL);

	/* unbind and free event set */
	if (pptr->port_ndi_event_hdl) {
		(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
		    &pptr->port_ndi_events, NDI_SLEEP);
		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
	}

	if (pptr->port_ndi_event_defs) {
		(void) kmem_free(pptr->port_ndi_event_defs,
		    sizeof (fcp_ndi_event_defs));
	}

	/* free the lun/target structures and devinfos */
	fcp_free_targets(pptr);

	/*
	 * Clean up mpxio stuff
	 */
	if (pptr->port_mpxio) {
		(void) mdi_phci_unregister(pptr->port_dip, 0);
		pptr->port_mpxio--;
	}

	/* clean up SCSA stuff */
	(void) scsi_hba_detach(pptr->port_dip);
	if (pptr->port_tran != NULL) {
		scsi_hba_tran_free(pptr->port_tran);
	}

#ifdef	KSTATS_CODE
	/* clean up kstats */
	if (pptr->fcp_ksp != NULL) {
		kstat_delete(pptr->fcp_ksp);
	}
#endif

	/* clean up soft state mutexes/condition variables */
	mutex_destroy(&pptr->port_mutex);
	mutex_destroy(&pptr->port_pkt_mutex);

	/* all done with soft state */
	ddi_soft_state_free(fcp_softstate, instance);
}

/*
 * Function: fcp_kmem_cache_constructor
 *
 * Description: This function allocates and initializes the resources required
 *		to build a scsi_pkt structure for the target driver.  The
 *		result of the allocation and initialization will be cached in
 *		the memory cache.  As DMA resources may be allocated here,
 *		that means DMA resources will be tied up in the cache manager.
 *		This is a tradeoff that has been made for performance reasons.
 *
 * Argument:	*pkt		scsi_pkt being constructed; its pkt_ha_private
 *				is the memory to preinitialize.
 *		*tran		SCSI HBA transport; tran_hba_private points to
 *				the FCP port structure (fcp_port).
 *		kmflags		Value passed to kmem_cache_alloc() and
 *				propagated to the constructor.
 *
 * Return Value: 0	Allocation/Initialization was successful.
 *		-1	Allocation or Initialization failed.
10400 * 10401 * 10402 * If the returned value is 0, the buffer is initialized like this: 10403 * 10404 * +================================+ 10405 * +----> | struct scsi_pkt | 10406 * | | | 10407 * | +--- | pkt_ha_private | 10408 * | | | | 10409 * | | +================================+ 10410 * | | 10411 * | | +================================+ 10412 * | +--> | struct fcp_pkt | <---------+ 10413 * | | | | 10414 * +----- | cmd_pkt | | 10415 * | cmd_fp_pkt | ---+ | 10416 * +-------->| cmd_fcp_rsp[] | | | 10417 * | +--->| cmd_fcp_cmd[] | | | 10418 * | | |--------------------------------| | | 10419 * | | | struct fc_packet | <--+ | 10420 * | | | | | 10421 * | | | pkt_ulp_private | ----------+ 10422 * | | | pkt_fca_private | -----+ 10423 * | | | pkt_data_cookie | ---+ | 10424 * | | | pkt_cmdlen | | | 10425 * | |(a) | pkt_rsplen | | | 10426 * | +----| .......... pkt_cmd ........... | ---|-|---------------+ 10427 * | (b) | pkt_cmd_cookie | ---|-|----------+ | 10428 * +---------| .......... pkt_resp .......... 
| ---|-|------+ | | 10429 * | pkt_resp_cookie | ---|-|--+ | | | 10430 * | pkt_cmd_dma | | | | | | | 10431 * | pkt_cmd_acc | | | | | | | 10432 * +================================+ | | | | | | 10433 * | dma_cookies | <--+ | | | | | 10434 * | | | | | | | 10435 * +================================+ | | | | | 10436 * | fca_private | <----+ | | | | 10437 * | | | | | | 10438 * +================================+ | | | | 10439 * | | | | 10440 * | | | | 10441 * +================================+ (d) | | | | 10442 * | fcp_resp cookies | <-------+ | | | 10443 * | | | | | 10444 * +================================+ | | | 10445 * | | | 10446 * +================================+ (d) | | | 10447 * | fcp_resp | <-----------+ | | 10448 * | (DMA resources associated) | | | 10449 * +================================+ | | 10450 * | | 10451 * | | 10452 * | | 10453 * +================================+ (c) | | 10454 * | fcp_cmd cookies | <---------------+ | 10455 * | | | 10456 * +================================+ | 10457 * | 10458 * +================================+ (c) | 10459 * | fcp_cmd | <--------------------+ 10460 * | (DMA resources associated) | 10461 * +================================+ 10462 * 10463 * (a) Only if DMA is NOT used for the FCP_CMD buffer. 10464 * (b) Only if DMA is NOT used for the FCP_RESP buffer 10465 * (c) Only if DMA is used for the FCP_CMD buffer. 
 * (d) Only if DMA is used for the FCP_RESP buffer
 */
static int
fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
    int kmflags)
{
	struct fcp_pkt	*cmd;
	struct fcp_port	*pptr;
	fc_packet_t	*fpkt;

	pptr = (struct fcp_port *)tran->tran_hba_private;
	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
	/* the whole HBA-private area starts zeroed */
	bzero(cmd, tran->tran_hba_len);

	/* cross-link the scsi_pkt and the fcp_pkt (see diagram above) */
	cmd->cmd_pkt = pkt;
	pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
	fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
	cmd->cmd_fp_pkt = fpkt;

	cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
	cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
	/*
	 * The FCA private area lives after the fcp_pkt and the data cookie
	 * array (port_dmacookie_sz bytes); the data cookie array itself
	 * starts immediately after the fcp_pkt.
	 */
	cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);

	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
	    sizeof (struct fcp_pkt));

	fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
	fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;

	if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
		/*
		 * The underlying HBA doesn't want to DMA the fcp_cmd or
		 * fcp_resp. The transfer of information will be done by
		 * bcopy.
		 * The naming of the flags (that is actually a value) is
		 * unfortunate. FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
		 * DMA" but instead "NO DMA".
		 */
		fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
		fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
		fpkt->pkt_resp = cmd->cmd_fcp_rsp;
	} else {
		/*
		 * The underlying HBA will dma the fcp_cmd buffer and fcp_resp
		 * buffer. A buffer is allocated for each one the ddi_dma_*
		 * interfaces.
		 */
		if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
			return (-1);
		}
	}

	return (0);
}

/*
 * Function: fcp_kmem_cache_destructor
 *
 * Description: Called by the destructor of the cache managed by SCSA.
 *		All the resources pre-allocated in fcp_pkt_constructor
 *		and the data also pre-initialized in fcp_pkt_constructor
 *		are freed and uninitialized here.
 *
 * Argument:	*pkt		scsi_pkt whose pkt_ha_private holds the
 *				memory to uninitialize.
 *		*tran		SCSI HBA transport; tran_hba_private points to
 *				the FCP port structure (fcp_port).
 *
 * Return Value: None
 *
 * Context: kernel
 */
static void
fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
{
	struct fcp_pkt	*cmd;
	struct fcp_port	*pptr;

	pptr = (struct fcp_port *)(tran->tran_hba_private);
	cmd = pkt->pkt_ha_private;

	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
		/*
		 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
		 * buffer and DMA resources allocated to do so are released.
		 */
		fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
	}
}

/*
 * Function: fcp_alloc_cmd_resp
 *
 * Description: This function allocates an FCP_CMD and FCP_RESP buffer that
 *		will be DMAed by the HBA. The buffer is allocated applying
 *		the DMA requirements for the HBA. The buffers allocated will
 *		also be bound. DMA resources are allocated in the process.
 *		They will be released by fcp_free_cmd_resp().
 *
 * Argument:	*pptr	FCP port.
 *		*fpkt	fc packet for which the cmd and resp packet should be
 *			allocated.
 *		flags	Allocation flags.
 *
 * Return Value: FC_FAILURE
 *		FC_SUCCESS
 *
 * Context: User or Kernel context only if flags == KM_SLEEP.
 *		Interrupt context if the KM_SLEEP is not specified.
 */
static int
fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
{
	int		rval;
	int		cmd_len;
	int		resp_len;
	ulong_t		real_len;
	int		(*cb) (caddr_t);
	ddi_dma_cookie_t	pkt_cookie;
	ddi_dma_cookie_t	*cp;
	uint32_t	cnt;

	/* only block in the DDI DMA routines when the caller may sleep */
	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	cmd_len = fpkt->pkt_cmdlen;
	resp_len = fpkt->pkt_rsplen;

	ASSERT(fpkt->pkt_cmd_dma == NULL);

	/* Allocation of a DMA handle used in subsequent calls. */
	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
	    cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
		return (FC_FAILURE);
	}

	/* A buffer is allocated that satisfies the DMA requirements. */
	rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
	    (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);

	if (rval != DDI_SUCCESS) {
		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
		return (FC_FAILURE);
	}

	/* the allocator may round up; a short buffer is still a failure */
	if (real_len < cmd_len) {
		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
		return (FC_FAILURE);
	}

	/*
	 * The buffer allocated is DMA bound.  The bind returns the first
	 * cookie in pkt_cookie and the total count in pkt_cmd_cookie_cnt.
	 */
	rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
	    fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
	    cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);

	if (rval != DDI_DMA_MAPPED) {
		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
		return (FC_FAILURE);
	}

	/* more cookies than the HBA's scatter/gather limit: give up */
	if (fpkt->pkt_cmd_cookie_cnt >
	    pptr->port_cmd_dma_attr.dma_attr_sgllen) {
		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
		return (FC_FAILURE);
	}

	ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);

	/*
	 * The buffer where the scatter/gather list is going to be built is
	 * allocated.
	 */
	cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
	    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
	    KM_NOSLEEP);

	if (cp == NULL) {
		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
		return (FC_FAILURE);
	}

	/*
	 * The scatter/gather list for the buffer we just allocated is built
	 * here: first cookie came from the bind, the rest are fetched with
	 * ddi_dma_nextcookie().
	 */
	*cp = pkt_cookie;
	cp++;

	for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
		ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
		    &pkt_cookie);
		*cp = pkt_cookie;
	}

	/* Now the same sequence for the FCP_RESP buffer. */
	ASSERT(fpkt->pkt_resp_dma == NULL);
	if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
	    cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
		return (FC_FAILURE);
	}

	rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
	    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
	    (caddr_t *)&fpkt->pkt_resp, &real_len,
	    &fpkt->pkt_resp_acc);

	/* from here on, every failure must also unwind the cmd resources */
	if (rval != DDI_SUCCESS) {
		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
		kmem_free(fpkt->pkt_cmd_cookie,
		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
		return (FC_FAILURE);
	}

	if (real_len < resp_len) {
		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
		kmem_free(fpkt->pkt_cmd_cookie,
		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
		return (FC_FAILURE);
	}

	rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
	    fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
	    cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);

	if (rval != DDI_DMA_MAPPED) {
		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
		kmem_free(fpkt->pkt_cmd_cookie,
		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
		return (FC_FAILURE);
	}

	if (fpkt->pkt_resp_cookie_cnt >
	    pptr->port_resp_dma_attr.dma_attr_sgllen) {
		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
		kmem_free(fpkt->pkt_cmd_cookie,
		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
		return (FC_FAILURE);
	}

	ASSERT(fpkt->pkt_resp_cookie_cnt != 0);

	cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
	    fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
	    KM_NOSLEEP);

	if (cp == NULL) {
		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
		kmem_free(fpkt->pkt_cmd_cookie,
		    fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
		return (FC_FAILURE);
	}

	/* build the response scatter/gather list, as for the command above */
	*cp = pkt_cookie;
	cp++;

	for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
		ddi_dma_nextcookie(fpkt->pkt_resp_dma,
		    &pkt_cookie);
		*cp = pkt_cookie;
	}

	return (FC_SUCCESS);
}

/*
 * Function: fcp_free_cmd_resp
 *
 * Description: This function releases the FCP_CMD and FCP_RESP buffer
 *		allocated by fcp_alloc_cmd_resp() and all the resources
 *		associated with them. That includes the DMA resources and the
 *		buffer allocated for the cookies of each one of them.
 *
 * Argument:	*pptr		FCP port context.
 *		*fpkt		fc packet containing the cmd and resp packet
 *				to be released.
 *
 * Return Value: None
 *
 * Context: Interrupt, User and Kernel context.
 */
/* ARGSUSED */
static void
fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
{
	ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);

	/* NULL checks below are defensive for non-DEBUG kernels */
	if (fpkt->pkt_resp_dma) {
		(void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
		ddi_dma_mem_free(&fpkt->pkt_resp_acc);
		ddi_dma_free_handle(&fpkt->pkt_resp_dma);
	}

	if (fpkt->pkt_resp_cookie) {
		kmem_free(fpkt->pkt_resp_cookie,
		    fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
		fpkt->pkt_resp_cookie = NULL;
	}

	if (fpkt->pkt_cmd_dma) {
		(void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
		ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
		ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
	}

	if (fpkt->pkt_cmd_cookie) {
		kmem_free(fpkt->pkt_cmd_cookie,
		    fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
		fpkt->pkt_cmd_cookie = NULL;
	}
}


/*
 * called by the transport to do our own target initialization
 *
 * can acquire and release the global mutex
 */
/* ARGSUSED */
static int
fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	uchar_t		*bytes;
	uint_t		nbytes;
	uint16_t	lun_num;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	struct fcp_port	*pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;

	ASSERT(pptr != NULL);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
	    FCP_BUF_LEVEL_8, 0,
	    "fcp_phys_tgt_init: called for %s (instance %d)",
	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));

	/* get our port WWN property */
	bytes = NULL;
	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
	    (nbytes != FC_WWN_SIZE)) {
		/* no port WWN property */
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
		    " for %s (instance %d): bytes=%p nbytes=%x",
		    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
		    nbytes);

		if (bytes != NULL) {
			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		}

		return (DDI_NOT_WELL_FORMED);
	}
	ASSERT(bytes != NULL);

	/* 0xFFFF is the "not found" sentinel for the lun property */
	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
	    LUN_PROP, 0xFFFF);
	if (lun_num == 0xFFFF) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_NOT_WELL_FORMED);
	}

	mutex_enter(&pptr->port_mutex);
	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
		mutex_exit(&pptr->port_mutex);
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_FAILURE);
	}

	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
	    FC_WWN_SIZE) == 0);
	ASSERT(plun->lun_num == lun_num);

	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);

	ptgt = plun->lun_tgt;

	/* lock order: port_mutex, then tgt_mutex */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_tgt_count++;
	scsi_device_hba_private_set(sd, plun);
	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
	plun->lun_sd = sd;
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	return (DDI_SUCCESS);
}

/*
 * mpxio (client-node) flavor of target init; same contract as
 * fcp_phys_tgt_init() but requires a pathinfo node on the scsi_device.
 */
/*ARGSUSED*/
static int
fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	uchar_t		*bytes;
	uint_t		nbytes;
	uint16_t	lun_num;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	struct fcp_port	*pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;
	child_info_t	*cip;

	ASSERT(pptr != NULL);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
	    " (tgt_dip %p)", ddi_get_name(tgt_dip),
	    ddi_get_instance(tgt_dip), hba_dip, tgt_dip);

	/* a virtual (mpxio) child must carry a pathinfo node */
	cip = (child_info_t *)sd->sd_pathinfo;
	if (cip == NULL) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		return (DDI_NOT_WELL_FORMED);
	}

	/* get our port WWN property */
	bytes = NULL;
	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
	    (nbytes != FC_WWN_SIZE)) {
		if (bytes) {
			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		}
		return (DDI_NOT_WELL_FORMED);
	}

	ASSERT(bytes != NULL);

	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
	    LUN_PROP, 0xFFFF);
	if (lun_num == 0xFFFF) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_NOT_WELL_FORMED);
	}

	mutex_enter(&pptr->port_mutex);
	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
		mutex_exit(&pptr->port_mutex);
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_FAILURE);
	}

	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
	    FC_WWN_SIZE) == 0);
	ASSERT(plun->lun_num == lun_num);

	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);

	ptgt = plun->lun_tgt;

	/* lock order: port_mutex, then tgt_mutex */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_tgt_count++;
	scsi_device_hba_private_set(sd, plun);
	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
	plun->lun_sd = sd;
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	return (DDI_SUCCESS);
}


/*
 * called by the transport to do our own target initialization
 *
 * can acquire and release the global mutex
 */
/* ARGSUSED */
static int
fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	struct fcp_port	*pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;
	int		rval;

	ASSERT(pptr != NULL);

	/*
	 * Child node is getting initialized. Look at the mpxio component
	 * type on the child device to see if this device is mpxio managed
	 * or not.
	 */
	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
		rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
	} else {
		rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
	}

	return (rval);
}


/*
 * Counterpart of fcp_scsi_tgt_init(): drops this scsi_device's reference
 * on the lun and clears FCP_SCSI_LUN_TGT_INIT when the last one goes away.
 */
/* ARGSUSED */
static void
fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	struct fcp_lun	*plun = scsi_device_hba_private_get(sd);
	struct fcp_tgt	*ptgt;

	FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
	    ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));

	if (plun == NULL) {
		return;
	}
	ptgt = plun->lun_tgt;

	ASSERT(ptgt != NULL);

	mutex_enter(&ptgt->tgt_mutex);
	ASSERT(plun->lun_tgt_count > 0);

	if (--plun->lun_tgt_count == 0) {
		plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
	}
	plun->lun_sd = NULL;
	mutex_exit(&ptgt->tgt_mutex);
}

/*
 * Function: fcp_scsi_start
 *
 * Description: This function is called by the target driver to request a
 *		command to be sent.
 *
 * Argument:	*ap		SCSI address of the device.
 *		*pkt		SCSI packet containing the cmd to send.
 *
 * Return Value: TRAN_ACCEPT
 *		TRAN_BUSY
 *		TRAN_BADPKT
 *		TRAN_FATAL_ERROR
 */
static int
fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct fcp_port	*pptr = ADDR2FCP(ap);
	struct fcp_lun	*plun = ADDR2LUN(ap);
	struct fcp_pkt	*cmd = PKT2CMD(pkt);
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	int		rval;

	/* ensure command isn't already issued */
	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_9, 0,
	    "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);

	/*
	 * It is strange that we enter the fcp_port mutex and the target
	 * mutex to check the lun state (which has a mutex of its own).
	 */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);

	/*
	 * If the device is offline and is not in the process of coming
	 * online, fail the request.
	 */

	if ((plun->lun_state & FCP_LUN_OFFLINE) &&
	    !(plun->lun_state & FCP_LUN_ONLINING)) {
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
			pkt->pkt_reason = CMD_DEV_GONE;
		}

		return (TRAN_FATAL_ERROR);
	}
	cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;

	/*
	 * If we are suspended, kernel is trying to dump, so don't
	 * block, fail or defer requests - send them down right away.
	 * NOTE: If we are in panic (i.e. trying to dump), we can't
	 * assume we have been suspended. There is hardware such as
	 * the v880 that doesn't do PM. Thus, the check for
	 * ddi_in_panic.
	 *
	 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
	 * of changing. So, if we can queue the packet, do it. Eventually,
	 * either the device will have gone away or changed and we can fail
	 * the request, or we can proceed if the device didn't change.
	 *
	 * If the pd in the target or the packet is NULL it's probably
	 * because the device has gone away, we allow the request to be
	 * put on the internal queue here in case the device comes back within
	 * the offline timeout. fctl will fix up the pd's if the tgt_pd_handle
	 * has gone NULL, while fcp deals cases where pkt_pd is NULL. pkt_pd
	 * could be NULL because the device was disappearing during or since
	 * packet initialization.
	 */

	if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
	    FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
	    (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
	    (ptgt->tgt_pd_handle == NULL) ||
	    (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
		/*
		 * If ((LUN is busy AND
		 *	LUN not suspended AND
		 *	The system is not in panic state) OR
		 *	(The port is coming up))
		 *
		 * We check to see if the any of the flags FLAG_NOINTR or
		 * FLAG_NOQUEUE is set. If one of them is set the value
		 * returned will be TRAN_BUSY. If not, the request is queued.
		 */
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		/* see if using interrupts is allowed (so queueing'll work) */
		if (pkt->pkt_flags & FLAG_NOINTR) {
			pkt->pkt_resid = 0;
			return (TRAN_BUSY);
		}
		if (pkt->pkt_flags & FLAG_NOQUEUE) {
			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_9, 0,
			    "fcp_scsi_start: lun busy for pkt %p", pkt);
			return (TRAN_BUSY);
		}
#ifdef	DEBUG
		mutex_enter(&pptr->port_pkt_mutex);
		pptr->port_npkts++;
		mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

		/* got queue up the pkt for later */
		fcp_queue_pkt(pptr, cmd);
		return (TRAN_ACCEPT);
	}
	cmd->cmd_state = FCP_PKT_ISSUED;

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/*
	 * Now that we released the mutexes, what was protected by them can
	 * change.
	 */

	/*
	 * If there is a reconfiguration in progress, wait for it to complete.
	 */
	fcp_reconfig_wait(pptr);

	cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
	    pkt->pkt_time : 0;

	/* prepare the packet */

	fcp_prepare_pkt(pptr, cmd, plun);

	if (cmd->cmd_pkt->pkt_time) {
		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
	} else {
		/* no timeout requested: fall back to a 5-hour default */
		cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
	}

	/*
	 * if interrupts aren't allowed (e.g. at dump time) then we'll
	 * have to do polled I/O
	 */
	if (pkt->pkt_flags & FLAG_NOINTR) {
		cmd->cmd_state &= ~FCP_PKT_ISSUED;
		return (fcp_dopoll(pptr, cmd));
	}

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts++;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
	if (rval == FC_SUCCESS) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_9, 0,
		    "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
		return (TRAN_ACCEPT);
	}

	cmd->cmd_state = FCP_PKT_IDLE;

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts--;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	/*
	 * For lack of clearer definitions, choose
	 * between TRAN_BUSY and TRAN_FATAL_ERROR.
	 */

	if (rval == FC_TRAN_BUSY) {
		pkt->pkt_resid = 0;
		rval = TRAN_BUSY;
	} else {
		mutex_enter(&ptgt->tgt_mutex);
		if (plun->lun_state & FCP_LUN_OFFLINE) {
			child_info_t	*cip;

			mutex_enter(&plun->lun_mutex);
			cip = plun->lun_cip;
			mutex_exit(&plun->lun_mutex);

			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fcp_transport failed 2 for %x: %x; dip=%p",
			    plun->lun_tgt->tgt_d_id, rval, cip);

			rval = TRAN_FATAL_ERROR;
		} else {
			if (pkt->pkt_flags & FLAG_NOQUEUE) {
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_9, 0,
				    "fcp_scsi_start: FC_BUSY for pkt %p",
				    pkt);
				rval = TRAN_BUSY;
			} else {
				/* retry later: queue it internally */
				rval = TRAN_ACCEPT;
				fcp_queue_pkt(pptr, cmd);
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
	}

	return (rval);
}

/*
 * called by the transport to abort a packet
 */
/*ARGSUSED*/
static int
fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	int		tgt_cnt;
	struct fcp_port	*pptr = ADDR2FCP(ap);
	struct fcp_lun	*plun = ADDR2LUN(ap);
	struct fcp_tgt	*ptgt = plun->lun_tgt;

	/*
	 * Only the "abort everything" form (pkt == NULL) is supported:
	 * all outstanding packets for the target are aborted via
	 * fcp_abort_all().  Aborting an individual packet always fails
	 * (returns FALSE).
	 */
	if (pkt == NULL) {
		if (ptgt) {
			mutex_enter(&ptgt->tgt_mutex);
			/* snapshot the generation count under the lock */
			tgt_cnt = ptgt->tgt_change_cnt;
			mutex_exit(&ptgt->tgt_mutex);
			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
			return (TRUE);
		}
	}
	return (FALSE);
}


/*
 * Perform reset.  "level" is RESET_ALL (reset the link), RESET_TARGET
 * or RESET_LUN.  Returns 1 on success, 0 on failure.
 */
int
fcp_scsi_reset(struct scsi_address *ap, int level)
{
	int		rval = 0;
	struct fcp_port	*pptr = ADDR2FCP(ap);
	struct fcp_lun	*plun = ADDR2LUN(ap);
	struct fcp_tgt	*ptgt = plun->lun_tgt;

	if (level == RESET_ALL) {
		if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
			rval = 1;
		}
	} else if (level == RESET_TARGET || level == RESET_LUN) {
		/*
		 * If we are in the middle of discovery, return
		 * SUCCESS as this target will be rediscovered
		 * anyway
		 */
		mutex_enter(&ptgt->tgt_mutex);
		if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
			mutex_exit(&ptgt->tgt_mutex);
			return (1);
		}
		mutex_exit(&ptgt->tgt_mutex);

		if (fcp_reset_target(ap, level) == FC_SUCCESS) {
			rval = 1;
		}
	}
	return (rval);
}


/*
 * called by the framework to get a SCSI capability
 * (thin wrapper over fcp_commoncap with doset == 0)
 */
static int
fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
{
	return (fcp_commoncap(ap, cap, 0, whom, 0));
}


/*
 * called by the framework to set a SCSI capability
 * (thin wrapper over fcp_commoncap with doset == 1)
 */
static int
fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	return (fcp_commoncap(ap, cap, value, whom, 1));
}

/*
 * Function:	fcp_pkt_setup
 *
 *
Description: This function sets up the scsi_pkt structure passed by the 11349 * caller. This function assumes fcp_pkt_constructor has been 11350 * called previously for the packet passed by the caller. If 11351 * successful this call will have the following results: 11352 * 11353 * - The resources needed that will be constant through out 11354 * the whole transaction are allocated. 11355 * - The fields that will be constant through out the whole 11356 * transaction are initialized. 11357 * - The scsi packet will be linked to the LUN structure 11358 * addressed by the transaction. 11359 * 11360 * Argument: 11361 * *pkt Pointer to a scsi_pkt structure. 11362 * callback 11363 * arg 11364 * 11365 * Return Value: 0 Success 11366 * !0 Failure 11367 * 11368 * Context: Kernel context or interrupt context 11369 */ 11370 /* ARGSUSED */ 11371 static int 11372 fcp_pkt_setup(struct scsi_pkt *pkt, 11373 int (*callback)(caddr_t arg), 11374 caddr_t arg) 11375 { 11376 struct fcp_pkt *cmd; 11377 struct fcp_port *pptr; 11378 struct fcp_lun *plun; 11379 struct fcp_tgt *ptgt; 11380 int kf; 11381 fc_packet_t *fpkt; 11382 fc_frame_hdr_t *hp; 11383 11384 pptr = ADDR2FCP(&pkt->pkt_address); 11385 plun = ADDR2LUN(&pkt->pkt_address); 11386 ptgt = plun->lun_tgt; 11387 11388 cmd = (struct fcp_pkt *)pkt->pkt_ha_private; 11389 fpkt = cmd->cmd_fp_pkt; 11390 11391 /* 11392 * this request is for dma allocation only 11393 */ 11394 /* 11395 * First step of fcp_scsi_init_pkt: pkt allocation 11396 * We determine if the caller is willing to wait for the 11397 * resources. 11398 */ 11399 kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP; 11400 11401 /* 11402 * Selective zeroing of the pkt. 
11403 */ 11404 cmd->cmd_back = NULL; 11405 cmd->cmd_next = NULL; 11406 11407 /* 11408 * Zero out fcp command 11409 */ 11410 bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd)); 11411 11412 cmd->cmd_state = FCP_PKT_IDLE; 11413 11414 fpkt = cmd->cmd_fp_pkt; 11415 fpkt->pkt_data_acc = NULL; 11416 11417 /* 11418 * When port_state is FCP_STATE_OFFLINE, remote_port (tgt_pd_handle) 11419 * could be destroyed. We need fail pkt_setup. 11420 */ 11421 if (pptr->port_state & FCP_STATE_OFFLINE) { 11422 return (-1); 11423 } 11424 11425 mutex_enter(&ptgt->tgt_mutex); 11426 fpkt->pkt_pd = ptgt->tgt_pd_handle; 11427 11428 if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf) 11429 != FC_SUCCESS) { 11430 mutex_exit(&ptgt->tgt_mutex); 11431 return (-1); 11432 } 11433 11434 mutex_exit(&ptgt->tgt_mutex); 11435 11436 /* Fill in the Fabric Channel Header */ 11437 hp = &fpkt->pkt_cmd_fhdr; 11438 hp->r_ctl = R_CTL_COMMAND; 11439 hp->rsvd = 0; 11440 hp->type = FC_TYPE_SCSI_FCP; 11441 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ; 11442 hp->seq_id = 0; 11443 hp->df_ctl = 0; 11444 hp->seq_cnt = 0; 11445 hp->ox_id = 0xffff; 11446 hp->rx_id = 0xffff; 11447 hp->ro = 0; 11448 11449 /* 11450 * A doubly linked list (cmd_forw, cmd_back) is built 11451 * out of every allocated packet on a per-lun basis 11452 * 11453 * The packets are maintained in the list so as to satisfy 11454 * scsi_abort() requests. 
At present (which is unlikely to 11455 * change in the future) nobody performs a real scsi_abort 11456 * in the SCSI target drivers (as they don't keep the packets 11457 * after doing scsi_transport - so they don't know how to 11458 * abort a packet other than sending a NULL to abort all 11459 * outstanding packets) 11460 */ 11461 mutex_enter(&plun->lun_mutex); 11462 if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) { 11463 plun->lun_pkt_head->cmd_back = cmd; 11464 } else { 11465 plun->lun_pkt_tail = cmd; 11466 } 11467 plun->lun_pkt_head = cmd; 11468 mutex_exit(&plun->lun_mutex); 11469 return (0); 11470 } 11471 11472 /* 11473 * Function: fcp_pkt_teardown 11474 * 11475 * Description: This function releases a scsi_pkt structure and all the 11476 * resources attached to it. 11477 * 11478 * Argument: *pkt Pointer to a scsi_pkt structure. 11479 * 11480 * Return Value: None 11481 * 11482 * Context: User, Kernel or Interrupt context. 11483 */ 11484 static void 11485 fcp_pkt_teardown(struct scsi_pkt *pkt) 11486 { 11487 struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address); 11488 struct fcp_lun *plun = ADDR2LUN(&pkt->pkt_address); 11489 struct fcp_pkt *cmd = (struct fcp_pkt *)pkt->pkt_ha_private; 11490 11491 /* 11492 * Remove the packet from the per-lun list 11493 */ 11494 mutex_enter(&plun->lun_mutex); 11495 if (cmd->cmd_back) { 11496 ASSERT(cmd != plun->lun_pkt_head); 11497 cmd->cmd_back->cmd_forw = cmd->cmd_forw; 11498 } else { 11499 ASSERT(cmd == plun->lun_pkt_head); 11500 plun->lun_pkt_head = cmd->cmd_forw; 11501 } 11502 11503 if (cmd->cmd_forw) { 11504 cmd->cmd_forw->cmd_back = cmd->cmd_back; 11505 } else { 11506 ASSERT(cmd == plun->lun_pkt_tail); 11507 plun->lun_pkt_tail = cmd->cmd_back; 11508 } 11509 11510 mutex_exit(&plun->lun_mutex); 11511 11512 (void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt); 11513 } 11514 11515 /* 11516 * Routine for reset notification setup, to register or cancel. 
 * This function is called by SCSA
 */
/*ARGSUSED*/
static int
fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	struct fcp_port *pptr = ADDR2FCP(ap);

	/* delegate to the common SCSA helper, keyed on the port's list */
	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
	    &pptr->port_mutex, &pptr->port_reset_notify_listf));
}


/*
 * called by the framework to translate an event name into a cookie
 * for this port's NDI event handle
 */
static int
fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
    ddi_eventcookie_t *event_cookiep)
{
	struct fcp_port *pptr = fcp_dip2port(dip);

	if (pptr == NULL) {
		return (DDI_FAILURE);
	}

	return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
	    event_cookiep, NDI_EVENT_NOPASS));
}


/*
 * called by the framework to register an event callback for rdip
 */
static int
fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
    ddi_eventcookie_t eventid, void (*callback)(), void *arg,
    ddi_callback_id_t *cb_id)
{
	struct fcp_port *pptr = fcp_dip2port(dip);

	if (pptr == NULL) {
		return (DDI_FAILURE);
	}

	return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
	    eventid, callback, arg, NDI_SLEEP, cb_id));
}


/*
 * called by the framework to remove a previously added event callback
 */
static int
fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
{

	struct fcp_port *pptr = fcp_dip2port(dip);

	if (pptr == NULL) {
		return (DDI_FAILURE);
	}
	return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
}


/*
 * called by the transport to post an event
 */
static int
fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
    ddi_eventcookie_t eventid, void *impldata)
{
	struct fcp_port *pptr = fcp_dip2port(dip);

	if (pptr == NULL) {
		return (DDI_FAILURE);
	}

	return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
	    eventid, impldata));
}


/*
 * A target in many cases in Fibre Channel has a one to one relation
 * with a port identifier (which is also known as D_ID and also as AL_PA
 * in private Loop) On Fibre Channel-to-SCSI bridge boxes a target reset
 * will most likely result in resetting all LUNs (which means a reset will
 * occur on all the SCSI devices connected at the other end of the bridge)
 * That is the latest favorite topic for discussion, for, one can debate as
 * hot as one likes and come up with arguably a best solution to one's
 * satisfaction
 *
 * To stay on track and not digress much, here are the problems stated
 * briefly:
 *
 *	SCSA doesn't define RESET_LUN, It defines RESET_TARGET, but the
 *	target drivers use RESET_TARGET even if their instance is on a
 *	LUN. Doesn't that sound a bit broken ?
 *
 *	FCP SCSI (the current spec) only defines RESET TARGET in the
 *	control fields of an FCP_CMND structure. It should have been
 *	fixed right there, giving flexibility to the initiators to
 *	minimize havoc that could be caused by resetting a target.
11614 */ 11615 static int 11616 fcp_reset_target(struct scsi_address *ap, int level) 11617 { 11618 int rval = FC_FAILURE; 11619 char lun_id[25]; 11620 struct fcp_port *pptr = ADDR2FCP(ap); 11621 struct fcp_lun *plun = ADDR2LUN(ap); 11622 struct fcp_tgt *ptgt = plun->lun_tgt; 11623 struct scsi_pkt *pkt; 11624 struct fcp_pkt *cmd; 11625 struct fcp_rsp *rsp; 11626 uint32_t tgt_cnt; 11627 struct fcp_rsp_info *rsp_info; 11628 struct fcp_reset_elem *p; 11629 int bval; 11630 11631 if ((p = kmem_alloc(sizeof (struct fcp_reset_elem), 11632 KM_NOSLEEP)) == NULL) { 11633 return (rval); 11634 } 11635 11636 mutex_enter(&ptgt->tgt_mutex); 11637 if (level == RESET_TARGET) { 11638 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) { 11639 mutex_exit(&ptgt->tgt_mutex); 11640 kmem_free(p, sizeof (struct fcp_reset_elem)); 11641 return (rval); 11642 } 11643 fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY); 11644 (void) strcpy(lun_id, " "); 11645 } else { 11646 if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) { 11647 mutex_exit(&ptgt->tgt_mutex); 11648 kmem_free(p, sizeof (struct fcp_reset_elem)); 11649 return (rval); 11650 } 11651 fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY); 11652 11653 (void) sprintf(lun_id, ", LUN=%d", plun->lun_num); 11654 } 11655 tgt_cnt = ptgt->tgt_change_cnt; 11656 11657 mutex_exit(&ptgt->tgt_mutex); 11658 11659 if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0, 11660 0, 0, NULL, 0)) == NULL) { 11661 kmem_free(p, sizeof (struct fcp_reset_elem)); 11662 mutex_enter(&ptgt->tgt_mutex); 11663 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY); 11664 mutex_exit(&ptgt->tgt_mutex); 11665 return (rval); 11666 } 11667 pkt->pkt_time = FCP_POLL_TIMEOUT; 11668 11669 /* fill in cmd part of packet */ 11670 cmd = PKT2CMD(pkt); 11671 if (level == RESET_TARGET) { 11672 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1; 11673 } else { 11674 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1; 11675 } 11676 cmd->cmd_fp_pkt->pkt_comp = NULL; 11677 cmd->cmd_pkt->pkt_flags |= 
FLAG_NOINTR; 11678 11679 /* prepare a packet for transport */ 11680 fcp_prepare_pkt(pptr, cmd, plun); 11681 11682 if (cmd->cmd_pkt->pkt_time) { 11683 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time; 11684 } else { 11685 cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60; 11686 } 11687 11688 (void) fc_ulp_busy_port(pptr->port_fp_handle); 11689 bval = fcp_dopoll(pptr, cmd); 11690 fc_ulp_idle_port(pptr->port_fp_handle); 11691 11692 /* submit the packet */ 11693 if (bval == TRAN_ACCEPT) { 11694 int error = 3; 11695 11696 rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp; 11697 rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp + 11698 sizeof (struct fcp_rsp)); 11699 11700 if (rsp->fcp_u.fcp_status.rsp_len_set) { 11701 if (fcp_validate_fcp_response(rsp, pptr) == 11702 FC_SUCCESS) { 11703 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) { 11704 FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp + 11705 sizeof (struct fcp_rsp), rsp_info, 11706 cmd->cmd_fp_pkt->pkt_resp_acc, 11707 sizeof (struct fcp_rsp_info)); 11708 } 11709 if (rsp_info->rsp_code == FCP_NO_FAILURE) { 11710 rval = FC_SUCCESS; 11711 error = 0; 11712 } else { 11713 error = 1; 11714 } 11715 } else { 11716 error = 2; 11717 } 11718 } 11719 11720 switch (error) { 11721 case 0: 11722 fcp_log(CE_WARN, pptr->port_dip, 11723 "!FCP: WWN 0x%08x%08x %s reset successfully", 11724 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]), 11725 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id); 11726 break; 11727 11728 case 1: 11729 fcp_log(CE_WARN, pptr->port_dip, 11730 "!FCP: Reset to WWN 0x%08x%08x %s failed," 11731 " response code=%x", 11732 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]), 11733 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id, 11734 rsp_info->rsp_code); 11735 break; 11736 11737 case 2: 11738 fcp_log(CE_WARN, pptr->port_dip, 11739 "!FCP: Reset to WWN 0x%08x%08x %s failed," 11740 " Bad FCP response values: rsvd1=%x," 11741 " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x," 11742 " rsplen=%x, senselen=%x", 11743 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]), 11744 
*((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id, 11745 rsp->reserved_0, rsp->reserved_1, 11746 rsp->fcp_u.fcp_status.reserved_0, 11747 rsp->fcp_u.fcp_status.reserved_1, 11748 rsp->fcp_response_len, rsp->fcp_sense_len); 11749 break; 11750 11751 default: 11752 fcp_log(CE_WARN, pptr->port_dip, 11753 "!FCP: Reset to WWN 0x%08x%08x %s failed", 11754 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]), 11755 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id); 11756 break; 11757 } 11758 } 11759 scsi_destroy_pkt(pkt); 11760 11761 if (rval == FC_FAILURE) { 11762 mutex_enter(&ptgt->tgt_mutex); 11763 if (level == RESET_TARGET) { 11764 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY); 11765 } else { 11766 fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY); 11767 } 11768 mutex_exit(&ptgt->tgt_mutex); 11769 kmem_free(p, sizeof (struct fcp_reset_elem)); 11770 return (rval); 11771 } 11772 11773 mutex_enter(&pptr->port_mutex); 11774 if (level == RESET_TARGET) { 11775 p->tgt = ptgt; 11776 p->lun = NULL; 11777 } else { 11778 p->tgt = NULL; 11779 p->lun = plun; 11780 } 11781 p->tgt = ptgt; 11782 p->tgt_cnt = tgt_cnt; 11783 p->timeout = fcp_watchdog_time + FCP_RESET_DELAY; 11784 p->next = pptr->port_reset_list; 11785 pptr->port_reset_list = p; 11786 11787 FCP_TRACE(fcp_logq, pptr->port_instbuf, 11788 fcp_trace, FCP_BUF_LEVEL_3, 0, 11789 "Notify ssd of the reset to reinstate the reservations"); 11790 11791 scsi_hba_reset_notify_callback(&pptr->port_mutex, 11792 &pptr->port_reset_notify_listf); 11793 11794 mutex_exit(&pptr->port_mutex); 11795 11796 return (rval); 11797 } 11798 11799 11800 /* 11801 * called by fcp_getcap and fcp_setcap to get and set (respectively) 11802 * SCSI capabilities 11803 */ 11804 /* ARGSUSED */ 11805 static int 11806 fcp_commoncap(struct scsi_address *ap, char *cap, 11807 int val, int tgtonly, int doset) 11808 { 11809 struct fcp_port *pptr = ADDR2FCP(ap); 11810 struct fcp_lun *plun = ADDR2LUN(ap); 11811 struct fcp_tgt *ptgt = plun->lun_tgt; 11812 int cidx; 11813 int 
rval = FALSE;

	if (cap == (char *)0) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_commoncap: invalid arg");
		return (rval);
	}

	if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
		return (UNDEFINED);
	}

	/*
	 * Process setcap request.
	 */
	if (doset) {
		/*
		 * At present, we can only set binary (0/1) values
		 */
		switch (cidx) {
		case SCSI_CAP_ARQ:
			/* ARQ is always on; refuse to turn it off */
			if (val == 0) {
				rval = FALSE;
			} else {
				rval = TRUE;
			}
			break;

		case SCSI_CAP_LUN_RESET:
			if (val) {
				plun->lun_cap |= FCP_LUN_CAP_RESET;
			} else {
				plun->lun_cap &= ~FCP_LUN_CAP_RESET;
			}
			rval = TRUE;
			break;

		case SCSI_CAP_SECTOR_SIZE:
			rval = TRUE;
			break;
		default:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_4, 0,
			    "fcp_setcap: unsupported %d", cidx);
			rval = UNDEFINED;
			break;
		}

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "set cap: cap=%s, val/tgtonly/doset/rval = "
		    "0x%x/0x%x/0x%x/%d",
		    cap, val, tgtonly, doset, rval);

	} else {
		/*
		 * Process getcap request.
		 */
		switch (cidx) {
		case SCSI_CAP_DMA_MAX:
			rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;

			/*
			 * Need to make an adjustment qlc is uint_t 64
			 * st is int, so we will make the adjustment here
			 * being as nobody wants to touch this.
			 * It still leaves the max single block length
			 * of 2 gig. This should last .
			 */

			if (rval == -1) {
				rval = MAX_INT_DMA;
			}

			break;

		case SCSI_CAP_INITIATOR_ID:
			rval = pptr->port_id;
			break;

		case SCSI_CAP_ARQ:
		case SCSI_CAP_RESET_NOTIFICATION:
		case SCSI_CAP_TAGGED_QING:
			rval = TRUE;
			break;

		case SCSI_CAP_SCSI_VERSION:
			rval = 3;
			break;

		case SCSI_CAP_INTERCONNECT_TYPE:
			/* fabric/public topology or soft-addressed device */
			if (FC_TOP_EXTERNAL(pptr->port_topology) ||
			    (ptgt->tgt_hard_addr == 0)) {
				rval = INTERCONNECT_FABRIC;
			} else {
				rval = INTERCONNECT_FIBRE;
			}
			break;

		case SCSI_CAP_LUN_RESET:
			rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
			    TRUE : FALSE;
			break;

		default:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_4, 0,
			    "fcp_getcap: unsupported %d", cidx);
			rval = UNDEFINED;
			break;
		}

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "get cap: cap=%s, val/tgtonly/doset/rval = "
		    "0x%x/0x%x/0x%x/%d",
		    cap, val, tgtonly, doset, rval);
	}

	return (rval);
}

/*
 * called by the transport to get the port-wwn and lun
 * properties of this device, and to create a "name" based on them
 *
 * these properties don't exist on sun4m
 *
 * return 1 for success else return 0
 */
/* ARGSUSED */
static int
fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
{
	int		i;
	int		*lun;
	int		numChars;
	uint_t		nlun;
	uint_t		count;
	uint_t		nbytes;
	uchar_t		*bytes;
	uint16_t	lun_num;
	uint32_t	tgt_id;
	char		**conf_wwn;
	char		tbuf[(FC_WWN_SIZE << 1) + 1];
	uchar_t		barray[FC_WWN_SIZE];
	dev_info_t	*tgt_dip;
	struct fcp_tgt	*ptgt;
	struct fcp_port	*pptr;
	struct fcp_lun	*plun;

	ASSERT(sd != NULL);
	ASSERT(name !=
NULL);

	tgt_dip = sd->sd_dev;
	pptr = ddi_get_soft_state(fcp_softstate,
	    ddi_get_instance(ddi_get_parent(tgt_dip)));
	if (pptr == NULL) {
		return (0);
	}

	/* NOTE(review): tgt_dip was already dereferenced above */
	ASSERT(tgt_dip != NULL);

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
		name[0] = '\0';
		return (0);
	}

	if (nlun == 0) {
		ddi_prop_free(lun);
		return (0);
	}

	lun_num = lun[0];
	ddi_prop_free(lun);

	/*
	 * Lookup for .conf WWN property
	 */
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
	    &conf_wwn, &count) == DDI_PROP_SUCCESS) {
		ASSERT(count >= 1);

		fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
		ddi_prop_free(conf_wwn);
		mutex_enter(&pptr->port_mutex);
		if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
			mutex_exit(&pptr->port_mutex);
			return (0);
		}
		ptgt = plun->lun_tgt;
		mutex_exit(&pptr->port_mutex);

		/* refresh the node's pwwn/target properties from .conf */
		(void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
		    tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);

		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
		    ptgt->tgt_hard_addr != 0) {
			tgt_id = (uint32_t)fcp_alpa_to_switch[
			    ptgt->tgt_hard_addr];
		} else {
			tgt_id = ptgt->tgt_d_id;
		}

		(void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
		    TARGET_PROP, tgt_id);
	}

	/* get the our port-wwn property */
	bytes = NULL;
	if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
	    &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
		if (bytes != NULL) {
			ddi_prop_free(bytes);
		}
		return (0);
	}

	/* render the 8-byte WWN as 16 hex digits */
	for (i = 0; i < FC_WWN_SIZE; i++) {
		(void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
	}

	/* Stick in the address of the form "wWWN,LUN" */
	numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);

	ASSERT(numChars < len);
	if (numChars >= len) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp_scsi_get_name: "
		    "name parameter length too small, it needs to be %d",
		    numChars+1);
	}

	ddi_prop_free(bytes);

	return (1);
}


/*
 * called by the transport to get the SCSI target id value, returning
 * it in "name"
 *
 * this isn't needed/used on sun4m
 *
 * return 1 for success else return 0
 */
/* ARGSUSED */
static int
fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
{
	struct fcp_lun	*plun = ADDR2LUN(&sd->sd_address);
	struct fcp_tgt	*ptgt;
	int		numChars;

	if (plun == NULL) {
		return (0);
	}

	if ((ptgt = plun->lun_tgt) == NULL) {
		return (0);
	}

	/* the bus address is simply the hex D_ID */
	numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);

	ASSERT(numChars < len);
	if (numChars >= len) {
		fcp_log(CE_WARN, NULL,
		    "!fcp_scsi_get_bus_addr: "
		    "name parameter length too small, it needs to be %d",
		    numChars+1);
	}

	return (1);
}


/*
 * called internally to reset the link where the specified port lives
 */
static int
fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
{
	la_wwn_t	wwn;
	struct fcp_lun	*plun;
	struct fcp_tgt	*ptgt;

	/* disable restart of lip if we're suspended */
	mutex_enter(&pptr->port_mutex);

	if (pptr->port_state & (FCP_STATE_SUSPENDED |
	    FCP_STATE_POWER_DOWN)) {
		mutex_exit(&pptr->port_mutex);
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_linkreset, fcp%d: link reset "
		    "disabled due to DDI_SUSPEND",
		    ddi_get_instance(pptr->port_dip));
		return (FC_FAILURE);
	}

	/* an offline/onlining port will be handled by the state callback */
	if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
		mutex_exit(&pptr->port_mutex);
		return (FC_SUCCESS);
	}

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");

	/*
	 * If ap == NULL assume local link reset.
	 */
	if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
		plun = ADDR2LUN(ap);
		ptgt = plun->lun_tgt;
		bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
	} else {
		bzero((caddr_t)&wwn, sizeof (wwn));
	}
	mutex_exit(&pptr->port_mutex);

	return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
}


/*
 * called from fcp_port_attach() to resume a port
 * return DDI_* success/failure status
 * acquires and releases the global mutex
 * acquires and releases the port mutex
 */
/*ARGSUSED*/

static int
fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
    uint32_t s_id, fc_attach_cmd_t cmd, int instance)
{
	int			res = DDI_FAILURE; /* default result */
	struct fcp_port		*pptr;	/* port state ptr */
	uint32_t		alloc_cnt;
	uint32_t		max_cnt;
	fc_portmap_t		*tmp_list = NULL;

	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
	    instance);

	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
		cmn_err(CE_WARN, "fcp: bad soft state");
		return (res);
	}

	mutex_enter(&pptr->port_mutex);
	switch (cmd) {
	case FC_CMD_RESUME:
		ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
		pptr->port_state &= ~FCP_STATE_SUSPENDED;
		break;

	case FC_CMD_POWER_UP:
		/*
		 * If the port is DDI_SUSPENded, defer rediscovery
		 * until DDI_RESUME occurs
		 */
		if (pptr->port_state & FCP_STATE_SUSPENDED) {
			pptr->port_state &= ~FCP_STATE_POWER_DOWN;
			mutex_exit(&pptr->port_mutex);
			return (DDI_SUCCESS);
		}
		pptr->port_state &= ~FCP_STATE_POWER_DOWN;
	}
	pptr->port_id = s_id;
	pptr->port_state = FCP_STATE_INIT;
	mutex_exit(&pptr->port_mutex);

	/*
	 * Make a copy of ulp_port_info as fctl allocates
	 * a temp struct.
	 */
	(void) fcp_cp_pinfo(pptr, pinfo);

	/* first resumed port (re)arms the global watchdog timeout */
	mutex_enter(&fcp_global_mutex);
	if (fcp_watchdog_init++ == 0) {
		fcp_watchdog_tick = fcp_watchdog_timeout *
		    drv_usectohz(1000000);
		fcp_watchdog_id = timeout(fcp_watch,
		    NULL, fcp_watchdog_tick);
	}
	mutex_exit(&fcp_global_mutex);

	/*
	 * Handle various topologies and link states.
	 */
	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
	case FC_STATE_OFFLINE:
		/*
		 * Wait for ONLINE, at which time a state
		 * change will cause a statec_callback
		 */
		res = DDI_SUCCESS;
		break;

	case FC_STATE_ONLINE:

		if (pptr->port_topology == FC_TOP_UNKNOWN) {
			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
			res = DDI_SUCCESS;
			break;
		}

		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    !fcp_enable_auto_configuration) {
			tmp_list = fcp_construct_map(pptr, &alloc_cnt);
			if (tmp_list == NULL) {
				/* an empty map (alloc_cnt == 0) is not an error */
				if (!alloc_cnt) {
					res = DDI_SUCCESS;
				}
				break;
			}
			max_cnt = alloc_cnt;
		} else {
			ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);

			alloc_cnt = FCP_MAX_DEVICES;

			if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
			    (sizeof (fc_portmap_t)) * alloc_cnt,
			    KM_NOSLEEP)) == NULL) {
				fcp_log(CE_WARN, pptr->port_dip,
				    "!fcp%d: failed to allocate portmap",
				    instance);
				break;
			}

			max_cnt = alloc_cnt;
			if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
			    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
			    FC_SUCCESS) {
				caddr_t msg;

				(void) fc_ulp_error(res, &msg);

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "resume failed getportmap: reason=0x%x",
				    res);

				fcp_log(CE_WARN, pptr->port_dip,
				    "!failed to get port map : %s", msg);
				break;
			}
			if (max_cnt > alloc_cnt) {
				alloc_cnt = max_cnt;
			}
		}

		/*
		 * do the SCSI device discovery and create
		 * the devinfos
		 */
		fcp_statec_callback(ulph, pptr->port_fp_handle,
		    pptr->port_phys_state, pptr->port_topology, tmp_list,
		    max_cnt, pptr->port_id);

		res = DDI_SUCCESS;
		break;

	default:
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: invalid port state at attach=0x%x",
		    instance, pptr->port_phys_state);

		/*
		 * NOTE(review): this stores the driver-private flag
		 * FCP_STATE_OFFLINE into port_phys_state, which elsewhere
		 * holds FC_STATE_* values — looks inconsistent; confirm
		 * intent before changing.
		 */
		mutex_enter(&pptr->port_mutex);
		pptr->port_phys_state = FCP_STATE_OFFLINE;
		mutex_exit(&pptr->port_mutex);
		res = DDI_SUCCESS;

		break;
	}

	if (tmp_list != NULL) {
		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
	}

	return (res);
}


/*
 * Copy the relevant fields of the fc_ulp_port_info_t handed to us by
 * fctl into the per-port soft state.
 */
static void
fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
{
	pptr->port_fp_modlinkage = *pinfo->port_linkage;
	pptr->port_dip = pinfo->port_dip;
	pptr->port_fp_handle = pinfo->port_handle;
	if (pinfo->port_acc_attr != NULL) {
		/*
		 * FCA supports DMA
		 */
		pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
		pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
		pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
		pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
	}
	pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
	pptr->port_max_exch = pinfo->port_fca_max_exch;
	pptr->port_phys_state =
pinfo->port_state; 12324 pptr->port_topology = pinfo->port_flags; 12325 pptr->port_reset_action = pinfo->port_reset_action; 12326 pptr->port_cmds_dma_flags = pinfo->port_dma_behavior; 12327 pptr->port_fcp_dma = pinfo->port_fcp_dma; 12328 bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t)); 12329 bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t)); 12330 12331 /* Clear FMA caps to avoid fm-capability ereport */ 12332 if (pptr->port_cmd_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR) 12333 pptr->port_cmd_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 12334 if (pptr->port_data_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR) 12335 pptr->port_data_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 12336 if (pptr->port_resp_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR) 12337 pptr->port_resp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 12338 } 12339 12340 /* 12341 * If the elements wait field is set to 1 then 12342 * another thread is waiting for the operation to complete. Once 12343 * it is complete, the waiting thread is signaled and the element is 12344 * freed by the waiting thread. If the elements wait field is set to 0 12345 * the element is freed. 12346 */ 12347 static void 12348 fcp_process_elem(struct fcp_hp_elem *elem, int result) 12349 { 12350 ASSERT(elem != NULL); 12351 mutex_enter(&elem->mutex); 12352 elem->result = result; 12353 if (elem->wait) { 12354 elem->wait = 0; 12355 cv_signal(&elem->cv); 12356 mutex_exit(&elem->mutex); 12357 } else { 12358 mutex_exit(&elem->mutex); 12359 cv_destroy(&elem->cv); 12360 mutex_destroy(&elem->mutex); 12361 kmem_free(elem, sizeof (struct fcp_hp_elem)); 12362 } 12363 } 12364 12365 /* 12366 * This function is invoked from the taskq thread to allocate 12367 * devinfo nodes and to online/offline them. 
 */
static void
fcp_hp_task(void *arg)
{
	struct fcp_hp_elem	*elem = (struct fcp_hp_elem *)arg;
	struct fcp_lun	*plun = elem->lun;
	struct fcp_port	*pptr = elem->port;
	int	result;

	ASSERT(elem->what == FCP_ONLINE ||
	    elem->what == FCP_OFFLINE ||
	    elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
	    elem->what == FCP_MPXIO_PATH_SET_BUSY);

	/*
	 * Bail out (reporting NDI_FAILURE) if the LUN saw another
	 * online/offline event since this request was queued, or if the
	 * port is suspended, powering down or detaching.
	 */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);
	if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
	    plun->lun_event_count != elem->event_cnt) ||
	    pptr->port_state & (FCP_STATE_SUSPENDED |
	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);
		fcp_process_elem(elem, NDI_FAILURE);
		return;
	}
	mutex_exit(&plun->lun_mutex);
	mutex_exit(&pptr->port_mutex);

	/* Perform the actual online/offline/busy work, then report it. */
	result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
	    elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
	fcp_process_elem(elem, result);
}


/*
 * Function: fcp_get_cip
 *
 * Description: Returns the child node (dev_info or mdi_pathinfo) for the
 *		LUN, creating it if it does not already exist.  Whether a
 *		dip or a pip is created depends on the port and per-LUN
 *		MPxIO flags; plun->lun_mpxio is updated to match what was
 *		created.
 *
 * Argument:	plun		Lun structure
 *		cip		Candidate child node (may be NULL).
 *		lcount		Link state count.
 *		tcount		Target state change count.
 *
 * Return Value: plun->lun_cip (NULL if creation failed)
 *
 * Context:	Kernel context.  Called with lun_mutex held (and
 *		port_mutex held when creation is required).
 */
static child_info_t *
fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount)
{
	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
		struct fcp_port *pptr = plun->lun_tgt->tgt_port;

		ASSERT(MUTEX_HELD(&pptr->port_mutex));
		/*
		 * Child has not been created yet. Create the child device
		 * based on the per-Lun flags.
		 */
		if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
			plun->lun_cip =
			    CIP(fcp_create_dip(plun, lcount, tcount));
			plun->lun_mpxio = 0;
		} else {
			plun->lun_cip =
			    CIP(fcp_create_pip(plun, lcount, tcount));
			plun->lun_mpxio = 1;
		}
	} else {
		plun->lun_cip = cip;
	}

	return (plun->lun_cip);
}


/*
 * Function: fcp_is_dip_present
 *
 * Description: Checks whether cdip is still a child of the port's dip by
 *		walking the parent's child list under ndi_devi_enter().
 *
 * Argument:	plun		Lun structure
 *		cdip		dev_info node to look for.
 *
 * Return Value: FC_SUCCESS if cdip is found among the port dip's children,
 *		 FC_FAILURE otherwise (including lun_cip == NULL).
 *
 * Context:	Kernel context.  Called with lun_mutex held.
 */
static int
fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
{
	int	rval = FC_FAILURE;
	dev_info_t	*pdip;
	struct dev_info	*dip;
	int	circular;

	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	pdip = plun->lun_tgt->tgt_port->port_dip;

	if (plun->lun_cip == NULL) {
		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_is_dip_present: plun->lun_cip is NULL: "
		    "plun: %p lun state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (rval);
	}
	/* Lock the devinfo tree while we walk the sibling chain. */
	ndi_devi_enter(pdip, &circular);
	dip = DEVI(pdip)->devi_child;
	while (dip) {
		if (dip == DEVI(cdip)) {
			rval = FC_SUCCESS;
			break;
		}
		dip = dip->devi_sibling;
	}
	ndi_devi_exit(pdip, circular);
	return (rval);
}

/*
 * Function: fcp_is_child_present
 *
 * Description: Dispatches on plun->lun_mpxio to check whether the child
 *		node (dip or pip) still exists for this LUN.
 *
 * Return Value: FC_SUCCESS / FC_FAILURE
 *
 * Context:	Kernel context.  Called with lun_mutex held.
 */
static int
fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
{
	int	rval = FC_FAILURE;

	ASSERT(plun != NULL);
	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	if (plun->lun_mpxio == 0) {
		rval = fcp_is_dip_present(plun, DIP(cip));
	} else {
		rval = fcp_is_pip_present(plun, PIP(cip));
	}

	return (rval);
}

/*
 * Function: fcp_create_dip
 *
 * Description: Creates a dev_info_t structure for the LUN specified by the
 *		caller.
 *
 * Argument:	plun		Lun structure
 *		link_cnt	Link state count.
 *		tgt_cnt		Target state change count.
 *
 * Return Value:	NULL if it failed
 *			dev_info_t structure address if it succeeded
 *
 * Context:	Kernel context
 */
static dev_info_t *
fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
{
	int	failure = 0;
	uint32_t	tgt_id;
	uint64_t	sam_lun;
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	struct fcp_port	*pptr = ptgt->tgt_port;
	dev_info_t	*pdip = pptr->port_dip;
	dev_info_t	*cdip = NULL;
	dev_info_t	*old_dip = DIP(plun->lun_cip);
	char	*nname = NULL;
	char	**compatible = NULL;
	int	ncompatible;
	char	*scsi_binding_set;
	char	t_pwwn[17];

	ASSERT(MUTEX_HELD(&plun->lun_mutex));
	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	/* get the 'scsi-binding-set' property */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
	    &scsi_binding_set) != DDI_PROP_SUCCESS) {
		scsi_binding_set = NULL;
	}

	/* determine the node name and compatible */
	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
	if (scsi_binding_set) {
		ddi_prop_free(scsi_binding_set);
	}

	/* No node name means no driver binds to this device type. */
	if (nname == NULL) {
#ifdef DEBUG
		cmn_err(CE_WARN, "%s%d: no driver for "
		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
		    " compatible: %s",
		    ddi_driver_name(pdip), ddi_get_instance(pdip),
		    ptgt->tgt_port_wwn.raw_wwn[0],
		    ptgt->tgt_port_wwn.raw_wwn[1],
		    ptgt->tgt_port_wwn.raw_wwn[2],
		    ptgt->tgt_port_wwn.raw_wwn[3],
		    ptgt->tgt_port_wwn.raw_wwn[4],
		    ptgt->tgt_port_wwn.raw_wwn[5],
		    ptgt->tgt_port_wwn.raw_wwn[6],
		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
		    *compatible);
#endif	/* DEBUG */
		failure++;
		goto end_of_fcp_create_dip;
	}

	cdip = fcp_find_existing_dip(plun, pdip, nname);

	/*
	 * if the old_dip does not match the cdip, that means there is
	 * some property change. since we'll be using the cdip, we need
	 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
	 * then the dtype for the device has been updated. Offline the
	 * the old device and create a new device with the new device type
	 * Refer to bug: 4764752
	 */
	if (old_dip && (cdip != old_dip ||
	    plun->lun_state & FCP_LUN_CHANGED)) {
		plun->lun_state &= ~(FCP_LUN_INIT);
		/*
		 * Drop lun/port mutexes (reacquired below) before handing
		 * the offline of the stale dip to the hotplug taskq.
		 */
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		mutex_enter(&ptgt->tgt_mutex);
		(void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
		    link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
		mutex_exit(&ptgt->tgt_mutex);

#ifdef DEBUG
		if (cdip != NULL) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "Old dip=%p; New dip=%p don't match", old_dip,
			    cdip);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "Old dip=%p; New dip=NULL don't match", old_dip);
		}
#endif

		mutex_enter(&pptr->port_mutex);
		mutex_enter(&plun->lun_mutex);
	}

	/* Allocate a fresh node if none was found or the dtype changed. */
	if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
		plun->lun_state &= ~(FCP_LUN_CHANGED);
		if (ndi_devi_alloc(pptr->port_dip, nname,
		    DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
			failure++;
			goto end_of_fcp_create_dip;
		}
	}

	/*
	 * Previously all the properties for the devinfo were destroyed here
	 * with a call to ndi_prop_remove_all(). Since this may cause loss of
	 * the devid property (and other properties established by the target
	 * driver or framework) which the code does not always recreate, this
	 * call was removed.
	 * This opens a theoretical possibility that we may return with a
	 * stale devid on the node if the scsi entity behind the fibre channel
	 * lun has changed.
	 */

	/* decorate the node with compatible */
	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
	t_pwwn[16] = '\0';
	if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
	    != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	/*
	 * If there is no hard address - We might have to deal with
	 * that by using WWN - Having said that it is important to
	 * recognize this problem early so ssd can be informed of
	 * the right interconnect type.
	 */
	if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
		tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
	} else {
		tgt_id = ptgt->tgt_d_id;
	}

	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
	    tgt_id) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
	    (int)plun->lun_num) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}
	/* SAM LUN address: raw 8-byte FCP LUN copied into a 64-bit prop. */
	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
	if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
	    sam_lun) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

end_of_fcp_create_dip:
	scsi_hba_nodename_compatible_free(nname, compatible);

	/* On any failure, undo the partially decorated node. */
	if (cdip != NULL && failure) {
		(void) ndi_prop_remove_all(cdip);
		(void) ndi_devi_free(cdip);
		cdip = NULL;
	}

	return (cdip);
}

/*
 * Function: fcp_create_pip
 *
 * Description: Creates a Path Id for the LUN specified by the caller.
 *
 * Argument:	plun		Lun structure
 *		link_cnt	Link state count.
 *		tgt_cnt		Target state count.
 *
 * Return Value:	NULL if it failed
 *			mdi_pathinfo_t structure address if it succeeded
 *
 * Context:	Kernel context
 */
static mdi_pathinfo_t *
fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
{
	int	i;
	char	buf[MAXNAMELEN];
	char	uaddr[MAXNAMELEN];
	int	failure = 0;
	uint32_t	tgt_id;
	uint64_t	sam_lun;
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	struct fcp_port	*pptr = ptgt->tgt_port;
	dev_info_t	*pdip = pptr->port_dip;
	mdi_pathinfo_t	*pip = NULL;
	mdi_pathinfo_t	*old_pip = PIP(plun->lun_cip);
	char	*nname = NULL;
	char	**compatible = NULL;
	int	ncompatible;
	char	*scsi_binding_set;
	char	t_pwwn[17];

	ASSERT(MUTEX_HELD(&plun->lun_mutex));
	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	/* MPxIO clients always bind under the "vhci" binding set. */
	scsi_binding_set = "vhci";

	/* determine the node name and compatible */
	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);

	if (nname == NULL) {
#ifdef DEBUG
		cmn_err(CE_WARN, "fcp_create_dip: %s%d: no driver for "
		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
		    " compatible: %s",
		    ddi_driver_name(pdip), ddi_get_instance(pdip),
		    ptgt->tgt_port_wwn.raw_wwn[0],
		    ptgt->tgt_port_wwn.raw_wwn[1],
		    ptgt->tgt_port_wwn.raw_wwn[2],
		    ptgt->tgt_port_wwn.raw_wwn[3],
		    ptgt->tgt_port_wwn.raw_wwn[4],
		    ptgt->tgt_port_wwn.raw_wwn[5],
		    ptgt->tgt_port_wwn.raw_wwn[6],
		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
		    *compatible);
#endif	/* DEBUG */
		failure++;
		goto end_of_fcp_create_pip;
	}

	pip = fcp_find_existing_pip(plun, pdip);

	/*
	 * if the old_dip does not match the cdip, that means there is
	 * some property change. since we'll be using the cdip, we need
	 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
	 * then the dtype for the device has been updated. Offline the
	 * the old device and create a new device with the new device type
	 * Refer to bug: 4764752
	 */
	if (old_pip && (pip != old_pip ||
	    plun->lun_state & FCP_LUN_CHANGED)) {
		plun->lun_state &= ~(FCP_LUN_INIT);
		/* Drop lun/port mutexes (reacquired below) for the offline. */
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		mutex_enter(&ptgt->tgt_mutex);
		(void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
		    FCP_OFFLINE, lcount, tcount,
		    NDI_DEVI_REMOVE, 0);
		mutex_exit(&ptgt->tgt_mutex);

		if (pip != NULL) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "Old pip=%p; New pip=%p don't match",
			    old_pip, pip);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "Old pip=%p; New pip=NULL don't match",
			    old_pip);
		}

		mutex_enter(&pptr->port_mutex);
		mutex_enter(&plun->lun_mutex);
	}

	/*
	 * Since FC_WWN_SIZE is 8 bytes and its not like the
	 * lun_guid_size which is dependent on the target, I don't
	 * believe the same trancation happens here UNLESS the standards
	 * change the FC_WWN_SIZE value to something larger than
	 * MAXNAMELEN(currently 255 bytes).
	 */

	/* Build the unit address "w<port-wwn-hex>,<lun>" for MDI lookup. */
	for (i = 0; i < FC_WWN_SIZE; i++) {
		(void) sprintf(&buf[i << 1], "%02x",
		    ptgt->tgt_port_wwn.raw_wwn[i]);
	}

	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
	    buf, plun->lun_num);

	if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
		/*
		 * Release the locks before calling into
		 * mdi_pi_alloc_compatible() since this can result in a
		 * callback into fcp which can result in a deadlock
		 * (see bug # 4870272).
		 *
		 * Basically, what we are trying to avoid is the scenario where
		 * one thread does ndi_devi_enter() and tries to grab
		 * fcp_mutex and another does it the other way round.
		 *
		 * But before we do that, make sure that nobody releases the
		 * port in the meantime. We can do this by setting a flag.
		 */
		plun->lun_state &= ~(FCP_LUN_CHANGED);
		pptr->port_state |= FCP_STATE_IN_MDI;
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);
		if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
		    uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
			/*
			 * NOTE(review): "0x%x" is used with a pointer
			 * argument (plun); %p looks like the right
			 * conversion here - verify fcp_log/vsprintf
			 * handling before changing.
			 */
			fcp_log(CE_WARN, pptr->port_dip,
			    "!path alloc failed:0x%x", plun);
			mutex_enter(&pptr->port_mutex);
			mutex_enter(&plun->lun_mutex);
			pptr->port_state &= ~FCP_STATE_IN_MDI;
			failure++;
			goto end_of_fcp_create_pip;
		}
		mutex_enter(&pptr->port_mutex);
		mutex_enter(&plun->lun_mutex);
		pptr->port_state &= ~FCP_STATE_IN_MDI;
	} else {
		/* Reusing the existing pip: drop its stale properties. */
		(void) mdi_prop_remove(pip, NULL);
	}

	mdi_pi_set_phci_private(pip, (caddr_t)plun);

	if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
	    != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_pip;
	}

	if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
	    != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_pip;
	}

	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
	t_pwwn[16] = '\0';
	if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
	    != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_pip;
	}

	/*
	 * If there is no hard address - We might have to deal with
	 * that by using WWN - Having said that it is important to
	 * recognize this problem early so ssd can be informed of
	 * the right interconnect type.
	 */
	if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
	    ptgt->tgt_hard_addr != 0) {
		tgt_id = (uint32_t)
		    fcp_alpa_to_switch[ptgt->tgt_hard_addr];
	} else {
		tgt_id = ptgt->tgt_d_id;
	}

	if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
	    != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_pip;
	}

	if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
	    != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_pip;
	}
	/* SAM LUN address: raw 8-byte FCP LUN copied into a 64-bit prop. */
	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
	if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
	    != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_pip;
	}

end_of_fcp_create_pip:
	scsi_hba_nodename_compatible_free(nname, compatible);

	/*
	 * On failure undo the pip; mdi_pi_free() must be called without
	 * lun/port mutexes held, hence the drop/reacquire around it.
	 */
	if (pip != NULL && failure) {
		(void) mdi_prop_remove(pip, NULL);
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);
		(void) mdi_pi_free(pip, 0);
		mutex_enter(&pptr->port_mutex);
		mutex_enter(&plun->lun_mutex);
		pip = NULL;
	}

	return (pip);
}

/*
 * Function: fcp_find_existing_dip
 *
 * Description: Searches the children of pdip for a node whose node name,
 *		node WWN, port WWN, target and lun properties all match
 *		this LUN.  Returns the matching dip or NULL.
 *
 * Context:	Kernel context
 */
static dev_info_t *
fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
{
	uint_t	nbytes;
	uchar_t	*bytes;
	uint_t	nwords;
	uint32_t	tgt_id;
	int	*words;
	dev_info_t	*cdip;
	dev_info_t	*ndip;
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	struct fcp_port	*pptr = ptgt->tgt_port;
	int	circular;

	/* Lock the devinfo tree while walking pdip's children. */
	ndi_devi_enter(pdip, &circular);

	ndip = (dev_info_t *)DEVI(pdip)->devi_child;
	while ((cdip = ndip) != NULL) {
		/* Grab the sibling first: cdip may be the match we keep. */
		ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;

		if (strcmp(DEVI(cdip)->devi_node_name, name)) {
			continue;
		}

		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
		    &nbytes) != DDI_PROP_SUCCESS)
{
			continue;
		}

		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
			if (bytes != NULL) {
				ddi_prop_free(bytes);
			}
			continue;
		}
		ASSERT(bytes != NULL);

		/* Node WWN must match the target's node WWN. */
		if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
			ddi_prop_free(bytes);
			continue;
		}

		ddi_prop_free(bytes);

		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
		    &nbytes) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
			if (bytes != NULL) {
				ddi_prop_free(bytes);
			}
			continue;
		}
		ASSERT(bytes != NULL);

		/* Port WWN must match the target's port WWN. */
		if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
			ddi_prop_free(bytes);
			continue;
		}

		ddi_prop_free(bytes);

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
		    &nwords) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nwords != 1 || words == NULL) {
			if (words != NULL) {
				ddi_prop_free(words);
			}
			continue;
		}
		ASSERT(words != NULL);

		/*
		 * If there is no hard address - We might have to deal with
		 * that by using WWN - Having said that it is important to
		 * recognize this problem early so ssd can be informed of
		 * the right interconnect type.
		 */
		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
		    ptgt->tgt_hard_addr != 0) {
			tgt_id =
			    (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
		} else {
			tgt_id = ptgt->tgt_d_id;
		}

		if (tgt_id != (uint32_t)*words) {
			ddi_prop_free(words);
			continue;
		}
		ddi_prop_free(words);

		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
		    &nwords) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nwords != 1 || words == NULL) {
			if (words != NULL) {
				ddi_prop_free(words);
			}
			continue;
		}
		ASSERT(words != NULL);

		/* All properties matched: cdip is the existing node. */
		if (plun->lun_num == (uint16_t)*words) {
			ddi_prop_free(words);
			break;
		}
		ddi_prop_free(words);
	}
	ndi_devi_exit(pdip, circular);

	return (cdip);
}


/*
 * Function: fcp_is_pip_present
 *
 * Description: Checks whether pip is still the mdi_pathinfo node for this
 *		LUN by rebuilding its unit address and looking it up with
 *		mdi_pi_find() (using the old GUID if one is pending).
 *
 * Return Value: FC_SUCCESS / FC_FAILURE
 *
 * Context:	Kernel context.  Called with lun_mutex held.
 */
static int
fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
{
	dev_info_t	*pdip;
	char	buf[MAXNAMELEN];
	char	uaddr[MAXNAMELEN];
	int	rval = FC_FAILURE;

	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	pdip = plun->lun_tgt->tgt_port->port_dip;

	/*
	 * Check if pip (and not plun->lun_cip) is NULL. plun->lun_cip can be
	 * non-NULL even when the LUN is not there as in the case when a LUN is
	 * configured and then deleted on the device end (for T3/T4 case). In
	 * such cases, pip will be NULL.
	 *
	 * If the device generates an RSCN, it will end up getting offlined when
	 * it disappeared and a new LUN will get created when it is rediscovered
	 * on the device. If we check for lun_cip here, the LUN will not end
	 * up getting onlined since this function will end up returning a
	 * FC_SUCCESS.
	 *
	 * The behavior is different on other devices. For instance, on a HDS,
	 * there was no RSCN generated by the device but the next I/O generated
	 * a check condition and rediscovery got triggered that way. So, in
	 * such cases, this path will not be exercised
	 */
	if (pip == NULL) {
		FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_4, 0,
		    "fcp_is_pip_present: plun->lun_cip is NULL: "
		    "plun: %p lun state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (rval);
	}

	fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);

	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);

	/* A pending GUID change means the pip is filed under the old GUID. */
	if (plun->lun_old_guid) {
		if (mdi_pi_find(pdip, plun->lun_old_guid, uaddr) == pip) {
			rval = FC_SUCCESS;
		}
	} else {
		if (mdi_pi_find(pdip, plun->lun_guid, uaddr) == pip) {
			rval = FC_SUCCESS;
		}
	}
	return (rval);
}

/*
 * Function: fcp_find_existing_pip
 *
 * Description: Looks up the mdi_pathinfo node for this LUN under pdip,
 *		using the LUN's GUID and its "w<port-wwn>,<lun>" unit
 *		address.
 *
 * Return Value: matching mdi_pathinfo_t, or NULL
 *
 * Context:	Kernel context.  Called with port_mutex held.
 */
static mdi_pathinfo_t *
fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
{
	char	buf[MAXNAMELEN];
	char	uaddr[MAXNAMELEN];
	mdi_pathinfo_t	*pip;
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	struct fcp_port	*pptr = ptgt->tgt_port;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);

	pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);

	return (pip);
}


/*
 * Function: fcp_online_child
 *
 * Description: Onlines the child node (dev_info or mdi_pathinfo) of the
 *		LUN via ndi_devi_online()/mdi_pi_online().  If MPxIO does
 *		not support the device the path is freed and the LUN is
 *		re-enumerated in legacy (dip) mode.  Drops and reacquires
 *		port_mutex/lun_mutex around the framework calls.
 *
 * Return Value: NDI_SUCCESS / NDI_FAILURE
 *
 * Context:	Kernel context.  Called with port_mutex and lun_mutex held.
 */
static int
fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags, int *circ)
{
	int	rval;
	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	dev_info_t	*cdip = NULL;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));
	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	if (plun->lun_cip ==
NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_online_child: plun->lun_cip is NULL: "
		    "plun: %p state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (NDI_FAILURE);
	}
again:
	if (plun->lun_mpxio == 0) {
		/* Legacy (non-MPxIO) path: online the devinfo node. */
		cdip = DIP(cip);
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "!Invoking ndi_devi_online for %s: target=%x lun=%x",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);

		/*
		 * We could check for FCP_LUN_INIT here but chances
		 * of getting here when it's already in FCP_LUN_INIT
		 * is rare and a duplicate ndi_devi_online wouldn't
		 * hurt either (as the node would already have been
		 * in CF2)
		 */
		if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
			rval = ndi_devi_bind_driver(cdip, flags);
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "!Invoking ndi_devi_bind_driver: rval=%d", rval);
		} else {
			rval = ndi_devi_online(cdip, flags);
		}

		/*
		 * We log the message into trace buffer if the device
		 * is "ses" and into syslog for any other device
		 * type. This is to prevent the ndi_devi_online failure
		 * message that appears for V880/A5K ses devices.
		 */
		if (rval == NDI_SUCCESS) {
			mutex_enter(&ptgt->tgt_mutex);
			plun->lun_state |= FCP_LUN_INIT;
			mutex_exit(&ptgt->tgt_mutex);
		} else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
			fcp_log(CE_NOTE, pptr->port_dip,
			    "!ndi_devi_online:"
			    " failed for %s: target=%x lun=%x %x",
			    ddi_get_name(cdip), ptgt->tgt_d_id,
			    plun->lun_num, rval);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    " !ndi_devi_online:"
			    " failed for %s: target=%x lun=%x %x",
			    ddi_get_name(cdip), ptgt->tgt_d_id,
			    plun->lun_num, rval);
		}
	} else {
		/* MPxIO path: online the mdi_pathinfo node. */
		cdip = mdi_pi_get_client(PIP(cip));
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "!Invoking mdi_pi_online for %s: target=%x lun=%x",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);

		/*
		 * Hold path and exit phci to avoid deadlock with power
		 * management code during mdi_pi_online.
		 */
		mdi_hold_path(PIP(cip));
		mdi_devi_exit_phci(pptr->port_dip, *circ);

		rval = mdi_pi_online(PIP(cip), flags);

		mdi_devi_enter_phci(pptr->port_dip, circ);
		mdi_rele_path(PIP(cip));

		if (rval == MDI_SUCCESS) {
			mutex_enter(&ptgt->tgt_mutex);
			plun->lun_state |= FCP_LUN_INIT;
			mutex_exit(&ptgt->tgt_mutex);

			/*
			 * Clear MPxIO path permanent disable in case
			 * fcp hotplug dropped the offline event.
			 */
			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);

		} else if (rval == MDI_NOT_SUPPORTED) {
			child_info_t	*old_cip = cip;

			/*
			 * MPxIO does not support this device yet.
			 * Enumerate in legacy mode.
			 */
			mutex_enter(&pptr->port_mutex);
			mutex_enter(&plun->lun_mutex);
			plun->lun_mpxio = 0;
			plun->lun_cip = NULL;
			cdip = fcp_create_dip(plun, lcount, tcount);
			plun->lun_cip = cip = CIP(cdip);
			if (cip == NULL) {
				fcp_log(CE_WARN, pptr->port_dip,
				    "!fcp_online_child: "
				    "Create devinfo failed for LU=%p", plun);
				mutex_exit(&plun->lun_mutex);

				mutex_enter(&ptgt->tgt_mutex);
				plun->lun_state |= FCP_LUN_OFFLINE;
				mutex_exit(&ptgt->tgt_mutex);

				mutex_exit(&pptr->port_mutex);

				/*
				 * free the mdi_pathinfo node
				 */
				(void) mdi_pi_free(PIP(old_cip), 0);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_3, 0,
				    "fcp_online_child: creating devinfo "
				    "node 0x%p for plun 0x%p",
				    cip, plun);
				mutex_exit(&plun->lun_mutex);
				mutex_exit(&pptr->port_mutex);
				/*
				 * free the mdi_pathinfo node
				 */
				(void) mdi_pi_free(PIP(old_cip), 0);
				mutex_enter(&pptr->port_mutex);
				mutex_enter(&plun->lun_mutex);
				/* Retry the online with the new dip. */
				goto again;
			}
		} else {
			if (cdip) {
				fcp_log(CE_NOTE, pptr->port_dip,
				    "!fcp_online_child: mdi_pi_online:"
				    " failed for %s: target=%x lun=%x %x",
				    ddi_get_name(cdip), ptgt->tgt_d_id,
				    plun->lun_num, rval);
			}
		}
		/* Map the MDI result onto the NDI result space. */
		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
	}

	if (rval == NDI_SUCCESS) {
		/* Fire the FCAL insert event for the newly onlined child. */
		if (cdip) {
			(void) ndi_event_retrieve_cookie(
			    pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
			    &fcp_insert_eid, NDI_EVENT_NOPASS);
			(void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
			    cdip, fcp_insert_eid, NULL);
		}
	}
	/* Reacquire the mutexes the caller expects to still hold. */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);
	return (rval);
}

/*
 * Function: fcp_offline_child
 *
 * Description: Offlines the child node (dev_info or mdi_pathinfo) of the
 *		LUN via ndi_devi_offline()/mdi_pi_offline(), clearing
 *		FCP_LUN_INIT and, with NDI_DEVI_REMOVE, dropping the LUN's
 *		reference to the child.  Drops and reacquires
 *		port_mutex/lun_mutex around the framework calls.
 *
 * Return Value: NDI_SUCCESS / NDI_FAILURE
 *
 * Context:	Kernel context.  Called with lun_mutex and port_mutex held.
 */
/* ARGSUSED */
static int
fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags, int *circ)
{
	int	rval;
	int	lun_mpxio;
	struct fcp_port	*pptr = plun->lun_tgt->tgt_port;
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	dev_info_t	*cdip;

	ASSERT(MUTEX_HELD(&plun->lun_mutex));
	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	if (plun->lun_cip == NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_offline_child: plun->lun_cip is NULL: "
		    "plun: %p lun state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (NDI_FAILURE);
	}

	/*
	 * We will use this value twice. Make a copy to be sure we use
	 * the same value in both places.
	 */
	lun_mpxio = plun->lun_mpxio;

	if (lun_mpxio == 0) {
		/* Legacy (non-MPxIO) path: offline the devinfo node. */
		cdip = DIP(cip);
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);
		rval = ndi_devi_offline(DIP(cip), flags);
		if (rval != NDI_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_offline_child: ndi_devi_offline failed "
			    "rval=%x cip=%p", rval, cip);
		}
	} else {
		/* MPxIO path: offline the mdi_pathinfo node. */
		cdip = mdi_pi_get_client(PIP(cip));
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		/*
		 * Exit phci to avoid deadlock with power management code
		 * during mdi_pi_offline
		 */
		mdi_hold_path(PIP(cip));
		mdi_devi_exit_phci(pptr->port_dip, *circ);

		rval = mdi_pi_offline(PIP(cip), flags);

		mdi_devi_enter_phci(pptr->port_dip, circ);
		mdi_rele_path(PIP(cip));

		/* Map the MDI result onto the NDI result space. */
		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
	}

	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_state &= ~FCP_LUN_INIT;
	mutex_exit(&ptgt->tgt_mutex);

	if (rval == NDI_SUCCESS) {
		/* Suppress the failure trace below on success. */
		cdip = NULL;
		if (flags & NDI_DEVI_REMOVE) {
			mutex_enter(&plun->lun_mutex);
			/*
			 * If the guid of the LUN changes, lun_cip will not
			 * equal to cip, and after offlining the LUN with the
			 * old guid, we should keep lun_cip since it's the cip
			 * of the LUN with the new guid.
			 * Otherwise remove our reference to child node.
			 *
			 * This must be done before the child node is freed,
			 * otherwise other threads could see a stale lun_cip
			 * pointer.
			 */
			if (plun->lun_cip == cip) {
				plun->lun_cip = NULL;
			}
			if (plun->lun_old_guid) {
				kmem_free(plun->lun_old_guid,
				    plun->lun_old_guid_size);
				plun->lun_old_guid = NULL;
				plun->lun_old_guid_size = 0;
			}
			mutex_exit(&plun->lun_mutex);
		}
	}

	if (lun_mpxio != 0) {
		if (rval == NDI_SUCCESS) {
			/*
			 * Clear MPxIO path permanent disable as the path is
			 * already offlined.
			 */
			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);

			if (flags & NDI_DEVI_REMOVE) {
				(void) mdi_pi_free(PIP(cip), 0);
			}
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_offline_child: mdi_pi_offline failed "
			    "rval=%x cip=%p", rval, cip);
		}
	}

	/* Reacquire the mutexes the caller expects to still hold. */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);

	if (cdip) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
		    " target=%x lun=%x", "ndi_offline",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
	}

	return (rval);
}

/*
 * Function: fcp_remove_child
 *
 * Description: Removes the child node of the LUN, if still present.  For
 *		legacy dips the node is freed directly; for MPxIO paths the
 *		pip is offlined and freed with all three mutexes (port,
 *		target, lun) dropped and reacquired around the MDI calls.
 *		In all cases plun->lun_cip ends up NULL.
 *
 * Return Value: None
 *
 * Context:	Kernel context.  Called with lun_mutex held (and, per the
 *		mutex_exit calls below, tgt_mutex and port_mutex as well in
 *		the MPxIO case).
 */
static void
fcp_remove_child(struct fcp_lun *plun)
{
	child_info_t	*cip;
	int		circ;

	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
		if (plun->lun_mpxio == 0) {
			(void) ndi_prop_remove_all(DIP(plun->lun_cip));
			(void) ndi_devi_free(DIP(plun->lun_cip));
			plun->lun_cip = NULL;
		} else {
			/*
			 * Clear reference to the child node in the lun.
			 * This must be done before freeing it with mdi_pi_free
			 * and with lun_mutex held so that other threads always
			 * see either valid lun_cip or NULL when holding
			 * lun_mutex. We keep a copy in cip.
			 */
			cip = plun->lun_cip;
			plun->lun_cip = NULL;

			mutex_exit(&plun->lun_mutex);
			mutex_exit(&plun->lun_tgt->tgt_mutex);
			mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);

			mdi_devi_enter(
			    plun->lun_tgt->tgt_port->port_dip, &circ);

			/*
			 * Exit phci to avoid deadlock with power management
			 * code during mdi_pi_offline
			 */
			mdi_hold_path(PIP(cip));
			mdi_devi_exit_phci(
			    plun->lun_tgt->tgt_port->port_dip, circ);
			(void) mdi_pi_offline(PIP(cip),
			    NDI_DEVI_REMOVE);
			mdi_devi_enter_phci(
			    plun->lun_tgt->tgt_port->port_dip, &circ);
			mdi_rele_path(PIP(cip));

			mdi_devi_exit(
			    plun->lun_tgt->tgt_port->port_dip, circ);

			FCP_TRACE(fcp_logq,
			    plun->lun_tgt->tgt_port->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "lun=%p pip freed %p", plun, cip);

			(void) mdi_prop_remove(PIP(cip), NULL);
			(void) mdi_pi_free(PIP(cip), 0);

			/* Reacquire in port -> target -> lun order. */
			mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
			mutex_enter(&plun->lun_tgt->tgt_mutex);
			mutex_enter(&plun->lun_mutex);
		}
	} else {
		plun->lun_cip = NULL;
	}
}

/*
 * called when a timeout occurs
 *
 * can be scheduled during an attach or resume (if not already running)
 *
 * one timeout is set up for all ports
 *
 * acquires and releases the global mutex
 */
/*ARGSUSED*/
static void
fcp_watch(void *arg)
{
	struct fcp_port	*pptr;
	struct fcp_ipkt	*icmd;
	struct fcp_ipkt	*nicmd;
	struct fcp_pkt	*cmd;
	struct fcp_pkt	*ncmd;
	struct fcp_pkt	*tail;
	struct fcp_pkt	*pcmd;
	struct fcp_pkt	*save_head;
	struct fcp_port	*save_port;

	/* increment global watchdog time */
	fcp_watchdog_time += fcp_watchdog_timeout;

	mutex_enter(&fcp_global_mutex);

	/* scan each port in our list */
	for
	    (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
		save_port = fcp_port_head;
		pptr->port_state |= FCP_STATE_IN_WATCHDOG;
		mutex_exit(&fcp_global_mutex);

		mutex_enter(&pptr->port_mutex);
		if (pptr->port_ipkt_list == NULL &&
		    (pptr->port_state & (FCP_STATE_SUSPENDED |
		    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
			pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
			mutex_exit(&pptr->port_mutex);
			mutex_enter(&fcp_global_mutex);
			goto end_of_watchdog;
		}

		/*
		 * We check if a list of targets need to be offlined.
		 */
		if (pptr->port_offline_tgts) {
			fcp_scan_offline_tgts(pptr);
		}

		/*
		 * We check if a list of luns need to be offlined.
		 */
		if (pptr->port_offline_luns) {
			fcp_scan_offline_luns(pptr);
		}

		/*
		 * We check if a list of targets or luns need to be reset.
		 */
		if (pptr->port_reset_list) {
			fcp_check_reset_delay(pptr);
		}

		mutex_exit(&pptr->port_mutex);

		/*
		 * This is where the pending commands (pkt) are checked for
		 * timeout.
		 */
		mutex_enter(&pptr->port_pkt_mutex);
		tail = pptr->port_pkt_tail;

		for (pcmd = NULL, cmd = pptr->port_pkt_head;
		    cmd != NULL; cmd = ncmd) {
			ncmd = cmd->cmd_next;
			/*
			 * If a command is in this queue the bit CFLAG_IN_QUEUE
			 * must be set.
			 */
			ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
			/*
			 * FCP_INVALID_TIMEOUT will be set for those
			 * command that need to be failed. Mostly those
			 * cmds that could not be queued down for the
			 * "timeout" value. cmd->cmd_timeout is used
			 * to try and requeue the command regularly.
			 */
			if (cmd->cmd_timeout >= fcp_watchdog_time) {
				/*
				 * This command hasn't timed out yet. Let's
				 * go to the next one.
				 */
				pcmd = cmd;
				goto end_of_loop;
			}

			/* unlink the timed-out command from the queue */
			if (cmd == pptr->port_pkt_head) {
				ASSERT(pcmd == NULL);
				pptr->port_pkt_head = cmd->cmd_next;
			} else {
				ASSERT(pcmd != NULL);
				pcmd->cmd_next = cmd->cmd_next;
			}

			if (cmd == pptr->port_pkt_tail) {
				ASSERT(cmd->cmd_next == NULL);
				pptr->port_pkt_tail = pcmd;
				if (pcmd) {
					pcmd->cmd_next = NULL;
				}
			}
			cmd->cmd_next = NULL;

			/*
			 * save the current head before dropping the
			 * mutex - If the head doesn't remain the
			 * same after re acquiring the mutex, just
			 * bail out and revisit on next tick.
			 *
			 * PS: The tail pointer can change as the commands
			 * get requeued after failure to retransport
			 */
			save_head = pptr->port_pkt_head;
			mutex_exit(&pptr->port_pkt_mutex);

			if (cmd->cmd_fp_pkt->pkt_timeout ==
			    FCP_INVALID_TIMEOUT) {
				struct scsi_pkt *pkt = cmd->cmd_pkt;
				struct fcp_lun *plun;
				struct fcp_tgt *ptgt;

				plun = ADDR2LUN(&pkt->pkt_address);
				ptgt = plun->lun_tgt;

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "SCSI cmd 0x%x to D_ID=%x timed out",
				    pkt->pkt_cdbp[0], ptgt->tgt_d_id);

				cmd->cmd_state == FCP_PKT_ABORTING ?
				    fcp_fail_cmd(cmd, CMD_RESET,
				    STAT_DEV_RESET) : fcp_fail_cmd(cmd,
				    CMD_TIMEOUT, STAT_ABORTED);
			} else {
				fcp_retransport_cmd(pptr, cmd);
			}
			mutex_enter(&pptr->port_pkt_mutex);
			if (save_head && save_head != pptr->port_pkt_head) {
				/*
				 * Looks like linked list got changed (mostly
				 * happens when an OFFLINE LUN code starts
				 * returning overflow queue commands in
				 * parallel. So bail out and revisit during
				 * next tick
				 */
				break;
			}
		end_of_loop:
			/*
			 * Scan only up to the previously known tail pointer
			 * to avoid excessive processing - lots of new packets
			 * could have been added to the tail or the old ones
			 * re-queued.
			 */
			if (cmd == tail) {
				break;
			}
		}
		mutex_exit(&pptr->port_pkt_mutex);

		/* now check the internal (ELS/report-lun) packet list */
		mutex_enter(&pptr->port_mutex);
		for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
			struct fcp_tgt *ptgt = icmd->ipkt_tgt;

			nicmd = icmd->ipkt_next;
			if ((icmd->ipkt_restart != 0) &&
			    (icmd->ipkt_restart >= fcp_watchdog_time)) {
				/* packet has not timed out */
				continue;
			}

			/* time for packet re-transport */
			if (icmd == pptr->port_ipkt_list) {
				pptr->port_ipkt_list = icmd->ipkt_next;
				if (pptr->port_ipkt_list) {
					pptr->port_ipkt_list->ipkt_prev =
					    NULL;
				}
			} else {
				icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
				if (icmd->ipkt_next) {
					icmd->ipkt_next->ipkt_prev =
					    icmd->ipkt_prev;
				}
			}
			icmd->ipkt_next = NULL;
			icmd->ipkt_prev = NULL;
			mutex_exit(&pptr->port_mutex);

			if (fcp_is_retryable(icmd)) {
				fc_ulp_rscn_info_t *rscnp =
				    (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
				    pkt_ulp_rscn_infop;

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "%x to D_ID=%x Retrying..",
				    icmd->ipkt_opcode,
				    icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);

				/*
				 * Update the RSCN count in the packet
				 * before resending.
				 */

				if (rscnp != NULL) {
					rscnp->ulp_rscn_count =
					    fc_ulp_get_rscn_count(pptr->
					    port_fp_handle);
				}

				mutex_enter(&pptr->port_mutex);
				mutex_enter(&ptgt->tgt_mutex);
				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
					mutex_exit(&ptgt->tgt_mutex);
					mutex_exit(&pptr->port_mutex);
					switch (icmd->ipkt_opcode) {
					int rval;
					case LA_ELS_PLOGI:
						if ((rval = fc_ulp_login(
						    pptr->port_fp_handle,
						    &icmd->ipkt_fpkt, 1)) ==
						    FC_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						if (fcp_handle_ipkt_errors(
						    pptr, ptgt, icmd, rval,
						    "PLOGI") == DDI_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						break;

					case LA_ELS_PRLI:
						if ((rval = fc_ulp_issue_els(
						    pptr->port_fp_handle,
						    icmd->ipkt_fpkt)) ==
						    FC_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						if (fcp_handle_ipkt_errors(
						    pptr, ptgt, icmd, rval,
						    "PRLI") == DDI_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						break;

					default:
						if ((rval = fcp_transport(
						    pptr->port_fp_handle,
						    icmd->ipkt_fpkt, 1)) ==
						    FC_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						/*
						 * NOTE(review): error label
						 * below says "PRLI" though
						 * this is the non-ELS path -
						 * looks like a copy/paste
						 * label; confirm.
						 */
						if (fcp_handle_ipkt_errors(
						    pptr, ptgt, icmd, rval,
						    "PRLI") == DDI_SUCCESS) {
							mutex_enter(
							    &pptr->port_mutex);
							continue;
						}
						break;
					}
				} else {
					mutex_exit(&ptgt->tgt_mutex);
					mutex_exit(&pptr->port_mutex);
				}
			} else {
				fcp_print_error(icmd->ipkt_fpkt);
			}

			(void) fcp_call_finish_init(pptr, ptgt,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause);
			fcp_icmd_free(pptr, icmd);
			mutex_enter(&pptr->port_mutex);
		}

		pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
		mutex_exit(&pptr->port_mutex);
		mutex_enter(&fcp_global_mutex);

	end_of_watchdog:
		/*
		 * Bail out early before getting into trouble
		 */
		if (save_port != fcp_port_head) {
			break;
		}
	}

	if (fcp_watchdog_init > 0) {
		/* reschedule timeout to go again */
		fcp_watchdog_id =
		    timeout(fcp_watch, NULL, fcp_watchdog_tick);
	}
	mutex_exit(&fcp_global_mutex);
}


/*
 * fcp_check_reset_delay
 *
 * Description: Walks pptr->port_reset_list; for each element taken off the
 *		list it updates the target or LUN state to FCP_RESET |
 *		FCP_LUN_BUSY (when the target's change count still matches)
 *		and aborts all pending commands via fcp_abort_all().
 *		Called with pptr->port_mutex held; the mutex is dropped
 *		around fcp_abort_all() and re-acquired.
 */
static void
fcp_check_reset_delay(struct fcp_port *pptr)
{
	uint32_t		tgt_cnt;
	int			level;
	struct fcp_tgt		*ptgt;
	struct fcp_lun		*plun;
	struct fcp_reset_elem	*cur = NULL;
	struct fcp_reset_elem	*next = NULL;
	struct fcp_reset_elem	*prev = NULL;

	ASSERT(mutex_owned(&pptr->port_mutex));

	next = pptr->port_reset_list;
	while ((cur = next) != NULL) {
		next = cur->next;

		/*
		 * NOTE(review): elements whose timeout lies in the past are
		 * skipped (and remain on the list) while elements whose
		 * timeout is still >= fcp_watchdog_time are processed below.
		 * This reads inverted relative to the usual expiry idiom -
		 * confirm the intended semantics before changing.
		 */
		if (cur->timeout < fcp_watchdog_time) {
			prev = cur;
			continue;
		}

		ptgt = cur->tgt;
		plun = cur->lun;
		tgt_cnt = cur->tgt_cnt;

		if (ptgt) {
			level = RESET_TARGET;
		} else {
			ASSERT(plun != NULL);
			level = RESET_LUN;
			ptgt = plun->lun_tgt;
		}
		if (prev) {
			prev->next = next;
		} else {
			/*
			 * Because we drop port mutex while doing aborts for
			 * packets, we can't rely on reset_list pointing to
			 * our head
			 */
			if (cur == pptr->port_reset_list) {
				pptr->port_reset_list = next;
			} else {
				struct fcp_reset_elem *which;

				which = pptr->port_reset_list;
				while (which && which->next != cur) {
					which = which->next;
				}
				ASSERT(which != NULL);

				which->next = next;
				prev = which;
			}
		}

		kmem_free(cur, sizeof (*cur));

		/* only act if the target generation has not moved on */
		if (tgt_cnt == ptgt->tgt_change_cnt) {
			mutex_enter(&ptgt->tgt_mutex);
			if (level == RESET_TARGET) {
				fcp_update_tgt_state(ptgt,
				    FCP_RESET, FCP_LUN_BUSY);
			} else {
				fcp_update_lun_state(plun,
				    FCP_RESET, FCP_LUN_BUSY);
			}
			mutex_exit(&ptgt->tgt_mutex);

			/* port mutex dropped across the abort of all cmds */
			mutex_exit(&pptr->port_mutex);
			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
			mutex_enter(&pptr->port_mutex);
		}
	}
}


/*
 * fcp_abort_all
 *
 * Description: Flushes overflow-queue entries belonging to the given target
 *		(ttgt) or LUN (rlun), completing them with CMD_RESET /
 *		STAT_DEV_RESET.  Then, unless the FCA returns all queued
 *		commands on reset (FC_RESET_RETURN_ALL), attempts to abort
 *		every FCP_PKT_ISSUED command on the matching LUN(s); commands
 *		whose abort fails are put back on the overflow queue with
 *		FCP_INVALID_TIMEOUT so the watch thread can fail them later.
 */
static void
fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
    struct fcp_lun *rlun, int tgt_cnt)
{
	int rval;
	struct fcp_lun *tlun, *nlun;
	struct fcp_pkt *pcmd = NULL, *ncmd = NULL,
	    *cmd = NULL, *head = NULL,
	    *tail = NULL;

	/* pull every matching command off the overflow queue onto head */
	mutex_enter(&pptr->port_pkt_mutex);
	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
		struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
		struct fcp_tgt *ptgt = plun->lun_tgt;

		ncmd = cmd->cmd_next;

		if (ptgt != ttgt && plun != rlun) {
			pcmd = cmd;
			continue;
		}

		if (pcmd != NULL) {
			ASSERT(pptr->port_pkt_head != cmd);
			pcmd->cmd_next = ncmd;
		} else {
			ASSERT(cmd == pptr->port_pkt_head);
			pptr->port_pkt_head = ncmd;
		}
		if (pptr->port_pkt_tail == cmd) {
			ASSERT(cmd->cmd_next == NULL);
			pptr->port_pkt_tail = pcmd;
			if (pcmd != NULL) {
				pcmd->cmd_next = NULL;
			}
		}

		if (head == NULL) {
			head = tail = cmd;
		} else {
			ASSERT(tail != NULL);
			tail->cmd_next = cmd;
			tail = cmd;
		}
		cmd->cmd_next = NULL;
	}
	mutex_exit(&pptr->port_pkt_mutex);

	/* complete the collected commands with a reset indication */
	for (cmd = head; cmd != NULL; cmd = ncmd) {
		struct scsi_pkt *pkt = cmd->cmd_pkt;

		ncmd = cmd->cmd_next;
		ASSERT(pkt != NULL);

		mutex_enter(&pptr->port_mutex);
		if (ttgt->tgt_change_cnt == tgt_cnt) {
			mutex_exit(&pptr->port_mutex);
			cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
			pkt->pkt_reason = CMD_RESET;
			pkt->pkt_statistics |= STAT_DEV_RESET;
			cmd->cmd_state = FCP_PKT_IDLE;
			fcp_post_callback(cmd);
		} else {
			mutex_exit(&pptr->port_mutex);
		}
	}

	/*
	 * If the FCA will return all the commands in its queue then our
	 * work is easy, just return.
	 */

	if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
		return;
	}

	/*
	 * For RESET_LUN get hold of target pointer
	 */
	if (ttgt == NULL) {
		ASSERT(rlun != NULL);

		ttgt = rlun->lun_tgt;

		ASSERT(ttgt != NULL);
	}

	/*
	 * There are some severe race conditions here.
	 * While we are trying to abort the pkt, it might be completing
	 * so mark it aborted and if the abort does not succeed then
	 * handle it in the watch thread.
	 */
	mutex_enter(&ttgt->tgt_mutex);
	nlun = ttgt->tgt_lun;
	mutex_exit(&ttgt->tgt_mutex);
	while ((tlun = nlun) != NULL) {
		int restart = 0;
		if (rlun && rlun != tlun) {
			mutex_enter(&ttgt->tgt_mutex);
			nlun = tlun->lun_next;
			mutex_exit(&ttgt->tgt_mutex);
			continue;
		}
		mutex_enter(&tlun->lun_mutex);
		cmd = tlun->lun_pkt_head;
		while (cmd != NULL) {
			if (cmd->cmd_state == FCP_PKT_ISSUED) {
				struct scsi_pkt *pkt;

				restart = 1;
				cmd->cmd_state = FCP_PKT_ABORTING;
				mutex_exit(&tlun->lun_mutex);
				rval = fc_ulp_abort(pptr->port_fp_handle,
				    cmd->cmd_fp_pkt, KM_SLEEP);
				if (rval == FC_SUCCESS) {
					pkt = cmd->cmd_pkt;
					pkt->pkt_reason = CMD_RESET;
					pkt->pkt_statistics |= STAT_DEV_RESET;
					cmd->cmd_state = FCP_PKT_IDLE;
					fcp_post_callback(cmd);
				} else {
					caddr_t msg;

					(void) fc_ulp_error(rval, &msg);

					/*
					 * This part is tricky. The abort
					 * failed and now the command could
					 * be completing. The cmd_state ==
					 * FCP_PKT_ABORTING should save
					 * us in fcp_cmd_callback. If we
					 * are already aborting ignore the
					 * command in fcp_cmd_callback.
					 * Here we leave this packet for 20
					 * sec to be aborted in the
					 * fcp_watch thread.
					 */
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Abort failed after reset %s",
					    msg);

					cmd->cmd_timeout =
					    fcp_watchdog_time +
					    cmd->cmd_pkt->pkt_time +
					    FCP_FAILED_DELAY;

					cmd->cmd_fp_pkt->pkt_timeout =
					    FCP_INVALID_TIMEOUT;
					/*
					 * This is a hack, cmd is put in the
					 * overflow queue so that it can be
					 * timed out finally
					 */
					cmd->cmd_flags |= CFLAG_IN_QUEUE;

					mutex_enter(&pptr->port_pkt_mutex);
					if (pptr->port_pkt_head) {
						ASSERT(pptr->port_pkt_tail
						    != NULL);
						pptr->port_pkt_tail->cmd_next
						    = cmd;
						pptr->port_pkt_tail = cmd;
					} else {
						ASSERT(pptr->port_pkt_tail
						    == NULL);
						pptr->port_pkt_head =
						    pptr->port_pkt_tail
						    = cmd;
					}
					cmd->cmd_next = NULL;
					mutex_exit(&pptr->port_pkt_mutex);
				}
				/* restart the scan; the list may have moved */
				mutex_enter(&tlun->lun_mutex);
				cmd = tlun->lun_pkt_head;
			} else {
				cmd = cmd->cmd_forw;
			}
		}
		mutex_exit(&tlun->lun_mutex);

		mutex_enter(&ttgt->tgt_mutex);
		restart == 1 ?
		    (nlun = ttgt->tgt_lun) : (nlun = tlun->lun_next);
		mutex_exit(&ttgt->tgt_mutex);

		mutex_enter(&pptr->port_mutex);
		if (tgt_cnt != ttgt->tgt_change_cnt) {
			mutex_exit(&pptr->port_mutex);
			return;
		} else {
			mutex_exit(&pptr->port_mutex);
		}
	}
}


/*
 * unlink the soft state, returning the soft state found (if any)
 *
 * acquires and releases the global mutex
 */
struct fcp_port *
fcp_soft_state_unlink(struct fcp_port *pptr)
{
	struct fcp_port	*hptr;		/* ptr index */
	struct fcp_port	*tptr;		/* prev hptr */

	mutex_enter(&fcp_global_mutex);
	for (hptr = fcp_port_head, tptr = NULL;
	    hptr != NULL;
	    tptr = hptr, hptr = hptr->port_next) {
		if (hptr == pptr) {
			/* we found a match -- remove this item */
			if (tptr == NULL) {
				/* we're at the head of the list */
				fcp_port_head = hptr->port_next;
			} else {
				tptr->port_next = hptr->port_next;
			}
			break;			/* success */
		}
	}
	/* last port gone: the LUN blacklist is no longer needed */
	if (fcp_port_head == NULL) {
		fcp_cleanup_blacklist(&fcp_lun_blacklist);
	}
	mutex_exit(&fcp_global_mutex);
	return (hptr);
}


/*
 * called by fcp_scsi_hba_tgt_init to find a LUN given a
 * WWN and a LUN number
 */
/* ARGSUSED */
static struct fcp_lun *
fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
{
	int hash;
	struct fcp_tgt *ptgt;
	struct fcp_lun *plun;

	ASSERT(mutex_owned(&pptr->port_mutex));

	hash = FCP_HASH(wwn);
	for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
	    ptgt = ptgt->tgt_next) {
		if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
		    sizeof (ptgt->tgt_port_wwn)) == 0) {
			mutex_enter(&ptgt->tgt_mutex);
			for (plun = ptgt->tgt_lun;
			    plun != NULL;
			    plun = plun->lun_next) {
				if (plun->lun_num == lun) {
					mutex_exit(&ptgt->tgt_mutex);
					return (plun);
				}
			}
			mutex_exit(&ptgt->tgt_mutex);
			return (NULL);
		}
	}
	return (NULL);
}

/*
 * Function: fcp_prepare_pkt
 *
 * Description: This function prepares the SCSI cmd pkt, passed by the caller,
 *		for fcp_start(). It binds the data or partially maps it.
 *		Builds the FCP header and starts the initialization of the
 *		Fibre Channel header.
 *
 * Argument:	*pptr		FCP port.
 *		*cmd		FCP packet.
 *		*plun		LUN the command will be sent to.
 *
 * Context:	User, Kernel and Interrupt context.
 */
static void
fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
    struct fcp_lun *plun)
{
	fc_packet_t	*fpkt = cmd->cmd_fp_pkt;
	struct fcp_tgt	*ptgt = plun->lun_tgt;
	struct fcp_cmd	*fcmd = &cmd->cmd_fcp_cmd;

	ASSERT(cmd->cmd_pkt->pkt_comp ||
	    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));

	if (cmd->cmd_pkt->pkt_numcookies) {
		/* data phase: direction follows the DMA binding flags */
		if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
			fcmd->fcp_cntl.cntl_read_data = 1;
			fcmd->fcp_cntl.cntl_write_data = 0;
			fpkt->pkt_tran_type = FC_PKT_FCP_READ;
		} else {
			fcmd->fcp_cntl.cntl_read_data = 0;
			fcmd->fcp_cntl.cntl_write_data = 1;
			fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
		}

		fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;

		fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
		ASSERT(fpkt->pkt_data_cookie_cnt <=
		    pptr->port_data_dma_attr.dma_attr_sgllen);

		cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;

		/* FCA needs pkt_datalen to be set */
		fpkt->pkt_datalen = cmd->cmd_dmacount;
		fcmd->fcp_data_len = cmd->cmd_dmacount;
	} else {
		/* no data transfer for this command */
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 0;
		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
		fpkt->pkt_datalen = 0;
		fcmd->fcp_data_len = 0;
	}

	/* set up the Tagged Queuing type */
	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
	} else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
	} else {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	}

	fcmd->fcp_ent_addr = plun->lun_addr;

	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
	} else {
		ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
	}

	/* reset the SCSI packet completion fields */
	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
	cmd->cmd_pkt->pkt_state = 0;
	cmd->cmd_pkt->pkt_statistics = 0;
	cmd->cmd_pkt->pkt_resid = 0;

	cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;

	if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
		fpkt->pkt_comp = NULL;
	} else {
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
		if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
		}
		fpkt->pkt_comp = fcp_cmd_callback;
	}

	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & FCP_STATE_SUSPENDED) {
		fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
	}
	mutex_exit(&pptr->port_mutex);

	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;

	/*
	 * Save a few kernel cycles here
	 */
#ifndef	__lock_lint
	fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
#endif /* __lock_lint */
}

/*
 * Completion shim: hand the SCSI packet back to the target driver via
 * scsi_hba_pkt_comp().
 */
static void
fcp_post_callback(struct fcp_pkt *cmd)
{
	scsi_hba_pkt_comp(cmd->cmd_pkt);
}


/*
 * called to do polled I/O by fcp_start()
 *
 * return a transport status value, i.e. TRAN_ACCEPT for success
 */
static int
fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
{
	int rval;

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts++;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	if (cmd->cmd_fp_pkt->pkt_timeout) {
		cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
	} else {
		cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
	}

	/* polled packets must not carry a completion callback */
	ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);

	cmd->cmd_state = FCP_PKT_ISSUED;

	rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);

#ifdef	DEBUG
	mutex_enter(&pptr->port_pkt_mutex);
	pptr->port_npkts--;
	mutex_exit(&pptr->port_pkt_mutex);
#endif /* DEBUG */

	cmd->cmd_state = FCP_PKT_IDLE;

	/* map the FC transport result onto a SCSA TRAN_* status */
	switch (rval) {
	case FC_SUCCESS:
		if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
			fcp_complete_pkt(cmd->cmd_fp_pkt);
			rval = TRAN_ACCEPT;
		} else {
			rval = TRAN_FATAL_ERROR;
		}
		break;

	case FC_TRAN_BUSY:
		rval = TRAN_BUSY;
		cmd->cmd_pkt->pkt_resid = 0;
		break;

	case FC_BADPACKET:
		rval = TRAN_BADPKT;
		break;

	default:
		rval = TRAN_FATAL_ERROR;
		break;
	}

	return (rval);
}


/*
 * called by some of the following transport-called routines to convert
 * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
 */
static struct fcp_port *
fcp_dip2port(dev_info_t *dip)
{
	int instance;

	instance = ddi_get_instance(dip);
	return (ddi_get_soft_state(fcp_softstate, instance));
}


/*
 * called internally to return a LUN given a child info pointer (cip)
 */
struct fcp_lun *
fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
{
	struct fcp_tgt *ptgt;
	struct fcp_lun *plun;
	int i;


	ASSERT(mutex_owned(&pptr->port_mutex));

	/* exhaustive scan of every target hash bucket and every LUN */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i];
		    ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			mutex_enter(&ptgt->tgt_mutex);
			for (plun = ptgt->tgt_lun; plun != NULL;
			    plun = plun->lun_next) {
				mutex_enter(&plun->lun_mutex);
				if (plun->lun_cip == cip) {
					mutex_exit(&plun->lun_mutex);
					mutex_exit(&ptgt->tgt_mutex);
					return (plun);	/* match found */
				}
				mutex_exit(&plun->lun_mutex);
			}
			mutex_exit(&ptgt->tgt_mutex);
		}
	}
	return (NULL);				/* no LUN found */
}

/*
 * pass an element to the hotplug list, kick the hotplug thread
 * and wait for the element to get processed by the hotplug thread.
 * on return the element is freed.
 *
 * return zero success and non-zero on failure
 *
 * acquires/releases the target mutex
 *
 */
static int
fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
    child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
{
	struct fcp_hp_elem	*elem;
	int			rval;

	mutex_enter(&plun->lun_tgt->tgt_mutex);
	if ((elem = fcp_pass_to_hp(pptr, plun, cip,
	    what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
		mutex_exit(&plun->lun_tgt->tgt_mutex);
		fcp_log(CE_CONT, pptr->port_dip,
		    "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
		    what, plun->lun_tgt->tgt_d_id, plun->lun_num);
		return (NDI_FAILURE);
	}
	mutex_exit(&plun->lun_tgt->tgt_mutex);
	mutex_enter(&elem->mutex);
	/* block until the hotplug daemon signals completion */
	if (elem->wait) {
		while (elem->wait) {
			cv_wait(&elem->cv, &elem->mutex);
		}
	}
	rval = (elem->result);
	mutex_exit(&elem->mutex);
	mutex_destroy(&elem->mutex);
	cv_destroy(&elem->cv);
	kmem_free(elem, sizeof (struct fcp_hp_elem));
	return (rval);
}

/*
 * pass an element to the hotplug list, and then
 * kick the hotplug thread
 *
 * return Boolean success, i.e. non-zero if all goes well, else zero on error
 *
 * acquires/releases the hotplug mutex
 *
 * called with the target mutex owned
 *
 * memory acquired in NOSLEEP mode
 * NOTE: if wait is set to 1 then the caller is responsible for waiting on
 *	 for the hp daemon to process the request and is responsible for
 *	 freeing the element
 */
static struct fcp_hp_elem *
fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
    child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
{
	struct fcp_hp_elem	*elem;
	dev_info_t		*pdip;

	ASSERT(pptr != NULL);
	ASSERT(plun != NULL);
	ASSERT(plun->lun_tgt != NULL);
	ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));

	/* create space for a hotplug element */
	if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
	    == NULL) {
		fcp_log(CE_WARN, NULL,
		    "!can't allocate memory for hotplug element");
		return (NULL);
	}

	/* fill in hotplug element */
	elem->port = pptr;
	elem->lun = plun;
	elem->cip = cip;
	elem->old_lun_mpxio = plun->lun_mpxio;
	elem->what = what;
	elem->flags = flags;
	elem->link_cnt = link_cnt;
	elem->tgt_cnt = tgt_cnt;
	elem->wait = wait;
	mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&elem->cv, NULL, CV_DRIVER, NULL);

	/* schedule the hotplug task */
	pdip = pptr->port_dip;
	mutex_enter(&plun->lun_mutex);
	if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
		plun->lun_event_count++;
		elem->event_cnt = plun->lun_event_count;
	}
	mutex_exit(&plun->lun_mutex);
	if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
	    (void *)elem, KM_NOSLEEP) == NULL) {
		/* dispatch failed: undo the event count and free the elem */
		mutex_enter(&plun->lun_mutex);
		if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
			plun->lun_event_count--;
		}
		mutex_exit(&plun->lun_mutex);
		kmem_free(elem, sizeof (*elem));
		return (0);
	}

	return (elem);
}


/*
 * fcp_retransport_cmd
 *
 * Description: Attempts to re-issue a command that sits on the overflow
 *		queue.  If the LUN is neither busy nor offline and the port
 *		is not onlining, the packet is re-prepared and handed to
 *		fcp_transport(); on transport failure (or when the LUN/port
 *		state forbids the retry) the command is re-queued via
 *		fcp_queue_pkt().
 */
static void
fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
{
	int			rval;
	struct scsi_address	*ap;
	struct fcp_lun	*plun;
	struct fcp_tgt	*ptgt;
	fc_packet_t	*fpkt;

	ap = &cmd->cmd_pkt->pkt_address;
	plun = ADDR2LUN(ap);
	ptgt = plun->lun_tgt;

	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);

	cmd->cmd_state = FCP_PKT_IDLE;

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
	    (!(pptr->port_state & FCP_STATE_ONLINING))) {
		fc_ulp_rscn_info_t *rscnp;

		cmd->cmd_state = FCP_PKT_ISSUED;

		/*
		 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
		 * originally NULL, hence we try to set it to the pd pointed
		 * to by the SCSI device we're trying to get to.
		 */

		fpkt = cmd->cmd_fp_pkt;
		if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
			fpkt->pkt_pd = ptgt->tgt_pd_handle;
			/*
			 * We need to notify the transport that we now have a
			 * reference to the remote port handle.
			 */
			fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
		}

		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);

		/* prepare the packet */

		fcp_prepare_pkt(pptr, cmd, plun);

		rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
		    pkt_ulp_rscn_infop;

		/* zero pkt_time means no watchdog deadline */
		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
		    fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;

		if (rscnp != NULL) {
			rscnp->ulp_rscn_count =
			    fc_ulp_get_rscn_count(pptr->
			    port_fp_handle);
		}

		rval = fcp_transport(pptr->port_fp_handle,
		    cmd->cmd_fp_pkt, 0);

		if (rval == FC_SUCCESS) {
			return;
		}
		cmd->cmd_state &= ~FCP_PKT_ISSUED;
	} else {
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
	}

	fcp_queue_pkt(pptr, cmd);
}


/*
 * fcp_fail_cmd
 *
 * Description: Takes the command out of the overflow-queue state, records
 *		the supplied pkt_reason/pkt_statistics on the SCSI packet and
 *		completes it through fcp_post_callback().
 */
static void
fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
{
	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);

	cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
	cmd->cmd_state = FCP_PKT_IDLE;

	cmd->cmd_pkt->pkt_reason = reason;
	cmd->cmd_pkt->pkt_state = 0;
	cmd->cmd_pkt->pkt_statistics = statistics;

	fcp_post_callback(cmd);
}

/*
 * Function: fcp_queue_pkt
 *
 * Description: This function queues the packet passed by the caller into
 *		the list of packets of the FCP port.
 *
 * Argument:	*pptr		FCP port.
 *		*cmd		FCP packet to queue.
 *
 * Return Value: None
 *
 * Context:	User, Kernel and Interrupt context.
 */
static void
fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
{
	/* NOTE(review): flag test compared against NULL; plain 0 is meant */
	ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == NULL);

	mutex_enter(&pptr->port_pkt_mutex);
	cmd->cmd_flags |= CFLAG_IN_QUEUE;
	ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
	cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;

	/*
	 * zero pkt_time means hang around for ever
	 */
	if (cmd->cmd_pkt->pkt_time) {
		if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
			cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
		} else {
			/*
			 * Indicate the watch thread to fail the
			 * command by setting it to highest value
			 */
			cmd->cmd_timeout = fcp_watchdog_time;
			cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
		}
	}

	/* append at the tail of the port overflow queue */
	if (pptr->port_pkt_head) {
		ASSERT(pptr->port_pkt_tail != NULL);

		pptr->port_pkt_tail->cmd_next = cmd;
		pptr->port_pkt_tail = cmd;
	} else {
		ASSERT(pptr->port_pkt_tail == NULL);

		pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
	}
	cmd->cmd_next = NULL;
	mutex_exit(&pptr->port_pkt_mutex);
}

/*
 * Function: fcp_update_targets
 *
 * Description: This function applies the specified change of state to all
 *		the targets listed. The operation applied is 'set'.
 *
 * Argument:	*pptr		FCP port.
 *		*dev_list	Array of fc_portmap_t structures.
 *		count		Length of dev_list.
 *		state		State bits to update.
 *		cause		Reason for the update.
 *
 * Return Value: None
 *
 * Context:	User, Kernel and Interrupt context.
 *		The mutex pptr->port_mutex must be held.
14637 */ 14638 static void 14639 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list, 14640 uint32_t count, uint32_t state, int cause) 14641 { 14642 fc_portmap_t *map_entry; 14643 struct fcp_tgt *ptgt; 14644 14645 ASSERT(MUTEX_HELD(&pptr->port_mutex)); 14646 14647 while (count--) { 14648 map_entry = &(dev_list[count]); 14649 ptgt = fcp_lookup_target(pptr, 14650 (uchar_t *)&(map_entry->map_pwwn)); 14651 if (ptgt == NULL) { 14652 continue; 14653 } 14654 14655 mutex_enter(&ptgt->tgt_mutex); 14656 ptgt->tgt_trace = 0; 14657 ptgt->tgt_change_cnt++; 14658 ptgt->tgt_statec_cause = cause; 14659 ptgt->tgt_tmp_cnt = 1; 14660 fcp_update_tgt_state(ptgt, FCP_SET, state); 14661 mutex_exit(&ptgt->tgt_mutex); 14662 } 14663 } 14664 14665 static int 14666 fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt, 14667 int lcount, int tcount, int cause) 14668 { 14669 int rval; 14670 14671 mutex_enter(&pptr->port_mutex); 14672 rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause); 14673 mutex_exit(&pptr->port_mutex); 14674 14675 return (rval); 14676 } 14677 14678 14679 static int 14680 fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt, 14681 int lcount, int tcount, int cause) 14682 { 14683 int finish_init = 0; 14684 int finish_tgt = 0; 14685 int do_finish_init = 0; 14686 int rval = FCP_NO_CHANGE; 14687 14688 if (cause == FCP_CAUSE_LINK_CHANGE || 14689 cause == FCP_CAUSE_LINK_DOWN) { 14690 do_finish_init = 1; 14691 } 14692 14693 if (ptgt != NULL) { 14694 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace, 14695 FCP_BUF_LEVEL_2, 0, 14696 "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;" 14697 " cause = %d, d_id = 0x%x, tgt_done = %d", 14698 pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount, 14699 pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause, 14700 ptgt->tgt_d_id, ptgt->tgt_done); 14701 14702 mutex_enter(&ptgt->tgt_mutex); 14703 14704 if (tcount && (ptgt->tgt_change_cnt != tcount)) { 14705 rval = FCP_DEV_CHANGE; 14706 if 
(do_finish_init && ptgt->tgt_done == 0) { 14707 ptgt->tgt_done++; 14708 finish_init = 1; 14709 } 14710 } else { 14711 if (--ptgt->tgt_tmp_cnt <= 0) { 14712 ptgt->tgt_tmp_cnt = 0; 14713 finish_tgt = 1; 14714 14715 if (do_finish_init) { 14716 finish_init = 1; 14717 } 14718 } 14719 } 14720 mutex_exit(&ptgt->tgt_mutex); 14721 } else { 14722 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace, 14723 FCP_BUF_LEVEL_2, 0, 14724 "Call Finish Init for NO target"); 14725 14726 if (do_finish_init) { 14727 finish_init = 1; 14728 } 14729 } 14730 14731 if (finish_tgt) { 14732 ASSERT(ptgt != NULL); 14733 14734 mutex_enter(&ptgt->tgt_mutex); 14735 #ifdef DEBUG 14736 bzero(ptgt->tgt_tmp_cnt_stack, 14737 sizeof (ptgt->tgt_tmp_cnt_stack)); 14738 14739 ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack, 14740 FCP_STACK_DEPTH); 14741 #endif /* DEBUG */ 14742 mutex_exit(&ptgt->tgt_mutex); 14743 14744 (void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause); 14745 } 14746 14747 if (finish_init && lcount == pptr->port_link_cnt) { 14748 ASSERT(pptr->port_tmp_cnt > 0); 14749 if (--pptr->port_tmp_cnt == 0) { 14750 fcp_finish_init(pptr); 14751 } 14752 } else if (lcount != pptr->port_link_cnt) { 14753 FCP_TRACE(fcp_logq, pptr->port_instbuf, 14754 fcp_trace, FCP_BUF_LEVEL_2, 0, 14755 "fcp_call_finish_init_held,1: state change occured" 14756 " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0); 14757 } 14758 14759 return (rval); 14760 } 14761 14762 static void 14763 fcp_reconfigure_luns(void * tgt_handle) 14764 { 14765 uint32_t dev_cnt; 14766 fc_portmap_t *devlist; 14767 struct fcp_tgt *ptgt = (struct fcp_tgt *)tgt_handle; 14768 struct fcp_port *pptr = ptgt->tgt_port; 14769 14770 /* 14771 * If the timer that fires this off got canceled too late, the 14772 * target could have been destroyed. 
14773 */ 14774 14775 if (ptgt->tgt_tid == NULL) { 14776 return; 14777 } 14778 14779 devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP); 14780 if (devlist == NULL) { 14781 fcp_log(CE_WARN, pptr->port_dip, 14782 "!fcp%d: failed to allocate for portmap", 14783 pptr->port_instance); 14784 return; 14785 } 14786 14787 dev_cnt = 1; 14788 devlist->map_pd = ptgt->tgt_pd_handle; 14789 devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr; 14790 devlist->map_did.port_id = ptgt->tgt_d_id; 14791 14792 bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE); 14793 bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE); 14794 14795 devlist->map_state = PORT_DEVICE_LOGGED_IN; 14796 devlist->map_type = PORT_DEVICE_REPORTLUN_CHANGED; 14797 devlist->map_flags = 0; 14798 14799 fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE, 14800 pptr->port_topology, devlist, dev_cnt, pptr->port_id); 14801 14802 /* 14803 * Clear the tgt_tid after no more references to 14804 * the fcp_tgt 14805 */ 14806 mutex_enter(&ptgt->tgt_mutex); 14807 ptgt->tgt_tid = NULL; 14808 mutex_exit(&ptgt->tgt_mutex); 14809 14810 kmem_free(devlist, sizeof (*devlist)); 14811 } 14812 14813 14814 static void 14815 fcp_free_targets(struct fcp_port *pptr) 14816 { 14817 int i; 14818 struct fcp_tgt *ptgt; 14819 14820 mutex_enter(&pptr->port_mutex); 14821 for (i = 0; i < FCP_NUM_HASH; i++) { 14822 ptgt = pptr->port_tgt_hash_table[i]; 14823 while (ptgt != NULL) { 14824 struct fcp_tgt *next_tgt = ptgt->tgt_next; 14825 14826 fcp_free_target(ptgt); 14827 ptgt = next_tgt; 14828 } 14829 } 14830 mutex_exit(&pptr->port_mutex); 14831 } 14832 14833 14834 static void 14835 fcp_free_target(struct fcp_tgt *ptgt) 14836 { 14837 struct fcp_lun *plun; 14838 timeout_id_t tid; 14839 14840 mutex_enter(&ptgt->tgt_mutex); 14841 tid = ptgt->tgt_tid; 14842 14843 /* 14844 * Cancel any pending timeouts for this target. 
14845 */ 14846 14847 if (tid != NULL) { 14848 /* 14849 * Set tgt_tid to NULL first to avoid a race in the callback. 14850 * If tgt_tid is NULL, the callback will simply return. 14851 */ 14852 ptgt->tgt_tid = NULL; 14853 mutex_exit(&ptgt->tgt_mutex); 14854 (void) untimeout(tid); 14855 mutex_enter(&ptgt->tgt_mutex); 14856 } 14857 14858 plun = ptgt->tgt_lun; 14859 while (plun != NULL) { 14860 struct fcp_lun *next_lun = plun->lun_next; 14861 14862 fcp_dealloc_lun(plun); 14863 plun = next_lun; 14864 } 14865 14866 mutex_exit(&ptgt->tgt_mutex); 14867 fcp_dealloc_tgt(ptgt); 14868 } 14869 14870 /* 14871 * Function: fcp_is_retryable 14872 * 14873 * Description: Indicates if the internal packet is retryable. 14874 * 14875 * Argument: *icmd FCP internal packet. 14876 * 14877 * Return Value: 0 Not retryable 14878 * 1 Retryable 14879 * 14880 * Context: User, Kernel and Interrupt context 14881 */ 14882 static int 14883 fcp_is_retryable(struct fcp_ipkt *icmd) 14884 { 14885 if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED | 14886 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) { 14887 return (0); 14888 } 14889 14890 return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) < 14891 icmd->ipkt_port->port_deadline) ? 1 : 0); 14892 } 14893 14894 /* 14895 * Function: fcp_create_on_demand 14896 * 14897 * Argument: *pptr FCP port. 14898 * *pwwn Port WWN. 14899 * 14900 * Return Value: 0 Success 14901 * EIO 14902 * ENOMEM 14903 * EBUSY 14904 * EINVAL 14905 * 14906 * Context: User and Kernel context 14907 */ 14908 static int 14909 fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn) 14910 { 14911 int wait_ms; 14912 int tcount; 14913 int lcount; 14914 int ret; 14915 int error; 14916 int rval = EIO; 14917 int ntries; 14918 fc_portmap_t *devlist; 14919 opaque_t pd; 14920 struct fcp_lun *plun; 14921 struct fcp_tgt *ptgt; 14922 int old_manual = 0; 14923 14924 /* Allocates the fc_portmap_t structure. 
*/ 14925 devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP); 14926 14927 /* 14928 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown 14929 * in the commented statement below: 14930 * 14931 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT; 14932 * 14933 * Below, the deadline for the discovery process is set. 14934 */ 14935 mutex_enter(&pptr->port_mutex); 14936 pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE; 14937 mutex_exit(&pptr->port_mutex); 14938 14939 /* 14940 * We try to find the remote port based on the WWN provided by the 14941 * caller. We actually ask fp/fctl if it has it. 14942 */ 14943 pd = fc_ulp_get_remote_port(pptr->port_fp_handle, 14944 (la_wwn_t *)pwwn, &error, 1); 14945 14946 if (pd == NULL) { 14947 kmem_free(devlist, sizeof (*devlist)); 14948 return (rval); 14949 } 14950 14951 /* 14952 * The remote port was found. We ask fp/fctl to update our 14953 * fc_portmap_t structure. 14954 */ 14955 ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, 14956 (la_wwn_t *)pwwn, devlist); 14957 if (ret != FC_SUCCESS) { 14958 kmem_free(devlist, sizeof (*devlist)); 14959 return (rval); 14960 } 14961 14962 /* 14963 * The map flag field is set to indicates that the creation is being 14964 * done at the user request (Ioclt probably luxadm or cfgadm). 14965 */ 14966 devlist->map_type = PORT_DEVICE_USER_CREATE; 14967 14968 mutex_enter(&pptr->port_mutex); 14969 14970 /* 14971 * We check to see if fcp already has a target that describes the 14972 * device being created. If not it is created. 
14973 */ 14974 ptgt = fcp_lookup_target(pptr, pwwn); 14975 if (ptgt == NULL) { 14976 lcount = pptr->port_link_cnt; 14977 mutex_exit(&pptr->port_mutex); 14978 14979 ptgt = fcp_alloc_tgt(pptr, devlist, lcount); 14980 if (ptgt == NULL) { 14981 fcp_log(CE_WARN, pptr->port_dip, 14982 "!FC target allocation failed"); 14983 return (ENOMEM); 14984 } 14985 14986 mutex_enter(&pptr->port_mutex); 14987 } 14988 14989 mutex_enter(&ptgt->tgt_mutex); 14990 ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE; 14991 ptgt->tgt_tmp_cnt = 1; 14992 ptgt->tgt_device_created = 0; 14993 /* 14994 * If fabric and auto config is set but the target was 14995 * manually unconfigured then reset to the manual_config_only to 14996 * 0 so the device will get configured. 14997 */ 14998 if (FC_TOP_EXTERNAL(pptr->port_topology) && 14999 fcp_enable_auto_configuration && 15000 ptgt->tgt_manual_config_only == 1) { 15001 old_manual = 1; 15002 ptgt->tgt_manual_config_only = 0; 15003 } 15004 mutex_exit(&ptgt->tgt_mutex); 15005 15006 fcp_update_targets(pptr, devlist, 1, 15007 FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE); 15008 15009 lcount = pptr->port_link_cnt; 15010 tcount = ptgt->tgt_change_cnt; 15011 15012 if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount, 15013 tcount, FCP_CAUSE_USER_CREATE) == TRUE) { 15014 if (FC_TOP_EXTERNAL(pptr->port_topology) && 15015 fcp_enable_auto_configuration && old_manual) { 15016 mutex_enter(&ptgt->tgt_mutex); 15017 ptgt->tgt_manual_config_only = 1; 15018 mutex_exit(&ptgt->tgt_mutex); 15019 } 15020 15021 if (pptr->port_link_cnt != lcount || 15022 ptgt->tgt_change_cnt != tcount) { 15023 rval = EBUSY; 15024 } 15025 mutex_exit(&pptr->port_mutex); 15026 15027 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace, 15028 FCP_BUF_LEVEL_3, 0, 15029 "fcp_create_on_demand: mapflags ptgt=%x, " 15030 "lcount=%x::port_link_cnt=%x, " 15031 "tcount=%x: tgt_change_cnt=%x, rval=%x", 15032 ptgt, lcount, pptr->port_link_cnt, 15033 tcount, ptgt->tgt_change_cnt, rval); 15034 return (rval); 
15035 } 15036 15037 /* 15038 * Due to lack of synchronization mechanisms, we perform 15039 * periodic monitoring of our request; Because requests 15040 * get dropped when another one supercedes (either because 15041 * of a link change or a target change), it is difficult to 15042 * provide a clean synchronization mechanism (such as a 15043 * semaphore or a conditional variable) without exhaustively 15044 * rewriting the mainline discovery code of this driver. 15045 */ 15046 wait_ms = 500; 15047 15048 ntries = fcp_max_target_retries; 15049 15050 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace, 15051 FCP_BUF_LEVEL_3, 0, 15052 "fcp_create_on_demand(1): ntries=%x, ptgt=%x, " 15053 "lcount=%x::port_link_cnt=%x, " 15054 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x " 15055 "tgt_tmp_cnt =%x", 15056 ntries, ptgt, lcount, pptr->port_link_cnt, 15057 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created, 15058 ptgt->tgt_tmp_cnt); 15059 15060 mutex_enter(&ptgt->tgt_mutex); 15061 while (ntries-- != 0 && pptr->port_link_cnt == lcount && 15062 ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) { 15063 mutex_exit(&ptgt->tgt_mutex); 15064 mutex_exit(&pptr->port_mutex); 15065 15066 delay(drv_usectohz(wait_ms * 1000)); 15067 15068 mutex_enter(&pptr->port_mutex); 15069 mutex_enter(&ptgt->tgt_mutex); 15070 } 15071 15072 15073 if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) { 15074 rval = EBUSY; 15075 } else { 15076 if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state == 15077 FCP_TGT_NODE_PRESENT) { 15078 rval = 0; 15079 } 15080 } 15081 15082 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace, 15083 FCP_BUF_LEVEL_3, 0, 15084 "fcp_create_on_demand(2): ntries=%x, ptgt=%x, " 15085 "lcount=%x::port_link_cnt=%x, " 15086 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x " 15087 "tgt_tmp_cnt =%x", 15088 ntries, ptgt, lcount, pptr->port_link_cnt, 15089 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created, 15090 
ptgt->tgt_tmp_cnt); 15091 15092 if (rval) { 15093 if (FC_TOP_EXTERNAL(pptr->port_topology) && 15094 fcp_enable_auto_configuration && old_manual) { 15095 ptgt->tgt_manual_config_only = 1; 15096 } 15097 mutex_exit(&ptgt->tgt_mutex); 15098 mutex_exit(&pptr->port_mutex); 15099 kmem_free(devlist, sizeof (*devlist)); 15100 15101 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace, 15102 FCP_BUF_LEVEL_3, 0, 15103 "fcp_create_on_demand(3): ntries=%x, ptgt=%x, " 15104 "lcount=%x::port_link_cnt=%x, " 15105 "tcount=%x::tgt_change_cnt=%x, rval=%x, " 15106 "tgt_device_created=%x, tgt D_ID=%x", 15107 ntries, ptgt, lcount, pptr->port_link_cnt, 15108 tcount, ptgt->tgt_change_cnt, rval, 15109 ptgt->tgt_device_created, ptgt->tgt_d_id); 15110 return (rval); 15111 } 15112 15113 if ((plun = ptgt->tgt_lun) != NULL) { 15114 tcount = plun->lun_tgt->tgt_change_cnt; 15115 } else { 15116 rval = EINVAL; 15117 } 15118 lcount = pptr->port_link_cnt; 15119 15120 /* 15121 * Configuring the target with no LUNs will fail. We 15122 * should reset the node state so that it is not 15123 * automatically configured when the LUNs are added 15124 * to this target. 
15125 */ 15126 if (ptgt->tgt_lun_cnt == 0) { 15127 ptgt->tgt_node_state = FCP_TGT_NODE_NONE; 15128 } 15129 mutex_exit(&ptgt->tgt_mutex); 15130 mutex_exit(&pptr->port_mutex); 15131 15132 while (plun) { 15133 child_info_t *cip; 15134 15135 mutex_enter(&plun->lun_mutex); 15136 cip = plun->lun_cip; 15137 mutex_exit(&plun->lun_mutex); 15138 15139 mutex_enter(&ptgt->tgt_mutex); 15140 if (!(plun->lun_state & FCP_LUN_OFFLINE)) { 15141 mutex_exit(&ptgt->tgt_mutex); 15142 15143 rval = fcp_pass_to_hp_and_wait(pptr, plun, cip, 15144 FCP_ONLINE, lcount, tcount, 15145 NDI_ONLINE_ATTACH); 15146 if (rval != NDI_SUCCESS) { 15147 FCP_TRACE(fcp_logq, 15148 pptr->port_instbuf, fcp_trace, 15149 FCP_BUF_LEVEL_3, 0, 15150 "fcp_create_on_demand: " 15151 "pass_to_hp_and_wait failed " 15152 "rval=%x", rval); 15153 rval = EIO; 15154 } else { 15155 mutex_enter(&LUN_TGT->tgt_mutex); 15156 plun->lun_state &= ~(FCP_LUN_OFFLINE | 15157 FCP_LUN_BUSY); 15158 mutex_exit(&LUN_TGT->tgt_mutex); 15159 } 15160 mutex_enter(&ptgt->tgt_mutex); 15161 } 15162 15163 plun = plun->lun_next; 15164 mutex_exit(&ptgt->tgt_mutex); 15165 } 15166 15167 kmem_free(devlist, sizeof (*devlist)); 15168 15169 if (FC_TOP_EXTERNAL(pptr->port_topology) && 15170 fcp_enable_auto_configuration && old_manual) { 15171 mutex_enter(&ptgt->tgt_mutex); 15172 /* if successful then set manual to 0 */ 15173 if (rval == 0) { 15174 ptgt->tgt_manual_config_only = 0; 15175 } else { 15176 /* reset to 1 so the user has to do the config */ 15177 ptgt->tgt_manual_config_only = 1; 15178 } 15179 mutex_exit(&ptgt->tgt_mutex); 15180 } 15181 15182 return (rval); 15183 } 15184 15185 15186 static void 15187 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len) 15188 { 15189 int count; 15190 uchar_t byte; 15191 15192 count = 0; 15193 while (*string) { 15194 byte = FCP_ATOB(*string); string++; 15195 byte = byte << 4 | FCP_ATOB(*string); string++; 15196 bytes[count++] = byte; 15197 15198 if (count >= byte_len) { 15199 break; 15200 } 15201 
} 15202 } 15203 15204 static void 15205 fcp_wwn_to_ascii(uchar_t wwn[], char *string) 15206 { 15207 int i; 15208 15209 for (i = 0; i < FC_WWN_SIZE; i++) { 15210 (void) sprintf(string + (i * 2), 15211 "%02x", wwn[i]); 15212 } 15213 15214 } 15215 15216 static void 15217 fcp_print_error(fc_packet_t *fpkt) 15218 { 15219 struct fcp_ipkt *icmd = (struct fcp_ipkt *) 15220 fpkt->pkt_ulp_private; 15221 struct fcp_port *pptr; 15222 struct fcp_tgt *ptgt; 15223 struct fcp_lun *plun; 15224 caddr_t buf; 15225 int scsi_cmd = 0; 15226 15227 ptgt = icmd->ipkt_tgt; 15228 plun = icmd->ipkt_lun; 15229 pptr = ptgt->tgt_port; 15230 15231 buf = kmem_zalloc(256, KM_NOSLEEP); 15232 if (buf == NULL) { 15233 return; 15234 } 15235 15236 switch (icmd->ipkt_opcode) { 15237 case SCMD_REPORT_LUN: 15238 (void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x" 15239 " lun=0x%%x failed"); 15240 scsi_cmd++; 15241 break; 15242 15243 case SCMD_INQUIRY_PAGE83: 15244 (void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x" 15245 " lun=0x%%x failed"); 15246 scsi_cmd++; 15247 break; 15248 15249 case SCMD_INQUIRY: 15250 (void) sprintf(buf, "!INQUIRY to D_ID=0x%%x" 15251 " lun=0x%%x failed"); 15252 scsi_cmd++; 15253 break; 15254 15255 case LA_ELS_PLOGI: 15256 (void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed"); 15257 break; 15258 15259 case LA_ELS_PRLI: 15260 (void) sprintf(buf, "!PRLI to D_ID=0x%%x failed"); 15261 break; 15262 } 15263 15264 if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) { 15265 struct fcp_rsp response, *rsp; 15266 uchar_t asc, ascq; 15267 caddr_t sense_key = NULL; 15268 struct fcp_rsp_info fcp_rsp_err, *bep; 15269 15270 if (icmd->ipkt_nodma) { 15271 rsp = (struct fcp_rsp *)fpkt->pkt_resp; 15272 bep = (struct fcp_rsp_info *)((caddr_t)rsp + 15273 sizeof (struct fcp_rsp)); 15274 } else { 15275 rsp = &response; 15276 bep = &fcp_rsp_err; 15277 15278 FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc, 15279 sizeof (struct fcp_rsp)); 15280 15281 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), 15282 bep, 
fpkt->pkt_resp_acc, 15283 sizeof (struct fcp_rsp_info)); 15284 } 15285 15286 15287 if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) { 15288 (void) sprintf(buf + strlen(buf), 15289 " : Bad FCP response values rsvd1=%%x, rsvd2=%%x," 15290 " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x," 15291 " senselen=%%x. Giving up"); 15292 15293 fcp_log(CE_WARN, pptr->port_dip, buf, 15294 ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0, 15295 rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0, 15296 rsp->fcp_u.fcp_status.reserved_1, 15297 rsp->fcp_response_len, rsp->fcp_sense_len); 15298 15299 kmem_free(buf, 256); 15300 return; 15301 } 15302 15303 if (rsp->fcp_u.fcp_status.rsp_len_set && 15304 bep->rsp_code != FCP_NO_FAILURE) { 15305 (void) sprintf(buf + strlen(buf), 15306 " FCP Response code = 0x%x", bep->rsp_code); 15307 } 15308 15309 if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) { 15310 struct scsi_extended_sense sense_info, *sense_ptr; 15311 15312 if (icmd->ipkt_nodma) { 15313 sense_ptr = (struct scsi_extended_sense *) 15314 ((caddr_t)fpkt->pkt_resp + 15315 sizeof (struct fcp_rsp) + 15316 rsp->fcp_response_len); 15317 } else { 15318 sense_ptr = &sense_info; 15319 15320 FCP_CP_IN(fpkt->pkt_resp + 15321 sizeof (struct fcp_rsp) + 15322 rsp->fcp_response_len, &sense_info, 15323 fpkt->pkt_resp_acc, 15324 sizeof (struct scsi_extended_sense)); 15325 } 15326 15327 if (sense_ptr->es_key < NUM_SENSE_KEYS + 15328 NUM_IMPL_SENSE_KEYS) { 15329 sense_key = sense_keys[sense_ptr->es_key]; 15330 } else { 15331 sense_key = "Undefined"; 15332 } 15333 15334 asc = sense_ptr->es_add_code; 15335 ascq = sense_ptr->es_qual_code; 15336 15337 (void) sprintf(buf + strlen(buf), 15338 ": sense key=%%s, ASC=%%x," " ASCQ=%%x." 15339 " Giving up"); 15340 15341 fcp_log(CE_WARN, pptr->port_dip, buf, 15342 ptgt->tgt_d_id, plun->lun_num, sense_key, 15343 asc, ascq); 15344 } else { 15345 (void) sprintf(buf + strlen(buf), 15346 " : SCSI status=%%x. 
Giving up"); 15347 15348 fcp_log(CE_WARN, pptr->port_dip, buf, 15349 ptgt->tgt_d_id, plun->lun_num, 15350 rsp->fcp_u.fcp_status.scsi_status); 15351 } 15352 } else { 15353 caddr_t state, reason, action, expln; 15354 15355 (void) fc_ulp_pkt_error(fpkt, &state, &reason, 15356 &action, &expln); 15357 15358 (void) sprintf(buf + strlen(buf), ": State:%%s," 15359 " Reason:%%s. Giving up"); 15360 15361 if (scsi_cmd) { 15362 fcp_log(CE_WARN, pptr->port_dip, buf, 15363 ptgt->tgt_d_id, plun->lun_num, state, reason); 15364 } else { 15365 fcp_log(CE_WARN, pptr->port_dip, buf, 15366 ptgt->tgt_d_id, state, reason); 15367 } 15368 } 15369 15370 kmem_free(buf, 256); 15371 } 15372 15373 15374 static int 15375 fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt, 15376 struct fcp_ipkt *icmd, int rval, caddr_t op) 15377 { 15378 int ret = DDI_FAILURE; 15379 char *error; 15380 15381 switch (rval) { 15382 case FC_DEVICE_BUSY_NEW_RSCN: 15383 /* 15384 * This means that there was a new RSCN that the transport 15385 * knows about (which the ULP *may* know about too) but the 15386 * pkt that was sent down was related to an older RSCN. So, we 15387 * are just going to reset the retry count and deadline and 15388 * continue to retry. The idea is that transport is currently 15389 * working on the new RSCN and will soon let the ULPs know 15390 * about it and when it does the existing logic will kick in 15391 * where it will change the tcount to indicate that something 15392 * changed on the target. So, rediscovery will start and there 15393 * will not be an infinite retry. 
15394 * 15395 * For a full flow of how the RSCN info is transferred back and 15396 * forth, see fp.c 15397 */ 15398 icmd->ipkt_retries = 0; 15399 icmd->ipkt_port->port_deadline = fcp_watchdog_time + 15400 FCP_ICMD_DEADLINE; 15401 15402 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace, 15403 FCP_BUF_LEVEL_3, 0, 15404 "fcp_handle_ipkt_errors: rval=%x for D_ID=%x", 15405 rval, ptgt->tgt_d_id); 15406 /* FALLTHROUGH */ 15407 15408 case FC_STATEC_BUSY: 15409 case FC_DEVICE_BUSY: 15410 case FC_PBUSY: 15411 case FC_FBUSY: 15412 case FC_TRAN_BUSY: 15413 case FC_OFFLINE: 15414 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace, 15415 FCP_BUF_LEVEL_3, 0, 15416 "fcp_handle_ipkt_errors: rval=%x for D_ID=%x", 15417 rval, ptgt->tgt_d_id); 15418 if (icmd->ipkt_retries < FCP_MAX_RETRIES && 15419 fcp_is_retryable(icmd)) { 15420 fcp_queue_ipkt(pptr, icmd->ipkt_fpkt); 15421 ret = DDI_SUCCESS; 15422 } 15423 break; 15424 15425 case FC_LOGINREQ: 15426 /* 15427 * FC_LOGINREQ used to be handled just like all the cases 15428 * above. It has been changed to handled a PRLI that fails 15429 * with FC_LOGINREQ different than other ipkts that fail 15430 * with FC_LOGINREQ. If a PRLI fails with FC_LOGINREQ it is 15431 * a simple matter to turn it into a PLOGI instead, so that's 15432 * exactly what we do here. 
15433 */ 15434 if (icmd->ipkt_opcode == LA_ELS_PRLI) { 15435 ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt, 15436 icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt, 15437 icmd->ipkt_change_cnt, icmd->ipkt_cause); 15438 } else { 15439 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace, 15440 FCP_BUF_LEVEL_3, 0, 15441 "fcp_handle_ipkt_errors: rval=%x for D_ID=%x", 15442 rval, ptgt->tgt_d_id); 15443 if (icmd->ipkt_retries < FCP_MAX_RETRIES && 15444 fcp_is_retryable(icmd)) { 15445 fcp_queue_ipkt(pptr, icmd->ipkt_fpkt); 15446 ret = DDI_SUCCESS; 15447 } 15448 } 15449 break; 15450 15451 default: 15452 mutex_enter(&pptr->port_mutex); 15453 mutex_enter(&ptgt->tgt_mutex); 15454 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) { 15455 mutex_exit(&ptgt->tgt_mutex); 15456 mutex_exit(&pptr->port_mutex); 15457 15458 (void) fc_ulp_error(rval, &error); 15459 fcp_log(CE_WARN, pptr->port_dip, 15460 "!Failed to send %s to D_ID=%x error=%s", 15461 op, ptgt->tgt_d_id, error); 15462 } else { 15463 FCP_TRACE(fcp_logq, pptr->port_instbuf, 15464 fcp_trace, FCP_BUF_LEVEL_2, 0, 15465 "fcp_handle_ipkt_errors,1: state change occured" 15466 " for D_ID=0x%x", ptgt->tgt_d_id); 15467 mutex_exit(&ptgt->tgt_mutex); 15468 mutex_exit(&pptr->port_mutex); 15469 } 15470 break; 15471 } 15472 15473 return (ret); 15474 } 15475 15476 15477 /* 15478 * Check of outstanding commands on any LUN for this target 15479 */ 15480 static int 15481 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt) 15482 { 15483 struct fcp_lun *plun; 15484 struct fcp_pkt *cmd; 15485 15486 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) { 15487 mutex_enter(&plun->lun_mutex); 15488 for (cmd = plun->lun_pkt_head; cmd != NULL; 15489 cmd = cmd->cmd_forw) { 15490 if (cmd->cmd_state == FCP_PKT_ISSUED) { 15491 mutex_exit(&plun->lun_mutex); 15492 return (FC_SUCCESS); 15493 } 15494 } 15495 mutex_exit(&plun->lun_mutex); 15496 } 15497 15498 return (FC_FAILURE); 15499 } 15500 15501 static fc_portmap_t * 15502 fcp_construct_map(struct fcp_port *pptr, 
uint32_t *dev_cnt) 15503 { 15504 int i; 15505 fc_portmap_t *devlist; 15506 fc_portmap_t *devptr = NULL; 15507 struct fcp_tgt *ptgt; 15508 15509 mutex_enter(&pptr->port_mutex); 15510 for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) { 15511 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL; 15512 ptgt = ptgt->tgt_next) { 15513 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) { 15514 ++*dev_cnt; 15515 } 15516 } 15517 } 15518 15519 devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt, 15520 KM_NOSLEEP); 15521 if (devlist == NULL) { 15522 mutex_exit(&pptr->port_mutex); 15523 fcp_log(CE_WARN, pptr->port_dip, 15524 "!fcp%d: failed to allocate for portmap for construct map", 15525 pptr->port_instance); 15526 return (devptr); 15527 } 15528 15529 for (i = 0; i < FCP_NUM_HASH; i++) { 15530 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL; 15531 ptgt = ptgt->tgt_next) { 15532 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) { 15533 int ret; 15534 15535 ret = fc_ulp_pwwn_to_portmap( 15536 pptr->port_fp_handle, 15537 (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0], 15538 devlist); 15539 15540 if (ret == FC_SUCCESS) { 15541 devlist++; 15542 continue; 15543 } 15544 15545 devlist->map_pd = NULL; 15546 devlist->map_did.port_id = ptgt->tgt_d_id; 15547 devlist->map_hard_addr.hard_addr = 15548 ptgt->tgt_hard_addr; 15549 15550 devlist->map_state = PORT_DEVICE_INVALID; 15551 devlist->map_type = PORT_DEVICE_OLD; 15552 15553 bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], 15554 &devlist->map_nwwn, FC_WWN_SIZE); 15555 15556 bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], 15557 &devlist->map_pwwn, FC_WWN_SIZE); 15558 15559 devlist++; 15560 } 15561 } 15562 } 15563 15564 mutex_exit(&pptr->port_mutex); 15565 15566 return (devptr); 15567 } 15568 /* 15569 * Inimate MPxIO that the lun is busy and cannot accept regular IO 15570 */ 15571 static void 15572 fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr) 15573 { 15574 int i; 15575 struct fcp_tgt *ptgt; 15576 struct fcp_lun *plun; 15577 15578 for (i = 0; i < 
FCP_NUM_HASH; i++) { 15579 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL; 15580 ptgt = ptgt->tgt_next) { 15581 mutex_enter(&ptgt->tgt_mutex); 15582 for (plun = ptgt->tgt_lun; plun != NULL; 15583 plun = plun->lun_next) { 15584 if (plun->lun_mpxio && 15585 plun->lun_state & FCP_LUN_BUSY) { 15586 if (!fcp_pass_to_hp(pptr, plun, 15587 plun->lun_cip, 15588 FCP_MPXIO_PATH_SET_BUSY, 15589 pptr->port_link_cnt, 15590 ptgt->tgt_change_cnt, 0, 0)) { 15591 FCP_TRACE(fcp_logq, 15592 pptr->port_instbuf, 15593 fcp_trace, 15594 FCP_BUF_LEVEL_2, 0, 15595 "path_verifybusy: " 15596 "disable lun %p failed!", 15597 plun); 15598 } 15599 } 15600 } 15601 mutex_exit(&ptgt->tgt_mutex); 15602 } 15603 } 15604 } 15605 15606 static int 15607 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what) 15608 { 15609 dev_info_t *cdip = NULL; 15610 dev_info_t *pdip = NULL; 15611 15612 ASSERT(plun); 15613 15614 mutex_enter(&plun->lun_mutex); 15615 if (fcp_is_child_present(plun, cip) == FC_FAILURE) { 15616 mutex_exit(&plun->lun_mutex); 15617 return (NDI_FAILURE); 15618 } 15619 mutex_exit(&plun->lun_mutex); 15620 cdip = mdi_pi_get_client(PIP(cip)); 15621 pdip = mdi_pi_get_phci(PIP(cip)); 15622 15623 ASSERT(cdip != NULL); 15624 ASSERT(pdip != NULL); 15625 15626 if (what == FCP_MPXIO_PATH_CLEAR_BUSY) { 15627 /* LUN ready for IO */ 15628 (void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT); 15629 } else { 15630 /* LUN busy to accept IO */ 15631 (void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT); 15632 } 15633 return (NDI_SUCCESS); 15634 } 15635 15636 /* 15637 * Caller must free the returned string of MAXPATHLEN len 15638 * If the device is offline (-1 instance number) NULL 15639 * will be returned. 
15640 */ 15641 static char * 15642 fcp_get_lun_path(struct fcp_lun *plun) 15643 { 15644 dev_info_t *dip = NULL; 15645 char *path = NULL; 15646 mdi_pathinfo_t *pip = NULL; 15647 15648 if (plun == NULL) { 15649 return (NULL); 15650 } 15651 15652 mutex_enter(&plun->lun_mutex); 15653 if (plun->lun_mpxio == 0) { 15654 dip = DIP(plun->lun_cip); 15655 mutex_exit(&plun->lun_mutex); 15656 } else { 15657 /* 15658 * lun_cip must be accessed with lun_mutex held. Here 15659 * plun->lun_cip either points to a valid node or it is NULL. 15660 * Make a copy so that we can release lun_mutex. 15661 */ 15662 pip = PIP(plun->lun_cip); 15663 15664 /* 15665 * Increase ref count on the path so that we can release 15666 * lun_mutex and still be sure that the pathinfo node (and thus 15667 * also the client) is not deallocated. If pip is NULL, this 15668 * has no effect. 15669 */ 15670 mdi_hold_path(pip); 15671 15672 mutex_exit(&plun->lun_mutex); 15673 15674 /* Get the client. If pip is NULL, we get NULL. */ 15675 dip = mdi_pi_get_client(pip); 15676 } 15677 15678 if (dip == NULL) 15679 goto out; 15680 if (ddi_get_instance(dip) < 0) 15681 goto out; 15682 15683 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 15684 if (path == NULL) 15685 goto out; 15686 15687 (void) ddi_pathname(dip, path); 15688 15689 /* Clean up. */ 15690 out: 15691 if (pip != NULL) 15692 mdi_rele_path(pip); 15693 15694 /* 15695 * In reality, the user wants a fully valid path (one they can open) 15696 * but this string is lacking the mount point, and the minor node. 15697 * It would be nice if we could "figure these out" somehow 15698 * and fill them in. Otherwise, the userland code has to understand 15699 * driver specific details of which minor node is the "best" or 15700 * "right" one to expose. 
(Ex: which slice is the whole disk, or 15701 * which tape doesn't rewind) 15702 */ 15703 return (path); 15704 } 15705 15706 static int 15707 fcp_scsi_bus_config(dev_info_t *parent, uint_t flag, 15708 ddi_bus_config_op_t op, void *arg, dev_info_t **childp) 15709 { 15710 int64_t reset_delay; 15711 int rval, retry = 0; 15712 struct fcp_port *pptr = fcp_dip2port(parent); 15713 15714 reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) - 15715 (ddi_get_lbolt64() - pptr->port_attach_time); 15716 if (reset_delay < 0) { 15717 reset_delay = 0; 15718 } 15719 15720 if (fcp_bus_config_debug) { 15721 flag |= NDI_DEVI_DEBUG; 15722 } 15723 15724 switch (op) { 15725 case BUS_CONFIG_ONE: 15726 /* 15727 * Retry the command since we need to ensure 15728 * the fabric devices are available for root 15729 */ 15730 while (retry++ < fcp_max_bus_config_retries) { 15731 rval = (ndi_busop_bus_config(parent, 15732 flag | NDI_MDI_FALLBACK, op, 15733 arg, childp, (clock_t)reset_delay)); 15734 if (rval == 0) { 15735 return (rval); 15736 } 15737 } 15738 15739 /* 15740 * drain taskq to make sure nodes are created and then 15741 * try again. 
15742 */ 15743 taskq_wait(DEVI(parent)->devi_taskq); 15744 return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK, 15745 op, arg, childp, 0)); 15746 15747 case BUS_CONFIG_DRIVER: 15748 case BUS_CONFIG_ALL: { 15749 /* 15750 * delay till all devices report in (port_tmp_cnt == 0) 15751 * or FCP_INIT_WAIT_TIMEOUT 15752 */ 15753 mutex_enter(&pptr->port_mutex); 15754 while ((reset_delay > 0) && pptr->port_tmp_cnt) { 15755 (void) cv_timedwait(&pptr->port_config_cv, 15756 &pptr->port_mutex, 15757 ddi_get_lbolt() + (clock_t)reset_delay); 15758 reset_delay = 15759 (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) - 15760 (ddi_get_lbolt64() - pptr->port_attach_time); 15761 } 15762 mutex_exit(&pptr->port_mutex); 15763 /* drain taskq to make sure nodes are created */ 15764 taskq_wait(DEVI(parent)->devi_taskq); 15765 return (ndi_busop_bus_config(parent, flag, op, 15766 arg, childp, 0)); 15767 } 15768 15769 default: 15770 return (NDI_FAILURE); 15771 } 15772 /*NOTREACHED*/ 15773 } 15774 15775 static int 15776 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag, 15777 ddi_bus_config_op_t op, void *arg) 15778 { 15779 if (fcp_bus_config_debug) { 15780 flag |= NDI_DEVI_DEBUG; 15781 } 15782 15783 return (ndi_busop_bus_unconfig(parent, flag, op, arg)); 15784 } 15785 15786 15787 /* 15788 * Routine to copy GUID into the lun structure. 15789 * returns 0 if copy was successful and 1 if encountered a 15790 * failure and did not copy the guid. 15791 */ 15792 static int 15793 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp) 15794 { 15795 15796 int retval = 0; 15797 15798 /* add one for the null terminator */ 15799 const unsigned int len = strlen(guidp) + 1; 15800 15801 if ((guidp == NULL) || (plun == NULL)) { 15802 return (1); 15803 } 15804 15805 /* 15806 * if the plun->lun_guid already has been allocated, 15807 * then check the size. if the size is exact, reuse 15808 * it....if not free it an allocate the required size. 
15809 * The reallocation should NOT typically happen 15810 * unless the GUIDs reported changes between passes. 15811 * We free up and alloc again even if the 15812 * size was more than required. This is due to the 15813 * fact that the field lun_guid_size - serves 15814 * dual role of indicating the size of the wwn 15815 * size and ALSO the allocation size. 15816 */ 15817 if (plun->lun_guid) { 15818 if (plun->lun_guid_size != len) { 15819 /* 15820 * free the allocated memory and 15821 * initialize the field 15822 * lun_guid_size to 0. 15823 */ 15824 kmem_free(plun->lun_guid, plun->lun_guid_size); 15825 plun->lun_guid = NULL; 15826 plun->lun_guid_size = 0; 15827 } 15828 } 15829 /* 15830 * alloc only if not already done. 15831 */ 15832 if (plun->lun_guid == NULL) { 15833 plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP); 15834 if (plun->lun_guid == NULL) { 15835 cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block:" 15836 "Unable to allocate" 15837 "Memory for GUID!!! size %d", len); 15838 retval = 1; 15839 } else { 15840 plun->lun_guid_size = len; 15841 } 15842 } 15843 if (plun->lun_guid) { 15844 /* 15845 * now copy the GUID 15846 */ 15847 bcopy(guidp, plun->lun_guid, plun->lun_guid_size); 15848 } 15849 return (retval); 15850 } 15851 15852 /* 15853 * fcp_reconfig_wait 15854 * 15855 * Wait for a rediscovery/reconfiguration to complete before continuing. 15856 */ 15857 15858 static void 15859 fcp_reconfig_wait(struct fcp_port *pptr) 15860 { 15861 clock_t reconfig_start, wait_timeout; 15862 15863 /* 15864 * Quick check. If pptr->port_tmp_cnt is 0, there is no 15865 * reconfiguration in progress. 
15866 */ 15867 15868 mutex_enter(&pptr->port_mutex); 15869 if (pptr->port_tmp_cnt == 0) { 15870 mutex_exit(&pptr->port_mutex); 15871 return; 15872 } 15873 mutex_exit(&pptr->port_mutex); 15874 15875 /* 15876 * If we cause a reconfig by raising power, delay until all devices 15877 * report in (port_tmp_cnt returns to 0) 15878 */ 15879 15880 reconfig_start = ddi_get_lbolt(); 15881 wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT); 15882 15883 mutex_enter(&pptr->port_mutex); 15884 15885 while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) && 15886 pptr->port_tmp_cnt) { 15887 15888 (void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex, 15889 reconfig_start + wait_timeout); 15890 } 15891 15892 mutex_exit(&pptr->port_mutex); 15893 15894 /* 15895 * Even if fcp_tmp_count isn't 0, continue without error. The port 15896 * we want may still be ok. If not, it will error out later 15897 */ 15898 } 15899 15900 /* 15901 * Read masking info from fp.conf and construct the global fcp_lun_blacklist. 15902 * We rely on the fcp_global_mutex to provide protection against changes to 15903 * the fcp_lun_blacklist. 15904 * 15905 * You can describe a list of target port WWNs and LUN numbers which will 15906 * not be configured. LUN numbers will be interpreted as decimal. White 15907 * spaces and ',' can be used in the list of LUN numbers. 
15908 * 15909 * To prevent LUNs 1 and 2 from being configured for target 15910 * port 510000f010fd92a1 and target port 510000e012079df1, set: 15911 * 15912 * pwwn-lun-blacklist= 15913 * "510000f010fd92a1,1,2", 15914 * "510000e012079df1,1,2"; 15915 */ 15916 static void 15917 fcp_read_blacklist(dev_info_t *dip, 15918 struct fcp_black_list_entry **pplun_blacklist) 15919 { 15920 char **prop_array = NULL; 15921 char *curr_pwwn = NULL; 15922 char *curr_lun = NULL; 15923 uint32_t prop_item = 0; 15924 int idx = 0; 15925 int len = 0; 15926 15927 ASSERT(mutex_owned(&fcp_global_mutex)); 15928 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip, 15929 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 15930 LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) { 15931 return; 15932 } 15933 15934 for (idx = 0; idx < prop_item; idx++) { 15935 15936 curr_pwwn = prop_array[idx]; 15937 while (*curr_pwwn == ' ') { 15938 curr_pwwn++; 15939 } 15940 if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) { 15941 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist" 15942 ", please check.", curr_pwwn); 15943 continue; 15944 } 15945 if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') && 15946 (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) { 15947 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist" 15948 ", please check.", curr_pwwn); 15949 continue; 15950 } 15951 for (len = 0; len < sizeof (la_wwn_t) * 2; len++) { 15952 if (isxdigit(curr_pwwn[len]) != TRUE) { 15953 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the " 15954 "blacklist, please check.", curr_pwwn); 15955 break; 15956 } 15957 } 15958 if (len != sizeof (la_wwn_t) * 2) { 15959 continue; 15960 } 15961 15962 curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1; 15963 *(curr_lun - 1) = '\0'; 15964 fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist); 15965 } 15966 15967 ddi_prop_free(prop_array); 15968 } 15969 15970 /* 15971 * Get the masking info about one remote target port designated by wwn. 
 * Lun ids could be separated by ',' or white spaces.
 */
static void
fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
    struct fcp_black_list_entry **pplun_blacklist)
{
	int		idx = 0;
	uint32_t	offset = 0;
	unsigned long	lun_id = 0;
	char		lunid_buf[16];
	char		*pend = NULL;
	int		illegal_digit = 0;

	/* Walk the LUN list; (offset, idx) bracket the current token. */
	while (offset < strlen(curr_lun)) {
		while ((curr_lun[offset + idx] != ',') &&
		    (curr_lun[offset + idx] != '\0') &&
		    (curr_lun[offset + idx] != ' ')) {
			if (isdigit(curr_lun[offset + idx]) == 0) {
				illegal_digit++;
			}
			idx++;
		}
		if (illegal_digit > 0) {
			offset += (idx+1);	/* To the start of next lun */
			idx = 0;
			illegal_digit = 0;
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check digits.",
			    curr_lun, curr_pwwn);
			continue;
		}
		/* Token longer than lunid_buf can hold: give up on entry. */
		if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check the length of LUN#.",
			    curr_lun, curr_pwwn);
			break;
		}
		if (idx == 0) {	/* ignore ' ' or ',' or '\0' */
			offset++;
			continue;
		}

		/* Copy out the digits, NUL-terminate and convert. */
		bcopy(curr_lun + offset, lunid_buf, idx);
		lunid_buf[idx] = '\0';
		if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
			fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
		} else {
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check %s.",
			    curr_lun, curr_pwwn, lunid_buf);
		}
		offset += (idx+1);	/* To the start of next lun */
		idx = 0;
	}
}

/*
 * Add one masking record
 *
 * Prepends a (WWN, LUN) pair to *pplun_blacklist unless an identical
 * entry is already present.
 */
static void
fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
    struct fcp_black_list_entry **pplun_blacklist)
{
	struct fcp_black_list_entry	*tmp_entry = *pplun_blacklist;
	struct fcp_black_list_entry	*new_entry = NULL;
	la_wwn_t			wwn;

	fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));

	/* Avoid duplicate entries for the same (WWN, LUN) pair. */
	while (tmp_entry) {
		if ((bcmp(&tmp_entry->wwn, &wwn,
		    sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
			return;
		}

		tmp_entry = tmp_entry->next;
	}

	/* add to black list */
	new_entry = (struct fcp_black_list_entry *)kmem_zalloc
	    (sizeof (struct fcp_black_list_entry), KM_SLEEP);
	bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
	new_entry->lun = lun_id;
	new_entry->masked = 0;
	new_entry->next = *pplun_blacklist;
	*pplun_blacklist = new_entry;
}

/*
 * Check if we should mask the specified lun of this fcp_tgt
 *
 * Returns TRUE if (wwn, lun_id) is on the global blacklist, FALSE
 * otherwise.  The "masked" counter ensures the notice is logged only
 * the first time a given entry matches.
 */
static int
fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id)
{
	struct fcp_black_list_entry	*remote_port;

	remote_port = fcp_lun_blacklist;
	while (remote_port != NULL) {
		if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
			if (remote_port->lun == lun_id) {
				remote_port->masked++;
				if (remote_port->masked == 1) {
					fcp_log(CE_NOTE, NULL, "LUN %d of port "
					    "%02x%02x%02x%02x%02x%02x%02x%02x "
					    "is masked due to black listing.\n",
					    lun_id, wwn->raw_wwn[0],
					    wwn->raw_wwn[1], wwn->raw_wwn[2],
					    wwn->raw_wwn[3], wwn->raw_wwn[4],
					    wwn->raw_wwn[5], wwn->raw_wwn[6],
					    wwn->raw_wwn[7]);
				}
				return (TRUE);
			}
		}
		remote_port = remote_port->next;
	}
	return (FALSE);
}

/*
 * Release all allocated resources
 *
 * Frees every entry on the blacklist and resets the list head to NULL.
 */
static void
fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist)
{
	struct fcp_black_list_entry	*tmp_entry = *pplun_blacklist;
	struct fcp_black_list_entry	*current_entry = NULL;

	ASSERT(mutex_owned(&fcp_global_mutex));
	/*
	 * Traverse all luns
	 */
	while (tmp_entry) {
		current_entry = tmp_entry;
		tmp_entry = tmp_entry->next;
		kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
	}
	*pplun_blacklist = NULL;
}

/*
 * In fcp module,
 * pkt@scsi_pkt, cmd@fcp_pkt, icmd@fcp_ipkt, fpkt@fc_packet, pptr@fcp_port
 *
 * tran_init_pkt(9E) for the pseudo FC HBA: allocates (or reuses) a
 * scsi_pkt, links the embedded fcp_pkt/fc_packet structures together,
 * fills in a template FC frame header and maps in the data buffer.
 */
static struct scsi_pkt *
fcp_pseudo_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
	fcp_port_t	*pptr = ADDR2FCP(ap);
	fcp_pkt_t	*cmd = NULL;
	fc_frame_hdr_t	*hp;

	/*
	 * First step: get the packet
	 */
	if (pkt == NULL) {
		pkt = scsi_hba_pkt_alloc(pptr->port_dip, ap, cmdlen, statuslen,
		    tgtlen, sizeof (fcp_pkt_t) + pptr->port_priv_pkt_len,
		    callback, arg);
		if (pkt == NULL) {
			return (NULL);
		}

		/*
		 * All fields in scsi_pkt will be initialized properly or
		 * set to zero. We need do nothing for scsi_pkt.
		 */
		/*
		 * But it's our responsibility to link other related data
		 * structures. Their initialization will be done, just
		 * before the scsi_pkt will be sent to FCA.
		 */
		cmd = PKT2CMD(pkt);
		cmd->cmd_pkt = pkt;
		cmd->cmd_fp_pkt = &cmd->cmd_fc_packet;
		/*
		 * fc_packet_t
		 */
		cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
		cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
		    sizeof (struct fcp_pkt));
		cmd->cmd_fp_pkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
		cmd->cmd_fp_pkt->pkt_cmdlen = sizeof (struct fcp_cmd);
		cmd->cmd_fp_pkt->pkt_resp = cmd->cmd_fcp_rsp;
		cmd->cmd_fp_pkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
		/*
		 * Fill in the Fabric Channel Header
		 */
		hp = &cmd->cmd_fp_pkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_COMMAND;
		hp->rsvd = 0;
		hp->type = FC_TYPE_SCSI_FCP;
		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->seq_id = 0;
		hp->df_ctl = 0;
		hp->seq_cnt = 0;
		hp->ox_id = 0xffff;
		hp->rx_id = 0xffff;
		hp->ro = 0;
	} else {
		/*
		 * We need think if we should reset any elements in
		 * related data structures.
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_6, 0,
		    "reusing pkt, flags %d", flags);
		cmd = PKT2CMD(pkt);
		if (cmd->cmd_fp_pkt->pkt_pd) {
			cmd->cmd_fp_pkt->pkt_pd = NULL;
		}
	}

	/*
	 * Second step: dma allocation/move
	 */
	if (bp && bp->b_bcount != 0) {
		/*
		 * Mark if it's read or write
		 */
		if (bp->b_flags & B_READ) {
			cmd->cmd_flags |= CFLAG_IS_READ;
		} else {
			cmd->cmd_flags &= ~CFLAG_IS_READ;
		}

		bp_mapin(bp);
		cmd->cmd_fp_pkt->pkt_data = bp->b_un.b_addr;
		cmd->cmd_fp_pkt->pkt_datalen = bp->b_bcount;
		cmd->cmd_fp_pkt->pkt_data_resid = 0;
	} else {
		/*
		 * It seldom happens, except when CLUSTER or SCSI_VHCI wants
		 * to send zero-length read/write.
		 */
		cmd->cmd_fp_pkt->pkt_data = NULL;
		cmd->cmd_fp_pkt->pkt_datalen = 0;
	}

	return (pkt);
}

/*
 * tran_destroy_pkt(9E) for the pseudo FC HBA: lets the FCA release its
 * private packet state, then frees the scsi_pkt (which also contains
 * the fcp_pkt and fc_packet structures).
 */
static void
fcp_pseudo_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	fcp_port_t	*pptr = ADDR2FCP(ap);

	/*
	 * First we let FCA to uninitilize private part.
	 */
	(void) fc_ulp_uninit_packet(pptr->port_fp_handle,
	    PKT2CMD(pkt)->cmd_fp_pkt);

	/*
	 * Then we uninitialize fc_packet.
	 */

	/*
	 * Thirdly, we uninitializae fcp_pkt.
	 */

	/*
	 * In the end, we free scsi_pkt.
	 */
	scsi_hba_pkt_free(ap, pkt);
}

/*
 * tran_start(9E) for the pseudo FC HBA: builds the FCP command (CDB,
 * task attributes, read/write direction), finishes the fc_packet
 * (timeout, D_ID/S_ID, transfer type, completion handling) and hands
 * it to the transport; polls directly when FLAG_NOINTR is set.
 *
 * Returns TRAN_ACCEPT, TRAN_BUSY or TRAN_FATAL_ERROR.
 */
static int
fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	fcp_port_t	*pptr = ADDR2FCP(ap);
	fcp_lun_t	*plun = ADDR2LUN(ap);
	fcp_tgt_t	*ptgt = plun->lun_tgt;
	fcp_pkt_t	*cmd = PKT2CMD(pkt);
	fcp_cmd_t	*fcmd = &cmd->cmd_fcp_cmd;
	fc_packet_t	*fpkt = cmd->cmd_fp_pkt;
	int		rval;

	fpkt->pkt_pd = ptgt->tgt_pd_handle;
	(void) fc_ulp_init_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt, 1);

	/*
	 * Firstly, we need initialize fcp_pkt_t
	 * Secondly, we need initialize fcp_cmd_t.
	 */
	bcopy(pkt->pkt_cdbp, fcmd->fcp_cdb, pkt->pkt_cdblen);
	fcmd->fcp_data_len = fpkt->pkt_datalen;
	fcmd->fcp_ent_addr = plun->lun_addr;
	if (pkt->pkt_flags & FLAG_HTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
	} else if (pkt->pkt_flags & FLAG_OTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
	} else if (pkt->pkt_flags & FLAG_STAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
	} else {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	}

	if (cmd->cmd_flags & CFLAG_IS_READ) {
		fcmd->fcp_cntl.cntl_read_data = 1;
		fcmd->fcp_cntl.cntl_write_data = 0;
	} else {
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 1;
	}

	/*
	 * Then we need initialize fc_packet_t too.
	 */
	fpkt->pkt_timeout = pkt->pkt_time + 2;
	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
	if (cmd->cmd_flags & CFLAG_IS_READ) {
		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
	} else {
		fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
	}

	if (pkt->pkt_flags & FLAG_NOINTR) {
		fpkt->pkt_comp = NULL;
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
	} else {
		fpkt->pkt_comp = fcp_cmd_callback;
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
		if (pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
		}
	}

	/*
	 * Lastly, we need initialize scsi_pkt
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;
	pkt->pkt_resid = 0;

	/*
	 * if interrupts aren't allowed (e.g. at dump time) then we'll
	 * have to do polled I/O
	 */
	if (pkt->pkt_flags & FLAG_NOINTR) {
		return (fcp_dopoll(pptr, cmd));
	}

	cmd->cmd_state = FCP_PKT_ISSUED;
	rval = fcp_transport(pptr->port_fp_handle, fpkt, 0);
	if (rval == FC_SUCCESS) {
		return (TRAN_ACCEPT);
	}

	/*
	 * Need more consideration
	 *
	 * pkt->pkt_flags & FLAG_NOQUEUE could abort other pkt
	 */
	cmd->cmd_state = FCP_PKT_IDLE;
	if (rval == FC_TRAN_BUSY) {
		return (TRAN_BUSY);
	} else {
		return (TRAN_FATAL_ERROR);
	}
}

/*
 * scsi_poll will always call tran_sync_pkt for pseudo FC-HBAs
 * SCSA will initialize it to scsi_sync_cache_pkt for physical FC-HBAs
 *
 * No DMA sync is needed here (data is mapped via bp_mapin); trace only.
 */
static void
fcp_pseudo_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	FCP_TRACE(fcp_logq, "fcp_pseudo_sync_pkt", fcp_trace,
	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
}

/*
 * scsi_dmafree will always call tran_dmafree, when STATE_ARQ_DONE
 *
 * No DMA resources are allocated by this pseudo HBA; trace only.
 */
static void
fcp_pseudo_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	FCP_TRACE(fcp_logq, "fcp_pseudo_dmafree", fcp_trace,
	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
}