1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 * Copyright (c) 2016 by Delphix. All rights reserved. 25 */ 26 27 /* 28 * SunOS 5.x Multithreaded STREAMS DLPI FCIP Module 29 * This is a pseudo driver module to handle encapsulation of IP and ARP 30 * datagrams over FibreChannel interfaces. FCIP is a cloneable STREAMS 31 * driver module which interfaces with IP/ARP using DLPI. This module 32 * is a Style-2 DLS provider. 33 * 34 * The implementation of this module is based on RFC 2625 which gives 35 * details on the encapsulation of IP/ARP data over FibreChannel. 36 * The fcip module needs to resolve an IP address to a port address before 37 * sending data to a destination port. A FC device port has 2 addresses 38 * associated with it: A 8 byte World Wide unique Port Name and a 3 byte 39 * volatile Port number or Port_ID. 40 * 41 * The mapping between a IP address and the World Wide Port Name is handled 42 * by the ARP layer since the IP over FC draft requires the MAC address to 43 * be the least significant six bytes of the WorldWide Port Names. 
The 44 * fcip module however needs to identify the destination port uniquely when 45 * the destination FC device has multiple FC ports. 46 * 47 * The FC layer mapping between the World Wide Port Name and the Port_ID 48 * will be handled through the use of a fabric name server or through the 49 * use of the FARP ELS command as described in the draft. Since the Port_IDs 50 * are volatile, the mapping between the World Wide Port Name and Port_IDs 51 * must be maintained and validated before use each time a datagram 52 * needs to be sent to the destination ports. The FC transport module 53 * informs the fcip module of all changes to states of ports on the 54 * fabric through registered callbacks. This enables the fcip module 55 * to maintain the WW_PN to Port_ID mappings current. 56 * 57 * For details on how this module interfaces with the FibreChannel Transport 58 * modules, refer to PSARC/1997/385. Chapter 3 of the FibreChannel Transport 59 * Programming guide details the APIs between ULPs and the Transport. 60 * 61 * Now for some Caveats: 62 * 63 * RFC 2625 requires that a FibreChannel Port name (the Port WWN) have 64 * the NAA bits set to '0001' indicating a IEEE 48bit address which 65 * corresponds to a ULA (Universal LAN MAC address). But with FibreChannel 66 * adapters containing 2 or more ports, IEEE naming cannot identify the 67 * ports on an adapter uniquely so we will in the first implementation 68 * be operating only on Port 0 of each adapter. 
69 */ 70 71 #include <sys/types.h> 72 #include <sys/errno.h> 73 #include <sys/debug.h> 74 #include <sys/time.h> 75 #include <sys/sysmacros.h> 76 #include <sys/systm.h> 77 #include <sys/user.h> 78 #include <sys/stropts.h> 79 #include <sys/stream.h> 80 #include <sys/strlog.h> 81 #include <sys/strsubr.h> 82 #include <sys/cmn_err.h> 83 #include <sys/cpu.h> 84 #include <sys/kmem.h> 85 #include <sys/conf.h> 86 #include <sys/ddi.h> 87 #include <sys/sunddi.h> 88 #include <sys/ksynch.h> 89 #include <sys/stat.h> 90 #include <sys/kstat.h> 91 #include <sys/vtrace.h> 92 #include <sys/strsun.h> 93 #include <sys/varargs.h> 94 #include <sys/modctl.h> 95 #include <sys/thread.h> 96 #include <sys/var.h> 97 #include <sys/proc.h> 98 #include <inet/common.h> 99 #include <netinet/ip6.h> 100 #include <inet/ip.h> 101 #include <inet/arp.h> 102 #include <inet/mi.h> 103 #include <inet/nd.h> 104 #include <sys/dlpi.h> 105 #include <sys/ethernet.h> 106 #include <sys/file.h> 107 #include <sys/syslog.h> 108 #include <sys/disp.h> 109 #include <sys/taskq.h> 110 111 /* 112 * Leadville includes 113 */ 114 115 #include <sys/fibre-channel/fc.h> 116 #include <sys/fibre-channel/impl/fc_ulpif.h> 117 #include <sys/fibre-channel/ulp/fcip.h> 118 119 /* 120 * TNF Probe/trace facility include 121 */ 122 #if defined(lint) || defined(FCIP_TNF_ENABLED) 123 #include <sys/tnf_probe.h> 124 #endif 125 126 #define FCIP_ESBALLOC 127 128 /* 129 * Function prototypes 130 */ 131 132 /* standard loadable modules entry points */ 133 static int fcip_attach(dev_info_t *, ddi_attach_cmd_t); 134 static int fcip_detach(dev_info_t *, ddi_detach_cmd_t); 135 static void fcip_dodetach(struct fcipstr *slp); 136 static int fcip_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, 137 void *arg, void **result); 138 139 140 /* streams specific */ 141 static void fcip_setipq(struct fcip *fptr); 142 static int fcip_wput(queue_t *, mblk_t *); 143 static int fcip_wsrv(queue_t *); 144 static void fcip_proto(queue_t *, mblk_t *); 145 static void 
fcip_ioctl(queue_t *, mblk_t *); 146 static int fcip_open(queue_t *wq, dev_t *devp, int flag, 147 int sflag, cred_t *credp); 148 static int fcip_close(queue_t *rq, int flag, int otyp, cred_t *credp); 149 static int fcip_start(queue_t *wq, mblk_t *mp, struct fcip *fptr, 150 struct fcip_dest *fdestp, int flags); 151 static void fcip_sendup(struct fcip *fptr, mblk_t *mp, 152 struct fcipstr *(*acceptfunc)()); 153 static struct fcipstr *fcip_accept(struct fcipstr *slp, struct fcip *fptr, 154 int type, la_wwn_t *dhostp); 155 static mblk_t *fcip_addudind(struct fcip *fptr, mblk_t *mp, 156 fcph_network_hdr_t *nhdr, int type); 157 static int fcip_setup_mac_addr(struct fcip *fptr); 158 static void fcip_kstat_init(struct fcip *fptr); 159 static int fcip_stat_update(kstat_t *, int); 160 161 162 /* dlpi specific */ 163 static void fcip_spareq(queue_t *wq, mblk_t *mp); 164 static void fcip_pareq(queue_t *wq, mblk_t *mp); 165 static void fcip_ubreq(queue_t *wq, mblk_t *mp); 166 static void fcip_breq(queue_t *wq, mblk_t *mp); 167 static void fcip_dreq(queue_t *wq, mblk_t *mp); 168 static void fcip_areq(queue_t *wq, mblk_t *mp); 169 static void fcip_udreq(queue_t *wq, mblk_t *mp); 170 static void fcip_ireq(queue_t *wq, mblk_t *mp); 171 static void fcip_dl_ioc_hdr_info(queue_t *wq, mblk_t *mp); 172 173 174 /* solaris sundry, DR/CPR etc */ 175 static int fcip_cache_constructor(void *buf, void *arg, int size); 176 static void fcip_cache_destructor(void *buf, void *size); 177 static int fcip_handle_suspend(fcip_port_info_t *fport, fc_detach_cmd_t cmd); 178 static int fcip_handle_resume(fcip_port_info_t *fport, 179 fc_ulp_port_info_t *port_info, fc_attach_cmd_t cmd); 180 static fcip_port_info_t *fcip_softstate_free(fcip_port_info_t *fport); 181 static int fcip_port_attach_handler(struct fcip *fptr); 182 183 184 /* 185 * ulp - transport interface function prototypes 186 */ 187 static int fcip_port_attach(opaque_t ulp_handle, fc_ulp_port_info_t *, 188 fc_attach_cmd_t cmd, uint32_t sid); 
189 static int fcip_port_detach(opaque_t ulp_handle, fc_ulp_port_info_t *, 190 fc_detach_cmd_t cmd); 191 static int fcip_port_ioctl(opaque_t ulp_handle, opaque_t port_handle, 192 dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval, 193 uint32_t claimed); 194 static void fcip_statec_cb(opaque_t ulp_handle, opaque_t phandle, 195 uint32_t port_state, uint32_t port_top, fc_portmap_t changelist[], 196 uint32_t listlen, uint32_t sid); 197 static int fcip_els_cb(opaque_t ulp_handle, opaque_t phandle, 198 fc_unsol_buf_t *buf, uint32_t claimed); 199 static int fcip_data_cb(opaque_t ulp_handle, opaque_t phandle, 200 fc_unsol_buf_t *payload, uint32_t claimed); 201 202 203 /* Routing table specific */ 204 static void fcip_handle_topology(struct fcip *fptr); 205 static int fcip_init_port(struct fcip *fptr); 206 struct fcip_routing_table *fcip_lookup_rtable(struct fcip *fptr, 207 la_wwn_t *pwwn, int matchflag); 208 static void fcip_rt_update(struct fcip *fptr, fc_portmap_t *devlist, 209 uint32_t listlen); 210 static void fcip_rt_flush(struct fcip *fptr); 211 static void fcip_rte_remove_deferred(void *arg); 212 static int fcip_do_plogi(struct fcip *fptr, struct fcip_routing_table *frp); 213 214 215 /* dest table specific */ 216 static struct fcip_dest *fcip_get_dest(struct fcip *fptr, 217 la_wwn_t *dlphys); 218 static struct fcip_dest *fcip_add_dest(struct fcip *fptr, 219 struct fcip_routing_table *frp); 220 static int fcip_dest_add_broadcast_entry(struct fcip *fptr, int new_flag); 221 static uint32_t fcip_get_broadcast_did(struct fcip *fptr); 222 static void fcip_cleanup_dest(struct fcip *fptr); 223 224 225 /* helper functions */ 226 static fcip_port_info_t *fcip_get_port(opaque_t phandle); 227 static int fcip_wwn_compare(la_wwn_t *wwn1, la_wwn_t *wwn2, int flag); 228 static void fcip_ether_to_str(struct ether_addr *e, caddr_t s); 229 static int fcip_port_get_num_pkts(struct fcip *fptr); 230 static int fcip_check_port_busy(struct fcip *fptr); 231 static void 
fcip_check_remove_minor_node(void); 232 static int fcip_set_wwn(la_wwn_t *pwwn); 233 static int fcip_plogi_in_progress(struct fcip *fptr); 234 static int fcip_check_port_exists(struct fcip *fptr); 235 static int fcip_is_supported_fc_topology(int fc_topology); 236 237 238 /* pkt specific */ 239 static fcip_pkt_t *fcip_pkt_alloc(struct fcip *fptr, mblk_t *bp, 240 int flags, int datalen); 241 static void fcip_pkt_free(struct fcip_pkt *fcip_pkt, int flags); 242 static fcip_pkt_t *fcip_ipkt_alloc(struct fcip *fptr, int cmdlen, 243 int resplen, opaque_t pd, int flags); 244 static void fcip_ipkt_free(fcip_pkt_t *fcip_pkt); 245 static void fcip_ipkt_callback(fc_packet_t *fc_pkt); 246 static void fcip_free_pkt_dma(fcip_pkt_t *fcip_pkt); 247 static void fcip_pkt_callback(fc_packet_t *fc_pkt); 248 static void fcip_init_unicast_pkt(fcip_pkt_t *fcip_pkt, fc_portid_t sid, 249 fc_portid_t did, void (*comp) ()); 250 static int fcip_transport(fcip_pkt_t *fcip_pkt); 251 static void fcip_pkt_timeout(void *arg); 252 static void fcip_timeout(void *arg); 253 static void fcip_fdestp_enqueue_pkt(struct fcip_dest *fdestp, 254 fcip_pkt_t *fcip_pkt); 255 static int fcip_fdestp_dequeue_pkt(struct fcip_dest *fdestp, 256 fcip_pkt_t *fcip_pkt); 257 static int fcip_sendup_constructor(void *buf, void *arg, int flags); 258 static void fcip_sendup_thr(void *arg); 259 static int fcip_sendup_alloc_enque(struct fcip *ftpr, mblk_t *mp, 260 struct fcipstr *(*f)()); 261 262 /* 263 * zero copy inbound data handling 264 */ 265 #ifdef FCIP_ESBALLOC 266 static void fcip_ubfree(char *arg); 267 #endif /* FCIP_ESBALLOC */ 268 269 #if !defined(FCIP_ESBALLOC) 270 static void *fcip_allocb(size_t size, uint_t pri); 271 #endif 272 273 274 /* FCIP FARP support functions */ 275 static struct fcip_dest *fcip_do_farp(struct fcip *fptr, la_wwn_t *pwwn, 276 char *ip_addr, size_t ip_addr_len, int flags); 277 static void fcip_init_broadcast_pkt(fcip_pkt_t *fcip_pkt, void (*comp) (), 278 int is_els); 279 static int 
fcip_handle_farp_request(struct fcip *fptr, la_els_farp_t *fcmd); 280 static int fcip_handle_farp_response(struct fcip *fptr, la_els_farp_t *fcmd); 281 static void fcip_cache_arp_broadcast(struct fcip *ftpr, fc_unsol_buf_t *buf); 282 static void fcip_port_ns(void *arg); 283 284 #ifdef DEBUG 285 286 #include <sys/debug.h> 287 288 #define FCIP_DEBUG_DEFAULT 0x1 289 #define FCIP_DEBUG_ATTACH 0x2 290 #define FCIP_DEBUG_INIT 0x4 291 #define FCIP_DEBUG_DETACH 0x8 292 #define FCIP_DEBUG_DLPI 0x10 293 #define FCIP_DEBUG_ELS 0x20 294 #define FCIP_DEBUG_DOWNSTREAM 0x40 295 #define FCIP_DEBUG_UPSTREAM 0x80 296 #define FCIP_DEBUG_MISC 0x100 297 298 #define FCIP_DEBUG_STARTUP (FCIP_DEBUG_ATTACH|FCIP_DEBUG_INIT) 299 #define FCIP_DEBUG_DATAOUT (FCIP_DEBUG_DLPI|FCIP_DEBUG_DOWNSTREAM) 300 #define FCIP_DEBUG_DATAIN (FCIP_DEBUG_ELS|FCIP_DEBUG_UPSTREAM) 301 302 static int fcip_debug = FCIP_DEBUG_DEFAULT; 303 304 #define FCIP_DEBUG(level, args) \ 305 if (fcip_debug & (level)) cmn_err args; 306 307 #else /* DEBUG */ 308 309 #define FCIP_DEBUG(level, args) /* do nothing */ 310 311 #endif /* DEBUG */ 312 313 #define KIOIP KSTAT_INTR_PTR(fcip->fcip_intrstats) 314 315 /* 316 * Endian independent ethernet to WWN copy 317 */ 318 #define ether_to_wwn(E, W) \ 319 bzero((void *)(W), sizeof (la_wwn_t)); \ 320 bcopy((void *)(E), (void *)&((W)->raw_wwn[2]), ETHERADDRL); \ 321 (W)->raw_wwn[0] |= 0x10 322 323 /* 324 * wwn_to_ether : Endian independent, copies a WWN to struct ether_addr. 325 * The args to the macro are pointers to WWN and ether_addr structures 326 */ 327 #define wwn_to_ether(W, E) \ 328 bcopy((void *)&((W)->raw_wwn[2]), (void *)E, ETHERADDRL) 329 330 /* 331 * The module_info structure contains identification and limit values. 332 * All queues associated with a certain driver share the same module_info 333 * structures. This structure defines the characteristics of that driver/ 334 * module's queues. The module name must be unique. The max and min packet 335 * sizes limit the no. 
 * of characters in M_DATA messages. The Hi and Lo
 * water marks are for flow control when a module has a service procedure.
 */
static struct module_info fcipminfo = {
	FCIPIDNUM,	/* mi_idnum : Module ID num */
	FCIPNAME,	/* mi_idname: Module Name */
	FCIPMINPSZ,	/* mi_minpsz: Min packet size */
	FCIPMAXPSZ,	/* mi_maxpsz: Max packet size */
	FCIPHIWAT,	/* mi_hiwat : High water mark */
	FCIPLOWAT	/* mi_lowat : Low water mark */
};

/*
 * The qinit structures contain the module put, service, open and close
 * procedure pointers. All modules and drivers with the same streamtab
 * file (i.e same fmodsw or cdevsw entry points) point to the same
 * upstream (read) and downstream (write) qinit structs.
 */

/* read side: no put/service — inbound data is sent up from FC callbacks */
static struct qinit fcip_rinit = {
	NULL,		/* qi_putp */
	NULL,		/* qi_srvp */
	fcip_open,	/* qi_qopen */
	fcip_close,	/* qi_qclose */
	NULL,		/* qi_qadmin */
	&fcipminfo,	/* qi_minfo */
	NULL		/* qi_mstat */
};

/* write side: open/close live on the read side per STREAMS convention */
static struct qinit fcip_winit = {
	fcip_wput,	/* qi_putp */
	fcip_wsrv,	/* qi_srvp */
	NULL,		/* qi_qopen */
	NULL,		/* qi_qclose */
	NULL,		/* qi_qadmin */
	&fcipminfo,	/* qi_minfo */
	NULL		/* qi_mstat */
};

/*
 * streamtab contains pointers to the read and write qinit structures
 */

static struct streamtab fcip_info = {
	&fcip_rinit,	/* st_rdinit */
	&fcip_winit,	/* st_wrinit */
	NULL,		/* st_muxrinit */
	NULL,		/* st_muxwrinit */
};

/* character/block entry points: all nodev — access is via STREAMS only */
static struct cb_ops fcip_cb_ops = {
	nodev,				/* open */
	nodev,				/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	nodev,				/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* poll */
	ddi_prop_op,			/* cb_prop_op */
	&fcip_info,			/* streamtab */
	D_MP | D_HOTPLUG,		/* Driver compatibility flag */
	CB_REV,				/* rev */
	nodev,				/* int (*cb_aread)() */
	nodev				/* int (*cb_awrite)() */
};

/*
 * autoconfiguration routines.
 */
static struct dev_ops fcip_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt */
	fcip_getinfo,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	fcip_attach,		/* attach */
	fcip_detach,		/* detach */
	nodev,			/* RESET */
	&fcip_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	ddi_power		/* power management */
};

#define	FCIP_VERSION	"1.61"
#define	FCIP_NAME	"SunFC FCIP v" FCIP_VERSION

#define	PORT_DRIVER	"fp"

/* zero-filled array allocation of "number" structs; always sleeps */
#define	GETSTRUCT(struct, number)	\
	((struct *)kmem_zalloc((size_t)(sizeof (struct) * (number)), \
	KM_SLEEP))

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module - driver */
	FCIP_NAME,		/* Name of module */
	&fcip_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};


/*
 * Now for some global statics
 */
static uint32_t fcip_ub_nbufs = FCIP_UB_NBUFS;
static uint32_t fcip_ub_size = FCIP_UB_SIZE;
static int fcip_pkt_ttl_ticks = FCIP_PKT_TTL;
static int fcip_tick_incr = 1;
static int fcip_wait_cmds = FCIP_WAIT_CMDS;
static int fcip_num_attaching = 0;	/* ports currently mid-attach */
static int fcip_port_attach_pending = 0; /* blocks _fini until 1st attach cb */
static int fcip_create_nodes_on_demand = 1;	/* keep it similar to fcp */
static int fcip_cache_on_arp_broadcast = 0;
static int fcip_farp_supported = 0;
static int fcip_minor_node_created = 0;

/*
 * Supported FCAs
 */
#define	QLC_PORT_1_ID_BITS	0x100
#define	QLC_PORT_2_ID_BITS	0x101
#define	QLC_PORT_NAA		0x2
#define	QLC_MODULE_NAME		"qlc"
/* true when the parent nexus of port_dip is the qlc driver */
#define	IS_QLC_PORT(port_dip)	\
	(strcmp(ddi_driver_name(ddi_get_parent((port_dip))),\
	QLC_MODULE_NAME) == 0)


/*
 * fcip softstate structures head.
 */

static void *fcip_softp = NULL;

/*
 * linked list of active (inuse) driver streams
 */

static int fcip_num_instances = 0;
static dev_info_t *fcip_module_dip = (dev_info_t *)0;


/*
 * Ethernet broadcast address: Broadcast addressing in IP over fibre
 * channel should be the IEEE ULA (also the low 6 bytes of the Port WWN).
 *
 * The broadcast addressing varies for differing topologies a node may be in:
 *	- On a private loop the ARP broadcast is a class 3 sequence sent
 *	  using OPNfr (Open Broadcast Replicate primitive) followed by
 *	  the ARP frame to D_ID 0xFFFFFF
 *
 *	- On a public Loop the broadcast sequence is sent to AL_PA 0x00
 *	  (no OPNfr primitive).
 *
 *	- For direct attach and point to point topologies we just send
 *	  the frame to D_ID 0xFFFFFF
 *
 * For public loop the handling would probably be different - for now
 * I'll just declare this struct - It can be deleted if not necessary.
 *
 */


/*
 * DL_INFO_ACK template for the fcip module. The dl_info_ack_t structure is
 * returned as a part of an DL_INFO_ACK message which is a M_PCPROTO message
 * returned in response to a DL_INFO_REQ message sent to us from a DLS user
 * Let us fake an ether header as much as possible.
 *
 * dl_addr_length is the Provider's DLSAP addr which is SAP addr +
 *                Physical addr of the provider. We set this to
 *                ushort_t + sizeof (la_wwn_t) for Fibre Channel ports.
 * dl_mac_type    Lets just use DL_ETHER - we can try using DL_IPFC, a new
 *                dlpi.h define later.
 * dl_sap_length  -2 indicating the SAP address follows the Physical addr
 *                component in the DLSAP addr.
 * dl_service_mode: DLCLDS - connectionless data link service.
 *
 */

static dl_info_ack_t fcip_infoack = {
	DL_INFO_ACK,	/* dl_primitive */
	FCIPMTU,	/* dl_max_sdu */
	0,		/* dl_min_sdu */
	FCIPADDRL,	/* dl_addr_length */
	DL_ETHER,	/* dl_mac_type */
	0,		/* dl_reserved */
	0,		/* dl_current_state */
	-2,		/* dl_sap_length */
	DL_CLDLS,	/* dl_service_mode */
	0,		/* dl_qos_length */
	0,		/* dl_qos_offset */
	0,		/* dl_range_length */
	0,		/* dl_range_offset */
	DL_STYLE2,	/* dl_provider_style */
	sizeof (dl_info_ack_t),	/* dl_addr_offset */
	DL_VERSION_2,	/* dl_version */
	ETHERADDRL,	/* dl_brdcst_addr_length */
	sizeof (dl_info_ack_t) + FCIPADDRL,	/* dl_brdcst_addr_offset */
	0		/* dl_growth */
};

/*
 * FCIP broadcast address definition.
 */
static struct ether_addr fcipnhbroadcastaddr = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};

/*
 * RFC2625 requires the broadcast ARP address in the ARP data payload to
 * be set to 0x00 00 00 00 00 00 for ARP broadcast packets
 */
static struct ether_addr fcip_arpbroadcast_addr = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};


/*
 * NOTE(review): the trailing semicolon in this macro yields a double
 * semicolon at call sites written "ether_bcopy(a, b);" and would break
 * an un-braced if/else — confirm all call sites before removing it.
 */
#define	ether_bcopy(src, dest)	bcopy((src), (dest), ETHERADDRL);

/*
 * global kernel locks
 */
static kcondvar_t fcip_global_cv;
static kmutex_t fcip_global_mutex;

/*
 * fctl external defines
 */
extern int fc_ulp_add(fc_ulp_modinfo_t *);

/*
 * fctl data structures
 */

#define	FCIP_REV	0x07

/* linked list of port info structures */
static fcip_port_info_t *fcip_port_head = NULL;

/* linked list of fcip structures */
static struct fcipstr *fcipstrup = NULL;
static krwlock_t fcipstruplock;


/*
 * Module information structure. This structure gives the FC Transport modules
 * information about an ULP that registers with it.
 */
static fc_ulp_modinfo_t fcip_modinfo = {
	0,			/* for xref checks?
 */
	FCTL_ULP_MODREV_4,	/* FCIP revision */
	FC_TYPE_IS8802_SNAP,	/* type 5 for SNAP encapsulated datagrams */
	FCIP_NAME,		/* module name as in the modldrv struct */
	0x0,			/* get all statec callbacks for now */
	fcip_port_attach,	/* port attach callback */
	fcip_port_detach,	/* port detach callback */
	fcip_port_ioctl,	/* port ioctl callback */
	fcip_els_cb,		/* els callback */
	fcip_data_cb,		/* data callback */
	fcip_statec_cb		/* state change callback */
};


/*
 * Solaris 9 and up, the /kernel/drv/fp.conf file will have the following entry
 *
 * ddi-forceattach=1;
 *
 * This will ensure that fp is loaded at bootup. No additional checks are needed
 */

/*
 * Loadable-module entry point. Initializes the global locks, registers
 * fcip with the FC transport (fc_ulp_add), sets up per-instance soft
 * state and installs the module. Returns 0 on success or an errno
 * (EEXIST/ENODEV/...) on failure, with all partially-initialized state
 * torn down in reverse order.
 */
int
_init(void)
{
	int rval;

	FCIP_TNF_LOAD();

	/*
	 * Initialize the mutexs used by port attach and other callbacks.
	 * The transport can call back into our port_attach_callback
	 * routine even before _init() completes and bad things can happen.
	 */
	mutex_init(&fcip_global_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&fcip_global_cv, NULL, CV_DRIVER, NULL);
	rw_init(&fcipstruplock, NULL, RW_DRIVER, NULL);

	/* keep _fini() out until the first port attach callback completes */
	mutex_enter(&fcip_global_mutex);
	fcip_port_attach_pending = 1;
	mutex_exit(&fcip_global_mutex);

	/*
	 * Now attempt to register fcip with the transport.
	 * If fc_ulp_add fails, fcip module will not be loaded.
	 */
	rval = fc_ulp_add(&fcip_modinfo);
	if (rval != FC_SUCCESS) {
		mutex_destroy(&fcip_global_mutex);
		cv_destroy(&fcip_global_cv);
		rw_destroy(&fcipstruplock);
		/* map transport status codes to errno values for modload */
		switch (rval) {
		case FC_ULP_SAMEMODULE:
			FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
			    "!fcip: module is already registered with"
			    " transport"));
			rval = EEXIST;
			break;
		case FC_ULP_SAMETYPE:
			FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
			    "!fcip: Another module of the same ULP type 0x%x"
			    " is already registered with the transport",
			    fcip_modinfo.ulp_type));
			rval = EEXIST;
			break;
		case FC_BADULP:
			FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
			    "!fcip: Current fcip version 0x%x does not match"
			    " fctl version",
			    fcip_modinfo.ulp_rev));
			rval = ENODEV;
			break;
		default:
			FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
			    "!fcip: fc_ulp_add failed with status 0x%x", rval));
			rval = ENODEV;
			break;
		}
		FCIP_TNF_UNLOAD(&modlinkage);
		return (rval);
	}

	if ((rval = ddi_soft_state_init(&fcip_softp, sizeof (struct fcip),
	    FCIP_NUM_INSTANCES)) != 0) {
		mutex_destroy(&fcip_global_mutex);
		cv_destroy(&fcip_global_cv);
		rw_destroy(&fcipstruplock);
		(void) fc_ulp_remove(&fcip_modinfo);
		FCIP_TNF_UNLOAD(&modlinkage);
		return (rval);
	}

	if ((rval = mod_install(&modlinkage)) != 0) {
		/* undo everything done above, in reverse order */
		FCIP_TNF_UNLOAD(&modlinkage);
		(void) fc_ulp_remove(&fcip_modinfo);
		mutex_destroy(&fcip_global_mutex);
		cv_destroy(&fcip_global_cv);
		rw_destroy(&fcipstruplock);
		ddi_soft_state_fini(&fcip_softp);
	}
	return (rval);
}

/*
 * Unload the port driver if this was the only ULP loaded and then
 * deregister with the transport.
 *
 * Returns EBUSY while any port attach is in flight or still pending,
 * the mod_remove(9F) status if that fails, and 0 otherwise.
 */
int
_fini(void)
{
	int rval;
	int rval1;

	/*
	 * Do not permit the module to be unloaded before a port
	 * attach callback has happened.
	 */
	mutex_enter(&fcip_global_mutex);
	if (fcip_num_attaching || fcip_port_attach_pending) {
		mutex_exit(&fcip_global_mutex);
		return (EBUSY);
	}
	mutex_exit(&fcip_global_mutex);

	if ((rval = mod_remove(&modlinkage)) != 0) {
		return (rval);
	}

	/*
	 * unregister with the transport layer
	 */
	rval1 = fc_ulp_remove(&fcip_modinfo);

	/*
	 * If the ULP was not registered with the transport, init should
	 * have failed. If transport has no knowledge of our existence
	 * we should simply bail out and succeed
	 *
	 * NOTE(review): in DEBUG builds the code below returns ENODEV
	 * even though mod_remove() already succeeded above; the module
	 * is gone while the caller sees a failure — confirm intent.
	 */
#ifdef DEBUG
	if (rval1 == FC_BADULP) {
		FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
		    "fcip: ULP was never registered with the transport"));
		rval = ENODEV;
	} else if (rval1 == FC_BADTYPE) {
		FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
		    "fcip: No ULP of this type 0x%x was registered with "
		    "transport", fcip_modinfo.ulp_type));
		rval = ENODEV;
	}
#endif /* DEBUG */

	mutex_destroy(&fcip_global_mutex);
	rw_destroy(&fcipstruplock);
	cv_destroy(&fcip_global_cv);
	ddi_soft_state_fini(&fcip_softp);

	FCIP_TNF_UNLOAD(&modlinkage);

	return (rval);
}

/*
 * Info about this loadable module
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * The port attach callback is invoked by the port driver when a FCA
 * port comes online and binds with the transport layer. The transport
 * then callsback into all ULP modules registered with it.
 * The Port attach
 * call back will also provide the ULP module with the Port's WWN and S_ID
 *
 * cmd is one of FC_CMD_ATTACH (new port), FC_CMD_POWER_UP or
 * FC_CMD_RESUME (existing port returning); sid is the port's S_ID.
 * Returns FC_SUCCESS or FC_FAILURE.
 */
/* ARGSUSED */
static int
fcip_port_attach(opaque_t ulp_handle, fc_ulp_port_info_t *port_info,
    fc_attach_cmd_t cmd, uint32_t sid)
{
	int rval = FC_FAILURE;
	int instance;
	struct fcip *fptr;
	fcip_port_info_t *fport = NULL;
	fcip_port_info_t *cur_fport;
	fc_portid_t src_id;

	switch (cmd) {
	case FC_CMD_ATTACH: {
		la_wwn_t *ww_pn = NULL;
		/*
		 * It was determined that, as per spec, the lower 48 bits of
		 * the port-WWN will always be unique. This will make the MAC
		 * address (i.e the lower 48 bits of the WWN), that IP/ARP
		 * depend on, unique too. Hence we should be able to remove the
		 * restriction of attaching to only one of the ports of
		 * multi port FCAs.
		 *
		 * Earlier, fcip used to attach only to qlc module and fail
		 * silently for attach failures resulting from unknown FCAs or
		 * unsupported FCA ports. Now, we'll do no such checks.
		 */
		ww_pn = &port_info->port_pwwn;

		FCIP_TNF_PROBE_2((fcip_port_attach, "fcip io", /* CSTYLED */,
			tnf_string, msg, "port id bits",
			tnf_opaque, nport_id, ww_pn->w.nport_id));
		FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_NOTE,
		    "port id bits: 0x%x", ww_pn->w.nport_id));
		/*
		 * A port has come online
		 */
		mutex_enter(&fcip_global_mutex);
		fcip_num_instances++;
		fcip_num_attaching++;

		if (fcip_port_head == NULL) {
			/* OK to sleep here ? */
			fport = kmem_zalloc(sizeof (fcip_port_info_t),
			    KM_NOSLEEP);
			if (fport == NULL) {
				fcip_num_instances--;
				fcip_num_attaching--;
				ASSERT(fcip_num_attaching >= 0);
				mutex_exit(&fcip_global_mutex);
				rval = FC_FAILURE;
				cmn_err(CE_WARN, "!fcip(%d): port attach "
				    "failed: alloc failed",
				    ddi_get_instance(port_info->port_dip));
				goto done;
			}
			fcip_port_head = fport;
		} else {
			/*
			 * traverse the port list and also check for
			 * duplicate port attaches - Nothing wrong in being
			 * paranoid Heh Heh.
			 */
			cur_fport = fcip_port_head;
			while (cur_fport != NULL) {
				if (cur_fport->fcipp_handle ==
				    port_info->port_handle) {
					fcip_num_instances--;
					fcip_num_attaching--;
					ASSERT(fcip_num_attaching >= 0);
					mutex_exit(&fcip_global_mutex);
					FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_WARN,
					    "!fcip(%d): port already "
					    "attached!!", ddi_get_instance(
					    port_info->port_dip)));
					rval = FC_FAILURE;
					goto done;
				}
				cur_fport = cur_fport->fcipp_next;
			}
			fport = kmem_zalloc(sizeof (fcip_port_info_t),
			    KM_NOSLEEP);
			if (fport == NULL) {
				rval = FC_FAILURE;
				fcip_num_instances--;
				fcip_num_attaching--;
				ASSERT(fcip_num_attaching >= 0);
				mutex_exit(&fcip_global_mutex);
				cmn_err(CE_WARN, "!fcip(%d): port attach "
				    "failed: alloc failed",
				    ddi_get_instance(port_info->port_dip));
				goto done;
			}
			/* prepend the new port to the global list */
			fport->fcipp_next = fcip_port_head;
			fcip_port_head = fport;
		}

		mutex_exit(&fcip_global_mutex);

		/*
		 * now fill in the details about the port itself
		 */
		fport->fcipp_linkage = *port_info->port_linkage;
		fport->fcipp_handle = port_info->port_handle;
		fport->fcipp_dip = port_info->port_dip;
		fport->fcipp_topology = port_info->port_flags;
		fport->fcipp_pstate = port_info->port_state;
		fport->fcipp_naa = port_info->port_pwwn.w.naa_id;
		bcopy(&port_info->port_pwwn, &fport->fcipp_pwwn,
		    sizeof (la_wwn_t));
		bcopy(&port_info->port_nwwn, &fport->fcipp_nwwn,
		    sizeof (la_wwn_t));
		fport->fcipp_fca_pkt_size = port_info->port_fca_pkt_size;
		fport->fcipp_cmd_dma_attr = *port_info->port_cmd_dma_attr;
		fport->fcipp_resp_dma_attr = *port_info->port_resp_dma_attr;
		fport->fcipp_fca_acc_attr = *port_info->port_acc_attr;
		src_id.port_id = sid;
		src_id.priv_lilp_posit = 0;
		fport->fcipp_sid = src_id;

		/*
		 * allocate soft state for this instance
		 */
		instance = ddi_get_instance(fport->fcipp_dip);
		if (ddi_soft_state_zalloc(fcip_softp,
		    instance) != DDI_SUCCESS) {
			rval = FC_FAILURE;
			cmn_err(CE_WARN, "!fcip(%d): port attach failed: "
			    "soft state alloc failed", instance);
			goto failure;
		}

		fptr = ddi_get_soft_state(fcip_softp, instance);

		if (fptr == NULL) {
			rval = FC_FAILURE;
			cmn_err(CE_WARN, "!fcip(%d): port attach failed: "
			    "failure to get soft state", instance);
			goto failure;
		}

		/*
		 * initialize all mutexes and locks required for this module
		 */
		mutex_init(&fptr->fcip_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&fptr->fcip_ub_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&fptr->fcip_rt_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&fptr->fcip_dest_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&fptr->fcip_sendup_mutex, NULL, MUTEX_DRIVER, NULL);
		cv_init(&fptr->fcip_farp_cv, NULL, CV_DRIVER, NULL);
		cv_init(&fptr->fcip_sendup_cv, NULL, CV_DRIVER, NULL);
		cv_init(&fptr->fcip_ub_cv, NULL, CV_DRIVER, NULL);

		mutex_enter(&fptr->fcip_mutex);

		fptr->fcip_dip = fport->fcipp_dip;	/* parent's dip */
		fptr->fcip_instance = instance;
		fptr->fcip_ub_upstream = 0;

		if (FC_PORT_STATE_MASK(port_info->port_state) ==
		    FC_STATE_ONLINE) {
			fptr->fcip_port_state = FCIP_PORT_ONLINE;
			if (fptr->fcip_flags & FCIP_LINK_DOWN) {
				fptr->fcip_flags &= ~FCIP_LINK_DOWN;
			}
		} else {
			fptr->fcip_port_state = FCIP_PORT_OFFLINE;
		}

		fptr->fcip_flags |= FCIP_ATTACHING;
		fptr->fcip_port_info = fport;

		/*
		 * Extract our MAC addr from our port's WWN. The lower 48
		 * bits will be our MAC address
		 */
		wwn_to_ether(&fport->fcipp_nwwn, &fptr->fcip_macaddr);

		fport->fcipp_fcip = fptr;

		FCIP_DEBUG(FCIP_DEBUG_ATTACH,
		    (CE_NOTE, "fcipdest : 0x%lx, rtable : 0x%lx",
		    (long)(sizeof (fptr->fcip_dest)),
		    (long)(sizeof (fptr->fcip_rtable))));

		bzero(fptr->fcip_dest, sizeof (fptr->fcip_dest));
		bzero(fptr->fcip_rtable, sizeof (fptr->fcip_rtable));

		/*
		 * create a taskq to handle sundry jobs for the driver
		 * This way we can have jobs run in parallel
		 */
		fptr->fcip_tq = taskq_create("fcip_tasks",
		    FCIP_NUM_THREADS, MINCLSYSPRI, FCIP_MIN_TASKS,
		    FCIP_MAX_TASKS, TASKQ_PREPOPULATE);

		mutex_exit(&fptr->fcip_mutex);

		/*
		 * create a separate thread to handle all unsolicited
		 * callback handling. This is because unsolicited_callback
		 * can happen from an interrupt context and the upstream
		 * modules can put new messages right back in the same
		 * thread context. This usually works fine, but sometimes
		 * we may have to block to obtain the dest struct entries
		 * for some remote ports.
		 *
		 * NOTE(review): on thread_create failure this path goes to
		 * "done" rather than "failure", so the soft state, locks
		 * and taskq allocated above are not released here, and
		 * fcip_num_attaching is not decremented — verify whether
		 * cleanup is expected to happen elsewhere (e.g. detach).
		 */
		mutex_enter(&fptr->fcip_sendup_mutex);
		if (thread_create(NULL, DEFAULTSTKSZ,
		    (void (*)())fcip_sendup_thr, (caddr_t)fptr, 0, &p0,
		    TS_RUN, minclsyspri) == NULL) {
			mutex_exit(&fptr->fcip_sendup_mutex);
			cmn_err(CE_WARN,
			    "!unable to create fcip sendup thread for "
			    " instance: 0x%x", instance);
			rval = FC_FAILURE;
			goto done;
		}
		fptr->fcip_sendup_thr_initted = 1;
		fptr->fcip_sendup_head = fptr->fcip_sendup_tail = NULL;
		mutex_exit(&fptr->fcip_sendup_mutex);


		/* Let the attach handler do the rest */
		if (fcip_port_attach_handler(fptr) != FC_SUCCESS) {
			/*
			 * We have already cleaned up so return
			 */
			rval = FC_FAILURE;
			cmn_err(CE_WARN, "!fcip(%d): port attach failed",
			    instance);
			goto done;
		}

		FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_CONT,
		    "!fcip attach for port instance (0x%x) successful",
		    instance));

		rval = FC_SUCCESS;
		goto done;
	}
	case FC_CMD_POWER_UP:
	/* FALLTHROUGH */
	case FC_CMD_RESUME:
		/* find the previously attached port for this handle */
		mutex_enter(&fcip_global_mutex);
		fport = fcip_port_head;
		while (fport != NULL) {
			if (fport->fcipp_handle == port_info->port_handle) {
				break;
			}
			fport = fport->fcipp_next;
		}
		if (fport == NULL) {
			/* never attached: nothing to resume, succeed */
			rval = FC_SUCCESS;
			mutex_exit(&fcip_global_mutex);
			goto done;
		}
		rval = fcip_handle_resume(fport, port_info, cmd);
		mutex_exit(&fcip_global_mutex);
		goto done;

	default:
		FCIP_TNF_PROBE_2((fcip_port_attach, "fcip io", /* CSTYLED */,
			tnf_string, msg, "unknown command type",
			tnf_uint, cmd, cmd));
		FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_WARN,
		    "unknown cmd type 0x%x in port_attach", cmd));
		rval = FC_FAILURE;
		goto done;
	}

failure:
	/*
	 * NOTE(review): fcip_num_instances is not decremented here even
	 * though the instance never attached — confirm whether that
	 * counter is meant to be monotonic.
	 */
	if (fport) {
		mutex_enter(&fcip_global_mutex);
		fcip_num_attaching--;
		ASSERT(fcip_num_attaching >= 0);
		(void) fcip_softstate_free(fport);
		fcip_port_attach_pending = 0;
		mutex_exit(&fcip_global_mutex);
	}
	return (rval);

done:
	mutex_enter(&fcip_global_mutex);
	fcip_port_attach_pending = 0;
	mutex_exit(&fcip_global_mutex);
	return (rval);
}

/*
 * fcip_port_attach_handler : Completes the port attach operation after
 * the ulp_port_attach routine has completed its ground work. The job
 * of this function among other things is to obtain and handle topology
 * specifics, initialize a port, setup broadcast address entries in
 * the fcip tables etc. This routine cleans up behind itself on failures.
 * Returns FC_SUCCESS or FC_FAILURE.
 */
static int
fcip_port_attach_handler(struct fcip *fptr)
{
	fcip_port_info_t *fport = fptr->fcip_port_info;
	int rval = FC_FAILURE;

	ASSERT(fport != NULL);

	mutex_enter(&fcip_global_mutex);

	FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_NOTE,
	    "fcip module dip: %p instance: %d",
	    (void *)fcip_module_dip, ddi_get_instance(fptr->fcip_dip)));

	if (fcip_module_dip == NULL) {
		clock_t fcip_lbolt;

		fcip_lbolt = ddi_get_lbolt();
		/*
		 * we need to use the fcip devinfo for creating
		 * the clone device node, but the fcip attach
		 * (from its conf file entry claiming to be a
		 * child of pseudo) may not have happened yet.
1086 * wait here for 10 seconds and fail port attach 1087 * if the fcip devinfo is not attached yet 1088 */ 1089 fcip_lbolt += drv_usectohz(FCIP_INIT_DELAY); 1090 1091 FCIP_DEBUG(FCIP_DEBUG_ATTACH, 1092 (CE_WARN, "cv_timedwait lbolt %lx", fcip_lbolt)); 1093 1094 (void) cv_timedwait(&fcip_global_cv, &fcip_global_mutex, 1095 fcip_lbolt); 1096 1097 if (fcip_module_dip == NULL) { 1098 mutex_exit(&fcip_global_mutex); 1099 1100 FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_WARN, 1101 "fcip attach did not happen")); 1102 goto port_attach_cleanup; 1103 } 1104 } 1105 1106 if ((!fcip_minor_node_created) && 1107 fcip_is_supported_fc_topology(fport->fcipp_topology)) { 1108 /* 1109 * Checking for same topologies which are considered valid 1110 * by fcip_handle_topology(). Dont create a minor node if 1111 * nothing is hanging off the FC port. 1112 */ 1113 if (ddi_create_minor_node(fcip_module_dip, "fcip", S_IFCHR, 1114 ddi_get_instance(fptr->fcip_dip), DDI_PSEUDO, 1115 CLONE_DEV) == DDI_FAILURE) { 1116 mutex_exit(&fcip_global_mutex); 1117 FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_WARN, 1118 "failed to create minor node for fcip(%d)", 1119 ddi_get_instance(fptr->fcip_dip))); 1120 goto port_attach_cleanup; 1121 } 1122 fcip_minor_node_created++; 1123 } 1124 mutex_exit(&fcip_global_mutex); 1125 1126 /* 1127 * initialize port for traffic 1128 */ 1129 if (fcip_init_port(fptr) != FC_SUCCESS) { 1130 /* fcip_init_port has already cleaned up its stuff */ 1131 1132 mutex_enter(&fcip_global_mutex); 1133 1134 if ((fcip_num_instances == 1) && 1135 (fcip_minor_node_created == 1)) { 1136 /* Remove minor node iff this is the last instance */ 1137 ddi_remove_minor_node(fcip_module_dip, NULL); 1138 } 1139 1140 mutex_exit(&fcip_global_mutex); 1141 1142 goto port_attach_cleanup; 1143 } 1144 1145 mutex_enter(&fptr->fcip_mutex); 1146 fptr->fcip_flags &= ~FCIP_ATTACHING; 1147 fptr->fcip_flags |= FCIP_INITED; 1148 fptr->fcip_timeout_ticks = 0; 1149 1150 /* 1151 * start the timeout threads 1152 */ 1153 fptr->fcip_timeout_id 
= timeout(fcip_timeout, fptr, 1154 drv_usectohz(1000000)); 1155 1156 mutex_exit(&fptr->fcip_mutex); 1157 mutex_enter(&fcip_global_mutex); 1158 fcip_num_attaching--; 1159 ASSERT(fcip_num_attaching >= 0); 1160 mutex_exit(&fcip_global_mutex); 1161 rval = FC_SUCCESS; 1162 return (rval); 1163 1164 port_attach_cleanup: 1165 mutex_enter(&fcip_global_mutex); 1166 (void) fcip_softstate_free(fport); 1167 fcip_num_attaching--; 1168 ASSERT(fcip_num_attaching >= 0); 1169 mutex_exit(&fcip_global_mutex); 1170 rval = FC_FAILURE; 1171 return (rval); 1172 } 1173 1174 1175 /* 1176 * Handler for DDI_RESUME operations. Port must be ready to restart IP 1177 * traffic on resume 1178 */ 1179 static int 1180 fcip_handle_resume(fcip_port_info_t *fport, fc_ulp_port_info_t *port_info, 1181 fc_attach_cmd_t cmd) 1182 { 1183 int rval = FC_SUCCESS; 1184 struct fcip *fptr = fport->fcipp_fcip; 1185 struct fcipstr *tslp; 1186 int index; 1187 1188 1189 ASSERT(fptr != NULL); 1190 1191 mutex_enter(&fptr->fcip_mutex); 1192 1193 if (cmd == FC_CMD_POWER_UP) { 1194 fptr->fcip_flags &= ~(FCIP_POWER_DOWN); 1195 if (fptr->fcip_flags & FCIP_SUSPENDED) { 1196 mutex_exit(&fptr->fcip_mutex); 1197 return (FC_SUCCESS); 1198 } 1199 } else if (cmd == FC_CMD_RESUME) { 1200 fptr->fcip_flags &= ~(FCIP_SUSPENDED); 1201 } else { 1202 mutex_exit(&fptr->fcip_mutex); 1203 return (FC_FAILURE); 1204 } 1205 1206 /* 1207 * set the current port state and topology 1208 */ 1209 fport->fcipp_topology = port_info->port_flags; 1210 fport->fcipp_pstate = port_info->port_state; 1211 1212 rw_enter(&fcipstruplock, RW_READER); 1213 for (tslp = fcipstrup; tslp; tslp = tslp->sl_nextp) { 1214 if (tslp->sl_fcip == fptr) { 1215 break; 1216 } 1217 } 1218 rw_exit(&fcipstruplock); 1219 1220 /* 1221 * No active streams on this port 1222 */ 1223 if (tslp == NULL) { 1224 rval = FC_SUCCESS; 1225 goto done; 1226 } 1227 1228 mutex_enter(&fptr->fcip_rt_mutex); 1229 for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) { 1230 struct fcip_routing_table 
*frp; 1231 1232 frp = fptr->fcip_rtable[index]; 1233 while (frp) { 1234 uint32_t did; 1235 /* 1236 * Mark the broadcast RTE available again. It 1237 * was marked SUSPENDED during SUSPEND. 1238 */ 1239 did = fcip_get_broadcast_did(fptr); 1240 if (frp->fcipr_d_id.port_id == did) { 1241 frp->fcipr_state = 0; 1242 index = FCIP_RT_HASH_ELEMS; 1243 break; 1244 } 1245 frp = frp->fcipr_next; 1246 } 1247 } 1248 mutex_exit(&fptr->fcip_rt_mutex); 1249 1250 /* 1251 * fcip_handle_topology will update the port entries in the 1252 * routing table. 1253 * fcip_handle_topology also takes care of resetting the 1254 * fcipr_state field in the routing table structure. The entries 1255 * were set to RT_INVALID during suspend. 1256 */ 1257 fcip_handle_topology(fptr); 1258 1259 done: 1260 /* 1261 * Restart the timeout thread 1262 */ 1263 fptr->fcip_timeout_id = timeout(fcip_timeout, fptr, 1264 drv_usectohz(1000000)); 1265 mutex_exit(&fptr->fcip_mutex); 1266 return (rval); 1267 } 1268 1269 1270 /* 1271 * Insert a destination port entry into the routing table for 1272 * this port 1273 */ 1274 static void 1275 fcip_rt_update(struct fcip *fptr, fc_portmap_t *devlist, uint32_t listlen) 1276 { 1277 struct fcip_routing_table *frp; 1278 fcip_port_info_t *fport = fptr->fcip_port_info; 1279 int hash_bucket, i; 1280 fc_portmap_t *pmap; 1281 char wwn_buf[20]; 1282 1283 FCIP_TNF_PROBE_2((fcip_rt_update, "fcip io", /* CSTYLED */, 1284 tnf_string, msg, "enter", 1285 tnf_int, listlen, listlen)); 1286 1287 ASSERT(!mutex_owned(&fptr->fcip_mutex)); 1288 mutex_enter(&fptr->fcip_rt_mutex); 1289 1290 for (i = 0; i < listlen; i++) { 1291 pmap = &(devlist[i]); 1292 1293 frp = fcip_lookup_rtable(fptr, &(pmap->map_pwwn), 1294 FCIP_COMPARE_PWWN); 1295 /* 1296 * If an entry for a port in the devlist exists in the 1297 * in the per port routing table, make sure the data 1298 * is current. We need to do this irrespective of the 1299 * underlying port topology. 
1300 */ 1301 switch (pmap->map_type) { 1302 /* FALLTHROUGH */ 1303 case PORT_DEVICE_NOCHANGE: 1304 /* FALLTHROUGH */ 1305 case PORT_DEVICE_USER_LOGIN: 1306 /* FALLTHROUGH */ 1307 case PORT_DEVICE_CHANGED: 1308 /* FALLTHROUGH */ 1309 case PORT_DEVICE_NEW: 1310 if (frp == NULL) { 1311 goto add_new_entry; 1312 } else if (frp) { 1313 goto update_entry; 1314 } else { 1315 continue; 1316 } 1317 1318 case PORT_DEVICE_OLD: 1319 /* FALLTHROUGH */ 1320 case PORT_DEVICE_USER_LOGOUT: 1321 /* 1322 * Mark entry for removal from Routing Table if 1323 * one exists. Let the timeout thread actually 1324 * remove the entry after we've given up hopes 1325 * of the port ever showing up. 1326 */ 1327 if (frp) { 1328 uint32_t did; 1329 1330 /* 1331 * Mark the routing table as invalid to bail 1332 * the packets early that are in transit 1333 */ 1334 did = fptr->fcip_broadcast_did; 1335 if (frp->fcipr_d_id.port_id != did) { 1336 frp->fcipr_pd = NULL; 1337 frp->fcipr_state = FCIP_RT_INVALID; 1338 frp->fcipr_invalid_timeout = 1339 fptr->fcip_timeout_ticks + 1340 FCIP_RTE_TIMEOUT; 1341 } 1342 } 1343 continue; 1344 1345 default: 1346 FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_WARN, 1347 "unknown map flags in rt_update")); 1348 continue; 1349 } 1350 add_new_entry: 1351 ASSERT(frp == NULL); 1352 hash_bucket = FCIP_RT_HASH(pmap->map_pwwn.raw_wwn); 1353 1354 ASSERT(hash_bucket < FCIP_RT_HASH_ELEMS); 1355 1356 FCIP_TNF_PROBE_2((fcip_rt_update, "cfip io", /* CSTYLED */, 1357 tnf_string, msg, 1358 "add new entry", 1359 tnf_int, hashbucket, hash_bucket)); 1360 1361 frp = (struct fcip_routing_table *) 1362 kmem_zalloc(sizeof (struct fcip_routing_table), KM_SLEEP); 1363 /* insert at beginning of hash bucket */ 1364 frp->fcipr_next = fptr->fcip_rtable[hash_bucket]; 1365 fptr->fcip_rtable[hash_bucket] = frp; 1366 fc_wwn_to_str(&pmap->map_pwwn, wwn_buf); 1367 FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_NOTE, 1368 "added entry for pwwn %s and d_id 0x%x", 1369 wwn_buf, pmap->map_did.port_id)); 1370 update_entry: 1371 bcopy((void 
*)&pmap->map_pwwn, 1372 (void *)&frp->fcipr_pwwn, sizeof (la_wwn_t)); 1373 bcopy((void *)&pmap->map_nwwn, (void *)&frp->fcipr_nwwn, 1374 sizeof (la_wwn_t)); 1375 frp->fcipr_d_id = pmap->map_did; 1376 frp->fcipr_state = pmap->map_state; 1377 frp->fcipr_pd = pmap->map_pd; 1378 1379 /* 1380 * If there is no pd for a destination port that is not 1381 * a broadcast entry, the port is pretty much unusable - so 1382 * mark the port for removal so we can try adding back the 1383 * entry again. 1384 */ 1385 if ((frp->fcipr_pd == NULL) && 1386 (frp->fcipr_d_id.port_id != fptr->fcip_broadcast_did)) { 1387 frp->fcipr_state = PORT_DEVICE_INVALID; 1388 frp->fcipr_invalid_timeout = fptr->fcip_timeout_ticks + 1389 (FCIP_RTE_TIMEOUT / 2); 1390 } 1391 frp->fcipr_fca_dev = 1392 fc_ulp_get_fca_device(fport->fcipp_handle, pmap->map_did); 1393 1394 /* 1395 * login to the remote port. Don't worry about 1396 * plogi failures for now 1397 */ 1398 if (pmap->map_pd != NULL) { 1399 (void) fcip_do_plogi(fptr, frp); 1400 } else if (FC_TOP_EXTERNAL(fport->fcipp_topology)) { 1401 fc_wwn_to_str(&frp->fcipr_pwwn, wwn_buf); 1402 FCIP_DEBUG(FCIP_DEBUG_MISC, (CE_NOTE, 1403 "logging into pwwn %s, d_id 0x%x", 1404 wwn_buf, frp->fcipr_d_id.port_id)); 1405 (void) fcip_do_plogi(fptr, frp); 1406 } 1407 1408 FCIP_TNF_BYTE_ARRAY(fcip_rt_update, "fcip io", "detail", 1409 "new wwn in rt", pwwn, 1410 &frp->fcipr_pwwn, sizeof (la_wwn_t)); 1411 } 1412 mutex_exit(&fptr->fcip_rt_mutex); 1413 } 1414 1415 1416 /* 1417 * return a matching routing table entry for a given fcip instance 1418 */ 1419 struct fcip_routing_table * 1420 fcip_lookup_rtable(struct fcip *fptr, la_wwn_t *wwn, int matchflag) 1421 { 1422 struct fcip_routing_table *frp = NULL; 1423 int hash_bucket; 1424 1425 1426 FCIP_TNF_PROBE_1((fcip_lookup_rtable, "fcip io", /* CSTYLED */, 1427 tnf_string, msg, "enter")); 1428 FCIP_TNF_BYTE_ARRAY(fcip_lookup_rtable, "fcip io", "detail", 1429 "rtable lookup for", wwn, 1430 &wwn->raw_wwn, sizeof (la_wwn_t)); 1431 
FCIP_TNF_PROBE_2((fcip_lookup_rtable, "fcip io", /* CSTYLED */, 1432 tnf_string, msg, "match by", 1433 tnf_int, matchflag, matchflag)); 1434 1435 ASSERT(mutex_owned(&fptr->fcip_rt_mutex)); 1436 1437 hash_bucket = FCIP_RT_HASH(wwn->raw_wwn); 1438 frp = fptr->fcip_rtable[hash_bucket]; 1439 while (frp != NULL) { 1440 1441 FCIP_TNF_BYTE_ARRAY(fcip_lookup_rtable, "fcip io", "detail", 1442 "rtable entry", nwwn, 1443 &(frp->fcipr_nwwn.raw_wwn), sizeof (la_wwn_t)); 1444 1445 if (fcip_wwn_compare(&frp->fcipr_pwwn, wwn, matchflag) == 0) { 1446 break; 1447 } 1448 1449 frp = frp->fcipr_next; 1450 } 1451 FCIP_TNF_PROBE_2((fcip_lookup_rtable, "fcip io", /* CSTYLED */, 1452 tnf_string, msg, "lookup result", 1453 tnf_opaque, frp, frp)); 1454 return (frp); 1455 } 1456 1457 /* 1458 * Attach of fcip under pseudo. The actual setup of the interface 1459 * actually happens in fcip_port_attach on a callback from the 1460 * transport. The port_attach callback however can proceed only 1461 * after the devinfo for fcip has been created under pseudo 1462 */ 1463 static int 1464 fcip_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 1465 { 1466 switch ((int)cmd) { 1467 1468 case DDI_ATTACH: { 1469 ASSERT(fcip_module_dip == NULL); 1470 fcip_module_dip = dip; 1471 1472 /* 1473 * this call originates as a result of fcip's conf 1474 * file entry and will result in a fcip instance being 1475 * a child of pseudo. We should ensure here that the port 1476 * driver (fp) has been loaded and initted since we would 1477 * never get a port attach callback without fp being loaded. 1478 * If we are unable to succesfully load and initalize fp - 1479 * just fail this attach. 
1480 */ 1481 mutex_enter(&fcip_global_mutex); 1482 1483 FCIP_DEBUG(FCIP_DEBUG_ATTACH, 1484 (CE_WARN, "global cv - signaling")); 1485 1486 cv_signal(&fcip_global_cv); 1487 1488 FCIP_DEBUG(FCIP_DEBUG_ATTACH, 1489 (CE_WARN, "global cv - signaled")); 1490 mutex_exit(&fcip_global_mutex); 1491 return (DDI_SUCCESS); 1492 } 1493 case DDI_RESUME: 1494 /* 1495 * Resume appears trickier 1496 */ 1497 return (DDI_SUCCESS); 1498 default: 1499 return (DDI_FAILURE); 1500 } 1501 } 1502 1503 1504 /* 1505 * The detach entry point to permit unloading fcip. We make sure 1506 * there are no active streams before we proceed with the detach 1507 */ 1508 /* ARGSUSED */ 1509 static int 1510 fcip_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 1511 { 1512 struct fcip *fptr; 1513 fcip_port_info_t *fport; 1514 int detached; 1515 1516 switch (cmd) { 1517 case DDI_DETACH: { 1518 /* 1519 * If we got here, any active streams should have been 1520 * unplumbed but check anyway 1521 */ 1522 mutex_enter(&fcip_global_mutex); 1523 if (fcipstrup != NULL) { 1524 mutex_exit(&fcip_global_mutex); 1525 return (DDI_FAILURE); 1526 } 1527 1528 if (fcip_port_head != NULL) { 1529 /* 1530 * Check to see if we have unattached/unbound 1531 * ports. 
If all the ports are unattached/unbound go 1532 * ahead and unregister with the transport 1533 */ 1534 fport = fcip_port_head; 1535 while (fport != NULL) { 1536 fptr = fport->fcipp_fcip; 1537 if (fptr == NULL) { 1538 continue; 1539 } 1540 mutex_enter(&fptr->fcip_mutex); 1541 fptr->fcip_flags |= FCIP_DETACHING; 1542 if (fptr->fcip_ipq || 1543 fptr->fcip_flags & (FCIP_IN_TIMEOUT | 1544 FCIP_IN_CALLBACK | FCIP_ATTACHING | 1545 FCIP_SUSPENDED | FCIP_POWER_DOWN | 1546 FCIP_REG_INPROGRESS)) { 1547 FCIP_TNF_PROBE_1((fcip_detach, 1548 "fcip io", /* CSTYLED */, 1549 tnf_string, msg, 1550 "fcip instance busy")); 1551 1552 mutex_exit(&fptr->fcip_mutex); 1553 FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_WARN, 1554 "fcip instance busy")); 1555 break; 1556 } 1557 /* 1558 * Check for any outstanding pkts. If yes 1559 * fail the detach 1560 */ 1561 mutex_enter(&fptr->fcip_dest_mutex); 1562 if (fcip_port_get_num_pkts(fptr) > 0) { 1563 mutex_exit(&fptr->fcip_dest_mutex); 1564 mutex_exit(&fptr->fcip_mutex); 1565 FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_WARN, 1566 "fcip instance busy - pkts " 1567 "pending")); 1568 break; 1569 } 1570 mutex_exit(&fptr->fcip_dest_mutex); 1571 1572 mutex_enter(&fptr->fcip_rt_mutex); 1573 if (fcip_plogi_in_progress(fptr)) { 1574 mutex_exit(&fptr->fcip_rt_mutex); 1575 mutex_exit(&fptr->fcip_mutex); 1576 FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_WARN, 1577 "fcip instance busy - plogi in " 1578 "progress")); 1579 break; 1580 } 1581 mutex_exit(&fptr->fcip_rt_mutex); 1582 1583 mutex_exit(&fptr->fcip_mutex); 1584 fport = fport->fcipp_next; 1585 } 1586 /* 1587 * if fport is non NULL - we have active ports 1588 */ 1589 if (fport != NULL) { 1590 /* 1591 * Remove the DETACHING flags on the ports 1592 */ 1593 fport = fcip_port_head; 1594 while (fport != NULL) { 1595 fptr = fport->fcipp_fcip; 1596 mutex_enter(&fptr->fcip_mutex); 1597 fptr->fcip_flags &= ~(FCIP_DETACHING); 1598 mutex_exit(&fptr->fcip_mutex); 1599 fport = fport->fcipp_next; 1600 } 1601 mutex_exit(&fcip_global_mutex); 1602 
return (DDI_FAILURE); 1603 } 1604 } 1605 1606 /* 1607 * free up all softstate structures 1608 */ 1609 fport = fcip_port_head; 1610 while (fport != NULL) { 1611 detached = 1; 1612 1613 fptr = fport->fcipp_fcip; 1614 if (fptr) { 1615 mutex_enter(&fptr->fcip_mutex); 1616 /* 1617 * Check to see if somebody beat us to the 1618 * punch 1619 */ 1620 detached = fptr->fcip_flags & FCIP_DETACHED; 1621 fptr->fcip_flags &= ~(FCIP_DETACHING); 1622 fptr->fcip_flags |= FCIP_DETACHED; 1623 mutex_exit(&fptr->fcip_mutex); 1624 } 1625 1626 if (!detached) { 1627 fport = fcip_softstate_free(fport); 1628 } else { 1629 /* 1630 * If the port was marked as detached 1631 * but it was still in the list, that 1632 * means another thread has marked it 1633 * but we got in while it released the 1634 * fcip_global_mutex in softstate_free. 1635 * Given that, we're still safe to use 1636 * fport->fcipp_next to find out what 1637 * the next port on the list is. 1638 */ 1639 fport = fport->fcipp_next; 1640 } 1641 1642 FCIP_DEBUG(FCIP_DEBUG_DETACH, 1643 (CE_NOTE, "detaching port")); 1644 1645 FCIP_TNF_PROBE_1((fcip_detach, 1646 "fcip io", /* CSTYLED */, tnf_string, 1647 msg, "detaching port")); 1648 } 1649 1650 /* 1651 * If we haven't removed all the port structures, we 1652 * aren't yet ready to be detached. 1653 */ 1654 if (fcip_port_head != NULL) { 1655 mutex_exit(&fcip_global_mutex); 1656 return (DDI_FAILURE); 1657 } 1658 1659 fcip_num_instances = 0; 1660 mutex_exit(&fcip_global_mutex); 1661 fcip_module_dip = NULL; 1662 return (DDI_SUCCESS); 1663 } 1664 case DDI_SUSPEND: 1665 return (DDI_SUCCESS); 1666 default: 1667 return (DDI_FAILURE); 1668 } 1669 } 1670 1671 /* 1672 * The port_detach callback is called from the transport when a 1673 * FC port is being removed from the transport's control. This routine 1674 * provides fcip with an opportunity to cleanup all activities and 1675 * structures on the port marked for removal. 
1676 */ 1677 /* ARGSUSED */ 1678 static int 1679 fcip_port_detach(opaque_t ulp_handle, fc_ulp_port_info_t *port_info, 1680 fc_detach_cmd_t cmd) 1681 { 1682 int rval = FC_FAILURE; 1683 fcip_port_info_t *fport; 1684 struct fcip *fptr; 1685 struct fcipstr *strp; 1686 1687 switch (cmd) { 1688 case FC_CMD_DETACH: { 1689 mutex_enter(&fcip_global_mutex); 1690 1691 if (fcip_port_head == NULL) { 1692 /* 1693 * we are all done but our fini has not been 1694 * called yet!! Let's hope we have no active 1695 * fcip instances here. - strange secnario but 1696 * no harm in having this return a success. 1697 */ 1698 fcip_check_remove_minor_node(); 1699 1700 mutex_exit(&fcip_global_mutex); 1701 return (FC_SUCCESS); 1702 } else { 1703 /* 1704 * traverse the port list 1705 */ 1706 fport = fcip_port_head; 1707 while (fport != NULL) { 1708 if (fport->fcipp_handle == 1709 port_info->port_handle) { 1710 fptr = fport->fcipp_fcip; 1711 1712 /* 1713 * Fail the port detach if there is 1714 * still an attached, bound stream on 1715 * this interface. 
1716 */ 1717 1718 rw_enter(&fcipstruplock, RW_READER); 1719 1720 for (strp = fcipstrup; strp != NULL; 1721 strp = strp->sl_nextp) { 1722 if (strp->sl_fcip == fptr) { 1723 rw_exit(&fcipstruplock); 1724 mutex_exit( 1725 &fcip_global_mutex); 1726 return (FC_FAILURE); 1727 } 1728 } 1729 1730 rw_exit(&fcipstruplock); 1731 1732 /* 1733 * fail port detach if we are in 1734 * the middle of a deferred port attach 1735 * or if the port has outstanding pkts 1736 */ 1737 if (fptr != NULL) { 1738 mutex_enter(&fptr->fcip_mutex); 1739 if (fcip_check_port_busy 1740 (fptr) || 1741 (fptr->fcip_flags & 1742 FCIP_DETACHED)) { 1743 mutex_exit( 1744 &fptr->fcip_mutex); 1745 mutex_exit( 1746 &fcip_global_mutex); 1747 return (FC_FAILURE); 1748 } 1749 1750 fptr->fcip_flags |= 1751 FCIP_DETACHED; 1752 mutex_exit(&fptr->fcip_mutex); 1753 } 1754 (void) fcip_softstate_free(fport); 1755 1756 fcip_check_remove_minor_node(); 1757 mutex_exit(&fcip_global_mutex); 1758 return (FC_SUCCESS); 1759 } 1760 fport = fport->fcipp_next; 1761 } 1762 ASSERT(fport == NULL); 1763 } 1764 mutex_exit(&fcip_global_mutex); 1765 break; 1766 } 1767 case FC_CMD_POWER_DOWN: 1768 /* FALLTHROUGH */ 1769 case FC_CMD_SUSPEND: 1770 mutex_enter(&fcip_global_mutex); 1771 fport = fcip_port_head; 1772 while (fport != NULL) { 1773 if (fport->fcipp_handle == port_info->port_handle) { 1774 break; 1775 } 1776 fport = fport->fcipp_next; 1777 } 1778 if (fport == NULL) { 1779 mutex_exit(&fcip_global_mutex); 1780 break; 1781 } 1782 rval = fcip_handle_suspend(fport, cmd); 1783 mutex_exit(&fcip_global_mutex); 1784 break; 1785 default: 1786 FCIP_DEBUG(FCIP_DEBUG_DETACH, 1787 (CE_WARN, "unknown port detach command!!")); 1788 break; 1789 } 1790 return (rval); 1791 } 1792 1793 1794 /* 1795 * Returns 0 if the port is not busy, else returns non zero. 
1796 */ 1797 static int 1798 fcip_check_port_busy(struct fcip *fptr) 1799 { 1800 int rval = 0, num_pkts = 0; 1801 1802 ASSERT(fptr != NULL); 1803 ASSERT(MUTEX_HELD(&fptr->fcip_mutex)); 1804 1805 mutex_enter(&fptr->fcip_dest_mutex); 1806 1807 if (fptr->fcip_flags & FCIP_PORT_BUSY || 1808 ((num_pkts = fcip_port_get_num_pkts(fptr)) > 0) || 1809 fptr->fcip_num_ipkts_pending) { 1810 rval = 1; 1811 FCIP_DEBUG(FCIP_DEBUG_DETACH, 1812 (CE_NOTE, "!fcip_check_port_busy: port is busy " 1813 "fcip_flags: 0x%x, num_pkts: 0x%x, ipkts_pending: 0x%lx!", 1814 fptr->fcip_flags, num_pkts, fptr->fcip_num_ipkts_pending)); 1815 } 1816 1817 mutex_exit(&fptr->fcip_dest_mutex); 1818 return (rval); 1819 } 1820 1821 /* 1822 * Helper routine to remove fcip's minor node 1823 * There is one minor node per system and it should be removed if there are no 1824 * other fcip instances (which has a 1:1 mapping for fp instances) present 1825 */ 1826 static void 1827 fcip_check_remove_minor_node(void) 1828 { 1829 ASSERT(MUTEX_HELD(&fcip_global_mutex)); 1830 1831 /* 1832 * If there are no more fcip (fp) instances, remove the 1833 * minor node for fcip. 1834 * Reset fcip_minor_node_created to invalidate it. 1835 */ 1836 if (fcip_num_instances == 0 && (fcip_module_dip != NULL)) { 1837 ddi_remove_minor_node(fcip_module_dip, NULL); 1838 fcip_minor_node_created = 0; 1839 } 1840 } 1841 1842 /* 1843 * This routine permits the suspend operation during a CPR/System 1844 * power management operation. The routine basically quiesces I/Os 1845 * on all active interfaces 1846 */ 1847 static int 1848 fcip_handle_suspend(fcip_port_info_t *fport, fc_detach_cmd_t cmd) 1849 { 1850 struct fcip *fptr = fport->fcipp_fcip; 1851 timeout_id_t tid; 1852 int index; 1853 int tryagain = 0; 1854 int count; 1855 struct fcipstr *tslp; 1856 1857 1858 ASSERT(fptr != NULL); 1859 mutex_enter(&fptr->fcip_mutex); 1860 1861 /* 1862 * Fail if we are in the middle of a callback. 
Don't use delay during 1863 * suspend since clock intrs are not available so busy wait 1864 */ 1865 count = 0; 1866 while (count++ < 15 && 1867 ((fptr->fcip_flags & FCIP_IN_CALLBACK) || 1868 (fptr->fcip_flags & FCIP_IN_TIMEOUT))) { 1869 mutex_exit(&fptr->fcip_mutex); 1870 drv_usecwait(1000000); 1871 mutex_enter(&fptr->fcip_mutex); 1872 } 1873 1874 if (fptr->fcip_flags & FCIP_IN_CALLBACK || 1875 fptr->fcip_flags & FCIP_IN_TIMEOUT) { 1876 mutex_exit(&fptr->fcip_mutex); 1877 return (FC_FAILURE); 1878 } 1879 1880 if (cmd == FC_CMD_POWER_DOWN) { 1881 if (fptr->fcip_flags & FCIP_SUSPENDED) { 1882 fptr->fcip_flags |= FCIP_POWER_DOWN; 1883 mutex_exit(&fptr->fcip_mutex); 1884 goto success; 1885 } else { 1886 fptr->fcip_flags |= FCIP_POWER_DOWN; 1887 } 1888 } else if (cmd == FC_CMD_SUSPEND) { 1889 fptr->fcip_flags |= FCIP_SUSPENDED; 1890 } else { 1891 mutex_exit(&fptr->fcip_mutex); 1892 return (FC_FAILURE); 1893 } 1894 1895 mutex_exit(&fptr->fcip_mutex); 1896 /* 1897 * If no streams are plumbed - its the easiest case - Just 1898 * bail out without having to do much 1899 */ 1900 1901 rw_enter(&fcipstruplock, RW_READER); 1902 for (tslp = fcipstrup; tslp; tslp = tslp->sl_nextp) { 1903 if (tslp->sl_fcip == fptr) { 1904 break; 1905 } 1906 } 1907 rw_exit(&fcipstruplock); 1908 1909 /* 1910 * No active streams on this port 1911 */ 1912 if (tslp == NULL) { 1913 goto success; 1914 } 1915 1916 /* 1917 * Walk through each Routing table structure and check if 1918 * the destination table has any outstanding commands. If yes 1919 * wait for the commands to drain. Since we go through each 1920 * routing table entry in succession, it may be wise to wait 1921 * only a few seconds for each entry. 
1922 */ 1923 mutex_enter(&fptr->fcip_rt_mutex); 1924 while (!tryagain) { 1925 1926 tryagain = 0; 1927 for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) { 1928 struct fcip_routing_table *frp; 1929 struct fcip_dest *fdestp; 1930 la_wwn_t *pwwn; 1931 int hash_bucket; 1932 1933 frp = fptr->fcip_rtable[index]; 1934 while (frp) { 1935 /* 1936 * Mark the routing table as SUSPENDED. Even 1937 * mark the broadcast entry SUSPENDED to 1938 * prevent any ARP or other broadcasts. We 1939 * can reset the state of the broadcast 1940 * RTE when we resume. 1941 */ 1942 frp->fcipr_state = FCIP_RT_SUSPENDED; 1943 pwwn = &frp->fcipr_pwwn; 1944 1945 /* 1946 * Get hold of destination pointer 1947 */ 1948 mutex_enter(&fptr->fcip_dest_mutex); 1949 1950 hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn); 1951 ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS); 1952 1953 fdestp = fptr->fcip_dest[hash_bucket]; 1954 while (fdestp != NULL) { 1955 mutex_enter(&fdestp->fcipd_mutex); 1956 if (fdestp->fcipd_rtable) { 1957 if (fcip_wwn_compare(pwwn, 1958 &fdestp->fcipd_pwwn, 1959 FCIP_COMPARE_PWWN) == 0) { 1960 mutex_exit( 1961 &fdestp->fcipd_mutex); 1962 break; 1963 } 1964 } 1965 mutex_exit(&fdestp->fcipd_mutex); 1966 fdestp = fdestp->fcipd_next; 1967 } 1968 1969 mutex_exit(&fptr->fcip_dest_mutex); 1970 if (fdestp == NULL) { 1971 frp = frp->fcipr_next; 1972 continue; 1973 } 1974 1975 /* 1976 * Wait for fcip_wait_cmds seconds for 1977 * the commands to drain. 1978 */ 1979 count = 0; 1980 mutex_enter(&fdestp->fcipd_mutex); 1981 while (fdestp->fcipd_ncmds && 1982 count < fcip_wait_cmds) { 1983 mutex_exit(&fdestp->fcipd_mutex); 1984 mutex_exit(&fptr->fcip_rt_mutex); 1985 drv_usecwait(1000000); 1986 mutex_enter(&fptr->fcip_rt_mutex); 1987 mutex_enter(&fdestp->fcipd_mutex); 1988 count++; 1989 } 1990 /* 1991 * Check if we were able to drain all cmds 1992 * successfully. 
Else continue with other 1993 * ports and try during the second pass 1994 */ 1995 if (fdestp->fcipd_ncmds) { 1996 tryagain++; 1997 } 1998 mutex_exit(&fdestp->fcipd_mutex); 1999 2000 frp = frp->fcipr_next; 2001 } 2002 } 2003 if (tryagain == 0) { 2004 break; 2005 } 2006 } 2007 mutex_exit(&fptr->fcip_rt_mutex); 2008 2009 if (tryagain) { 2010 mutex_enter(&fptr->fcip_mutex); 2011 fptr->fcip_flags &= ~(FCIP_SUSPENDED | FCIP_POWER_DOWN); 2012 mutex_exit(&fptr->fcip_mutex); 2013 return (FC_FAILURE); 2014 } 2015 2016 success: 2017 mutex_enter(&fptr->fcip_mutex); 2018 tid = fptr->fcip_timeout_id; 2019 fptr->fcip_timeout_id = NULL; 2020 mutex_exit(&fptr->fcip_mutex); 2021 2022 (void) untimeout(tid); 2023 2024 return (FC_SUCCESS); 2025 } 2026 2027 /* 2028 * the getinfo(9E) entry point 2029 */ 2030 /* ARGSUSED */ 2031 static int 2032 fcip_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result) 2033 { 2034 int rval = DDI_FAILURE; 2035 2036 switch (cmd) { 2037 case DDI_INFO_DEVT2DEVINFO: 2038 *result = fcip_module_dip; 2039 if (*result) 2040 rval = DDI_SUCCESS; 2041 break; 2042 2043 case DDI_INFO_DEVT2INSTANCE: 2044 *result = (void *)0; 2045 rval = DDI_SUCCESS; 2046 break; 2047 default: 2048 break; 2049 } 2050 2051 return (rval); 2052 } 2053 2054 /* 2055 * called from fcip_attach to initialize kstats for the link 2056 */ 2057 /* ARGSUSED */ 2058 static void 2059 fcip_kstat_init(struct fcip *fptr) 2060 { 2061 int instance; 2062 char buf[16]; 2063 struct fcipstat *fcipstatp; 2064 2065 ASSERT(mutex_owned(&fptr->fcip_mutex)); 2066 2067 instance = ddi_get_instance(fptr->fcip_dip); 2068 (void) sprintf(buf, "fcip%d", instance); 2069 2070 #ifdef kstat 2071 fptr->fcip_kstatp = kstat_create("fcip", instance, buf, "net", 2072 KSTAT_TYPE_NAMED, 2073 (sizeof (struct fcipstat)/ sizeof (kstat_named_t)), 2074 KSTAT_FLAG_PERSISTENT); 2075 #else 2076 fptr->fcip_kstatp = kstat_create("fcip", instance, buf, "net", 2077 KSTAT_TYPE_NAMED, 2078 (sizeof (struct fcipstat)/ sizeof 
(kstat_named_t)), 0); 2079 #endif 2080 if (fptr->fcip_kstatp == NULL) { 2081 FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_WARN, "kstat created failed")); 2082 return; 2083 } 2084 2085 fcipstatp = (struct fcipstat *)fptr->fcip_kstatp->ks_data; 2086 kstat_named_init(&fcipstatp->fcips_ipackets, "ipackets", 2087 KSTAT_DATA_ULONG); 2088 kstat_named_init(&fcipstatp->fcips_ierrors, "ierrors", 2089 KSTAT_DATA_ULONG); 2090 kstat_named_init(&fcipstatp->fcips_opackets, "opackets", 2091 KSTAT_DATA_ULONG); 2092 kstat_named_init(&fcipstatp->fcips_oerrors, "oerrors", 2093 KSTAT_DATA_ULONG); 2094 kstat_named_init(&fcipstatp->fcips_collisions, "collisions", 2095 KSTAT_DATA_ULONG); 2096 kstat_named_init(&fcipstatp->fcips_nocanput, "nocanput", 2097 KSTAT_DATA_ULONG); 2098 kstat_named_init(&fcipstatp->fcips_allocbfail, "allocbfail", 2099 KSTAT_DATA_ULONG); 2100 2101 kstat_named_init(&fcipstatp->fcips_defer, "defer", 2102 KSTAT_DATA_ULONG); 2103 kstat_named_init(&fcipstatp->fcips_fram, "fram", 2104 KSTAT_DATA_ULONG); 2105 kstat_named_init(&fcipstatp->fcips_crc, "crc", 2106 KSTAT_DATA_ULONG); 2107 kstat_named_init(&fcipstatp->fcips_oflo, "oflo", 2108 KSTAT_DATA_ULONG); 2109 kstat_named_init(&fcipstatp->fcips_uflo, "uflo", 2110 KSTAT_DATA_ULONG); 2111 kstat_named_init(&fcipstatp->fcips_missed, "missed", 2112 KSTAT_DATA_ULONG); 2113 kstat_named_init(&fcipstatp->fcips_tlcol, "tlcol", 2114 KSTAT_DATA_ULONG); 2115 kstat_named_init(&fcipstatp->fcips_trtry, "trtry", 2116 KSTAT_DATA_ULONG); 2117 kstat_named_init(&fcipstatp->fcips_tnocar, "tnocar", 2118 KSTAT_DATA_ULONG); 2119 kstat_named_init(&fcipstatp->fcips_inits, "inits", 2120 KSTAT_DATA_ULONG); 2121 kstat_named_init(&fcipstatp->fcips_notbufs, "notbufs", 2122 KSTAT_DATA_ULONG); 2123 kstat_named_init(&fcipstatp->fcips_norbufs, "norbufs", 2124 KSTAT_DATA_ULONG); 2125 kstat_named_init(&fcipstatp->fcips_allocbfail, "allocbfail", 2126 KSTAT_DATA_ULONG); 2127 2128 /* 2129 * required by kstat for MIB II objects(RFC 1213) 2130 */ 2131 
kstat_named_init(&fcipstatp->fcips_rcvbytes, "fcips_rcvbytes", 2132 KSTAT_DATA_ULONG); /* # octets received */ 2133 /* MIB - ifInOctets */ 2134 kstat_named_init(&fcipstatp->fcips_xmtbytes, "fcips_xmtbytes", 2135 KSTAT_DATA_ULONG); /* # octets xmitted */ 2136 /* MIB - ifOutOctets */ 2137 kstat_named_init(&fcipstatp->fcips_multircv, "fcips_multircv", 2138 KSTAT_DATA_ULONG); /* # multicast packets */ 2139 /* delivered to upper layer */ 2140 /* MIB - ifInNUcastPkts */ 2141 kstat_named_init(&fcipstatp->fcips_multixmt, "fcips_multixmt", 2142 KSTAT_DATA_ULONG); /* # multicast packets */ 2143 /* requested to be sent */ 2144 /* MIB - ifOutNUcastPkts */ 2145 kstat_named_init(&fcipstatp->fcips_brdcstrcv, "fcips_brdcstrcv", 2146 KSTAT_DATA_ULONG); /* # broadcast packets */ 2147 /* delivered to upper layer */ 2148 /* MIB - ifInNUcastPkts */ 2149 kstat_named_init(&fcipstatp->fcips_brdcstxmt, "fcips_brdcstxmt", 2150 KSTAT_DATA_ULONG); /* # broadcast packets */ 2151 /* requested to be sent */ 2152 /* MIB - ifOutNUcastPkts */ 2153 kstat_named_init(&fcipstatp->fcips_norcvbuf, "fcips_norcvbuf", 2154 KSTAT_DATA_ULONG); /* # rcv packets discarded */ 2155 /* MIB - ifInDiscards */ 2156 kstat_named_init(&fcipstatp->fcips_noxmtbuf, "fcips_noxmtbuf", 2157 KSTAT_DATA_ULONG); /* # xmt packets discarded */ 2158 2159 fptr->fcip_kstatp->ks_update = fcip_stat_update; 2160 fptr->fcip_kstatp->ks_private = (void *) fptr; 2161 kstat_install(fptr->fcip_kstatp); 2162 } 2163 2164 /* 2165 * Update the defined kstats for netstat et al to use 2166 */ 2167 /* ARGSUSED */ 2168 static int 2169 fcip_stat_update(kstat_t *fcip_statp, int val) 2170 { 2171 struct fcipstat *fcipstatp; 2172 struct fcip *fptr; 2173 2174 fptr = (struct fcip *)fcip_statp->ks_private; 2175 fcipstatp = (struct fcipstat *)fcip_statp->ks_data; 2176 2177 if (val == KSTAT_WRITE) { 2178 fptr->fcip_ipackets = fcipstatp->fcips_ipackets.value.ul; 2179 fptr->fcip_ierrors = fcipstatp->fcips_ierrors.value.ul; 2180 fptr->fcip_opackets = 
fcipstatp->fcips_opackets.value.ul; 2181 fptr->fcip_oerrors = fcipstatp->fcips_oerrors.value.ul; 2182 fptr->fcip_collisions = fcipstatp->fcips_collisions.value.ul; 2183 fptr->fcip_defer = fcipstatp->fcips_defer.value.ul; 2184 fptr->fcip_fram = fcipstatp->fcips_fram.value.ul; 2185 fptr->fcip_crc = fcipstatp->fcips_crc.value.ul; 2186 fptr->fcip_oflo = fcipstatp->fcips_oflo.value.ul; 2187 fptr->fcip_uflo = fcipstatp->fcips_uflo.value.ul; 2188 fptr->fcip_missed = fcipstatp->fcips_missed.value.ul; 2189 fptr->fcip_tlcol = fcipstatp->fcips_tlcol.value.ul; 2190 fptr->fcip_trtry = fcipstatp->fcips_trtry.value.ul; 2191 fptr->fcip_tnocar = fcipstatp->fcips_tnocar.value.ul; 2192 fptr->fcip_inits = fcipstatp->fcips_inits.value.ul; 2193 fptr->fcip_notbufs = fcipstatp->fcips_notbufs.value.ul; 2194 fptr->fcip_norbufs = fcipstatp->fcips_norbufs.value.ul; 2195 fptr->fcip_nocanput = fcipstatp->fcips_nocanput.value.ul; 2196 fptr->fcip_allocbfail = fcipstatp->fcips_allocbfail.value.ul; 2197 fptr->fcip_rcvbytes = fcipstatp->fcips_rcvbytes.value.ul; 2198 fptr->fcip_xmtbytes = fcipstatp->fcips_xmtbytes.value.ul; 2199 fptr->fcip_multircv = fcipstatp->fcips_multircv.value.ul; 2200 fptr->fcip_multixmt = fcipstatp->fcips_multixmt.value.ul; 2201 fptr->fcip_brdcstrcv = fcipstatp->fcips_brdcstrcv.value.ul; 2202 fptr->fcip_norcvbuf = fcipstatp->fcips_norcvbuf.value.ul; 2203 fptr->fcip_noxmtbuf = fcipstatp->fcips_noxmtbuf.value.ul; 2204 fptr->fcip_allocbfail = fcipstatp->fcips_allocbfail.value.ul; 2205 fptr->fcip_allocbfail = fcipstatp->fcips_allocbfail.value.ul; 2206 fptr->fcip_allocbfail = fcipstatp->fcips_allocbfail.value.ul; 2207 fptr->fcip_allocbfail = fcipstatp->fcips_allocbfail.value.ul; 2208 fptr->fcip_allocbfail = fcipstatp->fcips_allocbfail.value.ul; 2209 fptr->fcip_allocbfail = fcipstatp->fcips_allocbfail.value.ul; 2210 fptr->fcip_allocbfail = fcipstatp->fcips_allocbfail.value.ul; 2211 fptr->fcip_allocbfail = fcipstatp->fcips_allocbfail.value.ul; 2212 2213 } else { 2214 
fcipstatp->fcips_ipackets.value.ul = fptr->fcip_ipackets; 2215 fcipstatp->fcips_ierrors.value.ul = fptr->fcip_ierrors; 2216 fcipstatp->fcips_opackets.value.ul = fptr->fcip_opackets; 2217 fcipstatp->fcips_oerrors.value.ul = fptr->fcip_oerrors; 2218 fcipstatp->fcips_collisions.value.ul = fptr->fcip_collisions; 2219 fcipstatp->fcips_nocanput.value.ul = fptr->fcip_nocanput; 2220 fcipstatp->fcips_allocbfail.value.ul = fptr->fcip_allocbfail; 2221 fcipstatp->fcips_defer.value.ul = fptr->fcip_defer; 2222 fcipstatp->fcips_fram.value.ul = fptr->fcip_fram; 2223 fcipstatp->fcips_crc.value.ul = fptr->fcip_crc; 2224 fcipstatp->fcips_oflo.value.ul = fptr->fcip_oflo; 2225 fcipstatp->fcips_uflo.value.ul = fptr->fcip_uflo; 2226 fcipstatp->fcips_missed.value.ul = fptr->fcip_missed; 2227 fcipstatp->fcips_tlcol.value.ul = fptr->fcip_tlcol; 2228 fcipstatp->fcips_trtry.value.ul = fptr->fcip_trtry; 2229 fcipstatp->fcips_tnocar.value.ul = fptr->fcip_tnocar; 2230 fcipstatp->fcips_inits.value.ul = fptr->fcip_inits; 2231 fcipstatp->fcips_norbufs.value.ul = fptr->fcip_norbufs; 2232 fcipstatp->fcips_notbufs.value.ul = fptr->fcip_notbufs; 2233 fcipstatp->fcips_rcvbytes.value.ul = fptr->fcip_rcvbytes; 2234 fcipstatp->fcips_xmtbytes.value.ul = fptr->fcip_xmtbytes; 2235 fcipstatp->fcips_multircv.value.ul = fptr->fcip_multircv; 2236 fcipstatp->fcips_multixmt.value.ul = fptr->fcip_multixmt; 2237 fcipstatp->fcips_brdcstrcv.value.ul = fptr->fcip_brdcstrcv; 2238 fcipstatp->fcips_brdcstxmt.value.ul = fptr->fcip_brdcstxmt; 2239 fcipstatp->fcips_norcvbuf.value.ul = fptr->fcip_norcvbuf; 2240 fcipstatp->fcips_noxmtbuf.value.ul = fptr->fcip_noxmtbuf; 2241 2242 } 2243 return (0); 2244 } 2245 2246 2247 /* 2248 * fcip_statec_cb: handles all required state change callback notifications 2249 * it receives from the transport 2250 */ 2251 /* ARGSUSED */ 2252 static void 2253 fcip_statec_cb(opaque_t ulp_handle, opaque_t phandle, 2254 uint32_t port_state, uint32_t port_top, fc_portmap_t changelist[], 2255 uint32_t 
listlen, uint32_t sid)
{
	fcip_port_info_t	*fport;
	struct fcip		*fptr;
	struct fcipstr		*slp;
	queue_t			*wrq;
	int			instance;
	int 			index;
	struct fcip_routing_table 	*frtp;

	fport = fcip_get_port(phandle);

	if (fport == NULL) {
		return;
	}

	fptr = fport->fcipp_fcip;
	ASSERT(fptr != NULL);

	/* defensive re-check for non-DEBUG kernels where ASSERT is a no-op */
	if (fptr == NULL) {
		return;
	}

	instance = ddi_get_instance(fport->fcipp_dip);

	FCIP_TNF_PROBE_4((fcip_statec_cb, "fcip io", /* CSTYLED */,
		tnf_string, msg, "state change callback",
		tnf_uint, instance, instance,
		tnf_uint, S_ID, sid,
		tnf_int, count, listlen));
	FCIP_DEBUG(FCIP_DEBUG_ELS,
	    (CE_NOTE, "fcip%d, state change callback: state:0x%x, "
	    "S_ID:0x%x, count:0x%x", instance, port_state, sid, listlen));

	mutex_enter(&fptr->fcip_mutex);

	/* ignore the callback if the instance is going away or asleep */
	if ((fptr->fcip_flags & (FCIP_DETACHING | FCIP_DETACHED)) ||
	    (fptr->fcip_flags & (FCIP_SUSPENDED | FCIP_POWER_DOWN))) {
		mutex_exit(&fptr->fcip_mutex);
		return;
	}

	/*
	 * set fcip flags to indicate we are in the middle of a
	 * state change callback so we can wait till the statechange
	 * is handled before succeeding/failing the SUSPEND/POWER DOWN.
	 */
	fptr->fcip_flags |= FCIP_IN_SC_CB;

	fport->fcipp_pstate = port_state;

	/*
	 * Check if topology changed. If Yes - Modify the broadcast
	 * RTE entries to understand the new broadcast D_IDs
	 */
	if (fport->fcipp_topology != port_top &&
	    (port_top != FC_TOP_UNKNOWN)) {
		/* REMOVE later */
		FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_NOTE,
		    "topology changed: Old topology: 0x%x New topology 0x%x",
		    fport->fcipp_topology, port_top));
		/*
		 * If topology changed - attempt a rediscovery of
		 * devices. Helps specially in Fabric/Public loops
		 * and if on_demand_node_creation is disabled
		 */
		fport->fcipp_topology = port_top;
		fcip_handle_topology(fptr);
	}

	/*
	 * fcip_mutex is dropped before dispatching on the new port
	 * state; the switch below re-acquires it only for the short
	 * flag/state updates.
	 */
	mutex_exit(&fptr->fcip_mutex);

	switch (FC_PORT_STATE_MASK(port_state)) {
	case FC_STATE_ONLINE:
	/* FALLTHROUGH */
	case FC_STATE_LIP:
	/* FALLTHROUGH */
	case FC_STATE_LIP_LBIT_SET:

		/*
		 * nothing to do here actually other than if we
		 * were actually logged onto a port in the devlist
		 * (which indicates active communication between
		 * the host port and the port in the changelist).
		 * If however we are in a private loop or point to
		 * point mode, we need to check for any IP capable
		 * ports and update our routing table.
		 */
		switch (port_top) {
		case FC_TOP_FABRIC:
			/*
			 * This indicates a fabric port with a NameServer.
			 * Check the devlist to see if we are in active
			 * communication with a port on the devlist.
			 */
			FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_NOTE,
			    "Statec_cb: fabric topology"));
			fcip_rt_update(fptr, changelist, listlen);
			break;
		case FC_TOP_NO_NS:
			/*
			 * No nameserver - so treat it like a Private loop
			 * or point to point topology and get a map of
			 * devices on the link and get IP capable ports to
			 * to update the routing table.
			 */
			FCIP_DEBUG(FCIP_DEBUG_ELS,
			    (CE_NOTE, "Statec_cb: NO_NS topology"));
		/* FALLTHROUGH */
		case FC_TOP_PRIVATE_LOOP:
			FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_NOTE,
			    "Statec_cb: Pvt_Loop topology"));
		/* FALLTHROUGH */
		case FC_TOP_PT_PT:
			/*
			 * call get_port_map() and update routing table
			 */
			fcip_rt_update(fptr, changelist, listlen);
			break;
		default:
			FCIP_DEBUG(FCIP_DEBUG_ELS,
			    (CE_NOTE, "Statec_cb: Unknown topology"));
		}

		/*
		 * We should now enable the Queues and permit I/Os
		 * to flow through downstream. The update of routing
		 * table should have flushed out any port entries that
		 * don't exist or are not available after the state change
		 */
		mutex_enter(&fptr->fcip_mutex);
		fptr->fcip_port_state = FCIP_PORT_ONLINE;
		if (fptr->fcip_flags & FCIP_LINK_DOWN) {
			fptr->fcip_flags &= ~FCIP_LINK_DOWN;
		}
		mutex_exit(&fptr->fcip_mutex);

		/*
		 * Enable write queues
		 */
		rw_enter(&fcipstruplock, RW_READER);
		for (slp = fcipstrup; slp != NULL; slp = slp->sl_nextp) {
			if (slp && slp->sl_fcip == fptr) {
				wrq = WR(slp->sl_rq);
				/* kick any stream that backed up while down */
				if (wrq->q_flag & QFULL) {
					qenable(wrq);
				}
			}
		}
		rw_exit(&fcipstruplock);
		break;
	case FC_STATE_OFFLINE:
		/*
		 * mark the port_state OFFLINE and wait for it to
		 * become online. Any new messages in this state will
		 * simply be queued back up. If the port does not
		 * come online in a short while, we can begin failing
		 * messages and flush the routing table
		 */
		mutex_enter(&fptr->fcip_mutex);
		fptr->fcip_mark_offline = fptr->fcip_timeout_ticks +
		    FCIP_OFFLINE_TIMEOUT;
		fptr->fcip_port_state = FCIP_PORT_OFFLINE;
		mutex_exit(&fptr->fcip_mutex);

		/*
		 * Mark all Routing table entries as invalid to prevent
		 * any commands from trickling through to ports that
		 * have disappeared from under us
		 */
		mutex_enter(&fptr->fcip_rt_mutex);
		for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
			frtp = fptr->fcip_rtable[index];
			while (frtp) {
				frtp->fcipr_state = PORT_DEVICE_INVALID;
				frtp = frtp->fcipr_next;
			}
		}
		mutex_exit(&fptr->fcip_rt_mutex);

		break;

	case FC_STATE_RESET_REQUESTED:
		/*
		 * Release all Unsolicited buffers back to transport/FCA.
		 * This also means the port state is marked offline - so
		 * we may have to do what OFFLINE state requires us to do.
		 * Care must be taken to wait for any active unsolicited
		 * buffer with the other Streams modules - so wait for
		 * a freeb if the unsolicited buffer is passed back all
		 * the way upstream.
		 */
		mutex_enter(&fptr->fcip_mutex);

#ifdef FCIP_ESBALLOC
		/*
		 * With esballoc'ed mblks the UB is still referenced by
		 * upper STREAMS modules; wait here until the free
		 * routine signals that all of them have been returned.
		 */
		while (fptr->fcip_ub_upstream) {
			cv_wait(&fptr->fcip_ub_cv, &fptr->fcip_mutex);
		}
#endif	/* FCIP_ESBALLOC */

		fptr->fcip_mark_offline = fptr->fcip_timeout_ticks +
		    FCIP_OFFLINE_TIMEOUT;
		fptr->fcip_port_state = FCIP_PORT_OFFLINE;
		mutex_exit(&fptr->fcip_mutex);
		break;

	case FC_STATE_DEVICE_CHANGE:
		if (listlen) {
			fcip_rt_update(fptr, changelist, listlen);
		}
		break;
	case FC_STATE_RESET:
		/*
		 * Not much to do I guess - wait for port to become
		 * ONLINE. If the port doesn't become online in a short
		 * while, the upper layers abort any request themselves.
		 * We can just putback the messages in the streams queues
		 * if the link is offline
		 */
		break;
	}
	/* state change handled - allow SUSPEND/POWER_DOWN to proceed */
	mutex_enter(&fptr->fcip_mutex);
	fptr->fcip_flags &= ~(FCIP_IN_SC_CB);
	mutex_exit(&fptr->fcip_mutex);
}

/*
 * Given a port handle, return the fcip_port_info structure corresponding
 * to that port handle. The transport allocates and communicates with
 * ULPs using port handles. Returns NULL if no matching port is found;
 * the global port list is walked under fcip_global_mutex.
 */
static fcip_port_info_t *
fcip_get_port(opaque_t phandle)
{
	fcip_port_info_t	*fport;

	ASSERT(phandle != NULL);

	mutex_enter(&fcip_global_mutex);
	fport = fcip_port_head;

	while (fport != NULL) {
		if (fport->fcipp_handle == phandle) {
			/* found */
			break;
		}
		fport = fport->fcipp_next;
	}

	mutex_exit(&fcip_global_mutex);

	return (fport);
}

/*
 * Handle inbound ELS requests received by the transport. We are only
 * interested in FARP/InARP mostly.
2512 */ 2513 /* ARGSUSED */ 2514 static int 2515 fcip_els_cb(opaque_t ulp_handle, opaque_t phandle, 2516 fc_unsol_buf_t *buf, uint32_t claimed) 2517 { 2518 fcip_port_info_t *fport; 2519 struct fcip *fptr; 2520 int instance; 2521 uchar_t r_ctl; 2522 uchar_t ls_code; 2523 la_els_farp_t farp_cmd; 2524 la_els_farp_t *fcmd; 2525 int rval = FC_UNCLAIMED; 2526 2527 fport = fcip_get_port(phandle); 2528 if (fport == NULL) { 2529 return (FC_UNCLAIMED); 2530 } 2531 2532 fptr = fport->fcipp_fcip; 2533 ASSERT(fptr != NULL); 2534 if (fptr == NULL) { 2535 return (FC_UNCLAIMED); 2536 } 2537 2538 instance = ddi_get_instance(fport->fcipp_dip); 2539 2540 mutex_enter(&fptr->fcip_mutex); 2541 if ((fptr->fcip_flags & (FCIP_DETACHING | FCIP_DETACHED)) || 2542 (fptr->fcip_flags & (FCIP_SUSPENDED | FCIP_POWER_DOWN))) { 2543 mutex_exit(&fptr->fcip_mutex); 2544 return (FC_UNCLAIMED); 2545 } 2546 2547 /* 2548 * set fcip flags to indicate we are in the middle of a 2549 * ELS callback so we can wait till the statechange 2550 * is handled before succeeding/failing the SUSPEND/POWER DOWN. 
2551 */ 2552 fptr->fcip_flags |= FCIP_IN_ELS_CB; 2553 mutex_exit(&fptr->fcip_mutex); 2554 2555 FCIP_TNF_PROBE_2((fcip_els_cb, "fcip io", /* CSTYLED */, 2556 tnf_string, msg, "ELS callback", 2557 tnf_uint, instance, instance)); 2558 2559 FCIP_DEBUG(FCIP_DEBUG_ELS, 2560 (CE_NOTE, "fcip%d, ELS callback , ", instance)); 2561 2562 r_ctl = buf->ub_frame.r_ctl; 2563 switch (r_ctl & R_CTL_ROUTING) { 2564 case R_CTL_EXTENDED_SVC: 2565 if (r_ctl == R_CTL_ELS_REQ) { 2566 ls_code = buf->ub_buffer[0]; 2567 if (ls_code == LA_ELS_FARP_REQ) { 2568 /* 2569 * Inbound FARP broadcast request 2570 */ 2571 if (buf->ub_bufsize != sizeof (la_els_farp_t)) { 2572 FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN, 2573 "Invalid FARP req buffer size " 2574 "expected 0x%lx, got 0x%x", 2575 (long)(sizeof (la_els_farp_t)), 2576 buf->ub_bufsize)); 2577 rval = FC_UNCLAIMED; 2578 goto els_cb_done; 2579 } 2580 fcmd = (la_els_farp_t *)buf; 2581 if (fcip_wwn_compare(&fcmd->resp_nwwn, 2582 &fport->fcipp_nwwn, 2583 FCIP_COMPARE_NWWN) != 0) { 2584 rval = FC_UNCLAIMED; 2585 goto els_cb_done; 2586 } 2587 /* 2588 * copy the FARP request and release the 2589 * unsolicited buffer 2590 */ 2591 fcmd = &farp_cmd; 2592 bcopy((void *)buf, (void *)fcmd, 2593 sizeof (la_els_farp_t)); 2594 (void) fc_ulp_ubrelease(fport->fcipp_handle, 1, 2595 &buf->ub_token); 2596 2597 if (fcip_farp_supported && 2598 fcip_handle_farp_request(fptr, fcmd) == 2599 FC_SUCCESS) { 2600 /* 2601 * We successfully sent out a FARP 2602 * reply to the requesting port 2603 */ 2604 rval = FC_SUCCESS; 2605 goto els_cb_done; 2606 } else { 2607 rval = FC_UNCLAIMED; 2608 goto els_cb_done; 2609 } 2610 } 2611 } else if (r_ctl == R_CTL_ELS_RSP) { 2612 ls_code = buf->ub_buffer[0]; 2613 if (ls_code == LA_ELS_FARP_REPLY) { 2614 /* 2615 * We received a REPLY to our FARP request 2616 */ 2617 if (buf->ub_bufsize != sizeof (la_els_farp_t)) { 2618 FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN, 2619 "Invalid FARP req buffer size " 2620 "expected 0x%lx, got 0x%x", 2621 (long)(sizeof 
(la_els_farp_t)), 2622 buf->ub_bufsize)); 2623 rval = FC_UNCLAIMED; 2624 goto els_cb_done; 2625 } 2626 fcmd = &farp_cmd; 2627 bcopy((void *)buf, (void *)fcmd, 2628 sizeof (la_els_farp_t)); 2629 (void) fc_ulp_ubrelease(fport->fcipp_handle, 1, 2630 &buf->ub_token); 2631 if (fcip_farp_supported && 2632 fcip_handle_farp_response(fptr, fcmd) == 2633 FC_SUCCESS) { 2634 FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_NOTE, 2635 "Successfully recevied a FARP " 2636 "response")); 2637 mutex_enter(&fptr->fcip_mutex); 2638 fptr->fcip_farp_rsp_flag = 1; 2639 cv_signal(&fptr->fcip_farp_cv); 2640 mutex_exit(&fptr->fcip_mutex); 2641 rval = FC_SUCCESS; 2642 goto els_cb_done; 2643 } else { 2644 FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN, 2645 "Unable to handle a FARP response " 2646 "receive")); 2647 rval = FC_UNCLAIMED; 2648 goto els_cb_done; 2649 } 2650 } 2651 } 2652 break; 2653 default: 2654 break; 2655 } 2656 els_cb_done: 2657 mutex_enter(&fptr->fcip_mutex); 2658 fptr->fcip_flags &= ~(FCIP_IN_ELS_CB); 2659 mutex_exit(&fptr->fcip_mutex); 2660 return (rval); 2661 } 2662 2663 2664 /* 2665 * Handle inbound FARP requests 2666 */ 2667 static int 2668 fcip_handle_farp_request(struct fcip *fptr, la_els_farp_t *fcmd) 2669 { 2670 fcip_pkt_t *fcip_pkt; 2671 fc_packet_t *fc_pkt; 2672 fcip_port_info_t *fport = fptr->fcip_port_info; 2673 int rval = FC_FAILURE; 2674 opaque_t fca_dev; 2675 fc_portmap_t map; 2676 struct fcip_routing_table *frp; 2677 struct fcip_dest *fdestp; 2678 2679 /* 2680 * Add an entry for the remote port into our routing and destination 2681 * tables. 
2682 */ 2683 map.map_did = fcmd->req_id; 2684 map.map_hard_addr.hard_addr = fcmd->req_id.port_id; 2685 map.map_state = PORT_DEVICE_VALID; 2686 map.map_type = PORT_DEVICE_NEW; 2687 map.map_flags = 0; 2688 map.map_pd = NULL; 2689 bcopy((void *)&fcmd->req_pwwn, (void *)&map.map_pwwn, 2690 sizeof (la_wwn_t)); 2691 bcopy((void *)&fcmd->req_nwwn, (void *)&map.map_nwwn, 2692 sizeof (la_wwn_t)); 2693 fcip_rt_update(fptr, &map, 1); 2694 mutex_enter(&fptr->fcip_rt_mutex); 2695 frp = fcip_lookup_rtable(fptr, &fcmd->req_pwwn, FCIP_COMPARE_NWWN); 2696 mutex_exit(&fptr->fcip_rt_mutex); 2697 2698 fdestp = fcip_add_dest(fptr, frp); 2699 2700 fcip_pkt = fcip_ipkt_alloc(fptr, sizeof (la_els_farp_t), 2701 sizeof (la_els_farp_t), NULL, KM_SLEEP); 2702 if (fcip_pkt == NULL) { 2703 rval = FC_FAILURE; 2704 goto farp_done; 2705 } 2706 /* 2707 * Fill in our port's PWWN and NWWN 2708 */ 2709 fcmd->resp_pwwn = fport->fcipp_pwwn; 2710 fcmd->resp_nwwn = fport->fcipp_nwwn; 2711 2712 fcip_init_unicast_pkt(fcip_pkt, fport->fcipp_sid, 2713 fcmd->req_id, NULL); 2714 2715 fca_dev = 2716 fc_ulp_get_fca_device(fport->fcipp_handle, fcmd->req_id); 2717 fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt); 2718 fc_pkt->pkt_cmd_fhdr.r_ctl = R_CTL_ELS_RSP; 2719 fc_pkt->pkt_fca_device = fca_dev; 2720 fcip_pkt->fcip_pkt_dest = fdestp; 2721 2722 /* 2723 * Attempt a PLOGI again 2724 */ 2725 if (fcmd->resp_flags & FARP_INIT_P_LOGI) { 2726 if (fcip_do_plogi(fptr, frp) != FC_SUCCESS) { 2727 /* 2728 * Login to the remote port failed. There is no 2729 * point continuing with the FARP request further 2730 * so bail out here. 
2731 */ 2732 frp->fcipr_state = PORT_DEVICE_INVALID; 2733 rval = FC_FAILURE; 2734 goto farp_done; 2735 } 2736 } 2737 2738 FCIP_CP_OUT(fcmd, fc_pkt->pkt_cmd, fc_pkt->pkt_cmd_acc, 2739 sizeof (la_els_farp_t)); 2740 2741 rval = fc_ulp_issue_els(fport->fcipp_handle, fc_pkt); 2742 if (rval != FC_SUCCESS) { 2743 FCIP_TNF_PROBE_2((fcip_handle_farp_request, "fcip io", 2744 /* CSTYLED */, tnf_string, msg, 2745 "fcip_transport of farp reply failed", 2746 tnf_uint, rval, rval)); 2747 FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN, 2748 "fcip_transport of farp reply failed 0x%x", rval)); 2749 } 2750 2751 farp_done: 2752 return (rval); 2753 } 2754 2755 2756 /* 2757 * Handle FARP responses to our FARP requests. When we receive a FARP 2758 * reply, we need to add the entry for the Port that replied into our 2759 * routing and destination hash tables. It is possible that the remote 2760 * port did not login into us (FARP responses can be received without 2761 * a PLOGI) 2762 */ 2763 static int 2764 fcip_handle_farp_response(struct fcip *fptr, la_els_farp_t *fcmd) 2765 { 2766 int rval = FC_FAILURE; 2767 fc_portmap_t map; 2768 struct fcip_routing_table *frp; 2769 struct fcip_dest *fdestp; 2770 2771 /* 2772 * Add an entry for the remote port into our routing and destination 2773 * tables. 
2774 */ 2775 map.map_did = fcmd->dest_id; 2776 map.map_hard_addr.hard_addr = fcmd->dest_id.port_id; 2777 map.map_state = PORT_DEVICE_VALID; 2778 map.map_type = PORT_DEVICE_NEW; 2779 map.map_flags = 0; 2780 map.map_pd = NULL; 2781 bcopy((void *)&fcmd->resp_pwwn, (void *)&map.map_pwwn, 2782 sizeof (la_wwn_t)); 2783 bcopy((void *)&fcmd->resp_nwwn, (void *)&map.map_nwwn, 2784 sizeof (la_wwn_t)); 2785 fcip_rt_update(fptr, &map, 1); 2786 mutex_enter(&fptr->fcip_rt_mutex); 2787 frp = fcip_lookup_rtable(fptr, &fcmd->resp_pwwn, FCIP_COMPARE_NWWN); 2788 mutex_exit(&fptr->fcip_rt_mutex); 2789 2790 fdestp = fcip_add_dest(fptr, frp); 2791 2792 if (fdestp != NULL) { 2793 rval = FC_SUCCESS; 2794 } 2795 return (rval); 2796 } 2797 2798 2799 #define FCIP_HDRS_LENGTH \ 2800 sizeof (fcph_network_hdr_t)+sizeof (llc_snap_hdr_t)+sizeof (ipha_t) 2801 2802 /* 2803 * fcip_data_cb is the heart of most IP operations. This routine is called 2804 * by the transport when any unsolicited IP data arrives at a port (which 2805 * is almost all IP data). This routine then strips off the Network header 2806 * from the payload (after authenticating the received payload ofcourse), 2807 * creates a message blk and sends the data upstream. You will see ugly 2808 * #defines because of problems with using esballoc() as opposed to 2809 * allocb to prevent an extra copy of data. We should probably move to 2810 * esballoc entirely when the MTU eventually will be larger than 1500 bytes 2811 * since copies will get more expensive then. At 1500 byte MTUs, there is 2812 * no noticable difference between using allocb and esballoc. The other 2813 * caveat is that the qlc firmware still cannot tell us accurately the 2814 * no. of valid bytes in the unsol buffer it DMA'ed so we have to resort 2815 * to looking into the IP header and hoping that the no. of bytes speficified 2816 * in the header was actually received. 
 */
/* ARGSUSED */
static int
fcip_data_cb(opaque_t ulp_handle, opaque_t phandle,
    fc_unsol_buf_t *buf, uint32_t claimed)
{
	fcip_port_info_t	*fport;
	struct fcip		*fptr;
	fcph_network_hdr_t	*nhdr;
	llc_snap_hdr_t		*snaphdr;
	mblk_t			*bp;
	uint32_t		len;
	uint32_t		hdrlen;
	ushort_t		type;
	ipha_t			*iphdr;
	int			rval;

#ifdef FCIP_ESBALLOC
	frtn_t			*free_ubuf;
	struct fcip_esballoc_arg *fesb_argp;
#endif /* FCIP_ESBALLOC */

	fport = fcip_get_port(phandle);
	if (fport == NULL) {
		return (FC_UNCLAIMED);
	}

	fptr = fport->fcipp_fcip;
	ASSERT(fptr != NULL);

	/* defensive re-check for non-DEBUG kernels where ASSERT is a no-op */
	if (fptr == NULL) {
		return (FC_UNCLAIMED);
	}

	mutex_enter(&fptr->fcip_mutex);
	if ((fptr->fcip_flags & (FCIP_DETACHING | FCIP_DETACHED)) ||
	    (fptr->fcip_flags & (FCIP_SUSPENDED | FCIP_POWER_DOWN))) {
		mutex_exit(&fptr->fcip_mutex);
		rval = FC_UNCLAIMED;
		/*
		 * FCIP_IN_DATA_CB has not been set yet, but the common
		 * exit path clears it unconditionally - clearing an
		 * unset bit is harmless.
		 */
		goto data_cb_done;
	}

	/*
	 * set fcip flags to indicate we are in the middle of a
	 * data callback so we can wait till the statechange
	 * is handled before succeeding/failing the SUSPEND/POWER DOWN.
	 */
	fptr->fcip_flags |= FCIP_IN_DATA_CB;
	mutex_exit(&fptr->fcip_mutex);

	FCIP_TNF_PROBE_2((fcip_data_cb, "fcip io", /* CSTYLED */,
		tnf_string, msg, "data callback",
		tnf_int, instance, ddi_get_instance(fport->fcipp_dip)));
	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
	    (CE_NOTE, "fcip%d, data callback",
	    ddi_get_instance(fport->fcipp_dip)));

	/*
	 * get to the network and snap headers in the payload
	 */
	nhdr = (fcph_network_hdr_t *)buf->ub_buffer;
	snaphdr = (llc_snap_hdr_t *)(buf->ub_buffer +
	    sizeof (fcph_network_hdr_t));

	hdrlen = sizeof (fcph_network_hdr_t) + sizeof (llc_snap_hdr_t);

	/*
	 * get the IP header to obtain the no. of bytes we need to read
	 * off from the unsol buffer. This obviously is because not all
	 * data fills up the unsol buffer completely and the firmware
	 * doesn't tell us how many valid bytes are in there as well
	 */
	iphdr = (ipha_t *)(buf->ub_buffer + hdrlen);
	/* byte-swap the SNAP protocol id in place before classifying */
	snaphdr->pid = BE_16(snaphdr->pid);
	type = snaphdr->pid;

	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
	    (CE_CONT, "SNAPHDR: dsap %x, ssap %x, ctrl %x\n",
	    snaphdr->dsap, snaphdr->ssap, snaphdr->ctrl));

	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
	    (CE_CONT, "oui[0] 0x%x oui[1] 0x%x oui[2] 0x%x pid 0x%x\n",
	    snaphdr->oui[0], snaphdr->oui[1], snaphdr->oui[2], snaphdr->pid));

	/* Authenticate, Authenticate */
	if (type == ETHERTYPE_IP) {
		len = hdrlen + BE_16(iphdr->ipha_length);
	} else if (type == ETHERTYPE_ARP) {
		/* 28 == fixed size of an ARP payload for IPv4 over 48-bit MAC */
		len = hdrlen + 28;
	} else {
		len = buf->ub_bufsize;
	}

	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
	    (CE_CONT, "effective packet length is %d bytes.\n", len));

	if (len < hdrlen || len > FCIP_UB_SIZE) {
		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
		    (CE_NOTE, "Incorrect buffer size %d bytes", len));
		rval = FC_UNCLAIMED;
		goto data_cb_done;
	}

	if (buf->ub_frame.type != FC_TYPE_IS8802_SNAP) {
		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM, (CE_NOTE, "Not IP/ARP data"));
		rval = FC_UNCLAIMED;
		goto data_cb_done;
	}

	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM, (CE_NOTE, "checking wwn"));

	/* accept only frames addressed to our port WWN or broadcast */
	if ((fcip_wwn_compare(&nhdr->net_dest_addr, &fport->fcipp_pwwn,
	    FCIP_COMPARE_NWWN) != 0) &&
	    (!IS_BROADCAST_ADDR(&nhdr->net_dest_addr))) {
		rval = FC_UNCLAIMED;
		goto data_cb_done;
	} else if (fcip_cache_on_arp_broadcast &&
	    IS_BROADCAST_ADDR(&nhdr->net_dest_addr)) {
		/* optionally learn WWN->D_ID mappings from ARP broadcasts */
		fcip_cache_arp_broadcast(fptr, buf);
	}

	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM, (CE_NOTE, "Allocate streams block"));

	/*
	 * Using esballoc instead of allocb should be faster, at least at
	 * larger MTUs than 1500 bytes. Someday we'll get there :)
	 */
#if defined(FCIP_ESBALLOC)
	/*
	 * allocate memory for the frtn function arg. The Function
	 * (fcip_ubfree) arg is a struct fcip_esballoc_arg type
	 * which contains pointers to the unsol buffer and the
	 * opaque port handle for releasing the unsol buffer back to
	 * the FCA for reuse
	 */
	fesb_argp = (struct fcip_esballoc_arg *)
	    kmem_zalloc(sizeof (struct fcip_esballoc_arg), KM_NOSLEEP);

	if (fesb_argp == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
		    (CE_WARN, "esballoc of mblk failed in data_cb"));
		rval = FC_UNCLAIMED;
		goto data_cb_done;
	}
	/*
	 * Check with KM_NOSLEEP
	 */
	free_ubuf = (frtn_t *)kmem_zalloc(sizeof (frtn_t), KM_NOSLEEP);
	if (free_ubuf == NULL) {
		kmem_free(fesb_argp, sizeof (struct fcip_esballoc_arg));
		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
		    (CE_WARN, "esballoc of mblk failed in data_cb"));
		rval = FC_UNCLAIMED;
		goto data_cb_done;
	}

	/* the free routine releases the UB back to the FCA on last free */
	fesb_argp->frtnp = free_ubuf;
	fesb_argp->buf = buf;
	fesb_argp->phandle = phandle;
	free_ubuf->free_func = fcip_ubfree;
	free_ubuf->free_arg = (char *)fesb_argp;
	if ((bp = (mblk_t *)esballoc((unsigned char *)buf->ub_buffer,
	    len, BPRI_MED, free_ubuf)) == NULL) {
		kmem_free(fesb_argp, sizeof (struct fcip_esballoc_arg));
		kmem_free(free_ubuf, sizeof (frtn_t));
		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
		    (CE_WARN, "esballoc of mblk failed in data_cb"));
		rval = FC_UNCLAIMED;
		goto data_cb_done;
	}
#elif !defined(FCIP_ESBALLOC)
	/*
	 * allocate streams mblk and copy the contents of the
	 * unsolicited buffer into this newly alloc'ed mblk
	 */
	if ((bp = (mblk_t *)fcip_allocb((size_t)len, BPRI_LO)) == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
		    (CE_WARN, "alloc of mblk failed in data_cb"));
		rval = FC_UNCLAIMED;
		goto data_cb_done;
	}

	/*
	 * Unsolicited buffers handed up to us from the FCA must be
	 * endian clean so just bcopy the data into our mblk. Else
	 * we may have to either copy the data byte by byte or
	 * use the ddi_rep_get* routines to do the copy for us.
	 */
	bcopy(buf->ub_buffer, bp->b_rptr, len);

	/*
	 * for esballoc'ed mblks - free the UB in the frtn function
	 * along with the memory allocated for the function arg.
	 * for allocb'ed mblk - release the unsolicited buffer here
	 */
	(void) fc_ulp_ubrelease(phandle, 1, &buf->ub_token);

#endif /* FCIP_ESBALLOC */

	bp->b_wptr = bp->b_rptr + len;
	fptr->fcip_ipackets++;

	if (type == ETHERTYPE_IP) {
		mutex_enter(&fptr->fcip_mutex);
		fptr->fcip_ub_upstream++;
		mutex_exit(&fptr->fcip_mutex);
		/* strip the network + SNAP headers before handing to IP */
		bp->b_rptr += hdrlen;

		/*
		 * Check if ipq is valid in the sendup thread
		 */
		if (fcip_sendup_alloc_enque(fptr, bp, NULL) != FC_SUCCESS) {
			freemsg(bp);
		}
	} else {
		/*
		 * We won't get ethernet 802.3 packets in FCIP but we may get
		 * types other than ETHERTYPE_IP, such as ETHERTYPE_ARP. Let
		 * fcip_sendup() do the matching.
		 */
		mutex_enter(&fptr->fcip_mutex);
		fptr->fcip_ub_upstream++;
		mutex_exit(&fptr->fcip_mutex);
		if (fcip_sendup_alloc_enque(fptr, bp,
		    fcip_accept) != FC_SUCCESS) {
			freemsg(bp);
		}
	}

	rval = FC_SUCCESS;

	/*
	 * Unset fcip_flags to indicate we are out of callback and return
	 */
data_cb_done:
	mutex_enter(&fptr->fcip_mutex);
	fptr->fcip_flags &= ~(FCIP_IN_DATA_CB);
	mutex_exit(&fptr->fcip_mutex);
	return (rval);
}

#if !defined(FCIP_ESBALLOC)
/*
 * Allocate a message block for the inbound data to be sent upstream.
3061 */ 3062 static void * 3063 fcip_allocb(size_t size, uint_t pri) 3064 { 3065 mblk_t *mp; 3066 3067 if ((mp = allocb(size, pri)) == NULL) { 3068 return (NULL); 3069 } 3070 return (mp); 3071 } 3072 3073 #endif 3074 3075 /* 3076 * This helper routine kmem cache alloc's a sendup element for enquing 3077 * into the sendup list for callbacks upstream from the dedicated sendup 3078 * thread. We enque the msg buf into the sendup list and cv_signal the 3079 * sendup thread to finish the callback for us. 3080 */ 3081 static int 3082 fcip_sendup_alloc_enque(struct fcip *fptr, mblk_t *mp, struct fcipstr *(*f)()) 3083 { 3084 struct fcip_sendup_elem *msg_elem; 3085 int rval = FC_FAILURE; 3086 3087 FCIP_TNF_PROBE_1((fcip_sendup_alloc_enque, "fcip io", /* CSTYLED */, 3088 tnf_string, msg, "sendup msg enque")); 3089 msg_elem = kmem_cache_alloc(fptr->fcip_sendup_cache, KM_NOSLEEP); 3090 if (msg_elem == NULL) { 3091 /* drop pkt to floor - update stats */ 3092 rval = FC_FAILURE; 3093 goto sendup_alloc_done; 3094 } 3095 msg_elem->fcipsu_mp = mp; 3096 msg_elem->fcipsu_func = f; 3097 3098 mutex_enter(&fptr->fcip_sendup_mutex); 3099 if (fptr->fcip_sendup_head == NULL) { 3100 fptr->fcip_sendup_head = fptr->fcip_sendup_tail = msg_elem; 3101 } else { 3102 fptr->fcip_sendup_tail->fcipsu_next = msg_elem; 3103 fptr->fcip_sendup_tail = msg_elem; 3104 } 3105 fptr->fcip_sendup_cnt++; 3106 cv_signal(&fptr->fcip_sendup_cv); 3107 mutex_exit(&fptr->fcip_sendup_mutex); 3108 rval = FC_SUCCESS; 3109 3110 sendup_alloc_done: 3111 return (rval); 3112 } 3113 3114 /* 3115 * One of the ways of performing the WWN to D_ID mapping required for 3116 * IPFC data is to cache the unsolicited ARP broadcast messages received 3117 * and update the routing table to add entry for the destination port 3118 * if we are the intended recipient of the ARP broadcast message. This is 3119 * one of the methods recommended in the rfc to obtain the WWN to D_ID mapping 3120 * but is not typically used unless enabled. 
The driver prefers to use the 3121 * nameserver/lilp map to obtain this mapping. 3122 */ 3123 static void 3124 fcip_cache_arp_broadcast(struct fcip *fptr, fc_unsol_buf_t *buf) 3125 { 3126 fcip_port_info_t *fport; 3127 fcph_network_hdr_t *nhdr; 3128 struct fcip_routing_table *frp; 3129 fc_portmap_t map; 3130 3131 fport = fptr->fcip_port_info; 3132 if (fport == NULL) { 3133 return; 3134 } 3135 ASSERT(fport != NULL); 3136 3137 nhdr = (fcph_network_hdr_t *)buf->ub_buffer; 3138 3139 mutex_enter(&fptr->fcip_rt_mutex); 3140 frp = fcip_lookup_rtable(fptr, &nhdr->net_src_addr, FCIP_COMPARE_NWWN); 3141 mutex_exit(&fptr->fcip_rt_mutex); 3142 if (frp == NULL) { 3143 map.map_did.port_id = buf->ub_frame.s_id; 3144 map.map_hard_addr.hard_addr = buf->ub_frame.s_id; 3145 map.map_state = PORT_DEVICE_VALID; 3146 map.map_type = PORT_DEVICE_NEW; 3147 map.map_flags = 0; 3148 map.map_pd = NULL; 3149 bcopy((void *)&nhdr->net_src_addr, (void *)&map.map_pwwn, 3150 sizeof (la_wwn_t)); 3151 bcopy((void *)&nhdr->net_src_addr, (void *)&map.map_nwwn, 3152 sizeof (la_wwn_t)); 3153 fcip_rt_update(fptr, &map, 1); 3154 mutex_enter(&fptr->fcip_rt_mutex); 3155 frp = fcip_lookup_rtable(fptr, &nhdr->net_src_addr, 3156 FCIP_COMPARE_NWWN); 3157 mutex_exit(&fptr->fcip_rt_mutex); 3158 3159 (void) fcip_add_dest(fptr, frp); 3160 } 3161 3162 } 3163 3164 /* 3165 * This is a dedicated thread to do callbacks from fcip's data callback 3166 * routines into the modules upstream. The reason for this thread is 3167 * the data callback function can be called from an interrupt context and 3168 * the upstream modules *can* make calls downstream in the same thread 3169 * context. If the call is to a fabric port which is not yet in our 3170 * routing tables, we may have to query the nameserver/fabric for the 3171 * MAC addr to Port_ID mapping which may be blocking calls. 
 */
static void
fcip_sendup_thr(void *arg)
{
	struct fcip		*fptr = (struct fcip *)arg;
	struct fcip_sendup_elem	*msg_elem;
	queue_t			*ip4q = NULL;

	/* Register with the CPR (suspend/resume) framework */
	CALLB_CPR_INIT(&fptr->fcip_cpr_info, &fptr->fcip_sendup_mutex,
	    callb_generic_cpr, "fcip_sendup_thr");

	mutex_enter(&fptr->fcip_sendup_mutex);
	for (;;) {

		/*
		 * Sleep until work arrives on the sendup list or the
		 * thread is asked to terminate (thr_initted cleared).
		 */
		while (fptr->fcip_sendup_thr_initted &&
		    fptr->fcip_sendup_head == NULL) {
			CALLB_CPR_SAFE_BEGIN(&fptr->fcip_cpr_info);
			cv_wait(&fptr->fcip_sendup_cv,
			    &fptr->fcip_sendup_mutex);
			CALLB_CPR_SAFE_END(&fptr->fcip_cpr_info,
			    &fptr->fcip_sendup_mutex);
		}

		if (fptr->fcip_sendup_thr_initted == 0) {
			break;
		}

		FCIP_TNF_PROBE_1((fcip_sendup_thr, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "fcip sendup thr - new msg"));

		/* Dequeue the head element; drop the lock while sending up */
		msg_elem = fptr->fcip_sendup_head;
		fptr->fcip_sendup_head = msg_elem->fcipsu_next;
		msg_elem->fcipsu_next = NULL;
		mutex_exit(&fptr->fcip_sendup_mutex);

		if (msg_elem->fcipsu_func == NULL) {
			/*
			 * Message for ipq. Check to see if the ipq is
			 * still valid. Since the thread is asynchronous,
			 * there could have been a close on the stream
			 */
			mutex_enter(&fptr->fcip_mutex);
			if (fptr->fcip_ipq && canputnext(fptr->fcip_ipq)) {
				ip4q = fptr->fcip_ipq;
				mutex_exit(&fptr->fcip_mutex);
				putnext(ip4q, msg_elem->fcipsu_mp);
			} else {
				/* ipq gone or flow-controlled - drop */
				mutex_exit(&fptr->fcip_mutex);
				freemsg(msg_elem->fcipsu_mp);
			}
		} else {
			/* Let fcip_sendup() match the accepting stream(s) */
			fcip_sendup(fptr, msg_elem->fcipsu_mp,
			    msg_elem->fcipsu_func);
		}

#if !defined(FCIP_ESBALLOC)
		/*
		 * for allocb'ed mblk - decrement upstream count here;
		 * the esballoc path decrements it in fcip_ubfree()
		 */
		mutex_enter(&fptr->fcip_mutex);
		ASSERT(fptr->fcip_ub_upstream > 0);
		fptr->fcip_ub_upstream--;
		mutex_exit(&fptr->fcip_mutex);
#endif /* FCIP_ESBALLOC */

		kmem_cache_free(fptr->fcip_sendup_cache, (void *)msg_elem);
		mutex_enter(&fptr->fcip_sendup_mutex);
		fptr->fcip_sendup_cnt--;
	}


	/* NOTE: CALLB_CPR_EXIT drops fcip_sendup_mutex */
	CALLB_CPR_EXIT(&fptr->fcip_cpr_info);

	/* Wake up fcip detach thread by the end */
	cv_signal(&fptr->fcip_sendup_cv);

	thread_exit();
}

#ifdef FCIP_ESBALLOC

/*
 * called from the stream head when it is done using an unsolicited buffer.
 * We release this buffer then to the FCA for reuse.
3256 */ 3257 static void 3258 fcip_ubfree(char *arg) 3259 { 3260 struct fcip_esballoc_arg *fesb_argp = (struct fcip_esballoc_arg *)arg; 3261 fc_unsol_buf_t *ubuf; 3262 frtn_t *frtnp; 3263 fcip_port_info_t *fport; 3264 struct fcip *fptr; 3265 3266 3267 fport = fcip_get_port(fesb_argp->phandle); 3268 fptr = fport->fcipp_fcip; 3269 3270 ASSERT(fesb_argp != NULL); 3271 ubuf = fesb_argp->buf; 3272 frtnp = fesb_argp->frtnp; 3273 3274 3275 FCIP_DEBUG(FCIP_DEBUG_UPSTREAM, 3276 (CE_WARN, "freeing ubuf after esballoc in fcip_ubfree")); 3277 (void) fc_ulp_ubrelease(fesb_argp->phandle, 1, &ubuf->ub_token); 3278 3279 mutex_enter(&fptr->fcip_mutex); 3280 ASSERT(fptr->fcip_ub_upstream > 0); 3281 fptr->fcip_ub_upstream--; 3282 cv_signal(&fptr->fcip_ub_cv); 3283 mutex_exit(&fptr->fcip_mutex); 3284 3285 kmem_free(frtnp, sizeof (frtn_t)); 3286 kmem_free(fesb_argp, sizeof (struct fcip_esballoc_arg)); 3287 } 3288 3289 #endif /* FCIP_ESBALLOC */ 3290 3291 /* 3292 * handle data other than that of type ETHERTYPE_IP and send it on its 3293 * way upstream to the right streams module to handle 3294 */ 3295 static void 3296 fcip_sendup(struct fcip *fptr, mblk_t *mp, struct fcipstr *(*acceptfunc)()) 3297 { 3298 struct fcipstr *slp, *nslp; 3299 la_wwn_t *dhostp; 3300 mblk_t *nmp; 3301 uint32_t isgroupaddr; 3302 int type; 3303 uint32_t hdrlen; 3304 fcph_network_hdr_t *nhdr; 3305 llc_snap_hdr_t *snaphdr; 3306 3307 FCIP_TNF_PROBE_1((fcip_sendup, "fcip io", /* CSTYLED */, 3308 tnf_string, msg, "fcip sendup")); 3309 nhdr = (fcph_network_hdr_t *)mp->b_rptr; 3310 snaphdr = 3311 (llc_snap_hdr_t *)(mp->b_rptr + sizeof (fcph_network_hdr_t)); 3312 dhostp = &nhdr->net_dest_addr; 3313 type = snaphdr->pid; 3314 hdrlen = sizeof (fcph_network_hdr_t) + sizeof (llc_snap_hdr_t); 3315 3316 /* No group address with fibre channel */ 3317 isgroupaddr = 0; 3318 3319 /* 3320 * While holding a reader lock on the linked list of streams structures, 3321 * attempt to match the address criteria for each stream 3322 * and 
pass up the raw M_DATA ("fastpath") or a DL_UNITDATA_IND. 3323 */ 3324 3325 rw_enter(&fcipstruplock, RW_READER); 3326 3327 if ((slp = (*acceptfunc)(fcipstrup, fptr, type, dhostp)) == NULL) { 3328 rw_exit(&fcipstruplock); 3329 FCIP_TNF_PROBE_1((fcip_sendup, "fcip io", /* CSTYLED */, 3330 tnf_string, msg, "fcip sendup - no slp")); 3331 freemsg(mp); 3332 return; 3333 } 3334 3335 /* 3336 * Loop on matching open streams until (*acceptfunc)() returns NULL. 3337 */ 3338 for (; nslp = (*acceptfunc)(slp->sl_nextp, fptr, type, dhostp); 3339 slp = nslp) { 3340 if (canputnext(slp->sl_rq)) { 3341 if (nmp = dupmsg(mp)) { 3342 if ((slp->sl_flags & FCIP_SLFAST) && 3343 !isgroupaddr) { 3344 nmp->b_rptr += hdrlen; 3345 putnext(slp->sl_rq, nmp); 3346 } else if (slp->sl_flags & FCIP_SLRAW) { 3347 /* No headers when FCIP_SLRAW is set */ 3348 putnext(slp->sl_rq, nmp); 3349 } else if ((nmp = fcip_addudind(fptr, nmp, 3350 nhdr, type))) { 3351 putnext(slp->sl_rq, nmp); 3352 } 3353 } 3354 } 3355 } 3356 3357 /* 3358 * Do the last one. 3359 */ 3360 if (canputnext(slp->sl_rq)) { 3361 if (slp->sl_flags & FCIP_SLFAST) { 3362 mp->b_rptr += hdrlen; 3363 putnext(slp->sl_rq, mp); 3364 } else if (slp->sl_flags & FCIP_SLRAW) { 3365 putnext(slp->sl_rq, mp); 3366 } else if ((mp = fcip_addudind(fptr, mp, nhdr, type))) { 3367 putnext(slp->sl_rq, mp); 3368 } 3369 } else { 3370 freemsg(mp); 3371 } 3372 FCIP_TNF_PROBE_1((fcip_sendup, "fcip io", /* CSTYLED */, 3373 tnf_string, msg, "fcip sendup done")); 3374 3375 rw_exit(&fcipstruplock); 3376 } 3377 3378 /* 3379 * Match the stream based on type and wwn if necessary. 3380 * Destination wwn dhostp is passed to this routine is reserved 3381 * for future usage. We don't need to use it right now since port 3382 * to fcip instance mapping is unique and wwn is already validated when 3383 * packet comes to fcip. 
 */
/* ARGSUSED */
static struct fcipstr *
fcip_accept(struct fcipstr *slp, struct fcip *fptr, int type, la_wwn_t *dhostp)
{
	t_uscalar_t sap;

	FCIP_TNF_PROBE_1((fcip_accept, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "fcip accept"));

	/*
	 * Walk the stream list starting at slp; a stream matches when it
	 * is bound to this fcip instance and its bound SAP equals the
	 * packet's SNAP pid (type).
	 */
	for (; slp; slp = slp->sl_nextp) {
		sap = slp->sl_sap;
		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM, (CE_CONT,
		    "fcip_accept: checking next sap = %x, type = %x",
		    sap, type));

		if ((slp->sl_fcip == fptr) && (type == sap)) {
			return (slp);
		}
	}
	return (NULL);
}

/*
 * Handle DL_UNITDATA_IND messages: strip the network/SNAP headers off
 * the M_DATA mblk and prepend an M_PROTO DL_UNITDATA_IND carrying the
 * source and destination addresses. Returns the linked message, or
 * NULL (with mp freed) if the M_PROTO mblk cannot be allocated.
 */
static mblk_t *
fcip_addudind(struct fcip *fptr, mblk_t *mp, fcph_network_hdr_t *nhdr,
    int type)
{
	dl_unitdata_ind_t	*dludindp;
	struct fcipdladdr	*dlap;
	mblk_t			*nmp;
	int			size;
	uint32_t		hdrlen;
	struct ether_addr	src_addr;
	struct ether_addr	dest_addr;


	/* advance past the network and llc/snap headers */
	hdrlen = (sizeof (llc_snap_hdr_t) + sizeof (fcph_network_hdr_t));
	mp->b_rptr += hdrlen;

	FCIP_TNF_PROBE_1((fcip_addudind, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "fcip addudind"));

	/*
	 * Allocate an M_PROTO mblk for the DL_UNITDATA_IND.
	 */
	size = sizeof (dl_unitdata_ind_t) + FCIPADDRL + FCIPADDRL;
	if ((nmp = allocb(size, BPRI_LO)) == NULL) {
		fptr->fcip_allocbfail++;
		freemsg(mp);
		return (NULL);
	}
	DB_TYPE(nmp) = M_PROTO;
	nmp->b_wptr = nmp->b_datap->db_lim;
	nmp->b_rptr = nmp->b_wptr - size;

	/*
	 * Construct a DL_UNITDATA_IND primitive.
	 */
	dludindp = (dl_unitdata_ind_t *)nmp->b_rptr;
	dludindp->dl_primitive = DL_UNITDATA_IND;
	dludindp->dl_dest_addr_length = FCIPADDRL;
	dludindp->dl_dest_addr_offset = sizeof (dl_unitdata_ind_t);
	dludindp->dl_src_addr_length = FCIPADDRL;
	dludindp->dl_src_addr_offset = sizeof (dl_unitdata_ind_t) + FCIPADDRL;
	dludindp->dl_group_address = 0;		/* not DL_MULTI */

	/* destination address: WWN folded to its 6-byte MAC form */
	dlap = (struct fcipdladdr *)(nmp->b_rptr + sizeof (dl_unitdata_ind_t));
	wwn_to_ether(&nhdr->net_dest_addr, &dest_addr);
	ether_bcopy(&dest_addr, &dlap->dl_phys);
	dlap->dl_sap = (uint16_t)type;

	/* source address follows the destination address */
	dlap = (struct fcipdladdr *)(nmp->b_rptr + sizeof (dl_unitdata_ind_t)
	    + FCIPADDRL);
	wwn_to_ether(&nhdr->net_src_addr, &src_addr);
	ether_bcopy(&src_addr, &dlap->dl_phys);
	dlap->dl_sap = (uint16_t)type;

	/*
	 * Link the M_PROTO and M_DATA together.
	 */
	nmp->b_cont = mp;
	return (nmp);
}


/*
 * The open routine. For clone opens, we return the next available minor
 * no.
 * for the stream to use
 */
/* ARGSUSED */
static int
fcip_open(queue_t *rq, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	struct fcipstr	*slp;
	struct fcipstr	**prevslp;
	minor_t		minor;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "in fcip_open"));
	FCIP_TNF_PROBE_1((fcip_open, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "enter"));
	/*
	 * We need to ensure that the port driver is loaded before
	 * we proceed
	 */
	if (ddi_hold_installed_driver(ddi_name_to_major(PORT_DRIVER)) == NULL) {
		/* no port driver instances found */
		FCIP_DEBUG(FCIP_DEBUG_STARTUP, (CE_WARN,
		    "!ddi_hold_installed_driver of fp failed\n"));
		return (ENXIO);
	}
	/* serialize opens */
	rw_enter(&fcipstruplock, RW_WRITER);

	prevslp = &fcipstrup;
	if (sflag == CLONEOPEN) {
		/*
		 * find the lowest unused minor number; the list is kept
		 * sorted by minor, so the first gap is the answer
		 */
		minor = 0;
		for (; (slp = *prevslp) != NULL; prevslp = &slp->sl_nextp) {
			if (minor < slp->sl_minor) {
				break;
			}
			minor ++;
		}
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
		    "getmajor returns 0x%x", getmajor(*devp)));
		*devp = makedevice(getmajor(*devp), minor);
	} else {
		minor = getminor(*devp);
	}

	/*
	 * check if our qp's private area is already initialized. If yes
	 * the stream is already open - just return
	 */
	if (rq->q_ptr) {
		goto done;
	}

	/* NOTE(review): GETSTRUCT result is not checked for NULL here */
	slp = GETSTRUCT(struct fcipstr, 1);
	slp->sl_minor = minor;
	slp->sl_rq = rq;
	slp->sl_sap = 0;
	slp->sl_flags = 0;
	slp->sl_state = DL_UNATTACHED;
	slp->sl_fcip = NULL;

	mutex_init(&slp->sl_lock, NULL, MUTEX_DRIVER, NULL);

	/*
	 * link this new stream entry into list of active streams
	 */
	slp->sl_nextp = *prevslp;
	*prevslp = slp;

	rq->q_ptr = WR(rq)->q_ptr = (char *)slp;

	/*
	 * Disable automatic enabling of our write service procedures;
	 * we need to control this explicitly. This will prevent
	 * anyone scheduling of our write service procedures.
	 */
	noenable(WR(rq));

done:
	rw_exit(&fcipstruplock);
	/*
	 * enable our put and service routines on the read side
	 */
	qprocson(rq);

	/*
	 * There is only one instance of fcip (instance = 0)
	 * for multiple instances of hardware
	 */
	(void) qassociate(rq, 0);	/* don't allow drcompat to be pushed */
	return (0);
}

/*
 * close an opened stream. The minor no. will then be available for
 * future opens.
 */
/* ARGSUSED */
static int
fcip_close(queue_t *rq, int flag, int otyp, cred_t *credp)
{
	struct fcipstr *slp;
	struct fcipstr **prevslp;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "in fcip_close"));
	FCIP_TNF_PROBE_1((fcip_close, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "enter"));
	ASSERT(rq);
	/* we should also have the active stream pointer in q_ptr */
	ASSERT(rq->q_ptr);

	/* drop the hold taken in fcip_open() */
	ddi_rele_driver(ddi_name_to_major(PORT_DRIVER));
	/*
	 * disable our put and service procedures. We had enabled them
	 * on open
	 */
	qprocsoff(rq);
	slp = (struct fcipstr *)rq->q_ptr;

	/*
	 * Implicitly detach a stream from an interface.
	 */
	if (slp->sl_fcip) {
		fcip_dodetach(slp);
	}

	(void) qassociate(rq, -1);	/* undo association in open */

	rw_enter(&fcipstruplock, RW_WRITER);

	/*
	 * unlink this stream from the active stream list and free it
	 */
	for (prevslp = &fcipstrup; (slp = *prevslp) != NULL;
	    prevslp = &slp->sl_nextp) {
		if (slp == (struct fcipstr *)rq->q_ptr) {
			break;
		}
	}

	/* we should have found slp */
	ASSERT(slp);

	*prevslp = slp->sl_nextp;
	mutex_destroy(&slp->sl_lock);
	kmem_free(slp, sizeof (struct fcipstr));
	rq->q_ptr = WR(rq)->q_ptr = NULL;

	rw_exit(&fcipstruplock);
	return (0);
}

/*
 * This is not an extension of the DDI_DETACH request. This routine
 * only detaches a stream from an interface
 */
static void
fcip_dodetach(struct fcipstr *slp)
{
	struct fcipstr	*tslp;
	struct fcip	*fptr;

	FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_NOTE, "in fcip_dodetach"));
	FCIP_TNF_PROBE_1((fcip_dodetach, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "enter"));
	ASSERT(slp->sl_fcip != NULL);

	fptr = slp->sl_fcip;
	slp->sl_fcip = NULL;

	/*
	 * we don't support promiscuous mode currently but check
	 * for and disable any promiscuous mode operation
	 */
	if (slp->sl_flags & SLALLPHYS) {
		slp->sl_flags &= ~SLALLPHYS;
	}

	/*
	 * disable ALLMULTI mode if all multicast addr are ON
	 */
	if (slp->sl_flags & SLALLMULTI) {
		slp->sl_flags &= ~SLALLMULTI;
	}

	/*
	 * we are most likely going to perform multicast by
	 * broadcasting to the well known addr (D_ID) 0xFFFFFF or
	 * ALPA 0x00 in case of public loops
	 */


	/*
	 * detach unit from device structure.
	 */
	for (tslp = fcipstrup; tslp != NULL; tslp = tslp->sl_nextp) {
		if (tslp->sl_fcip == fptr) {
			break;
		}
	}
	if (tslp == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_WARN,
		    "fcip_dodeatch - active stream struct not found"));

		/* unregister with Fabric nameserver?? */
	}
	slp->sl_state = DL_UNATTACHED;

	/* recompute the device's ipq now that this stream is detached */
	fcip_setipq(fptr);
}


/*
 * Set or clear device ipq pointer.
 * Walk thru all the streams on this device, if a ETHERTYPE_IP
 * stream is found, assign device ipq to its sl_rq.
 */
static void
fcip_setipq(struct fcip *fptr)
{
	struct fcipstr	*slp;
	int		ok = 1;
	queue_t		*ipq = NULL;

	FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "entered fcip_setipq"));

	rw_enter(&fcipstruplock, RW_READER);

	for (slp = fcipstrup; slp != NULL; slp = slp->sl_nextp) {
		if (slp->sl_fcip == fptr) {
			/* promiscuous/all-SAP streams disable fastpath */
			if (slp->sl_flags & (SLALLPHYS|SLALLSAP)) {
				ok = 0;
			}
			/* exactly one IP stream allowed for fastpath */
			if (slp->sl_sap == ETHERTYPE_IP) {
				if (ipq == NULL) {
					ipq = slp->sl_rq;
				} else {
					ok = 0;
				}
			}
		}
	}

	rw_exit(&fcipstruplock);

	if (fcip_check_port_exists(fptr)) {
		/* fptr passed to us is stale */
		return;
	}

	mutex_enter(&fptr->fcip_mutex);
	if (ok) {
		fptr->fcip_ipq = ipq;
	} else {
		fptr->fcip_ipq = NULL;
	}
	mutex_exit(&fptr->fcip_mutex);
}


/*
 * Handle M_IOCTL messages: only DLIOCRAW (raw mode) and
 * DL_IOC_HDR_INFO (fastpath) are supported; all else is nak'ed.
 */
/* ARGSUSED */
static void
fcip_ioctl(queue_t *wq, mblk_t *mp)
{
	struct iocblk	*iocp = (struct iocblk *)mp->b_rptr;
	struct fcipstr	*slp = (struct fcipstr *)wq->q_ptr;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "in fcip ioctl : %d", iocp->ioc_cmd));
	FCIP_TNF_PROBE_1((fcip_ioctl, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "enter"));

	switch (iocp->ioc_cmd) {
	case DLIOCRAW:
		slp->sl_flags |= FCIP_SLRAW;
		miocack(wq, mp, 0, 0);
		break;

	case DL_IOC_HDR_INFO:
		fcip_dl_ioc_hdr_info(wq, mp);
		break;

	default:
		miocnak(wq, mp, 0, EINVAL);
		break;
	}
}

/*
 * The streams 'Put' routine.
 */
/* ARGSUSED */
static int
fcip_wput(queue_t *wq, mblk_t *mp)
{
	struct fcipstr *slp = (struct fcipstr *)wq->q_ptr;
	struct fcip *fptr;
	struct fcip_dest *fdestp;
	fcph_network_hdr_t *headerp;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "in fcip_wput :: type:%x", DB_TYPE(mp)));

	switch (DB_TYPE(mp)) {
	case M_DATA: {

		fptr = slp->sl_fcip;

		/* fastpath/raw only; stream must be bound and attached */
		if (((slp->sl_flags & (FCIP_SLFAST|FCIP_SLRAW)) == 0) ||
		    (slp->sl_state != DL_IDLE) ||
		    (fptr == NULL)) {
			/*
			 * set error in the message block and send a reply
			 * back upstream. Sun's merror routine does this
			 * for us more cleanly.
			 */
			merror(wq, mp, EPROTO);
			break;
		}

		/*
		 * if any messages are already enqueued or if the interface
		 * is in promiscuous mode, causing the packets to loop back
		 * up, then enqueue the message. Otherwise just transmit
		 * the message. putq() puts the message on fcip's
		 * write queue and qenable() puts the queue (wq) on
		 * the list of queues to be called by the streams scheduler.
		 */
		if (wq->q_first) {
			(void) putq(wq, mp);
			fptr->fcip_wantw = 1;
			qenable(wq);
		} else if (fptr->fcip_flags & FCIP_PROMISC) {
			/*
			 * Promiscuous mode not supported but add this code in
			 * case it will be supported in future.
			 */
			(void) putq(wq, mp);
			qenable(wq);
		} else {

			headerp = (fcph_network_hdr_t *)mp->b_rptr;
			fdestp = fcip_get_dest(fptr, &headerp->net_dest_addr);

			if (fdestp == NULL) {
				merror(wq, mp, EPROTO);
				break;
			}

			ASSERT(fdestp != NULL);

			(void) fcip_start(wq, mp, fptr, fdestp, KM_SLEEP);
		}
		break;
	}
	case M_PROTO:
	case M_PCPROTO:
		/*
		 * to prevent recursive calls into fcip_proto
		 * (PROTO and PCPROTO messages are handled by fcip_proto)
		 * let the service procedure handle these messages by
		 * calling putq here.
		 */
		(void) putq(wq, mp);
		qenable(wq);
		break;

	case M_IOCTL:
		fcip_ioctl(wq, mp);
		break;

	case M_FLUSH:
		if (*mp->b_rptr & FLUSHW) {
			flushq(wq, FLUSHALL);
			*mp->b_rptr &= ~FLUSHW;
		}
		/*
		 * we have both FLUSHW and FLUSHR set with FLUSHRW
		 */
		if (*mp->b_rptr & FLUSHR) {
			/*
			 * send msg back upstream. qreply() takes care
			 * of using the RD(wq) queue on its reply
			 */
			qreply(wq, mp);
		} else {
			freemsg(mp);
		}
		break;

	default:
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "default msg type: %x", DB_TYPE(mp)));
		freemsg(mp);
		break;
	}
	return (0);
}


/*
 * Handle M_PROTO and M_PCPROTO messages: dispatch the DLPI primitive
 * to the matching handler while holding the per-stream lock.
 */
/* ARGSUSED */
static void
fcip_proto(queue_t *wq, mblk_t *mp)
{
	union DL_primitives	*dlp;
	struct fcipstr		*slp;
	t_uscalar_t		prim;

	slp = (struct fcipstr *)wq->q_ptr;
	dlp = (union DL_primitives *)mp->b_rptr;
	prim = dlp->dl_primitive;		/* the DLPI command */

	FCIP_TNF_PROBE_5((fcip_proto, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "enter",
	    tnf_opaque, wq, wq,
	    tnf_opaque, mp, mp,
	    tnf_opaque, MP_DB_TYPE, DB_TYPE(mp),
	    tnf_opaque, dl_primitive, dlp->dl_primitive));

	FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "dl_primitve : %x", prim));

	mutex_enter(&slp->sl_lock);

	switch (prim) {
	case DL_UNITDATA_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "unit data request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "unit data request"));
		fcip_udreq(wq, mp);
		break;

	case DL_ATTACH_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "Attach request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Attach request"));
		fcip_areq(wq, mp);
		break;

	case DL_DETACH_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "Detach request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Detach request"));
		fcip_dreq(wq, mp);
		break;

	case DL_BIND_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Bind request"));
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "Bind request"));
		fcip_breq(wq, mp);
		break;

	case DL_UNBIND_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "unbind request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "unbind request"));
		fcip_ubreq(wq, mp);
		break;

	case DL_INFO_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "Info request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Info request"));
		fcip_ireq(wq, mp);
		break;

	case DL_SET_PHYS_ADDR_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "set phy addr request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "set phy addr request"));
		fcip_spareq(wq, mp);
		break;

	case DL_PHYS_ADDR_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "phy addr request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "phy addr request"));
		fcip_pareq(wq, mp);
		break;

	case DL_ENABMULTI_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "Enable Multicast request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "Enable Multicast request"));
		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
		break;

	case DL_DISABMULTI_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "Disable Multicast request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "Disable Multicast request"));
		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
		break;

	case DL_PROMISCON_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "Promiscuous mode ON request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "Promiscuous mode ON request"));
		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
		break;

	case DL_PROMISCOFF_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "Promiscuous mode OFF request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "Promiscuous mode OFF request"));
		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
		break;

	default:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "Unsupported request"));
		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
		break;
	}
	mutex_exit(&slp->sl_lock);
}

/*
 * Always enqueue M_PROTO and M_PCPROTO messages on the wq and M_DATA
 * messages sometimes. Processing of M_PROTO and M_PCPROTO messages
 * require us to hold fcip's internal locks across (upstream) putnext
 * calls. Specifically fcip_intr could hold fcip_intrlock and fcipstruplock
 * when it calls putnext(). That thread could loop back around to call
 * fcip_wput and eventually fcip_init() to cause a recursive mutex panic
 *
 * M_DATA messages are enqueued only if we are out of xmit resources. Once
 * the transmit resources are available the service procedure is enabled
 * and an attempt is made to xmit all messages on the wq.
4007 */ 4008 /* ARGSUSED */ 4009 static int 4010 fcip_wsrv(queue_t *wq) 4011 { 4012 mblk_t *mp; 4013 struct fcipstr *slp; 4014 struct fcip *fptr; 4015 struct fcip_dest *fdestp; 4016 fcph_network_hdr_t *headerp; 4017 4018 slp = (struct fcipstr *)wq->q_ptr; 4019 fptr = slp->sl_fcip; 4020 4021 FCIP_TNF_PROBE_2((fcip_wsrv, "fcip io", /* CSTYLED */, 4022 tnf_string, msg, "enter", 4023 tnf_opaque, wq, wq)); 4024 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "fcip wsrv")); 4025 4026 while (mp = getq(wq)) { 4027 switch (DB_TYPE(mp)) { 4028 case M_DATA: 4029 if (fptr && mp) { 4030 headerp = (fcph_network_hdr_t *)mp->b_rptr; 4031 fdestp = fcip_get_dest(fptr, 4032 &headerp->net_dest_addr); 4033 if (fdestp == NULL) { 4034 freemsg(mp); 4035 goto done; 4036 } 4037 if (fcip_start(wq, mp, fptr, fdestp, 4038 KM_SLEEP)) { 4039 goto done; 4040 } 4041 } else { 4042 freemsg(mp); 4043 } 4044 break; 4045 4046 case M_PROTO: 4047 case M_PCPROTO: 4048 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, 4049 (CE_NOTE, "PROT msg in wsrv")); 4050 fcip_proto(wq, mp); 4051 break; 4052 default: 4053 break; 4054 } 4055 } 4056 done: 4057 return (0); 4058 } 4059 4060 4061 /* 4062 * This routine is called from fcip_wsrv to send a message downstream 4063 * on the fibre towards its destination. This routine performs the 4064 * actual WWN to D_ID mapping by looking up the routing and destination 4065 * tables. 
 */
/* ARGSUSED */
static int
fcip_start(queue_t *wq, mblk_t *mp, struct fcip *fptr,
    struct fcip_dest *fdestp, int flags)
{
	int			rval;
	int			free;
	fcip_pkt_t		*fcip_pkt;
	fc_packet_t		*fc_pkt;
	fcip_port_info_t	*fport = fptr->fcip_port_info;
	size_t			datalen;

	FCIP_TNF_PROBE_4((fcip_start, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "enter", tnf_opaque, wq, wq,
	    tnf_opaque, mp, mp,
	    tnf_opaque, MP_DB_TYPE, DB_TYPE(mp)));
	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "in fcipstart"));

	ASSERT(fdestp != NULL);

	/*
	 * Only return if port has gone offline and not come back online
	 * in a while
	 */
	if (fptr->fcip_flags & FCIP_LINK_DOWN) {
		freemsg(mp);
		return (0);
	}

	/*
	 * The message block coming in here already has the network and
	 * llc_snap hdr stuffed in
	 */
	/*
	 * Traditionally ethernet drivers at sun handle 3 cases here -
	 * 1. messages with one mblk
	 * 2. messages with 2 mblks
	 * 3. messages with >2 mblks
	 * For now lets handle all the 3 cases in a single case where we
	 * put them together in one mblk that has all the data
	 */

	if (mp->b_cont != NULL) {
		if (!pullupmsg(mp, -1)) {
			FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
			    (CE_WARN, "failed to concat message"));
			freemsg(mp);
			return (1);
		}
	}

	datalen = msgsize(mp);

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
	    "msgsize with nhdr & llcsnap hdr in fcip_pkt_alloc 0x%lx",
	    datalen));

	/*
	 * We cannot have requests larger than FCIPMTU+Headers
	 */
	if (datalen > (FCIPMTU + sizeof (llc_snap_hdr_t) +
	    sizeof (fcph_network_hdr_t))) {
		freemsg(mp);
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
		    "fcip_pkt_alloc: datalen is larger than "
		    "max possible size."));
		return (1);
	}

	/* allocation failure: requeue at the head and retry later */
	fcip_pkt = fcip_pkt_alloc(fptr, mp, flags, datalen);
	if (fcip_pkt == NULL) {
		(void) putbq(wq, mp);
		return (1);
	}

	fcip_pkt->fcip_pkt_mp = mp;
	fcip_pkt->fcip_pkt_wq = wq;
	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);

	mutex_enter(&fdestp->fcipd_mutex);
	/*
	 * If the device dynamically disappeared, just fail the request.
	 */
	if (fdestp->fcipd_rtable == NULL) {
		mutex_exit(&fdestp->fcipd_mutex);
		fcip_pkt_free(fcip_pkt, 1);
		return (1);
	}

	/*
	 * Now that we've assigned pkt_pd, we can call fc_ulp_init_packet
	 */

	fc_pkt->pkt_pd = fdestp->fcipd_pd;

	if (fc_ulp_init_packet((opaque_t)fport->fcipp_handle,
	    fc_pkt, flags) != FC_SUCCESS) {
		mutex_exit(&fdestp->fcipd_mutex);
		fcip_pkt_free(fcip_pkt, 1);
		return (1);
	}

	/* track the in-flight packet so fcip_pkt_timeout can reap it */
	fcip_fdestp_enqueue_pkt(fdestp, fcip_pkt);
	fcip_pkt->fcip_pkt_dest = fdestp;
	fc_pkt->pkt_fca_device = fdestp->fcipd_fca_dev;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
	    "setting cmdlen to 0x%x: rsp 0x%x : data 0x%x",
	    fc_pkt->pkt_cmdlen, fc_pkt->pkt_rsplen, fc_pkt->pkt_datalen));

	fcip_init_unicast_pkt(fcip_pkt, fport->fcipp_sid,
	    fdestp->fcipd_did, fcip_pkt_callback);

	fdestp->fcipd_ncmds++;

	mutex_exit(&fdestp->fcipd_mutex);
	if ((rval = fcip_transport(fcip_pkt)) == FC_SUCCESS) {
		fptr->fcip_opackets++;
		return (0);
	}

	/*
	 * Transport failed. For transient conditions (state change,
	 * offline, transport busy) keep the mblk and requeue it;
	 * otherwise free it along with the packet.
	 */
	free = (rval == FC_STATEC_BUSY || rval == FC_OFFLINE ||
	    rval == FC_TRAN_BUSY) ? 0 : 1;

	mutex_enter(&fdestp->fcipd_mutex);
	rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt);

	if (!rval) {
		/*
		 * Not found on the dest list - the timeout path must
		 * already own it, so don't free it here.
		 */
		fcip_pkt = NULL;
	} else {
		fdestp->fcipd_ncmds--;
	}
	mutex_exit(&fdestp->fcipd_mutex);

	if (fcip_pkt != NULL) {
		fcip_pkt_free(fcip_pkt, free);
	}

	if (!free) {
		(void) putbq(wq, mp);
	}

	return (1);
}


/*
 * This routine enqueus a packet marked to be issued to the
 * transport in the dest structure. This enables us to timeout any
 * request stuck with the FCA/transport for long periods of time
 * without a response. fcip_pkt_timeout will attempt to clean up
 * any packets hung in this state of limbo.
4219 */ 4220 static void 4221 fcip_fdestp_enqueue_pkt(struct fcip_dest *fdestp, fcip_pkt_t *fcip_pkt) 4222 { 4223 ASSERT(mutex_owned(&fdestp->fcipd_mutex)); 4224 FCIP_TNF_PROBE_1((fcip_fdestp_enqueue_pkt, "fcip io", /* CSTYLED */, 4225 tnf_string, msg, "destp enq pkt")); 4226 4227 /* 4228 * Just hang it off the head of packet list 4229 */ 4230 fcip_pkt->fcip_pkt_next = fdestp->fcipd_head; 4231 fcip_pkt->fcip_pkt_prev = NULL; 4232 fcip_pkt->fcip_pkt_flags |= FCIP_PKT_IN_LIST; 4233 4234 if (fdestp->fcipd_head != NULL) { 4235 ASSERT(fdestp->fcipd_head->fcip_pkt_prev == NULL); 4236 fdestp->fcipd_head->fcip_pkt_prev = fcip_pkt; 4237 } 4238 4239 fdestp->fcipd_head = fcip_pkt; 4240 } 4241 4242 4243 /* 4244 * dequeues any packets after the transport/FCA tells us it has 4245 * been successfully sent on its way. Ofcourse it doesn't mean that 4246 * the packet will actually reach its destination but its atleast 4247 * a step closer in that direction 4248 */ 4249 static int 4250 fcip_fdestp_dequeue_pkt(struct fcip_dest *fdestp, fcip_pkt_t *fcip_pkt) 4251 { 4252 fcip_pkt_t *fcipd_pkt; 4253 4254 ASSERT(mutex_owned(&fdestp->fcipd_mutex)); 4255 if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_TIMEOUT) { 4256 fcipd_pkt = fdestp->fcipd_head; 4257 while (fcipd_pkt) { 4258 if (fcipd_pkt == fcip_pkt) { 4259 fcip_pkt_t *pptr = NULL; 4260 4261 if (fcipd_pkt == fdestp->fcipd_head) { 4262 ASSERT(fcipd_pkt->fcip_pkt_prev == 4263 NULL); 4264 fdestp->fcipd_head = 4265 fcipd_pkt->fcip_pkt_next; 4266 } else { 4267 pptr = fcipd_pkt->fcip_pkt_prev; 4268 ASSERT(pptr != NULL); 4269 pptr->fcip_pkt_next = 4270 fcipd_pkt->fcip_pkt_next; 4271 } 4272 if (fcipd_pkt->fcip_pkt_next) { 4273 pptr = fcipd_pkt->fcip_pkt_next; 4274 pptr->fcip_pkt_prev = 4275 fcipd_pkt->fcip_pkt_prev; 4276 } 4277 fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_LIST; 4278 break; 4279 } 4280 fcipd_pkt = fcipd_pkt->fcip_pkt_next; 4281 } 4282 } else { 4283 if (fcip_pkt->fcip_pkt_prev == NULL) { 4284 ASSERT(fdestp->fcipd_head == fcip_pkt); 4285 
fdestp->fcipd_head = fcip_pkt->fcip_pkt_next; 4286 } else { 4287 fcip_pkt->fcip_pkt_prev->fcip_pkt_next = 4288 fcip_pkt->fcip_pkt_next; 4289 } 4290 4291 if (fcip_pkt->fcip_pkt_next) { 4292 fcip_pkt->fcip_pkt_next->fcip_pkt_prev = 4293 fcip_pkt->fcip_pkt_prev; 4294 } 4295 4296 fcipd_pkt = fcip_pkt; 4297 fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_LIST; 4298 } 4299 4300 return (fcipd_pkt == fcip_pkt); 4301 } 4302 4303 /* 4304 * The transport routine - this is the routine that actually calls 4305 * into the FCA driver (through the transport ofcourse) to transmit a 4306 * datagram on the fibre. The dest struct assoicated with the port to 4307 * which the data is intended is already bound to the packet, this routine 4308 * only takes care of marking the packet a broadcast packet if it is 4309 * intended to be a broadcast request. This permits the transport to send 4310 * the packet down on the wire even if it doesn't have an entry for the 4311 * D_ID in its d_id hash tables. 4312 */ 4313 static int 4314 fcip_transport(fcip_pkt_t *fcip_pkt) 4315 { 4316 struct fcip *fptr; 4317 fc_packet_t *fc_pkt; 4318 fcip_port_info_t *fport; 4319 struct fcip_dest *fdestp; 4320 uint32_t did; 4321 int rval = FC_FAILURE; 4322 struct fcip_routing_table *frp = NULL; 4323 4324 FCIP_TNF_PROBE_1((fcip_transport, "fcip io", /* CSTYLED */, 4325 tnf_string, msg, "enter")); 4326 4327 fptr = fcip_pkt->fcip_pkt_fptr; 4328 fport = fptr->fcip_port_info; 4329 fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt); 4330 fdestp = fcip_pkt->fcip_pkt_dest; 4331 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN, "fcip_transport called")); 4332 4333 did = fptr->fcip_broadcast_did; 4334 if (fc_pkt->pkt_cmd_fhdr.d_id == did && 4335 fc_pkt->pkt_tran_type != FC_PKT_BROADCAST) { 4336 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, 4337 (CE_NOTE, "trantype set to BROADCAST")); 4338 fc_pkt->pkt_tran_type = FC_PKT_BROADCAST; 4339 } 4340 4341 mutex_enter(&fptr->fcip_mutex); 4342 if ((fc_pkt->pkt_tran_type != FC_PKT_BROADCAST) && 4343 (fc_pkt->pkt_pd == 
NULL)) { 4344 mutex_exit(&fptr->fcip_mutex); 4345 FCIP_TNF_PROBE_1((fcip_transport, "fcip io", /* CSTYLED */, 4346 tnf_string, msg, "fcip transport no pd")); 4347 return (rval); 4348 } else if (fptr->fcip_port_state == FCIP_PORT_OFFLINE) { 4349 mutex_exit(&fptr->fcip_mutex); 4350 FCIP_TNF_PROBE_1((fcip_transport, "fcip io", /* CSTYLED */, 4351 tnf_string, msg, "fcip transport port offline")); 4352 return (FC_TRAN_BUSY); 4353 } 4354 mutex_exit(&fptr->fcip_mutex); 4355 4356 if (fdestp) { 4357 struct fcip_routing_table *frp; 4358 4359 frp = fdestp->fcipd_rtable; 4360 mutex_enter(&fptr->fcip_rt_mutex); 4361 mutex_enter(&fdestp->fcipd_mutex); 4362 if (fc_pkt->pkt_pd != NULL) { 4363 if ((frp == NULL) || 4364 (frp && FCIP_RTE_UNAVAIL(frp->fcipr_state))) { 4365 mutex_exit(&fdestp->fcipd_mutex); 4366 mutex_exit(&fptr->fcip_rt_mutex); 4367 if (frp && 4368 (frp->fcipr_state == FCIP_RT_INVALID)) { 4369 FCIP_TNF_PROBE_1((fcip_transport, 4370 "fcip io", /* CSTYLED */, 4371 tnf_string, msg, 4372 "fcip transport - TRANBUSY")); 4373 return (FC_TRAN_BUSY); 4374 } else { 4375 FCIP_TNF_PROBE_1((fcip_transport, 4376 "fcip io", /* CSTYLED */, 4377 tnf_string, msg, 4378 "fcip transport: frp unavailable")); 4379 return (rval); 4380 } 4381 } 4382 } 4383 mutex_exit(&fdestp->fcipd_mutex); 4384 mutex_exit(&fptr->fcip_rt_mutex); 4385 ASSERT(fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_LIST); 4386 } 4387 4388 /* Explicitly invalidate this field till fcip decides to use it */ 4389 fc_pkt->pkt_ulp_rscn_infop = NULL; 4390 4391 rval = fc_ulp_transport(fport->fcipp_handle, fc_pkt); 4392 if (rval == FC_STATEC_BUSY || rval == FC_OFFLINE) { 4393 /* 4394 * Need to queue up the command for retry 4395 */ 4396 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, 4397 (CE_WARN, "ulp_transport failed: 0x%x", rval)); 4398 } else if (rval == FC_LOGINREQ && (frp != NULL)) { 4399 (void) fcip_do_plogi(fptr, frp); 4400 } else if (rval == FC_BADPACKET && (frp != NULL)) { 4401 /* 4402 * There is a distinct possiblity in our scheme of things 
4403 * that we have a routing table entry with a NULL pd struct. 4404 * Mark the routing table entry for removal if it is not a 4405 * broadcast entry 4406 */ 4407 if ((frp->fcipr_d_id.port_id != 0x0) && 4408 (frp->fcipr_d_id.port_id != 0xffffff)) { 4409 mutex_enter(&fptr->fcip_rt_mutex); 4410 frp->fcipr_pd = NULL; 4411 frp->fcipr_state = PORT_DEVICE_INVALID; 4412 mutex_exit(&fptr->fcip_rt_mutex); 4413 } 4414 } 4415 4416 FCIP_TNF_PROBE_1((fcip_transport, "fcip io", /* CSTYLED */, 4417 tnf_string, msg, "fcip transport done")); 4418 return (rval); 4419 } 4420 4421 /* 4422 * Call back routine. Called by the FCA/transport when the messages 4423 * has been put onto the wire towards its intended destination. We can 4424 * now free the fc_packet associated with the message 4425 */ 4426 static void 4427 fcip_pkt_callback(fc_packet_t *fc_pkt) 4428 { 4429 int rval; 4430 fcip_pkt_t *fcip_pkt; 4431 struct fcip_dest *fdestp; 4432 4433 fcip_pkt = (fcip_pkt_t *)fc_pkt->pkt_ulp_private; 4434 fdestp = fcip_pkt->fcip_pkt_dest; 4435 4436 /* 4437 * take the lock early so that we don't have a race condition 4438 * with fcip_timeout 4439 * 4440 * fdestp->fcipd_mutex isn't really intended to lock per 4441 * packet struct - see bug 5105592 for permanent solution 4442 */ 4443 mutex_enter(&fdestp->fcipd_mutex); 4444 4445 fcip_pkt->fcip_pkt_flags |= FCIP_PKT_RETURNED; 4446 fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_ABORT; 4447 if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_TIMEOUT) { 4448 mutex_exit(&fdestp->fcipd_mutex); 4449 return; 4450 } 4451 4452 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "pkt callback")); 4453 4454 ASSERT(fdestp->fcipd_rtable != NULL); 4455 ASSERT(fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_LIST); 4456 rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt); 4457 fdestp->fcipd_ncmds--; 4458 mutex_exit(&fdestp->fcipd_mutex); 4459 4460 if (rval) { 4461 fcip_pkt_free(fcip_pkt, 1); 4462 } 4463 4464 FCIP_TNF_PROBE_1((fcip_pkt_callback, "fcip io", /* CSTYLED */, 4465 tnf_string, msg, "pkt 
callback done")); 4466 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "pkt callback done")); 4467 } 4468 4469 /* 4470 * Return 1 if the topology is supported, else return 0. 4471 * Topology support is consistent with what the whole 4472 * stack supports together. 4473 */ 4474 static int 4475 fcip_is_supported_fc_topology(int fc_topology) 4476 { 4477 switch (fc_topology) { 4478 4479 case FC_TOP_PRIVATE_LOOP : 4480 case FC_TOP_PUBLIC_LOOP : 4481 case FC_TOP_FABRIC : 4482 case FC_TOP_NO_NS : 4483 return (1); 4484 default : 4485 return (0); 4486 } 4487 } 4488 4489 /* 4490 * handle any topology specific initializations here 4491 * this routine must be called while holding fcip_mutex 4492 */ 4493 /* ARGSUSED */ 4494 static void 4495 fcip_handle_topology(struct fcip *fptr) 4496 { 4497 4498 fcip_port_info_t *fport = fptr->fcip_port_info; 4499 4500 ASSERT(mutex_owned(&fptr->fcip_mutex)); 4501 4502 /* 4503 * Since we know the port's topology - handle topology 4504 * specific details here. In Point to Point and Private Loop 4505 * topologies - we would probably not have a name server 4506 */ 4507 4508 FCIP_TNF_PROBE_3((fcip_handle_topology, "fcip io", /* CSTYLED */, 4509 tnf_string, msg, "enter", 4510 tnf_uint, port_state, fport->fcipp_pstate, 4511 tnf_uint, topology, fport->fcipp_topology)); 4512 FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "port state: %x, topology %x", 4513 fport->fcipp_pstate, fport->fcipp_topology)); 4514 4515 fptr->fcip_broadcast_did = fcip_get_broadcast_did(fptr); 4516 mutex_exit(&fptr->fcip_mutex); 4517 (void) fcip_dest_add_broadcast_entry(fptr, 0); 4518 mutex_enter(&fptr->fcip_mutex); 4519 4520 if (!fcip_is_supported_fc_topology(fport->fcipp_topology)) { 4521 FCIP_DEBUG(FCIP_DEBUG_INIT, 4522 (CE_WARN, "fcip(0x%x): Unsupported port topology (0x%x)", 4523 fptr->fcip_instance, fport->fcipp_topology)); 4524 return; 4525 } 4526 4527 switch (fport->fcipp_topology) { 4528 case FC_TOP_PRIVATE_LOOP: { 4529 4530 fc_portmap_t *port_map; 4531 uint32_t listlen, alloclen; 
4532 /* 4533 * we may have to maintain routing. Get a list of 4534 * all devices on this port that the transport layer is 4535 * aware of. Check if any of them is a IS8802 type port, 4536 * if yes get its WWN and DID mapping and cache it in 4537 * the purport routing table. Since there is no 4538 * State Change notification for private loop/point_point 4539 * topologies - this table may not be accurate. The static 4540 * routing table is updated on a state change callback. 4541 */ 4542 FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_WARN, "port state valid!!")); 4543 fptr->fcip_port_state = FCIP_PORT_ONLINE; 4544 listlen = alloclen = FCIP_MAX_PORTS; 4545 port_map = (fc_portmap_t *) 4546 kmem_zalloc((FCIP_MAX_PORTS * sizeof (fc_portmap_t)), 4547 KM_SLEEP); 4548 if (fc_ulp_getportmap(fport->fcipp_handle, &port_map, 4549 &listlen, FC_ULP_PLOGI_PRESERVE) == FC_SUCCESS) { 4550 mutex_exit(&fptr->fcip_mutex); 4551 fcip_rt_update(fptr, port_map, listlen); 4552 mutex_enter(&fptr->fcip_mutex); 4553 } 4554 if (listlen > alloclen) { 4555 alloclen = listlen; 4556 } 4557 kmem_free(port_map, (alloclen * sizeof (fc_portmap_t))); 4558 /* 4559 * Now fall through and register with the transport 4560 * that this port is IP capable 4561 */ 4562 } 4563 /* FALLTHROUGH */ 4564 case FC_TOP_NO_NS: 4565 /* 4566 * If we don't have a nameserver, lets wait until we 4567 * have to send out a packet to a remote port and then 4568 * try and discover the port using ARP/FARP. 
4569 */ 4570 /* FALLTHROUGH */ 4571 case FC_TOP_PUBLIC_LOOP: 4572 case FC_TOP_FABRIC: { 4573 fc_portmap_t *port_map; 4574 uint32_t listlen, alloclen; 4575 4576 /* FC_TYPE of 0x05 goes to word 0, LSB */ 4577 fptr->fcip_port_state = FCIP_PORT_ONLINE; 4578 4579 if (!(fptr->fcip_flags & FCIP_REG_INPROGRESS)) { 4580 fptr->fcip_flags |= FCIP_REG_INPROGRESS; 4581 if (taskq_dispatch(fptr->fcip_tq, fcip_port_ns, 4582 fptr, KM_NOSLEEP) == 0) { 4583 fptr->fcip_flags &= ~FCIP_REG_INPROGRESS; 4584 } 4585 } 4586 4587 /* 4588 * If fcip_create_nodes_on_demand is overridden to force 4589 * discovery of all nodes in Fabric/Public loop topologies 4590 * we need to query for and obtain all nodes and log into 4591 * them as with private loop devices 4592 */ 4593 if (!fcip_create_nodes_on_demand) { 4594 fptr->fcip_port_state = FCIP_PORT_ONLINE; 4595 listlen = alloclen = FCIP_MAX_PORTS; 4596 port_map = (fc_portmap_t *) 4597 kmem_zalloc((FCIP_MAX_PORTS * 4598 sizeof (fc_portmap_t)), KM_SLEEP); 4599 if (fc_ulp_getportmap(fport->fcipp_handle, &port_map, 4600 &listlen, FC_ULP_PLOGI_PRESERVE) == FC_SUCCESS) { 4601 mutex_exit(&fptr->fcip_mutex); 4602 fcip_rt_update(fptr, port_map, listlen); 4603 mutex_enter(&fptr->fcip_mutex); 4604 } 4605 if (listlen > alloclen) { 4606 alloclen = listlen; 4607 } 4608 kmem_free(port_map, 4609 (alloclen * sizeof (fc_portmap_t))); 4610 } 4611 break; 4612 } 4613 4614 default: 4615 break; 4616 } 4617 } 4618 4619 static void 4620 fcip_port_ns(void *arg) 4621 { 4622 struct fcip *fptr = (struct fcip *)arg; 4623 fcip_port_info_t *fport = fptr->fcip_port_info; 4624 fc_ns_cmd_t ns_cmd; 4625 uint32_t types[8]; 4626 ns_rfc_type_t rfc; 4627 4628 mutex_enter(&fptr->fcip_mutex); 4629 if ((fptr->fcip_flags & (FCIP_DETACHING | FCIP_DETACHED)) || 4630 (fptr->fcip_flags & (FCIP_SUSPENDED | FCIP_POWER_DOWN))) { 4631 fptr->fcip_flags &= ~FCIP_REG_INPROGRESS; 4632 mutex_exit(&fptr->fcip_mutex); 4633 return; 4634 } 4635 mutex_exit(&fptr->fcip_mutex); 4636 4637 /* 4638 * Prepare the 
Name server structure to 4639 * register with the transport in case of 4640 * Fabric configuration. 4641 */ 4642 bzero(&rfc, sizeof (rfc)); 4643 bzero(types, sizeof (types)); 4644 4645 types[FC4_TYPE_WORD_POS(FC_TYPE_IS8802_SNAP)] = (1 << 4646 FC4_TYPE_BIT_POS(FC_TYPE_IS8802_SNAP)); 4647 4648 rfc.rfc_port_id.port_id = fport->fcipp_sid.port_id; 4649 bcopy(types, rfc.rfc_types, sizeof (types)); 4650 4651 ns_cmd.ns_flags = 0; 4652 ns_cmd.ns_cmd = NS_RFT_ID; 4653 ns_cmd.ns_req_len = sizeof (rfc); 4654 ns_cmd.ns_req_payload = (caddr_t)&rfc; 4655 ns_cmd.ns_resp_len = 0; 4656 ns_cmd.ns_resp_payload = NULL; 4657 4658 /* 4659 * Perform the Name Server Registration for FC IS8802_SNAP Type. 4660 * We don't expect a reply for registering port type 4661 */ 4662 (void) fc_ulp_port_ns(fptr->fcip_port_info->fcipp_handle, 4663 (opaque_t)0, &ns_cmd); 4664 4665 mutex_enter(&fptr->fcip_mutex); 4666 fptr->fcip_flags &= ~FCIP_REG_INPROGRESS; 4667 mutex_exit(&fptr->fcip_mutex); 4668 } 4669 4670 /* 4671 * setup this instance of fcip. This routine inits kstats, allocates 4672 * unsolicited buffers, determines' this port's siblings and handles 4673 * topology specific details which includes registering with the name 4674 * server and also setting up the routing table for this port for 4675 * private loops and point to point topologies 4676 */ 4677 static int 4678 fcip_init_port(struct fcip *fptr) 4679 { 4680 int rval = FC_SUCCESS; 4681 fcip_port_info_t *fport = fptr->fcip_port_info; 4682 static char buf[64]; 4683 size_t tok_buf_size; 4684 4685 ASSERT(fport != NULL); 4686 4687 FCIP_TNF_PROBE_1((fcip_init_port, "fcip io", /* CSTYLED */, 4688 tnf_string, msg, "enter")); 4689 mutex_enter(&fptr->fcip_mutex); 4690 4691 /* 4692 * setup mac address for this port. Don't be too worried if 4693 * the WWN is zero, there is probably nothing attached to 4694 * to the port. There is no point allocating unsolicited buffers 4695 * for an unused port so return success if we don't have a MAC 4696 * address. 
Do the port init on a state change notification. 4697 */ 4698 if (fcip_setup_mac_addr(fptr) == FCIP_INVALID_WWN) { 4699 fptr->fcip_port_state = FCIP_PORT_OFFLINE; 4700 rval = FC_SUCCESS; 4701 goto done; 4702 } 4703 4704 /* 4705 * clear routing table hash list for this port 4706 */ 4707 fcip_rt_flush(fptr); 4708 4709 /* 4710 * init kstats for this instance 4711 */ 4712 fcip_kstat_init(fptr); 4713 4714 /* 4715 * Allocate unsolicited buffers 4716 */ 4717 fptr->fcip_ub_nbufs = fcip_ub_nbufs; 4718 tok_buf_size = sizeof (*fptr->fcip_ub_tokens) * fcip_ub_nbufs; 4719 4720 FCIP_TNF_PROBE_2((fcip_init_port, "fcip io", /* CSTYLED */, 4721 tnf_string, msg, "debug", 4722 tnf_int, tokBufsize, tok_buf_size)); 4723 4724 FCIP_DEBUG(FCIP_DEBUG_INIT, 4725 (CE_WARN, "tokBufsize: 0x%lx", tok_buf_size)); 4726 4727 fptr->fcip_ub_tokens = kmem_zalloc(tok_buf_size, KM_SLEEP); 4728 4729 if (fptr->fcip_ub_tokens == NULL) { 4730 rval = FC_FAILURE; 4731 FCIP_DEBUG(FCIP_DEBUG_INIT, 4732 (CE_WARN, "fcip(%d): failed to allocate unsol buf", 4733 fptr->fcip_instance)); 4734 goto done; 4735 } 4736 rval = fc_ulp_uballoc(fport->fcipp_handle, &fptr->fcip_ub_nbufs, 4737 fcip_ub_size, FC_TYPE_IS8802_SNAP, fptr->fcip_ub_tokens); 4738 4739 if (rval != FC_SUCCESS) { 4740 FCIP_DEBUG(FCIP_DEBUG_INIT, 4741 (CE_WARN, "fcip(%d): fc_ulp_uballoc failed with 0x%x!!", 4742 fptr->fcip_instance, rval)); 4743 } 4744 4745 switch (rval) { 4746 case FC_SUCCESS: 4747 break; 4748 4749 case FC_OFFLINE: 4750 fptr->fcip_port_state = FCIP_PORT_OFFLINE; 4751 rval = FC_FAILURE; 4752 goto done; 4753 4754 case FC_UB_ERROR: 4755 FCIP_TNF_PROBE_1((fcip_init_port, "fcip io", /* CSTYLED */, 4756 tnf_string, msg, "invalid ub alloc request")); 4757 FCIP_DEBUG(FCIP_DEBUG_INIT, 4758 (CE_WARN, "invalid ub alloc request !!")); 4759 rval = FC_FAILURE; 4760 goto done; 4761 4762 case FC_FAILURE: 4763 /* 4764 * requested bytes could not be alloced 4765 */ 4766 if (fptr->fcip_ub_nbufs != fcip_ub_nbufs) { 4767 cmn_err(CE_WARN, 4768 "!fcip(0x%x): 
Failed to alloc unsolicited bufs", 4769 ddi_get_instance(fport->fcipp_dip)); 4770 rval = FC_FAILURE; 4771 goto done; 4772 } 4773 break; 4774 4775 default: 4776 rval = FC_FAILURE; 4777 break; 4778 } 4779 4780 /* 4781 * Preallocate a Cache of fcip packets for transmit and receive 4782 * We don't want to be holding on to unsolicited buffers while 4783 * we transmit the message upstream 4784 */ 4785 FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "allocating fcip_pkt cache")); 4786 4787 (void) sprintf(buf, "fcip%d_cache", fptr->fcip_instance); 4788 fptr->fcip_xmit_cache = kmem_cache_create(buf, 4789 (fport->fcipp_fca_pkt_size + sizeof (fcip_pkt_t)), 4790 8, fcip_cache_constructor, fcip_cache_destructor, 4791 NULL, (void *)fport, NULL, 0); 4792 4793 (void) sprintf(buf, "fcip%d_sendup_cache", fptr->fcip_instance); 4794 fptr->fcip_sendup_cache = kmem_cache_create(buf, 4795 sizeof (struct fcip_sendup_elem), 4796 8, fcip_sendup_constructor, NULL, NULL, (void *)fport, NULL, 0); 4797 4798 if (fptr->fcip_xmit_cache == NULL) { 4799 FCIP_TNF_PROBE_2((fcip_init_port, "fcip io", /* CSTYLED */, 4800 tnf_string, msg, "unable to allocate xmit cache", 4801 tnf_int, instance, fptr->fcip_instance)); 4802 FCIP_DEBUG(FCIP_DEBUG_INIT, 4803 (CE_WARN, "fcip%d unable to allocate xmit cache", 4804 fptr->fcip_instance)); 4805 rval = FC_FAILURE; 4806 goto done; 4807 } 4808 4809 /* 4810 * We may need to handle routing tables for point to point and 4811 * fcal topologies and register with NameServer for Fabric 4812 * topologies. 
4813 */ 4814 fcip_handle_topology(fptr); 4815 mutex_exit(&fptr->fcip_mutex); 4816 if (fcip_dest_add_broadcast_entry(fptr, 1) != FC_SUCCESS) { 4817 FCIP_DEBUG(FCIP_DEBUG_INIT, 4818 (CE_WARN, "fcip(0x%x):add broadcast entry failed!!", 4819 fptr->fcip_instance)); 4820 mutex_enter(&fptr->fcip_mutex); 4821 rval = FC_FAILURE; 4822 goto done; 4823 } 4824 4825 rval = FC_SUCCESS; 4826 return (rval); 4827 4828 done: 4829 /* 4830 * we don't always come here from port_attach - so cleanup 4831 * anything done in the init_port routine 4832 */ 4833 if (fptr->fcip_kstatp) { 4834 kstat_delete(fptr->fcip_kstatp); 4835 fptr->fcip_kstatp = NULL; 4836 } 4837 4838 if (fptr->fcip_xmit_cache) { 4839 kmem_cache_destroy(fptr->fcip_xmit_cache); 4840 fptr->fcip_xmit_cache = NULL; 4841 } 4842 4843 if (fptr->fcip_sendup_cache) { 4844 kmem_cache_destroy(fptr->fcip_sendup_cache); 4845 fptr->fcip_sendup_cache = NULL; 4846 } 4847 4848 /* release unsolicited buffers */ 4849 if (fptr->fcip_ub_tokens) { 4850 uint64_t *tokens = fptr->fcip_ub_tokens; 4851 fptr->fcip_ub_tokens = NULL; 4852 4853 mutex_exit(&fptr->fcip_mutex); 4854 (void) fc_ulp_ubfree(fport->fcipp_handle, fptr->fcip_ub_nbufs, 4855 tokens); 4856 kmem_free(tokens, tok_buf_size); 4857 4858 } else { 4859 mutex_exit(&fptr->fcip_mutex); 4860 } 4861 4862 return (rval); 4863 } 4864 4865 /* 4866 * Sets up a port's MAC address from its WWN 4867 */ 4868 static int 4869 fcip_setup_mac_addr(struct fcip *fptr) 4870 { 4871 fcip_port_info_t *fport = fptr->fcip_port_info; 4872 4873 ASSERT(mutex_owned(&fptr->fcip_mutex)); 4874 4875 fptr->fcip_addrflags = 0; 4876 4877 /* 4878 * we cannot choose a MAC address for our interface - we have 4879 * to live with whatever node WWN we get (minus the top two 4880 * MSbytes for the MAC address) from the transport layer. We will 4881 * treat the WWN as our factory MAC address. 
4882 */ 4883 4884 if ((fport->fcipp_nwwn.w.wwn_hi != 0) || 4885 (fport->fcipp_nwwn.w.wwn_lo != 0)) { 4886 char etherstr[ETHERSTRL]; 4887 4888 wwn_to_ether(&fport->fcipp_nwwn, &fptr->fcip_macaddr); 4889 fcip_ether_to_str(&fptr->fcip_macaddr, etherstr); 4890 FCIP_DEBUG(FCIP_DEBUG_INIT, 4891 (CE_NOTE, "setupmacaddr ouraddr %s", etherstr)); 4892 4893 fptr->fcip_addrflags = (FCIP_FACTADDR_PRESENT | 4894 FCIP_FACTADDR_USE); 4895 } else { 4896 /* 4897 * No WWN - just return failure - there's not much 4898 * we can do since we cannot set the WWN. 4899 */ 4900 FCIP_DEBUG(FCIP_DEBUG_INIT, 4901 (CE_WARN, "Port does not have a valid WWN")); 4902 return (FCIP_INVALID_WWN); 4903 } 4904 return (FC_SUCCESS); 4905 } 4906 4907 4908 /* 4909 * flush routing table entries 4910 */ 4911 static void 4912 fcip_rt_flush(struct fcip *fptr) 4913 { 4914 int index; 4915 4916 mutex_enter(&fptr->fcip_rt_mutex); 4917 for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) { 4918 struct fcip_routing_table *frtp, *frtp_next; 4919 frtp = fptr->fcip_rtable[index]; 4920 while (frtp) { 4921 frtp_next = frtp->fcipr_next; 4922 kmem_free(frtp, sizeof (struct fcip_routing_table)); 4923 frtp = frtp_next; 4924 } 4925 fptr->fcip_rtable[index] = NULL; 4926 } 4927 mutex_exit(&fptr->fcip_rt_mutex); 4928 } 4929 4930 /* 4931 * Free up the fcip softstate and all allocated resources for the 4932 * fcip instance assoicated with a given port driver instance 4933 * 4934 * Given that the list of structures pointed to by fcip_port_head, 4935 * this function is called from multiple sources, and the 4936 * fcip_global_mutex that protects fcip_port_head must be dropped, 4937 * our best solution is to return a value that indicates the next 4938 * port in the list. This way the caller doesn't need to worry 4939 * about the race condition where it saves off a pointer to the 4940 * next structure in the list and by the time this routine returns, 4941 * that next structure has already been freed. 
4942 */ 4943 static fcip_port_info_t * 4944 fcip_softstate_free(fcip_port_info_t *fport) 4945 { 4946 struct fcip *fptr = NULL; 4947 int instance; 4948 timeout_id_t tid; 4949 opaque_t phandle = NULL; 4950 fcip_port_info_t *prev_fport, *cur_fport, *next_fport = NULL; 4951 4952 ASSERT(MUTEX_HELD(&fcip_global_mutex)); 4953 4954 if (fport) { 4955 phandle = fport->fcipp_handle; 4956 fptr = fport->fcipp_fcip; 4957 } else { 4958 return (next_fport); 4959 } 4960 4961 if (fptr) { 4962 mutex_enter(&fptr->fcip_mutex); 4963 instance = ddi_get_instance(fptr->fcip_dip); 4964 4965 /* 4966 * dismantle timeout thread for this instance of fcip 4967 */ 4968 tid = fptr->fcip_timeout_id; 4969 fptr->fcip_timeout_id = NULL; 4970 4971 mutex_exit(&fptr->fcip_mutex); 4972 (void) untimeout(tid); 4973 mutex_enter(&fptr->fcip_mutex); 4974 4975 ASSERT(fcip_num_instances >= 0); 4976 fcip_num_instances--; 4977 4978 /* 4979 * stop sendup thread 4980 */ 4981 mutex_enter(&fptr->fcip_sendup_mutex); 4982 if (fptr->fcip_sendup_thr_initted) { 4983 fptr->fcip_sendup_thr_initted = 0; 4984 cv_signal(&fptr->fcip_sendup_cv); 4985 cv_wait(&fptr->fcip_sendup_cv, 4986 &fptr->fcip_sendup_mutex); 4987 } 4988 ASSERT(fptr->fcip_sendup_head == NULL); 4989 fptr->fcip_sendup_head = fptr->fcip_sendup_tail = NULL; 4990 mutex_exit(&fptr->fcip_sendup_mutex); 4991 4992 /* 4993 * dismantle taskq 4994 */ 4995 if (fptr->fcip_tq) { 4996 taskq_t *tq = fptr->fcip_tq; 4997 4998 fptr->fcip_tq = NULL; 4999 5000 mutex_exit(&fptr->fcip_mutex); 5001 taskq_destroy(tq); 5002 mutex_enter(&fptr->fcip_mutex); 5003 } 5004 5005 if (fptr->fcip_kstatp) { 5006 kstat_delete(fptr->fcip_kstatp); 5007 fptr->fcip_kstatp = NULL; 5008 } 5009 5010 /* flush the routing table entries */ 5011 fcip_rt_flush(fptr); 5012 5013 if (fptr->fcip_xmit_cache) { 5014 kmem_cache_destroy(fptr->fcip_xmit_cache); 5015 fptr->fcip_xmit_cache = NULL; 5016 } 5017 5018 if (fptr->fcip_sendup_cache) { 5019 kmem_cache_destroy(fptr->fcip_sendup_cache); 5020 
fptr->fcip_sendup_cache = NULL; 5021 } 5022 5023 fcip_cleanup_dest(fptr); 5024 5025 /* release unsolicited buffers */ 5026 if (fptr->fcip_ub_tokens) { 5027 uint64_t *tokens = fptr->fcip_ub_tokens; 5028 5029 fptr->fcip_ub_tokens = NULL; 5030 mutex_exit(&fptr->fcip_mutex); 5031 if (phandle) { 5032 /* 5033 * release the global mutex here to 5034 * permit any data pending callbacks to 5035 * complete. Else we will deadlock in the 5036 * FCA waiting for all unsol buffers to be 5037 * returned. 5038 */ 5039 mutex_exit(&fcip_global_mutex); 5040 (void) fc_ulp_ubfree(phandle, 5041 fptr->fcip_ub_nbufs, tokens); 5042 mutex_enter(&fcip_global_mutex); 5043 } 5044 kmem_free(tokens, (sizeof (*tokens) * fcip_ub_nbufs)); 5045 } else { 5046 mutex_exit(&fptr->fcip_mutex); 5047 } 5048 5049 mutex_destroy(&fptr->fcip_mutex); 5050 mutex_destroy(&fptr->fcip_ub_mutex); 5051 mutex_destroy(&fptr->fcip_rt_mutex); 5052 mutex_destroy(&fptr->fcip_dest_mutex); 5053 mutex_destroy(&fptr->fcip_sendup_mutex); 5054 cv_destroy(&fptr->fcip_farp_cv); 5055 cv_destroy(&fptr->fcip_sendup_cv); 5056 cv_destroy(&fptr->fcip_ub_cv); 5057 5058 ddi_soft_state_free(fcip_softp, instance); 5059 } 5060 5061 /* 5062 * Now dequeue the fcip_port_info from the port list 5063 */ 5064 cur_fport = fcip_port_head; 5065 prev_fport = NULL; 5066 while (cur_fport != NULL) { 5067 if (cur_fport == fport) { 5068 break; 5069 } 5070 prev_fport = cur_fport; 5071 cur_fport = cur_fport->fcipp_next; 5072 } 5073 5074 /* 5075 * Assert that we found a port in our port list 5076 */ 5077 ASSERT(cur_fport == fport); 5078 5079 if (prev_fport) { 5080 /* 5081 * Not the first port in the port list 5082 */ 5083 prev_fport->fcipp_next = fport->fcipp_next; 5084 } else { 5085 /* 5086 * first port 5087 */ 5088 fcip_port_head = fport->fcipp_next; 5089 } 5090 next_fport = fport->fcipp_next; 5091 kmem_free(fport, sizeof (fcip_port_info_t)); 5092 5093 return (next_fport); 5094 } 5095 5096 5097 /* 5098 * This is called by transport for any ioctl operations 
performed 5099 * on the devctl or other transport minor nodes. It is currently 5100 * unused for fcip 5101 */ 5102 /* ARGSUSED */ 5103 static int 5104 fcip_port_ioctl(opaque_t ulp_handle, opaque_t port_handle, dev_t dev, 5105 int cmd, intptr_t data, int mode, cred_t *credp, int *rval, 5106 uint32_t claimed) 5107 { 5108 return (FC_UNCLAIMED); 5109 } 5110 5111 /* 5112 * DL_INFO_REQ - returns information about the DLPI stream to the DLS user 5113 * requesting information about this interface 5114 */ 5115 static void 5116 fcip_ireq(queue_t *wq, mblk_t *mp) 5117 { 5118 struct fcipstr *slp; 5119 struct fcip *fptr; 5120 dl_info_ack_t *dlip; 5121 struct fcipdladdr *dlap; 5122 la_wwn_t *ep; 5123 int size; 5124 char etherstr[ETHERSTRL]; 5125 5126 slp = (struct fcipstr *)wq->q_ptr; 5127 5128 fptr = slp->sl_fcip; 5129 5130 FCIP_DEBUG(FCIP_DEBUG_DLPI, 5131 (CE_NOTE, "fcip_ireq: info request req rcvd")); 5132 5133 FCIP_TNF_PROBE_1((fcip_ireq, "fcip io", /* CSTYLED */, 5134 tnf_string, msg, "fcip ireq entered")); 5135 5136 if (MBLKL(mp) < DL_INFO_REQ_SIZE) { 5137 dlerrorack(wq, mp, DL_INFO_REQ, DL_BADPRIM, 0); 5138 return; 5139 } 5140 5141 /* 5142 * Exchange current message for a DL_INFO_ACK 5143 */ 5144 size = sizeof (dl_info_ack_t) + FCIPADDRL + ETHERADDRL; 5145 if ((mp = mexchange(wq, mp, size, M_PCPROTO, DL_INFO_ACK)) == NULL) { 5146 return; 5147 } 5148 5149 /* 5150 * FILL in the DL_INFO_ACK fields and reply 5151 */ 5152 dlip = (dl_info_ack_t *)mp->b_rptr; 5153 *dlip = fcip_infoack; 5154 dlip->dl_current_state = slp->sl_state; 5155 dlap = (struct fcipdladdr *)(mp->b_rptr + dlip->dl_addr_offset); 5156 dlap->dl_sap = slp->sl_sap; 5157 5158 5159 if (fptr) { 5160 fcip_ether_to_str(&fptr->fcip_macaddr, etherstr); 5161 FCIP_DEBUG(FCIP_DEBUG_DLPI, 5162 (CE_NOTE, "ireq - our mac: %s", etherstr)); 5163 ether_bcopy(&fptr->fcip_macaddr, &dlap->dl_phys); 5164 } else { 5165 bzero((caddr_t)&dlap->dl_phys, ETHERADDRL); 5166 } 5167 5168 ep = (la_wwn_t *)(mp->b_rptr + 
dlip->dl_brdcst_addr_offset); 5169 ether_bcopy(&fcip_arpbroadcast_addr, ep); 5170 5171 FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "sending back info req..")); 5172 qreply(wq, mp); 5173 } 5174 5175 5176 /* 5177 * To handle DL_UNITDATA_REQ requests. 5178 */ 5179 5180 static void 5181 fcip_udreq(queue_t *wq, mblk_t *mp) 5182 { 5183 struct fcipstr *slp; 5184 struct fcip *fptr; 5185 fcip_port_info_t *fport; 5186 dl_unitdata_req_t *dludp; 5187 mblk_t *nmp; 5188 struct fcipdladdr *dlap; 5189 fcph_network_hdr_t *headerp; 5190 llc_snap_hdr_t *lsnap; 5191 t_uscalar_t off, len; 5192 struct fcip_dest *fdestp; 5193 la_wwn_t wwn; 5194 int hdr_size; 5195 5196 FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "inside fcip_udreq")); 5197 5198 FCIP_TNF_PROBE_1((fcip_udreq, "fcip io", /* CSTYLED */, 5199 tnf_string, msg, "fcip udreq entered")); 5200 5201 slp = (struct fcipstr *)wq->q_ptr; 5202 5203 if (slp->sl_state != DL_IDLE) { 5204 dlerrorack(wq, mp, DL_UNITDATA_REQ, DL_OUTSTATE, 0); 5205 return; 5206 } 5207 5208 fptr = slp->sl_fcip; 5209 5210 if (fptr == NULL) { 5211 dlerrorack(wq, mp, DL_UNITDATA_REQ, DL_OUTSTATE, 0); 5212 return; 5213 } 5214 5215 fport = fptr->fcip_port_info; 5216 5217 dludp = (dl_unitdata_req_t *)mp->b_rptr; 5218 off = dludp->dl_dest_addr_offset; 5219 len = dludp->dl_dest_addr_length; 5220 5221 /* 5222 * Validate destination address format 5223 */ 5224 if (!MBLKIN(mp, off, len) || (len != FCIPADDRL)) { 5225 dluderrorind(wq, mp, (mp->b_rptr + off), len, DL_BADADDR, 0); 5226 return; 5227 } 5228 5229 /* 5230 * Error if no M_DATA follows 5231 */ 5232 nmp = mp->b_cont; 5233 if (nmp == NULL) { 5234 dluderrorind(wq, mp, (mp->b_rptr + off), len, DL_BADDATA, 0); 5235 return; 5236 } 5237 dlap = (struct fcipdladdr *)(mp->b_rptr + off); 5238 5239 /* 5240 * Now get the destination structure for the remote NPORT 5241 */ 5242 ether_to_wwn(&dlap->dl_phys, &wwn); 5243 fdestp = fcip_get_dest(fptr, &wwn); 5244 5245 if (fdestp == NULL) { 5246 FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, 5247 "udreq - 
couldn't find dest struct for remote port"); 5248 dluderrorind(wq, mp, (mp->b_rptr + off), len, DL_BADDATA, 0)); 5249 return; 5250 } 5251 5252 /* 5253 * Network header + SAP 5254 */ 5255 hdr_size = sizeof (fcph_network_hdr_t) + sizeof (llc_snap_hdr_t); 5256 5257 /* DB_REF gives the no. of msgs pointing to this block */ 5258 if ((DB_REF(nmp) == 1) && 5259 (MBLKHEAD(nmp) >= hdr_size) && 5260 (((uintptr_t)mp->b_rptr & 0x1) == 0)) { 5261 la_wwn_t wwn; 5262 nmp->b_rptr -= hdr_size; 5263 5264 /* first put the network header */ 5265 headerp = (fcph_network_hdr_t *)nmp->b_rptr; 5266 if (ether_cmp(&dlap->dl_phys, &fcip_arpbroadcast_addr) == 0) { 5267 ether_to_wwn(&fcipnhbroadcastaddr, &wwn); 5268 } else { 5269 ether_to_wwn(&dlap->dl_phys, &wwn); 5270 } 5271 bcopy(&wwn, &headerp->net_dest_addr, sizeof (la_wwn_t)); 5272 ether_to_wwn(&fptr->fcip_macaddr, &wwn); 5273 bcopy(&wwn, &headerp->net_src_addr, sizeof (la_wwn_t)); 5274 5275 /* Now the snap header */ 5276 lsnap = (llc_snap_hdr_t *)(nmp->b_rptr + 5277 sizeof (fcph_network_hdr_t)); 5278 lsnap->dsap = 0xAA; 5279 lsnap->ssap = 0xAA; 5280 lsnap->ctrl = 0x03; 5281 lsnap->oui[0] = 0x00; 5282 lsnap->oui[1] = 0x00; /* 80 */ 5283 lsnap->oui[2] = 0x00; /* C2 */ 5284 lsnap->pid = BE_16((dlap->dl_sap)); 5285 5286 freeb(mp); 5287 mp = nmp; 5288 5289 } else { 5290 la_wwn_t wwn; 5291 5292 DB_TYPE(mp) = M_DATA; 5293 headerp = (fcph_network_hdr_t *)mp->b_rptr; 5294 5295 /* 5296 * Only fill in the low 48bits of WWN for now - we can 5297 * fill in the NAA_ID after we find the port in the 5298 * routing tables 5299 */ 5300 if (ether_cmp(&dlap->dl_phys, &fcip_arpbroadcast_addr) == 0) { 5301 ether_to_wwn(&fcipnhbroadcastaddr, &wwn); 5302 } else { 5303 ether_to_wwn(&dlap->dl_phys, &wwn); 5304 } 5305 bcopy(&wwn, &headerp->net_dest_addr, sizeof (la_wwn_t)); 5306 /* need to send our PWWN */ 5307 bcopy(&fport->fcipp_pwwn, &headerp->net_src_addr, 5308 sizeof (la_wwn_t)); 5309 5310 lsnap = (llc_snap_hdr_t *)(nmp->b_rptr + 5311 sizeof 
(fcph_network_hdr_t)); 5312 lsnap->dsap = 0xAA; 5313 lsnap->ssap = 0xAA; 5314 lsnap->ctrl = 0x03; 5315 lsnap->oui[0] = 0x00; 5316 lsnap->oui[1] = 0x00; 5317 lsnap->oui[2] = 0x00; 5318 lsnap->pid = BE_16(dlap->dl_sap); 5319 5320 mp->b_wptr = mp->b_rptr + hdr_size; 5321 } 5322 5323 /* 5324 * Ethernet drivers have a lot of gunk here to put the Type 5325 * information (for Ethernet encapsulation (RFC 894) or the 5326 * Length (for 802.2/802.3) - I guess we'll just ignore that 5327 * here. 5328 */ 5329 5330 /* 5331 * Start the I/O on this port. If fcip_start failed for some reason 5332 * we call putbq in fcip_start so we don't need to check the 5333 * return value from fcip_start 5334 */ 5335 (void) fcip_start(wq, mp, fptr, fdestp, KM_SLEEP); 5336 } 5337 5338 /* 5339 * DL_ATTACH_REQ: attaches a PPA with a stream. ATTACH requets are needed 5340 * for style 2 DLS providers to identify the physical medium through which 5341 * the streams communication will happen 5342 */ 5343 static void 5344 fcip_areq(queue_t *wq, mblk_t *mp) 5345 { 5346 struct fcipstr *slp; 5347 union DL_primitives *dlp; 5348 fcip_port_info_t *fport; 5349 struct fcip *fptr; 5350 int ppa; 5351 5352 slp = (struct fcipstr *)wq->q_ptr; 5353 dlp = (union DL_primitives *)mp->b_rptr; 5354 5355 if (MBLKL(mp) < DL_ATTACH_REQ_SIZE) { 5356 dlerrorack(wq, mp, DL_ATTACH_REQ, DL_BADPRIM, 0); 5357 return; 5358 } 5359 5360 if (slp->sl_state != DL_UNATTACHED) { 5361 dlerrorack(wq, mp, DL_ATTACH_REQ, DL_OUTSTATE, 0); 5362 return; 5363 } 5364 5365 ppa = dlp->attach_req.dl_ppa; 5366 FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "attach req: ppa %x", ppa)); 5367 5368 /* 5369 * check if the PPA is valid 5370 */ 5371 5372 mutex_enter(&fcip_global_mutex); 5373 5374 for (fport = fcip_port_head; fport; fport = fport->fcipp_next) { 5375 if ((fptr = fport->fcipp_fcip) == NULL) { 5376 continue; 5377 } 5378 FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "ppa %x, inst %x", ppa, 5379 ddi_get_instance(fptr->fcip_dip))); 5380 5381 if (ppa == 
ddi_get_instance(fptr->fcip_dip)) { 5382 FCIP_DEBUG(FCIP_DEBUG_DLPI, 5383 (CE_NOTE, "ppa found %x", ppa)); 5384 break; 5385 } 5386 } 5387 5388 if (fport == NULL) { 5389 FCIP_DEBUG(FCIP_DEBUG_DLPI, 5390 (CE_NOTE, "dlerrorack coz fport==NULL")); 5391 5392 mutex_exit(&fcip_global_mutex); 5393 5394 if (fc_ulp_get_port_handle(ppa) == NULL) { 5395 dlerrorack(wq, mp, DL_ATTACH_REQ, DL_BADPPA, 0); 5396 return; 5397 } 5398 5399 /* 5400 * Wait for Port attach callback to trigger. If port_detach 5401 * got in while we were waiting, then ddi_get_soft_state 5402 * will return NULL, and we'll return error. 5403 */ 5404 5405 delay(drv_usectohz(FCIP_INIT_DELAY)); 5406 mutex_enter(&fcip_global_mutex); 5407 5408 fptr = ddi_get_soft_state(fcip_softp, ppa); 5409 if (fptr == NULL) { 5410 mutex_exit(&fcip_global_mutex); 5411 dlerrorack(wq, mp, DL_ATTACH_REQ, DL_BADPPA, 0); 5412 return; 5413 } 5414 } 5415 5416 /* 5417 * set link to device and update our state 5418 */ 5419 slp->sl_fcip = fptr; 5420 slp->sl_state = DL_UNBOUND; 5421 5422 mutex_exit(&fcip_global_mutex); 5423 5424 #ifdef DEBUG 5425 mutex_enter(&fptr->fcip_mutex); 5426 if (fptr->fcip_flags & FCIP_LINK_DOWN) { 5427 FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_WARN, "port not online yet")); 5428 } 5429 mutex_exit(&fptr->fcip_mutex); 5430 #endif 5431 5432 dlokack(wq, mp, DL_ATTACH_REQ); 5433 } 5434 5435 5436 /* 5437 * DL_DETACH request - detaches a PPA from a stream 5438 */ 5439 static void 5440 fcip_dreq(queue_t *wq, mblk_t *mp) 5441 { 5442 struct fcipstr *slp; 5443 5444 slp = (struct fcipstr *)wq->q_ptr; 5445 5446 if (MBLKL(mp) < DL_DETACH_REQ_SIZE) { 5447 dlerrorack(wq, mp, DL_DETACH_REQ, DL_BADPRIM, 0); 5448 return; 5449 } 5450 5451 if (slp->sl_state != DL_UNBOUND) { 5452 dlerrorack(wq, mp, DL_DETACH_REQ, DL_OUTSTATE, 0); 5453 return; 5454 } 5455 5456 fcip_dodetach(slp); 5457 dlokack(wq, mp, DL_DETACH_REQ); 5458 } 5459 5460 /* 5461 * DL_BIND request: requests a DLS provider to bind a DLSAP to the stream. 
5462 * DLS users communicate with a physical interface through DLSAPs. Multiple 5463 * DLSAPs can be bound to the same stream (PPA) 5464 */ 5465 static void 5466 fcip_breq(queue_t *wq, mblk_t *mp) 5467 { 5468 struct fcipstr *slp; 5469 union DL_primitives *dlp; 5470 struct fcip *fptr; 5471 struct fcipdladdr fcipaddr; 5472 t_uscalar_t sap; 5473 int xidtest; 5474 5475 slp = (struct fcipstr *)wq->q_ptr; 5476 5477 if (MBLKL(mp) < DL_BIND_REQ_SIZE) { 5478 dlerrorack(wq, mp, DL_BIND_REQ, DL_BADPRIM, 0); 5479 return; 5480 } 5481 5482 if (slp->sl_state != DL_UNBOUND) { 5483 dlerrorack(wq, mp, DL_BIND_REQ, DL_OUTSTATE, 0); 5484 return; 5485 } 5486 5487 dlp = (union DL_primitives *)mp->b_rptr; 5488 fptr = slp->sl_fcip; 5489 5490 if (fptr == NULL) { 5491 dlerrorack(wq, mp, DL_BIND_REQ, DL_OUTSTATE, 0); 5492 return; 5493 } 5494 5495 sap = dlp->bind_req.dl_sap; 5496 FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "fcip_breq - sap: %x", sap)); 5497 xidtest = dlp->bind_req.dl_xidtest_flg; 5498 5499 if (xidtest) { 5500 dlerrorack(wq, mp, DL_BIND_REQ, DL_NOAUTO, 0); 5501 return; 5502 } 5503 5504 FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "DLBIND: sap : %x", sap)); 5505 5506 if (sap > ETHERTYPE_MAX) { 5507 dlerrorack(wq, mp, dlp->dl_primitive, DL_BADSAP, 0); 5508 return; 5509 } 5510 /* 5511 * save SAP for this stream and change the link state 5512 */ 5513 slp->sl_sap = sap; 5514 slp->sl_state = DL_IDLE; 5515 5516 fcipaddr.dl_sap = sap; 5517 ether_bcopy(&fptr->fcip_macaddr, &fcipaddr.dl_phys); 5518 dlbindack(wq, mp, sap, &fcipaddr, FCIPADDRL, 0, 0); 5519 5520 fcip_setipq(fptr); 5521 } 5522 5523 /* 5524 * DL_UNBIND request to unbind a previously bound DLSAP, from this stream 5525 */ 5526 static void 5527 fcip_ubreq(queue_t *wq, mblk_t *mp) 5528 { 5529 struct fcipstr *slp; 5530 5531 slp = (struct fcipstr *)wq->q_ptr; 5532 5533 if (MBLKL(mp) < DL_UNBIND_REQ_SIZE) { 5534 dlerrorack(wq, mp, DL_UNBIND_REQ, DL_BADPRIM, 0); 5535 return; 5536 } 5537 5538 if (slp->sl_state != DL_IDLE) { 5539 dlerrorack(wq, 
mp, DL_UNBIND_REQ, DL_OUTSTATE, 0); 5540 return; 5541 } 5542 5543 slp->sl_state = DL_UNBOUND; 5544 slp->sl_sap = 0; 5545 5546 (void) putnextctl1(RD(wq), M_FLUSH, FLUSHRW); 5547 dlokack(wq, mp, DL_UNBIND_REQ); 5548 5549 fcip_setipq(slp->sl_fcip); 5550 } 5551 5552 /* 5553 * Return our physical address 5554 */ 5555 static void 5556 fcip_pareq(queue_t *wq, mblk_t *mp) 5557 { 5558 struct fcipstr *slp; 5559 union DL_primitives *dlp; 5560 int type; 5561 struct fcip *fptr; 5562 fcip_port_info_t *fport; 5563 struct ether_addr addr; 5564 5565 slp = (struct fcipstr *)wq->q_ptr; 5566 5567 if (MBLKL(mp) < DL_PHYS_ADDR_REQ_SIZE) { 5568 dlerrorack(wq, mp, DL_PHYS_ADDR_REQ, DL_BADPRIM, 0); 5569 return; 5570 } 5571 5572 dlp = (union DL_primitives *)mp->b_rptr; 5573 type = dlp->physaddr_req.dl_addr_type; 5574 fptr = slp->sl_fcip; 5575 5576 if (fptr == NULL) { 5577 dlerrorack(wq, mp, DL_PHYS_ADDR_REQ, DL_OUTSTATE, 0); 5578 return; 5579 } 5580 5581 fport = fptr->fcip_port_info; 5582 5583 switch (type) { 5584 case DL_FACT_PHYS_ADDR: 5585 FCIP_DEBUG(FCIP_DEBUG_DLPI, 5586 (CE_NOTE, "returning factory phys addr")); 5587 wwn_to_ether(&fport->fcipp_pwwn, &addr); 5588 break; 5589 5590 case DL_CURR_PHYS_ADDR: 5591 FCIP_DEBUG(FCIP_DEBUG_DLPI, 5592 (CE_NOTE, "returning current phys addr")); 5593 ether_bcopy(&fptr->fcip_macaddr, &addr); 5594 break; 5595 5596 default: 5597 FCIP_DEBUG(FCIP_DEBUG_DLPI, 5598 (CE_NOTE, "Not known cmd type in phys addr")); 5599 dlerrorack(wq, mp, DL_PHYS_ADDR_REQ, DL_NOTSUPPORTED, 0); 5600 return; 5601 } 5602 dlphysaddrack(wq, mp, &addr, ETHERADDRL); 5603 } 5604 5605 /* 5606 * Set physical address DLPI request 5607 */ 5608 static void 5609 fcip_spareq(queue_t *wq, mblk_t *mp) 5610 { 5611 struct fcipstr *slp; 5612 union DL_primitives *dlp; 5613 t_uscalar_t off, len; 5614 struct ether_addr *addrp; 5615 la_wwn_t wwn; 5616 struct fcip *fptr; 5617 fc_ns_cmd_t fcip_ns_cmd; 5618 5619 slp = (struct fcipstr *)wq->q_ptr; 5620 5621 if (MBLKL(mp) < DL_SET_PHYS_ADDR_REQ_SIZE) { 
5622 dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0); 5623 return; 5624 } 5625 5626 dlp = (union DL_primitives *)mp->b_rptr; 5627 len = dlp->set_physaddr_req.dl_addr_length; 5628 off = dlp->set_physaddr_req.dl_addr_offset; 5629 5630 if (!MBLKIN(mp, off, len)) { 5631 dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0); 5632 return; 5633 } 5634 5635 addrp = (struct ether_addr *)(mp->b_rptr + off); 5636 5637 /* 5638 * If the length of physical address is not correct or address 5639 * specified is a broadcast address or multicast addr - 5640 * return an error. 5641 */ 5642 if ((len != ETHERADDRL) || 5643 ((addrp->ether_addr_octet[0] & 01) == 1) || 5644 (ether_cmp(addrp, &fcip_arpbroadcast_addr) == 0)) { 5645 dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADADDR, 0); 5646 return; 5647 } 5648 5649 /* 5650 * check if a stream is attached to this device. Else return an error 5651 */ 5652 if ((fptr = slp->sl_fcip) == NULL) { 5653 dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_OUTSTATE, 0); 5654 return; 5655 } 5656 5657 /* 5658 * set the new interface local address. We request the transport 5659 * layer to change the Port WWN for this device - return an error 5660 * if we don't succeed. 5661 */ 5662 5663 ether_to_wwn(addrp, &wwn); 5664 if (fcip_set_wwn(&wwn) == FC_SUCCESS) { 5665 FCIP_DEBUG(FCIP_DEBUG_DLPI, 5666 (CE_WARN, "WWN changed in spareq")); 5667 } else { 5668 dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADADDR, 0); 5669 } 5670 5671 /* 5672 * register The new Port WWN and Node WWN with the transport 5673 * and Nameserver. Hope the transport ensures all current I/O 5674 * has stopped before actually attempting to register a new 5675 * port and Node WWN else we are hosed. Maybe a Link reset 5676 * will get everyone's attention. 
5677 */ 5678 fcip_ns_cmd.ns_flags = 0; 5679 fcip_ns_cmd.ns_cmd = NS_RPN_ID; 5680 fcip_ns_cmd.ns_req_len = sizeof (la_wwn_t); 5681 fcip_ns_cmd.ns_req_payload = (caddr_t)&wwn.raw_wwn[0]; 5682 fcip_ns_cmd.ns_resp_len = 0; 5683 fcip_ns_cmd.ns_resp_payload = (caddr_t)0; 5684 if (fc_ulp_port_ns(fptr->fcip_port_info->fcipp_handle, 5685 (opaque_t)0, &fcip_ns_cmd) != FC_SUCCESS) { 5686 FCIP_DEBUG(FCIP_DEBUG_DLPI, 5687 (CE_WARN, "setting Port WWN failed")); 5688 dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0); 5689 return; 5690 } 5691 5692 dlokack(wq, mp, DL_SET_PHYS_ADDR_REQ); 5693 } 5694 5695 /* 5696 * change our port's WWN if permitted by hardware 5697 */ 5698 /* ARGSUSED */ 5699 static int 5700 fcip_set_wwn(la_wwn_t *pwwn) 5701 { 5702 /* 5703 * We're usually not allowed to change the WWN of adapters 5704 * but some adapters do permit us to change the WWN - don't 5705 * permit setting of WWNs (yet?) - This behavior could be 5706 * modified if needed 5707 */ 5708 return (FC_FAILURE); 5709 } 5710 5711 5712 /* 5713 * This routine fills in the header for fastpath data requests. 
What this
 * does in simple terms is, instead of sending all data through the Unitdata
 * request dlpi code paths (which will then append the protocol specific
 * header - network and snap headers in our case), the upper layers issue
 * a M_IOCTL with a DL_IOC_HDR_INFO request and ask the streams endpoint
 * driver to give the header it needs appended and the upper layer
 * allocates and fills in the header and calls our put routine
 */
static void
fcip_dl_ioc_hdr_info(queue_t *wq, mblk_t *mp)
{
	mblk_t			*nmp;
	struct fcipstr		*slp;
	struct fcipdladdr	*dlap;
	dl_unitdata_req_t	*dlup;
	fcph_network_hdr_t	*headerp;
	la_wwn_t		wwn;
	llc_snap_hdr_t		*lsnap;
	struct fcip		*fptr;
	fcip_port_info_t	*fport;
	t_uscalar_t		off, len;
	size_t			hdrlen;
	int			error;

	slp = (struct fcipstr *)wq->q_ptr;
	fptr = slp->sl_fcip;
	/* stream must be attached to a port before fastpath is set up */
	if (fptr == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "dliochdr : returns EINVAL1"));
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	/* pull the DL_UNITDATA_REQ + address into one contiguous mblk */
	error = miocpullup(mp, sizeof (dl_unitdata_req_t) + FCIPADDRL);
	if (error != 0) {
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "dliochdr : returns %d", error));
		miocnak(wq, mp, 0, error);
		return;
	}

	fport = fptr->fcip_port_info;

	/*
	 * check if the DL_UNITDATA_REQ destination addr has valid offset
	 * and length values
	 */
	dlup = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
	off = dlup->dl_dest_addr_offset;
	len = dlup->dl_dest_addr_length;
	if (dlup->dl_primitive != DL_UNITDATA_REQ ||
	    !MBLKIN(mp->b_cont, off, len) || (len != FCIPADDRL)) {
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "dliochdr : returns EINVAL2"));
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	dlap = (struct fcipdladdr *)(mp->b_cont->b_rptr + off);

	/*
	 * Allocate a new mblk to hold the ether header
	 */

	/*
	 * setup space for network header
	 */
	hdrlen = (sizeof (llc_snap_hdr_t) + sizeof (fcph_network_hdr_t));
	if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "dliochdr : returns ENOMEM"));
		miocnak(wq, mp, 0, ENOMEM);
		return;
	}
	nmp->b_wptr += hdrlen;

	/*
	 * Fill in the Network Hdr and LLC SNAP header;
	 */
	headerp = (fcph_network_hdr_t *)nmp->b_rptr;
	/*
	 * just fill in the Node WWN here - we can fill in the NAA_ID when
	 * we search the routing table
	 */
	if (ether_cmp(&dlap->dl_phys, &fcip_arpbroadcast_addr) == 0) {
		ether_to_wwn(&fcipnhbroadcastaddr, &wwn);
	} else {
		ether_to_wwn(&dlap->dl_phys, &wwn);
	}
	bcopy(&wwn, &headerp->net_dest_addr, sizeof (la_wwn_t));
	bcopy(&fport->fcipp_pwwn, &headerp->net_src_addr, sizeof (la_wwn_t));
	/* RFC 2625 LLC/SNAP header: dsap/ssap 0xAA, UI frame, OUI 0 */
	lsnap = (llc_snap_hdr_t *)(nmp->b_rptr + sizeof (fcph_network_hdr_t));
	lsnap->dsap = 0xAA;
	lsnap->ssap = 0xAA;
	lsnap->ctrl = 0x03;
	lsnap->oui[0] = 0x00;
	lsnap->oui[1] = 0x00;
	lsnap->oui[2] = 0x00;
	lsnap->pid = BE_16(dlap->dl_sap);

	/*
	 * Link new mblk in after the "request" mblks.
	 */
	linkb(mp, nmp);

	/* mark this stream as fastpath-capable from now on */
	slp->sl_flags |= FCIP_SLFAST;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "dliochdr : returns success "));
	miocack(wq, mp, msgsize(mp->b_cont), 0);
}


/*
 * Establish a kmem cache for fcip packets: constructor initializes the
 * fcip_pkt_t/fc_packet_t pair and pre-allocates the command DMA handle.
 */
static int
fcip_cache_constructor(void *buf, void *arg, int flags)
{
	fcip_pkt_t		*fcip_pkt = buf;
	fc_packet_t		*fc_pkt;
	fcip_port_info_t	*fport = (fcip_port_info_t *)arg;
	int			(*cb) (caddr_t);
	struct fcip		*fptr;

	/* map kmem sleep semantics onto the DDI DMA callback convention */
	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	ASSERT(fport != NULL);

	fptr = fport->fcipp_fcip;

	/*
	 * we allocated space for our private area at the end of the
	 * fc packet. Make sure we point to it correctly. Ideally we
	 * should just push fc_packet_private to the beginning or end
	 * of the fc_packet structure
	 */
	fcip_pkt->fcip_pkt_next = NULL;
	fcip_pkt->fcip_pkt_prev = NULL;
	fcip_pkt->fcip_pkt_dest = NULL;
	fcip_pkt->fcip_pkt_state = 0;
	fcip_pkt->fcip_pkt_reason = 0;
	fcip_pkt->fcip_pkt_flags = 0;
	fcip_pkt->fcip_pkt_fptr = fptr;
	fcip_pkt->fcip_pkt_dma_flags = 0;

	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
	fc_pkt->pkt_ulp_rscn_infop = NULL;

	/*
	 * We use pkt_cmd_dma for OUTBOUND requests. We don't expect
	 * any responses for outbound IP data so no need to setup
	 * response or data dma handles.
	 */
	if (ddi_dma_alloc_handle(fport->fcipp_dip,
	    &fport->fcipp_cmd_dma_attr, cb, NULL,
	    &fc_pkt->pkt_cmd_dma) != DDI_SUCCESS) {
		return (FCIP_FAILURE);
	}

	fc_pkt->pkt_cmd_acc = fc_pkt->pkt_resp_acc = NULL;
	fc_pkt->pkt_fca_private = (opaque_t)((caddr_t)buf +
	    sizeof (fcip_pkt_t));
	fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt;

	fc_pkt->pkt_cmd_cookie_cnt = fc_pkt->pkt_resp_cookie_cnt =
	    fc_pkt->pkt_data_cookie_cnt = 0;
	fc_pkt->pkt_cmd_cookie = fc_pkt->pkt_resp_cookie =
	    fc_pkt->pkt_data_cookie = NULL;

	return (FCIP_SUCCESS);
}

/*
 * destroy the fcip kmem cache: release the DMA handle the constructor
 * allocated (if the constructor succeeded that far).
 */
static void
fcip_cache_destructor(void *buf, void *arg)
{
	fcip_pkt_t		*fcip_pkt = (fcip_pkt_t *)buf;
	fc_packet_t		*fc_pkt;
	fcip_port_info_t	*fport = (fcip_port_info_t *)arg;
	struct fcip		*fptr;

	ASSERT(fport != NULL);

	fptr = fport->fcipp_fcip;

	ASSERT(fptr == fcip_pkt->fcip_pkt_fptr);
	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);

	if (fc_pkt->pkt_cmd_dma) {
		ddi_dma_free_handle(&fc_pkt->pkt_cmd_dma);
	}
}

/*
 * the fcip destination structure is hashed on Node WWN assuming
 * a NAA_ID of 0x1 (IEEE)
 */
static struct fcip_dest *
fcip_get_dest(struct fcip *fptr, la_wwn_t *pwwn)
{
	struct fcip_dest	*fdestp = NULL;
	fcip_port_info_t	*fport;
	int			hash_bucket;
	opaque_t		pd;
	int			rval;
	struct fcip_routing_table *frp;
	la_wwn_t		twwn;
	uint32_t		*twwnp = (uint32_t *)&twwn;

	hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn);
	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "get dest hashbucket : 0x%x", hash_bucket));
	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
	    pwwn->raw_wwn[2], pwwn->raw_wwn[3], pwwn->raw_wwn[4],
	    pwwn->raw_wwn[5], pwwn->raw_wwn[6], pwwn->raw_wwn[7]));

	ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS);

	if (fcip_check_port_exists(fptr)) {
		/* fptr is stale, return fdestp */
		return (fdestp);
	}
	fport = fptr->fcip_port_info;

	/*
	 * First check if we have active I/Os going on with the
	 * destination port (an entry would exist in fcip_dest hash table)
	 */
	mutex_enter(&fptr->fcip_dest_mutex);
	fdestp = fptr->fcip_dest[hash_bucket];
	while (fdestp != NULL) {
		mutex_enter(&fdestp->fcipd_mutex);
		if (fdestp->fcipd_rtable) {
			if (fcip_wwn_compare(pwwn, &fdestp->fcipd_pwwn,
			    FCIP_COMPARE_NWWN) == 0) {
				FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
				    (CE_NOTE, "found fdestp"));
				mutex_exit(&fdestp->fcipd_mutex);
				mutex_exit(&fptr->fcip_dest_mutex);
				return (fdestp);
			}
		}
		mutex_exit(&fdestp->fcipd_mutex);
		fdestp = fdestp->fcipd_next;
	}
	mutex_exit(&fptr->fcip_dest_mutex);

	/*
	 * We did not find the destination port information in our
	 * active port list so search for an entry in our routing
	 * table.
	 */
	mutex_enter(&fptr->fcip_rt_mutex);
	frp = fcip_lookup_rtable(fptr, pwwn, FCIP_COMPARE_NWWN);
	mutex_exit(&fptr->fcip_rt_mutex);

	/* no RTE, an unusable/not-logged-in RTE, or an RTE with no pd */
	if (frp == NULL || (frp && (!FCIP_RTE_UNAVAIL(frp->fcipr_state)) &&
	    frp->fcipr_state != PORT_DEVICE_LOGGED_IN) ||
	    (frp && frp->fcipr_pd == NULL)) {
		/*
		 * No entry for the destination port in our routing
		 * table too. First query the transport to see if it
		 * already has structures for the destination port in
		 * its hash tables. This must be done for all topologies
		 * since we could have retired entries in the hash tables
		 * which may have to be re-added without a statechange
		 * callback happening. Its better to try and get an entry
		 * for the destination port rather than simply failing a
		 * request though it may be an overkill in private loop
		 * topologies.
		 * If a entry for the remote port exists in the transport's
		 * hash tables, we are fine and can add the entry to our
		 * routing and dest hash lists, Else for fabric configs we
		 * query the nameserver if one exists or issue FARP ELS.
		 */

		/*
		 * We need to do a PortName based Nameserver
		 * query operation. So get the right PortWWN
		 * for the adapter.
		 */
		bcopy(pwwn, &twwn, sizeof (la_wwn_t));

		/*
		 * Try IEEE Name (Format 1) first, this is the default and
		 * Emulex uses this format.
		 */
		pd = fc_ulp_get_remote_port(fport->fcipp_handle,
		    &twwn, &rval, 1);

		if (rval != FC_SUCCESS) {
			/*
			 * If IEEE Name (Format 1) query failed, try IEEE
			 * Extended Name (Format 2) which Qlogic uses.
			 * And try port 1 on Qlogic FC-HBA first.
			 * Note: On x86, we need to byte swap the 32-bit
			 * word first, after the modification, swap it back.
			 */
			*twwnp = BE_32(*twwnp);
			twwn.w.nport_id = QLC_PORT_1_ID_BITS;
			twwn.w.naa_id = QLC_PORT_NAA;
			*twwnp = BE_32(*twwnp);
			pd = fc_ulp_get_remote_port(fport->fcipp_handle,
			    &twwn, &rval, 1);
		}

		if (rval != FC_SUCCESS) {
			/* If still failed, try port 2 on Qlogic FC-HBA. */
			*twwnp = BE_32(*twwnp);
			twwn.w.nport_id = QLC_PORT_2_ID_BITS;
			*twwnp = BE_32(*twwnp);
			pd = fc_ulp_get_remote_port(fport->fcipp_handle,
			    &twwn, &rval, 1);
		}

		if (rval == FC_SUCCESS) {
			fc_portmap_t map;
			/*
			 * Add the newly found destination structure
			 * to our routing table. Create a map with
			 * the device we found. We could ask the
			 * transport to give us the list of all
			 * devices connected to our port but we
			 * probably don't need to know all the devices
			 * so let us just constuct a list with only
			 * one device instead.
			 */

			fc_ulp_copy_portmap(&map, pd);
			fcip_rt_update(fptr, &map, 1);

			mutex_enter(&fptr->fcip_rt_mutex);
			frp = fcip_lookup_rtable(fptr, pwwn,
			    FCIP_COMPARE_NWWN);
			mutex_exit(&fptr->fcip_rt_mutex);

			fdestp = fcip_add_dest(fptr, frp);
		} else if (fcip_farp_supported &&
		    (FC_TOP_EXTERNAL(fport->fcipp_topology) ||
		    (fport->fcipp_topology == FC_TOP_PT_PT))) {
			/*
			 * The Name server request failed so
			 * issue an FARP
			 */
			fdestp = fcip_do_farp(fptr, pwwn, NULL,
			    0, 0);
		} else {
			fdestp = NULL;
		}
	} else if (frp && frp->fcipr_state == PORT_DEVICE_LOGGED_IN) {
		/*
		 * Prepare a dest structure to return to caller
		 */
		fdestp = fcip_add_dest(fptr, frp);
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "in fcip get dest non fabric"));
	}
	return (fdestp);
}


/*
 * Endian clean WWN compare.
 * Returns 0 if they compare OK, else return non zero value.
6081 * flag can be bitwise OR of FCIP_COMPARE_NWWN, FCIP_COMPARE_PWWN, 6082 * FCIP_COMPARE_BROADCAST. 6083 */ 6084 static int 6085 fcip_wwn_compare(la_wwn_t *wwn1, la_wwn_t *wwn2, int flag) 6086 { 6087 int rval = 0; 6088 if ((wwn1->raw_wwn[2] != wwn2->raw_wwn[2]) || 6089 (wwn1->raw_wwn[3] != wwn2->raw_wwn[3]) || 6090 (wwn1->raw_wwn[4] != wwn2->raw_wwn[4]) || 6091 (wwn1->raw_wwn[5] != wwn2->raw_wwn[5]) || 6092 (wwn1->raw_wwn[6] != wwn2->raw_wwn[6]) || 6093 (wwn1->raw_wwn[7] != wwn2->raw_wwn[7])) { 6094 rval = 1; 6095 } else if ((flag == FCIP_COMPARE_PWWN) && 6096 (((wwn1->raw_wwn[0] & 0xf0) != (wwn2->raw_wwn[0] & 0xf0)) || 6097 (wwn1->raw_wwn[1] != wwn2->raw_wwn[1]))) { 6098 rval = 1; 6099 } 6100 return (rval); 6101 } 6102 6103 6104 /* 6105 * Add an entry for a remote port in the dest hash table. Dest hash table 6106 * has entries for ports in the routing hash table with which we decide 6107 * to establish IP communication with. The no. of entries in the dest hash 6108 * table must always be less than or equal to the entries in the routing 6109 * hash table. 
Every entry in the dest hash table ofcourse must have a 6110 * corresponding entry in the routing hash table 6111 */ 6112 static struct fcip_dest * 6113 fcip_add_dest(struct fcip *fptr, struct fcip_routing_table *frp) 6114 { 6115 struct fcip_dest *fdestp = NULL; 6116 la_wwn_t *pwwn; 6117 int hash_bucket; 6118 struct fcip_dest *fdest_new; 6119 6120 if (frp == NULL) { 6121 return (fdestp); 6122 } 6123 6124 pwwn = &frp->fcipr_pwwn; 6125 mutex_enter(&fptr->fcip_dest_mutex); 6126 hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn); 6127 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, 6128 (CE_NOTE, "add dest hash_bucket: 0x%x", hash_bucket)); 6129 6130 ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS); 6131 6132 fdestp = fptr->fcip_dest[hash_bucket]; 6133 while (fdestp != NULL) { 6134 mutex_enter(&fdestp->fcipd_mutex); 6135 if (fdestp->fcipd_rtable) { 6136 if (fcip_wwn_compare(pwwn, &fdestp->fcipd_pwwn, 6137 FCIP_COMPARE_PWWN) == 0) { 6138 mutex_exit(&fdestp->fcipd_mutex); 6139 mutex_exit(&fptr->fcip_dest_mutex); 6140 return (fdestp); 6141 } 6142 } 6143 mutex_exit(&fdestp->fcipd_mutex); 6144 fdestp = fdestp->fcipd_next; 6145 } 6146 6147 ASSERT(fdestp == NULL); 6148 6149 fdest_new = (struct fcip_dest *) 6150 kmem_zalloc(sizeof (struct fcip_dest), KM_SLEEP); 6151 6152 mutex_init(&fdest_new->fcipd_mutex, NULL, MUTEX_DRIVER, NULL); 6153 fdest_new->fcipd_next = fptr->fcip_dest[hash_bucket]; 6154 fdest_new->fcipd_refcnt = 0; 6155 fdest_new->fcipd_rtable = frp; 6156 fdest_new->fcipd_ncmds = 0; 6157 fptr->fcip_dest[hash_bucket] = fdest_new; 6158 fdest_new->fcipd_flags = FCIP_PORT_NOTLOGGED; 6159 6160 mutex_exit(&fptr->fcip_dest_mutex); 6161 return (fdest_new); 6162 } 6163 6164 /* 6165 * Cleanup the dest hash table and remove all entries 6166 */ 6167 static void 6168 fcip_cleanup_dest(struct fcip *fptr) 6169 { 6170 struct fcip_dest *fdestp = NULL; 6171 struct fcip_dest *fdest_delp = NULL; 6172 int i; 6173 6174 mutex_enter(&fptr->fcip_dest_mutex); 6175 6176 for (i = 0; i < FCIP_DEST_HASH_ELEMS; i++) { 6177 
fdestp = fptr->fcip_dest[i]; 6178 while (fdestp != NULL) { 6179 mutex_destroy(&fdestp->fcipd_mutex); 6180 fdest_delp = fdestp; 6181 fdestp = fdestp->fcipd_next; 6182 kmem_free(fdest_delp, sizeof (struct fcip_dest)); 6183 fptr->fcip_dest[i] = NULL; 6184 } 6185 } 6186 mutex_exit(&fptr->fcip_dest_mutex); 6187 } 6188 6189 6190 /* 6191 * Send FARP requests for Fabric ports when we don't have the port 6192 * we wish to talk to in our routing hash table. FARP is specially required 6193 * to talk to FC switches for inband switch management. Most FC switches 6194 * today have a switch FC IP address for IP over FC inband switch management 6195 * but the WWN and Port_ID for this traffic is not available through the 6196 * Nameservers since the switch themeselves are transparent. 6197 */ 6198 /* ARGSUSED */ 6199 static struct fcip_dest * 6200 fcip_do_farp(struct fcip *fptr, la_wwn_t *pwwn, char *ip_addr, 6201 size_t ip_addr_len, int flags) 6202 { 6203 fcip_pkt_t *fcip_pkt; 6204 fc_packet_t *fc_pkt; 6205 fcip_port_info_t *fport = fptr->fcip_port_info; 6206 la_els_farp_t farp_cmd; 6207 la_els_farp_t *fcmd; 6208 struct fcip_dest *fdestp = NULL; 6209 int rval; 6210 clock_t farp_lbolt; 6211 la_wwn_t broadcast_wwn; 6212 struct fcip_dest *bdestp; 6213 struct fcip_routing_table *frp; 6214 6215 bdestp = fcip_get_dest(fptr, &broadcast_wwn); 6216 6217 if (bdestp == NULL) { 6218 return (fdestp); 6219 } 6220 6221 fcip_pkt = fcip_ipkt_alloc(fptr, sizeof (la_els_farp_t), 6222 sizeof (la_els_farp_t), bdestp->fcipd_pd, KM_SLEEP); 6223 6224 if (fcip_pkt == NULL) { 6225 return (fdestp); 6226 } 6227 6228 fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt); 6229 ether_to_wwn(&fcip_arpbroadcast_addr, &broadcast_wwn); 6230 6231 mutex_enter(&bdestp->fcipd_mutex); 6232 if (bdestp->fcipd_rtable == NULL) { 6233 mutex_exit(&bdestp->fcipd_mutex); 6234 fcip_ipkt_free(fcip_pkt); 6235 return (fdestp); 6236 } 6237 6238 fcip_pkt->fcip_pkt_dest = bdestp; 6239 fc_pkt->pkt_fca_device = bdestp->fcipd_fca_dev; 6240 6241 
bdestp->fcipd_ncmds++; 6242 mutex_exit(&bdestp->fcipd_mutex); 6243 6244 fcip_init_broadcast_pkt(fcip_pkt, NULL, 1); 6245 fcip_pkt->fcip_pkt_flags |= FCIP_PKT_IN_LIST; 6246 6247 /* 6248 * Now initialize the FARP payload itself 6249 */ 6250 fcmd = &farp_cmd; 6251 fcmd->ls_code.ls_code = LA_ELS_FARP_REQ; 6252 fcmd->ls_code.mbz = 0; 6253 /* 6254 * for now just match the Port WWN since the other match addr 6255 * code points are optional. We can explore matching the IP address 6256 * if needed 6257 */ 6258 if (ip_addr) { 6259 fcmd->match_addr = FARP_MATCH_WW_PN_IPv4; 6260 } else { 6261 fcmd->match_addr = FARP_MATCH_WW_PN; 6262 } 6263 6264 /* 6265 * Request the responder port to log into us - that way 6266 * the Transport is aware of the remote port when we create 6267 * an entry for it in our tables 6268 */ 6269 fcmd->resp_flags = FARP_INIT_REPLY | FARP_INIT_P_LOGI; 6270 fcmd->req_id = fport->fcipp_sid; 6271 fcmd->dest_id.port_id = fc_pkt->pkt_cmd_fhdr.d_id; 6272 bcopy(&fport->fcipp_pwwn, &fcmd->req_pwwn, sizeof (la_wwn_t)); 6273 bcopy(&fport->fcipp_nwwn, &fcmd->req_nwwn, sizeof (la_wwn_t)); 6274 bcopy(pwwn, &fcmd->resp_pwwn, sizeof (la_wwn_t)); 6275 /* 6276 * copy in source IP address if we get to know it 6277 */ 6278 if (ip_addr) { 6279 bcopy(ip_addr, fcmd->resp_ip, ip_addr_len); 6280 } 6281 6282 fc_pkt->pkt_cmdlen = sizeof (la_els_farp_t); 6283 fc_pkt->pkt_rsplen = sizeof (la_els_farp_t); 6284 fc_pkt->pkt_tran_type = FC_PKT_EXCHANGE; 6285 fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt; 6286 6287 /* 6288 * Endian safe copy 6289 */ 6290 FCIP_CP_OUT(fcmd, fc_pkt->pkt_cmd, fc_pkt->pkt_cmd_acc, 6291 sizeof (la_els_farp_t)); 6292 6293 /* 6294 * send the packet in polled mode. 
6295 */ 6296 rval = fc_ulp_issue_els(fport->fcipp_handle, fc_pkt); 6297 if (rval != FC_SUCCESS) { 6298 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN, 6299 "fcip_transport of farp pkt failed 0x%x", rval)); 6300 fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_LIST; 6301 fcip_ipkt_free(fcip_pkt); 6302 6303 mutex_enter(&bdestp->fcipd_mutex); 6304 bdestp->fcipd_ncmds--; 6305 mutex_exit(&bdestp->fcipd_mutex); 6306 6307 return (fdestp); 6308 } 6309 6310 farp_lbolt = ddi_get_lbolt(); 6311 farp_lbolt += drv_usectohz(FCIP_FARP_TIMEOUT); 6312 6313 mutex_enter(&fptr->fcip_mutex); 6314 fptr->fcip_farp_rsp_flag = 0; 6315 while (!fptr->fcip_farp_rsp_flag) { 6316 if (cv_timedwait(&fptr->fcip_farp_cv, &fptr->fcip_mutex, 6317 farp_lbolt) == -1) { 6318 /* 6319 * No FARP response from any destination port 6320 * so bail out. 6321 */ 6322 fptr->fcip_farp_rsp_flag = 1; 6323 } else { 6324 /* 6325 * We received a FARP response - check to see if the 6326 * response was in reply to our FARP request. 6327 */ 6328 6329 mutex_enter(&fptr->fcip_rt_mutex); 6330 frp = fcip_lookup_rtable(fptr, pwwn, FCIP_COMPARE_NWWN); 6331 mutex_exit(&fptr->fcip_rt_mutex); 6332 6333 if ((frp != NULL) && 6334 !FCIP_RTE_UNAVAIL(frp->fcipr_state)) { 6335 fdestp = fcip_get_dest(fptr, pwwn); 6336 } else { 6337 /* 6338 * Not our FARP response so go back and wait 6339 * again till FARP_TIMEOUT expires 6340 */ 6341 fptr->fcip_farp_rsp_flag = 0; 6342 } 6343 } 6344 } 6345 mutex_exit(&fptr->fcip_mutex); 6346 6347 fcip_pkt->fcip_pkt_flags |= FCIP_PKT_IN_LIST; 6348 fcip_ipkt_free(fcip_pkt); 6349 mutex_enter(&bdestp->fcipd_mutex); 6350 bdestp->fcipd_ncmds--; 6351 mutex_exit(&bdestp->fcipd_mutex); 6352 return (fdestp); 6353 } 6354 6355 6356 6357 /* 6358 * Helper routine to PLOGI to a remote port we wish to talk to. 6359 * This may not be required since the port driver does logins anyway, 6360 * but this can be required in fabric cases since FARP requests/responses 6361 * don't require you to be logged in? 
6362 */ 6363 6364 /* ARGSUSED */ 6365 static int 6366 fcip_do_plogi(struct fcip *fptr, struct fcip_routing_table *frp) 6367 { 6368 fcip_pkt_t *fcip_pkt; 6369 fc_packet_t *fc_pkt; 6370 fcip_port_info_t *fport = fptr->fcip_port_info; 6371 la_els_logi_t logi; 6372 int rval; 6373 fc_frame_hdr_t *fr_hdr; 6374 6375 /* 6376 * Don't bother to login for broadcast RTE entries 6377 */ 6378 if ((frp->fcipr_d_id.port_id == 0x0) || 6379 (frp->fcipr_d_id.port_id == 0xffffff)) { 6380 return (FC_FAILURE); 6381 } 6382 6383 /* 6384 * We shouldn't pound in too many logins here 6385 * 6386 */ 6387 if (frp->fcipr_state == FCIP_RT_LOGIN_PROGRESS || 6388 frp->fcipr_state == PORT_DEVICE_LOGGED_IN) { 6389 return (FC_SUCCESS); 6390 } 6391 6392 fcip_pkt = fcip_ipkt_alloc(fptr, sizeof (la_els_logi_t), 6393 sizeof (la_els_logi_t), frp->fcipr_pd, KM_SLEEP); 6394 6395 if (fcip_pkt == NULL) { 6396 return (FC_FAILURE); 6397 } 6398 6399 /* 6400 * Update back pointer for login state update 6401 */ 6402 fcip_pkt->fcip_pkt_frp = frp; 6403 frp->fcipr_state = FCIP_RT_LOGIN_PROGRESS; 6404 6405 fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt); 6406 6407 /* 6408 * Initialize frame header for ELS 6409 */ 6410 fr_hdr = &fc_pkt->pkt_cmd_fhdr; 6411 fr_hdr->r_ctl = R_CTL_ELS_REQ; 6412 fr_hdr->type = FC_TYPE_EXTENDED_LS; 6413 fr_hdr->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ; 6414 fr_hdr->df_ctl = 0; 6415 fr_hdr->s_id = fport->fcipp_sid.port_id; 6416 fr_hdr->d_id = frp->fcipr_d_id.port_id; 6417 fr_hdr->seq_cnt = 0; 6418 fr_hdr->ox_id = 0xffff; 6419 fr_hdr->rx_id = 0xffff; 6420 fr_hdr->ro = 0; 6421 6422 fc_pkt->pkt_rsplen = sizeof (la_els_logi_t); 6423 fc_pkt->pkt_comp = fcip_ipkt_callback; 6424 fc_pkt->pkt_tran_type = FC_PKT_EXCHANGE; 6425 fc_pkt->pkt_timeout = 10; /* 10 seconds */ 6426 fcip_pkt->fcip_pkt_ttl = fptr->fcip_timeout_ticks + fc_pkt->pkt_timeout; 6427 fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt; 6428 6429 /* 6430 * Everybody does class 3, so let's just set it. 
If the transport 6431 * knows better, it will deal with the class appropriately. 6432 */ 6433 6434 fc_pkt->pkt_tran_flags = FC_TRAN_INTR | FC_TRAN_CLASS3; 6435 6436 /* 6437 * we need only fill in the ls_code and the cmd frame header 6438 */ 6439 bzero((void *)&logi, sizeof (la_els_logi_t)); 6440 logi.ls_code.ls_code = LA_ELS_PLOGI; 6441 logi.ls_code.mbz = 0; 6442 6443 FCIP_CP_OUT((uint8_t *)&logi, fc_pkt->pkt_cmd, fc_pkt->pkt_cmd_acc, 6444 sizeof (la_els_logi_t)); 6445 6446 rval = fc_ulp_login(fport->fcipp_handle, &fc_pkt, 1); 6447 if (rval != FC_SUCCESS) { 6448 cmn_err(CE_WARN, 6449 "!fc_ulp_login failed for d_id: 0x%x, rval: 0x%x", 6450 frp->fcipr_d_id.port_id, rval); 6451 fcip_ipkt_free(fcip_pkt); 6452 } 6453 return (rval); 6454 } 6455 6456 /* 6457 * The packet callback routine - called from the transport/FCA after 6458 * it is done DMA'ing/sending out the packet contents on the wire so 6459 * that the alloc'ed packet can be freed 6460 */ 6461 static void 6462 fcip_ipkt_callback(fc_packet_t *fc_pkt) 6463 { 6464 ls_code_t logi_req; 6465 ls_code_t logi_resp; 6466 fcip_pkt_t *fcip_pkt; 6467 fc_frame_hdr_t *fr_hdr; 6468 struct fcip *fptr; 6469 fcip_port_info_t *fport; 6470 struct fcip_routing_table *frp; 6471 6472 fr_hdr = &fc_pkt->pkt_cmd_fhdr; 6473 6474 FCIP_CP_IN(fc_pkt->pkt_resp, (uint8_t *)&logi_resp, 6475 fc_pkt->pkt_resp_acc, sizeof (logi_resp)); 6476 6477 FCIP_CP_IN(fc_pkt->pkt_cmd, (uint8_t *)&logi_req, fc_pkt->pkt_cmd_acc, 6478 sizeof (logi_req)); 6479 6480 fcip_pkt = (fcip_pkt_t *)fc_pkt->pkt_ulp_private; 6481 frp = fcip_pkt->fcip_pkt_frp; 6482 fptr = fcip_pkt->fcip_pkt_fptr; 6483 fport = fptr->fcip_port_info; 6484 6485 ASSERT(logi_req.ls_code == LA_ELS_PLOGI); 6486 6487 if (fc_pkt->pkt_state != FC_PKT_SUCCESS || 6488 logi_resp.ls_code != LA_ELS_ACC) { 6489 /* EMPTY */ 6490 6491 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN, 6492 "opcode : 0x%x to d_id: 0x%x failed", 6493 logi_req.ls_code, fr_hdr->d_id)); 6494 6495 mutex_enter(&fptr->fcip_rt_mutex); 6496 
frp->fcipr_state = PORT_DEVICE_INVALID; 6497 frp->fcipr_invalid_timeout = fptr->fcip_timeout_ticks + 6498 (FCIP_RTE_TIMEOUT / 2); 6499 mutex_exit(&fptr->fcip_rt_mutex); 6500 } else { 6501 fc_portid_t d_id; 6502 6503 d_id.port_id = fr_hdr->d_id; 6504 d_id.priv_lilp_posit = 0; 6505 6506 /* 6507 * Update PLOGI results; FCA Handle, and Port device handles 6508 */ 6509 mutex_enter(&fptr->fcip_rt_mutex); 6510 frp->fcipr_pd = fc_pkt->pkt_pd; 6511 frp->fcipr_fca_dev = 6512 fc_ulp_get_fca_device(fport->fcipp_handle, d_id); 6513 frp->fcipr_state = PORT_DEVICE_LOGGED_IN; 6514 mutex_exit(&fptr->fcip_rt_mutex); 6515 } 6516 6517 fcip_ipkt_free(fcip_pkt); 6518 } 6519 6520 6521 /* 6522 * pkt_alloc routine for outbound IP datagrams. The cache constructor 6523 * Only initializes the pkt_cmd_dma (which is where the outbound datagram 6524 * is stuffed) since we don't expect response 6525 */ 6526 static fcip_pkt_t * 6527 fcip_pkt_alloc(struct fcip *fptr, mblk_t *bp, int flags, int datalen) 6528 { 6529 fcip_pkt_t *fcip_pkt; 6530 fc_packet_t *fc_pkt; 6531 ddi_dma_cookie_t pkt_cookie; 6532 ddi_dma_cookie_t *cp; 6533 uint32_t cnt; 6534 fcip_port_info_t *fport = fptr->fcip_port_info; 6535 6536 fcip_pkt = kmem_cache_alloc(fptr->fcip_xmit_cache, flags); 6537 if (fcip_pkt == NULL) { 6538 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN, 6539 "fcip_pkt_alloc: kmem_cache_alloc failed")); 6540 return (NULL); 6541 } 6542 6543 fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt); 6544 fcip_pkt->fcip_pkt_fcpktp = fc_pkt; 6545 fc_pkt->pkt_tran_flags = 0; 6546 fcip_pkt->fcip_pkt_dma_flags = 0; 6547 6548 /* 6549 * the cache constructor has allocated the dma handle 6550 */ 6551 fc_pkt->pkt_cmd = (caddr_t)bp->b_rptr; 6552 if (ddi_dma_addr_bind_handle(fc_pkt->pkt_cmd_dma, NULL, 6553 (caddr_t)bp->b_rptr, datalen, DDI_DMA_WRITE | DDI_DMA_CONSISTENT, 6554 DDI_DMA_DONTWAIT, NULL, &pkt_cookie, 6555 &fc_pkt->pkt_cmd_cookie_cnt) != DDI_DMA_MAPPED) { 6556 goto fail; 6557 } 6558 6559 fcip_pkt->fcip_pkt_dma_flags |= FCIP_CMD_DMA_BOUND; 
6560 6561 if (fc_pkt->pkt_cmd_cookie_cnt > 6562 fport->fcipp_cmd_dma_attr.dma_attr_sgllen) { 6563 goto fail; 6564 } 6565 6566 ASSERT(fc_pkt->pkt_cmd_cookie_cnt != 0); 6567 6568 cp = fc_pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc( 6569 fc_pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie), 6570 KM_NOSLEEP); 6571 6572 if (cp == NULL) { 6573 goto fail; 6574 } 6575 6576 *cp = pkt_cookie; 6577 cp++; 6578 for (cnt = 1; cnt < fc_pkt->pkt_cmd_cookie_cnt; cnt++, cp++) { 6579 ddi_dma_nextcookie(fc_pkt->pkt_cmd_dma, &pkt_cookie); 6580 *cp = pkt_cookie; 6581 } 6582 6583 fc_pkt->pkt_cmdlen = datalen; 6584 6585 fcip_pkt->fcip_pkt_mp = NULL; 6586 fcip_pkt->fcip_pkt_wq = NULL; 6587 fcip_pkt->fcip_pkt_dest = NULL; 6588 fcip_pkt->fcip_pkt_next = NULL; 6589 fcip_pkt->fcip_pkt_prev = NULL; 6590 fcip_pkt->fcip_pkt_state = 0; 6591 fcip_pkt->fcip_pkt_reason = 0; 6592 fcip_pkt->fcip_pkt_flags = 0; 6593 fcip_pkt->fcip_pkt_frp = NULL; 6594 6595 return (fcip_pkt); 6596 fail: 6597 if (fcip_pkt) { 6598 fcip_pkt_free(fcip_pkt, 0); 6599 } 6600 return ((fcip_pkt_t *)0); 6601 } 6602 6603 /* 6604 * Free a packet and all its associated resources 6605 */ 6606 static void 6607 fcip_pkt_free(struct fcip_pkt *fcip_pkt, int free_mblk) 6608 { 6609 fc_packet_t *fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt); 6610 struct fcip *fptr = fcip_pkt->fcip_pkt_fptr; 6611 6612 if (fc_pkt->pkt_cmd_cookie != NULL) { 6613 kmem_free(fc_pkt->pkt_cmd_cookie, fc_pkt->pkt_cmd_cookie_cnt * 6614 sizeof (ddi_dma_cookie_t)); 6615 fc_pkt->pkt_cmd_cookie = NULL; 6616 } 6617 6618 fcip_free_pkt_dma(fcip_pkt); 6619 if (free_mblk && fcip_pkt->fcip_pkt_mp) { 6620 freemsg(fcip_pkt->fcip_pkt_mp); 6621 fcip_pkt->fcip_pkt_mp = NULL; 6622 } 6623 6624 (void) fc_ulp_uninit_packet(fptr->fcip_port_info->fcipp_handle, fc_pkt); 6625 6626 kmem_cache_free(fptr->fcip_xmit_cache, (void *)fcip_pkt); 6627 } 6628 6629 /* 6630 * Allocate a Packet for internal driver use. 
This is for requests 6631 * that originate from within the driver 6632 */ 6633 static fcip_pkt_t * 6634 fcip_ipkt_alloc(struct fcip *fptr, int cmdlen, int resplen, 6635 opaque_t pd, int flags) 6636 { 6637 fcip_pkt_t *fcip_pkt; 6638 fc_packet_t *fc_pkt; 6639 int (*cb)(caddr_t); 6640 fcip_port_info_t *fport = fptr->fcip_port_info; 6641 size_t real_len; 6642 uint_t held_here = 0; 6643 ddi_dma_cookie_t pkt_cookie; 6644 ddi_dma_cookie_t *cp; 6645 uint32_t cnt; 6646 6647 cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT; 6648 6649 fcip_pkt = kmem_zalloc((sizeof (fcip_pkt_t) + 6650 fport->fcipp_fca_pkt_size), flags); 6651 6652 if (fcip_pkt == NULL) { 6653 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, 6654 (CE_WARN, "pkt alloc of ineternal pkt failed")); 6655 goto fail; 6656 } 6657 6658 fcip_pkt->fcip_pkt_flags = FCIP_PKT_INTERNAL; 6659 fcip_pkt->fcip_pkt_fptr = fptr; 6660 fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt); 6661 fcip_pkt->fcip_pkt_fcpktp = fc_pkt; 6662 fc_pkt->pkt_tran_flags = 0; 6663 fc_pkt->pkt_cmdlen = 0; 6664 fc_pkt->pkt_rsplen = 0; 6665 fc_pkt->pkt_datalen = 0; 6666 fc_pkt->pkt_fca_private = (opaque_t)((caddr_t)fcip_pkt + 6667 sizeof (fcip_pkt_t)); 6668 fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt; 6669 6670 if (cmdlen) { 6671 if (ddi_dma_alloc_handle(fptr->fcip_dip, 6672 &fport->fcipp_cmd_dma_attr, cb, NULL, 6673 &fc_pkt->pkt_cmd_dma) != DDI_SUCCESS) { 6674 goto fail; 6675 } 6676 6677 if (ddi_dma_mem_alloc(fc_pkt->pkt_cmd_dma, cmdlen, 6678 &fport->fcipp_fca_acc_attr, DDI_DMA_CONSISTENT, 6679 cb, NULL, (caddr_t *)&fc_pkt->pkt_cmd, 6680 &real_len, &fc_pkt->pkt_cmd_acc) != DDI_SUCCESS) { 6681 goto fail; 6682 } 6683 6684 fcip_pkt->fcip_pkt_dma_flags |= FCIP_CMD_DMA_MEM; 6685 fc_pkt->pkt_cmdlen = cmdlen; 6686 6687 if (real_len < cmdlen) { 6688 goto fail; 6689 } 6690 6691 if (ddi_dma_addr_bind_handle(fc_pkt->pkt_cmd_dma, NULL, 6692 (caddr_t)fc_pkt->pkt_cmd, real_len, 6693 DDI_DMA_WRITE | DDI_DMA_CONSISTENT, cb, NULL, 6694 &pkt_cookie, &fc_pkt->pkt_cmd_cookie_cnt) != 6695 
DDI_DMA_MAPPED) { 6696 goto fail; 6697 } 6698 6699 fcip_pkt->fcip_pkt_dma_flags |= FCIP_CMD_DMA_BOUND; 6700 6701 if (fc_pkt->pkt_cmd_cookie_cnt > 6702 fport->fcipp_cmd_dma_attr.dma_attr_sgllen) { 6703 goto fail; 6704 } 6705 6706 ASSERT(fc_pkt->pkt_cmd_cookie_cnt != 0); 6707 6708 cp = fc_pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc( 6709 fc_pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie), 6710 KM_NOSLEEP); 6711 6712 if (cp == NULL) { 6713 goto fail; 6714 } 6715 6716 *cp = pkt_cookie; 6717 cp++; 6718 for (cnt = 1; cnt < fc_pkt->pkt_cmd_cookie_cnt; cnt++, cp++) { 6719 ddi_dma_nextcookie(fc_pkt->pkt_cmd_dma, &pkt_cookie); 6720 *cp = pkt_cookie; 6721 } 6722 } 6723 6724 if (resplen) { 6725 if (ddi_dma_alloc_handle(fptr->fcip_dip, 6726 &fport->fcipp_resp_dma_attr, cb, NULL, 6727 &fc_pkt->pkt_resp_dma) != DDI_SUCCESS) { 6728 goto fail; 6729 } 6730 6731 if (ddi_dma_mem_alloc(fc_pkt->pkt_resp_dma, resplen, 6732 &fport->fcipp_fca_acc_attr, DDI_DMA_CONSISTENT, 6733 cb, NULL, (caddr_t *)&fc_pkt->pkt_resp, 6734 &real_len, &fc_pkt->pkt_resp_acc) != DDI_SUCCESS) { 6735 goto fail; 6736 } 6737 6738 fcip_pkt->fcip_pkt_dma_flags |= FCIP_RESP_DMA_MEM; 6739 6740 if (real_len < resplen) { 6741 goto fail; 6742 } 6743 6744 if (ddi_dma_addr_bind_handle(fc_pkt->pkt_resp_dma, NULL, 6745 (caddr_t)fc_pkt->pkt_resp, real_len, 6746 DDI_DMA_WRITE | DDI_DMA_CONSISTENT, cb, NULL, 6747 &pkt_cookie, &fc_pkt->pkt_resp_cookie_cnt) != 6748 DDI_DMA_MAPPED) { 6749 goto fail; 6750 } 6751 6752 fcip_pkt->fcip_pkt_dma_flags |= FCIP_RESP_DMA_BOUND; 6753 fc_pkt->pkt_rsplen = resplen; 6754 6755 if (fc_pkt->pkt_resp_cookie_cnt > 6756 fport->fcipp_resp_dma_attr.dma_attr_sgllen) { 6757 goto fail; 6758 } 6759 6760 ASSERT(fc_pkt->pkt_resp_cookie_cnt != 0); 6761 6762 cp = fc_pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc( 6763 fc_pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie), 6764 KM_NOSLEEP); 6765 6766 if (cp == NULL) { 6767 goto fail; 6768 } 6769 6770 *cp = pkt_cookie; 6771 cp++; 6772 for (cnt = 1; cnt 
< fc_pkt->pkt_resp_cookie_cnt; cnt++, cp++) { 6773 ddi_dma_nextcookie(fc_pkt->pkt_resp_dma, &pkt_cookie); 6774 *cp = pkt_cookie; 6775 } 6776 } 6777 6778 /* 6779 * Initialize pkt_pd prior to calling fc_ulp_init_packet 6780 */ 6781 6782 fc_pkt->pkt_pd = pd; 6783 6784 /* 6785 * Ask the FCA to bless the internal packet 6786 */ 6787 if (fc_ulp_init_packet((opaque_t)fport->fcipp_handle, 6788 fc_pkt, flags) != FC_SUCCESS) { 6789 goto fail; 6790 } 6791 6792 /* 6793 * Keep track of # of ipkts alloc-ed 6794 * This function can get called with mutex either held or not. So, we'll 6795 * grab mutex if it is not already held by this thread. 6796 * This has to be cleaned up someday. 6797 */ 6798 if (!MUTEX_HELD(&fptr->fcip_mutex)) { 6799 held_here = 1; 6800 mutex_enter(&fptr->fcip_mutex); 6801 } 6802 6803 fptr->fcip_num_ipkts_pending++; 6804 6805 if (held_here) 6806 mutex_exit(&fptr->fcip_mutex); 6807 6808 return (fcip_pkt); 6809 fail: 6810 if (fcip_pkt) { 6811 fcip_ipkt_free(fcip_pkt); 6812 } 6813 6814 return (NULL); 6815 } 6816 6817 /* 6818 * free up an internal IP packet (like a FARP pkt etc) 6819 */ 6820 static void 6821 fcip_ipkt_free(fcip_pkt_t *fcip_pkt) 6822 { 6823 fc_packet_t *fc_pkt; 6824 struct fcip *fptr = fcip_pkt->fcip_pkt_fptr; 6825 fcip_port_info_t *fport = fptr->fcip_port_info; 6826 6827 ASSERT(fptr != NULL); 6828 ASSERT(!mutex_owned(&fptr->fcip_mutex)); 6829 6830 /* One less ipkt to wait for */ 6831 mutex_enter(&fptr->fcip_mutex); 6832 if (fptr->fcip_num_ipkts_pending) /* Safety check */ 6833 fptr->fcip_num_ipkts_pending--; 6834 mutex_exit(&fptr->fcip_mutex); 6835 6836 fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt); 6837 6838 if (fc_pkt->pkt_cmd_cookie != NULL) { 6839 kmem_free(fc_pkt->pkt_cmd_cookie, fc_pkt->pkt_cmd_cookie_cnt * 6840 sizeof (ddi_dma_cookie_t)); 6841 fc_pkt->pkt_cmd_cookie = NULL; 6842 } 6843 6844 if (fc_pkt->pkt_resp_cookie != NULL) { 6845 kmem_free(fc_pkt->pkt_resp_cookie, fc_pkt->pkt_resp_cookie_cnt * 6846 sizeof (ddi_dma_cookie_t)); 6847 
fc_pkt->pkt_resp_cookie = NULL; 6848 } 6849 6850 if (fc_ulp_uninit_packet(fport->fcipp_handle, fc_pkt) != FC_SUCCESS) { 6851 FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN, 6852 "fc_ulp_uninit_pkt failed for internal fc pkt 0x%p", 6853 (void *)fc_pkt)); 6854 } 6855 fcip_free_pkt_dma(fcip_pkt); 6856 kmem_free(fcip_pkt, (sizeof (fcip_pkt_t) + fport->fcipp_fca_pkt_size)); 6857 } 6858 6859 /* 6860 * initialize a unicast request. This is a misnomer because even the 6861 * broadcast requests are initialized with this routine 6862 */ 6863 static void 6864 fcip_init_unicast_pkt(fcip_pkt_t *fcip_pkt, fc_portid_t sid, fc_portid_t did, 6865 void (*comp) ()) 6866 { 6867 fc_packet_t *fc_pkt; 6868 fc_frame_hdr_t *fr_hdr; 6869 struct fcip *fptr = fcip_pkt->fcip_pkt_fptr; 6870 6871 fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt); 6872 fr_hdr = &fc_pkt->pkt_cmd_fhdr; 6873 6874 fr_hdr->r_ctl = R_CTL_DEVICE_DATA | R_CTL_UNSOL_DATA; 6875 fr_hdr->s_id = sid.port_id; 6876 fr_hdr->d_id = did.port_id; 6877 fr_hdr->type = FC_TYPE_IS8802_SNAP; 6878 fr_hdr->f_ctl = F_CTL_FIRST_SEQ | F_CTL_LAST_SEQ; 6879 fr_hdr->df_ctl = DF_CTL_NET_HDR; 6880 fr_hdr->seq_cnt = 0; 6881 fr_hdr->ox_id = 0xffff; 6882 fr_hdr->rx_id = 0xffff; 6883 fr_hdr->ro = 0; 6884 /* 6885 * reset all the length fields 6886 */ 6887 fc_pkt->pkt_rsplen = 0; 6888 fc_pkt->pkt_datalen = 0; 6889 fc_pkt->pkt_comp = comp; 6890 if (comp) { 6891 fc_pkt->pkt_tran_flags |= FC_TRAN_INTR; 6892 } else { 6893 fc_pkt->pkt_tran_flags |= FC_TRAN_NO_INTR; 6894 } 6895 fc_pkt->pkt_tran_type = FC_PKT_OUTBOUND | FC_PKT_IP_WRITE; 6896 fc_pkt->pkt_timeout = fcip_pkt_ttl_ticks; 6897 fcip_pkt->fcip_pkt_ttl = fptr->fcip_timeout_ticks + fc_pkt->pkt_timeout; 6898 } 6899 6900 6901 /* 6902 * Initialize a fcip_packet for broadcast data transfers 6903 */ 6904 static void 6905 fcip_init_broadcast_pkt(fcip_pkt_t *fcip_pkt, void (*comp) (), int is_els) 6906 { 6907 fc_packet_t *fc_pkt; 6908 fc_frame_hdr_t *fr_hdr; 6909 struct fcip *fptr = fcip_pkt->fcip_pkt_fptr; 6910 fcip_port_info_t 
*fport = fptr->fcip_port_info; 6911 uint32_t sid; 6912 uint32_t did; 6913 6914 FCIP_TNF_PROBE_1((fcip_init_broadcast_pkt, "fcip io", /* CSTYLED */, 6915 tnf_string, msg, "enter")); 6916 fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt); 6917 fr_hdr = &fc_pkt->pkt_cmd_fhdr; 6918 sid = fport->fcipp_sid.port_id; 6919 6920 if (is_els) { 6921 fr_hdr->r_ctl = R_CTL_ELS_REQ; 6922 } else { 6923 fr_hdr->r_ctl = R_CTL_DEVICE_DATA | R_CTL_UNSOL_DATA; 6924 } 6925 fr_hdr->s_id = sid; 6926 /* 6927 * The destination broadcast address depends on the topology 6928 * of the underlying port 6929 */ 6930 did = fptr->fcip_broadcast_did; 6931 /* 6932 * mark pkt a broadcast pkt 6933 */ 6934 fc_pkt->pkt_tran_type = FC_PKT_BROADCAST; 6935 6936 fr_hdr->d_id = did; 6937 fr_hdr->type = FC_TYPE_IS8802_SNAP; 6938 fr_hdr->f_ctl = F_CTL_FIRST_SEQ | F_CTL_LAST_SEQ | F_CTL_END_SEQ; 6939 fr_hdr->f_ctl &= ~(F_CTL_SEQ_INITIATIVE); 6940 fr_hdr->df_ctl = DF_CTL_NET_HDR; 6941 fr_hdr->seq_cnt = 0; 6942 fr_hdr->ox_id = 0xffff; 6943 fr_hdr->rx_id = 0xffff; 6944 fr_hdr->ro = 0; 6945 fc_pkt->pkt_comp = comp; 6946 6947 if (comp) { 6948 fc_pkt->pkt_tran_flags |= FC_TRAN_INTR; 6949 } else { 6950 fc_pkt->pkt_tran_flags |= FC_TRAN_NO_INTR; 6951 } 6952 6953 fc_pkt->pkt_tran_type = FC_PKT_BROADCAST; 6954 fc_pkt->pkt_timeout = fcip_pkt_ttl_ticks; 6955 fcip_pkt->fcip_pkt_ttl = fptr->fcip_timeout_ticks + fc_pkt->pkt_timeout; 6956 } 6957 6958 6959 6960 /* 6961 * Free up all DMA resources associated with an allocated packet 6962 */ 6963 static void 6964 fcip_free_pkt_dma(fcip_pkt_t *fcip_pkt) 6965 { 6966 fc_packet_t *fc_pkt; 6967 6968 fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt); 6969 6970 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, 6971 (CE_NOTE, "in freepktdma : flags 0x%x", 6972 fcip_pkt->fcip_pkt_dma_flags)); 6973 6974 if (fcip_pkt->fcip_pkt_dma_flags & FCIP_CMD_DMA_BOUND) { 6975 (void) ddi_dma_unbind_handle(fc_pkt->pkt_cmd_dma); 6976 } 6977 if (fcip_pkt->fcip_pkt_dma_flags & FCIP_CMD_DMA_MEM) { 6978 ddi_dma_mem_free(&fc_pkt->pkt_cmd_acc); 6979 
} 6980 6981 if (fcip_pkt->fcip_pkt_dma_flags & FCIP_RESP_DMA_BOUND) { 6982 (void) ddi_dma_unbind_handle(fc_pkt->pkt_resp_dma); 6983 } 6984 if (fcip_pkt->fcip_pkt_dma_flags & FCIP_RESP_DMA_MEM) { 6985 ddi_dma_mem_free(&fc_pkt->pkt_resp_acc); 6986 } 6987 /* 6988 * for internal commands, we need to free up the dma handles too. 6989 * This is done in the cache destructor for non internal cmds 6990 */ 6991 if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_INTERNAL) { 6992 if (fc_pkt->pkt_cmd_dma) { 6993 ddi_dma_free_handle(&fc_pkt->pkt_cmd_dma); 6994 } 6995 if (fc_pkt->pkt_resp_dma) { 6996 ddi_dma_free_handle(&fc_pkt->pkt_resp_dma); 6997 } 6998 } 6999 } 7000 7001 7002 /* 7003 * helper routine to generate a string, given an ether addr 7004 */ 7005 static void 7006 fcip_ether_to_str(struct ether_addr *e, caddr_t s) 7007 { 7008 int i; 7009 7010 for (i = 0; i < sizeof (struct ether_addr); i++, s += 2) { 7011 FCIP_DEBUG(FCIP_DEBUG_MISC, 7012 (CE_CONT, "0x%02X:", e->ether_addr_octet[i])); 7013 (void) sprintf(s, "%02X", e->ether_addr_octet[i]); 7014 } 7015 7016 *s = '\0'; 7017 } 7018 7019 /* 7020 * When a broadcast request comes from the upper streams modules, it 7021 * is ugly to look into every datagram to figure out if it is a broadcast 7022 * datagram or a unicast packet. Instead just add the broadcast entries 7023 * into our routing and dest tables and the standard hash table look ups 7024 * will find the entries. It is a lot cleaner this way. Also Solaris ifconfig 7025 * seems to be very ethernet specific and it requires broadcasts to the 7026 * ether broadcast addr of 0xffffffffff to succeed even though we specified 7027 * in the dl_info request that our broadcast MAC addr is 0x0000000000 7028 * (can't figure out why RFC2625 did this though). 
 * So add broadcast entries
 * for both MAC address
 */
static int
fcip_dest_add_broadcast_entry(struct fcip *fptr, int new_flag)
{
	fc_portmap_t			map;
	struct fcip_routing_table	*frp;
	uint32_t			did;
	la_wwn_t			broadcast_wwn;

	/*
	 * get port_id of destination for broadcast - this is topology
	 * dependent
	 */
	did = fptr->fcip_broadcast_did;

	/* first entry: fcip's own ARP broadcast addr (0x00...00) */
	ether_to_wwn(&fcip_arpbroadcast_addr, &broadcast_wwn);
	bcopy((void *)&broadcast_wwn, (void *)&map.map_pwwn, sizeof (la_wwn_t));
	bcopy((void *)&broadcast_wwn, (void *)&map.map_nwwn, sizeof (la_wwn_t));

	map.map_did.port_id = did;
	map.map_hard_addr.hard_addr = did;
	map.map_state = PORT_DEVICE_VALID;
	/* new_flag: caller is creating (vs. refreshing) the entry */
	if (new_flag) {
		map.map_type = PORT_DEVICE_NEW;
	} else {
		map.map_type = PORT_DEVICE_CHANGED;
	}
	map.map_flags = 0;
	map.map_pd = NULL;
	bzero(&map.map_fc4_types, sizeof (map.map_fc4_types));
	fcip_rt_update(fptr, &map, 1);
	mutex_enter(&fptr->fcip_rt_mutex);
	frp = fcip_lookup_rtable(fptr, &broadcast_wwn, FCIP_COMPARE_NWWN);
	mutex_exit(&fptr->fcip_rt_mutex);
	if (frp == NULL) {
		return (FC_FAILURE);
	}
	(void) fcip_add_dest(fptr, frp);
	/*
	 * The Upper IP layers expect the traditional broadcast MAC addr
	 * of 0xff ff ff ff ff ff to work too if we want to plumb the fcip
	 * stream through the /etc/hostname.fcipXX file. Instead of checking
	 * each phys addr for a match with fcip's ARP header broadcast
	 * addr (0x00 00 00 00 00 00), its simply easier to add another
	 * broadcast entry for 0xff ff ff ff ff ff.
	 */
	ether_to_wwn(&fcipnhbroadcastaddr, &broadcast_wwn);
	bcopy((void *)&broadcast_wwn, (void *)&map.map_pwwn, sizeof (la_wwn_t));
	bcopy((void *)&broadcast_wwn, (void *)&map.map_nwwn, sizeof (la_wwn_t));
	fcip_rt_update(fptr, &map, 1);
	mutex_enter(&fptr->fcip_rt_mutex);
	frp = fcip_lookup_rtable(fptr, &broadcast_wwn, FCIP_COMPARE_NWWN);
	mutex_exit(&fptr->fcip_rt_mutex);
	if (frp == NULL) {
		return (FC_FAILURE);
	}
	(void) fcip_add_dest(fptr, frp);
	return (FC_SUCCESS);
}

/*
 * We need to obtain the D_ID of the broadcast port for transmitting all
 * our broadcast (and multicast) requests. The broadcast D_ID as we know
 * is dependent on the link topology.
 *
 * Returns the broadcast D_ID for the port's current topology; for an
 * unknown topology the port's own S_ID is returned as a fallback.
 */
static uint32_t
fcip_get_broadcast_did(struct fcip *fptr)
{
	fcip_port_info_t	*fport = fptr->fcip_port_info;
	uint32_t		did = 0;
	uint32_t		sid;

	FCIP_TNF_PROBE_2((fcip_get_broadcast_did, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "enter",
	    tnf_opaque, fptr, fptr));

	sid = fport->fcipp_sid.port_id;

	switch (fport->fcipp_topology) {

	case FC_TOP_PT_PT: {
		fc_portmap_t	*port_map = NULL;
		uint32_t	listlen = 0;

		/*
		 * point-to-point: "broadcast" simply targets the single
		 * peer port, so look it up in the transport's port map
		 */
		if (fc_ulp_getportmap(fport->fcipp_handle, &port_map,
		    &listlen, FC_ULP_PLOGI_DONTCARE) == FC_SUCCESS) {
			FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE,
			    "fcip_gpmap: listlen :  0x%x", listlen));
			if (listlen == 1) {
				did = port_map->map_did.port_id;
			}
		}
		if (port_map) {
			kmem_free(port_map, listlen * sizeof (fc_portmap_t));
		}
		if (listlen != 1) {
			/* Dummy return value */
			return (0x00FFFFFF);
		}
		break;
	}

	case FC_TOP_NO_NS:
		/* FALLTHROUGH */
	case FC_TOP_FABRIC:
		/*
		 * The broadcast address is the same whether or not
		 * the switch/fabric contains a Name service.
		 */
		did = 0x00FFFFFF;
		break;

	case FC_TOP_PUBLIC_LOOP:
		/*
		 * The open replicate primitive must not be used. The
		 * broadcast sequence is simply sent to ALPA 0x00. The
		 * fabric controller then propagates the broadcast to all
		 * other ports. The fabric propagates the broadcast by
		 * using the OPNfr primitive.
		 */
		did = 0x00;
		break;

	case FC_TOP_PRIVATE_LOOP:
		/*
		 * The source port for broadcast in private loop mode
		 * must send an OPN(fr) signal forcing all ports in the
		 * loop to replicate the frames that they receive.
		 */
		did = 0x00FFFFFF;
		break;

	case FC_TOP_UNKNOWN:
		/* FALLTHROUGH */
	default:
		did = sid;
		FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_WARN,
		    "fcip(0x%x):unknown topology in init_broadcast_pkt",
		    fptr->fcip_instance));
		break;
	}
	FCIP_TNF_PROBE_2((fcip_get_broadcast_did, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "return",
	    tnf_opaque, did, did));

	return (did);
}


/*
 * fcip timeout performs 2 operations:
 * 1. timeout any packets sent to the FCA for which a callback hasn't
 *    happened. If you are wondering why we need a callback since all
 *    traffic in FCIP is unidirectional, hence all exchanges are unidirectional
 *    but wait, we can only free up the resources after we know the FCA has
 *    DMA'ed out the data. pretty obvious eh :)
 *
 * 2. Retire and routing table entries we marked up for retiring.
This is 7188 * to give the link a chance to recover instead of marking a port down 7189 * when we have lost all communication with it after a link transition 7190 */ 7191 static void 7192 fcip_timeout(void *arg) 7193 { 7194 struct fcip *fptr = (struct fcip *)arg; 7195 int i; 7196 fcip_pkt_t *fcip_pkt; 7197 struct fcip_dest *fdestp; 7198 int index; 7199 struct fcip_routing_table *frtp; 7200 int dispatch_rte_removal = 0; 7201 7202 mutex_enter(&fptr->fcip_mutex); 7203 7204 fptr->fcip_flags |= FCIP_IN_TIMEOUT; 7205 fptr->fcip_timeout_ticks += fcip_tick_incr; 7206 7207 if (fptr->fcip_flags & (FCIP_DETACHED | FCIP_DETACHING | \ 7208 FCIP_SUSPENDED | FCIP_POWER_DOWN)) { 7209 fptr->fcip_flags &= ~(FCIP_IN_TIMEOUT); 7210 mutex_exit(&fptr->fcip_mutex); 7211 return; 7212 } 7213 7214 if (fptr->fcip_port_state == FCIP_PORT_OFFLINE) { 7215 if (fptr->fcip_timeout_ticks > fptr->fcip_mark_offline) { 7216 fptr->fcip_flags |= FCIP_LINK_DOWN; 7217 } 7218 } 7219 if (!fptr->fcip_flags & FCIP_RTE_REMOVING) { 7220 dispatch_rte_removal = 1; 7221 } 7222 mutex_exit(&fptr->fcip_mutex); 7223 7224 /* 7225 * Check if we have any Invalid routing table entries in our 7226 * hashtable we have marked off for deferred removal. If any, 7227 * we can spawn a taskq thread to do the cleanup for us. We 7228 * need to avoid cleanup in the timeout thread since we may 7229 * have to wait for outstanding commands to complete before 7230 * we retire a routing table entry. Also dispatch the taskq 7231 * thread only if we are already do not have a taskq thread 7232 * dispatched. 
7233 */ 7234 if (dispatch_rte_removal) { 7235 mutex_enter(&fptr->fcip_rt_mutex); 7236 for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) { 7237 frtp = fptr->fcip_rtable[index]; 7238 while (frtp) { 7239 if ((frtp->fcipr_state == FCIP_RT_INVALID) && 7240 (fptr->fcip_timeout_ticks > 7241 frtp->fcipr_invalid_timeout)) { 7242 /* 7243 * If we cannot schedule a task thread 7244 * let us attempt again on the next 7245 * tick rather than call 7246 * fcip_rte_remove_deferred() from here 7247 * directly since the routine can sleep. 7248 */ 7249 frtp->fcipr_state = FCIP_RT_RETIRED; 7250 7251 mutex_enter(&fptr->fcip_mutex); 7252 fptr->fcip_flags |= FCIP_RTE_REMOVING; 7253 mutex_exit(&fptr->fcip_mutex); 7254 7255 if (taskq_dispatch(fptr->fcip_tq, 7256 fcip_rte_remove_deferred, fptr, 7257 KM_NOSLEEP) == 0) { 7258 /* 7259 * failed - so mark the entry 7260 * as invalid again. 7261 */ 7262 frtp->fcipr_state = 7263 FCIP_RT_INVALID; 7264 7265 mutex_enter(&fptr->fcip_mutex); 7266 fptr->fcip_flags &= 7267 ~FCIP_RTE_REMOVING; 7268 mutex_exit(&fptr->fcip_mutex); 7269 } 7270 } 7271 frtp = frtp->fcipr_next; 7272 } 7273 } 7274 mutex_exit(&fptr->fcip_rt_mutex); 7275 } 7276 7277 mutex_enter(&fptr->fcip_dest_mutex); 7278 7279 /* 7280 * Now timeout any packets stuck with the transport/FCA for too long 7281 */ 7282 for (i = 0; i < FCIP_DEST_HASH_ELEMS; i++) { 7283 fdestp = fptr->fcip_dest[i]; 7284 while (fdestp != NULL) { 7285 mutex_enter(&fdestp->fcipd_mutex); 7286 for (fcip_pkt = fdestp->fcipd_head; fcip_pkt != NULL; 7287 fcip_pkt = fcip_pkt->fcip_pkt_next) { 7288 if (fcip_pkt->fcip_pkt_flags & 7289 (FCIP_PKT_RETURNED | FCIP_PKT_IN_TIMEOUT | 7290 FCIP_PKT_IN_ABORT)) { 7291 continue; 7292 } 7293 if (fptr->fcip_timeout_ticks > 7294 fcip_pkt->fcip_pkt_ttl) { 7295 fcip_pkt->fcip_pkt_flags |= 7296 FCIP_PKT_IN_TIMEOUT; 7297 7298 mutex_exit(&fdestp->fcipd_mutex); 7299 if (taskq_dispatch(fptr->fcip_tq, 7300 fcip_pkt_timeout, fcip_pkt, 7301 KM_NOSLEEP) == 0) { 7302 /* 7303 * timeout immediately 7304 
*/ 7305 fcip_pkt_timeout(fcip_pkt); 7306 } 7307 mutex_enter(&fdestp->fcipd_mutex); 7308 /* 7309 * The linked list is altered because 7310 * of one of the following reasons: 7311 * a. Timeout code dequeued a pkt 7312 * b. Pkt completion happened 7313 * 7314 * So restart the spin starting at 7315 * the head again; This is a bit 7316 * excessive, but okay since 7317 * fcip_timeout_ticks isn't incremented 7318 * for this spin, we will skip the 7319 * not-to-be-timedout packets quickly 7320 */ 7321 fcip_pkt = fdestp->fcipd_head; 7322 if (fcip_pkt == NULL) { 7323 break; 7324 } 7325 } 7326 } 7327 mutex_exit(&fdestp->fcipd_mutex); 7328 fdestp = fdestp->fcipd_next; 7329 } 7330 } 7331 mutex_exit(&fptr->fcip_dest_mutex); 7332 7333 /* 7334 * reschedule the timeout thread 7335 */ 7336 mutex_enter(&fptr->fcip_mutex); 7337 7338 fptr->fcip_timeout_id = timeout(fcip_timeout, fptr, 7339 drv_usectohz(1000000)); 7340 fptr->fcip_flags &= ~(FCIP_IN_TIMEOUT); 7341 mutex_exit(&fptr->fcip_mutex); 7342 } 7343 7344 7345 /* 7346 * This routine is either called from taskq or directly from fcip_timeout 7347 * does the actual job of aborting the packet 7348 */ 7349 static void 7350 fcip_pkt_timeout(void *arg) 7351 { 7352 fcip_pkt_t *fcip_pkt = (fcip_pkt_t *)arg; 7353 struct fcip_dest *fdestp; 7354 struct fcip *fptr; 7355 fc_packet_t *fc_pkt; 7356 fcip_port_info_t *fport; 7357 int rval; 7358 7359 fdestp = fcip_pkt->fcip_pkt_dest; 7360 fptr = fcip_pkt->fcip_pkt_fptr; 7361 fport = fptr->fcip_port_info; 7362 fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt); 7363 7364 /* 7365 * try to abort the pkt 7366 */ 7367 fcip_pkt->fcip_pkt_flags |= FCIP_PKT_IN_ABORT; 7368 rval = fc_ulp_abort(fport->fcipp_handle, fc_pkt, KM_NOSLEEP); 7369 7370 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, 7371 (CE_NOTE, "fc_ulp_abort returns: 0x%x", rval)); 7372 7373 if (rval == FC_SUCCESS) { 7374 ASSERT(fdestp != NULL); 7375 7376 /* 7377 * dequeue the pkt from the dest structure pkt list 7378 */ 7379 fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_ABORT; 
7380 mutex_enter(&fdestp->fcipd_mutex); 7381 rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt); 7382 ASSERT(rval == 1); 7383 mutex_exit(&fdestp->fcipd_mutex); 7384 7385 /* 7386 * Now cleanup the pkt and free the mblk 7387 */ 7388 fcip_pkt_free(fcip_pkt, 1); 7389 } else { 7390 /* 7391 * abort failed - just mark the pkt as done and 7392 * wait for it to complete in fcip_pkt_callback since 7393 * the pkt has already been xmitted by the FCA 7394 */ 7395 fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_TIMEOUT; 7396 if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_RETURNED) { 7397 fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_ABORT; 7398 mutex_enter(&fdestp->fcipd_mutex); 7399 rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt); 7400 ASSERT(rval == 1); 7401 mutex_exit(&fdestp->fcipd_mutex); 7402 7403 fcip_pkt_free(fcip_pkt, 1); 7404 } 7405 return; 7406 } 7407 } 7408 7409 7410 /* 7411 * Remove a routing table entry marked for deferred removal. This routine 7412 * unlike fcip_pkt_timeout, is always called from a taskq context 7413 */ 7414 static void 7415 fcip_rte_remove_deferred(void *arg) 7416 { 7417 struct fcip *fptr = (struct fcip *)arg; 7418 int hash_bucket; 7419 struct fcip_dest *fdestp; 7420 la_wwn_t *pwwn; 7421 int index; 7422 struct fcip_routing_table *frtp, *frtp_next, *frtp_prev; 7423 7424 7425 mutex_enter(&fptr->fcip_rt_mutex); 7426 for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) { 7427 frtp = fptr->fcip_rtable[index]; 7428 frtp_prev = NULL; 7429 while (frtp) { 7430 frtp_next = frtp->fcipr_next; 7431 7432 if (frtp->fcipr_state == FCIP_RT_RETIRED) { 7433 7434 pwwn = &frtp->fcipr_pwwn; 7435 /* 7436 * Get hold of destination pointer 7437 */ 7438 mutex_enter(&fptr->fcip_dest_mutex); 7439 7440 hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn); 7441 ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS); 7442 7443 fdestp = fptr->fcip_dest[hash_bucket]; 7444 while (fdestp != NULL) { 7445 mutex_enter(&fdestp->fcipd_mutex); 7446 if (fdestp->fcipd_rtable) { 7447 if (fcip_wwn_compare(pwwn, 7448 
						    &fdestp->fcipd_pwwn,
						    FCIP_COMPARE_PWWN) == 0) {
							mutex_exit(
							    &fdestp->fcipd_mutex);
							break;
						}
					}
					mutex_exit(&fdestp->fcipd_mutex);
					fdestp = fdestp->fcipd_next;
				}

				mutex_exit(&fptr->fcip_dest_mutex);
				if (fdestp == NULL) {
					/* no matching dest; leave RTE alone */
					frtp_prev = frtp;
					frtp = frtp_next;
					continue;
				}

				mutex_enter(&fdestp->fcipd_mutex);
				if (fdestp->fcipd_ncmds) {
					/*
					 * Instead of waiting to drain commands
					 * let us revisit this RT entry in
					 * the next pass.
					 */
					mutex_exit(&fdestp->fcipd_mutex);
					frtp_prev = frtp;
					frtp = frtp_next;
					continue;
				}

				/*
				 * We are clean, so remove the RTE
				 */
				fdestp->fcipd_rtable = NULL;
				mutex_exit(&fdestp->fcipd_mutex);

				FCIP_TNF_PROBE_2((fcip_rte_remove_deferred,
				    "fcip io", /* CSTYLED */,
				    tnf_string, msg,
				    "remove retired routing entry",
				    tnf_int, index, index));

				/* unlink from the hash chain and free */
				if (frtp_prev == NULL) {
					/* first element */
					fptr->fcip_rtable[index] =
					    frtp->fcipr_next;
				} else {
					frtp_prev->fcipr_next =
					    frtp->fcipr_next;
				}
				kmem_free(frtp,
				    sizeof (struct fcip_routing_table));

				frtp = frtp_next;
			} else {
				frtp_prev = frtp;
				frtp = frtp_next;
			}
		}
	}
	mutex_exit(&fptr->fcip_rt_mutex);
	/*
	 * Clear the RTE_REMOVING flag
	 */
	mutex_enter(&fptr->fcip_mutex);
	fptr->fcip_flags &= ~FCIP_RTE_REMOVING;
	mutex_exit(&fptr->fcip_mutex);
}

/*
 * Walk through all the dest hash table entries and count up the total
 * no.
of packets outstanding against a given port 7521 */ 7522 static int 7523 fcip_port_get_num_pkts(struct fcip *fptr) 7524 { 7525 int num_cmds = 0; 7526 int i; 7527 struct fcip_dest *fdestp; 7528 7529 ASSERT(mutex_owned(&fptr->fcip_dest_mutex)); 7530 7531 for (i = 0; i < FCIP_DEST_HASH_ELEMS; i++) { 7532 fdestp = fptr->fcip_dest[i]; 7533 while (fdestp != NULL) { 7534 mutex_enter(&fdestp->fcipd_mutex); 7535 7536 ASSERT(fdestp->fcipd_ncmds >= 0); 7537 7538 if (fdestp->fcipd_ncmds > 0) { 7539 num_cmds += fdestp->fcipd_ncmds; 7540 } 7541 mutex_exit(&fdestp->fcipd_mutex); 7542 fdestp = fdestp->fcipd_next; 7543 } 7544 } 7545 7546 return (num_cmds); 7547 } 7548 7549 7550 /* 7551 * Walk through the routing table for this state instance and see if there is a 7552 * PLOGI in progress for any of the entries. Return success even if we find one. 7553 */ 7554 static int 7555 fcip_plogi_in_progress(struct fcip *fptr) 7556 { 7557 int i; 7558 struct fcip_routing_table *frp; 7559 7560 ASSERT(mutex_owned(&fptr->fcip_rt_mutex)); 7561 7562 for (i = 0; i < FCIP_RT_HASH_ELEMS; i++) { 7563 frp = fptr->fcip_rtable[i]; 7564 while (frp) { 7565 if (frp->fcipr_state == FCIP_RT_LOGIN_PROGRESS) { 7566 /* Found an entry where PLOGI is in progress */ 7567 return (1); 7568 } 7569 frp = frp->fcipr_next; 7570 } 7571 } 7572 7573 return (0); 7574 } 7575 7576 /* 7577 * Walk through the fcip port global list and check if the given port exists in 7578 * the list. Returns "0" if port exists and "1" if otherwise. 
7579 */ 7580 static int 7581 fcip_check_port_exists(struct fcip *fptr) 7582 { 7583 fcip_port_info_t *cur_fport; 7584 fcip_port_info_t *fport; 7585 7586 mutex_enter(&fcip_global_mutex); 7587 fport = fptr->fcip_port_info; 7588 cur_fport = fcip_port_head; 7589 while (cur_fport != NULL) { 7590 if (cur_fport == fport) { 7591 /* Found */ 7592 mutex_exit(&fcip_global_mutex); 7593 return (0); 7594 } else { 7595 cur_fport = cur_fport->fcipp_next; 7596 } 7597 } 7598 mutex_exit(&fcip_global_mutex); 7599 7600 return (1); 7601 } 7602 7603 /* 7604 * Constructor to initialize the sendup elements for callback into 7605 * modules upstream 7606 */ 7607 7608 /* ARGSUSED */ 7609 static int 7610 fcip_sendup_constructor(void *buf, void *arg, int flags) 7611 { 7612 struct fcip_sendup_elem *msg_elem = (struct fcip_sendup_elem *)buf; 7613 fcip_port_info_t *fport = (fcip_port_info_t *)arg; 7614 7615 ASSERT(fport != NULL); 7616 7617 msg_elem->fcipsu_mp = NULL; 7618 msg_elem->fcipsu_func = NULL; 7619 msg_elem->fcipsu_next = NULL; 7620 7621 return (FCIP_SUCCESS); 7622 }