Print this page
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/fibre-channel/ulp/fcip.c
+++ new/usr/src/uts/common/io/fibre-channel/ulp/fcip.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /*
27 27 * SunOS 5.x Multithreaded STREAMS DLPI FCIP Module
28 28 * This is a pseudo driver module to handle encapsulation of IP and ARP
29 29 * datagrams over FibreChannel interfaces. FCIP is a cloneable STREAMS
30 30 * driver module which interfaces with IP/ARP using DLPI. This module
31 31 * is a Style-2 DLS provider.
32 32 *
33 33 * The implementation of this module is based on RFC 2625 which gives
34 34 * details on the encapsulation of IP/ARP data over FibreChannel.
35 35 * The fcip module needs to resolve an IP address to a port address before
36 36 * sending data to a destination port. An FC device port has 2 addresses
37 37 * associated with it: An 8 byte World Wide unique Port Name and a 3 byte
38 38 * volatile Port number or Port_ID.
39 39 *
40 40 * The mapping between an IP address and the World Wide Port Name is handled
41 41 * by the ARP layer since the IP over FC draft requires the MAC address to
42 42 * be the least significant six bytes of the WorldWide Port Names. The
43 43 * fcip module however needs to identify the destination port uniquely when
44 44 * the destination FC device has multiple FC ports.
45 45 *
46 46 * The FC layer mapping between the World Wide Port Name and the Port_ID
47 47 * will be handled through the use of a fabric name server or through the
48 48 * use of the FARP ELS command as described in the draft. Since the Port_IDs
49 49 * are volatile, the mapping between the World Wide Port Name and Port_IDs
50 50 * must be maintained and validated before use each time a datagram
51 51 * needs to be sent to the destination ports. The FC transport module
52 52 * informs the fcip module of all changes to states of ports on the
53 53 * fabric through registered callbacks. This enables the fcip module
54 54 * to maintain the WW_PN to Port_ID mappings current.
55 55 *
56 56 * For details on how this module interfaces with the FibreChannel Transport
57 57 * modules, refer to PSARC/1997/385. Chapter 3 of the FibreChannel Transport
58 58 * Programming guide details the APIs between ULPs and the Transport.
59 59 *
60 60 * Now for some Caveats:
61 61 *
62 62 * RFC 2625 requires that a FibreChannel Port name (the Port WWN) have
63 63 * the NAA bits set to '0001' indicating an IEEE 48bit address which
64 64 * corresponds to a ULA (Universal LAN MAC address). But with FibreChannel
65 65 * adapters containing 2 or more ports, IEEE naming cannot identify the
66 66 * ports on an adapter uniquely so we will in the first implementation
67 67 * be operating only on Port 0 of each adapter.
68 68 */
69 69
70 70 #include <sys/types.h>
71 71 #include <sys/errno.h>
72 72 #include <sys/debug.h>
73 73 #include <sys/time.h>
74 74 #include <sys/sysmacros.h>
75 75 #include <sys/systm.h>
76 76 #include <sys/user.h>
77 77 #include <sys/stropts.h>
78 78 #include <sys/stream.h>
79 79 #include <sys/strlog.h>
80 80 #include <sys/strsubr.h>
81 81 #include <sys/cmn_err.h>
82 82 #include <sys/cpu.h>
83 83 #include <sys/kmem.h>
84 84 #include <sys/conf.h>
85 85 #include <sys/ddi.h>
86 86 #include <sys/sunddi.h>
87 87 #include <sys/ksynch.h>
88 88 #include <sys/stat.h>
89 89 #include <sys/kstat.h>
90 90 #include <sys/vtrace.h>
91 91 #include <sys/strsun.h>
92 92 #include <sys/varargs.h>
93 93 #include <sys/modctl.h>
94 94 #include <sys/thread.h>
95 95 #include <sys/var.h>
96 96 #include <sys/proc.h>
97 97 #include <inet/common.h>
98 98 #include <netinet/ip6.h>
99 99 #include <inet/ip.h>
100 100 #include <inet/arp.h>
101 101 #include <inet/mi.h>
102 102 #include <inet/nd.h>
103 103 #include <sys/dlpi.h>
104 104 #include <sys/ethernet.h>
105 105 #include <sys/file.h>
106 106 #include <sys/syslog.h>
107 107 #include <sys/disp.h>
108 108 #include <sys/taskq.h>
109 109
110 110 /*
111 111 * Leadville includes
112 112 */
113 113
114 114 #include <sys/fibre-channel/fc.h>
115 115 #include <sys/fibre-channel/impl/fc_ulpif.h>
116 116 #include <sys/fibre-channel/ulp/fcip.h>
117 117
118 118 /*
119 119 * TNF Probe/trace facility include
120 120 */
121 121 #if defined(lint) || defined(FCIP_TNF_ENABLED)
122 122 #include <sys/tnf_probe.h>
123 123 #endif
124 124
125 125 #define FCIP_ESBALLOC
126 126
127 127 /*
128 128 * Function prototypes
129 129 */
130 130
131 131 /* standard loadable modules entry points */
132 132 static int fcip_attach(dev_info_t *, ddi_attach_cmd_t);
133 133 static int fcip_detach(dev_info_t *, ddi_detach_cmd_t);
134 134 static void fcip_dodetach(struct fcipstr *slp);
135 135 static int fcip_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd,
136 136 void *arg, void **result);
137 137
138 138
139 139 /* streams specific */
140 140 static void fcip_setipq(struct fcip *fptr);
141 141 static int fcip_wput(queue_t *, mblk_t *);
142 142 static int fcip_wsrv(queue_t *);
143 143 static void fcip_proto(queue_t *, mblk_t *);
144 144 static void fcip_ioctl(queue_t *, mblk_t *);
145 145 static int fcip_open(queue_t *wq, dev_t *devp, int flag,
146 146 int sflag, cred_t *credp);
147 147 static int fcip_close(queue_t *rq, int flag, int otyp, cred_t *credp);
148 148 static int fcip_start(queue_t *wq, mblk_t *mp, struct fcip *fptr,
149 149 struct fcip_dest *fdestp, int flags);
150 150 static void fcip_sendup(struct fcip *fptr, mblk_t *mp,
151 151 struct fcipstr *(*acceptfunc)());
152 152 static struct fcipstr *fcip_accept(struct fcipstr *slp, struct fcip *fptr,
153 153 int type, la_wwn_t *dhostp);
154 154 static mblk_t *fcip_addudind(struct fcip *fptr, mblk_t *mp,
155 155 fcph_network_hdr_t *nhdr, int type);
156 156 static int fcip_setup_mac_addr(struct fcip *fptr);
157 157 static void fcip_kstat_init(struct fcip *fptr);
158 158 static int fcip_stat_update(kstat_t *, int);
159 159
160 160
161 161 /* dlpi specific */
162 162 static void fcip_spareq(queue_t *wq, mblk_t *mp);
163 163 static void fcip_pareq(queue_t *wq, mblk_t *mp);
164 164 static void fcip_ubreq(queue_t *wq, mblk_t *mp);
165 165 static void fcip_breq(queue_t *wq, mblk_t *mp);
166 166 static void fcip_dreq(queue_t *wq, mblk_t *mp);
167 167 static void fcip_areq(queue_t *wq, mblk_t *mp);
168 168 static void fcip_udreq(queue_t *wq, mblk_t *mp);
169 169 static void fcip_ireq(queue_t *wq, mblk_t *mp);
170 170 static void fcip_dl_ioc_hdr_info(queue_t *wq, mblk_t *mp);
171 171
172 172
173 173 /* solaris sundry, DR/CPR etc */
174 174 static int fcip_cache_constructor(void *buf, void *arg, int size);
175 175 static void fcip_cache_destructor(void *buf, void *size);
176 176 static int fcip_handle_suspend(fcip_port_info_t *fport, fc_detach_cmd_t cmd);
177 177 static int fcip_handle_resume(fcip_port_info_t *fport,
178 178 fc_ulp_port_info_t *port_info, fc_attach_cmd_t cmd);
179 179 static fcip_port_info_t *fcip_softstate_free(fcip_port_info_t *fport);
180 180 static int fcip_port_attach_handler(struct fcip *fptr);
181 181
182 182
183 183 /*
184 184 * ulp - transport interface function prototypes
185 185 */
186 186 static int fcip_port_attach(opaque_t ulp_handle, fc_ulp_port_info_t *,
187 187 fc_attach_cmd_t cmd, uint32_t sid);
188 188 static int fcip_port_detach(opaque_t ulp_handle, fc_ulp_port_info_t *,
189 189 fc_detach_cmd_t cmd);
190 190 static int fcip_port_ioctl(opaque_t ulp_handle, opaque_t port_handle,
191 191 dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
192 192 uint32_t claimed);
193 193 static void fcip_statec_cb(opaque_t ulp_handle, opaque_t phandle,
194 194 uint32_t port_state, uint32_t port_top, fc_portmap_t changelist[],
195 195 uint32_t listlen, uint32_t sid);
196 196 static int fcip_els_cb(opaque_t ulp_handle, opaque_t phandle,
197 197 fc_unsol_buf_t *buf, uint32_t claimed);
198 198 static int fcip_data_cb(opaque_t ulp_handle, opaque_t phandle,
199 199 fc_unsol_buf_t *payload, uint32_t claimed);
200 200
201 201
202 202 /* Routing table specific */
203 203 static void fcip_handle_topology(struct fcip *fptr);
204 204 static int fcip_init_port(struct fcip *fptr);
205 205 struct fcip_routing_table *fcip_lookup_rtable(struct fcip *fptr,
206 206 la_wwn_t *pwwn, int matchflag);
207 207 static void fcip_rt_update(struct fcip *fptr, fc_portmap_t *devlist,
208 208 uint32_t listlen);
209 209 static void fcip_rt_flush(struct fcip *fptr);
210 210 static void fcip_rte_remove_deferred(void *arg);
211 211 static int fcip_do_plogi(struct fcip *fptr, struct fcip_routing_table *frp);
212 212
213 213
214 214 /* dest table specific */
215 215 static struct fcip_dest *fcip_get_dest(struct fcip *fptr,
216 216 la_wwn_t *dlphys);
217 217 static struct fcip_dest *fcip_add_dest(struct fcip *fptr,
218 218 struct fcip_routing_table *frp);
219 219 static int fcip_dest_add_broadcast_entry(struct fcip *fptr, int new_flag);
220 220 static uint32_t fcip_get_broadcast_did(struct fcip *fptr);
221 221 static void fcip_cleanup_dest(struct fcip *fptr);
222 222
223 223
224 224 /* helper functions */
225 225 static fcip_port_info_t *fcip_get_port(opaque_t phandle);
226 226 static int fcip_wwn_compare(la_wwn_t *wwn1, la_wwn_t *wwn2, int flag);
227 227 static void fcip_ether_to_str(struct ether_addr *e, caddr_t s);
228 228 static int fcip_port_get_num_pkts(struct fcip *fptr);
229 229 static int fcip_check_port_busy(struct fcip *fptr);
230 230 static void fcip_check_remove_minor_node(void);
231 231 static int fcip_set_wwn(la_wwn_t *pwwn);
232 232 static int fcip_plogi_in_progress(struct fcip *fptr);
233 233 static int fcip_check_port_exists(struct fcip *fptr);
234 234 static int fcip_is_supported_fc_topology(int fc_topology);
235 235
236 236
237 237 /* pkt specific */
238 238 static fcip_pkt_t *fcip_pkt_alloc(struct fcip *fptr, mblk_t *bp,
239 239 int flags, int datalen);
240 240 static void fcip_pkt_free(struct fcip_pkt *fcip_pkt, int flags);
241 241 static fcip_pkt_t *fcip_ipkt_alloc(struct fcip *fptr, int cmdlen,
242 242 int resplen, opaque_t pd, int flags);
243 243 static void fcip_ipkt_free(fcip_pkt_t *fcip_pkt);
244 244 static void fcip_ipkt_callback(fc_packet_t *fc_pkt);
245 245 static void fcip_free_pkt_dma(fcip_pkt_t *fcip_pkt);
246 246 static void fcip_pkt_callback(fc_packet_t *fc_pkt);
247 247 static void fcip_init_unicast_pkt(fcip_pkt_t *fcip_pkt, fc_portid_t sid,
248 248 fc_portid_t did, void (*comp) ());
249 249 static int fcip_transport(fcip_pkt_t *fcip_pkt);
250 250 static void fcip_pkt_timeout(void *arg);
251 251 static void fcip_timeout(void *arg);
252 252 static void fcip_fdestp_enqueue_pkt(struct fcip_dest *fdestp,
253 253 fcip_pkt_t *fcip_pkt);
254 254 static int fcip_fdestp_dequeue_pkt(struct fcip_dest *fdestp,
255 255 fcip_pkt_t *fcip_pkt);
256 256 static int fcip_sendup_constructor(void *buf, void *arg, int flags);
257 257 static void fcip_sendup_thr(void *arg);
258 258 static int fcip_sendup_alloc_enque(struct fcip *ftpr, mblk_t *mp,
259 259 struct fcipstr *(*f)());
260 260
261 261 /*
262 262 * zero copy inbound data handling
263 263 */
264 264 #ifdef FCIP_ESBALLOC
265 265 static void fcip_ubfree(char *arg);
266 266 #endif /* FCIP_ESBALLOC */
267 267
268 268 #if !defined(FCIP_ESBALLOC)
269 269 static void *fcip_allocb(size_t size, uint_t pri);
270 270 #endif
271 271
272 272
273 273 /* FCIP FARP support functions */
274 274 static struct fcip_dest *fcip_do_farp(struct fcip *fptr, la_wwn_t *pwwn,
275 275 char *ip_addr, size_t ip_addr_len, int flags);
276 276 static void fcip_init_broadcast_pkt(fcip_pkt_t *fcip_pkt, void (*comp) (),
277 277 int is_els);
278 278 static int fcip_handle_farp_request(struct fcip *fptr, la_els_farp_t *fcmd);
279 279 static int fcip_handle_farp_response(struct fcip *fptr, la_els_farp_t *fcmd);
280 280 static void fcip_cache_arp_broadcast(struct fcip *ftpr, fc_unsol_buf_t *buf);
281 281 static void fcip_port_ns(void *arg);
282 282
283 283 #ifdef DEBUG
284 284
285 285 #include <sys/debug.h>
286 286
287 287 #define FCIP_DEBUG_DEFAULT 0x1
288 288 #define FCIP_DEBUG_ATTACH 0x2
289 289 #define FCIP_DEBUG_INIT 0x4
290 290 #define FCIP_DEBUG_DETACH 0x8
291 291 #define FCIP_DEBUG_DLPI 0x10
292 292 #define FCIP_DEBUG_ELS 0x20
293 293 #define FCIP_DEBUG_DOWNSTREAM 0x40
294 294 #define FCIP_DEBUG_UPSTREAM 0x80
295 295 #define FCIP_DEBUG_MISC 0x100
296 296
297 297 #define FCIP_DEBUG_STARTUP (FCIP_DEBUG_ATTACH|FCIP_DEBUG_INIT)
298 298 #define FCIP_DEBUG_DATAOUT (FCIP_DEBUG_DLPI|FCIP_DEBUG_DOWNSTREAM)
299 299 #define FCIP_DEBUG_DATAIN (FCIP_DEBUG_ELS|FCIP_DEBUG_UPSTREAM)
300 300
301 301 static int fcip_debug = FCIP_DEBUG_DEFAULT;
302 302
303 303 #define FCIP_DEBUG(level, args) \
304 304 if (fcip_debug & (level)) cmn_err args;
305 305
306 306 #else /* DEBUG */
307 307
308 308 #define FCIP_DEBUG(level, args) /* do nothing */
309 309
310 310 #endif /* DEBUG */
311 311
312 312 #define KIOIP KSTAT_INTR_PTR(fcip->fcip_intrstats)
313 313
314 314 /*
315 315 * Endian independent ethernet to WWN copy
316 316 */
317 317 #define ether_to_wwn(E, W) \
318 318 bzero((void *)(W), sizeof (la_wwn_t)); \
319 319 bcopy((void *)(E), (void *)&((W)->raw_wwn[2]), ETHERADDRL); \
320 320 (W)->raw_wwn[0] |= 0x10
321 321
322 322 /*
323 323 * wwn_to_ether : Endian independent, copies a WWN to struct ether_addr.
324 324 * The args to the macro are pointers to WWN and ether_addr structures
325 325 */
326 326 #define wwn_to_ether(W, E) \
327 327 bcopy((void *)&((W)->raw_wwn[2]), (void *)E, ETHERADDRL)
328 328
329 329 /*
330 330 * The module_info structure contains identification and limit values.
331 331 * All queues associated with a certain driver share the same module_info
332 332 * structures. This structure defines the characteristics of that driver/
333 333 * module's queues. The module name must be unique. The max and min packet
334 334 * sizes limit the no. of characters in M_DATA messages. The Hi and Lo
335 335 * water marks are for flow control when a module has a service procedure.
336 336 */
337 337 static struct module_info fcipminfo = {
338 338 FCIPIDNUM, /* mi_idnum : Module ID num */
339 339 FCIPNAME, /* mi_idname: Module Name */
340 340 FCIPMINPSZ, /* mi_minpsz: Min packet size */
341 341 FCIPMAXPSZ, /* mi_maxpsz: Max packet size */
342 342 FCIPHIWAT, /* mi_hiwat : High water mark */
343 343 FCIPLOWAT /* mi_lowat : Low water mark */
344 344 };
345 345
346 346 /*
347 347 * The qinit structures contain the module put, service, open and close
348 348 * procedure pointers. All modules and drivers with the same streamtab
349 349 * file (i.e. same fmodsw or cdevsw entry points) point to the same
350 350 * upstream (read) and downstream (write) qinit structs.
351 351 */
352 352 static struct qinit fcip_rinit = {
353 353 NULL, /* qi_putp */
354 354 NULL, /* qi_srvp */
355 355 fcip_open, /* qi_qopen */
356 356 fcip_close, /* qi_qclose */
357 357 NULL, /* qi_qadmin */
358 358 &fcipminfo, /* qi_minfo */
359 359 NULL /* qi_mstat */
360 360 };
361 361
362 362 static struct qinit fcip_winit = {
363 363 fcip_wput, /* qi_putp */
364 364 fcip_wsrv, /* qi_srvp */
365 365 NULL, /* qi_qopen */
366 366 NULL, /* qi_qclose */
367 367 NULL, /* qi_qadmin */
368 368 &fcipminfo, /* qi_minfo */
369 369 NULL /* qi_mstat */
370 370 };
371 371
372 372 /*
373 373 * streamtab contains pointers to the read and write qinit structures
374 374 */
375 375
376 376 static struct streamtab fcip_info = {
377 377 &fcip_rinit, /* st_rdinit */
378 378 &fcip_winit, /* st_wrinit */
379 379 NULL, /* st_muxrinit */
380 380 NULL, /* st_muxwrinit */
381 381 };
382 382
383 383 static struct cb_ops fcip_cb_ops = {
384 384 nodev, /* open */
385 385 nodev, /* close */
386 386 nodev, /* strategy */
387 387 nodev, /* print */
388 388 nodev, /* dump */
389 389 nodev, /* read */
390 390 nodev, /* write */
391 391 nodev, /* ioctl */
392 392 nodev, /* devmap */
393 393 nodev, /* mmap */
394 394 nodev, /* segmap */
395 395 nochpoll, /* poll */
396 396 ddi_prop_op, /* cb_prop_op */
397 397 &fcip_info, /* streamtab */
398 398 D_MP | D_HOTPLUG, /* Driver compatibility flag */
399 399 CB_REV, /* rev */
400 400 nodev, /* int (*cb_aread)() */
401 401 nodev /* int (*cb_awrite)() */
402 402 };
403 403
404 404 /*
405 405 * autoconfiguration routines.
406 406 */
407 407 static struct dev_ops fcip_ops = {
408 408 DEVO_REV, /* devo_rev, */
409 409 0, /* refcnt */
410 410 fcip_getinfo, /* info */
411 411 nulldev, /* identify */
412 412 nulldev, /* probe */
413 413 fcip_attach, /* attach */
414 414 fcip_detach, /* detach */
415 415 nodev, /* RESET */
416 416 &fcip_cb_ops, /* driver operations */
417 417 NULL, /* bus operations */
418 418 ddi_power /* power management */
419 419 };
420 420
421 421 #define FCIP_VERSION "1.61"
422 422 #define FCIP_NAME "SunFC FCIP v" FCIP_VERSION
423 423
424 424 #define PORT_DRIVER "fp"
425 425
426 426 #define GETSTRUCT(struct, number) \
↓ open down ↓ |
426 lines elided |
↑ open up ↑ |
427 427 ((struct *)kmem_zalloc((size_t)(sizeof (struct) * (number)), \
428 428 KM_SLEEP))
429 429
430 430 static struct modldrv modldrv = {
431 431 &mod_driverops, /* Type of module - driver */
432 432 FCIP_NAME, /* Name of module */
433 433 &fcip_ops, /* driver ops */
434 434 };
435 435
436 436 static struct modlinkage modlinkage = {
437 - MODREV_1, (void *)&modldrv, NULL
437 + MODREV_1, { (void *)&modldrv, NULL }
438 438 };
439 439
440 440
441 441 /*
442 442 * Now for some global statics
443 443 */
444 444 static uint32_t fcip_ub_nbufs = FCIP_UB_NBUFS;
445 445 static uint32_t fcip_ub_size = FCIP_UB_SIZE;
446 446 static int fcip_pkt_ttl_ticks = FCIP_PKT_TTL;
447 447 static int fcip_tick_incr = 1;
448 448 static int fcip_wait_cmds = FCIP_WAIT_CMDS;
449 449 static int fcip_num_attaching = 0;
450 450 static int fcip_port_attach_pending = 0;
451 451 static int fcip_create_nodes_on_demand = 1; /* keep it similar to fcp */
452 452 static int fcip_cache_on_arp_broadcast = 0;
453 453 static int fcip_farp_supported = 0;
454 454 static int fcip_minor_node_created = 0;
455 455
456 456 /*
457 457 * Supported FCAs
458 458 */
459 459 #define QLC_PORT_1_ID_BITS 0x100
460 460 #define QLC_PORT_2_ID_BITS 0x101
461 461 #define QLC_PORT_NAA 0x2
462 462 #define QLC_MODULE_NAME "qlc"
463 463 #define IS_QLC_PORT(port_dip) \
464 464 (strcmp(ddi_driver_name(ddi_get_parent((port_dip))),\
465 465 QLC_MODULE_NAME) == 0)
466 466
467 467
468 468 /*
469 469 * fcip softstate structures head.
470 470 */
471 471
472 472 static void *fcip_softp = NULL;
473 473
474 474 /*
475 475 * linked list of active (inuse) driver streams
476 476 */
477 477
478 478 static int fcip_num_instances = 0;
479 479 static dev_info_t *fcip_module_dip = (dev_info_t *)0;
480 480
481 481
482 482 /*
483 483 * Ethernet broadcast address: Broadcast addressing in IP over fibre
484 484 * channel should be the IEEE ULA (also the low 6 bytes of the Port WWN).
485 485 *
486 486 * The broadcast addressing varies for differing topologies a node may be in:
487 487 * - On a private loop the ARP broadcast is a class 3 sequence sent
488 488 * using OPNfr (Open Broadcast Replicate primitive) followed by
489 489 * the ARP frame to D_ID 0xFFFFFF
490 490 *
491 491 * - On a public Loop the broadcast sequence is sent to AL_PA 0x00
492 492 * (no OPNfr primitive).
493 493 *
494 494 * - For direct attach and point to point topologies we just send
495 495 * the frame to D_ID 0xFFFFFF
496 496 *
497 497 * For public loop the handling would probably be different - for now
498 498 * I'll just declare this struct - It can be deleted if not necessary.
499 499 *
500 500 */
501 501
502 502
503 503 /*
504 504 * DL_INFO_ACK template for the fcip module. The dl_info_ack_t structure is
505 505 * returned as a part of an DL_INFO_ACK message which is a M_PCPROTO message
506 506 * returned in response to a DL_INFO_REQ message sent to us from a DLS user
507 507 * Let us fake an ether header as much as possible.
508 508 *
509 509 * dl_addr_length is the Provider's DLSAP addr which is SAP addr +
510 510 * Physical addr of the provider. We set this to
511 511 * ushort_t + sizeof (la_wwn_t) for Fibre Channel ports.
512 512 * dl_mac_type Lets just use DL_ETHER - we can try using DL_IPFC, a new
513 513 * dlpi.h define later.
514 514 * dl_sap_length -2 indicating the SAP address follows the Physical addr
515 515 * component in the DLSAP addr.
516 516 * dl_service_mode: DLCLDS - connectionless data link service.
517 517 *
518 518 */
519 519
520 520 static dl_info_ack_t fcip_infoack = {
521 521 DL_INFO_ACK, /* dl_primitive */
522 522 FCIPMTU, /* dl_max_sdu */
523 523 0, /* dl_min_sdu */
524 524 FCIPADDRL, /* dl_addr_length */
525 525 DL_ETHER, /* dl_mac_type */
526 526 0, /* dl_reserved */
527 527 0, /* dl_current_state */
528 528 -2, /* dl_sap_length */
529 529 DL_CLDLS, /* dl_service_mode */
530 530 0, /* dl_qos_length */
531 531 0, /* dl_qos_offset */
532 532 0, /* dl_range_length */
533 533 0, /* dl_range_offset */
534 534 DL_STYLE2, /* dl_provider_style */
535 535 sizeof (dl_info_ack_t), /* dl_addr_offset */
↓ open down ↓ |
88 lines elided |
↑ open up ↑ |
536 536 DL_VERSION_2, /* dl_version */
537 537 ETHERADDRL, /* dl_brdcst_addr_length */
538 538 sizeof (dl_info_ack_t) + FCIPADDRL, /* dl_brdcst_addr_offset */
539 539 0 /* dl_growth */
540 540 };
541 541
542 542 /*
543 543 * FCIP broadcast address definition.
544 544 */
545 545 static struct ether_addr fcipnhbroadcastaddr = {
546 - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
546 + { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
547 547 };
548 548
549 549 /*
550 550 * RFC2625 requires the broadcast ARP address in the ARP data payload to
551 551 * be set to 0x00 00 00 00 00 00 for ARP broadcast packets
552 552 */
553 553 static struct ether_addr fcip_arpbroadcast_addr = {
554 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
554 + { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
555 555 };
556 556
557 557
558 558 #define ether_bcopy(src, dest) bcopy((src), (dest), ETHERADDRL);
559 559
560 560 /*
561 561 * global kernel locks
562 562 */
563 563 static kcondvar_t fcip_global_cv;
564 564 static kmutex_t fcip_global_mutex;
565 565
566 566 /*
567 567 * fctl external defines
568 568 */
569 569 extern int fc_ulp_add(fc_ulp_modinfo_t *);
570 570
571 571 /*
572 572 * fctl data structures
573 573 */
574 574
575 575 #define FCIP_REV 0x07
576 576
577 577 /* linked list of port info structures */
578 578 static fcip_port_info_t *fcip_port_head = NULL;
579 579
580 580 /* linked list of fcip structures */
581 581 static struct fcipstr *fcipstrup = NULL;
582 582 static krwlock_t fcipstruplock;
583 583
584 584
585 585 /*
586 586 * Module information structure. This structure gives the FC Transport modules
587 587 * information about an ULP that registers with it.
588 588 */
589 589 static fc_ulp_modinfo_t fcip_modinfo = {
590 590 0, /* for xref checks? */
591 591 FCTL_ULP_MODREV_4, /* FCIP revision */
592 592 FC_TYPE_IS8802_SNAP, /* type 5 for SNAP encapsulated datagrams */
593 593 FCIP_NAME, /* module name as in the modldrv struct */
594 594 0x0, /* get all statec callbacks for now */
595 595 fcip_port_attach, /* port attach callback */
596 596 fcip_port_detach, /* port detach callback */
597 597 fcip_port_ioctl, /* port ioctl callback */
598 598 fcip_els_cb, /* els callback */
599 599 fcip_data_cb, /* data callback */
600 600 fcip_statec_cb /* state change callback */
601 601 };
602 602
603 603
604 604 /*
605 605 * Solaris 9 and up, the /kernel/drv/fp.conf file will have the following entry
606 606 *
607 607 * ddi-forceattach=1;
608 608 *
609 609 * This will ensure that fp is loaded at bootup. No additional checks are needed
610 610 */
611 611 int
612 612 _init(void)
613 613 {
614 614 int rval;
615 615
616 616 FCIP_TNF_LOAD();
617 617
618 618 /*
619 619 * Initialize the mutexs used by port attach and other callbacks.
620 620 * The transport can call back into our port_attach_callback
621 621 * routine even before _init() completes and bad things can happen.
622 622 */
623 623 mutex_init(&fcip_global_mutex, NULL, MUTEX_DRIVER, NULL);
624 624 cv_init(&fcip_global_cv, NULL, CV_DRIVER, NULL);
625 625 rw_init(&fcipstruplock, NULL, RW_DRIVER, NULL);
626 626
627 627 mutex_enter(&fcip_global_mutex);
628 628 fcip_port_attach_pending = 1;
629 629 mutex_exit(&fcip_global_mutex);
630 630
631 631 /*
632 632 * Now attempt to register fcip with the transport.
633 633 * If fc_ulp_add fails, fcip module will not be loaded.
634 634 */
635 635 rval = fc_ulp_add(&fcip_modinfo);
636 636 if (rval != FC_SUCCESS) {
637 637 mutex_destroy(&fcip_global_mutex);
638 638 cv_destroy(&fcip_global_cv);
639 639 rw_destroy(&fcipstruplock);
640 640 switch (rval) {
641 641 case FC_ULP_SAMEMODULE:
642 642 FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
643 643 "!fcip: module is already registered with"
644 644 " transport"));
645 645 rval = EEXIST;
646 646 break;
647 647 case FC_ULP_SAMETYPE:
648 648 FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
649 649 "!fcip: Another module of the same ULP type 0x%x"
650 650 " is already registered with the transport",
651 651 fcip_modinfo.ulp_type));
652 652 rval = EEXIST;
653 653 break;
654 654 case FC_BADULP:
655 655 FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
656 656 "!fcip: Current fcip version 0x%x does not match"
657 657 " fctl version",
658 658 fcip_modinfo.ulp_rev));
659 659 rval = ENODEV;
660 660 break;
661 661 default:
662 662 FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
663 663 "!fcip: fc_ulp_add failed with status 0x%x", rval));
664 664 rval = ENODEV;
665 665 break;
666 666 }
667 667 FCIP_TNF_UNLOAD(&modlinkage);
668 668 return (rval);
669 669 }
670 670
671 671 if ((rval = ddi_soft_state_init(&fcip_softp, sizeof (struct fcip),
672 672 FCIP_NUM_INSTANCES)) != 0) {
673 673 mutex_destroy(&fcip_global_mutex);
674 674 cv_destroy(&fcip_global_cv);
675 675 rw_destroy(&fcipstruplock);
676 676 (void) fc_ulp_remove(&fcip_modinfo);
677 677 FCIP_TNF_UNLOAD(&modlinkage);
678 678 return (rval);
679 679 }
680 680
681 681 if ((rval = mod_install(&modlinkage)) != 0) {
682 682 FCIP_TNF_UNLOAD(&modlinkage);
683 683 (void) fc_ulp_remove(&fcip_modinfo);
684 684 mutex_destroy(&fcip_global_mutex);
685 685 cv_destroy(&fcip_global_cv);
686 686 rw_destroy(&fcipstruplock);
687 687 ddi_soft_state_fini(&fcip_softp);
688 688 }
689 689 return (rval);
690 690 }
691 691
692 692 /*
693 693 * Unload the port driver if this was the only ULP loaded and then
694 694 * deregister with the transport.
695 695 */
696 696 int
697 697 _fini(void)
698 698 {
699 699 int rval;
700 700 int rval1;
701 701
702 702 /*
703 703 * Do not permit the module to be unloaded before a port
704 704 * attach callback has happened.
705 705 */
706 706 mutex_enter(&fcip_global_mutex);
707 707 if (fcip_num_attaching || fcip_port_attach_pending) {
708 708 mutex_exit(&fcip_global_mutex);
709 709 return (EBUSY);
710 710 }
711 711 mutex_exit(&fcip_global_mutex);
712 712
713 713 if ((rval = mod_remove(&modlinkage)) != 0) {
714 714 return (rval);
715 715 }
716 716
717 717 /*
718 718 * unregister with the transport layer
719 719 */
720 720 rval1 = fc_ulp_remove(&fcip_modinfo);
721 721
722 722 /*
723 723 * If the ULP was not registered with the transport, init should
724 724 * have failed. If transport has no knowledge of our existence
725 725 * we should simply bail out and succeed
726 726 */
727 727 #ifdef DEBUG
728 728 if (rval1 == FC_BADULP) {
729 729 FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
730 730 "fcip: ULP was never registered with the transport"));
731 731 rval = ENODEV;
732 732 } else if (rval1 == FC_BADTYPE) {
733 733 FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
734 734 "fcip: No ULP of this type 0x%x was registered with "
735 735 "transport", fcip_modinfo.ulp_type));
736 736 rval = ENODEV;
737 737 }
738 738 #endif /* DEBUG */
739 739
740 740 mutex_destroy(&fcip_global_mutex);
741 741 rw_destroy(&fcipstruplock);
742 742 cv_destroy(&fcip_global_cv);
743 743 ddi_soft_state_fini(&fcip_softp);
744 744
745 745 FCIP_TNF_UNLOAD(&modlinkage);
746 746
747 747 return (rval);
748 748 }
749 749
750 750 /*
751 751 * Info about this loadable module
752 752 */
753 753 int
754 754 _info(struct modinfo *modinfop)
755 755 {
756 756 return (mod_info(&modlinkage, modinfop));
757 757 }
758 758
759 759 /*
760 760 * The port attach callback is invoked by the port driver when a FCA
761 761 * port comes online and binds with the transport layer. The transport
762 762 * then callsback into all ULP modules registered with it. The Port attach
763 763 * call back will also provide the ULP module with the Port's WWN and S_ID
764 764 */
765 765 /* ARGSUSED */
766 766 static int
767 767 fcip_port_attach(opaque_t ulp_handle, fc_ulp_port_info_t *port_info,
768 768 fc_attach_cmd_t cmd, uint32_t sid)
769 769 {
770 770 int rval = FC_FAILURE;
771 771 int instance;
772 772 struct fcip *fptr;
773 773 fcip_port_info_t *fport = NULL;
774 774 fcip_port_info_t *cur_fport;
775 775 fc_portid_t src_id;
776 776
777 777 switch (cmd) {
778 778 case FC_CMD_ATTACH: {
779 779 la_wwn_t *ww_pn = NULL;
780 780 /*
781 781 * It was determined that, as per spec, the lower 48 bits of
782 782 * the port-WWN will always be unique. This will make the MAC
783 783 * address (i.e the lower 48 bits of the WWN), that IP/ARP
784 784 * depend on, unique too. Hence we should be able to remove the
785 785 * restriction of attaching to only one of the ports of
786 786 * multi port FCAs.
787 787 *
788 788 * Earlier, fcip used to attach only to qlc module and fail
789 789 * silently for attach failures resulting from unknown FCAs or
790 790 * unsupported FCA ports. Now, we'll do no such checks.
791 791 */
792 792 ww_pn = &port_info->port_pwwn;
793 793
794 794 FCIP_TNF_PROBE_2((fcip_port_attach, "fcip io", /* CSTYLED */,
795 795 tnf_string, msg, "port id bits",
796 796 tnf_opaque, nport_id, ww_pn->w.nport_id));
797 797 FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_NOTE,
798 798 "port id bits: 0x%x", ww_pn->w.nport_id));
799 799 /*
800 800 * A port has come online
801 801 */
802 802 mutex_enter(&fcip_global_mutex);
803 803 fcip_num_instances++;
804 804 fcip_num_attaching++;
805 805
806 806 if (fcip_port_head == NULL) {
807 807 /* OK to sleep here ? */
808 808 fport = kmem_zalloc(sizeof (fcip_port_info_t),
809 809 KM_NOSLEEP);
810 810 if (fport == NULL) {
811 811 fcip_num_instances--;
812 812 fcip_num_attaching--;
813 813 ASSERT(fcip_num_attaching >= 0);
814 814 mutex_exit(&fcip_global_mutex);
815 815 rval = FC_FAILURE;
816 816 cmn_err(CE_WARN, "!fcip(%d): port attach "
817 817 "failed: alloc failed",
818 818 ddi_get_instance(port_info->port_dip));
819 819 goto done;
820 820 }
821 821 fcip_port_head = fport;
822 822 } else {
823 823 /*
824 824 * traverse the port list and also check for
825 825 * duplicate port attaches - Nothing wrong in being
826 826 * paranoid Heh Heh.
827 827 */
828 828 cur_fport = fcip_port_head;
829 829 while (cur_fport != NULL) {
830 830 if (cur_fport->fcipp_handle ==
831 831 port_info->port_handle) {
832 832 fcip_num_instances--;
833 833 fcip_num_attaching--;
834 834 ASSERT(fcip_num_attaching >= 0);
835 835 mutex_exit(&fcip_global_mutex);
836 836 FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_WARN,
837 837 "!fcip(%d): port already "
838 838 "attached!!", ddi_get_instance(
839 839 port_info->port_dip)));
840 840 rval = FC_FAILURE;
841 841 goto done;
842 842 }
843 843 cur_fport = cur_fport->fcipp_next;
844 844 }
845 845 fport = kmem_zalloc(sizeof (fcip_port_info_t),
846 846 KM_NOSLEEP);
847 847 if (fport == NULL) {
848 848 rval = FC_FAILURE;
849 849 fcip_num_instances--;
850 850 fcip_num_attaching--;
851 851 ASSERT(fcip_num_attaching >= 0);
852 852 mutex_exit(&fcip_global_mutex);
853 853 cmn_err(CE_WARN, "!fcip(%d): port attach "
854 854 "failed: alloc failed",
855 855 ddi_get_instance(port_info->port_dip));
856 856 goto done;
857 857 }
858 858 fport->fcipp_next = fcip_port_head;
859 859 fcip_port_head = fport;
860 860 }
861 861
862 862 mutex_exit(&fcip_global_mutex);
863 863
864 864 /*
865 865 * now fill in the details about the port itself
866 866 */
867 867 fport->fcipp_linkage = *port_info->port_linkage;
868 868 fport->fcipp_handle = port_info->port_handle;
869 869 fport->fcipp_dip = port_info->port_dip;
870 870 fport->fcipp_topology = port_info->port_flags;
871 871 fport->fcipp_pstate = port_info->port_state;
872 872 fport->fcipp_naa = port_info->port_pwwn.w.naa_id;
873 873 bcopy(&port_info->port_pwwn, &fport->fcipp_pwwn,
874 874 sizeof (la_wwn_t));
875 875 bcopy(&port_info->port_nwwn, &fport->fcipp_nwwn,
876 876 sizeof (la_wwn_t));
877 877 fport->fcipp_fca_pkt_size = port_info->port_fca_pkt_size;
878 878 fport->fcipp_cmd_dma_attr = *port_info->port_cmd_dma_attr;
879 879 fport->fcipp_resp_dma_attr = *port_info->port_resp_dma_attr;
880 880 fport->fcipp_fca_acc_attr = *port_info->port_acc_attr;
881 881 src_id.port_id = sid;
882 882 src_id.priv_lilp_posit = 0;
883 883 fport->fcipp_sid = src_id;
884 884
885 885 /*
886 886 * allocate soft state for this instance
887 887 */
888 888 instance = ddi_get_instance(fport->fcipp_dip);
889 889 if (ddi_soft_state_zalloc(fcip_softp,
890 890 instance) != DDI_SUCCESS) {
891 891 rval = FC_FAILURE;
892 892 cmn_err(CE_WARN, "!fcip(%d): port attach failed: "
893 893 "soft state alloc failed", instance);
894 894 goto failure;
895 895 }
896 896
897 897 fptr = ddi_get_soft_state(fcip_softp, instance);
898 898
899 899 if (fptr == NULL) {
900 900 rval = FC_FAILURE;
901 901 cmn_err(CE_WARN, "!fcip(%d): port attach failed: "
902 902 "failure to get soft state", instance);
903 903 goto failure;
904 904 }
905 905
906 906 /*
907 907 * initialize all mutexes and locks required for this module
908 908 */
909 909 mutex_init(&fptr->fcip_mutex, NULL, MUTEX_DRIVER, NULL);
910 910 mutex_init(&fptr->fcip_ub_mutex, NULL, MUTEX_DRIVER, NULL);
911 911 mutex_init(&fptr->fcip_rt_mutex, NULL, MUTEX_DRIVER, NULL);
912 912 mutex_init(&fptr->fcip_dest_mutex, NULL, MUTEX_DRIVER, NULL);
913 913 mutex_init(&fptr->fcip_sendup_mutex, NULL, MUTEX_DRIVER, NULL);
914 914 cv_init(&fptr->fcip_farp_cv, NULL, CV_DRIVER, NULL);
915 915 cv_init(&fptr->fcip_sendup_cv, NULL, CV_DRIVER, NULL);
916 916 cv_init(&fptr->fcip_ub_cv, NULL, CV_DRIVER, NULL);
917 917
918 918 mutex_enter(&fptr->fcip_mutex);
919 919
920 920 fptr->fcip_dip = fport->fcipp_dip; /* parent's dip */
921 921 fptr->fcip_instance = instance;
922 922 fptr->fcip_ub_upstream = 0;
923 923
924 924 if (FC_PORT_STATE_MASK(port_info->port_state) ==
925 925 FC_STATE_ONLINE) {
926 926 fptr->fcip_port_state = FCIP_PORT_ONLINE;
927 927 if (fptr->fcip_flags & FCIP_LINK_DOWN) {
928 928 fptr->fcip_flags &= ~FCIP_LINK_DOWN;
929 929 }
930 930 } else {
931 931 fptr->fcip_port_state = FCIP_PORT_OFFLINE;
932 932 }
933 933
934 934 fptr->fcip_flags |= FCIP_ATTACHING;
935 935 fptr->fcip_port_info = fport;
936 936
937 937 /*
938 938 * Extract our MAC addr from our port's WWN. The lower 48
939 939 * bits will be our MAC address
940 940 */
941 941 wwn_to_ether(&fport->fcipp_nwwn, &fptr->fcip_macaddr);
942 942
943 943 fport->fcipp_fcip = fptr;
944 944
945 945 FCIP_DEBUG(FCIP_DEBUG_ATTACH,
946 946 (CE_NOTE, "fcipdest : 0x%lx, rtable : 0x%lx",
947 947 (long)(sizeof (fptr->fcip_dest)),
948 948 (long)(sizeof (fptr->fcip_rtable))));
949 949
950 950 bzero(fptr->fcip_dest, sizeof (fptr->fcip_dest));
951 951 bzero(fptr->fcip_rtable, sizeof (fptr->fcip_rtable));
952 952
953 953 /*
954 954 * create a taskq to handle sundry jobs for the driver
955 955 * This way we can have jobs run in parallel
956 956 */
957 957 fptr->fcip_tq = taskq_create("fcip_tasks",
958 958 FCIP_NUM_THREADS, MINCLSYSPRI, FCIP_MIN_TASKS,
959 959 FCIP_MAX_TASKS, TASKQ_PREPOPULATE);
960 960
961 961 mutex_exit(&fptr->fcip_mutex);
962 962
963 963 /*
964 964 * create a separate thread to handle all unsolicited
965 965 * callback handling. This is because unsolicited_callback
966 966 * can happen from an interrupt context and the upstream
967 967 * modules can put new messages right back in the same
968 968 * thread context. This usually works fine, but sometimes
969 969 * we may have to block to obtain the dest struct entries
970 970 * for some remote ports.
971 971 */
972 972 mutex_enter(&fptr->fcip_sendup_mutex);
973 973 if (thread_create(NULL, DEFAULTSTKSZ,
974 974 (void (*)())fcip_sendup_thr, (caddr_t)fptr, 0, &p0,
975 975 TS_RUN, minclsyspri) == NULL) {
976 976 mutex_exit(&fptr->fcip_sendup_mutex);
977 977 cmn_err(CE_WARN,
978 978 "!unable to create fcip sendup thread for "
979 979 " instance: 0x%x", instance);
980 980 rval = FC_FAILURE;
981 981 goto done;
982 982 }
983 983 fptr->fcip_sendup_thr_initted = 1;
984 984 fptr->fcip_sendup_head = fptr->fcip_sendup_tail = NULL;
985 985 mutex_exit(&fptr->fcip_sendup_mutex);
986 986
987 987
988 988 /* Let the attach handler do the rest */
989 989 if (fcip_port_attach_handler(fptr) != FC_SUCCESS) {
990 990 /*
991 991 * We have already cleaned up so return
992 992 */
993 993 rval = FC_FAILURE;
994 994 cmn_err(CE_WARN, "!fcip(%d): port attach failed",
995 995 instance);
996 996 goto done;
997 997 }
998 998
999 999 FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_CONT,
1000 1000 "!fcip attach for port instance (0x%x) successful",
1001 1001 instance));
1002 1002
1003 1003 rval = FC_SUCCESS;
1004 1004 goto done;
1005 1005 }
1006 1006 case FC_CMD_POWER_UP:
1007 1007 /* FALLTHROUGH */
1008 1008 case FC_CMD_RESUME:
1009 1009 mutex_enter(&fcip_global_mutex);
1010 1010 fport = fcip_port_head;
1011 1011 while (fport != NULL) {
1012 1012 if (fport->fcipp_handle == port_info->port_handle) {
1013 1013 break;
1014 1014 }
1015 1015 fport = fport->fcipp_next;
1016 1016 }
1017 1017 if (fport == NULL) {
1018 1018 rval = FC_SUCCESS;
1019 1019 mutex_exit(&fcip_global_mutex);
1020 1020 goto done;
1021 1021 }
1022 1022 rval = fcip_handle_resume(fport, port_info, cmd);
1023 1023 mutex_exit(&fcip_global_mutex);
1024 1024 goto done;
1025 1025
1026 1026 default:
1027 1027 FCIP_TNF_PROBE_2((fcip_port_attach, "fcip io", /* CSTYLED */,
1028 1028 tnf_string, msg, "unknown command type",
1029 1029 tnf_uint, cmd, cmd));
1030 1030 FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_WARN,
1031 1031 "unknown cmd type 0x%x in port_attach", cmd));
1032 1032 rval = FC_FAILURE;
1033 1033 goto done;
1034 1034 }
1035 1035
1036 1036 failure:
1037 1037 if (fport) {
1038 1038 mutex_enter(&fcip_global_mutex);
1039 1039 fcip_num_attaching--;
1040 1040 ASSERT(fcip_num_attaching >= 0);
1041 1041 (void) fcip_softstate_free(fport);
1042 1042 fcip_port_attach_pending = 0;
1043 1043 mutex_exit(&fcip_global_mutex);
1044 1044 }
1045 1045 return (rval);
1046 1046
1047 1047 done:
1048 1048 mutex_enter(&fcip_global_mutex);
1049 1049 fcip_port_attach_pending = 0;
1050 1050 mutex_exit(&fcip_global_mutex);
1051 1051 return (rval);
1052 1052 }
1053 1053
/*
 * fcip_port_attach_handler : Completes the port attach operation after
 * the ulp_port_attach routine has completed its ground work. The job
 * of this function among other things is to obtain and handle topology
 * specifics, initialize a port, setup broadcast address entries in
 * the fcip tables etc. This routine cleans up behind itself on failures.
 * Returns FC_SUCCESS or FC_FAILURE.
 */
static int
fcip_port_attach_handler(struct fcip *fptr)
{
	fcip_port_info_t		*fport = fptr->fcip_port_info;
	int				rval = FC_FAILURE;

	ASSERT(fport != NULL);

	mutex_enter(&fcip_global_mutex);

	FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_NOTE,
	    "fcip module dip: %p instance: %d",
	    (void *)fcip_module_dip, ddi_get_instance(fptr->fcip_dip)));

	/*
	 * fcip_module_dip is set by fcip_attach() (the pseudo-device
	 * attach). If it hasn't happened yet, wait for the cv_signal
	 * from fcip_attach for up to FCIP_INIT_DELAY before giving up.
	 */
	if (fcip_module_dip == NULL) {
		clock_t		fcip_lbolt;

		fcip_lbolt = ddi_get_lbolt();
		/*
		 * we need to use the fcip devinfo for creating
		 * the clone device node, but the fcip attach
		 * (from its conf file entry claiming to be a
		 * child of pseudo) may not have happened yet.
		 * wait here for 10 seconds and fail port attach
		 * if the fcip devinfo is not attached yet
		 */
		fcip_lbolt += drv_usectohz(FCIP_INIT_DELAY);

		FCIP_DEBUG(FCIP_DEBUG_ATTACH,
		    (CE_WARN, "cv_timedwait lbolt %lx", fcip_lbolt));

		/* cv_timedwait drops fcip_global_mutex while blocked */
		(void) cv_timedwait(&fcip_global_cv, &fcip_global_mutex,
		    fcip_lbolt);

		if (fcip_module_dip == NULL) {
			mutex_exit(&fcip_global_mutex);

			FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_WARN,
			    "fcip attach did not happen"));
			goto port_attach_cleanup;
		}
	}

	/*
	 * There is a single system-wide minor node; create it lazily on
	 * the first port attach with a usable topology.
	 */
	if ((!fcip_minor_node_created) &&
	    fcip_is_supported_fc_topology(fport->fcipp_topology)) {
		/*
		 * Checking for same topologies which are considered valid
		 * by fcip_handle_topology(). Dont create a minor node if
		 * nothing is hanging off the FC port.
		 */
		if (ddi_create_minor_node(fcip_module_dip, "fcip", S_IFCHR,
		    ddi_get_instance(fptr->fcip_dip), DDI_PSEUDO,
		    CLONE_DEV) == DDI_FAILURE) {
			mutex_exit(&fcip_global_mutex);
			FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_WARN,
			    "failed to create minor node for fcip(%d)",
			    ddi_get_instance(fptr->fcip_dip)));
			goto port_attach_cleanup;
		}
		fcip_minor_node_created++;
	}
	mutex_exit(&fcip_global_mutex);

	/*
	 * initialize port for traffic
	 */
	if (fcip_init_port(fptr) != FC_SUCCESS) {
		/* fcip_init_port has already cleaned up its stuff */

		mutex_enter(&fcip_global_mutex);

		if ((fcip_num_instances == 1) &&
		    (fcip_minor_node_created == 1)) {
			/* Remove minor node iff this is the last instance */
			ddi_remove_minor_node(fcip_module_dip, NULL);
		}

		mutex_exit(&fcip_global_mutex);

		goto port_attach_cleanup;
	}

	/* Port is usable: clear ATTACHING, mark INITED, arm the timer */
	mutex_enter(&fptr->fcip_mutex);
	fptr->fcip_flags &= ~FCIP_ATTACHING;
	fptr->fcip_flags |= FCIP_INITED;
	fptr->fcip_timeout_ticks = 0;

	/*
	 * start the timeout threads
	 */
	fptr->fcip_timeout_id = timeout(fcip_timeout, fptr,
	    drv_usectohz(1000000));

	mutex_exit(&fptr->fcip_mutex);
	mutex_enter(&fcip_global_mutex);
	fcip_num_attaching--;
	ASSERT(fcip_num_attaching >= 0);
	mutex_exit(&fcip_global_mutex);
	rval = FC_SUCCESS;
	return (rval);

port_attach_cleanup:
	/* Failure path: release soft state and drop the attaching count */
	mutex_enter(&fcip_global_mutex);
	(void) fcip_softstate_free(fport);
	fcip_num_attaching--;
	ASSERT(fcip_num_attaching >= 0);
	mutex_exit(&fcip_global_mutex);
	rval = FC_FAILURE;
	return (rval);
}
1172 1172
1173 1173
/*
 * Handler for DDI_RESUME operations. Port must be ready to restart IP
 * traffic on resume
 *
 * NOTE(review): callers (fcip_port_attach FC_CMD_RESUME/POWER_UP path)
 * appear to invoke this with fcip_global_mutex held - confirm before
 * adding any call sites.
 */
static int
fcip_handle_resume(fcip_port_info_t *fport, fc_ulp_port_info_t *port_info,
    fc_attach_cmd_t cmd)
{
	int			rval = FC_SUCCESS;
	struct fcip		*fptr = fport->fcipp_fcip;
	struct fcipstr		*tslp;
	int			index;


	ASSERT(fptr != NULL);

	mutex_enter(&fptr->fcip_mutex);

	if (cmd == FC_CMD_POWER_UP) {
		fptr->fcip_flags &= ~(FCIP_POWER_DOWN);
		/*
		 * Still suspended after power-up: nothing more to do
		 * until the FC_CMD_RESUME arrives.
		 */
		if (fptr->fcip_flags & FCIP_SUSPENDED) {
			mutex_exit(&fptr->fcip_mutex);
			return (FC_SUCCESS);
		}
	} else if (cmd == FC_CMD_RESUME) {
		fptr->fcip_flags &= ~(FCIP_SUSPENDED);
	} else {
		/* neither power-up nor resume: reject */
		mutex_exit(&fptr->fcip_mutex);
		return (FC_FAILURE);
	}

	/*
	 * set the current port state and topology
	 */
	fport->fcipp_topology = port_info->port_flags;
	fport->fcipp_pstate = port_info->port_state;

	/* Scan the upper-stream list for a stream bound to this port */
	rw_enter(&fcipstruplock, RW_READER);
	for (tslp = fcipstrup; tslp; tslp = tslp->sl_nextp) {
		if (tslp->sl_fcip == fptr) {
			break;
		}
	}
	rw_exit(&fcipstruplock);

	/*
	 * No active streams on this port
	 */
	if (tslp == NULL) {
		rval = FC_SUCCESS;
		goto done;
	}

	mutex_enter(&fptr->fcip_rt_mutex);
	for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
		struct fcip_routing_table 	*frp;

		frp = fptr->fcip_rtable[index];
		while (frp) {
			uint32_t		did;
			/*
			 * Mark the broadcast RTE available again. It
			 * was marked SUSPENDED during SUSPEND.
			 */
			did = fcip_get_broadcast_did(fptr);
			if (frp->fcipr_d_id.port_id == did) {
				frp->fcipr_state = 0;
				/*
				 * Forcing index past the last bucket plus
				 * the break exits both loops: only one
				 * broadcast entry exists.
				 */
				index = FCIP_RT_HASH_ELEMS;
				break;
			}
			frp = frp->fcipr_next;
		}
	}
	mutex_exit(&fptr->fcip_rt_mutex);

	/*
	 * fcip_handle_topology will update the port entries in the
	 * routing table.
	 * fcip_handle_topology also takes care of resetting the
	 * fcipr_state field in the routing table structure. The entries
	 * were set to RT_INVALID during suspend.
	 */
	fcip_handle_topology(fptr);

done:
	/*
	 * Restart the timeout thread
	 */
	fptr->fcip_timeout_id = timeout(fcip_timeout, fptr,
	    drv_usectohz(1000000));
	mutex_exit(&fptr->fcip_mutex);
	return (rval);
}
1267 1267
1268 1268
1269 1269 /*
1270 1270 * Insert a destination port entry into the routing table for
1271 1271 * this port
1272 1272 */
1273 1273 static void
1274 1274 fcip_rt_update(struct fcip *fptr, fc_portmap_t *devlist, uint32_t listlen)
1275 1275 {
1276 1276 struct fcip_routing_table *frp;
1277 1277 fcip_port_info_t *fport = fptr->fcip_port_info;
1278 1278 int hash_bucket, i;
1279 1279 fc_portmap_t *pmap;
1280 1280 char wwn_buf[20];
1281 1281
1282 1282 FCIP_TNF_PROBE_2((fcip_rt_update, "fcip io", /* CSTYLED */,
1283 1283 tnf_string, msg, "enter",
1284 1284 tnf_int, listlen, listlen));
1285 1285
1286 1286 ASSERT(!mutex_owned(&fptr->fcip_mutex));
1287 1287 mutex_enter(&fptr->fcip_rt_mutex);
1288 1288
1289 1289 for (i = 0; i < listlen; i++) {
1290 1290 pmap = &(devlist[i]);
1291 1291
1292 1292 frp = fcip_lookup_rtable(fptr, &(pmap->map_pwwn),
1293 1293 FCIP_COMPARE_PWWN);
1294 1294 /*
1295 1295 * If an entry for a port in the devlist exists in the
1296 1296 * in the per port routing table, make sure the data
1297 1297 * is current. We need to do this irrespective of the
1298 1298 * underlying port topology.
1299 1299 */
1300 1300 switch (pmap->map_type) {
1301 1301 /* FALLTHROUGH */
1302 1302 case PORT_DEVICE_NOCHANGE:
1303 1303 /* FALLTHROUGH */
1304 1304 case PORT_DEVICE_USER_LOGIN:
1305 1305 /* FALLTHROUGH */
1306 1306 case PORT_DEVICE_CHANGED:
1307 1307 /* FALLTHROUGH */
1308 1308 case PORT_DEVICE_NEW:
1309 1309 if (frp == NULL) {
1310 1310 goto add_new_entry;
1311 1311 } else if (frp) {
1312 1312 goto update_entry;
1313 1313 } else {
1314 1314 continue;
1315 1315 }
1316 1316
1317 1317 case PORT_DEVICE_OLD:
1318 1318 /* FALLTHROUGH */
1319 1319 case PORT_DEVICE_USER_LOGOUT:
1320 1320 /*
1321 1321 * Mark entry for removal from Routing Table if
1322 1322 * one exists. Let the timeout thread actually
1323 1323 * remove the entry after we've given up hopes
1324 1324 * of the port ever showing up.
1325 1325 */
1326 1326 if (frp) {
1327 1327 uint32_t did;
1328 1328
1329 1329 /*
1330 1330 * Mark the routing table as invalid to bail
1331 1331 * the packets early that are in transit
1332 1332 */
1333 1333 did = fptr->fcip_broadcast_did;
1334 1334 if (frp->fcipr_d_id.port_id != did) {
1335 1335 frp->fcipr_pd = NULL;
1336 1336 frp->fcipr_state = FCIP_RT_INVALID;
1337 1337 frp->fcipr_invalid_timeout =
1338 1338 fptr->fcip_timeout_ticks +
1339 1339 FCIP_RTE_TIMEOUT;
1340 1340 }
1341 1341 }
1342 1342 continue;
1343 1343
1344 1344 default:
1345 1345 FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_WARN,
1346 1346 "unknown map flags in rt_update"));
1347 1347 continue;
1348 1348 }
1349 1349 add_new_entry:
1350 1350 ASSERT(frp == NULL);
1351 1351 hash_bucket = FCIP_RT_HASH(pmap->map_pwwn.raw_wwn);
1352 1352
1353 1353 ASSERT(hash_bucket < FCIP_RT_HASH_ELEMS);
1354 1354
1355 1355 FCIP_TNF_PROBE_2((fcip_rt_update, "cfip io", /* CSTYLED */,
1356 1356 tnf_string, msg,
1357 1357 "add new entry",
1358 1358 tnf_int, hashbucket, hash_bucket));
1359 1359
1360 1360 frp = (struct fcip_routing_table *)
1361 1361 kmem_zalloc(sizeof (struct fcip_routing_table), KM_SLEEP);
1362 1362 /* insert at beginning of hash bucket */
1363 1363 frp->fcipr_next = fptr->fcip_rtable[hash_bucket];
1364 1364 fptr->fcip_rtable[hash_bucket] = frp;
1365 1365 fc_wwn_to_str(&pmap->map_pwwn, wwn_buf);
1366 1366 FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_NOTE,
1367 1367 "added entry for pwwn %s and d_id 0x%x",
1368 1368 wwn_buf, pmap->map_did.port_id));
1369 1369 update_entry:
1370 1370 bcopy((void *)&pmap->map_pwwn,
1371 1371 (void *)&frp->fcipr_pwwn, sizeof (la_wwn_t));
1372 1372 bcopy((void *)&pmap->map_nwwn, (void *)&frp->fcipr_nwwn,
1373 1373 sizeof (la_wwn_t));
1374 1374 frp->fcipr_d_id = pmap->map_did;
1375 1375 frp->fcipr_state = pmap->map_state;
1376 1376 frp->fcipr_pd = pmap->map_pd;
1377 1377
1378 1378 /*
1379 1379 * If there is no pd for a destination port that is not
1380 1380 * a broadcast entry, the port is pretty much unusable - so
1381 1381 * mark the port for removal so we can try adding back the
1382 1382 * entry again.
1383 1383 */
1384 1384 if ((frp->fcipr_pd == NULL) &&
1385 1385 (frp->fcipr_d_id.port_id != fptr->fcip_broadcast_did)) {
1386 1386 frp->fcipr_state = PORT_DEVICE_INVALID;
1387 1387 frp->fcipr_invalid_timeout = fptr->fcip_timeout_ticks +
1388 1388 (FCIP_RTE_TIMEOUT / 2);
1389 1389 }
1390 1390 frp->fcipr_fca_dev =
1391 1391 fc_ulp_get_fca_device(fport->fcipp_handle, pmap->map_did);
1392 1392
1393 1393 /*
1394 1394 * login to the remote port. Don't worry about
1395 1395 * plogi failures for now
1396 1396 */
1397 1397 if (pmap->map_pd != NULL) {
1398 1398 (void) fcip_do_plogi(fptr, frp);
1399 1399 } else if (FC_TOP_EXTERNAL(fport->fcipp_topology)) {
1400 1400 fc_wwn_to_str(&frp->fcipr_pwwn, wwn_buf);
1401 1401 FCIP_DEBUG(FCIP_DEBUG_MISC, (CE_NOTE,
1402 1402 "logging into pwwn %s, d_id 0x%x",
1403 1403 wwn_buf, frp->fcipr_d_id.port_id));
1404 1404 (void) fcip_do_plogi(fptr, frp);
1405 1405 }
1406 1406
1407 1407 FCIP_TNF_BYTE_ARRAY(fcip_rt_update, "fcip io", "detail",
1408 1408 "new wwn in rt", pwwn,
1409 1409 &frp->fcipr_pwwn, sizeof (la_wwn_t));
1410 1410 }
1411 1411 mutex_exit(&fptr->fcip_rt_mutex);
1412 1412 }
1413 1413
1414 1414
1415 1415 /*
1416 1416 * return a matching routing table entry for a given fcip instance
1417 1417 */
1418 1418 struct fcip_routing_table *
1419 1419 fcip_lookup_rtable(struct fcip *fptr, la_wwn_t *wwn, int matchflag)
1420 1420 {
1421 1421 struct fcip_routing_table *frp = NULL;
1422 1422 int hash_bucket;
1423 1423
1424 1424
1425 1425 FCIP_TNF_PROBE_1((fcip_lookup_rtable, "fcip io", /* CSTYLED */,
1426 1426 tnf_string, msg, "enter"));
1427 1427 FCIP_TNF_BYTE_ARRAY(fcip_lookup_rtable, "fcip io", "detail",
1428 1428 "rtable lookup for", wwn,
1429 1429 &wwn->raw_wwn, sizeof (la_wwn_t));
1430 1430 FCIP_TNF_PROBE_2((fcip_lookup_rtable, "fcip io", /* CSTYLED */,
1431 1431 tnf_string, msg, "match by",
1432 1432 tnf_int, matchflag, matchflag));
1433 1433
1434 1434 ASSERT(mutex_owned(&fptr->fcip_rt_mutex));
1435 1435
1436 1436 hash_bucket = FCIP_RT_HASH(wwn->raw_wwn);
1437 1437 frp = fptr->fcip_rtable[hash_bucket];
1438 1438 while (frp != NULL) {
1439 1439
1440 1440 FCIP_TNF_BYTE_ARRAY(fcip_lookup_rtable, "fcip io", "detail",
1441 1441 "rtable entry", nwwn,
1442 1442 &(frp->fcipr_nwwn.raw_wwn), sizeof (la_wwn_t));
1443 1443
1444 1444 if (fcip_wwn_compare(&frp->fcipr_pwwn, wwn, matchflag) == 0) {
1445 1445 break;
1446 1446 }
1447 1447
1448 1448 frp = frp->fcipr_next;
1449 1449 }
1450 1450 FCIP_TNF_PROBE_2((fcip_lookup_rtable, "fcip io", /* CSTYLED */,
1451 1451 tnf_string, msg, "lookup result",
1452 1452 tnf_opaque, frp, frp));
1453 1453 return (frp);
1454 1454 }
1455 1455
1456 1456 /*
1457 1457 * Attach of fcip under pseudo. The actual setup of the interface
1458 1458 * actually happens in fcip_port_attach on a callback from the
1459 1459 * transport. The port_attach callback however can proceed only
1460 1460 * after the devinfo for fcip has been created under pseudo
1461 1461 */
1462 1462 static int
1463 1463 fcip_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1464 1464 {
1465 1465 switch ((int)cmd) {
1466 1466
1467 1467 case DDI_ATTACH: {
1468 1468 ASSERT(fcip_module_dip == NULL);
1469 1469 fcip_module_dip = dip;
1470 1470
1471 1471 /*
1472 1472 * this call originates as a result of fcip's conf
1473 1473 * file entry and will result in a fcip instance being
1474 1474 * a child of pseudo. We should ensure here that the port
1475 1475 * driver (fp) has been loaded and initted since we would
1476 1476 * never get a port attach callback without fp being loaded.
1477 1477 * If we are unable to succesfully load and initalize fp -
1478 1478 * just fail this attach.
1479 1479 */
1480 1480 mutex_enter(&fcip_global_mutex);
1481 1481
1482 1482 FCIP_DEBUG(FCIP_DEBUG_ATTACH,
1483 1483 (CE_WARN, "global cv - signaling"));
1484 1484
1485 1485 cv_signal(&fcip_global_cv);
1486 1486
1487 1487 FCIP_DEBUG(FCIP_DEBUG_ATTACH,
1488 1488 (CE_WARN, "global cv - signaled"));
1489 1489 mutex_exit(&fcip_global_mutex);
1490 1490 return (DDI_SUCCESS);
1491 1491 }
1492 1492 case DDI_RESUME:
1493 1493 /*
1494 1494 * Resume appears trickier
1495 1495 */
1496 1496 return (DDI_SUCCESS);
1497 1497 default:
1498 1498 return (DDI_FAILURE);
1499 1499 }
1500 1500 }
1501 1501
1502 1502
1503 1503 /*
1504 1504 * The detach entry point to permit unloading fcip. We make sure
1505 1505 * there are no active streams before we proceed with the detach
1506 1506 */
1507 1507 /* ARGSUSED */
1508 1508 static int
1509 1509 fcip_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1510 1510 {
1511 1511 struct fcip *fptr;
1512 1512 fcip_port_info_t *fport;
1513 1513 int detached;
1514 1514
1515 1515 switch (cmd) {
1516 1516 case DDI_DETACH: {
1517 1517 /*
1518 1518 * If we got here, any active streams should have been
1519 1519 * unplumbed but check anyway
1520 1520 */
1521 1521 mutex_enter(&fcip_global_mutex);
1522 1522 if (fcipstrup != NULL) {
1523 1523 mutex_exit(&fcip_global_mutex);
1524 1524 return (DDI_FAILURE);
1525 1525 }
1526 1526
1527 1527 if (fcip_port_head != NULL) {
1528 1528 /*
1529 1529 * Check to see if we have unattached/unbound
1530 1530 * ports. If all the ports are unattached/unbound go
1531 1531 * ahead and unregister with the transport
1532 1532 */
1533 1533 fport = fcip_port_head;
1534 1534 while (fport != NULL) {
1535 1535 fptr = fport->fcipp_fcip;
1536 1536 if (fptr == NULL) {
1537 1537 continue;
1538 1538 }
1539 1539 mutex_enter(&fptr->fcip_mutex);
1540 1540 fptr->fcip_flags |= FCIP_DETACHING;
1541 1541 if (fptr->fcip_ipq ||
1542 1542 fptr->fcip_flags & (FCIP_IN_TIMEOUT |
1543 1543 FCIP_IN_CALLBACK | FCIP_ATTACHING |
1544 1544 FCIP_SUSPENDED | FCIP_POWER_DOWN |
1545 1545 FCIP_REG_INPROGRESS)) {
1546 1546 FCIP_TNF_PROBE_1((fcip_detach,
1547 1547 "fcip io", /* CSTYLED */,
1548 1548 tnf_string, msg,
1549 1549 "fcip instance busy"));
1550 1550
1551 1551 mutex_exit(&fptr->fcip_mutex);
1552 1552 FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_WARN,
1553 1553 "fcip instance busy"));
1554 1554 break;
1555 1555 }
1556 1556 /*
1557 1557 * Check for any outstanding pkts. If yes
1558 1558 * fail the detach
1559 1559 */
1560 1560 mutex_enter(&fptr->fcip_dest_mutex);
1561 1561 if (fcip_port_get_num_pkts(fptr) > 0) {
1562 1562 mutex_exit(&fptr->fcip_dest_mutex);
1563 1563 mutex_exit(&fptr->fcip_mutex);
1564 1564 FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_WARN,
1565 1565 "fcip instance busy - pkts "
1566 1566 "pending"));
1567 1567 break;
1568 1568 }
1569 1569 mutex_exit(&fptr->fcip_dest_mutex);
1570 1570
1571 1571 mutex_enter(&fptr->fcip_rt_mutex);
1572 1572 if (fcip_plogi_in_progress(fptr)) {
1573 1573 mutex_exit(&fptr->fcip_rt_mutex);
1574 1574 mutex_exit(&fptr->fcip_mutex);
1575 1575 FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_WARN,
1576 1576 "fcip instance busy - plogi in "
1577 1577 "progress"));
1578 1578 break;
1579 1579 }
1580 1580 mutex_exit(&fptr->fcip_rt_mutex);
1581 1581
1582 1582 mutex_exit(&fptr->fcip_mutex);
1583 1583 fport = fport->fcipp_next;
1584 1584 }
1585 1585 /*
1586 1586 * if fport is non NULL - we have active ports
1587 1587 */
1588 1588 if (fport != NULL) {
1589 1589 /*
1590 1590 * Remove the DETACHING flags on the ports
1591 1591 */
1592 1592 fport = fcip_port_head;
1593 1593 while (fport != NULL) {
1594 1594 fptr = fport->fcipp_fcip;
1595 1595 mutex_enter(&fptr->fcip_mutex);
1596 1596 fptr->fcip_flags &= ~(FCIP_DETACHING);
1597 1597 mutex_exit(&fptr->fcip_mutex);
1598 1598 fport = fport->fcipp_next;
1599 1599 }
1600 1600 mutex_exit(&fcip_global_mutex);
1601 1601 return (DDI_FAILURE);
1602 1602 }
1603 1603 }
1604 1604
1605 1605 /*
1606 1606 * free up all softstate structures
1607 1607 */
1608 1608 fport = fcip_port_head;
1609 1609 while (fport != NULL) {
1610 1610 detached = 1;
1611 1611
1612 1612 fptr = fport->fcipp_fcip;
1613 1613 if (fptr) {
1614 1614 mutex_enter(&fptr->fcip_mutex);
1615 1615 /*
1616 1616 * Check to see if somebody beat us to the
1617 1617 * punch
1618 1618 */
1619 1619 detached = fptr->fcip_flags & FCIP_DETACHED;
1620 1620 fptr->fcip_flags &= ~(FCIP_DETACHING);
1621 1621 fptr->fcip_flags |= FCIP_DETACHED;
1622 1622 mutex_exit(&fptr->fcip_mutex);
1623 1623 }
1624 1624
1625 1625 if (!detached) {
1626 1626 fport = fcip_softstate_free(fport);
1627 1627 } else {
1628 1628 /*
1629 1629 * If the port was marked as detached
1630 1630 * but it was still in the list, that
1631 1631 * means another thread has marked it
1632 1632 * but we got in while it released the
1633 1633 * fcip_global_mutex in softstate_free.
1634 1634 * Given that, we're still safe to use
1635 1635 * fport->fcipp_next to find out what
1636 1636 * the next port on the list is.
1637 1637 */
1638 1638 fport = fport->fcipp_next;
1639 1639 }
1640 1640
1641 1641 FCIP_DEBUG(FCIP_DEBUG_DETACH,
1642 1642 (CE_NOTE, "detaching port"));
1643 1643
1644 1644 FCIP_TNF_PROBE_1((fcip_detach,
1645 1645 "fcip io", /* CSTYLED */, tnf_string,
1646 1646 msg, "detaching port"));
1647 1647 }
1648 1648
1649 1649 /*
1650 1650 * If we haven't removed all the port structures, we
1651 1651 * aren't yet ready to be detached.
1652 1652 */
1653 1653 if (fcip_port_head != NULL) {
1654 1654 mutex_exit(&fcip_global_mutex);
1655 1655 return (DDI_FAILURE);
1656 1656 }
1657 1657
1658 1658 fcip_num_instances = 0;
1659 1659 mutex_exit(&fcip_global_mutex);
1660 1660 fcip_module_dip = NULL;
1661 1661 return (DDI_SUCCESS);
1662 1662 }
1663 1663 case DDI_SUSPEND:
1664 1664 return (DDI_SUCCESS);
1665 1665 default:
1666 1666 return (DDI_FAILURE);
1667 1667 }
1668 1668 }
1669 1669
/*
 * The port_detach callback is called from the transport when a
 * FC port is being removed from the transport's control. This routine
 * provides fcip with an opportunity to cleanup all activities and
 * structures on the port marked for removal.
 */
/* ARGSUSED */
static int
fcip_port_detach(opaque_t ulp_handle, fc_ulp_port_info_t *port_info,
    fc_detach_cmd_t cmd)
{
	int			rval = FC_FAILURE;
	fcip_port_info_t	*fport;
	struct fcip		*fptr;
	struct fcipstr		*strp;

	switch (cmd) {
	case FC_CMD_DETACH: {
		mutex_enter(&fcip_global_mutex);

		if (fcip_port_head == NULL) {
			/*
			 * we are all done but our fini has not been
			 * called yet!! Let's hope we have no active
			 * fcip instances here. - strange secnario but
			 * no harm in having this return a success.
			 */
			fcip_check_remove_minor_node();

			mutex_exit(&fcip_global_mutex);
			return (FC_SUCCESS);
		} else {
			/*
			 * traverse the port list
			 */
			fport = fcip_port_head;
			while (fport != NULL) {
				if (fport->fcipp_handle ==
				    port_info->port_handle) {
					fptr = fport->fcipp_fcip;

					/*
					 * Fail the port detach if there is
					 * still an attached, bound stream on
					 * this interface.
					 */

					rw_enter(&fcipstruplock, RW_READER);

					for (strp = fcipstrup; strp != NULL;
					    strp = strp->sl_nextp) {
						if (strp->sl_fcip == fptr) {
							/*
							 * Port in use by a
							 * stream: refuse and
							 * drop both locks.
							 */
							rw_exit(&fcipstruplock);
							mutex_exit(
							    &fcip_global_mutex);
							return (FC_FAILURE);
						}
					}

					rw_exit(&fcipstruplock);

					/*
					 * fail port detach if we are in
					 * the middle of a deferred port attach
					 * or if the port has outstanding pkts
					 */
					if (fptr != NULL) {
						mutex_enter(&fptr->fcip_mutex);
						if (fcip_check_port_busy
						    (fptr) ||
						    (fptr->fcip_flags &
						    FCIP_DETACHED)) {
							mutex_exit(
							    &fptr->fcip_mutex);
							mutex_exit(
							    &fcip_global_mutex);
							return (FC_FAILURE);
						}

						/*
						 * Claim the port so no other
						 * thread detaches it twice.
						 */
						fptr->fcip_flags |=
						    FCIP_DETACHED;
						mutex_exit(&fptr->fcip_mutex);
					}
					(void) fcip_softstate_free(fport);

					/*
					 * Drop the system-wide minor node if
					 * this was the last fcip instance.
					 */
					fcip_check_remove_minor_node();
					mutex_exit(&fcip_global_mutex);
					return (FC_SUCCESS);
				}
				fport = fport->fcipp_next;
			}
			/* Unknown port handle: fall through with failure */
			ASSERT(fport == NULL);
		}
		mutex_exit(&fcip_global_mutex);
		break;
	}
	case FC_CMD_POWER_DOWN:
		/* FALLTHROUGH */
	case FC_CMD_SUSPEND:
		/* Locate the port for this handle and quiesce it */
		mutex_enter(&fcip_global_mutex);
		fport = fcip_port_head;
		while (fport != NULL) {
			if (fport->fcipp_handle == port_info->port_handle) {
				break;
			}
			fport = fport->fcipp_next;
		}
		if (fport == NULL) {
			mutex_exit(&fcip_global_mutex);
			break;
		}
		rval = fcip_handle_suspend(fport, cmd);
		mutex_exit(&fcip_global_mutex);
		break;
	default:
		FCIP_DEBUG(FCIP_DEBUG_DETACH,
		    (CE_WARN, "unknown port detach command!!"));
		break;
	}
	return (rval);
}
1791 1791
1792 1792
1793 1793 /*
1794 1794 * Returns 0 if the port is not busy, else returns non zero.
1795 1795 */
1796 1796 static int
1797 1797 fcip_check_port_busy(struct fcip *fptr)
1798 1798 {
1799 1799 int rval = 0, num_pkts = 0;
1800 1800
1801 1801 ASSERT(fptr != NULL);
1802 1802 ASSERT(MUTEX_HELD(&fptr->fcip_mutex));
1803 1803
1804 1804 mutex_enter(&fptr->fcip_dest_mutex);
1805 1805
1806 1806 if (fptr->fcip_flags & FCIP_PORT_BUSY ||
1807 1807 ((num_pkts = fcip_port_get_num_pkts(fptr)) > 0) ||
1808 1808 fptr->fcip_num_ipkts_pending) {
1809 1809 rval = 1;
1810 1810 FCIP_DEBUG(FCIP_DEBUG_DETACH,
1811 1811 (CE_NOTE, "!fcip_check_port_busy: port is busy "
1812 1812 "fcip_flags: 0x%x, num_pkts: 0x%x, ipkts_pending: 0x%lx!",
1813 1813 fptr->fcip_flags, num_pkts, fptr->fcip_num_ipkts_pending));
1814 1814 }
1815 1815
1816 1816 mutex_exit(&fptr->fcip_dest_mutex);
1817 1817 return (rval);
1818 1818 }
1819 1819
1820 1820 /*
1821 1821 * Helper routine to remove fcip's minor node
1822 1822 * There is one minor node per system and it should be removed if there are no
1823 1823 * other fcip instances (which has a 1:1 mapping for fp instances) present
1824 1824 */
1825 1825 static void
1826 1826 fcip_check_remove_minor_node(void)
1827 1827 {
1828 1828 ASSERT(MUTEX_HELD(&fcip_global_mutex));
1829 1829
1830 1830 /*
1831 1831 * If there are no more fcip (fp) instances, remove the
1832 1832 * minor node for fcip.
1833 1833 * Reset fcip_minor_node_created to invalidate it.
1834 1834 */
1835 1835 if (fcip_num_instances == 0 && (fcip_module_dip != NULL)) {
1836 1836 ddi_remove_minor_node(fcip_module_dip, NULL);
1837 1837 fcip_minor_node_created = 0;
1838 1838 }
1839 1839 }
1840 1840
1841 1841 /*
1842 1842 * This routine permits the suspend operation during a CPR/System
1843 1843 * power management operation. The routine basically quiesces I/Os
1844 1844 * on all active interfaces
1845 1845 */
1846 1846 static int
1847 1847 fcip_handle_suspend(fcip_port_info_t *fport, fc_detach_cmd_t cmd)
1848 1848 {
1849 1849 struct fcip *fptr = fport->fcipp_fcip;
1850 1850 timeout_id_t tid;
1851 1851 int index;
1852 1852 int tryagain = 0;
1853 1853 int count;
1854 1854 struct fcipstr *tslp;
1855 1855
1856 1856
1857 1857 ASSERT(fptr != NULL);
1858 1858 mutex_enter(&fptr->fcip_mutex);
1859 1859
1860 1860 /*
1861 1861 * Fail if we are in the middle of a callback. Don't use delay during
1862 1862 * suspend since clock intrs are not available so busy wait
1863 1863 */
1864 1864 count = 0;
1865 1865 while (count++ < 15 &&
1866 1866 ((fptr->fcip_flags & FCIP_IN_CALLBACK) ||
1867 1867 (fptr->fcip_flags & FCIP_IN_TIMEOUT))) {
1868 1868 mutex_exit(&fptr->fcip_mutex);
1869 1869 drv_usecwait(1000000);
1870 1870 mutex_enter(&fptr->fcip_mutex);
1871 1871 }
1872 1872
1873 1873 if (fptr->fcip_flags & FCIP_IN_CALLBACK ||
1874 1874 fptr->fcip_flags & FCIP_IN_TIMEOUT) {
1875 1875 mutex_exit(&fptr->fcip_mutex);
1876 1876 return (FC_FAILURE);
1877 1877 }
1878 1878
1879 1879 if (cmd == FC_CMD_POWER_DOWN) {
1880 1880 if (fptr->fcip_flags & FCIP_SUSPENDED) {
1881 1881 fptr->fcip_flags |= FCIP_POWER_DOWN;
1882 1882 mutex_exit(&fptr->fcip_mutex);
1883 1883 goto success;
1884 1884 } else {
1885 1885 fptr->fcip_flags |= FCIP_POWER_DOWN;
1886 1886 }
1887 1887 } else if (cmd == FC_CMD_SUSPEND) {
1888 1888 fptr->fcip_flags |= FCIP_SUSPENDED;
1889 1889 } else {
1890 1890 mutex_exit(&fptr->fcip_mutex);
1891 1891 return (FC_FAILURE);
1892 1892 }
1893 1893
1894 1894 mutex_exit(&fptr->fcip_mutex);
1895 1895 /*
1896 1896 * If no streams are plumbed - its the easiest case - Just
1897 1897 * bail out without having to do much
1898 1898 */
1899 1899
1900 1900 rw_enter(&fcipstruplock, RW_READER);
1901 1901 for (tslp = fcipstrup; tslp; tslp = tslp->sl_nextp) {
1902 1902 if (tslp->sl_fcip == fptr) {
1903 1903 break;
1904 1904 }
1905 1905 }
1906 1906 rw_exit(&fcipstruplock);
1907 1907
1908 1908 /*
1909 1909 * No active streams on this port
1910 1910 */
1911 1911 if (tslp == NULL) {
1912 1912 goto success;
1913 1913 }
1914 1914
1915 1915 /*
1916 1916 * Walk through each Routing table structure and check if
1917 1917 * the destination table has any outstanding commands. If yes
1918 1918 * wait for the commands to drain. Since we go through each
1919 1919 * routing table entry in succession, it may be wise to wait
1920 1920 * only a few seconds for each entry.
1921 1921 */
1922 1922 mutex_enter(&fptr->fcip_rt_mutex);
1923 1923 while (!tryagain) {
1924 1924
1925 1925 tryagain = 0;
1926 1926 for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
1927 1927 struct fcip_routing_table *frp;
1928 1928 struct fcip_dest *fdestp;
1929 1929 la_wwn_t *pwwn;
1930 1930 int hash_bucket;
1931 1931
1932 1932 frp = fptr->fcip_rtable[index];
1933 1933 while (frp) {
1934 1934 /*
1935 1935 * Mark the routing table as SUSPENDED. Even
1936 1936 * mark the broadcast entry SUSPENDED to
1937 1937 * prevent any ARP or other broadcasts. We
1938 1938 * can reset the state of the broadcast
1939 1939 * RTE when we resume.
1940 1940 */
1941 1941 frp->fcipr_state = FCIP_RT_SUSPENDED;
1942 1942 pwwn = &frp->fcipr_pwwn;
1943 1943
1944 1944 /*
1945 1945 * Get hold of destination pointer
1946 1946 */
1947 1947 mutex_enter(&fptr->fcip_dest_mutex);
1948 1948
1949 1949 hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn);
1950 1950 ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS);
1951 1951
1952 1952 fdestp = fptr->fcip_dest[hash_bucket];
1953 1953 while (fdestp != NULL) {
1954 1954 mutex_enter(&fdestp->fcipd_mutex);
1955 1955 if (fdestp->fcipd_rtable) {
1956 1956 if (fcip_wwn_compare(pwwn,
1957 1957 &fdestp->fcipd_pwwn,
1958 1958 FCIP_COMPARE_PWWN) == 0) {
1959 1959 mutex_exit(
1960 1960 &fdestp->fcipd_mutex);
1961 1961 break;
1962 1962 }
1963 1963 }
1964 1964 mutex_exit(&fdestp->fcipd_mutex);
1965 1965 fdestp = fdestp->fcipd_next;
1966 1966 }
1967 1967
1968 1968 mutex_exit(&fptr->fcip_dest_mutex);
1969 1969 if (fdestp == NULL) {
1970 1970 frp = frp->fcipr_next;
1971 1971 continue;
1972 1972 }
1973 1973
1974 1974 /*
1975 1975 * Wait for fcip_wait_cmds seconds for
1976 1976 * the commands to drain.
1977 1977 */
1978 1978 count = 0;
1979 1979 mutex_enter(&fdestp->fcipd_mutex);
1980 1980 while (fdestp->fcipd_ncmds &&
1981 1981 count < fcip_wait_cmds) {
1982 1982 mutex_exit(&fdestp->fcipd_mutex);
1983 1983 mutex_exit(&fptr->fcip_rt_mutex);
1984 1984 drv_usecwait(1000000);
1985 1985 mutex_enter(&fptr->fcip_rt_mutex);
1986 1986 mutex_enter(&fdestp->fcipd_mutex);
1987 1987 count++;
1988 1988 }
1989 1989 /*
1990 1990 * Check if we were able to drain all cmds
1991 1991 * successfully. Else continue with other
1992 1992 * ports and try during the second pass
1993 1993 */
1994 1994 if (fdestp->fcipd_ncmds) {
1995 1995 tryagain++;
1996 1996 }
1997 1997 mutex_exit(&fdestp->fcipd_mutex);
1998 1998
1999 1999 frp = frp->fcipr_next;
2000 2000 }
2001 2001 }
2002 2002 if (tryagain == 0) {
2003 2003 break;
2004 2004 }
2005 2005 }
2006 2006 mutex_exit(&fptr->fcip_rt_mutex);
2007 2007
2008 2008 if (tryagain) {
2009 2009 mutex_enter(&fptr->fcip_mutex);
2010 2010 fptr->fcip_flags &= ~(FCIP_SUSPENDED | FCIP_POWER_DOWN);
2011 2011 mutex_exit(&fptr->fcip_mutex);
2012 2012 return (FC_FAILURE);
2013 2013 }
2014 2014
2015 2015 success:
2016 2016 mutex_enter(&fptr->fcip_mutex);
2017 2017 tid = fptr->fcip_timeout_id;
2018 2018 fptr->fcip_timeout_id = NULL;
2019 2019 mutex_exit(&fptr->fcip_mutex);
2020 2020
2021 2021 (void) untimeout(tid);
2022 2022
2023 2023 return (FC_SUCCESS);
2024 2024 }
2025 2025
2026 2026 /*
2027 2027 * the getinfo(9E) entry point
2028 2028 */
2029 2029 /* ARGSUSED */
2030 2030 static int
2031 2031 fcip_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
2032 2032 {
2033 2033 int rval = DDI_FAILURE;
2034 2034
2035 2035 switch (cmd) {
2036 2036 case DDI_INFO_DEVT2DEVINFO:
2037 2037 *result = fcip_module_dip;
2038 2038 if (*result)
2039 2039 rval = DDI_SUCCESS;
2040 2040 break;
2041 2041
2042 2042 case DDI_INFO_DEVT2INSTANCE:
2043 2043 *result = (void *)0;
2044 2044 rval = DDI_SUCCESS;
2045 2045 break;
2046 2046 default:
2047 2047 break;
2048 2048 }
2049 2049
2050 2050 return (rval);
2051 2051 }
2052 2052
2053 2053 /*
2054 2054 * called from fcip_attach to initialize kstats for the link
2055 2055 */
2056 2056 /* ARGSUSED */
2057 2057 static void
2058 2058 fcip_kstat_init(struct fcip *fptr)
2059 2059 {
2060 2060 int instance;
2061 2061 char buf[16];
2062 2062 struct fcipstat *fcipstatp;
2063 2063
2064 2064 ASSERT(mutex_owned(&fptr->fcip_mutex));
2065 2065
2066 2066 instance = ddi_get_instance(fptr->fcip_dip);
2067 2067 (void) sprintf(buf, "fcip%d", instance);
2068 2068
2069 2069 #ifdef kstat
2070 2070 fptr->fcip_kstatp = kstat_create("fcip", instance, buf, "net",
2071 2071 KSTAT_TYPE_NAMED,
2072 2072 (sizeof (struct fcipstat)/ sizeof (kstat_named_t)),
2073 2073 KSTAT_FLAG_PERSISTENT);
2074 2074 #else
2075 2075 fptr->fcip_kstatp = kstat_create("fcip", instance, buf, "net",
2076 2076 KSTAT_TYPE_NAMED,
2077 2077 (sizeof (struct fcipstat)/ sizeof (kstat_named_t)), 0);
2078 2078 #endif
2079 2079 if (fptr->fcip_kstatp == NULL) {
2080 2080 FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_WARN, "kstat created failed"));
2081 2081 return;
2082 2082 }
2083 2083
2084 2084 fcipstatp = (struct fcipstat *)fptr->fcip_kstatp->ks_data;
2085 2085 kstat_named_init(&fcipstatp->fcips_ipackets, "ipackets",
2086 2086 KSTAT_DATA_ULONG);
2087 2087 kstat_named_init(&fcipstatp->fcips_ierrors, "ierrors",
2088 2088 KSTAT_DATA_ULONG);
2089 2089 kstat_named_init(&fcipstatp->fcips_opackets, "opackets",
2090 2090 KSTAT_DATA_ULONG);
2091 2091 kstat_named_init(&fcipstatp->fcips_oerrors, "oerrors",
2092 2092 KSTAT_DATA_ULONG);
2093 2093 kstat_named_init(&fcipstatp->fcips_collisions, "collisions",
2094 2094 KSTAT_DATA_ULONG);
2095 2095 kstat_named_init(&fcipstatp->fcips_nocanput, "nocanput",
2096 2096 KSTAT_DATA_ULONG);
2097 2097 kstat_named_init(&fcipstatp->fcips_allocbfail, "allocbfail",
2098 2098 KSTAT_DATA_ULONG);
2099 2099
2100 2100 kstat_named_init(&fcipstatp->fcips_defer, "defer",
2101 2101 KSTAT_DATA_ULONG);
2102 2102 kstat_named_init(&fcipstatp->fcips_fram, "fram",
2103 2103 KSTAT_DATA_ULONG);
2104 2104 kstat_named_init(&fcipstatp->fcips_crc, "crc",
2105 2105 KSTAT_DATA_ULONG);
2106 2106 kstat_named_init(&fcipstatp->fcips_oflo, "oflo",
2107 2107 KSTAT_DATA_ULONG);
2108 2108 kstat_named_init(&fcipstatp->fcips_uflo, "uflo",
2109 2109 KSTAT_DATA_ULONG);
2110 2110 kstat_named_init(&fcipstatp->fcips_missed, "missed",
2111 2111 KSTAT_DATA_ULONG);
2112 2112 kstat_named_init(&fcipstatp->fcips_tlcol, "tlcol",
2113 2113 KSTAT_DATA_ULONG);
2114 2114 kstat_named_init(&fcipstatp->fcips_trtry, "trtry",
2115 2115 KSTAT_DATA_ULONG);
2116 2116 kstat_named_init(&fcipstatp->fcips_tnocar, "tnocar",
2117 2117 KSTAT_DATA_ULONG);
2118 2118 kstat_named_init(&fcipstatp->fcips_inits, "inits",
2119 2119 KSTAT_DATA_ULONG);
2120 2120 kstat_named_init(&fcipstatp->fcips_notbufs, "notbufs",
2121 2121 KSTAT_DATA_ULONG);
2122 2122 kstat_named_init(&fcipstatp->fcips_norbufs, "norbufs",
2123 2123 KSTAT_DATA_ULONG);
2124 2124 kstat_named_init(&fcipstatp->fcips_allocbfail, "allocbfail",
2125 2125 KSTAT_DATA_ULONG);
2126 2126
2127 2127 /*
2128 2128 * required by kstat for MIB II objects(RFC 1213)
2129 2129 */
2130 2130 kstat_named_init(&fcipstatp->fcips_rcvbytes, "fcips_rcvbytes",
2131 2131 KSTAT_DATA_ULONG); /* # octets received */
2132 2132 /* MIB - ifInOctets */
2133 2133 kstat_named_init(&fcipstatp->fcips_xmtbytes, "fcips_xmtbytes",
2134 2134 KSTAT_DATA_ULONG); /* # octets xmitted */
2135 2135 /* MIB - ifOutOctets */
2136 2136 kstat_named_init(&fcipstatp->fcips_multircv, "fcips_multircv",
2137 2137 KSTAT_DATA_ULONG); /* # multicast packets */
2138 2138 /* delivered to upper layer */
2139 2139 /* MIB - ifInNUcastPkts */
2140 2140 kstat_named_init(&fcipstatp->fcips_multixmt, "fcips_multixmt",
2141 2141 KSTAT_DATA_ULONG); /* # multicast packets */
2142 2142 /* requested to be sent */
2143 2143 /* MIB - ifOutNUcastPkts */
2144 2144 kstat_named_init(&fcipstatp->fcips_brdcstrcv, "fcips_brdcstrcv",
2145 2145 KSTAT_DATA_ULONG); /* # broadcast packets */
2146 2146 /* delivered to upper layer */
2147 2147 /* MIB - ifInNUcastPkts */
2148 2148 kstat_named_init(&fcipstatp->fcips_brdcstxmt, "fcips_brdcstxmt",
2149 2149 KSTAT_DATA_ULONG); /* # broadcast packets */
2150 2150 /* requested to be sent */
2151 2151 /* MIB - ifOutNUcastPkts */
2152 2152 kstat_named_init(&fcipstatp->fcips_norcvbuf, "fcips_norcvbuf",
2153 2153 KSTAT_DATA_ULONG); /* # rcv packets discarded */
2154 2154 /* MIB - ifInDiscards */
2155 2155 kstat_named_init(&fcipstatp->fcips_noxmtbuf, "fcips_noxmtbuf",
2156 2156 KSTAT_DATA_ULONG); /* # xmt packets discarded */
2157 2157
2158 2158 fptr->fcip_kstatp->ks_update = fcip_stat_update;
2159 2159 fptr->fcip_kstatp->ks_private = (void *) fptr;
2160 2160 kstat_install(fptr->fcip_kstatp);
2161 2161 }
2162 2162
2163 2163 /*
2164 2164 * Update the defined kstats for netstat et al to use
2165 2165 */
2166 2166 /* ARGSUSED */
2167 2167 static int
2168 2168 fcip_stat_update(kstat_t *fcip_statp, int val)
2169 2169 {
2170 2170 struct fcipstat *fcipstatp;
2171 2171 struct fcip *fptr;
2172 2172
2173 2173 fptr = (struct fcip *)fcip_statp->ks_private;
2174 2174 fcipstatp = (struct fcipstat *)fcip_statp->ks_data;
2175 2175
2176 2176 if (val == KSTAT_WRITE) {
2177 2177 fptr->fcip_ipackets = fcipstatp->fcips_ipackets.value.ul;
2178 2178 fptr->fcip_ierrors = fcipstatp->fcips_ierrors.value.ul;
2179 2179 fptr->fcip_opackets = fcipstatp->fcips_opackets.value.ul;
2180 2180 fptr->fcip_oerrors = fcipstatp->fcips_oerrors.value.ul;
2181 2181 fptr->fcip_collisions = fcipstatp->fcips_collisions.value.ul;
2182 2182 fptr->fcip_defer = fcipstatp->fcips_defer.value.ul;
2183 2183 fptr->fcip_fram = fcipstatp->fcips_fram.value.ul;
2184 2184 fptr->fcip_crc = fcipstatp->fcips_crc.value.ul;
2185 2185 fptr->fcip_oflo = fcipstatp->fcips_oflo.value.ul;
2186 2186 fptr->fcip_uflo = fcipstatp->fcips_uflo.value.ul;
2187 2187 fptr->fcip_missed = fcipstatp->fcips_missed.value.ul;
2188 2188 fptr->fcip_tlcol = fcipstatp->fcips_tlcol.value.ul;
2189 2189 fptr->fcip_trtry = fcipstatp->fcips_trtry.value.ul;
2190 2190 fptr->fcip_tnocar = fcipstatp->fcips_tnocar.value.ul;
2191 2191 fptr->fcip_inits = fcipstatp->fcips_inits.value.ul;
2192 2192 fptr->fcip_notbufs = fcipstatp->fcips_notbufs.value.ul;
2193 2193 fptr->fcip_norbufs = fcipstatp->fcips_norbufs.value.ul;
2194 2194 fptr->fcip_nocanput = fcipstatp->fcips_nocanput.value.ul;
2195 2195 fptr->fcip_allocbfail = fcipstatp->fcips_allocbfail.value.ul;
2196 2196 fptr->fcip_rcvbytes = fcipstatp->fcips_rcvbytes.value.ul;
2197 2197 fptr->fcip_xmtbytes = fcipstatp->fcips_xmtbytes.value.ul;
2198 2198 fptr->fcip_multircv = fcipstatp->fcips_multircv.value.ul;
2199 2199 fptr->fcip_multixmt = fcipstatp->fcips_multixmt.value.ul;
2200 2200 fptr->fcip_brdcstrcv = fcipstatp->fcips_brdcstrcv.value.ul;
2201 2201 fptr->fcip_norcvbuf = fcipstatp->fcips_norcvbuf.value.ul;
2202 2202 fptr->fcip_noxmtbuf = fcipstatp->fcips_noxmtbuf.value.ul;
2203 2203 fptr->fcip_allocbfail = fcipstatp->fcips_allocbfail.value.ul;
2204 2204 fptr->fcip_allocbfail = fcipstatp->fcips_allocbfail.value.ul;
2205 2205 fptr->fcip_allocbfail = fcipstatp->fcips_allocbfail.value.ul;
2206 2206 fptr->fcip_allocbfail = fcipstatp->fcips_allocbfail.value.ul;
2207 2207 fptr->fcip_allocbfail = fcipstatp->fcips_allocbfail.value.ul;
2208 2208 fptr->fcip_allocbfail = fcipstatp->fcips_allocbfail.value.ul;
2209 2209 fptr->fcip_allocbfail = fcipstatp->fcips_allocbfail.value.ul;
2210 2210 fptr->fcip_allocbfail = fcipstatp->fcips_allocbfail.value.ul;
2211 2211
2212 2212 } else {
2213 2213 fcipstatp->fcips_ipackets.value.ul = fptr->fcip_ipackets;
2214 2214 fcipstatp->fcips_ierrors.value.ul = fptr->fcip_ierrors;
2215 2215 fcipstatp->fcips_opackets.value.ul = fptr->fcip_opackets;
2216 2216 fcipstatp->fcips_oerrors.value.ul = fptr->fcip_oerrors;
2217 2217 fcipstatp->fcips_collisions.value.ul = fptr->fcip_collisions;
2218 2218 fcipstatp->fcips_nocanput.value.ul = fptr->fcip_nocanput;
2219 2219 fcipstatp->fcips_allocbfail.value.ul = fptr->fcip_allocbfail;
2220 2220 fcipstatp->fcips_defer.value.ul = fptr->fcip_defer;
2221 2221 fcipstatp->fcips_fram.value.ul = fptr->fcip_fram;
2222 2222 fcipstatp->fcips_crc.value.ul = fptr->fcip_crc;
2223 2223 fcipstatp->fcips_oflo.value.ul = fptr->fcip_oflo;
2224 2224 fcipstatp->fcips_uflo.value.ul = fptr->fcip_uflo;
2225 2225 fcipstatp->fcips_missed.value.ul = fptr->fcip_missed;
2226 2226 fcipstatp->fcips_tlcol.value.ul = fptr->fcip_tlcol;
2227 2227 fcipstatp->fcips_trtry.value.ul = fptr->fcip_trtry;
2228 2228 fcipstatp->fcips_tnocar.value.ul = fptr->fcip_tnocar;
2229 2229 fcipstatp->fcips_inits.value.ul = fptr->fcip_inits;
2230 2230 fcipstatp->fcips_norbufs.value.ul = fptr->fcip_norbufs;
2231 2231 fcipstatp->fcips_notbufs.value.ul = fptr->fcip_notbufs;
2232 2232 fcipstatp->fcips_rcvbytes.value.ul = fptr->fcip_rcvbytes;
2233 2233 fcipstatp->fcips_xmtbytes.value.ul = fptr->fcip_xmtbytes;
2234 2234 fcipstatp->fcips_multircv.value.ul = fptr->fcip_multircv;
2235 2235 fcipstatp->fcips_multixmt.value.ul = fptr->fcip_multixmt;
2236 2236 fcipstatp->fcips_brdcstrcv.value.ul = fptr->fcip_brdcstrcv;
2237 2237 fcipstatp->fcips_brdcstxmt.value.ul = fptr->fcip_brdcstxmt;
2238 2238 fcipstatp->fcips_norcvbuf.value.ul = fptr->fcip_norcvbuf;
2239 2239 fcipstatp->fcips_noxmtbuf.value.ul = fptr->fcip_noxmtbuf;
2240 2240
2241 2241 }
2242 2242 return (0);
2243 2243 }
2244 2244
2245 2245
/*
 * fcip_statec_cb: handles all required state change callback notifications
 * it receives from the transport.
 *
 * Runs under FCIP_IN_SC_CB (set/cleared around the body) so that
 * suspend/power-down processing can wait for the callback to finish.
 * Bails out early if the port is detaching/detached or already
 * suspended/powered down.
 */
/* ARGSUSED */
static void
fcip_statec_cb(opaque_t ulp_handle, opaque_t phandle,
    uint32_t port_state, uint32_t port_top, fc_portmap_t changelist[],
    uint32_t listlen, uint32_t sid)
{
	fcip_port_info_t	*fport;
	struct fcip		*fptr;
	struct fcipstr		*slp;
	queue_t			*wrq;
	int			instance;
	int			index;
	struct fcip_routing_table	*frtp;

	/* Map the transport's port handle back to our per-port state */
	fport = fcip_get_port(phandle);

	if (fport == NULL) {
		return;
	}

	fptr = fport->fcipp_fcip;
	ASSERT(fptr != NULL);

	if (fptr == NULL) {
		return;
	}

	instance = ddi_get_instance(fport->fcipp_dip);

	FCIP_TNF_PROBE_4((fcip_statec_cb, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "state change callback",
	    tnf_uint, instance, instance,
	    tnf_uint, S_ID, sid,
	    tnf_int, count, listlen));
	FCIP_DEBUG(FCIP_DEBUG_ELS,
	    (CE_NOTE, "fcip%d, state change callback: state:0x%x, "
	    "S_ID:0x%x, count:0x%x", instance, port_state, sid, listlen));

	mutex_enter(&fptr->fcip_mutex);

	/* Ignore callbacks while detaching, detached, suspended or off */
	if ((fptr->fcip_flags & (FCIP_DETACHING | FCIP_DETACHED)) ||
	    (fptr->fcip_flags & (FCIP_SUSPENDED | FCIP_POWER_DOWN))) {
		mutex_exit(&fptr->fcip_mutex);
		return;
	}

	/*
	 * set fcip flags to indicate we are in the middle of a
	 * state change callback so we can wait till the statechange
	 * is handled before succeeding/failing the SUSPEND/POWER DOWN.
	 */
	fptr->fcip_flags |= FCIP_IN_SC_CB;

	fport->fcipp_pstate = port_state;

	/*
	 * Check if topology changed. If Yes - Modify the broadcast
	 * RTE entries to understand the new broadcast D_IDs
	 */
	if (fport->fcipp_topology != port_top &&
	    (port_top != FC_TOP_UNKNOWN)) {
		/* REMOVE later */
		FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_NOTE,
		    "topology changed: Old topology: 0x%x New topology 0x%x",
		    fport->fcipp_topology, port_top));
		/*
		 * If topology changed - attempt a rediscovery of
		 * devices. Helps specially in Fabric/Public loops
		 * and if on_demand_node_creation is disabled
		 *
		 * Note: fcip_handle_topology() is called with
		 * fptr->fcip_mutex held here.
		 */
		fport->fcipp_topology = port_top;
		fcip_handle_topology(fptr);
	}

	mutex_exit(&fptr->fcip_mutex);

	switch (FC_PORT_STATE_MASK(port_state)) {
	case FC_STATE_ONLINE:
	/* FALLTHROUGH */
	case FC_STATE_LIP:
	/* FALLTHROUGH */
	case FC_STATE_LIP_LBIT_SET:

		/*
		 * nothing to do here actually other than if we
		 * were actually logged onto a port in the devlist
		 * (which indicates active communication between
		 * the host port and the port in the changelist).
		 * If however we are in a private loop or point to
		 * point mode, we need to check for any IP capable
		 * ports and update our routing table.
		 */
		switch (port_top) {
		case FC_TOP_FABRIC:
			/*
			 * This indicates a fabric port with a NameServer.
			 * Check the devlist to see if we are in active
			 * communication with a port on the devlist.
			 */
			FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_NOTE,
			    "Statec_cb: fabric topology"));
			fcip_rt_update(fptr, changelist, listlen);
			break;
		case FC_TOP_NO_NS:
			/*
			 * No nameserver - so treat it like a Private loop
			 * or point to point topology and get a map of
			 * devices on the link and get IP capable ports to
			 * to update the routing table.
			 */
			FCIP_DEBUG(FCIP_DEBUG_ELS,
			    (CE_NOTE, "Statec_cb: NO_NS topology"));
		/* FALLTHROUGH */
		case FC_TOP_PRIVATE_LOOP:
			FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_NOTE,
			    "Statec_cb: Pvt_Loop topology"));
		/* FALLTHROUGH */
		case FC_TOP_PT_PT:
			/*
			 * call get_port_map() and update routing table
			 */
			fcip_rt_update(fptr, changelist, listlen);
			break;
		default:
			FCIP_DEBUG(FCIP_DEBUG_ELS,
			    (CE_NOTE, "Statec_cb: Unknown topology"));
		}

		/*
		 * We should now enable the Queues and permit I/Os
		 * to flow through downstream. The update of routing
		 * table should have flushed out any port entries that
		 * don't exist or are not available after the state change
		 */
		mutex_enter(&fptr->fcip_mutex);
		fptr->fcip_port_state = FCIP_PORT_ONLINE;
		if (fptr->fcip_flags & FCIP_LINK_DOWN) {
			fptr->fcip_flags &= ~FCIP_LINK_DOWN;
		}
		mutex_exit(&fptr->fcip_mutex);

		/*
		 * Enable write queues
		 */
		rw_enter(&fcipstruplock, RW_READER);
		for (slp = fcipstrup; slp != NULL; slp = slp->sl_nextp) {
			if (slp && slp->sl_fcip == fptr) {
				wrq = WR(slp->sl_rq);
				if (wrq->q_flag & QFULL) {
					qenable(wrq);
				}
			}
		}
		rw_exit(&fcipstruplock);
		break;
	case FC_STATE_OFFLINE:
		/*
		 * mark the port_state OFFLINE and wait for it to
		 * become online. Any new messages in this state will
		 * simply be queued back up. If the port does not
		 * come online in a short while, we can begin failing
		 * messages and flush the routing table
		 */
		mutex_enter(&fptr->fcip_mutex);
		fptr->fcip_mark_offline = fptr->fcip_timeout_ticks +
		    FCIP_OFFLINE_TIMEOUT;
		fptr->fcip_port_state = FCIP_PORT_OFFLINE;
		mutex_exit(&fptr->fcip_mutex);

		/*
		 * Mark all Routing table entries as invalid to prevent
		 * any commands from trickling through to ports that
		 * have disappeared from under us
		 */
		mutex_enter(&fptr->fcip_rt_mutex);
		for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
			frtp = fptr->fcip_rtable[index];
			while (frtp) {
				frtp->fcipr_state = PORT_DEVICE_INVALID;
				frtp = frtp->fcipr_next;
			}
		}
		mutex_exit(&fptr->fcip_rt_mutex);

		break;

	case FC_STATE_RESET_REQUESTED:
		/*
		 * Release all Unsolicited buffers back to transport/FCA.
		 * This also means the port state is marked offline - so
		 * we may have to do what OFFLINE state requires us to do.
		 * Care must be taken to wait for any active unsolicited
		 * buffer with the other Streams modules - so wait for
		 * a freeb if the unsolicited buffer is passed back all
		 * the way upstream.
		 */
		mutex_enter(&fptr->fcip_mutex);

#ifdef FCIP_ESBALLOC
		/* Block until all esballoc'ed ubufs come back downstream */
		while (fptr->fcip_ub_upstream) {
			cv_wait(&fptr->fcip_ub_cv, &fptr->fcip_mutex);
		}
#endif	/* FCIP_ESBALLOC */

		fptr->fcip_mark_offline = fptr->fcip_timeout_ticks +
		    FCIP_OFFLINE_TIMEOUT;
		fptr->fcip_port_state = FCIP_PORT_OFFLINE;
		mutex_exit(&fptr->fcip_mutex);
		break;

	case FC_STATE_DEVICE_CHANGE:
		/* Devices came or went; fold the changelist into the RT */
		if (listlen) {
			fcip_rt_update(fptr, changelist, listlen);
		}
		break;
	case FC_STATE_RESET:
		/*
		 * Not much to do I guess - wait for port to become
		 * ONLINE. If the port doesn't become online in a short
		 * while, the upper layers abort any request themselves.
		 * We can just putback the messages in the streams queues
		 * if the link is offline
		 */
		break;
	}
	/* Done - allow suspend/power-down to proceed */
	mutex_enter(&fptr->fcip_mutex);
	fptr->fcip_flags &= ~(FCIP_IN_SC_CB);
	mutex_exit(&fptr->fcip_mutex);
}
2479 2479
2480 2480 /*
2481 2481 * Given a port handle, return the fcip_port_info structure corresponding
2482 2482 * to that port handle. The transport allocates and communicates with
2483 2483 * ULPs using port handles
2484 2484 */
2485 2485 static fcip_port_info_t *
2486 2486 fcip_get_port(opaque_t phandle)
2487 2487 {
2488 2488 fcip_port_info_t *fport;
2489 2489
2490 2490 ASSERT(phandle != NULL);
2491 2491
2492 2492 mutex_enter(&fcip_global_mutex);
2493 2493 fport = fcip_port_head;
2494 2494
2495 2495 while (fport != NULL) {
2496 2496 if (fport->fcipp_handle == phandle) {
2497 2497 /* found */
2498 2498 break;
2499 2499 }
2500 2500 fport = fport->fcipp_next;
2501 2501 }
2502 2502
2503 2503 mutex_exit(&fcip_global_mutex);
2504 2504
2505 2505 return (fport);
2506 2506 }
2507 2507
/*
 * Handle inbound ELS requests received by the transport. We are only
 * intereseted in FARP/InARP mostly.
 *
 * Returns FC_SUCCESS if a FARP request/response was consumed,
 * FC_UNCLAIMED otherwise.  Sets FCIP_IN_ELS_CB around the body so
 * suspend/power-down can wait for the callback to complete.
 */
/* ARGSUSED */
static int
fcip_els_cb(opaque_t ulp_handle, opaque_t phandle,
    fc_unsol_buf_t *buf, uint32_t claimed)
{
	fcip_port_info_t	*fport;
	struct fcip		*fptr;
	int			instance;
	uchar_t			r_ctl;
	uchar_t			ls_code;
	la_els_farp_t		farp_cmd;	/* local copy of the FARP payload */
	la_els_farp_t		*fcmd;
	int			rval = FC_UNCLAIMED;

	fport = fcip_get_port(phandle);
	if (fport == NULL) {
		return (FC_UNCLAIMED);
	}

	fptr = fport->fcipp_fcip;
	ASSERT(fptr != NULL);
	if (fptr == NULL) {
		return (FC_UNCLAIMED);
	}

	instance = ddi_get_instance(fport->fcipp_dip);

	/* Don't claim anything while detaching/suspending */
	mutex_enter(&fptr->fcip_mutex);
	if ((fptr->fcip_flags & (FCIP_DETACHING | FCIP_DETACHED)) ||
	    (fptr->fcip_flags & (FCIP_SUSPENDED | FCIP_POWER_DOWN))) {
		mutex_exit(&fptr->fcip_mutex);
		return (FC_UNCLAIMED);
	}

	/*
	 * set fcip flags to indicate we are in the middle of a
	 * ELS callback so we can wait till the statechange
	 * is handled before succeeding/failing the SUSPEND/POWER DOWN.
	 */
	fptr->fcip_flags |= FCIP_IN_ELS_CB;
	mutex_exit(&fptr->fcip_mutex);

	FCIP_TNF_PROBE_2((fcip_els_cb, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "ELS callback",
	    tnf_uint, instance, instance));

	FCIP_DEBUG(FCIP_DEBUG_ELS,
	    (CE_NOTE, "fcip%d, ELS callback , ", instance));

	/* Dispatch on the routing bits of the frame's R_CTL field */
	r_ctl = buf->ub_frame.r_ctl;
	switch (r_ctl & R_CTL_ROUTING) {
	case R_CTL_EXTENDED_SVC:
		if (r_ctl == R_CTL_ELS_REQ) {
			ls_code = buf->ub_buffer[0];
			if (ls_code == LA_ELS_FARP_REQ) {
				/*
				 * Inbound FARP broadcast request
				 */
				if (buf->ub_bufsize != sizeof (la_els_farp_t)) {
					FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN,
					    "Invalid FARP req buffer size "
					    "expected 0x%lx, got 0x%x",
					    (long)(sizeof (la_els_farp_t)),
					    buf->ub_bufsize));
					rval = FC_UNCLAIMED;
					goto els_cb_done;
				}
				/*
				 * NOTE(review): this casts the
				 * fc_unsol_buf_t itself, not
				 * buf->ub_buffer, to the FARP payload
				 * (and the bcopy below copies from buf
				 * too) -- confirm against the
				 * fc_unsol_buf_t layout before relying
				 * on these fields.
				 */
				fcmd = (la_els_farp_t *)buf;
				/* Only respond if the request targets us */
				if (fcip_wwn_compare(&fcmd->resp_nwwn,
				    &fport->fcipp_nwwn,
				    FCIP_COMPARE_NWWN) != 0) {
					rval = FC_UNCLAIMED;
					goto els_cb_done;
				}
				/*
				 * copy the FARP request and release the
				 * unsolicited buffer
				 */
				fcmd = &farp_cmd;
				bcopy((void *)buf, (void *)fcmd,
				    sizeof (la_els_farp_t));
				(void) fc_ulp_ubrelease(fport->fcipp_handle, 1,
				    &buf->ub_token);

				if (fcip_farp_supported &&
				    fcip_handle_farp_request(fptr, fcmd) ==
				    FC_SUCCESS) {
					/*
					 * We successfully sent out a FARP
					 * reply to the requesting port
					 */
					rval = FC_SUCCESS;
					goto els_cb_done;
				} else {
					rval = FC_UNCLAIMED;
					goto els_cb_done;
				}
			}
		} else if (r_ctl == R_CTL_ELS_RSP) {
			ls_code = buf->ub_buffer[0];
			if (ls_code == LA_ELS_FARP_REPLY) {
				/*
				 * We received a REPLY to our FARP request
				 */
				if (buf->ub_bufsize != sizeof (la_els_farp_t)) {
					FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN,
					    "Invalid FARP req buffer size "
					    "expected 0x%lx, got 0x%x",
					    (long)(sizeof (la_els_farp_t)),
					    buf->ub_bufsize));
					rval = FC_UNCLAIMED;
					goto els_cb_done;
				}
				/* Copy payload, then give the ubuf back */
				fcmd = &farp_cmd;
				bcopy((void *)buf, (void *)fcmd,
				    sizeof (la_els_farp_t));
				(void) fc_ulp_ubrelease(fport->fcipp_handle, 1,
				    &buf->ub_token);
				if (fcip_farp_supported &&
				    fcip_handle_farp_response(fptr, fcmd) ==
				    FC_SUCCESS) {
					FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_NOTE,
					    "Successfully recevied a FARP "
					    "response"));
					/* Wake the thread awaiting the reply */
					mutex_enter(&fptr->fcip_mutex);
					fptr->fcip_farp_rsp_flag = 1;
					cv_signal(&fptr->fcip_farp_cv);
					mutex_exit(&fptr->fcip_mutex);
					rval = FC_SUCCESS;
					goto els_cb_done;
				} else {
					FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN,
					    "Unable to handle a FARP response "
					    "receive"));
					rval = FC_UNCLAIMED;
					goto els_cb_done;
				}
			}
		}
		break;
	default:
		break;
	}
els_cb_done:
	mutex_enter(&fptr->fcip_mutex);
	fptr->fcip_flags &= ~(FCIP_IN_ELS_CB);
	mutex_exit(&fptr->fcip_mutex);
	return (rval);
}
2661 2661
2662 2662
2663 2663 /*
2664 2664 * Handle inbound FARP requests
2665 2665 */
2666 2666 static int
2667 2667 fcip_handle_farp_request(struct fcip *fptr, la_els_farp_t *fcmd)
2668 2668 {
2669 2669 fcip_pkt_t *fcip_pkt;
2670 2670 fc_packet_t *fc_pkt;
2671 2671 fcip_port_info_t *fport = fptr->fcip_port_info;
2672 2672 int rval = FC_FAILURE;
2673 2673 opaque_t fca_dev;
2674 2674 fc_portmap_t map;
2675 2675 struct fcip_routing_table *frp;
2676 2676 struct fcip_dest *fdestp;
2677 2677
2678 2678 /*
2679 2679 * Add an entry for the remote port into our routing and destination
2680 2680 * tables.
2681 2681 */
2682 2682 map.map_did = fcmd->req_id;
2683 2683 map.map_hard_addr.hard_addr = fcmd->req_id.port_id;
2684 2684 map.map_state = PORT_DEVICE_VALID;
2685 2685 map.map_type = PORT_DEVICE_NEW;
2686 2686 map.map_flags = 0;
2687 2687 map.map_pd = NULL;
2688 2688 bcopy((void *)&fcmd->req_pwwn, (void *)&map.map_pwwn,
2689 2689 sizeof (la_wwn_t));
2690 2690 bcopy((void *)&fcmd->req_nwwn, (void *)&map.map_nwwn,
2691 2691 sizeof (la_wwn_t));
2692 2692 fcip_rt_update(fptr, &map, 1);
2693 2693 mutex_enter(&fptr->fcip_rt_mutex);
2694 2694 frp = fcip_lookup_rtable(fptr, &fcmd->req_pwwn, FCIP_COMPARE_NWWN);
2695 2695 mutex_exit(&fptr->fcip_rt_mutex);
2696 2696
2697 2697 fdestp = fcip_add_dest(fptr, frp);
2698 2698
2699 2699 fcip_pkt = fcip_ipkt_alloc(fptr, sizeof (la_els_farp_t),
2700 2700 sizeof (la_els_farp_t), NULL, KM_SLEEP);
2701 2701 if (fcip_pkt == NULL) {
2702 2702 rval = FC_FAILURE;
2703 2703 goto farp_done;
2704 2704 }
2705 2705 /*
2706 2706 * Fill in our port's PWWN and NWWN
2707 2707 */
2708 2708 fcmd->resp_pwwn = fport->fcipp_pwwn;
2709 2709 fcmd->resp_nwwn = fport->fcipp_nwwn;
2710 2710
2711 2711 fcip_init_unicast_pkt(fcip_pkt, fport->fcipp_sid,
2712 2712 fcmd->req_id, NULL);
2713 2713
2714 2714 fca_dev =
2715 2715 fc_ulp_get_fca_device(fport->fcipp_handle, fcmd->req_id);
2716 2716 fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
2717 2717 fc_pkt->pkt_cmd_fhdr.r_ctl = R_CTL_ELS_RSP;
2718 2718 fc_pkt->pkt_fca_device = fca_dev;
2719 2719 fcip_pkt->fcip_pkt_dest = fdestp;
2720 2720
2721 2721 /*
2722 2722 * Attempt a PLOGI again
2723 2723 */
2724 2724 if (fcmd->resp_flags & FARP_INIT_P_LOGI) {
2725 2725 if (fcip_do_plogi(fptr, frp) != FC_SUCCESS) {
2726 2726 /*
2727 2727 * Login to the remote port failed. There is no
2728 2728 * point continuing with the FARP request further
2729 2729 * so bail out here.
2730 2730 */
2731 2731 frp->fcipr_state = PORT_DEVICE_INVALID;
2732 2732 rval = FC_FAILURE;
2733 2733 goto farp_done;
2734 2734 }
2735 2735 }
2736 2736
2737 2737 FCIP_CP_OUT(fcmd, fc_pkt->pkt_cmd, fc_pkt->pkt_cmd_acc,
2738 2738 sizeof (la_els_farp_t));
2739 2739
2740 2740 rval = fc_ulp_issue_els(fport->fcipp_handle, fc_pkt);
2741 2741 if (rval != FC_SUCCESS) {
2742 2742 FCIP_TNF_PROBE_2((fcip_handle_farp_request, "fcip io",
2743 2743 /* CSTYLED */, tnf_string, msg,
2744 2744 "fcip_transport of farp reply failed",
2745 2745 tnf_uint, rval, rval));
2746 2746 FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN,
2747 2747 "fcip_transport of farp reply failed 0x%x", rval));
2748 2748 }
2749 2749
2750 2750 farp_done:
2751 2751 return (rval);
2752 2752 }
2753 2753
2754 2754
2755 2755 /*
2756 2756 * Handle FARP responses to our FARP requests. When we receive a FARP
2757 2757 * reply, we need to add the entry for the Port that replied into our
2758 2758 * routing and destination hash tables. It is possible that the remote
2759 2759 * port did not login into us (FARP responses can be received without
2760 2760 * a PLOGI)
2761 2761 */
2762 2762 static int
2763 2763 fcip_handle_farp_response(struct fcip *fptr, la_els_farp_t *fcmd)
2764 2764 {
2765 2765 int rval = FC_FAILURE;
2766 2766 fc_portmap_t map;
2767 2767 struct fcip_routing_table *frp;
2768 2768 struct fcip_dest *fdestp;
2769 2769
2770 2770 /*
2771 2771 * Add an entry for the remote port into our routing and destination
2772 2772 * tables.
2773 2773 */
2774 2774 map.map_did = fcmd->dest_id;
2775 2775 map.map_hard_addr.hard_addr = fcmd->dest_id.port_id;
2776 2776 map.map_state = PORT_DEVICE_VALID;
2777 2777 map.map_type = PORT_DEVICE_NEW;
2778 2778 map.map_flags = 0;
2779 2779 map.map_pd = NULL;
2780 2780 bcopy((void *)&fcmd->resp_pwwn, (void *)&map.map_pwwn,
2781 2781 sizeof (la_wwn_t));
2782 2782 bcopy((void *)&fcmd->resp_nwwn, (void *)&map.map_nwwn,
2783 2783 sizeof (la_wwn_t));
2784 2784 fcip_rt_update(fptr, &map, 1);
2785 2785 mutex_enter(&fptr->fcip_rt_mutex);
2786 2786 frp = fcip_lookup_rtable(fptr, &fcmd->resp_pwwn, FCIP_COMPARE_NWWN);
2787 2787 mutex_exit(&fptr->fcip_rt_mutex);
2788 2788
2789 2789 fdestp = fcip_add_dest(fptr, frp);
2790 2790
2791 2791 if (fdestp != NULL) {
2792 2792 rval = FC_SUCCESS;
2793 2793 }
2794 2794 return (rval);
2795 2795 }
2796 2796
2797 2797
/*
 * Total length of the FC network, LLC/SNAP and IP headers that precede
 * the payload in an inbound frame.  The expansion is fully parenthesized
 * so the macro is safe inside larger expressions (e.g. multiplication or
 * comparison against it).
 */
#define	FCIP_HDRS_LENGTH	\
	(sizeof (fcph_network_hdr_t) + sizeof (llc_snap_hdr_t) + \
	sizeof (ipha_t))
2800 2800
/*
 * fcip_data_cb is the heart of most IP operations. This routine is called
 * by the transport when any unsolicited IP data arrives at a port (which
 * is almost all IP data). This routine then strips off the Network header
 * from the payload (after authenticating the received payload of course),
 * creates a message blk and sends the data upstream. You will see ugly
 * #defines because of problems with using esballoc() as opposed to
 * allocb to prevent an extra copy of data. We should probably move to
 * esballoc entirely when the MTU eventually will be larger than 1500 bytes
 * since copies will get more expensive then. At 1500 byte MTUs, there is
 * no noticeable difference between using allocb and esballoc. The other
 * caveat is that the qlc firmware still cannot tell us accurately the
 * no. of valid bytes in the unsol buffer it DMA'ed so we have to resort
 * to looking into the IP header and hoping that the no. of bytes specified
 * in the header was actually received.
 */
/* ARGSUSED */
static int
fcip_data_cb(opaque_t ulp_handle, opaque_t phandle,
    fc_unsol_buf_t *buf, uint32_t claimed)
{
	fcip_port_info_t	*fport;
	struct fcip		*fptr;
	fcph_network_hdr_t	*nhdr;
	llc_snap_hdr_t		*snaphdr;
	mblk_t			*bp;
	uint32_t		len;
	uint32_t		hdrlen;
	ushort_t		type;
	ipha_t			*iphdr;
	int			rval;

#ifdef FCIP_ESBALLOC
	frtn_t			*free_ubuf;
	struct fcip_esballoc_arg	*fesb_argp;
#endif /* FCIP_ESBALLOC */

	/* map the transport's port handle back to our per-port soft state */
	fport = fcip_get_port(phandle);
	if (fport == NULL) {
		return (FC_UNCLAIMED);
	}

	fptr = fport->fcipp_fcip;
	ASSERT(fptr != NULL);

	if (fptr == NULL) {
		return (FC_UNCLAIMED);
	}

	mutex_enter(&fptr->fcip_mutex);
	if ((fptr->fcip_flags & (FCIP_DETACHING | FCIP_DETACHED)) ||
	    (fptr->fcip_flags & (FCIP_SUSPENDED | FCIP_POWER_DOWN))) {
		mutex_exit(&fptr->fcip_mutex);
		rval = FC_UNCLAIMED;
		/*
		 * NOTE(review): this jump clears FCIP_IN_DATA_CB at
		 * data_cb_done even though the flag was never set on this
		 * path; harmless since the &= ~ is idempotent.
		 */
		goto data_cb_done;
	}

	/*
	 * set fcip flags to indicate we are in the middle of a
	 * data callback so we can wait till the statechange
	 * is handled before succeeding/failing the SUSPEND/POWER DOWN.
	 */
	fptr->fcip_flags |= FCIP_IN_DATA_CB;
	mutex_exit(&fptr->fcip_mutex);

	FCIP_TNF_PROBE_2((fcip_data_cb, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "data callback",
	    tnf_int, instance, ddi_get_instance(fport->fcipp_dip)));
	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
	    (CE_NOTE, "fcip%d, data callback",
	    ddi_get_instance(fport->fcipp_dip)));

	/*
	 * get to the network and snap headers in the payload
	 */
	nhdr = (fcph_network_hdr_t *)buf->ub_buffer;
	snaphdr = (llc_snap_hdr_t *)(buf->ub_buffer +
	    sizeof (fcph_network_hdr_t));

	hdrlen = sizeof (fcph_network_hdr_t) + sizeof (llc_snap_hdr_t);

	/*
	 * get the IP header to obtain the no. of bytes we need to read
	 * off from the unsol buffer. This obviously is because not all
	 * data fills up the unsol buffer completely and the firmware
	 * doesn't tell us how many valid bytes are in there as well
	 */
	iphdr = (ipha_t *)(buf->ub_buffer + hdrlen);
	/* byte-swap the SNAP pid in place (this modifies the unsol buffer) */
	snaphdr->pid = BE_16(snaphdr->pid);
	type = snaphdr->pid;

	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
	    (CE_CONT, "SNAPHDR: dsap %x, ssap %x, ctrl %x\n",
	    snaphdr->dsap, snaphdr->ssap, snaphdr->ctrl));

	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
	    (CE_CONT, "oui[0] 0x%x oui[1] 0x%x oui[2] 0x%x pid 0x%x\n",
	    snaphdr->oui[0], snaphdr->oui[1], snaphdr->oui[2], snaphdr->pid));

	/* Authenticate, Authenticate */
	if (type == ETHERTYPE_IP) {
		len = hdrlen + BE_16(iphdr->ipha_length);
	} else if (type == ETHERTYPE_ARP) {
		/* 28 is presumably the IPv4 ARP message size - confirm */
		len = hdrlen + 28;
	} else {
		/* unknown type: take the whole buffer as-is */
		len = buf->ub_bufsize;
	}

	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
	    (CE_CONT, "effective packet length is %d bytes.\n", len));

	/* sanity-bound the claimed length against header size and UB size */
	if (len < hdrlen || len > FCIP_UB_SIZE) {
		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
		    (CE_NOTE, "Incorrect buffer size %d bytes", len));
		rval = FC_UNCLAIMED;
		goto data_cb_done;
	}

	if (buf->ub_frame.type != FC_TYPE_IS8802_SNAP) {
		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM, (CE_NOTE, "Not IP/ARP data"));
		rval = FC_UNCLAIMED;
		goto data_cb_done;
	}

	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM, (CE_NOTE, "checking wwn"));

	/*
	 * Claim the frame only if it is addressed to our node WWN or is a
	 * broadcast; optionally snoop ARP broadcasts to learn new routes.
	 */
	if ((fcip_wwn_compare(&nhdr->net_dest_addr, &fport->fcipp_pwwn,
	    FCIP_COMPARE_NWWN) != 0) &&
	    (!IS_BROADCAST_ADDR(&nhdr->net_dest_addr))) {
		rval = FC_UNCLAIMED;
		goto data_cb_done;
	} else if (fcip_cache_on_arp_broadcast &&
	    IS_BROADCAST_ADDR(&nhdr->net_dest_addr)) {
		fcip_cache_arp_broadcast(fptr, buf);
	}

	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM, (CE_NOTE, "Allocate streams block"));

	/*
	 * Using esballoc instead of allocb should be faster, at least at
	 * larger MTUs than 1500 bytes. Someday we'll get there :)
	 */
#if defined(FCIP_ESBALLOC)
	/*
	 * allocate memory for the frtn function arg. The Function
	 * (fcip_ubfree) arg is a struct fcip_esballoc_arg type
	 * which contains pointers to the unsol buffer and the
	 * opaque port handle for releasing the unsol buffer back to
	 * the FCA for reuse
	 */
	fesb_argp = (struct fcip_esballoc_arg *)
	    kmem_zalloc(sizeof (struct fcip_esballoc_arg), KM_NOSLEEP);

	if (fesb_argp == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
		    (CE_WARN, "esballoc of mblk failed in data_cb"));
		rval = FC_UNCLAIMED;
		goto data_cb_done;
	}
	/*
	 * Check with KM_NOSLEEP
	 */
	free_ubuf = (frtn_t *)kmem_zalloc(sizeof (frtn_t), KM_NOSLEEP);
	if (free_ubuf == NULL) {
		kmem_free(fesb_argp, sizeof (struct fcip_esballoc_arg));
		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
		    (CE_WARN, "esballoc of mblk failed in data_cb"));
		rval = FC_UNCLAIMED;
		goto data_cb_done;
	}

	/*
	 * Wire up the free routine: when the stream head frees the mblk,
	 * fcip_ubfree() releases the unsol buffer back to the FCA.
	 */
	fesb_argp->frtnp = free_ubuf;
	fesb_argp->buf = buf;
	fesb_argp->phandle = phandle;
	free_ubuf->free_func = fcip_ubfree;
	free_ubuf->free_arg = (char *)fesb_argp;
	if ((bp = (mblk_t *)esballoc((unsigned char *)buf->ub_buffer,
	    len, BPRI_MED, free_ubuf)) == NULL) {
		kmem_free(fesb_argp, sizeof (struct fcip_esballoc_arg));
		kmem_free(free_ubuf, sizeof (frtn_t));
		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
		    (CE_WARN, "esballoc of mblk failed in data_cb"));
		rval = FC_UNCLAIMED;
		goto data_cb_done;
	}
#elif !defined(FCIP_ESBALLOC)
	/*
	 * allocate streams mblk and copy the contents of the
	 * unsolicited buffer into this newly alloc'ed mblk
	 */
	if ((bp = (mblk_t *)fcip_allocb((size_t)len, BPRI_LO)) == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
		    (CE_WARN, "alloc of mblk failed in data_cb"));
		rval = FC_UNCLAIMED;
		goto data_cb_done;
	}

	/*
	 * Unsolicited buffers handed up to us from the FCA must be
	 * endian clean so just bcopy the data into our mblk. Else
	 * we may have to either copy the data byte by byte or
	 * use the ddi_rep_get* routines to do the copy for us.
	 */
	bcopy(buf->ub_buffer, bp->b_rptr, len);

	/*
	 * for esballoc'ed mblks - free the UB in the frtn function
	 * along with the memory allocated for the function arg.
	 * for allocb'ed mblk - release the unsolicited buffer here
	 */
	(void) fc_ulp_ubrelease(phandle, 1, &buf->ub_token);

#endif /* FCIP_ESBALLOC */

	bp->b_wptr = bp->b_rptr + len;
	fptr->fcip_ipackets++;

	if (type == ETHERTYPE_IP) {
		mutex_enter(&fptr->fcip_mutex);
		fptr->fcip_ub_upstream++;
		mutex_exit(&fptr->fcip_mutex);
		/* IP goes up headerless: strip the net + llc/snap headers */
		bp->b_rptr += hdrlen;

		/*
		 * Check if ipq is valid in the sendup thread
		 */
		if (fcip_sendup_alloc_enque(fptr, bp, NULL) != FC_SUCCESS) {
			freemsg(bp);
		}
	} else {
		/*
		 * We won't get ethernet 802.3 packets in FCIP but we may get
		 * types other than ETHERTYPE_IP, such as ETHERTYPE_ARP. Let
		 * fcip_sendup() do the matching.
		 */
		mutex_enter(&fptr->fcip_mutex);
		fptr->fcip_ub_upstream++;
		mutex_exit(&fptr->fcip_mutex);
		if (fcip_sendup_alloc_enque(fptr, bp,
		    fcip_accept) != FC_SUCCESS) {
			freemsg(bp);
		}
	}

	rval = FC_SUCCESS;

	/*
	 * Unset fcip_flags to indicate we are out of callback and return
	 */
data_cb_done:
	mutex_enter(&fptr->fcip_mutex);
	fptr->fcip_flags &= ~(FCIP_IN_DATA_CB);
	mutex_exit(&fptr->fcip_mutex);
	return (rval);
}
3056 3056
3057 3057 #if !defined(FCIP_ESBALLOC)
3058 3058 /*
3059 3059 * Allocate a message block for the inbound data to be sent upstream.
3060 3060 */
3061 3061 static void *
3062 3062 fcip_allocb(size_t size, uint_t pri)
3063 3063 {
3064 3064 mblk_t *mp;
3065 3065
3066 3066 if ((mp = allocb(size, pri)) == NULL) {
3067 3067 return (NULL);
3068 3068 }
3069 3069 return (mp);
3070 3070 }
3071 3071
3072 3072 #endif
3073 3073
3074 3074 /*
3075 3075 * This helper routine kmem cache alloc's a sendup element for enquing
3076 3076 * into the sendup list for callbacks upstream from the dedicated sendup
3077 3077 * thread. We enque the msg buf into the sendup list and cv_signal the
3078 3078 * sendup thread to finish the callback for us.
3079 3079 */
3080 3080 static int
3081 3081 fcip_sendup_alloc_enque(struct fcip *fptr, mblk_t *mp, struct fcipstr *(*f)())
3082 3082 {
3083 3083 struct fcip_sendup_elem *msg_elem;
3084 3084 int rval = FC_FAILURE;
3085 3085
3086 3086 FCIP_TNF_PROBE_1((fcip_sendup_alloc_enque, "fcip io", /* CSTYLED */,
3087 3087 tnf_string, msg, "sendup msg enque"));
3088 3088 msg_elem = kmem_cache_alloc(fptr->fcip_sendup_cache, KM_NOSLEEP);
3089 3089 if (msg_elem == NULL) {
3090 3090 /* drop pkt to floor - update stats */
3091 3091 rval = FC_FAILURE;
3092 3092 goto sendup_alloc_done;
3093 3093 }
3094 3094 msg_elem->fcipsu_mp = mp;
3095 3095 msg_elem->fcipsu_func = f;
3096 3096
3097 3097 mutex_enter(&fptr->fcip_sendup_mutex);
3098 3098 if (fptr->fcip_sendup_head == NULL) {
3099 3099 fptr->fcip_sendup_head = fptr->fcip_sendup_tail = msg_elem;
3100 3100 } else {
3101 3101 fptr->fcip_sendup_tail->fcipsu_next = msg_elem;
3102 3102 fptr->fcip_sendup_tail = msg_elem;
3103 3103 }
3104 3104 fptr->fcip_sendup_cnt++;
3105 3105 cv_signal(&fptr->fcip_sendup_cv);
3106 3106 mutex_exit(&fptr->fcip_sendup_mutex);
3107 3107 rval = FC_SUCCESS;
3108 3108
3109 3109 sendup_alloc_done:
3110 3110 return (rval);
3111 3111 }
3112 3112
3113 3113 /*
3114 3114 * One of the ways of performing the WWN to D_ID mapping required for
3115 3115 * IPFC data is to cache the unsolicited ARP broadcast messages received
3116 3116 * and update the routing table to add entry for the destination port
3117 3117 * if we are the intended recipient of the ARP broadcast message. This is
3118 3118 * one of the methods recommended in the rfc to obtain the WWN to D_ID mapping
3119 3119 * but is not typically used unless enabled. The driver prefers to use the
3120 3120 * nameserver/lilp map to obtain this mapping.
3121 3121 */
3122 3122 static void
3123 3123 fcip_cache_arp_broadcast(struct fcip *fptr, fc_unsol_buf_t *buf)
3124 3124 {
3125 3125 fcip_port_info_t *fport;
3126 3126 fcph_network_hdr_t *nhdr;
3127 3127 struct fcip_routing_table *frp;
3128 3128 fc_portmap_t map;
3129 3129
3130 3130 fport = fptr->fcip_port_info;
3131 3131 if (fport == NULL) {
3132 3132 return;
3133 3133 }
3134 3134 ASSERT(fport != NULL);
3135 3135
3136 3136 nhdr = (fcph_network_hdr_t *)buf->ub_buffer;
3137 3137
3138 3138 mutex_enter(&fptr->fcip_rt_mutex);
3139 3139 frp = fcip_lookup_rtable(fptr, &nhdr->net_src_addr, FCIP_COMPARE_NWWN);
3140 3140 mutex_exit(&fptr->fcip_rt_mutex);
3141 3141 if (frp == NULL) {
3142 3142 map.map_did.port_id = buf->ub_frame.s_id;
3143 3143 map.map_hard_addr.hard_addr = buf->ub_frame.s_id;
3144 3144 map.map_state = PORT_DEVICE_VALID;
3145 3145 map.map_type = PORT_DEVICE_NEW;
3146 3146 map.map_flags = 0;
3147 3147 map.map_pd = NULL;
3148 3148 bcopy((void *)&nhdr->net_src_addr, (void *)&map.map_pwwn,
3149 3149 sizeof (la_wwn_t));
3150 3150 bcopy((void *)&nhdr->net_src_addr, (void *)&map.map_nwwn,
3151 3151 sizeof (la_wwn_t));
3152 3152 fcip_rt_update(fptr, &map, 1);
3153 3153 mutex_enter(&fptr->fcip_rt_mutex);
3154 3154 frp = fcip_lookup_rtable(fptr, &nhdr->net_src_addr,
3155 3155 FCIP_COMPARE_NWWN);
3156 3156 mutex_exit(&fptr->fcip_rt_mutex);
3157 3157
3158 3158 (void) fcip_add_dest(fptr, frp);
3159 3159 }
3160 3160
3161 3161 }
3162 3162
3163 3163 /*
3164 3164 * This is a dedicated thread to do callbacks from fcip's data callback
3165 3165 * routines into the modules upstream. The reason for this thread is
3166 3166 * the data callback function can be called from an interrupt context and
3167 3167 * the upstream modules *can* make calls downstream in the same thread
3168 3168 * context. If the call is to a fabric port which is not yet in our
3169 3169 * routing tables, we may have to query the nameserver/fabric for the
3170 3170 * MAC addr to Port_ID mapping which may be blocking calls.
3171 3171 */
static void
fcip_sendup_thr(void *arg)
{
	struct fcip	*fptr = (struct fcip *)arg;
	struct fcip_sendup_elem	*msg_elem;
	queue_t	*ip4q = NULL;

	/* register with the CPR (suspend/resume) framework */
	CALLB_CPR_INIT(&fptr->fcip_cpr_info, &fptr->fcip_sendup_mutex,
	    callb_generic_cpr, "fcip_sendup_thr");

	mutex_enter(&fptr->fcip_sendup_mutex);
	for (;;) {

		/* sleep until there is work or the thread is torn down */
		while (fptr->fcip_sendup_thr_initted &&
		    fptr->fcip_sendup_head == NULL) {
			CALLB_CPR_SAFE_BEGIN(&fptr->fcip_cpr_info);
			cv_wait(&fptr->fcip_sendup_cv,
			    &fptr->fcip_sendup_mutex);
			CALLB_CPR_SAFE_END(&fptr->fcip_cpr_info,
			    &fptr->fcip_sendup_mutex);
		}

		/* detach cleared the initted flag - exit the service loop */
		if (fptr->fcip_sendup_thr_initted == 0) {
			break;
		}

		FCIP_TNF_PROBE_1((fcip_sendup_thr, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "fcip sendup thr - new msg"));

		/* dequeue the head element, then drop the list lock */
		msg_elem = fptr->fcip_sendup_head;
		fptr->fcip_sendup_head = msg_elem->fcipsu_next;
		msg_elem->fcipsu_next = NULL;
		mutex_exit(&fptr->fcip_sendup_mutex);

		if (msg_elem->fcipsu_func == NULL) {
			/*
			 * Message for ipq. Check to see if the ipq is
			 * still valid. Since the thread is asynchronous,
			 * there could have been a close on the stream
			 */
			mutex_enter(&fptr->fcip_mutex);
			if (fptr->fcip_ipq && canputnext(fptr->fcip_ipq)) {
				ip4q = fptr->fcip_ipq;
				mutex_exit(&fptr->fcip_mutex);
				putnext(ip4q, msg_elem->fcipsu_mp);
			} else {
				/* stream closed or flow controlled - drop */
				mutex_exit(&fptr->fcip_mutex);
				freemsg(msg_elem->fcipsu_mp);
			}
		} else {
			/* non-IP (e.g. ARP): let the match function route it */
			fcip_sendup(fptr, msg_elem->fcipsu_mp,
			    msg_elem->fcipsu_func);
		}

#if !defined(FCIP_ESBALLOC)
		/*
		 * for allocb'ed mblk - decrement upstream count here
		 * (esballoc'ed mblks decrement in fcip_ubfree instead)
		 */
		mutex_enter(&fptr->fcip_mutex);
		ASSERT(fptr->fcip_ub_upstream > 0);
		fptr->fcip_ub_upstream--;
		mutex_exit(&fptr->fcip_mutex);
#endif /* FCIP_ESBALLOC */

		kmem_cache_free(fptr->fcip_sendup_cache, (void *)msg_elem);
		mutex_enter(&fptr->fcip_sendup_mutex);
		fptr->fcip_sendup_cnt--;
	}


	/* CALLB_CPR_EXIT drops fcip_sendup_mutex on the way out */
#ifndef __lock_lint
	CALLB_CPR_EXIT(&fptr->fcip_cpr_info);
#else
	mutex_exit(&fptr->fcip_sendup_mutex);
#endif /* __lock_lint */

	/* Wake up fcip detach thread by the end */
	cv_signal(&fptr->fcip_sendup_cv);

	thread_exit();
}
3253 3253
3254 3254 #ifdef FCIP_ESBALLOC
3255 3255
3256 3256 /*
3257 3257 * called from the stream head when it is done using an unsolicited buffer.
3258 3258 * We release this buffer then to the FCA for reuse.
3259 3259 */
3260 3260 static void
3261 3261 fcip_ubfree(char *arg)
3262 3262 {
3263 3263 struct fcip_esballoc_arg *fesb_argp = (struct fcip_esballoc_arg *)arg;
3264 3264 fc_unsol_buf_t *ubuf;
3265 3265 frtn_t *frtnp;
3266 3266 fcip_port_info_t *fport;
3267 3267 struct fcip *fptr;
3268 3268
3269 3269
3270 3270 fport = fcip_get_port(fesb_argp->phandle);
3271 3271 fptr = fport->fcipp_fcip;
3272 3272
3273 3273 ASSERT(fesb_argp != NULL);
3274 3274 ubuf = fesb_argp->buf;
3275 3275 frtnp = fesb_argp->frtnp;
3276 3276
3277 3277
3278 3278 FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
3279 3279 (CE_WARN, "freeing ubuf after esballoc in fcip_ubfree"));
3280 3280 (void) fc_ulp_ubrelease(fesb_argp->phandle, 1, &ubuf->ub_token);
3281 3281
3282 3282 mutex_enter(&fptr->fcip_mutex);
3283 3283 ASSERT(fptr->fcip_ub_upstream > 0);
3284 3284 fptr->fcip_ub_upstream--;
3285 3285 cv_signal(&fptr->fcip_ub_cv);
3286 3286 mutex_exit(&fptr->fcip_mutex);
3287 3287
3288 3288 kmem_free(frtnp, sizeof (frtn_t));
3289 3289 kmem_free(fesb_argp, sizeof (struct fcip_esballoc_arg));
3290 3290 }
3291 3291
3292 3292 #endif /* FCIP_ESBALLOC */
3293 3293
3294 3294 /*
3295 3295 * handle data other than that of type ETHERTYPE_IP and send it on its
3296 3296 * way upstream to the right streams module to handle
3297 3297 */
static void
fcip_sendup(struct fcip *fptr, mblk_t *mp, struct fcipstr *(*acceptfunc)())
{
	struct fcipstr	*slp, *nslp;
	la_wwn_t	*dhostp;
	mblk_t		*nmp;
	uint32_t	isgroupaddr;
	int		type;
	uint32_t	hdrlen;
	fcph_network_hdr_t	*nhdr;
	llc_snap_hdr_t		*snaphdr;

	FCIP_TNF_PROBE_1((fcip_sendup, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "fcip sendup"));
	/* pull the destination WWN and SAP type out of the frame headers */
	nhdr = (fcph_network_hdr_t *)mp->b_rptr;
	snaphdr =
	    (llc_snap_hdr_t *)(mp->b_rptr + sizeof (fcph_network_hdr_t));
	dhostp = &nhdr->net_dest_addr;
	type = snaphdr->pid;
	hdrlen = sizeof (fcph_network_hdr_t) + sizeof (llc_snap_hdr_t);

	/* No group address with fibre channel */
	isgroupaddr = 0;

	/*
	 * While holding a reader lock on the linked list of streams structures,
	 * attempt to match the address criteria for each stream
	 * and pass up the raw M_DATA ("fastpath") or a DL_UNITDATA_IND.
	 */

	rw_enter(&fcipstruplock, RW_READER);

	if ((slp = (*acceptfunc)(fcipstrup, fptr, type, dhostp)) == NULL) {
		rw_exit(&fcipstruplock);
		FCIP_TNF_PROBE_1((fcip_sendup, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "fcip sendup - no slp"));
		/* no stream matched - drop the frame */
		freemsg(mp);
		return;
	}

	/*
	 * Loop on matching open streams until (*acceptfunc)() returns NULL.
	 * Every matching stream except the last gets a dupmsg() copy;
	 * the final match consumes mp itself (below).
	 */
	for (; nslp = (*acceptfunc)(slp->sl_nextp, fptr, type, dhostp);
	    slp = nslp) {
		if (canputnext(slp->sl_rq)) {
			if (nmp = dupmsg(mp)) {
				if ((slp->sl_flags & FCIP_SLFAST) &&
				    !isgroupaddr) {
					/* fastpath: headers stripped, raw M_DATA */
					nmp->b_rptr += hdrlen;
					putnext(slp->sl_rq, nmp);
				} else if (slp->sl_flags & FCIP_SLRAW) {
					/* No headers when FCIP_SLRAW is set */
					putnext(slp->sl_rq, nmp);
				} else if ((nmp = fcip_addudind(fptr, nmp,
				    nhdr, type))) {
					putnext(slp->sl_rq, nmp);
				}
			}
		}
	}

	/*
	 * Do the last one.
	 */
	if (canputnext(slp->sl_rq)) {
		if (slp->sl_flags & FCIP_SLFAST) {
			mp->b_rptr += hdrlen;
			putnext(slp->sl_rq, mp);
		} else if (slp->sl_flags & FCIP_SLRAW) {
			putnext(slp->sl_rq, mp);
		} else if ((mp = fcip_addudind(fptr, mp, nhdr, type))) {
			putnext(slp->sl_rq, mp);
		}
	} else {
		/* flow controlled - drop rather than queue */
		freemsg(mp);
	}
	FCIP_TNF_PROBE_1((fcip_sendup, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "fcip sendup done"));

	rw_exit(&fcipstruplock);
}
3380 3380
3381 3381 /*
3382 3382 * Match the stream based on type and wwn if necessary.
3383 3383 * Destination wwn dhostp is passed to this routine is reserved
3384 3384 * for future usage. We don't need to use it right now since port
3385 3385 * to fcip instance mapping is unique and wwn is already validated when
3386 3386 * packet comes to fcip.
3387 3387 */
3388 3388 /* ARGSUSED */
3389 3389 static struct fcipstr *
3390 3390 fcip_accept(struct fcipstr *slp, struct fcip *fptr, int type, la_wwn_t *dhostp)
3391 3391 {
3392 3392 t_uscalar_t sap;
3393 3393
3394 3394 FCIP_TNF_PROBE_1((fcip_accept, "fcip io", /* CSTYLED */,
3395 3395 tnf_string, msg, "fcip accept"));
3396 3396
3397 3397 for (; slp; slp = slp->sl_nextp) {
3398 3398 sap = slp->sl_sap;
3399 3399 FCIP_DEBUG(FCIP_DEBUG_UPSTREAM, (CE_CONT,
3400 3400 "fcip_accept: checking next sap = %x, type = %x",
3401 3401 sap, type));
3402 3402
3403 3403 if ((slp->sl_fcip == fptr) && (type == sap)) {
3404 3404 return (slp);
3405 3405 }
3406 3406 }
3407 3407 return (NULL);
3408 3408 }
3409 3409
3410 3410 /*
3411 3411 * Handle DL_UNITDATA_IND messages
3412 3412 */
3413 3413 static mblk_t *
3414 3414 fcip_addudind(struct fcip *fptr, mblk_t *mp, fcph_network_hdr_t *nhdr,
3415 3415 int type)
3416 3416 {
3417 3417 dl_unitdata_ind_t *dludindp;
3418 3418 struct fcipdladdr *dlap;
3419 3419 mblk_t *nmp;
3420 3420 int size;
3421 3421 uint32_t hdrlen;
3422 3422 struct ether_addr src_addr;
3423 3423 struct ether_addr dest_addr;
3424 3424
3425 3425
3426 3426 hdrlen = (sizeof (llc_snap_hdr_t) + sizeof (fcph_network_hdr_t));
3427 3427 mp->b_rptr += hdrlen;
3428 3428
3429 3429 FCIP_TNF_PROBE_1((fcip_addudind, "fcip io", /* CSTYLED */,
3430 3430 tnf_string, msg, "fcip addudind"));
3431 3431
3432 3432 /*
3433 3433 * Allocate an M_PROTO mblk for the DL_UNITDATA_IND.
3434 3434 */
3435 3435 size = sizeof (dl_unitdata_ind_t) + FCIPADDRL + FCIPADDRL;
3436 3436 if ((nmp = allocb(size, BPRI_LO)) == NULL) {
3437 3437 fptr->fcip_allocbfail++;
3438 3438 freemsg(mp);
3439 3439 return (NULL);
3440 3440 }
3441 3441 DB_TYPE(nmp) = M_PROTO;
3442 3442 nmp->b_wptr = nmp->b_datap->db_lim;
3443 3443 nmp->b_rptr = nmp->b_wptr - size;
3444 3444
3445 3445 /*
3446 3446 * Construct a DL_UNITDATA_IND primitive.
3447 3447 */
3448 3448 dludindp = (dl_unitdata_ind_t *)nmp->b_rptr;
3449 3449 dludindp->dl_primitive = DL_UNITDATA_IND;
3450 3450 dludindp->dl_dest_addr_length = FCIPADDRL;
3451 3451 dludindp->dl_dest_addr_offset = sizeof (dl_unitdata_ind_t);
3452 3452 dludindp->dl_src_addr_length = FCIPADDRL;
3453 3453 dludindp->dl_src_addr_offset = sizeof (dl_unitdata_ind_t) + FCIPADDRL;
3454 3454 dludindp->dl_group_address = 0; /* not DL_MULTI */
3455 3455
3456 3456 dlap = (struct fcipdladdr *)(nmp->b_rptr + sizeof (dl_unitdata_ind_t));
3457 3457 wwn_to_ether(&nhdr->net_dest_addr, &dest_addr);
3458 3458 ether_bcopy(&dest_addr, &dlap->dl_phys);
3459 3459 dlap->dl_sap = (uint16_t)type;
3460 3460
3461 3461 dlap = (struct fcipdladdr *)(nmp->b_rptr + sizeof (dl_unitdata_ind_t)
3462 3462 + FCIPADDRL);
3463 3463 wwn_to_ether(&nhdr->net_src_addr, &src_addr);
3464 3464 ether_bcopy(&src_addr, &dlap->dl_phys);
3465 3465 dlap->dl_sap = (uint16_t)type;
3466 3466
3467 3467 /*
3468 3468 * Link the M_PROTO and M_DATA together.
3469 3469 */
3470 3470 nmp->b_cont = mp;
3471 3471 return (nmp);
3472 3472 }
3473 3473
3474 3474
3475 3475 /*
3476 3476 * The open routine. For clone opens, we return the next available minor
3477 3477 * no. for the stream to use
3478 3478 */
/* ARGSUSED */
static int
fcip_open(queue_t *rq, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	struct fcipstr	*slp;
	struct fcipstr	**prevslp;
	minor_t	minor;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "in fcip_open"));
	FCIP_TNF_PROBE_1((fcip_open, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "enter"));
	/*
	 * We need to ensure that the port driver is loaded before
	 * we proceed
	 */
	if (ddi_hold_installed_driver(ddi_name_to_major(PORT_DRIVER)) == NULL) {
		/* no port driver instances found */
		FCIP_DEBUG(FCIP_DEBUG_STARTUP, (CE_WARN,
		    "!ddi_hold_installed_driver of fp failed\n"));
		return (ENXIO);
	}
	/* serialize opens */
	rw_enter(&fcipstruplock, RW_WRITER);

	prevslp = &fcipstrup;
	if (sflag == CLONEOPEN) {
		/*
		 * Clone open: walk the minor-sorted stream list and claim
		 * the first unused minor number (the first gap, or one
		 * past the end of the list).
		 */
		minor = 0;
		for (; (slp = *prevslp) != NULL; prevslp = &slp->sl_nextp) {
			if (minor < slp->sl_minor) {
				break;
			}
			minor ++;
		}
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
		    "getmajor returns 0x%x", getmajor(*devp)));
		*devp = makedevice(getmajor(*devp), minor);
	} else {
		minor = getminor(*devp);
	}

	/*
	 * check if our qp's private area is already initialized. If yes
	 * the stream is already open - just return
	 *
	 * NOTE(review): this path still took the driver hold above;
	 * confirm the single release in fcip_close keeps the hold count
	 * balanced for a re-open of an already-open stream.
	 */
	if (rq->q_ptr) {
		goto done;
	}

	slp = GETSTRUCT(struct fcipstr, 1);
	slp->sl_minor = minor;
	slp->sl_rq = rq;
	slp->sl_sap = 0;
	slp->sl_flags = 0;
	slp->sl_state = DL_UNATTACHED;
	slp->sl_fcip = NULL;

	mutex_init(&slp->sl_lock, NULL, MUTEX_DRIVER, NULL);

	/*
	 * link this new stream entry into list of active streams
	 * (prevslp points at the slot that keeps the list sorted
	 * by minor number)
	 */
	slp->sl_nextp = *prevslp;
	*prevslp = slp;

	rq->q_ptr = WR(rq)->q_ptr = (char *)slp;

	/*
	 * Disable automatic enabling of our write service procedures
	 * we need to control this explicitly. This will prevent
	 * anyone scheduling of our write service procedures.
	 */
	noenable(WR(rq));

done:
	rw_exit(&fcipstruplock);
	/*
	 * enable our put and service routines on the read side
	 */
	qprocson(rq);

	/*
	 * There is only one instance of fcip (instance = 0)
	 * for multiple instances of hardware
	 */
	(void) qassociate(rq, 0);	/* don't allow drcompat to be pushed */
	return (0);
}
3566 3566
/*
 * close an opened stream. The minor no. will then be available for
 * future opens.
 */
/* ARGSUSED */
static int
fcip_close(queue_t *rq, int flag, int otyp, cred_t *credp)
{
	struct fcipstr *slp;
	struct fcipstr **prevslp;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "in fcip_close"));
	FCIP_TNF_PROBE_1((fcip_close, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "enter"));
	ASSERT(rq);
	/* we should also have the active stream pointer in q_ptr */
	ASSERT(rq->q_ptr);

	/* drop the hold on the port driver taken in open */
	ddi_rele_driver(ddi_name_to_major(PORT_DRIVER));
	/*
	 * disable our put and service procedures. We had enabled them
	 * on open
	 */
	qprocsoff(rq);
	slp = (struct fcipstr *)rq->q_ptr;

	/*
	 * Implicitly detach stream a stream from an interface.
	 */
	if (slp->sl_fcip) {
		fcip_dodetach(slp);
	}

	(void) qassociate(rq, -1);	/* undo association in open */

	/* serialize against open and other closes while we unlink */
	rw_enter(&fcipstruplock, RW_WRITER);

	/*
	 * unlink this stream from the active stream list and free it
	 */
	for (prevslp = &fcipstrup; (slp = *prevslp) != NULL;
	    prevslp = &slp->sl_nextp) {
		if (slp == (struct fcipstr *)rq->q_ptr) {
			break;
		}
	}

	/* we should have found slp */
	ASSERT(slp);

	*prevslp = slp->sl_nextp;
	mutex_destroy(&slp->sl_lock);
	kmem_free(slp, sizeof (struct fcipstr));
	/* clear both read and write side q_ptr to our freed stream struct */
	rq->q_ptr = WR(rq)->q_ptr = NULL;

	rw_exit(&fcipstruplock);
	return (0);
}
3625 3625
3626 3626 /*
3627 3627 * This is not an extension of the DDI_DETACH request. This routine
3628 3628 * only detaches a stream from an interface
3629 3629 */
3630 3630 static void
3631 3631 fcip_dodetach(struct fcipstr *slp)
3632 3632 {
3633 3633 struct fcipstr *tslp;
3634 3634 struct fcip *fptr;
3635 3635
3636 3636 FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_NOTE, "in fcip_dodetach"));
3637 3637 FCIP_TNF_PROBE_1((fcip_dodetach, "fcip io", /* CSTYLED */,
3638 3638 tnf_string, msg, "enter"));
3639 3639 ASSERT(slp->sl_fcip != NULL);
3640 3640
3641 3641 fptr = slp->sl_fcip;
3642 3642 slp->sl_fcip = NULL;
3643 3643
3644 3644 /*
3645 3645 * we don't support promiscuous mode currently but check
3646 3646 * for and disable any promiscuous mode operation
3647 3647 */
3648 3648 if (slp->sl_flags & SLALLPHYS) {
3649 3649 slp->sl_flags &= ~SLALLPHYS;
3650 3650 }
3651 3651
3652 3652 /*
3653 3653 * disable ALLMULTI mode if all mulitcast addr are ON
3654 3654 */
3655 3655 if (slp->sl_flags & SLALLMULTI) {
3656 3656 slp->sl_flags &= ~SLALLMULTI;
3657 3657 }
3658 3658
3659 3659 /*
3660 3660 * we are most likely going to perform multicast by
3661 3661 * broadcasting to the well known addr (D_ID) 0xFFFFFF or
3662 3662 * ALPA 0x00 in case of public loops
3663 3663 */
3664 3664
3665 3665
3666 3666 /*
3667 3667 * detach unit from device structure.
3668 3668 */
3669 3669 for (tslp = fcipstrup; tslp != NULL; tslp = tslp->sl_nextp) {
3670 3670 if (tslp->sl_fcip == fptr) {
3671 3671 break;
3672 3672 }
3673 3673 }
3674 3674 if (tslp == NULL) {
3675 3675 FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_WARN,
3676 3676 "fcip_dodeatch - active stream struct not found"));
3677 3677
3678 3678 /* unregister with Fabric nameserver?? */
3679 3679 }
3680 3680 slp->sl_state = DL_UNATTACHED;
3681 3681
3682 3682 fcip_setipq(fptr);
3683 3683 }
3684 3684
3685 3685
3686 3686 /*
3687 3687 * Set or clear device ipq pointer.
3688 3688 * Walk thru all the streams on this device, if a ETHERTYPE_IP
3689 3689 * stream is found, assign device ipq to its sl_rq.
3690 3690 */
3691 3691 static void
3692 3692 fcip_setipq(struct fcip *fptr)
3693 3693 {
3694 3694 struct fcipstr *slp;
3695 3695 int ok = 1;
3696 3696 queue_t *ipq = NULL;
3697 3697
3698 3698 FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "entered fcip_setipq"));
3699 3699
3700 3700 rw_enter(&fcipstruplock, RW_READER);
3701 3701
3702 3702 for (slp = fcipstrup; slp != NULL; slp = slp->sl_nextp) {
3703 3703 if (slp->sl_fcip == fptr) {
3704 3704 if (slp->sl_flags & (SLALLPHYS|SLALLSAP)) {
3705 3705 ok = 0;
3706 3706 }
3707 3707 if (slp->sl_sap == ETHERTYPE_IP) {
3708 3708 if (ipq == NULL) {
3709 3709 ipq = slp->sl_rq;
3710 3710 } else {
3711 3711 ok = 0;
3712 3712 }
3713 3713 }
3714 3714 }
3715 3715 }
3716 3716
3717 3717 rw_exit(&fcipstruplock);
3718 3718
3719 3719 if (fcip_check_port_exists(fptr)) {
3720 3720 /* fptr passed to us is stale */
3721 3721 return;
3722 3722 }
3723 3723
3724 3724 mutex_enter(&fptr->fcip_mutex);
3725 3725 if (ok) {
3726 3726 fptr->fcip_ipq = ipq;
3727 3727 } else {
3728 3728 fptr->fcip_ipq = NULL;
3729 3729 }
3730 3730 mutex_exit(&fptr->fcip_mutex);
3731 3731 }
3732 3732
3733 3733
3734 3734 /* ARGSUSED */
3735 3735 static void
3736 3736 fcip_ioctl(queue_t *wq, mblk_t *mp)
3737 3737 {
3738 3738 struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
3739 3739 struct fcipstr *slp = (struct fcipstr *)wq->q_ptr;
3740 3740
3741 3741 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
3742 3742 (CE_NOTE, "in fcip ioctl : %d", iocp->ioc_cmd));
3743 3743 FCIP_TNF_PROBE_1((fcip_ioctl, "fcip io", /* CSTYLED */,
3744 3744 tnf_string, msg, "enter"));
3745 3745
3746 3746 switch (iocp->ioc_cmd) {
3747 3747 case DLIOCRAW:
3748 3748 slp->sl_flags |= FCIP_SLRAW;
3749 3749 miocack(wq, mp, 0, 0);
3750 3750 break;
3751 3751
3752 3752 case DL_IOC_HDR_INFO:
3753 3753 fcip_dl_ioc_hdr_info(wq, mp);
3754 3754 break;
3755 3755
3756 3756 default:
3757 3757 miocnak(wq, mp, 0, EINVAL);
3758 3758 break;
3759 3759 }
3760 3760 }
3761 3761
/*
 * The streams 'Put' routine.
 *
 * M_DATA is transmitted directly when possible, otherwise queued for
 * the service procedure; M_PROTO/M_PCPROTO are always queued (see the
 * comment above fcip_wsrv for why); M_IOCTL and M_FLUSH are handled
 * inline; anything else is dropped.
 */
/* ARGSUSED */
static int
fcip_wput(queue_t *wq, mblk_t *mp)
{
	struct fcipstr *slp = (struct fcipstr *)wq->q_ptr;
	struct fcip *fptr;
	struct fcip_dest *fdestp;
	fcph_network_hdr_t *headerp;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "in fcip_wput :: type:%x", DB_TYPE(mp)));

	switch (DB_TYPE(mp)) {
	case M_DATA: {

		fptr = slp->sl_fcip;

		/*
		 * Sending raw M_DATA requires the stream to be in raw or
		 * fast-path mode, bound (DL_IDLE) and attached to a port.
		 */
		if (((slp->sl_flags & (FCIP_SLFAST|FCIP_SLRAW)) == 0) ||
		    (slp->sl_state != DL_IDLE) ||
		    (fptr == NULL)) {
			/*
			 * set error in the message block and send a reply
			 * back upstream. Sun's merror routine does this
			 * for us more cleanly.
			 */
			merror(wq, mp, EPROTO);
			break;
		}

		/*
		 * if any messages are already enqueued or if the interface
		 * is in promiscuous mode, causing the packets to loop back
		 * up, then enqueue the message. Otherwise just transmit
		 * the message. putq() puts the message on fcip's
		 * write queue and qenable() puts the queue (wq) on
		 * the list of queues to be called by the streams scheduler.
		 */
		if (wq->q_first) {
			(void) putq(wq, mp);
			/* note that the service procedure has work pending */
			fptr->fcip_wantw = 1;
			qenable(wq);
		} else if (fptr->fcip_flags & FCIP_PROMISC) {
			/*
			 * Promiscous mode not supported but add this code in
			 * case it will be supported in future.
			 */
			(void) putq(wq, mp);
			qenable(wq);
		} else {

			/*
			 * Direct transmit: the network header is already at
			 * the front of the message (see fcip_start).
			 */
			headerp = (fcph_network_hdr_t *)mp->b_rptr;
			fdestp = fcip_get_dest(fptr, &headerp->net_dest_addr);

			if (fdestp == NULL) {
				/* no route to the destination WWN */
				merror(wq, mp, EPROTO);
				break;
			}

			ASSERT(fdestp != NULL);

			(void) fcip_start(wq, mp, fptr, fdestp, KM_SLEEP);
		}
		break;
	}
	case M_PROTO:
	case M_PCPROTO:
		/*
		 * to prevent recursive calls into fcip_proto
		 * (PROTO and PCPROTO messages are handled by fcip_proto)
		 * let the service procedure handle these messages by
		 * calling putq here.
		 */
		(void) putq(wq, mp);
		qenable(wq);
		break;

	case M_IOCTL:
		fcip_ioctl(wq, mp);
		break;

	case M_FLUSH:
		if (*mp->b_rptr & FLUSHW) {
			flushq(wq, FLUSHALL);
			*mp->b_rptr &= ~FLUSHW;
		}
		/*
		 * we have both FLUSHW and FLUSHR set with FLUSHRW
		 */
		if (*mp->b_rptr & FLUSHR) {
			/*
			 * send msg back upstream. qreply() takes care
			 * of using the RD(wq) queue on its reply
			 */
			qreply(wq, mp);
		} else {
			freemsg(mp);
		}
		break;

	default:
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "default msg type: %x", DB_TYPE(mp)));
		freemsg(mp);
		break;
	}
	return (0);
}
3872 3872
3873 3873
/*
 * Handle M_PROTO and M_PCPROTO messages
 *
 * Dispatches each DLPI primitive to its handler while holding the
 * per-stream sl_lock; unsupported primitives are answered with a
 * DL_UNSUPPORTED error ack.
 */
/* ARGSUSED */
static void
fcip_proto(queue_t *wq, mblk_t *mp)
{
	union DL_primitives	*dlp;
	struct fcipstr		*slp;
	t_uscalar_t		prim;

	slp = (struct fcipstr *)wq->q_ptr;
	dlp = (union DL_primitives *)mp->b_rptr;
	prim = dlp->dl_primitive;		/* the DLPI command */

	FCIP_TNF_PROBE_5((fcip_proto, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "enter",
	    tnf_opaque, wq, wq,
	    tnf_opaque, mp, mp,
	    tnf_opaque, MP_DB_TYPE, DB_TYPE(mp),
	    tnf_opaque, dl_primitive, dlp->dl_primitive));

	FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "dl_primitve : %x", prim));

	/* hold sl_lock across the handler to serialize DLPI state changes */
	mutex_enter(&slp->sl_lock);

	switch (prim) {
	case DL_UNITDATA_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "unit data request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "unit data request"));
		fcip_udreq(wq, mp);
		break;

	case DL_ATTACH_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "Attach request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Attach request"));
		fcip_areq(wq, mp);
		break;

	case DL_DETACH_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "Detach request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Detach request"));
		fcip_dreq(wq, mp);
		break;

	case DL_BIND_REQ:
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Bind request"));
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "Bind request"));
		fcip_breq(wq, mp);
		break;

	case DL_UNBIND_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "unbind request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "unbind request"));
		fcip_ubreq(wq, mp);
		break;

	case DL_INFO_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "Info request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Info request"));
		fcip_ireq(wq, mp);
		break;

	case DL_SET_PHYS_ADDR_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "set phy addr request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "set phy addr request"));
		fcip_spareq(wq, mp);
		break;

	case DL_PHYS_ADDR_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "phy addr request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "phy addr request"));
		fcip_pareq(wq, mp);
		break;

	/* multicast and promiscuous modes are not supported by fcip */
	case DL_ENABMULTI_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "Enable Multicast request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "Enable Multicast request"));
		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
		break;

	case DL_DISABMULTI_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "Disable Multicast request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "Disable Multicast request"));
		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
		break;

	case DL_PROMISCON_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "Promiscuous mode ON request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "Promiscuous mode ON request"));
		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
		break;

	case DL_PROMISCOFF_REQ:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "Promiscuous mode OFF request"));
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "Promiscuous mode OFF request"));
		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
		break;

	default:
		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "Unsupported request"));
		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
		break;
	}
	mutex_exit(&slp->sl_lock);
}
3998 3998
/*
 * Always enqueue M_PROTO and M_PCPROTO messages pn the wq and M_DATA
 * messages sometimes. Processing of M_PROTO and M_PCPROTO messages
 * require us to hold fcip's internal locks across (upstream) putnext
 * calls. Specifically fcip_intr could hold fcip_intrlock and fcipstruplock
 * when it calls putnext(). That thread could loop back around to call
 * fcip_wput and eventually fcip_init() to cause a recursive mutex panic
 *
 * M_DATA messages are enqueued only if we are out of xmit resources. Once
 * the transmit resources are available the service procedure is enabled
 * and an attempt is made to xmit all messages on the wq.
 */
/* ARGSUSED */
static int
fcip_wsrv(queue_t *wq)
{
	mblk_t		*mp;
	struct fcipstr	*slp;
	struct fcip	*fptr;
	struct fcip_dest *fdestp;
	fcph_network_hdr_t *headerp;

	slp = (struct fcipstr *)wq->q_ptr;
	fptr = slp->sl_fcip;

	FCIP_TNF_PROBE_2((fcip_wsrv, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "enter",
	    tnf_opaque, wq, wq));
	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "fcip wsrv"));

	/* drain the write queue until empty or until xmit stalls */
	while (mp = getq(wq)) {
		switch (DB_TYPE(mp)) {
		case M_DATA:
			if (fptr && mp) {
				/* resolve the destination from the net hdr */
				headerp = (fcph_network_hdr_t *)mp->b_rptr;
				fdestp = fcip_get_dest(fptr,
				    &headerp->net_dest_addr);
				if (fdestp == NULL) {
					/* no route - drop and stop draining */
					freemsg(mp);
					goto done;
				}
				/*
				 * nonzero return means fcip_start could not
				 * transmit; stop draining (it requeued or
				 * freed the message itself)
				 */
				if (fcip_start(wq, mp, fptr, fdestp,
				    KM_SLEEP)) {
					goto done;
				}
			} else {
				/* stream not attached to a port - drop */
				freemsg(mp);
			}
			break;

		case M_PROTO:
		case M_PCPROTO:
			FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
			    (CE_NOTE, "PROT msg in wsrv"));
			fcip_proto(wq, mp);
			break;
		default:
			break;
		}
	}
done:
	return (0);
}
4062 4062
4063 4063
/*
 * This routine is called from fcip_wsrv to send a message downstream
 * on the fibre towards its destination. This routine performs the
 * actual WWN to D_ID mapping by looking up the routing and destination
 * tables.
 *
 * Returns 0 when the packet was handed to the transport (or dropped
 * because the link is down); returns 1 on failure, in which case the
 * message has either been freed or put back on the write queue
 * (putbq) for retry depending on the failure type.
 */
/* ARGSUSED */
static int
fcip_start(queue_t *wq, mblk_t *mp, struct fcip *fptr,
    struct fcip_dest *fdestp, int flags)
{
	int			rval;
	int			free;
	fcip_pkt_t		*fcip_pkt;
	fc_packet_t		*fc_pkt;
	fcip_port_info_t	*fport = fptr->fcip_port_info;
	size_t			datalen;

	FCIP_TNF_PROBE_4((fcip_start, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "enter", tnf_opaque, wq, wq,
	    tnf_opaque, mp, mp,
	    tnf_opaque, MP_DB_TYPE, DB_TYPE(mp)));
	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "in fcipstart"));

	ASSERT(fdestp != NULL);

	/*
	 * Only return if port has gone offline and not come back online
	 * in a while
	 */
	if (fptr->fcip_flags & FCIP_LINK_DOWN) {
		freemsg(mp);
		return (0);
	}

	/*
	 * The message block coming in here already has the network and
	 * llc_snap hdr stuffed in
	 */
	/*
	 * Traditionally ethernet drivers at sun handle 3 cases here -
	 * 1. messages with one mblk
	 * 2. messages with 2 mblks
	 * 3. messages with >2 mblks
	 * For now lets handle all the 3 cases in a single case where we
	 * put them together in one mblk that has all the data
	 */

	if (mp->b_cont != NULL) {
		if (!pullupmsg(mp, -1)) {
			FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
			    (CE_WARN, "failed to concat message"));
			freemsg(mp);
			return (1);
		}
	}

	datalen = msgsize(mp);

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
	    "msgsize with nhdr & llcsnap hdr in fcip_pkt_alloc 0x%lx",
	    datalen));

	/*
	 * We cannot have requests larger than FCIPMTU+Headers
	 */
	if (datalen > (FCIPMTU + sizeof (llc_snap_hdr_t) +
	    sizeof (fcph_network_hdr_t))) {
		freemsg(mp);
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
		    "fcip_pkt_alloc: datalen is larger than "
		    "max possible size."));
		return (1);
	}

	fcip_pkt = fcip_pkt_alloc(fptr, mp, flags, datalen);
	if (fcip_pkt == NULL) {
		/* out of resources - requeue for a later retry */
		(void) putbq(wq, mp);
		return (1);
	}

	fcip_pkt->fcip_pkt_mp = mp;
	fcip_pkt->fcip_pkt_wq = wq;
	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);

	mutex_enter(&fdestp->fcipd_mutex);
	/*
	 * If the device dynamically disappeared, just fail the request.
	 */
	if (fdestp->fcipd_rtable == NULL) {
		mutex_exit(&fdestp->fcipd_mutex);
		fcip_pkt_free(fcip_pkt, 1);
		return (1);
	}

	/*
	 * Now that we've assigned pkt_pd, we can call fc_ulp_init_packet
	 */

	fc_pkt->pkt_pd = fdestp->fcipd_pd;

	if (fc_ulp_init_packet((opaque_t)fport->fcipp_handle,
	    fc_pkt, flags) != FC_SUCCESS) {
		mutex_exit(&fdestp->fcipd_mutex);
		fcip_pkt_free(fcip_pkt, 1);
		return (1);
	}

	/* track the outstanding packet so fcip_pkt_timeout can reap it */
	fcip_fdestp_enqueue_pkt(fdestp, fcip_pkt);
	fcip_pkt->fcip_pkt_dest = fdestp;
	fc_pkt->pkt_fca_device = fdestp->fcipd_fca_dev;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
	    "setting cmdlen to 0x%x: rsp 0x%x : data 0x%x",
	    fc_pkt->pkt_cmdlen, fc_pkt->pkt_rsplen, fc_pkt->pkt_datalen));

	fcip_init_unicast_pkt(fcip_pkt, fport->fcipp_sid,
	    fdestp->fcipd_did, fcip_pkt_callback);

	fdestp->fcipd_ncmds++;

	mutex_exit(&fdestp->fcipd_mutex);
	if ((rval = fcip_transport(fcip_pkt)) == FC_SUCCESS) {
		fptr->fcip_opackets++;
		return (0);
	}

	/*
	 * Transient failures (busy/offline) keep the mblk for retry;
	 * anything else frees the message along with the packet.
	 */
	free = (rval == FC_STATEC_BUSY || rval == FC_OFFLINE ||
	    rval == FC_TRAN_BUSY) ? 0 : 1;

	mutex_enter(&fdestp->fcipd_mutex);
	rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt);

	if (!rval) {
		/* timeout thread already claimed the packet - don't free */
		fcip_pkt = NULL;
	} else {
		fdestp->fcipd_ncmds--;
	}
	mutex_exit(&fdestp->fcipd_mutex);

	if (fcip_pkt != NULL) {
		fcip_pkt_free(fcip_pkt, free);
	}

	if (!free) {
		/* transient failure - put the message back for retry */
		(void) putbq(wq, mp);
	}

	return (1);
}
4214 4214
4215 4215
4216 4216 /*
4217 4217 * This routine enqueus a packet marked to be issued to the
4218 4218 * transport in the dest structure. This enables us to timeout any
4219 4219 * request stuck with the FCA/transport for long periods of time
4220 4220 * without a response. fcip_pkt_timeout will attempt to clean up
4221 4221 * any packets hung in this state of limbo.
4222 4222 */
4223 4223 static void
4224 4224 fcip_fdestp_enqueue_pkt(struct fcip_dest *fdestp, fcip_pkt_t *fcip_pkt)
4225 4225 {
4226 4226 ASSERT(mutex_owned(&fdestp->fcipd_mutex));
4227 4227 FCIP_TNF_PROBE_1((fcip_fdestp_enqueue_pkt, "fcip io", /* CSTYLED */,
4228 4228 tnf_string, msg, "destp enq pkt"));
4229 4229
4230 4230 /*
4231 4231 * Just hang it off the head of packet list
4232 4232 */
4233 4233 fcip_pkt->fcip_pkt_next = fdestp->fcipd_head;
4234 4234 fcip_pkt->fcip_pkt_prev = NULL;
4235 4235 fcip_pkt->fcip_pkt_flags |= FCIP_PKT_IN_LIST;
4236 4236
4237 4237 if (fdestp->fcipd_head != NULL) {
4238 4238 ASSERT(fdestp->fcipd_head->fcip_pkt_prev == NULL);
4239 4239 fdestp->fcipd_head->fcip_pkt_prev = fcip_pkt;
4240 4240 }
4241 4241
4242 4242 fdestp->fcipd_head = fcip_pkt;
4243 4243 }
4244 4244
4245 4245
/*
 * dequeues any packets after the transport/FCA tells us it has
 * been successfully sent on its way. Ofcourse it doesn't mean that
 * the packet will actually reach its destination but its atleast
 * a step closer in that direction
 *
 * Returns 1 if the packet was found (and unlinked) in fdestp's list,
 * 0 otherwise. Caller must hold fcipd_mutex. Two paths: when the
 * packet is marked FCIP_PKT_IN_TIMEOUT the list is searched first
 * (the timeout thread may already have unlinked it); otherwise the
 * packet's own prev/next links are trusted for an O(1) unlink.
 */
static int
fcip_fdestp_dequeue_pkt(struct fcip_dest *fdestp, fcip_pkt_t *fcip_pkt)
{
	fcip_pkt_t	*fcipd_pkt;

	ASSERT(mutex_owned(&fdestp->fcipd_mutex));
	if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_TIMEOUT) {
		/* search the list - the timeout thread may hold this pkt */
		fcipd_pkt = fdestp->fcipd_head;
		while (fcipd_pkt) {
			if (fcipd_pkt == fcip_pkt) {
				fcip_pkt_t	*pptr = NULL;

				if (fcipd_pkt == fdestp->fcipd_head) {
					ASSERT(fcipd_pkt->fcip_pkt_prev ==
					    NULL);
					fdestp->fcipd_head =
					    fcipd_pkt->fcip_pkt_next;
				} else {
					pptr = fcipd_pkt->fcip_pkt_prev;
					ASSERT(pptr != NULL);
					pptr->fcip_pkt_next =
					    fcipd_pkt->fcip_pkt_next;
				}
				if (fcipd_pkt->fcip_pkt_next) {
					pptr = fcipd_pkt->fcip_pkt_next;
					pptr->fcip_pkt_prev =
					    fcipd_pkt->fcip_pkt_prev;
				}
				fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_LIST;
				break;
			}
			fcipd_pkt = fcipd_pkt->fcip_pkt_next;
		}
	} else {
		/* not in timeout - unlink directly via the pkt's own links */
		if (fcip_pkt->fcip_pkt_prev == NULL) {
			ASSERT(fdestp->fcipd_head == fcip_pkt);
			fdestp->fcipd_head = fcip_pkt->fcip_pkt_next;
		} else {
			fcip_pkt->fcip_pkt_prev->fcip_pkt_next =
			    fcip_pkt->fcip_pkt_next;
		}

		if (fcip_pkt->fcip_pkt_next) {
			fcip_pkt->fcip_pkt_next->fcip_pkt_prev =
			    fcip_pkt->fcip_pkt_prev;
		}

		fcipd_pkt = fcip_pkt;
		fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_LIST;
	}

	return (fcipd_pkt == fcip_pkt);
}
4305 4305
4306 4306 /*
4307 4307 * The transport routine - this is the routine that actually calls
4308 4308 * into the FCA driver (through the transport ofcourse) to transmit a
4309 4309 * datagram on the fibre. The dest struct assoicated with the port to
4310 4310 * which the data is intended is already bound to the packet, this routine
4311 4311 * only takes care of marking the packet a broadcast packet if it is
4312 4312 * intended to be a broadcast request. This permits the transport to send
4313 4313 * the packet down on the wire even if it doesn't have an entry for the
4314 4314 * D_ID in its d_id hash tables.
4315 4315 */
4316 4316 static int
4317 4317 fcip_transport(fcip_pkt_t *fcip_pkt)
4318 4318 {
4319 4319 struct fcip *fptr;
4320 4320 fc_packet_t *fc_pkt;
4321 4321 fcip_port_info_t *fport;
4322 4322 struct fcip_dest *fdestp;
4323 4323 uint32_t did;
4324 4324 int rval = FC_FAILURE;
4325 4325 struct fcip_routing_table *frp = NULL;
4326 4326
4327 4327 FCIP_TNF_PROBE_1((fcip_transport, "fcip io", /* CSTYLED */,
4328 4328 tnf_string, msg, "enter"));
4329 4329
4330 4330 fptr = fcip_pkt->fcip_pkt_fptr;
4331 4331 fport = fptr->fcip_port_info;
4332 4332 fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
4333 4333 fdestp = fcip_pkt->fcip_pkt_dest;
4334 4334 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN, "fcip_transport called"));
4335 4335
4336 4336 did = fptr->fcip_broadcast_did;
4337 4337 if (fc_pkt->pkt_cmd_fhdr.d_id == did &&
4338 4338 fc_pkt->pkt_tran_type != FC_PKT_BROADCAST) {
4339 4339 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
4340 4340 (CE_NOTE, "trantype set to BROADCAST"));
4341 4341 fc_pkt->pkt_tran_type = FC_PKT_BROADCAST;
4342 4342 }
4343 4343
4344 4344 mutex_enter(&fptr->fcip_mutex);
4345 4345 if ((fc_pkt->pkt_tran_type != FC_PKT_BROADCAST) &&
4346 4346 (fc_pkt->pkt_pd == NULL)) {
4347 4347 mutex_exit(&fptr->fcip_mutex);
4348 4348 FCIP_TNF_PROBE_1((fcip_transport, "fcip io", /* CSTYLED */,
4349 4349 tnf_string, msg, "fcip transport no pd"));
4350 4350 return (rval);
4351 4351 } else if (fptr->fcip_port_state == FCIP_PORT_OFFLINE) {
4352 4352 mutex_exit(&fptr->fcip_mutex);
4353 4353 FCIP_TNF_PROBE_1((fcip_transport, "fcip io", /* CSTYLED */,
4354 4354 tnf_string, msg, "fcip transport port offline"));
4355 4355 return (FC_TRAN_BUSY);
4356 4356 }
4357 4357 mutex_exit(&fptr->fcip_mutex);
4358 4358
4359 4359 if (fdestp) {
4360 4360 struct fcip_routing_table *frp;
4361 4361
4362 4362 frp = fdestp->fcipd_rtable;
4363 4363 mutex_enter(&fptr->fcip_rt_mutex);
4364 4364 mutex_enter(&fdestp->fcipd_mutex);
4365 4365 if (fc_pkt->pkt_pd != NULL) {
4366 4366 if ((frp == NULL) ||
4367 4367 (frp && FCIP_RTE_UNAVAIL(frp->fcipr_state))) {
4368 4368 mutex_exit(&fdestp->fcipd_mutex);
4369 4369 mutex_exit(&fptr->fcip_rt_mutex);
4370 4370 if (frp &&
4371 4371 (frp->fcipr_state == FCIP_RT_INVALID)) {
4372 4372 FCIP_TNF_PROBE_1((fcip_transport,
4373 4373 "fcip io", /* CSTYLED */,
4374 4374 tnf_string, msg,
4375 4375 "fcip transport - TRANBUSY"));
4376 4376 return (FC_TRAN_BUSY);
4377 4377 } else {
4378 4378 FCIP_TNF_PROBE_1((fcip_transport,
4379 4379 "fcip io", /* CSTYLED */,
4380 4380 tnf_string, msg,
4381 4381 "fcip transport: frp unavailable"));
4382 4382 return (rval);
4383 4383 }
4384 4384 }
4385 4385 }
4386 4386 mutex_exit(&fdestp->fcipd_mutex);
4387 4387 mutex_exit(&fptr->fcip_rt_mutex);
4388 4388 ASSERT(fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_LIST);
4389 4389 }
4390 4390
4391 4391 /* Explicitly invalidate this field till fcip decides to use it */
4392 4392 fc_pkt->pkt_ulp_rscn_infop = NULL;
4393 4393
4394 4394 rval = fc_ulp_transport(fport->fcipp_handle, fc_pkt);
4395 4395 if (rval == FC_STATEC_BUSY || rval == FC_OFFLINE) {
4396 4396 /*
4397 4397 * Need to queue up the command for retry
4398 4398 */
4399 4399 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
4400 4400 (CE_WARN, "ulp_transport failed: 0x%x", rval));
4401 4401 } else if (rval == FC_LOGINREQ && (frp != NULL)) {
4402 4402 (void) fcip_do_plogi(fptr, frp);
4403 4403 } else if (rval == FC_BADPACKET && (frp != NULL)) {
4404 4404 /*
4405 4405 * There is a distinct possiblity in our scheme of things
4406 4406 * that we have a routing table entry with a NULL pd struct.
4407 4407 * Mark the routing table entry for removal if it is not a
4408 4408 * broadcast entry
4409 4409 */
4410 4410 if ((frp->fcipr_d_id.port_id != 0x0) &&
4411 4411 (frp->fcipr_d_id.port_id != 0xffffff)) {
4412 4412 mutex_enter(&fptr->fcip_rt_mutex);
4413 4413 frp->fcipr_pd = NULL;
4414 4414 frp->fcipr_state = PORT_DEVICE_INVALID;
4415 4415 mutex_exit(&fptr->fcip_rt_mutex);
4416 4416 }
4417 4417 }
4418 4418
4419 4419 FCIP_TNF_PROBE_1((fcip_transport, "fcip io", /* CSTYLED */,
4420 4420 tnf_string, msg, "fcip transport done"));
4421 4421 return (rval);
4422 4422 }
4423 4423
/*
 * Call back routine. Called by the FCA/transport when the messages
 * has been put onto the wire towards its intended destination. We can
 * now free the fc_packet associated with the message
 */
static void
fcip_pkt_callback(fc_packet_t *fc_pkt)
{
	int			rval;
	fcip_pkt_t		*fcip_pkt;
	struct fcip_dest	*fdestp;

	fcip_pkt = (fcip_pkt_t *)fc_pkt->pkt_ulp_private;
	fdestp = fcip_pkt->fcip_pkt_dest;

	/*
	 * take the lock early so that we don't have a race condition
	 * with fcip_timeout
	 *
	 * fdestp->fcipd_mutex isn't really intended to lock per
	 * packet struct - see bug 5105592 for permanent solution
	 */
	mutex_enter(&fdestp->fcipd_mutex);

	fcip_pkt->fcip_pkt_flags |= FCIP_PKT_RETURNED;
	fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_ABORT;
	if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_TIMEOUT) {
		/* the timeout thread owns this packet now - don't touch it */
		mutex_exit(&fdestp->fcipd_mutex);
		return;
	}

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "pkt callback"));

	ASSERT(fdestp->fcipd_rtable != NULL);
	ASSERT(fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_LIST);
	rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt);
	fdestp->fcipd_ncmds--;
	mutex_exit(&fdestp->fcipd_mutex);

	/* only free if we successfully unlinked the packet ourselves */
	if (rval) {
		fcip_pkt_free(fcip_pkt, 1);
	}

	FCIP_TNF_PROBE_1((fcip_pkt_callback, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "pkt callback done"));
	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "pkt callback done"));
}
4471 4471
4472 4472 /*
4473 4473 * Return 1 if the topology is supported, else return 0.
4474 4474 * Topology support is consistent with what the whole
4475 4475 * stack supports together.
4476 4476 */
4477 4477 static int
4478 4478 fcip_is_supported_fc_topology(int fc_topology)
4479 4479 {
4480 4480 switch (fc_topology) {
4481 4481
4482 4482 case FC_TOP_PRIVATE_LOOP :
4483 4483 case FC_TOP_PUBLIC_LOOP :
4484 4484 case FC_TOP_FABRIC :
4485 4485 case FC_TOP_NO_NS :
4486 4486 return (1);
4487 4487 default :
4488 4488 return (0);
4489 4489 }
4490 4490 }
4491 4491
/*
 * handle any topology specific initializations here
 * this routine must be called while holding fcip_mutex
 */
/* ARGSUSED */
static void
fcip_handle_topology(struct fcip *fptr)
{

	fcip_port_info_t *fport = fptr->fcip_port_info;

	ASSERT(mutex_owned(&fptr->fcip_mutex));

	/*
	 * Since we know the port's topology - handle topology
	 * specific details here. In Point to Point and Private Loop
	 * topologies - we would probably not have a name server
	 */

	FCIP_TNF_PROBE_3((fcip_handle_topology, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "enter",
	    tnf_uint, port_state, fport->fcipp_pstate,
	    tnf_uint, topology, fport->fcipp_topology));
	FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "port state: %x, topology %x",
	    fport->fcipp_pstate, fport->fcipp_topology));

	fptr->fcip_broadcast_did = fcip_get_broadcast_did(fptr);
	/*
	 * fcip_dest_add_broadcast_entry() is called with fcip_mutex
	 * dropped - presumably because it acquires locks of its own;
	 * TODO confirm it must not be called with fcip_mutex held.
	 */
	mutex_exit(&fptr->fcip_mutex);
	(void) fcip_dest_add_broadcast_entry(fptr, 0);
	mutex_enter(&fptr->fcip_mutex);

	if (!fcip_is_supported_fc_topology(fport->fcipp_topology)) {
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "fcip(0x%x): Unsupported port topology (0x%x)",
		    fptr->fcip_instance, fport->fcipp_topology));
		return;
	}

	switch (fport->fcipp_topology) {
	case FC_TOP_PRIVATE_LOOP: {

		fc_portmap_t *port_map;
		uint32_t listlen, alloclen;
		/*
		 * we may have to maintain routing. Get a list of
		 * all devices on this port that the transport layer is
		 * aware of. Check if any of them is a IS8802 type port,
		 * if yes get its WWN and DID mapping and cache it in
		 * the purport routing table. Since there is no
		 * State Change notification for private loop/point_point
		 * topologies - this table may not be accurate. The static
		 * routing table is updated on a state change callback.
		 */
		FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_WARN, "port state valid!!"));
		fptr->fcip_port_state = FCIP_PORT_ONLINE;
		listlen = alloclen = FCIP_MAX_PORTS;
		port_map = (fc_portmap_t *)
		    kmem_zalloc((FCIP_MAX_PORTS * sizeof (fc_portmap_t)),
		    KM_SLEEP);
		if (fc_ulp_getportmap(fport->fcipp_handle, &port_map,
		    &listlen, FC_ULP_PLOGI_PRESERVE) == FC_SUCCESS) {
			/* fcip_rt_update() takes its own locks; drop ours */
			mutex_exit(&fptr->fcip_mutex);
			fcip_rt_update(fptr, port_map, listlen);
			mutex_enter(&fptr->fcip_mutex);
		}
		/*
		 * The transport may have replaced port_map with a
		 * larger allocation and bumped listlen; free the
		 * larger of the two sizes - presumably listlen then
		 * reflects the reallocated buffer size (TODO confirm
		 * against fc_ulp_getportmap()'s contract).
		 */
		if (listlen > alloclen) {
			alloclen = listlen;
		}
		kmem_free(port_map, (alloclen * sizeof (fc_portmap_t)));
		/*
		 * Now fall through and register with the transport
		 * that this port is IP capable
		 */
	}
	/* FALLTHROUGH */
	case FC_TOP_NO_NS:
		/*
		 * If we don't have a nameserver, lets wait until we
		 * have to send out a packet to a remote port and then
		 * try and discover the port using ARP/FARP.
		 */
	/* FALLTHROUGH */
	case FC_TOP_PUBLIC_LOOP:
	case FC_TOP_FABRIC: {
		fc_portmap_t *port_map;
		uint32_t listlen, alloclen;

		/* FC_TYPE of 0x05 goes to word 0, LSB */
		fptr->fcip_port_state = FCIP_PORT_ONLINE;

		/*
		 * Kick off name-server registration on the taskq.
		 * FCIP_REG_INPROGRESS prevents dispatching a second
		 * registration while one is queued; if the dispatch
		 * fails, clear the flag so a later attempt can retry.
		 */
		if (!(fptr->fcip_flags & FCIP_REG_INPROGRESS)) {
			fptr->fcip_flags |= FCIP_REG_INPROGRESS;
			if (taskq_dispatch(fptr->fcip_tq, fcip_port_ns,
			    fptr, KM_NOSLEEP) == 0) {
				fptr->fcip_flags &= ~FCIP_REG_INPROGRESS;
			}
		}

		/*
		 * If fcip_create_nodes_on_demand is overridden to force
		 * discovery of all nodes in Fabric/Public loop topologies
		 * we need to query for and obtain all nodes and log into
		 * them as with private loop devices
		 */
		if (!fcip_create_nodes_on_demand) {
			fptr->fcip_port_state = FCIP_PORT_ONLINE;
			listlen = alloclen = FCIP_MAX_PORTS;
			port_map = (fc_portmap_t *)
			    kmem_zalloc((FCIP_MAX_PORTS *
			    sizeof (fc_portmap_t)), KM_SLEEP);
			if (fc_ulp_getportmap(fport->fcipp_handle, &port_map,
			    &listlen, FC_ULP_PLOGI_PRESERVE) == FC_SUCCESS) {
				/* see the private-loop case above */
				mutex_exit(&fptr->fcip_mutex);
				fcip_rt_update(fptr, port_map, listlen);
				mutex_enter(&fptr->fcip_mutex);
			}
			if (listlen > alloclen) {
				alloclen = listlen;
			}
			kmem_free(port_map,
			    (alloclen * sizeof (fc_portmap_t)));
		}
		break;
	}

	default:
		break;
	}
}
4621 4621
4622 4622 static void
4623 4623 fcip_port_ns(void *arg)
4624 4624 {
4625 4625 struct fcip *fptr = (struct fcip *)arg;
4626 4626 fcip_port_info_t *fport = fptr->fcip_port_info;
4627 4627 fc_ns_cmd_t ns_cmd;
4628 4628 uint32_t types[8];
4629 4629 ns_rfc_type_t rfc;
4630 4630
4631 4631 mutex_enter(&fptr->fcip_mutex);
4632 4632 if ((fptr->fcip_flags & (FCIP_DETACHING | FCIP_DETACHED)) ||
4633 4633 (fptr->fcip_flags & (FCIP_SUSPENDED | FCIP_POWER_DOWN))) {
4634 4634 fptr->fcip_flags &= ~FCIP_REG_INPROGRESS;
4635 4635 mutex_exit(&fptr->fcip_mutex);
4636 4636 return;
4637 4637 }
4638 4638 mutex_exit(&fptr->fcip_mutex);
4639 4639
4640 4640 /*
4641 4641 * Prepare the Name server structure to
4642 4642 * register with the transport in case of
4643 4643 * Fabric configuration.
4644 4644 */
4645 4645 bzero(&rfc, sizeof (rfc));
4646 4646 bzero(types, sizeof (types));
4647 4647
4648 4648 types[FC4_TYPE_WORD_POS(FC_TYPE_IS8802_SNAP)] = (1 <<
4649 4649 FC4_TYPE_BIT_POS(FC_TYPE_IS8802_SNAP));
4650 4650
4651 4651 rfc.rfc_port_id.port_id = fport->fcipp_sid.port_id;
4652 4652 bcopy(types, rfc.rfc_types, sizeof (types));
4653 4653
4654 4654 ns_cmd.ns_flags = 0;
4655 4655 ns_cmd.ns_cmd = NS_RFT_ID;
4656 4656 ns_cmd.ns_req_len = sizeof (rfc);
4657 4657 ns_cmd.ns_req_payload = (caddr_t)&rfc;
4658 4658 ns_cmd.ns_resp_len = 0;
4659 4659 ns_cmd.ns_resp_payload = NULL;
4660 4660
4661 4661 /*
4662 4662 * Perform the Name Server Registration for FC IS8802_SNAP Type.
4663 4663 * We don't expect a reply for registering port type
4664 4664 */
4665 4665 (void) fc_ulp_port_ns(fptr->fcip_port_info->fcipp_handle,
4666 4666 (opaque_t)0, &ns_cmd);
4667 4667
4668 4668 mutex_enter(&fptr->fcip_mutex);
4669 4669 fptr->fcip_flags &= ~FCIP_REG_INPROGRESS;
4670 4670 mutex_exit(&fptr->fcip_mutex);
4671 4671 }
4672 4672
/*
 * setup this instance of fcip. This routine inits kstats, allocates
 * unsolicited buffers, determines' this port's siblings and handles
 * topology specific details which includes registering with the name
 * server and also setting up the routing table for this port for
 * private loops and point to point topologies
 *
 * Returns FC_SUCCESS or FC_FAILURE; on failure all resources set up
 * here are torn down again via the "done" label below.
 * Called with fcip_mutex NOT held; it is acquired here and is held
 * on entry to the "done" cleanup path.
 */
static int
fcip_init_port(struct fcip *fptr)
{
	int rval = FC_SUCCESS;
	fcip_port_info_t *fport = fptr->fcip_port_info;
	/*
	 * NOTE(review): buf is static, so concurrent fcip_init_port()
	 * calls for different instances would share it - presumably
	 * initialization is serialized by the framework; TODO confirm.
	 */
	static char buf[64];
	size_t tok_buf_size;

	ASSERT(fport != NULL);

	FCIP_TNF_PROBE_1((fcip_init_port, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "enter"));
	mutex_enter(&fptr->fcip_mutex);

	/*
	 * setup mac address for this port. Don't be too worried if
	 * the WWN is zero, there is probably nothing attached to
	 * to the port. There is no point allocating unsolicited buffers
	 * for an unused port so return success if we don't have a MAC
	 * address. Do the port init on a state change notification.
	 */
	if (fcip_setup_mac_addr(fptr) == FCIP_INVALID_WWN) {
		fptr->fcip_port_state = FCIP_PORT_OFFLINE;
		rval = FC_SUCCESS;
		goto done;
	}

	/*
	 * clear routing table hash list for this port
	 */
	fcip_rt_flush(fptr);

	/*
	 * init kstats for this instance
	 */
	fcip_kstat_init(fptr);

	/*
	 * Allocate unsolicited buffers
	 */
	fptr->fcip_ub_nbufs = fcip_ub_nbufs;
	tok_buf_size = sizeof (*fptr->fcip_ub_tokens) * fcip_ub_nbufs;

	FCIP_TNF_PROBE_2((fcip_init_port, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "debug",
	    tnf_int, tokBufsize, tok_buf_size));

	FCIP_DEBUG(FCIP_DEBUG_INIT,
	    (CE_WARN, "tokBufsize: 0x%lx", tok_buf_size));

	fptr->fcip_ub_tokens = kmem_zalloc(tok_buf_size, KM_SLEEP);

	/*
	 * NOTE(review): kmem_zalloc() with KM_SLEEP cannot return
	 * NULL, so this check is purely defensive (dead code).
	 */
	if (fptr->fcip_ub_tokens == NULL) {
		rval = FC_FAILURE;
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "fcip(%d): failed to allocate unsol buf",
		    fptr->fcip_instance));
		goto done;
	}
	/*
	 * Hand the token array to the transport; it may adjust
	 * fcip_ub_nbufs down to the number actually allocated.
	 */
	rval = fc_ulp_uballoc(fport->fcipp_handle, &fptr->fcip_ub_nbufs,
	    fcip_ub_size, FC_TYPE_IS8802_SNAP, fptr->fcip_ub_tokens);

	if (rval != FC_SUCCESS) {
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "fcip(%d): fc_ulp_uballoc failed with 0x%x!!",
		    fptr->fcip_instance, rval));
	}

	switch (rval) {
	case FC_SUCCESS:
		break;

	case FC_OFFLINE:
		fptr->fcip_port_state = FCIP_PORT_OFFLINE;
		rval = FC_FAILURE;
		goto done;

	case FC_UB_ERROR:
		FCIP_TNF_PROBE_1((fcip_init_port, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "invalid ub alloc request"));
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "invalid ub alloc request !!"));
		rval = FC_FAILURE;
		goto done;

	case FC_FAILURE:
		/*
		 * requested bytes could not be alloced
		 */
		if (fptr->fcip_ub_nbufs != fcip_ub_nbufs) {
			cmn_err(CE_WARN,
			    "!fcip(0x%x): Failed to alloc unsolicited bufs",
			    ddi_get_instance(fport->fcipp_dip));
			rval = FC_FAILURE;
			goto done;
		}
		/* got fewer bytes than asked but all buffers - proceed */
		break;

	default:
		rval = FC_FAILURE;
		break;
	}

	/*
	 * Preallocate a Cache of fcip packets for transmit and receive
	 * We don't want to be holding on to unsolicited buffers while
	 * we transmit the message upstream
	 */
	FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "allocating fcip_pkt cache"));

	(void) sprintf(buf, "fcip%d_cache", fptr->fcip_instance);
	fptr->fcip_xmit_cache = kmem_cache_create(buf,
	    (fport->fcipp_fca_pkt_size + sizeof (fcip_pkt_t)),
	    8, fcip_cache_constructor, fcip_cache_destructor,
	    NULL, (void *)fport, NULL, 0);

	(void) sprintf(buf, "fcip%d_sendup_cache", fptr->fcip_instance);
	fptr->fcip_sendup_cache = kmem_cache_create(buf,
	    sizeof (struct fcip_sendup_elem),
	    8, fcip_sendup_constructor, NULL, NULL, (void *)fport, NULL, 0);

	/*
	 * NOTE(review): only fcip_xmit_cache is checked here;
	 * fcip_sendup_cache is not - presumably because
	 * kmem_cache_create() cannot fail in this configuration;
	 * TODO confirm.
	 */
	if (fptr->fcip_xmit_cache == NULL) {
		FCIP_TNF_PROBE_2((fcip_init_port, "fcip io", /* CSTYLED */,
		    tnf_string, msg, "unable to allocate xmit cache",
		    tnf_int, instance, fptr->fcip_instance));
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "fcip%d unable to allocate xmit cache",
		    fptr->fcip_instance));
		rval = FC_FAILURE;
		goto done;
	}

	/*
	 * We may need to handle routing tables for point to point and
	 * fcal topologies and register with NameServer for Fabric
	 * topologies.
	 */
	fcip_handle_topology(fptr);
	mutex_exit(&fptr->fcip_mutex);
	if (fcip_dest_add_broadcast_entry(fptr, 1) != FC_SUCCESS) {
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "fcip(0x%x):add broadcast entry failed!!",
		    fptr->fcip_instance));
		/* reacquire the mutex: the cleanup path expects it held */
		mutex_enter(&fptr->fcip_mutex);
		rval = FC_FAILURE;
		goto done;
	}

	rval = FC_SUCCESS;
	return (rval);

done:
	/*
	 * we don't always come here from port_attach - so cleanup
	 * anything done in the init_port routine
	 */
	if (fptr->fcip_kstatp) {
		kstat_delete(fptr->fcip_kstatp);
		fptr->fcip_kstatp = NULL;
	}

	if (fptr->fcip_xmit_cache) {
		kmem_cache_destroy(fptr->fcip_xmit_cache);
		fptr->fcip_xmit_cache = NULL;
	}

	if (fptr->fcip_sendup_cache) {
		kmem_cache_destroy(fptr->fcip_sendup_cache);
		fptr->fcip_sendup_cache = NULL;
	}

	/* release unsolicited buffers */
	if (fptr->fcip_ub_tokens) {
		uint64_t *tokens = fptr->fcip_ub_tokens;
		fptr->fcip_ub_tokens = NULL;

		/*
		 * tok_buf_size is guaranteed to have been computed
		 * whenever fcip_ub_tokens is non-NULL (see allocation
		 * above). Drop fcip_mutex before calling back into
		 * the transport.
		 */
		mutex_exit(&fptr->fcip_mutex);
		(void) fc_ulp_ubfree(fport->fcipp_handle, fptr->fcip_ub_nbufs,
		    tokens);
		kmem_free(tokens, tok_buf_size);

	} else {
		mutex_exit(&fptr->fcip_mutex);
	}

	return (rval);
}
4867 4867
4868 4868 /*
4869 4869 * Sets up a port's MAC address from its WWN
4870 4870 */
4871 4871 static int
4872 4872 fcip_setup_mac_addr(struct fcip *fptr)
4873 4873 {
4874 4874 fcip_port_info_t *fport = fptr->fcip_port_info;
4875 4875
4876 4876 ASSERT(mutex_owned(&fptr->fcip_mutex));
4877 4877
4878 4878 fptr->fcip_addrflags = 0;
4879 4879
4880 4880 /*
4881 4881 * we cannot choose a MAC address for our interface - we have
4882 4882 * to live with whatever node WWN we get (minus the top two
4883 4883 * MSbytes for the MAC address) from the transport layer. We will
4884 4884 * treat the WWN as our factory MAC address.
4885 4885 */
4886 4886
4887 4887 if ((fport->fcipp_nwwn.w.wwn_hi != 0) ||
4888 4888 (fport->fcipp_nwwn.w.wwn_lo != 0)) {
4889 4889 char etherstr[ETHERSTRL];
4890 4890
4891 4891 wwn_to_ether(&fport->fcipp_nwwn, &fptr->fcip_macaddr);
4892 4892 fcip_ether_to_str(&fptr->fcip_macaddr, etherstr);
4893 4893 FCIP_DEBUG(FCIP_DEBUG_INIT,
4894 4894 (CE_NOTE, "setupmacaddr ouraddr %s", etherstr));
4895 4895
4896 4896 fptr->fcip_addrflags = (FCIP_FACTADDR_PRESENT |
4897 4897 FCIP_FACTADDR_USE);
4898 4898 } else {
4899 4899 /*
4900 4900 * No WWN - just return failure - there's not much
4901 4901 * we can do since we cannot set the WWN.
4902 4902 */
4903 4903 FCIP_DEBUG(FCIP_DEBUG_INIT,
4904 4904 (CE_WARN, "Port does not have a valid WWN"));
4905 4905 return (FCIP_INVALID_WWN);
4906 4906 }
4907 4907 return (FC_SUCCESS);
4908 4908 }
4909 4909
4910 4910
4911 4911 /*
4912 4912 * flush routing table entries
4913 4913 */
4914 4914 static void
4915 4915 fcip_rt_flush(struct fcip *fptr)
4916 4916 {
4917 4917 int index;
4918 4918
4919 4919 mutex_enter(&fptr->fcip_rt_mutex);
4920 4920 for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
4921 4921 struct fcip_routing_table *frtp, *frtp_next;
4922 4922 frtp = fptr->fcip_rtable[index];
4923 4923 while (frtp) {
4924 4924 frtp_next = frtp->fcipr_next;
4925 4925 kmem_free(frtp, sizeof (struct fcip_routing_table));
4926 4926 frtp = frtp_next;
4927 4927 }
4928 4928 fptr->fcip_rtable[index] = NULL;
4929 4929 }
4930 4930 mutex_exit(&fptr->fcip_rt_mutex);
4931 4931 }
4932 4932
/*
 * Free up the fcip softstate and all allocated resources for the
 * fcip instance assoicated with a given port driver instance
 *
 * Given that the list of structures pointed to by fcip_port_head,
 * this function is called from multiple sources, and the
 * fcip_global_mutex that protects fcip_port_head must be dropped,
 * our best solution is to return a value that indicates the next
 * port in the list. This way the caller doesn't need to worry
 * about the race condition where he saves off a pointer to the
 * next structure in the list and by the time this routine returns,
 * that next structure has already been freed.
 *
 * Must be called with fcip_global_mutex held; the mutex is
 * temporarily dropped around fc_ulp_ubfree() (see below) but is
 * held again on return.
 */
static fcip_port_info_t *
fcip_softstate_free(fcip_port_info_t *fport)
{
	struct fcip		*fptr = NULL;
	int			instance;
	timeout_id_t		tid;
	opaque_t		phandle = NULL;
	fcip_port_info_t	*prev_fport, *cur_fport, *next_fport = NULL;

	ASSERT(MUTEX_HELD(&fcip_global_mutex));

	if (fport) {
		phandle = fport->fcipp_handle;
		fptr = fport->fcipp_fcip;
	} else {
		/* nothing to free; next_fport is still NULL here */
		return (next_fport);
	}

	if (fptr) {
		mutex_enter(&fptr->fcip_mutex);
		instance = ddi_get_instance(fptr->fcip_dip);

		/*
		 * dismantle timeout thread for this instance of fcip
		 */
		tid = fptr->fcip_timeout_id;
		fptr->fcip_timeout_id = NULL;

		/*
		 * untimeout() may block on a running timeout handler
		 * that needs fcip_mutex, so drop the mutex around it.
		 */
		mutex_exit(&fptr->fcip_mutex);
		(void) untimeout(tid);
		mutex_enter(&fptr->fcip_mutex);

		ASSERT(fcip_num_instances >= 0);
		fcip_num_instances--;

		/*
		 * stop sendup thread
		 */
		mutex_enter(&fptr->fcip_sendup_mutex);
		if (fptr->fcip_sendup_thr_initted) {
			/*
			 * Signal the sendup thread to exit, then wait
			 * for it to signal back on the same cv.
			 */
			fptr->fcip_sendup_thr_initted = 0;
			cv_signal(&fptr->fcip_sendup_cv);
			cv_wait(&fptr->fcip_sendup_cv,
			    &fptr->fcip_sendup_mutex);
		}
		ASSERT(fptr->fcip_sendup_head == NULL);
		fptr->fcip_sendup_head = fptr->fcip_sendup_tail = NULL;
		mutex_exit(&fptr->fcip_sendup_mutex);

		/*
		 * dismantle taskq
		 */
		if (fptr->fcip_tq) {
			taskq_t	*tq = fptr->fcip_tq;

			fptr->fcip_tq = NULL;

			/*
			 * taskq_destroy() waits for queued tasks that
			 * may need fcip_mutex - drop it while waiting.
			 */
			mutex_exit(&fptr->fcip_mutex);
			taskq_destroy(tq);
			mutex_enter(&fptr->fcip_mutex);
		}

		if (fptr->fcip_kstatp) {
			kstat_delete(fptr->fcip_kstatp);
			fptr->fcip_kstatp = NULL;
		}

		/* flush the routing table entries */
		fcip_rt_flush(fptr);

		if (fptr->fcip_xmit_cache) {
			kmem_cache_destroy(fptr->fcip_xmit_cache);
			fptr->fcip_xmit_cache = NULL;
		}

		if (fptr->fcip_sendup_cache) {
			kmem_cache_destroy(fptr->fcip_sendup_cache);
			fptr->fcip_sendup_cache = NULL;
		}

		fcip_cleanup_dest(fptr);

		/* release unsolicited buffers */
		if (fptr->fcip_ub_tokens) {
			uint64_t	*tokens = fptr->fcip_ub_tokens;

			fptr->fcip_ub_tokens = NULL;
			mutex_exit(&fptr->fcip_mutex);
			if (phandle) {
				/*
				 * release the global mutex here to
				 * permit any data pending callbacks to
				 * complete. Else we will deadlock in the
				 * FCA waiting for all unsol buffers to be
				 * returned.
				 */
				mutex_exit(&fcip_global_mutex);
				(void) fc_ulp_ubfree(phandle,
				    fptr->fcip_ub_nbufs, tokens);
				mutex_enter(&fcip_global_mutex);
			}
			kmem_free(tokens, (sizeof (*tokens) * fcip_ub_nbufs));
		} else {
			mutex_exit(&fptr->fcip_mutex);
		}

		mutex_destroy(&fptr->fcip_mutex);
		mutex_destroy(&fptr->fcip_ub_mutex);
		mutex_destroy(&fptr->fcip_rt_mutex);
		mutex_destroy(&fptr->fcip_dest_mutex);
		mutex_destroy(&fptr->fcip_sendup_mutex);
		cv_destroy(&fptr->fcip_farp_cv);
		cv_destroy(&fptr->fcip_sendup_cv);
		cv_destroy(&fptr->fcip_ub_cv);

		ddi_soft_state_free(fcip_softp, instance);
	}

	/*
	 * Now dequeue the fcip_port_info from the port list
	 */
	cur_fport = fcip_port_head;
	prev_fport = NULL;
	while (cur_fport != NULL) {
		if (cur_fport == fport) {
			break;
		}
		prev_fport = cur_fport;
		cur_fport = cur_fport->fcipp_next;
	}

	/*
	 * Assert that we found a port in our port list
	 */
	ASSERT(cur_fport == fport);

	if (prev_fport) {
		/*
		 * Not the first port in the port list
		 */
		prev_fport->fcipp_next = fport->fcipp_next;
	} else {
		/*
		 * first port
		 */
		fcip_port_head = fport->fcipp_next;
	}
	next_fport = fport->fcipp_next;
	kmem_free(fport, sizeof (fcip_port_info_t));

	return (next_fport);
}
5098 5098
5099 5099
/*
 * This is called by transport for any ioctl operations performed
 * on the devctl or other transport minor nodes. It is currently
 * unused for fcip
 */
/* ARGSUSED */
static int
fcip_port_ioctl(opaque_t ulp_handle, opaque_t port_handle, dev_t dev,
    int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
    uint32_t claimed)
{
	/*
	 * fcip claims no ioctls; FC_UNCLAIMED lets the transport
	 * offer the ioctl to other ULPs.
	 */
	return (FC_UNCLAIMED);
}
5113 5113
5114 5114 /*
5115 5115 * DL_INFO_REQ - returns information about the DLPI stream to the DLS user
5116 5116 * requesting information about this interface
5117 5117 */
5118 5118 static void
5119 5119 fcip_ireq(queue_t *wq, mblk_t *mp)
5120 5120 {
5121 5121 struct fcipstr *slp;
5122 5122 struct fcip *fptr;
5123 5123 dl_info_ack_t *dlip;
5124 5124 struct fcipdladdr *dlap;
5125 5125 la_wwn_t *ep;
5126 5126 int size;
5127 5127 char etherstr[ETHERSTRL];
5128 5128
5129 5129 slp = (struct fcipstr *)wq->q_ptr;
5130 5130
5131 5131 fptr = slp->sl_fcip;
5132 5132
5133 5133 FCIP_DEBUG(FCIP_DEBUG_DLPI,
5134 5134 (CE_NOTE, "fcip_ireq: info request req rcvd"));
5135 5135
5136 5136 FCIP_TNF_PROBE_1((fcip_ireq, "fcip io", /* CSTYLED */,
5137 5137 tnf_string, msg, "fcip ireq entered"));
5138 5138
5139 5139 if (MBLKL(mp) < DL_INFO_REQ_SIZE) {
5140 5140 dlerrorack(wq, mp, DL_INFO_REQ, DL_BADPRIM, 0);
5141 5141 return;
5142 5142 }
5143 5143
5144 5144 /*
5145 5145 * Exchange current message for a DL_INFO_ACK
5146 5146 */
5147 5147 size = sizeof (dl_info_ack_t) + FCIPADDRL + ETHERADDRL;
5148 5148 if ((mp = mexchange(wq, mp, size, M_PCPROTO, DL_INFO_ACK)) == NULL) {
5149 5149 return;
5150 5150 }
5151 5151
5152 5152 /*
5153 5153 * FILL in the DL_INFO_ACK fields and reply
5154 5154 */
5155 5155 dlip = (dl_info_ack_t *)mp->b_rptr;
5156 5156 *dlip = fcip_infoack;
5157 5157 dlip->dl_current_state = slp->sl_state;
5158 5158 dlap = (struct fcipdladdr *)(mp->b_rptr + dlip->dl_addr_offset);
5159 5159 dlap->dl_sap = slp->sl_sap;
5160 5160
5161 5161
5162 5162 if (fptr) {
5163 5163 fcip_ether_to_str(&fptr->fcip_macaddr, etherstr);
5164 5164 FCIP_DEBUG(FCIP_DEBUG_DLPI,
5165 5165 (CE_NOTE, "ireq - our mac: %s", etherstr));
5166 5166 ether_bcopy(&fptr->fcip_macaddr, &dlap->dl_phys);
5167 5167 } else {
5168 5168 bzero((caddr_t)&dlap->dl_phys, ETHERADDRL);
5169 5169 }
5170 5170
5171 5171 ep = (la_wwn_t *)(mp->b_rptr + dlip->dl_brdcst_addr_offset);
5172 5172 ether_bcopy(&fcip_arpbroadcast_addr, ep);
5173 5173
5174 5174 FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "sending back info req.."));
5175 5175 qreply(wq, mp);
5176 5176 }
5177 5177
5178 5178
5179 5179 /*
5180 5180 * To handle DL_UNITDATA_REQ requests.
5181 5181 */
5182 5182
5183 5183 static void
5184 5184 fcip_udreq(queue_t *wq, mblk_t *mp)
5185 5185 {
5186 5186 struct fcipstr *slp;
5187 5187 struct fcip *fptr;
5188 5188 fcip_port_info_t *fport;
5189 5189 dl_unitdata_req_t *dludp;
5190 5190 mblk_t *nmp;
5191 5191 struct fcipdladdr *dlap;
5192 5192 fcph_network_hdr_t *headerp;
5193 5193 llc_snap_hdr_t *lsnap;
5194 5194 t_uscalar_t off, len;
5195 5195 struct fcip_dest *fdestp;
5196 5196 la_wwn_t wwn;
5197 5197 int hdr_size;
5198 5198
5199 5199 FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "inside fcip_udreq"));
5200 5200
5201 5201 FCIP_TNF_PROBE_1((fcip_udreq, "fcip io", /* CSTYLED */,
5202 5202 tnf_string, msg, "fcip udreq entered"));
5203 5203
5204 5204 slp = (struct fcipstr *)wq->q_ptr;
5205 5205
5206 5206 if (slp->sl_state != DL_IDLE) {
5207 5207 dlerrorack(wq, mp, DL_UNITDATA_REQ, DL_OUTSTATE, 0);
5208 5208 return;
5209 5209 }
5210 5210
5211 5211 fptr = slp->sl_fcip;
5212 5212
5213 5213 if (fptr == NULL) {
5214 5214 dlerrorack(wq, mp, DL_UNITDATA_REQ, DL_OUTSTATE, 0);
5215 5215 return;
5216 5216 }
5217 5217
5218 5218 fport = fptr->fcip_port_info;
5219 5219
5220 5220 dludp = (dl_unitdata_req_t *)mp->b_rptr;
5221 5221 off = dludp->dl_dest_addr_offset;
5222 5222 len = dludp->dl_dest_addr_length;
5223 5223
5224 5224 /*
5225 5225 * Validate destination address format
5226 5226 */
5227 5227 if (!MBLKIN(mp, off, len) || (len != FCIPADDRL)) {
5228 5228 dluderrorind(wq, mp, (mp->b_rptr + off), len, DL_BADADDR, 0);
5229 5229 return;
5230 5230 }
5231 5231
5232 5232 /*
5233 5233 * Error if no M_DATA follows
5234 5234 */
5235 5235 nmp = mp->b_cont;
5236 5236 if (nmp == NULL) {
5237 5237 dluderrorind(wq, mp, (mp->b_rptr + off), len, DL_BADDATA, 0);
5238 5238 return;
5239 5239 }
5240 5240 dlap = (struct fcipdladdr *)(mp->b_rptr + off);
5241 5241
5242 5242 /*
5243 5243 * Now get the destination structure for the remote NPORT
5244 5244 */
5245 5245 ether_to_wwn(&dlap->dl_phys, &wwn);
5246 5246 fdestp = fcip_get_dest(fptr, &wwn);
5247 5247
5248 5248 if (fdestp == NULL) {
5249 5249 FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE,
5250 5250 "udreq - couldn't find dest struct for remote port");
5251 5251 dluderrorind(wq, mp, (mp->b_rptr + off), len, DL_BADDATA, 0));
5252 5252 return;
5253 5253 }
5254 5254
5255 5255 /*
5256 5256 * Network header + SAP
5257 5257 */
5258 5258 hdr_size = sizeof (fcph_network_hdr_t) + sizeof (llc_snap_hdr_t);
5259 5259
5260 5260 /* DB_REF gives the no. of msgs pointing to this block */
5261 5261 if ((DB_REF(nmp) == 1) &&
5262 5262 (MBLKHEAD(nmp) >= hdr_size) &&
5263 5263 (((uintptr_t)mp->b_rptr & 0x1) == 0)) {
5264 5264 la_wwn_t wwn;
5265 5265 nmp->b_rptr -= hdr_size;
5266 5266
5267 5267 /* first put the network header */
5268 5268 headerp = (fcph_network_hdr_t *)nmp->b_rptr;
5269 5269 if (ether_cmp(&dlap->dl_phys, &fcip_arpbroadcast_addr) == 0) {
5270 5270 ether_to_wwn(&fcipnhbroadcastaddr, &wwn);
5271 5271 } else {
5272 5272 ether_to_wwn(&dlap->dl_phys, &wwn);
5273 5273 }
5274 5274 bcopy(&wwn, &headerp->net_dest_addr, sizeof (la_wwn_t));
5275 5275 ether_to_wwn(&fptr->fcip_macaddr, &wwn);
5276 5276 bcopy(&wwn, &headerp->net_src_addr, sizeof (la_wwn_t));
5277 5277
5278 5278 /* Now the snap header */
5279 5279 lsnap = (llc_snap_hdr_t *)(nmp->b_rptr +
5280 5280 sizeof (fcph_network_hdr_t));
5281 5281 lsnap->dsap = 0xAA;
5282 5282 lsnap->ssap = 0xAA;
5283 5283 lsnap->ctrl = 0x03;
5284 5284 lsnap->oui[0] = 0x00;
5285 5285 lsnap->oui[1] = 0x00; /* 80 */
5286 5286 lsnap->oui[2] = 0x00; /* C2 */
5287 5287 lsnap->pid = BE_16((dlap->dl_sap));
5288 5288
5289 5289 freeb(mp);
5290 5290 mp = nmp;
5291 5291
5292 5292 } else {
5293 5293 la_wwn_t wwn;
5294 5294
5295 5295 DB_TYPE(mp) = M_DATA;
5296 5296 headerp = (fcph_network_hdr_t *)mp->b_rptr;
5297 5297
5298 5298 /*
5299 5299 * Only fill in the low 48bits of WWN for now - we can
5300 5300 * fill in the NAA_ID after we find the port in the
5301 5301 * routing tables
5302 5302 */
5303 5303 if (ether_cmp(&dlap->dl_phys, &fcip_arpbroadcast_addr) == 0) {
5304 5304 ether_to_wwn(&fcipnhbroadcastaddr, &wwn);
5305 5305 } else {
5306 5306 ether_to_wwn(&dlap->dl_phys, &wwn);
5307 5307 }
5308 5308 bcopy(&wwn, &headerp->net_dest_addr, sizeof (la_wwn_t));
5309 5309 /* need to send our PWWN */
5310 5310 bcopy(&fport->fcipp_pwwn, &headerp->net_src_addr,
5311 5311 sizeof (la_wwn_t));
5312 5312
5313 5313 lsnap = (llc_snap_hdr_t *)(nmp->b_rptr +
5314 5314 sizeof (fcph_network_hdr_t));
5315 5315 lsnap->dsap = 0xAA;
5316 5316 lsnap->ssap = 0xAA;
5317 5317 lsnap->ctrl = 0x03;
5318 5318 lsnap->oui[0] = 0x00;
5319 5319 lsnap->oui[1] = 0x00;
5320 5320 lsnap->oui[2] = 0x00;
5321 5321 lsnap->pid = BE_16(dlap->dl_sap);
5322 5322
5323 5323 mp->b_wptr = mp->b_rptr + hdr_size;
5324 5324 }
5325 5325
5326 5326 /*
5327 5327 * Ethernet drivers have a lot of gunk here to put the Type
5328 5328 * information (for Ethernet encapsulation (RFC 894) or the
5329 5329 * Length (for 802.2/802.3) - I guess we'll just ignore that
5330 5330 * here.
5331 5331 */
5332 5332
5333 5333 /*
5334 5334 * Start the I/O on this port. If fcip_start failed for some reason
5335 5335 * we call putbq in fcip_start so we don't need to check the
5336 5336 * return value from fcip_start
5337 5337 */
5338 5338 (void) fcip_start(wq, mp, fptr, fdestp, KM_SLEEP);
5339 5339 }
5340 5340
/*
 * DL_ATTACH_REQ: attaches a PPA with a stream. ATTACH requets are needed
 * for style 2 DLS providers to identify the physical medium through which
 * the streams communication will happen
 */
static void
fcip_areq(queue_t *wq, mblk_t *mp)
{
	struct fcipstr		*slp;
	union DL_primitives	*dlp;
	fcip_port_info_t	*fport;
	struct fcip		*fptr;
	int			ppa;

	slp = (struct fcipstr *)wq->q_ptr;
	dlp = (union DL_primitives *)mp->b_rptr;

	/* the request must be at least DL_ATTACH_REQ_SIZE bytes long */
	if (MBLKL(mp) < DL_ATTACH_REQ_SIZE) {
		dlerrorack(wq, mp, DL_ATTACH_REQ, DL_BADPRIM, 0);
		return;
	}

	/* a stream can only attach while unattached */
	if (slp->sl_state != DL_UNATTACHED) {
		dlerrorack(wq, mp, DL_ATTACH_REQ, DL_OUTSTATE, 0);
		return;
	}

	ppa = dlp->attach_req.dl_ppa;
	FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "attach req: ppa %x", ppa));

	/*
	 * check if the PPA is valid
	 */

	mutex_enter(&fcip_global_mutex);

	/*
	 * The PPA is the instance number of the underlying port;
	 * search the global port list for a matching instance.
	 */
	for (fport = fcip_port_head; fport; fport = fport->fcipp_next) {
		if ((fptr = fport->fcipp_fcip) == NULL) {
			continue;
		}
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "ppa %x, inst %x", ppa,
		    ddi_get_instance(fptr->fcip_dip)));

		if (ppa == ddi_get_instance(fptr->fcip_dip)) {
			FCIP_DEBUG(FCIP_DEBUG_DLPI,
			    (CE_NOTE, "ppa found %x", ppa));
			break;
		}
	}

	if (fport == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "dlerrorack coz fport==NULL"));

		mutex_exit(&fcip_global_mutex);

		/*
		 * No fcip instance yet for this PPA; if the transport
		 * doesn't know the port either, the PPA is bad.
		 */
		if (fc_ulp_get_port_handle(ppa) == NULL) {
			dlerrorack(wq, mp, DL_ATTACH_REQ, DL_BADPPA, 0);
			return;
		}

		/*
		 * Wait for Port attach callback to trigger. If port_detach
		 * got in while we were waiting, then ddi_get_soft_state
		 * will return NULL, and we'll return error.
		 */

		delay(drv_usectohz(FCIP_INIT_DELAY));
		mutex_enter(&fcip_global_mutex);

		fptr = ddi_get_soft_state(fcip_softp, ppa);
		if (fptr == NULL) {
			mutex_exit(&fcip_global_mutex);
			dlerrorack(wq, mp, DL_ATTACH_REQ, DL_BADPPA, 0);
			return;
		}
	}

	/*
	 * set link to device and update our state
	 */
	slp->sl_fcip = fptr;
	slp->sl_state = DL_UNBOUND;

	mutex_exit(&fcip_global_mutex);

#ifdef DEBUG
	mutex_enter(&fptr->fcip_mutex);
	if (fptr->fcip_flags & FCIP_LINK_DOWN) {
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_WARN, "port not online yet"));
	}
	mutex_exit(&fptr->fcip_mutex);
#endif

	dlokack(wq, mp, DL_ATTACH_REQ);
}
5437 5437
5438 5438
5439 5439 /*
5440 5440 * DL_DETACH request - detaches a PPA from a stream
5441 5441 */
5442 5442 static void
5443 5443 fcip_dreq(queue_t *wq, mblk_t *mp)
5444 5444 {
5445 5445 struct fcipstr *slp;
5446 5446
5447 5447 slp = (struct fcipstr *)wq->q_ptr;
5448 5448
5449 5449 if (MBLKL(mp) < DL_DETACH_REQ_SIZE) {
5450 5450 dlerrorack(wq, mp, DL_DETACH_REQ, DL_BADPRIM, 0);
5451 5451 return;
5452 5452 }
5453 5453
5454 5454 if (slp->sl_state != DL_UNBOUND) {
5455 5455 dlerrorack(wq, mp, DL_DETACH_REQ, DL_OUTSTATE, 0);
5456 5456 return;
5457 5457 }
5458 5458
5459 5459 fcip_dodetach(slp);
5460 5460 dlokack(wq, mp, DL_DETACH_REQ);
5461 5461 }
5462 5462
5463 5463 /*
5464 5464 * DL_BIND request: requests a DLS provider to bind a DLSAP to the stream.
5465 5465 * DLS users communicate with a physical interface through DLSAPs. Multiple
5466 5466 * DLSAPs can be bound to the same stream (PPA)
5467 5467 */
5468 5468 static void
5469 5469 fcip_breq(queue_t *wq, mblk_t *mp)
5470 5470 {
5471 5471 struct fcipstr *slp;
5472 5472 union DL_primitives *dlp;
5473 5473 struct fcip *fptr;
5474 5474 struct fcipdladdr fcipaddr;
5475 5475 t_uscalar_t sap;
5476 5476 int xidtest;
5477 5477
5478 5478 slp = (struct fcipstr *)wq->q_ptr;
5479 5479
5480 5480 if (MBLKL(mp) < DL_BIND_REQ_SIZE) {
5481 5481 dlerrorack(wq, mp, DL_BIND_REQ, DL_BADPRIM, 0);
5482 5482 return;
5483 5483 }
5484 5484
5485 5485 if (slp->sl_state != DL_UNBOUND) {
5486 5486 dlerrorack(wq, mp, DL_BIND_REQ, DL_OUTSTATE, 0);
5487 5487 return;
5488 5488 }
5489 5489
5490 5490 dlp = (union DL_primitives *)mp->b_rptr;
5491 5491 fptr = slp->sl_fcip;
5492 5492
5493 5493 if (fptr == NULL) {
5494 5494 dlerrorack(wq, mp, DL_BIND_REQ, DL_OUTSTATE, 0);
5495 5495 return;
5496 5496 }
5497 5497
5498 5498 sap = dlp->bind_req.dl_sap;
5499 5499 FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "fcip_breq - sap: %x", sap));
5500 5500 xidtest = dlp->bind_req.dl_xidtest_flg;
5501 5501
5502 5502 if (xidtest) {
5503 5503 dlerrorack(wq, mp, DL_BIND_REQ, DL_NOAUTO, 0);
5504 5504 return;
5505 5505 }
5506 5506
5507 5507 FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "DLBIND: sap : %x", sap));
5508 5508
5509 5509 if (sap > ETHERTYPE_MAX) {
5510 5510 dlerrorack(wq, mp, dlp->dl_primitive, DL_BADSAP, 0);
5511 5511 return;
5512 5512 }
5513 5513 /*
5514 5514 * save SAP for this stream and change the link state
5515 5515 */
5516 5516 slp->sl_sap = sap;
5517 5517 slp->sl_state = DL_IDLE;
5518 5518
5519 5519 fcipaddr.dl_sap = sap;
5520 5520 ether_bcopy(&fptr->fcip_macaddr, &fcipaddr.dl_phys);
5521 5521 dlbindack(wq, mp, sap, &fcipaddr, FCIPADDRL, 0, 0);
5522 5522
5523 5523 fcip_setipq(fptr);
5524 5524 }
5525 5525
5526 5526 /*
5527 5527 * DL_UNBIND request to unbind a previously bound DLSAP, from this stream
5528 5528 */
5529 5529 static void
5530 5530 fcip_ubreq(queue_t *wq, mblk_t *mp)
5531 5531 {
5532 5532 struct fcipstr *slp;
5533 5533
5534 5534 slp = (struct fcipstr *)wq->q_ptr;
5535 5535
5536 5536 if (MBLKL(mp) < DL_UNBIND_REQ_SIZE) {
5537 5537 dlerrorack(wq, mp, DL_UNBIND_REQ, DL_BADPRIM, 0);
5538 5538 return;
5539 5539 }
5540 5540
5541 5541 if (slp->sl_state != DL_IDLE) {
5542 5542 dlerrorack(wq, mp, DL_UNBIND_REQ, DL_OUTSTATE, 0);
5543 5543 return;
5544 5544 }
5545 5545
5546 5546 slp->sl_state = DL_UNBOUND;
5547 5547 slp->sl_sap = 0;
5548 5548
5549 5549 (void) putnextctl1(RD(wq), M_FLUSH, FLUSHRW);
5550 5550 dlokack(wq, mp, DL_UNBIND_REQ);
5551 5551
5552 5552 fcip_setipq(slp->sl_fcip);
5553 5553 }
5554 5554
5555 5555 /*
5556 5556 * Return our physical address
5557 5557 */
5558 5558 static void
5559 5559 fcip_pareq(queue_t *wq, mblk_t *mp)
5560 5560 {
5561 5561 struct fcipstr *slp;
5562 5562 union DL_primitives *dlp;
5563 5563 int type;
5564 5564 struct fcip *fptr;
5565 5565 fcip_port_info_t *fport;
5566 5566 struct ether_addr addr;
5567 5567
5568 5568 slp = (struct fcipstr *)wq->q_ptr;
5569 5569
5570 5570 if (MBLKL(mp) < DL_PHYS_ADDR_REQ_SIZE) {
5571 5571 dlerrorack(wq, mp, DL_PHYS_ADDR_REQ, DL_BADPRIM, 0);
5572 5572 return;
5573 5573 }
5574 5574
5575 5575 dlp = (union DL_primitives *)mp->b_rptr;
5576 5576 type = dlp->physaddr_req.dl_addr_type;
5577 5577 fptr = slp->sl_fcip;
5578 5578
5579 5579 if (fptr == NULL) {
5580 5580 dlerrorack(wq, mp, DL_PHYS_ADDR_REQ, DL_OUTSTATE, 0);
5581 5581 return;
5582 5582 }
5583 5583
5584 5584 fport = fptr->fcip_port_info;
5585 5585
5586 5586 switch (type) {
5587 5587 case DL_FACT_PHYS_ADDR:
5588 5588 FCIP_DEBUG(FCIP_DEBUG_DLPI,
5589 5589 (CE_NOTE, "returning factory phys addr"));
5590 5590 wwn_to_ether(&fport->fcipp_pwwn, &addr);
5591 5591 break;
5592 5592
5593 5593 case DL_CURR_PHYS_ADDR:
5594 5594 FCIP_DEBUG(FCIP_DEBUG_DLPI,
5595 5595 (CE_NOTE, "returning current phys addr"));
5596 5596 ether_bcopy(&fptr->fcip_macaddr, &addr);
5597 5597 break;
5598 5598
5599 5599 default:
5600 5600 FCIP_DEBUG(FCIP_DEBUG_DLPI,
5601 5601 (CE_NOTE, "Not known cmd type in phys addr"));
5602 5602 dlerrorack(wq, mp, DL_PHYS_ADDR_REQ, DL_NOTSUPPORTED, 0);
5603 5603 return;
5604 5604 }
5605 5605 dlphysaddrack(wq, mp, &addr, ETHERADDRL);
5606 5606 }
5607 5607
5608 5608 /*
5609 5609 * Set physical address DLPI request
5610 5610 */
5611 5611 static void
5612 5612 fcip_spareq(queue_t *wq, mblk_t *mp)
5613 5613 {
5614 5614 struct fcipstr *slp;
5615 5615 union DL_primitives *dlp;
5616 5616 t_uscalar_t off, len;
5617 5617 struct ether_addr *addrp;
5618 5618 la_wwn_t wwn;
5619 5619 struct fcip *fptr;
5620 5620 fc_ns_cmd_t fcip_ns_cmd;
5621 5621
5622 5622 slp = (struct fcipstr *)wq->q_ptr;
5623 5623
5624 5624 if (MBLKL(mp) < DL_SET_PHYS_ADDR_REQ_SIZE) {
5625 5625 dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0);
5626 5626 return;
5627 5627 }
5628 5628
5629 5629 dlp = (union DL_primitives *)mp->b_rptr;
5630 5630 len = dlp->set_physaddr_req.dl_addr_length;
5631 5631 off = dlp->set_physaddr_req.dl_addr_offset;
5632 5632
5633 5633 if (!MBLKIN(mp, off, len)) {
5634 5634 dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0);
5635 5635 return;
5636 5636 }
5637 5637
5638 5638 addrp = (struct ether_addr *)(mp->b_rptr + off);
5639 5639
5640 5640 /*
5641 5641 * If the length of physical address is not correct or address
5642 5642 * specified is a broadcast address or multicast addr -
5643 5643 * return an error.
5644 5644 */
5645 5645 if ((len != ETHERADDRL) ||
5646 5646 ((addrp->ether_addr_octet[0] & 01) == 1) ||
5647 5647 (ether_cmp(addrp, &fcip_arpbroadcast_addr) == 0)) {
5648 5648 dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADADDR, 0);
5649 5649 return;
5650 5650 }
5651 5651
5652 5652 /*
5653 5653 * check if a stream is attached to this device. Else return an error
5654 5654 */
5655 5655 if ((fptr = slp->sl_fcip) == NULL) {
5656 5656 dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_OUTSTATE, 0);
5657 5657 return;
5658 5658 }
5659 5659
5660 5660 /*
5661 5661 * set the new interface local address. We request the transport
5662 5662 * layer to change the Port WWN for this device - return an error
5663 5663 * if we don't succeed.
5664 5664 */
5665 5665
5666 5666 ether_to_wwn(addrp, &wwn);
5667 5667 if (fcip_set_wwn(&wwn) == FC_SUCCESS) {
5668 5668 FCIP_DEBUG(FCIP_DEBUG_DLPI,
5669 5669 (CE_WARN, "WWN changed in spareq"));
5670 5670 } else {
5671 5671 dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADADDR, 0);
5672 5672 }
5673 5673
5674 5674 /*
5675 5675 * register The new Port WWN and Node WWN with the transport
5676 5676 * and Nameserver. Hope the transport ensures all current I/O
5677 5677 * has stopped before actually attempting to register a new
5678 5678 * port and Node WWN else we are hosed. Maybe a Link reset
5679 5679 * will get everyone's attention.
5680 5680 */
5681 5681 fcip_ns_cmd.ns_flags = 0;
5682 5682 fcip_ns_cmd.ns_cmd = NS_RPN_ID;
5683 5683 fcip_ns_cmd.ns_req_len = sizeof (la_wwn_t);
5684 5684 fcip_ns_cmd.ns_req_payload = (caddr_t)&wwn.raw_wwn[0];
5685 5685 fcip_ns_cmd.ns_resp_len = 0;
5686 5686 fcip_ns_cmd.ns_resp_payload = (caddr_t)0;
5687 5687 if (fc_ulp_port_ns(fptr->fcip_port_info->fcipp_handle,
5688 5688 (opaque_t)0, &fcip_ns_cmd) != FC_SUCCESS) {
5689 5689 FCIP_DEBUG(FCIP_DEBUG_DLPI,
5690 5690 (CE_WARN, "setting Port WWN failed"));
5691 5691 dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0);
5692 5692 return;
5693 5693 }
5694 5694
5695 5695 dlokack(wq, mp, DL_SET_PHYS_ADDR_REQ);
5696 5696 }
5697 5697
5698 5698 /*
5699 5699 * change our port's WWN if permitted by hardware
5700 5700 */
5701 5701 /* ARGSUSED */
5702 5702 static int
5703 5703 fcip_set_wwn(la_wwn_t *pwwn)
5704 5704 {
5705 5705 /*
5706 5706 * We're usually not allowed to change the WWN of adapters
5707 5707 * but some adapters do permit us to change the WWN - don't
5708 5708 * permit setting of WWNs (yet?) - This behavior could be
5709 5709 * modified if needed
5710 5710 */
5711 5711 return (FC_FAILURE);
5712 5712 }
5713 5713
5714 5714
/*
 * This routine fills in the header for fastpath data requests. What this
 * does in simple terms is, instead of sending all data through the Unitdata
 * request dlpi code paths (which will then append the protocol specific
 * header - network and snap headers in our case), the upper layers issue
 * a M_IOCTL with a DL_IOC_HDR_INFO request and ask the streams endpoint
 * driver to give the header it needs appended and the upper layer
 * allocates and fills in the header and calls our put routine
 *
 * On success the original ioctl message is acked with a new mblk linked
 * on that holds the prebuilt fcph network header + LLC/SNAP header; the
 * stream is flagged FCIP_SLFAST so the put routine knows headers are
 * already supplied by the upper layer.
 */
static void
fcip_dl_ioc_hdr_info(queue_t *wq, mblk_t *mp)
{
	mblk_t			*nmp;
	struct fcipstr		*slp;
	struct fcipdladdr	*dlap;
	dl_unitdata_req_t	*dlup;
	fcph_network_hdr_t	*headerp;
	la_wwn_t		wwn;
	llc_snap_hdr_t		*lsnap;
	struct fcip		*fptr;
	fcip_port_info_t	*fport;
	t_uscalar_t		off, len;
	size_t			hdrlen;
	int			error;

	slp = (struct fcipstr *)wq->q_ptr;
	fptr = slp->sl_fcip;
	if (fptr == NULL) {
		/* stream not attached to a PPA - nothing to build from */
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "dliochdr : returns EINVAL1"));
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	/*
	 * Pull the embedded DL_UNITDATA_REQ + destination address into
	 * a single contiguous mblk so the casts below are safe.
	 */
	error = miocpullup(mp, sizeof (dl_unitdata_req_t) + FCIPADDRL);
	if (error != 0) {
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "dliochdr : returns %d", error));
		miocnak(wq, mp, 0, error);
		return;
	}

	fport = fptr->fcip_port_info;

	/*
	 * check if the DL_UNITDATA_REQ destination addr has valid offset
	 * and length values
	 */
	dlup = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
	off = dlup->dl_dest_addr_offset;
	len = dlup->dl_dest_addr_length;
	if (dlup->dl_primitive != DL_UNITDATA_REQ ||
	    !MBLKIN(mp->b_cont, off, len) || (len != FCIPADDRL)) {
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "dliochdr : returns EINVAL2"));
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	dlap = (struct fcipdladdr *)(mp->b_cont->b_rptr + off);

	/*
	 * Allocate a new mblk to hold the ether header
	 */

	/*
	 * setup space for network header
	 */
	hdrlen = (sizeof (llc_snap_hdr_t) + sizeof (fcph_network_hdr_t));
	if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "dliochdr : returns ENOMEM"));
		miocnak(wq, mp, 0, ENOMEM);
		return;
	}
	nmp->b_wptr += hdrlen;

	/*
	 * Fill in the Network Hdr and LLC SNAP header;
	 */
	headerp = (fcph_network_hdr_t *)nmp->b_rptr;
	/*
	 * just fill in the Node WWN here - we can fill in the NAA_ID when
	 * we search the routing table
	 */
	if (ether_cmp(&dlap->dl_phys, &fcip_arpbroadcast_addr) == 0) {
		/* broadcast destination - use the well-known broadcast WWN */
		ether_to_wwn(&fcipnhbroadcastaddr, &wwn);
	} else {
		ether_to_wwn(&dlap->dl_phys, &wwn);
	}
	bcopy(&wwn, &headerp->net_dest_addr, sizeof (la_wwn_t));
	bcopy(&fport->fcipp_pwwn, &headerp->net_src_addr, sizeof (la_wwn_t));
	/* LLC/SNAP header follows the fcph network header (RFC 2625) */
	lsnap = (llc_snap_hdr_t *)(nmp->b_rptr + sizeof (fcph_network_hdr_t));
	lsnap->dsap = 0xAA;
	lsnap->ssap = 0xAA;
	lsnap->ctrl = 0x03;
	lsnap->oui[0] = 0x00;
	lsnap->oui[1] = 0x00;
	lsnap->oui[2] = 0x00;
	/* protocol id carries the bound SAP (ethertype), big-endian */
	lsnap->pid = BE_16(dlap->dl_sap);

	/*
	 * Link new mblk in after the "request" mblks.
	 */
	linkb(mp, nmp);

	/* mark this stream as using fastpath (prebuilt headers) */
	slp->sl_flags |= FCIP_SLFAST;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "dliochdr : returns success "));
	miocack(wq, mp, msgsize(mp->b_cont), 0);
}
5827 5827
5828 5828
/*
 * kmem cache constructor for fcip packets: initializes the fcip_pkt_t
 * bookkeeping fields and pre-allocates the command DMA handle so that
 * per-packet allocation on the hot path is cheap.  Returns FCIP_SUCCESS
 * or FCIP_FAILURE (DMA handle allocation failed).
 */
static int
fcip_cache_constructor(void *buf, void *arg, int flags)
{
	fcip_pkt_t *fcip_pkt = buf;
	fc_packet_t *fc_pkt;
	fcip_port_info_t *fport = (fcip_port_info_t *)arg;
	int (*cb) (caddr_t);
	struct fcip *fptr;

	/* map the kmem flag onto the DDI DMA wait/nowait callback */
	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	ASSERT(fport != NULL);

	fptr = fport->fcipp_fcip;

	/*
	 * we allocated space for our private area at the end of the
	 * fc packet. Make sure we point to it correctly. Ideally we
	 * should just push fc_packet_private to the beginning or end
	 * of the fc_packet structure
	 */
	fcip_pkt->fcip_pkt_next = NULL;
	fcip_pkt->fcip_pkt_prev = NULL;
	fcip_pkt->fcip_pkt_dest = NULL;
	fcip_pkt->fcip_pkt_state = 0;
	fcip_pkt->fcip_pkt_reason = 0;
	fcip_pkt->fcip_pkt_flags = 0;
	fcip_pkt->fcip_pkt_fptr = fptr;
	fcip_pkt->fcip_pkt_dma_flags = 0;

	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
	fc_pkt->pkt_ulp_rscn_infop = NULL;

	/*
	 * We use pkt_cmd_dma for OUTBOUND requests. We don't expect
	 * any responses for outbound IP data so no need to setup
	 * response or data dma handles.
	 */
	if (ddi_dma_alloc_handle(fport->fcipp_dip,
	    &fport->fcipp_cmd_dma_attr, cb, NULL,
	    &fc_pkt->pkt_cmd_dma) != DDI_SUCCESS) {
		return (FCIP_FAILURE);
	}

	/* FCA private area lives immediately after the fcip_pkt_t */
	fc_pkt->pkt_cmd_acc = fc_pkt->pkt_resp_acc = NULL;
	fc_pkt->pkt_fca_private = (opaque_t)((caddr_t)buf +
	    sizeof (fcip_pkt_t));
	fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt;

	/* no resp/data DMA resources are ever set up for these packets */
	fc_pkt->pkt_cmd_cookie_cnt = fc_pkt->pkt_resp_cookie_cnt =
	    fc_pkt->pkt_data_cookie_cnt = 0;
	fc_pkt->pkt_cmd_cookie = fc_pkt->pkt_resp_cookie =
	    fc_pkt->pkt_data_cookie = NULL;

	return (FCIP_SUCCESS);
}
5888 5888
5889 5889 /*
5890 5890 * destroy the fcip kmem cache
5891 5891 */
5892 5892 static void
5893 5893 fcip_cache_destructor(void *buf, void *arg)
5894 5894 {
5895 5895 fcip_pkt_t *fcip_pkt = (fcip_pkt_t *)buf;
5896 5896 fc_packet_t *fc_pkt;
5897 5897 fcip_port_info_t *fport = (fcip_port_info_t *)arg;
5898 5898 struct fcip *fptr;
5899 5899
5900 5900 ASSERT(fport != NULL);
5901 5901
5902 5902 fptr = fport->fcipp_fcip;
5903 5903
5904 5904 ASSERT(fptr == fcip_pkt->fcip_pkt_fptr);
5905 5905 fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
5906 5906
5907 5907 if (fc_pkt->pkt_cmd_dma) {
5908 5908 ddi_dma_free_handle(&fc_pkt->pkt_cmd_dma);
5909 5909 }
5910 5910 }
5911 5911
/*
 * the fcip destination structure is hashed on Node WWN assuming
 * a NAA_ID of 0x1 (IEEE)
 *
 * Lookup order: (1) active dest hash table, (2) routing table, and if
 * neither has a usable entry, (3) query the transport's remote-port
 * hash tables (trying the Qlogic WWN format variants), falling back to
 * a FARP ELS on fabric/point-to-point topologies.  Returns NULL when no
 * destination can be found or created.
 */
static struct fcip_dest *
fcip_get_dest(struct fcip *fptr, la_wwn_t *pwwn)
{
	struct fcip_dest *fdestp = NULL;
	fcip_port_info_t *fport;
	int hash_bucket;
	opaque_t pd;
	int rval;
	struct fcip_routing_table *frp;
	la_wwn_t twwn;
	uint32_t *twwnp = (uint32_t *)&twwn;

	hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn);
	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "get dest hashbucket : 0x%x", hash_bucket));
	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
	    pwwn->raw_wwn[2], pwwn->raw_wwn[3], pwwn->raw_wwn[4],
	    pwwn->raw_wwn[5], pwwn->raw_wwn[6], pwwn->raw_wwn[7]));

	ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS);

	if (fcip_check_port_exists(fptr)) {
		/* fptr is stale, return fdestp */
		return (fdestp);
	}
	fport = fptr->fcip_port_info;

	/*
	 * First check if we have active I/Os going on with the
	 * destination port (an entry would exist in fcip_dest hash table)
	 */
	mutex_enter(&fptr->fcip_dest_mutex);
	fdestp = fptr->fcip_dest[hash_bucket];
	while (fdestp != NULL) {
		mutex_enter(&fdestp->fcipd_mutex);
		/* only consider entries still backed by a routing entry */
		if (fdestp->fcipd_rtable) {
			if (fcip_wwn_compare(pwwn, &fdestp->fcipd_pwwn,
			    FCIP_COMPARE_NWWN) == 0) {
				FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
				    (CE_NOTE, "found fdestp"));
				mutex_exit(&fdestp->fcipd_mutex);
				mutex_exit(&fptr->fcip_dest_mutex);
				return (fdestp);
			}
		}
		mutex_exit(&fdestp->fcipd_mutex);
		fdestp = fdestp->fcipd_next;
	}
	mutex_exit(&fptr->fcip_dest_mutex);

	/*
	 * We did not find the destination port information in our
	 * active port list so search for an entry in our routing
	 * table.
	 */
	mutex_enter(&fptr->fcip_rt_mutex);
	frp = fcip_lookup_rtable(fptr, pwwn, FCIP_COMPARE_NWWN);
	mutex_exit(&fptr->fcip_rt_mutex);

	/*
	 * NOTE(review): this condition treats a routing entry as unusable
	 * when it exists but is neither unavailable nor logged in, or has
	 * no port device.  The !FCIP_RTE_UNAVAIL clause looks inverted at
	 * first glance - confirm against the original intent before
	 * changing anything here.
	 */
	if (frp == NULL || (frp && (!FCIP_RTE_UNAVAIL(frp->fcipr_state)) &&
	    frp->fcipr_state != PORT_DEVICE_LOGGED_IN) ||
	    (frp && frp->fcipr_pd == NULL)) {
		/*
		 * No entry for the destination port in our routing
		 * table too. First query the transport to see if it
		 * already has structures for the destination port in
		 * its hash tables. This must be done for all topologies
		 * since we could have retired entries in the hash tables
		 * which may have to be re-added without a statechange
		 * callback happening. Its better to try and get an entry
		 * for the destination port rather than simply failing a
		 * request though it may be an overkill in private loop
		 * topologies.
		 * If a entry for the remote port exists in the transport's
		 * hash tables, we are fine and can add the entry to our
		 * routing and dest hash lists, Else for fabric configs we
		 * query the nameserver if one exists or issue FARP ELS.
		 */

		/*
		 * We need to do a PortName based Nameserver
		 * query operation. So get the right PortWWN
		 * for the adapter.
		 */
		bcopy(pwwn, &twwn, sizeof (la_wwn_t));

		/*
		 * Try IEEE Name (Format 1) first, this is the default and
		 * Emulex uses this format.
		 */
		pd = fc_ulp_get_remote_port(fport->fcipp_handle,
		    &twwn, &rval, 1);

		if (rval != FC_SUCCESS) {
			/*
			 * If IEEE Name (Format 1) query failed, try IEEE
			 * Extended Name (Format 2) which Qlogic uses.
			 * And try port 1 on Qlogic FC-HBA first.
			 * Note: On x86, we need to byte swap the 32-bit
			 * word first, after the modification, swap it back.
			 */
			*twwnp = BE_32(*twwnp);
			twwn.w.nport_id = QLC_PORT_1_ID_BITS;
			twwn.w.naa_id = QLC_PORT_NAA;
			*twwnp = BE_32(*twwnp);
			pd = fc_ulp_get_remote_port(fport->fcipp_handle,
			    &twwn, &rval, 1);
		}

		if (rval != FC_SUCCESS) {
			/* If still failed, try port 2 on Qlogic FC-HBA. */
			*twwnp = BE_32(*twwnp);
			twwn.w.nport_id = QLC_PORT_2_ID_BITS;
			*twwnp = BE_32(*twwnp);
			pd = fc_ulp_get_remote_port(fport->fcipp_handle,
			    &twwn, &rval, 1);
		}

		if (rval == FC_SUCCESS) {
			fc_portmap_t map;
			/*
			 * Add the newly found destination structure
			 * to our routing table. Create a map with
			 * the device we found. We could ask the
			 * transport to give us the list of all
			 * devices connected to our port but we
			 * probably don't need to know all the devices
			 * so let us just constuct a list with only
			 * one device instead.
			 */

			fc_ulp_copy_portmap(&map, pd);
			fcip_rt_update(fptr, &map, 1);

			/* re-read the routing entry we just created */
			mutex_enter(&fptr->fcip_rt_mutex);
			frp = fcip_lookup_rtable(fptr, pwwn,
			    FCIP_COMPARE_NWWN);
			mutex_exit(&fptr->fcip_rt_mutex);

			fdestp = fcip_add_dest(fptr, frp);
		} else if (fcip_farp_supported &&
		    (FC_TOP_EXTERNAL(fport->fcipp_topology) ||
		    (fport->fcipp_topology == FC_TOP_PT_PT))) {
			/*
			 * The Name server request failed so
			 * issue an FARP
			 */
			fdestp = fcip_do_farp(fptr, pwwn, NULL,
			    0, 0);
		} else {
			fdestp = NULL;
		}
	} else if (frp && frp->fcipr_state == PORT_DEVICE_LOGGED_IN) {
		/*
		 * Prepare a dest structure to return to caller
		 */
		fdestp = fcip_add_dest(fptr, frp);
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "in fcip get dest non fabric"));
	}
	return (fdestp);
}
6079 6079
6080 6080
6081 6081 /*
6082 6082 * Endian clean WWN compare.
6083 6083 * Returns 0 if they compare OK, else return non zero value.
6084 6084 * flag can be bitwise OR of FCIP_COMPARE_NWWN, FCIP_COMPARE_PWWN,
6085 6085 * FCIP_COMPARE_BROADCAST.
6086 6086 */
6087 6087 static int
6088 6088 fcip_wwn_compare(la_wwn_t *wwn1, la_wwn_t *wwn2, int flag)
6089 6089 {
6090 6090 int rval = 0;
6091 6091 if ((wwn1->raw_wwn[2] != wwn2->raw_wwn[2]) ||
6092 6092 (wwn1->raw_wwn[3] != wwn2->raw_wwn[3]) ||
6093 6093 (wwn1->raw_wwn[4] != wwn2->raw_wwn[4]) ||
6094 6094 (wwn1->raw_wwn[5] != wwn2->raw_wwn[5]) ||
6095 6095 (wwn1->raw_wwn[6] != wwn2->raw_wwn[6]) ||
6096 6096 (wwn1->raw_wwn[7] != wwn2->raw_wwn[7])) {
6097 6097 rval = 1;
6098 6098 } else if ((flag == FCIP_COMPARE_PWWN) &&
6099 6099 (((wwn1->raw_wwn[0] & 0xf0) != (wwn2->raw_wwn[0] & 0xf0)) ||
6100 6100 (wwn1->raw_wwn[1] != wwn2->raw_wwn[1]))) {
6101 6101 rval = 1;
6102 6102 }
6103 6103 return (rval);
6104 6104 }
6105 6105
6106 6106
6107 6107 /*
6108 6108 * Add an entry for a remote port in the dest hash table. Dest hash table
6109 6109 * has entries for ports in the routing hash table with which we decide
6110 6110 * to establish IP communication with. The no. of entries in the dest hash
6111 6111 * table must always be less than or equal to the entries in the routing
6112 6112 * hash table. Every entry in the dest hash table ofcourse must have a
6113 6113 * corresponding entry in the routing hash table
6114 6114 */
6115 6115 static struct fcip_dest *
6116 6116 fcip_add_dest(struct fcip *fptr, struct fcip_routing_table *frp)
6117 6117 {
6118 6118 struct fcip_dest *fdestp = NULL;
6119 6119 la_wwn_t *pwwn;
6120 6120 int hash_bucket;
6121 6121 struct fcip_dest *fdest_new;
6122 6122
6123 6123 if (frp == NULL) {
6124 6124 return (fdestp);
6125 6125 }
6126 6126
6127 6127 pwwn = &frp->fcipr_pwwn;
6128 6128 mutex_enter(&fptr->fcip_dest_mutex);
6129 6129 hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn);
6130 6130 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
6131 6131 (CE_NOTE, "add dest hash_bucket: 0x%x", hash_bucket));
6132 6132
6133 6133 ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS);
6134 6134
6135 6135 fdestp = fptr->fcip_dest[hash_bucket];
6136 6136 while (fdestp != NULL) {
6137 6137 mutex_enter(&fdestp->fcipd_mutex);
6138 6138 if (fdestp->fcipd_rtable) {
6139 6139 if (fcip_wwn_compare(pwwn, &fdestp->fcipd_pwwn,
6140 6140 FCIP_COMPARE_PWWN) == 0) {
6141 6141 mutex_exit(&fdestp->fcipd_mutex);
6142 6142 mutex_exit(&fptr->fcip_dest_mutex);
6143 6143 return (fdestp);
6144 6144 }
6145 6145 }
6146 6146 mutex_exit(&fdestp->fcipd_mutex);
6147 6147 fdestp = fdestp->fcipd_next;
6148 6148 }
6149 6149
6150 6150 ASSERT(fdestp == NULL);
6151 6151
6152 6152 fdest_new = (struct fcip_dest *)
6153 6153 kmem_zalloc(sizeof (struct fcip_dest), KM_SLEEP);
6154 6154
6155 6155 mutex_init(&fdest_new->fcipd_mutex, NULL, MUTEX_DRIVER, NULL);
6156 6156 fdest_new->fcipd_next = fptr->fcip_dest[hash_bucket];
6157 6157 fdest_new->fcipd_refcnt = 0;
6158 6158 fdest_new->fcipd_rtable = frp;
6159 6159 fdest_new->fcipd_ncmds = 0;
6160 6160 fptr->fcip_dest[hash_bucket] = fdest_new;
6161 6161 fdest_new->fcipd_flags = FCIP_PORT_NOTLOGGED;
6162 6162
6163 6163 mutex_exit(&fptr->fcip_dest_mutex);
6164 6164 return (fdest_new);
6165 6165 }
6166 6166
6167 6167 /*
6168 6168 * Cleanup the dest hash table and remove all entries
6169 6169 */
6170 6170 static void
6171 6171 fcip_cleanup_dest(struct fcip *fptr)
6172 6172 {
6173 6173 struct fcip_dest *fdestp = NULL;
6174 6174 struct fcip_dest *fdest_delp = NULL;
6175 6175 int i;
6176 6176
6177 6177 mutex_enter(&fptr->fcip_dest_mutex);
6178 6178
6179 6179 for (i = 0; i < FCIP_DEST_HASH_ELEMS; i++) {
6180 6180 fdestp = fptr->fcip_dest[i];
6181 6181 while (fdestp != NULL) {
6182 6182 mutex_destroy(&fdestp->fcipd_mutex);
6183 6183 fdest_delp = fdestp;
6184 6184 fdestp = fdestp->fcipd_next;
6185 6185 kmem_free(fdest_delp, sizeof (struct fcip_dest));
6186 6186 fptr->fcip_dest[i] = NULL;
6187 6187 }
6188 6188 }
6189 6189 mutex_exit(&fptr->fcip_dest_mutex);
6190 6190 }
6191 6191
6192 6192
6193 6193 /*
6194 6194 * Send FARP requests for Fabric ports when we don't have the port
6195 6195 * we wish to talk to in our routing hash table. FARP is specially required
6196 6196 * to talk to FC switches for inband switch management. Most FC switches
6197 6197 * today have a switch FC IP address for IP over FC inband switch management
6198 6198 * but the WWN and Port_ID for this traffic is not available through the
6199 6199 * Nameservers since the switch themeselves are transparent.
6200 6200 */
6201 6201 /* ARGSUSED */
6202 6202 static struct fcip_dest *
6203 6203 fcip_do_farp(struct fcip *fptr, la_wwn_t *pwwn, char *ip_addr,
6204 6204 size_t ip_addr_len, int flags)
6205 6205 {
6206 6206 fcip_pkt_t *fcip_pkt;
6207 6207 fc_packet_t *fc_pkt;
6208 6208 fcip_port_info_t *fport = fptr->fcip_port_info;
6209 6209 la_els_farp_t farp_cmd;
6210 6210 la_els_farp_t *fcmd;
6211 6211 struct fcip_dest *fdestp = NULL;
6212 6212 int rval;
6213 6213 clock_t farp_lbolt;
6214 6214 la_wwn_t broadcast_wwn;
6215 6215 struct fcip_dest *bdestp;
6216 6216 struct fcip_routing_table *frp;
6217 6217
6218 6218 bdestp = fcip_get_dest(fptr, &broadcast_wwn);
6219 6219
6220 6220 if (bdestp == NULL) {
6221 6221 return (fdestp);
6222 6222 }
6223 6223
6224 6224 fcip_pkt = fcip_ipkt_alloc(fptr, sizeof (la_els_farp_t),
6225 6225 sizeof (la_els_farp_t), bdestp->fcipd_pd, KM_SLEEP);
6226 6226
6227 6227 if (fcip_pkt == NULL) {
6228 6228 return (fdestp);
6229 6229 }
6230 6230
6231 6231 fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
6232 6232 ether_to_wwn(&fcip_arpbroadcast_addr, &broadcast_wwn);
6233 6233
6234 6234 mutex_enter(&bdestp->fcipd_mutex);
6235 6235 if (bdestp->fcipd_rtable == NULL) {
6236 6236 mutex_exit(&bdestp->fcipd_mutex);
6237 6237 fcip_ipkt_free(fcip_pkt);
6238 6238 return (fdestp);
6239 6239 }
6240 6240
6241 6241 fcip_pkt->fcip_pkt_dest = bdestp;
6242 6242 fc_pkt->pkt_fca_device = bdestp->fcipd_fca_dev;
6243 6243
6244 6244 bdestp->fcipd_ncmds++;
6245 6245 mutex_exit(&bdestp->fcipd_mutex);
6246 6246
6247 6247 fcip_init_broadcast_pkt(fcip_pkt, NULL, 1);
6248 6248 fcip_pkt->fcip_pkt_flags |= FCIP_PKT_IN_LIST;
6249 6249
6250 6250 /*
6251 6251 * Now initialize the FARP payload itself
6252 6252 */
6253 6253 fcmd = &farp_cmd;
6254 6254 fcmd->ls_code.ls_code = LA_ELS_FARP_REQ;
6255 6255 fcmd->ls_code.mbz = 0;
6256 6256 /*
6257 6257 * for now just match the Port WWN since the other match addr
6258 6258 * code points are optional. We can explore matching the IP address
6259 6259 * if needed
6260 6260 */
6261 6261 if (ip_addr) {
6262 6262 fcmd->match_addr = FARP_MATCH_WW_PN_IPv4;
6263 6263 } else {
6264 6264 fcmd->match_addr = FARP_MATCH_WW_PN;
6265 6265 }
6266 6266
6267 6267 /*
6268 6268 * Request the responder port to log into us - that way
6269 6269 * the Transport is aware of the remote port when we create
6270 6270 * an entry for it in our tables
6271 6271 */
6272 6272 fcmd->resp_flags = FARP_INIT_REPLY | FARP_INIT_P_LOGI;
6273 6273 fcmd->req_id = fport->fcipp_sid;
6274 6274 fcmd->dest_id.port_id = fc_pkt->pkt_cmd_fhdr.d_id;
6275 6275 bcopy(&fport->fcipp_pwwn, &fcmd->req_pwwn, sizeof (la_wwn_t));
6276 6276 bcopy(&fport->fcipp_nwwn, &fcmd->req_nwwn, sizeof (la_wwn_t));
6277 6277 bcopy(pwwn, &fcmd->resp_pwwn, sizeof (la_wwn_t));
6278 6278 /*
6279 6279 * copy in source IP address if we get to know it
6280 6280 */
6281 6281 if (ip_addr) {
6282 6282 bcopy(ip_addr, fcmd->resp_ip, ip_addr_len);
6283 6283 }
6284 6284
6285 6285 fc_pkt->pkt_cmdlen = sizeof (la_els_farp_t);
6286 6286 fc_pkt->pkt_rsplen = sizeof (la_els_farp_t);
6287 6287 fc_pkt->pkt_tran_type = FC_PKT_EXCHANGE;
6288 6288 fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt;
6289 6289
6290 6290 /*
6291 6291 * Endian safe copy
6292 6292 */
6293 6293 FCIP_CP_OUT(fcmd, fc_pkt->pkt_cmd, fc_pkt->pkt_cmd_acc,
6294 6294 sizeof (la_els_farp_t));
6295 6295
6296 6296 /*
6297 6297 * send the packet in polled mode.
6298 6298 */
6299 6299 rval = fc_ulp_issue_els(fport->fcipp_handle, fc_pkt);
6300 6300 if (rval != FC_SUCCESS) {
6301 6301 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN,
6302 6302 "fcip_transport of farp pkt failed 0x%x", rval));
6303 6303 fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_LIST;
6304 6304 fcip_ipkt_free(fcip_pkt);
6305 6305
6306 6306 mutex_enter(&bdestp->fcipd_mutex);
6307 6307 bdestp->fcipd_ncmds--;
6308 6308 mutex_exit(&bdestp->fcipd_mutex);
6309 6309
6310 6310 return (fdestp);
6311 6311 }
6312 6312
6313 6313 farp_lbolt = ddi_get_lbolt();
6314 6314 farp_lbolt += drv_usectohz(FCIP_FARP_TIMEOUT);
6315 6315
6316 6316 mutex_enter(&fptr->fcip_mutex);
6317 6317 fptr->fcip_farp_rsp_flag = 0;
6318 6318 while (!fptr->fcip_farp_rsp_flag) {
6319 6319 if (cv_timedwait(&fptr->fcip_farp_cv, &fptr->fcip_mutex,
6320 6320 farp_lbolt) == -1) {
6321 6321 /*
6322 6322 * No FARP response from any destination port
6323 6323 * so bail out.
6324 6324 */
6325 6325 fptr->fcip_farp_rsp_flag = 1;
6326 6326 } else {
6327 6327 /*
6328 6328 * We received a FARP response - check to see if the
6329 6329 * response was in reply to our FARP request.
6330 6330 */
6331 6331
6332 6332 mutex_enter(&fptr->fcip_rt_mutex);
6333 6333 frp = fcip_lookup_rtable(fptr, pwwn, FCIP_COMPARE_NWWN);
6334 6334 mutex_exit(&fptr->fcip_rt_mutex);
6335 6335
6336 6336 if ((frp != NULL) &&
6337 6337 !FCIP_RTE_UNAVAIL(frp->fcipr_state)) {
6338 6338 fdestp = fcip_get_dest(fptr, pwwn);
6339 6339 } else {
6340 6340 /*
6341 6341 * Not our FARP response so go back and wait
6342 6342 * again till FARP_TIMEOUT expires
6343 6343 */
6344 6344 fptr->fcip_farp_rsp_flag = 0;
6345 6345 }
6346 6346 }
6347 6347 }
6348 6348 mutex_exit(&fptr->fcip_mutex);
6349 6349
6350 6350 fcip_pkt->fcip_pkt_flags |= FCIP_PKT_IN_LIST;
6351 6351 fcip_ipkt_free(fcip_pkt);
6352 6352 mutex_enter(&bdestp->fcipd_mutex);
6353 6353 bdestp->fcipd_ncmds--;
6354 6354 mutex_exit(&bdestp->fcipd_mutex);
6355 6355 return (fdestp);
6356 6356 }
6357 6357
6358 6358
6359 6359
6360 6360 /*
6361 6361 * Helper routine to PLOGI to a remote port we wish to talk to.
6362 6362 * This may not be required since the port driver does logins anyway,
6363 6363 * but this can be required in fabric cases since FARP requests/responses
6364 6364 * don't require you to be logged in?
6365 6365 */
6366 6366
/* ARGSUSED */
static int
fcip_do_plogi(struct fcip *fptr, struct fcip_routing_table *frp)
{
	fcip_pkt_t *fcip_pkt;
	fc_packet_t *fc_pkt;
	fcip_port_info_t *fport = fptr->fcip_port_info;
	la_els_logi_t logi;
	int rval;
	fc_frame_hdr_t *fr_hdr;

	/*
	 * Don't bother to login for broadcast RTE entries
	 * (d_id 0x0 and 0xffffff are the broadcast/dummy addresses).
	 */
	if ((frp->fcipr_d_id.port_id == 0x0) ||
	    (frp->fcipr_d_id.port_id == 0xffffff)) {
		return (FC_FAILURE);
	}

	/*
	 * We shouldn't pound in too many logins here; if a login is
	 * already in flight or completed, report success and let the
	 * in-flight one finish.
	 */
	if (frp->fcipr_state == FCIP_RT_LOGIN_PROGRESS ||
	    frp->fcipr_state == PORT_DEVICE_LOGGED_IN) {
		return (FC_SUCCESS);
	}

	/*
	 * Internal packet sized to carry a la_els_logi_t in both the
	 * command and response buffers.
	 */
	fcip_pkt = fcip_ipkt_alloc(fptr, sizeof (la_els_logi_t),
	    sizeof (la_els_logi_t), frp->fcipr_pd, KM_SLEEP);

	if (fcip_pkt == NULL) {
		return (FC_FAILURE);
	}

	/*
	 * Update back pointer for login state update: fcip_ipkt_callback()
	 * uses fcip_pkt_frp to mark the route logged-in or invalid.
	 */
	fcip_pkt->fcip_pkt_frp = frp;
	frp->fcipr_state = FCIP_RT_LOGIN_PROGRESS;

	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);

	/*
	 * Initialize frame header for ELS: extended link service request
	 * from our S_ID to the route's D_ID, unassigned exchange ids.
	 */
	fr_hdr = &fc_pkt->pkt_cmd_fhdr;
	fr_hdr->r_ctl = R_CTL_ELS_REQ;
	fr_hdr->type = FC_TYPE_EXTENDED_LS;
	fr_hdr->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
	fr_hdr->df_ctl = 0;
	fr_hdr->s_id = fport->fcipp_sid.port_id;
	fr_hdr->d_id = frp->fcipr_d_id.port_id;
	fr_hdr->seq_cnt = 0;
	fr_hdr->ox_id = 0xffff;
	fr_hdr->rx_id = 0xffff;
	fr_hdr->ro = 0;

	fc_pkt->pkt_rsplen = sizeof (la_els_logi_t);
	fc_pkt->pkt_comp = fcip_ipkt_callback;
	fc_pkt->pkt_tran_type = FC_PKT_EXCHANGE;
	fc_pkt->pkt_timeout = 10;	/* 10 seconds */
	/* driver-level TTL so fcip_timeout() can reap a stuck packet */
	fcip_pkt->fcip_pkt_ttl = fptr->fcip_timeout_ticks + fc_pkt->pkt_timeout;
	fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt;

	/*
	 * Everybody does class 3, so let's just set it. If the transport
	 * knows better, it will deal with the class appropriately.
	 */

	fc_pkt->pkt_tran_flags = FC_TRAN_INTR | FC_TRAN_CLASS3;

	/*
	 * we need only fill in the ls_code and the cmd frame header;
	 * the rest of the PLOGI payload stays zeroed.
	 */
	bzero((void *)&logi, sizeof (la_els_logi_t));
	logi.ls_code.ls_code = LA_ELS_PLOGI;
	logi.ls_code.mbz = 0;

	FCIP_CP_OUT((uint8_t *)&logi, fc_pkt->pkt_cmd, fc_pkt->pkt_cmd_acc,
	    sizeof (la_els_logi_t));

	rval = fc_ulp_login(fport->fcipp_handle, &fc_pkt, 1);
	if (rval != FC_SUCCESS) {
		cmn_err(CE_WARN,
		    "!fc_ulp_login failed for d_id: 0x%x, rval: 0x%x",
		    frp->fcipr_d_id.port_id, rval);
		/*
		 * NOTE(review): on failure frp->fcipr_state is left at
		 * FCIP_RT_LOGIN_PROGRESS with no callback coming to
		 * reset it -- confirm the route is recovered elsewhere
		 * (e.g. by the RTE timeout path).
		 */
		fcip_ipkt_free(fcip_pkt);
	}
	return (rval);
}
6458 6458
6459 6459 /*
6460 6460 * The packet callback routine - called from the transport/FCA after
6461 6461 * it is done DMA'ing/sending out the packet contents on the wire so
6462 6462 * that the alloc'ed packet can be freed
6463 6463 */
static void
fcip_ipkt_callback(fc_packet_t *fc_pkt)
{
	ls_code_t logi_req;
	ls_code_t logi_resp;
	fcip_pkt_t *fcip_pkt;
	fc_frame_hdr_t *fr_hdr;
	struct fcip *fptr;
	fcip_port_info_t *fport;
	struct fcip_routing_table *frp;

	fr_hdr = &fc_pkt->pkt_cmd_fhdr;

	/* Pull the response and original request ls_codes back in */
	FCIP_CP_IN(fc_pkt->pkt_resp, (uint8_t *)&logi_resp,
	    fc_pkt->pkt_resp_acc, sizeof (logi_resp));

	FCIP_CP_IN(fc_pkt->pkt_cmd, (uint8_t *)&logi_req, fc_pkt->pkt_cmd_acc,
	    sizeof (logi_req));

	/* Recover our packet and routing-table entry from ulp_private */
	fcip_pkt = (fcip_pkt_t *)fc_pkt->pkt_ulp_private;
	frp = fcip_pkt->fcip_pkt_frp;
	fptr = fcip_pkt->fcip_pkt_fptr;
	fport = fptr->fcip_port_info;

	/* the only internal ELS we issue with this callback is PLOGI */
	ASSERT(logi_req.ls_code == LA_ELS_PLOGI);

	if (fc_pkt->pkt_state != FC_PKT_SUCCESS ||
	    logi_resp.ls_code != LA_ELS_ACC) {
		/* EMPTY */

		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN,
		    "opcode : 0x%x to d_id: 0x%x failed",
		    logi_req.ls_code, fr_hdr->d_id));

		/*
		 * PLOGI rejected or transport failure: invalidate the
		 * route, but give it half an RTE timeout before retiring
		 * so the link has a chance to recover.
		 */
		mutex_enter(&fptr->fcip_rt_mutex);
		frp->fcipr_state = PORT_DEVICE_INVALID;
		frp->fcipr_invalid_timeout = fptr->fcip_timeout_ticks +
		    (FCIP_RTE_TIMEOUT / 2);
		mutex_exit(&fptr->fcip_rt_mutex);
	} else {
		fc_portid_t d_id;

		d_id.port_id = fr_hdr->d_id;
		d_id.priv_lilp_posit = 0;

		/*
		 * Update PLOGI results; FCA Handle, and Port device handles
		 */
		mutex_enter(&fptr->fcip_rt_mutex);
		frp->fcipr_pd = fc_pkt->pkt_pd;
		frp->fcipr_fca_dev =
		    fc_ulp_get_fca_device(fport->fcipp_handle, d_id);
		frp->fcipr_state = PORT_DEVICE_LOGGED_IN;
		mutex_exit(&fptr->fcip_rt_mutex);
	}

	/* internal packets are single-shot; release all resources */
	fcip_ipkt_free(fcip_pkt);
}
6522 6522
6523 6523
6524 6524 /*
6525 6525 * pkt_alloc routine for outbound IP datagrams. The cache constructor
6526 6526 * Only initializes the pkt_cmd_dma (which is where the outbound datagram
6527 6527 * is stuffed) since we don't expect response
6528 6528 */
static fcip_pkt_t *
fcip_pkt_alloc(struct fcip *fptr, mblk_t *bp, int flags, int datalen)
{
	fcip_pkt_t *fcip_pkt;
	fc_packet_t *fc_pkt;
	ddi_dma_cookie_t pkt_cookie;
	ddi_dma_cookie_t *cp;
	uint32_t cnt;
	fcip_port_info_t *fport = fptr->fcip_port_info;

	/* xmit packets come from a kmem cache; see cache constructor */
	fcip_pkt = kmem_cache_alloc(fptr->fcip_xmit_cache, flags);
	if (fcip_pkt == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN,
		    "fcip_pkt_alloc: kmem_cache_alloc failed"));
		return (NULL);
	}

	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
	fcip_pkt->fcip_pkt_fcpktp = fc_pkt;
	fc_pkt->pkt_tran_flags = 0;
	fcip_pkt->fcip_pkt_dma_flags = 0;

	/*
	 * the cache constructor has allocated the dma handle; bind the
	 * mblk data directly for the outbound (write) transfer.
	 */
	fc_pkt->pkt_cmd = (caddr_t)bp->b_rptr;
	if (ddi_dma_addr_bind_handle(fc_pkt->pkt_cmd_dma, NULL,
	    (caddr_t)bp->b_rptr, datalen, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &pkt_cookie,
	    &fc_pkt->pkt_cmd_cookie_cnt) != DDI_DMA_MAPPED) {
		goto fail;
	}

	/* remember the bind so fcip_free_pkt_dma() can unbind it */
	fcip_pkt->fcip_pkt_dma_flags |= FCIP_CMD_DMA_BOUND;

	/* the FCA can only handle dma_attr_sgllen scatter/gather entries */
	if (fc_pkt->pkt_cmd_cookie_cnt >
	    fport->fcipp_cmd_dma_attr.dma_attr_sgllen) {
		goto fail;
	}

	ASSERT(fc_pkt->pkt_cmd_cookie_cnt != 0);

	/* copy out the full cookie list for the FCA's use */
	cp = fc_pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
	    fc_pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
	    KM_NOSLEEP);

	if (cp == NULL) {
		goto fail;
	}

	*cp = pkt_cookie;
	cp++;
	for (cnt = 1; cnt < fc_pkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
		ddi_dma_nextcookie(fc_pkt->pkt_cmd_dma, &pkt_cookie);
		*cp = pkt_cookie;
	}

	fc_pkt->pkt_cmdlen = datalen;

	/* reset per-packet linkage and state for this fresh allocation */
	fcip_pkt->fcip_pkt_mp = NULL;
	fcip_pkt->fcip_pkt_wq = NULL;
	fcip_pkt->fcip_pkt_dest = NULL;
	fcip_pkt->fcip_pkt_next = NULL;
	fcip_pkt->fcip_pkt_prev = NULL;
	fcip_pkt->fcip_pkt_state = 0;
	fcip_pkt->fcip_pkt_reason = 0;
	fcip_pkt->fcip_pkt_flags = 0;
	fcip_pkt->fcip_pkt_frp = NULL;

	return (fcip_pkt);
fail:
	/* fcip_pkt_free() unwinds whatever FCIP_*_DMA_* flags were set */
	if (fcip_pkt) {
		fcip_pkt_free(fcip_pkt, 0);
	}
	return ((fcip_pkt_t *)0);
}
6605 6605
6606 6606 /*
6607 6607 * Free a packet and all its associated resources
6608 6608 */
static void
fcip_pkt_free(struct fcip_pkt *fcip_pkt, int free_mblk)
{
	fc_packet_t *fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
	struct fcip *fptr = fcip_pkt->fcip_pkt_fptr;

	/* release the scatter/gather cookie list, if one was allocated */
	if (fc_pkt->pkt_cmd_cookie != NULL) {
		kmem_free(fc_pkt->pkt_cmd_cookie, fc_pkt->pkt_cmd_cookie_cnt *
		    sizeof (ddi_dma_cookie_t));
		fc_pkt->pkt_cmd_cookie = NULL;
	}

	/* unbind/free DMA resources per fcip_pkt_dma_flags */
	fcip_free_pkt_dma(fcip_pkt);
	/* caller decides whether the attached mblk goes too */
	if (free_mblk && fcip_pkt->fcip_pkt_mp) {
		freemsg(fcip_pkt->fcip_pkt_mp);
		fcip_pkt->fcip_pkt_mp = NULL;
	}

	/* let the FCA tear down its private packet state */
	(void) fc_ulp_uninit_packet(fptr->fcip_port_info->fcipp_handle, fc_pkt);

	kmem_cache_free(fptr->fcip_xmit_cache, (void *)fcip_pkt);
}
6631 6631
6632 6632 /*
6633 6633 * Allocate a Packet for internal driver use. This is for requests
6634 6634 * that originate from within the driver
6635 6635 */
6636 6636 static fcip_pkt_t *
6637 6637 fcip_ipkt_alloc(struct fcip *fptr, int cmdlen, int resplen,
6638 6638 opaque_t pd, int flags)
6639 6639 {
6640 6640 fcip_pkt_t *fcip_pkt;
6641 6641 fc_packet_t *fc_pkt;
6642 6642 int (*cb)(caddr_t);
6643 6643 fcip_port_info_t *fport = fptr->fcip_port_info;
6644 6644 size_t real_len;
6645 6645 uint_t held_here = 0;
6646 6646 ddi_dma_cookie_t pkt_cookie;
6647 6647 ddi_dma_cookie_t *cp;
6648 6648 uint32_t cnt;
6649 6649
6650 6650 cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
6651 6651
6652 6652 fcip_pkt = kmem_zalloc((sizeof (fcip_pkt_t) +
6653 6653 fport->fcipp_fca_pkt_size), flags);
6654 6654
6655 6655 if (fcip_pkt == NULL) {
6656 6656 FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
6657 6657 (CE_WARN, "pkt alloc of ineternal pkt failed"));
6658 6658 goto fail;
6659 6659 }
6660 6660
6661 6661 fcip_pkt->fcip_pkt_flags = FCIP_PKT_INTERNAL;
6662 6662 fcip_pkt->fcip_pkt_fptr = fptr;
6663 6663 fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
6664 6664 fcip_pkt->fcip_pkt_fcpktp = fc_pkt;
6665 6665 fc_pkt->pkt_tran_flags = 0;
6666 6666 fc_pkt->pkt_cmdlen = 0;
6667 6667 fc_pkt->pkt_rsplen = 0;
6668 6668 fc_pkt->pkt_datalen = 0;
6669 6669 fc_pkt->pkt_fca_private = (opaque_t)((caddr_t)fcip_pkt +
6670 6670 sizeof (fcip_pkt_t));
6671 6671 fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt;
6672 6672
6673 6673 if (cmdlen) {
6674 6674 if (ddi_dma_alloc_handle(fptr->fcip_dip,
6675 6675 &fport->fcipp_cmd_dma_attr, cb, NULL,
6676 6676 &fc_pkt->pkt_cmd_dma) != DDI_SUCCESS) {
6677 6677 goto fail;
6678 6678 }
6679 6679
6680 6680 if (ddi_dma_mem_alloc(fc_pkt->pkt_cmd_dma, cmdlen,
6681 6681 &fport->fcipp_fca_acc_attr, DDI_DMA_CONSISTENT,
6682 6682 cb, NULL, (caddr_t *)&fc_pkt->pkt_cmd,
6683 6683 &real_len, &fc_pkt->pkt_cmd_acc) != DDI_SUCCESS) {
6684 6684 goto fail;
6685 6685 }
6686 6686
6687 6687 fcip_pkt->fcip_pkt_dma_flags |= FCIP_CMD_DMA_MEM;
6688 6688 fc_pkt->pkt_cmdlen = cmdlen;
6689 6689
6690 6690 if (real_len < cmdlen) {
6691 6691 goto fail;
6692 6692 }
6693 6693
6694 6694 if (ddi_dma_addr_bind_handle(fc_pkt->pkt_cmd_dma, NULL,
6695 6695 (caddr_t)fc_pkt->pkt_cmd, real_len,
6696 6696 DDI_DMA_WRITE | DDI_DMA_CONSISTENT, cb, NULL,
6697 6697 &pkt_cookie, &fc_pkt->pkt_cmd_cookie_cnt) !=
6698 6698 DDI_DMA_MAPPED) {
6699 6699 goto fail;
6700 6700 }
6701 6701
6702 6702 fcip_pkt->fcip_pkt_dma_flags |= FCIP_CMD_DMA_BOUND;
6703 6703
6704 6704 if (fc_pkt->pkt_cmd_cookie_cnt >
6705 6705 fport->fcipp_cmd_dma_attr.dma_attr_sgllen) {
6706 6706 goto fail;
6707 6707 }
6708 6708
6709 6709 ASSERT(fc_pkt->pkt_cmd_cookie_cnt != 0);
6710 6710
6711 6711 cp = fc_pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
6712 6712 fc_pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
6713 6713 KM_NOSLEEP);
6714 6714
6715 6715 if (cp == NULL) {
6716 6716 goto fail;
6717 6717 }
6718 6718
6719 6719 *cp = pkt_cookie;
6720 6720 cp++;
6721 6721 for (cnt = 1; cnt < fc_pkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
6722 6722 ddi_dma_nextcookie(fc_pkt->pkt_cmd_dma, &pkt_cookie);
6723 6723 *cp = pkt_cookie;
6724 6724 }
6725 6725 }
6726 6726
6727 6727 if (resplen) {
6728 6728 if (ddi_dma_alloc_handle(fptr->fcip_dip,
6729 6729 &fport->fcipp_resp_dma_attr, cb, NULL,
6730 6730 &fc_pkt->pkt_resp_dma) != DDI_SUCCESS) {
6731 6731 goto fail;
6732 6732 }
6733 6733
6734 6734 if (ddi_dma_mem_alloc(fc_pkt->pkt_resp_dma, resplen,
6735 6735 &fport->fcipp_fca_acc_attr, DDI_DMA_CONSISTENT,
6736 6736 cb, NULL, (caddr_t *)&fc_pkt->pkt_resp,
6737 6737 &real_len, &fc_pkt->pkt_resp_acc) != DDI_SUCCESS) {
6738 6738 goto fail;
6739 6739 }
6740 6740
6741 6741 fcip_pkt->fcip_pkt_dma_flags |= FCIP_RESP_DMA_MEM;
6742 6742
6743 6743 if (real_len < resplen) {
6744 6744 goto fail;
6745 6745 }
6746 6746
6747 6747 if (ddi_dma_addr_bind_handle(fc_pkt->pkt_resp_dma, NULL,
6748 6748 (caddr_t)fc_pkt->pkt_resp, real_len,
6749 6749 DDI_DMA_WRITE | DDI_DMA_CONSISTENT, cb, NULL,
6750 6750 &pkt_cookie, &fc_pkt->pkt_resp_cookie_cnt) !=
6751 6751 DDI_DMA_MAPPED) {
6752 6752 goto fail;
6753 6753 }
6754 6754
6755 6755 fcip_pkt->fcip_pkt_dma_flags |= FCIP_RESP_DMA_BOUND;
6756 6756 fc_pkt->pkt_rsplen = resplen;
6757 6757
6758 6758 if (fc_pkt->pkt_resp_cookie_cnt >
6759 6759 fport->fcipp_resp_dma_attr.dma_attr_sgllen) {
6760 6760 goto fail;
6761 6761 }
6762 6762
6763 6763 ASSERT(fc_pkt->pkt_resp_cookie_cnt != 0);
6764 6764
6765 6765 cp = fc_pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
6766 6766 fc_pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
6767 6767 KM_NOSLEEP);
6768 6768
6769 6769 if (cp == NULL) {
6770 6770 goto fail;
6771 6771 }
6772 6772
6773 6773 *cp = pkt_cookie;
6774 6774 cp++;
6775 6775 for (cnt = 1; cnt < fc_pkt->pkt_resp_cookie_cnt; cnt++, cp++) {
6776 6776 ddi_dma_nextcookie(fc_pkt->pkt_resp_dma, &pkt_cookie);
6777 6777 *cp = pkt_cookie;
6778 6778 }
6779 6779 }
6780 6780
6781 6781 /*
6782 6782 * Initialize pkt_pd prior to calling fc_ulp_init_packet
6783 6783 */
6784 6784
6785 6785 fc_pkt->pkt_pd = pd;
6786 6786
6787 6787 /*
6788 6788 * Ask the FCA to bless the internal packet
6789 6789 */
6790 6790 if (fc_ulp_init_packet((opaque_t)fport->fcipp_handle,
6791 6791 fc_pkt, flags) != FC_SUCCESS) {
6792 6792 goto fail;
6793 6793 }
6794 6794
6795 6795 /*
6796 6796 * Keep track of # of ipkts alloc-ed
6797 6797 * This function can get called with mutex either held or not. So, we'll
6798 6798 * grab mutex if it is not already held by this thread.
6799 6799 * This has to be cleaned up someday.
6800 6800 */
6801 6801 if (!MUTEX_HELD(&fptr->fcip_mutex)) {
6802 6802 held_here = 1;
6803 6803 mutex_enter(&fptr->fcip_mutex);
6804 6804 }
6805 6805
6806 6806 fptr->fcip_num_ipkts_pending++;
6807 6807
6808 6808 if (held_here)
6809 6809 mutex_exit(&fptr->fcip_mutex);
6810 6810
6811 6811 return (fcip_pkt);
6812 6812 fail:
6813 6813 if (fcip_pkt) {
6814 6814 fcip_ipkt_free(fcip_pkt);
6815 6815 }
6816 6816
6817 6817 return (NULL);
6818 6818 }
6819 6819
6820 6820 /*
6821 6821 * free up an internal IP packet (like a FARP pkt etc)
6822 6822 */
static void
fcip_ipkt_free(fcip_pkt_t *fcip_pkt)
{
	fc_packet_t *fc_pkt;
	struct fcip *fptr = fcip_pkt->fcip_pkt_fptr;
	fcip_port_info_t *fport = fptr->fcip_port_info;

	ASSERT(fptr != NULL);
	/* we take fcip_mutex below, so the caller must not hold it */
	ASSERT(!mutex_owned(&fptr->fcip_mutex));

	/* One less ipkt to wait for */
	mutex_enter(&fptr->fcip_mutex);
	if (fptr->fcip_num_ipkts_pending)	/* Safety check */
		fptr->fcip_num_ipkts_pending--;
	mutex_exit(&fptr->fcip_mutex);

	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);

	/* release cookie lists for both directions, if allocated */
	if (fc_pkt->pkt_cmd_cookie != NULL) {
		kmem_free(fc_pkt->pkt_cmd_cookie, fc_pkt->pkt_cmd_cookie_cnt *
		    sizeof (ddi_dma_cookie_t));
		fc_pkt->pkt_cmd_cookie = NULL;
	}

	if (fc_pkt->pkt_resp_cookie != NULL) {
		kmem_free(fc_pkt->pkt_resp_cookie, fc_pkt->pkt_resp_cookie_cnt *
		    sizeof (ddi_dma_cookie_t));
		fc_pkt->pkt_resp_cookie = NULL;
	}

	/* best-effort: log but continue if the FCA uninit fails */
	if (fc_ulp_uninit_packet(fport->fcipp_handle, fc_pkt) != FC_SUCCESS) {
		FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN,
		    "fc_ulp_uninit_pkt failed for internal fc pkt 0x%p",
		    (void *)fc_pkt));
	}
	/* unbind/free DMA then the combined pkt + FCA-private allocation */
	fcip_free_pkt_dma(fcip_pkt);
	kmem_free(fcip_pkt, (sizeof (fcip_pkt_t) + fport->fcipp_fca_pkt_size));
}
6861 6861
6862 6862 /*
6863 6863 * initialize a unicast request. This is a misnomer because even the
6864 6864 * broadcast requests are initialized with this routine
6865 6865 */
6866 6866 static void
6867 6867 fcip_init_unicast_pkt(fcip_pkt_t *fcip_pkt, fc_portid_t sid, fc_portid_t did,
6868 6868 void (*comp) ())
6869 6869 {
6870 6870 fc_packet_t *fc_pkt;
6871 6871 fc_frame_hdr_t *fr_hdr;
6872 6872 struct fcip *fptr = fcip_pkt->fcip_pkt_fptr;
6873 6873
6874 6874 fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
6875 6875 fr_hdr = &fc_pkt->pkt_cmd_fhdr;
6876 6876
6877 6877 fr_hdr->r_ctl = R_CTL_DEVICE_DATA | R_CTL_UNSOL_DATA;
6878 6878 fr_hdr->s_id = sid.port_id;
6879 6879 fr_hdr->d_id = did.port_id;
6880 6880 fr_hdr->type = FC_TYPE_IS8802_SNAP;
6881 6881 fr_hdr->f_ctl = F_CTL_FIRST_SEQ | F_CTL_LAST_SEQ;
6882 6882 fr_hdr->df_ctl = DF_CTL_NET_HDR;
6883 6883 fr_hdr->seq_cnt = 0;
6884 6884 fr_hdr->ox_id = 0xffff;
6885 6885 fr_hdr->rx_id = 0xffff;
6886 6886 fr_hdr->ro = 0;
6887 6887 /*
6888 6888 * reset all the length fields
6889 6889 */
6890 6890 fc_pkt->pkt_rsplen = 0;
6891 6891 fc_pkt->pkt_datalen = 0;
6892 6892 fc_pkt->pkt_comp = comp;
6893 6893 if (comp) {
6894 6894 fc_pkt->pkt_tran_flags |= FC_TRAN_INTR;
6895 6895 } else {
6896 6896 fc_pkt->pkt_tran_flags |= FC_TRAN_NO_INTR;
6897 6897 }
6898 6898 fc_pkt->pkt_tran_type = FC_PKT_OUTBOUND | FC_PKT_IP_WRITE;
6899 6899 fc_pkt->pkt_timeout = fcip_pkt_ttl_ticks;
6900 6900 fcip_pkt->fcip_pkt_ttl = fptr->fcip_timeout_ticks + fc_pkt->pkt_timeout;
6901 6901 }
6902 6902
6903 6903
6904 6904 /*
6905 6905 * Initialize a fcip_packet for broadcast data transfers
6906 6906 */
6907 6907 static void
6908 6908 fcip_init_broadcast_pkt(fcip_pkt_t *fcip_pkt, void (*comp) (), int is_els)
6909 6909 {
6910 6910 fc_packet_t *fc_pkt;
6911 6911 fc_frame_hdr_t *fr_hdr;
6912 6912 struct fcip *fptr = fcip_pkt->fcip_pkt_fptr;
6913 6913 fcip_port_info_t *fport = fptr->fcip_port_info;
6914 6914 uint32_t sid;
6915 6915 uint32_t did;
6916 6916
6917 6917 FCIP_TNF_PROBE_1((fcip_init_broadcast_pkt, "fcip io", /* CSTYLED */,
6918 6918 tnf_string, msg, "enter"));
6919 6919 fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
6920 6920 fr_hdr = &fc_pkt->pkt_cmd_fhdr;
6921 6921 sid = fport->fcipp_sid.port_id;
6922 6922
6923 6923 if (is_els) {
6924 6924 fr_hdr->r_ctl = R_CTL_ELS_REQ;
6925 6925 } else {
6926 6926 fr_hdr->r_ctl = R_CTL_DEVICE_DATA | R_CTL_UNSOL_DATA;
6927 6927 }
6928 6928 fr_hdr->s_id = sid;
6929 6929 /*
6930 6930 * The destination broadcast address depends on the topology
6931 6931 * of the underlying port
6932 6932 */
6933 6933 did = fptr->fcip_broadcast_did;
6934 6934 /*
6935 6935 * mark pkt a broadcast pkt
6936 6936 */
6937 6937 fc_pkt->pkt_tran_type = FC_PKT_BROADCAST;
6938 6938
6939 6939 fr_hdr->d_id = did;
6940 6940 fr_hdr->type = FC_TYPE_IS8802_SNAP;
6941 6941 fr_hdr->f_ctl = F_CTL_FIRST_SEQ | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
6942 6942 fr_hdr->f_ctl &= ~(F_CTL_SEQ_INITIATIVE);
6943 6943 fr_hdr->df_ctl = DF_CTL_NET_HDR;
6944 6944 fr_hdr->seq_cnt = 0;
6945 6945 fr_hdr->ox_id = 0xffff;
6946 6946 fr_hdr->rx_id = 0xffff;
6947 6947 fr_hdr->ro = 0;
6948 6948 fc_pkt->pkt_comp = comp;
6949 6949
6950 6950 if (comp) {
6951 6951 fc_pkt->pkt_tran_flags |= FC_TRAN_INTR;
6952 6952 } else {
6953 6953 fc_pkt->pkt_tran_flags |= FC_TRAN_NO_INTR;
6954 6954 }
6955 6955
6956 6956 fc_pkt->pkt_tran_type = FC_PKT_BROADCAST;
6957 6957 fc_pkt->pkt_timeout = fcip_pkt_ttl_ticks;
6958 6958 fcip_pkt->fcip_pkt_ttl = fptr->fcip_timeout_ticks + fc_pkt->pkt_timeout;
6959 6959 }
6960 6960
6961 6961
6962 6962
6963 6963 /*
6964 6964 * Free up all DMA resources associated with an allocated packet
6965 6965 */
static void
fcip_free_pkt_dma(fcip_pkt_t *fcip_pkt)
{
	fc_packet_t *fc_pkt;

	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "in freepktdma : flags 0x%x",
	    fcip_pkt->fcip_pkt_dma_flags));

	/* unbind before freeing memory, per the flags set at alloc time */
	if (fcip_pkt->fcip_pkt_dma_flags & FCIP_CMD_DMA_BOUND) {
		(void) ddi_dma_unbind_handle(fc_pkt->pkt_cmd_dma);
	}
	if (fcip_pkt->fcip_pkt_dma_flags & FCIP_CMD_DMA_MEM) {
		ddi_dma_mem_free(&fc_pkt->pkt_cmd_acc);
	}

	if (fcip_pkt->fcip_pkt_dma_flags & FCIP_RESP_DMA_BOUND) {
		(void) ddi_dma_unbind_handle(fc_pkt->pkt_resp_dma);
	}
	if (fcip_pkt->fcip_pkt_dma_flags & FCIP_RESP_DMA_MEM) {
		ddi_dma_mem_free(&fc_pkt->pkt_resp_acc);
	}
	/*
	 * for internal commands, we need to free up the dma handles too.
	 * This is done in the cache destructor for non internal cmds
	 */
	if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_INTERNAL) {
		if (fc_pkt->pkt_cmd_dma) {
			ddi_dma_free_handle(&fc_pkt->pkt_cmd_dma);
		}
		if (fc_pkt->pkt_resp_dma) {
			ddi_dma_free_handle(&fc_pkt->pkt_resp_dma);
		}
	}
}
7003 7003
7004 7004
7005 7005 /*
7006 7006 * helper routine to generate a string, given an ether addr
7007 7007 */
7008 7008 static void
7009 7009 fcip_ether_to_str(struct ether_addr *e, caddr_t s)
7010 7010 {
7011 7011 int i;
7012 7012
7013 7013 for (i = 0; i < sizeof (struct ether_addr); i++, s += 2) {
7014 7014 FCIP_DEBUG(FCIP_DEBUG_MISC,
7015 7015 (CE_CONT, "0x%02X:", e->ether_addr_octet[i]));
7016 7016 (void) sprintf(s, "%02X", e->ether_addr_octet[i]);
7017 7017 }
7018 7018
7019 7019 *s = '\0';
7020 7020 }
7021 7021
7022 7022 /*
7023 7023 * When a broadcast request comes from the upper streams modules, it
7024 7024 * is ugly to look into every datagram to figure out if it is a broadcast
7025 7025 * datagram or a unicast packet. Instead just add the broadcast entries
7026 7026 * into our routing and dest tables and the standard hash table look ups
7027 7027 * will find the entries. It is a lot cleaner this way. Also Solaris ifconfig
7028 7028 * seems to be very ethernet specific and it requires broadcasts to the
7029 7029 * ether broadcast addr of 0xffffffffff to succeed even though we specified
7030 7030 * in the dl_info request that our broadcast MAC addr is 0x0000000000
7031 7031 * (can't figure out why RFC2625 did this though). So add broadcast entries
7032 7032 * for both MAC address
7033 7033 */
static int
fcip_dest_add_broadcast_entry(struct fcip *fptr, int new_flag)
{
	fc_portmap_t 		map;
	struct fcip_routing_table *frp;
	uint32_t		did;
	la_wwn_t		broadcast_wwn;

	/*
	 * get port_id of destination for broadcast - this is topology
	 * dependent
	 */
	did = fptr->fcip_broadcast_did;

	/* first entry: the RFC 2625 broadcast MAC (all zeros) as a WWN */
	ether_to_wwn(&fcip_arpbroadcast_addr, &broadcast_wwn);
	bcopy((void *)&broadcast_wwn, (void *)&map.map_pwwn, sizeof (la_wwn_t));
	bcopy((void *)&broadcast_wwn, (void *)&map.map_nwwn, sizeof (la_wwn_t));

	map.map_did.port_id = did;
	map.map_hard_addr.hard_addr = did;
	map.map_state = PORT_DEVICE_VALID;
	if (new_flag) {
		map.map_type = PORT_DEVICE_NEW;
	} else {
		map.map_type = PORT_DEVICE_CHANGED;
	}
	map.map_flags = 0;
	map.map_pd = NULL;
	bzero(&map.map_fc4_types, sizeof (map.map_fc4_types));
	/* push the entry into the routing table, then look it up again */
	fcip_rt_update(fptr, &map, 1);
	mutex_enter(&fptr->fcip_rt_mutex);
	frp = fcip_lookup_rtable(fptr, &broadcast_wwn, FCIP_COMPARE_NWWN);
	mutex_exit(&fptr->fcip_rt_mutex);
	if (frp == NULL) {
		return (FC_FAILURE);
	}
	(void) fcip_add_dest(fptr, frp);
	/*
	 * The Upper IP layers expect the traditional broadcast MAC addr
	 * of 0xff ff ff ff ff ff to work too if we want to plumb the fcip
	 * stream through the /etc/hostname.fcipXX file. Instead of checking
	 * each phys addr for a match with fcip's ARP header broadcast
	 * addr (0x00 00 00 00 00 00), its simply easier to add another
	 * broadcast entry for 0xff ff ff ff ff ff.
	 */
	ether_to_wwn(&fcipnhbroadcastaddr, &broadcast_wwn);
	bcopy((void *)&broadcast_wwn, (void *)&map.map_pwwn, sizeof (la_wwn_t));
	bcopy((void *)&broadcast_wwn, (void *)&map.map_nwwn, sizeof (la_wwn_t));
	/* rest of map (did, state, type, ...) is reused from above */
	fcip_rt_update(fptr, &map, 1);
	mutex_enter(&fptr->fcip_rt_mutex);
	frp = fcip_lookup_rtable(fptr, &broadcast_wwn, FCIP_COMPARE_NWWN);
	mutex_exit(&fptr->fcip_rt_mutex);
	if (frp == NULL) {
		return (FC_FAILURE);
	}
	(void) fcip_add_dest(fptr, frp);
	return (FC_SUCCESS);
}
7092 7092
7093 7093 /*
7094 7094 * We need to obtain the D_ID of the broadcast port for transmitting all
7095 7095 * our broadcast (and multicast) requests. The broadcast D_ID as we know
7096 7096 * is dependent on the link topology
7097 7097 */
static uint32_t
fcip_get_broadcast_did(struct fcip *fptr)
{
	fcip_port_info_t	*fport = fptr->fcip_port_info;
	uint32_t		did = 0;
	uint32_t		sid;

	FCIP_TNF_PROBE_2((fcip_get_broadcast_did, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "enter",
	    tnf_opaque, fptr, fptr));

	sid = fport->fcipp_sid.port_id;

	switch (fport->fcipp_topology) {

	case FC_TOP_PT_PT: {
		fc_portmap_t	*port_map = NULL;
		uint32_t	listlen = 0;

		/*
		 * Point-to-point: the "broadcast" destination is simply
		 * the single peer from the port map, when there is
		 * exactly one.
		 */
		if (fc_ulp_getportmap(fport->fcipp_handle, &port_map,
		    &listlen, FC_ULP_PLOGI_DONTCARE) == FC_SUCCESS) {
			FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE,
			    "fcip_gpmap: listlen :  0x%x", listlen));
			if (listlen == 1) {
				did = port_map->map_did.port_id;
			}
		}
		if (port_map) {
			kmem_free(port_map, listlen * sizeof (fc_portmap_t));
		}
		if (listlen != 1) {
			/* Dummy return value */
			return (0x00FFFFFF);
		}
		break;
	}

	case FC_TOP_NO_NS:
	/* FALLTHROUGH */
	case FC_TOP_FABRIC:
		/*
		 * The broadcast address is the same whether or not
		 * the switch/fabric contains a Name service.
		 */
		did = 0x00FFFFFF;
		break;

	case FC_TOP_PUBLIC_LOOP:
		/*
		 * The open replicate primitive must not be used. The
		 * broadcast sequence is simply sent to ALPA 0x00. The
		 * fabric controller then propagates the broadcast to all
		 * other ports. The fabric propagates the broadcast by
		 * using the OPNfr primitive.
		 */
		did = 0x00;
		break;

	case FC_TOP_PRIVATE_LOOP:
		/*
		 * The source port for broadcast in private loop mode
		 * must send an OPN(fr) signal forcing all ports in the
		 * loop to replicate the frames that they receive.
		 */
		did = 0x00FFFFFF;
		break;

	case FC_TOP_UNKNOWN:
	/* FALLTHROUGH */
	default:
		/* fall back to our own S_ID and complain */
		did = sid;
		FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_WARN,
		    "fcip(0x%x):unknown topology in init_broadcast_pkt",
		    fptr->fcip_instance));
		break;
	}
	FCIP_TNF_PROBE_2((fcip_get_broadcast_did, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "return",
	    tnf_opaque, did, did));

	return (did);
}
7180 7180
7181 7181
7182 7182 /*
7183 7183 * fcip timeout performs 2 operations:
7184 7184 * 1. timeout any packets sent to the FCA for which a callback hasn't
7185 7185 * happened. If you are wondering why we need a callback since all
7186 7186 * traffic in FCIP is unidirectional, hence all exchanges are unidirectional
7187 7187 * but wait, we can only free up the resources after we know the FCA has
7188 7188 * DMA'ed out the data. pretty obvious eh :)
7189 7189 *
7190 7190 * 2. Retire and routing table entries we marked up for retiring. This is
7191 7191 * to give the link a chance to recover instead of marking a port down
7192 7192 * when we have lost all communication with it after a link transition
7193 7193 */
7194 7194 static void
7195 7195 fcip_timeout(void *arg)
7196 7196 {
7197 7197 struct fcip *fptr = (struct fcip *)arg;
7198 7198 int i;
7199 7199 fcip_pkt_t *fcip_pkt;
7200 7200 struct fcip_dest *fdestp;
7201 7201 int index;
7202 7202 struct fcip_routing_table *frtp;
7203 7203 int dispatch_rte_removal = 0;
7204 7204
7205 7205 mutex_enter(&fptr->fcip_mutex);
7206 7206
7207 7207 fptr->fcip_flags |= FCIP_IN_TIMEOUT;
7208 7208 fptr->fcip_timeout_ticks += fcip_tick_incr;
7209 7209
7210 7210 if (fptr->fcip_flags & (FCIP_DETACHED | FCIP_DETACHING | \
7211 7211 FCIP_SUSPENDED | FCIP_POWER_DOWN)) {
7212 7212 fptr->fcip_flags &= ~(FCIP_IN_TIMEOUT);
7213 7213 mutex_exit(&fptr->fcip_mutex);
7214 7214 return;
7215 7215 }
7216 7216
7217 7217 if (fptr->fcip_port_state == FCIP_PORT_OFFLINE) {
7218 7218 if (fptr->fcip_timeout_ticks > fptr->fcip_mark_offline) {
7219 7219 fptr->fcip_flags |= FCIP_LINK_DOWN;
7220 7220 }
7221 7221 }
7222 7222 if (!fptr->fcip_flags & FCIP_RTE_REMOVING) {
7223 7223 dispatch_rte_removal = 1;
7224 7224 }
7225 7225 mutex_exit(&fptr->fcip_mutex);
7226 7226
7227 7227 /*
7228 7228 * Check if we have any Invalid routing table entries in our
7229 7229 * hashtable we have marked off for deferred removal. If any,
7230 7230 * we can spawn a taskq thread to do the cleanup for us. We
7231 7231 * need to avoid cleanup in the timeout thread since we may
7232 7232 * have to wait for outstanding commands to complete before
7233 7233 * we retire a routing table entry. Also dispatch the taskq
7234 7234 * thread only if we are already do not have a taskq thread
7235 7235 * dispatched.
7236 7236 */
7237 7237 if (dispatch_rte_removal) {
7238 7238 mutex_enter(&fptr->fcip_rt_mutex);
7239 7239 for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
7240 7240 frtp = fptr->fcip_rtable[index];
7241 7241 while (frtp) {
7242 7242 if ((frtp->fcipr_state == FCIP_RT_INVALID) &&
7243 7243 (fptr->fcip_timeout_ticks >
7244 7244 frtp->fcipr_invalid_timeout)) {
7245 7245 /*
7246 7246 * If we cannot schedule a task thread
7247 7247 * let us attempt again on the next
7248 7248 * tick rather than call
7249 7249 * fcip_rte_remove_deferred() from here
7250 7250 * directly since the routine can sleep.
7251 7251 */
7252 7252 frtp->fcipr_state = FCIP_RT_RETIRED;
7253 7253
7254 7254 mutex_enter(&fptr->fcip_mutex);
7255 7255 fptr->fcip_flags |= FCIP_RTE_REMOVING;
7256 7256 mutex_exit(&fptr->fcip_mutex);
7257 7257
7258 7258 if (taskq_dispatch(fptr->fcip_tq,
7259 7259 fcip_rte_remove_deferred, fptr,
7260 7260 KM_NOSLEEP) == 0) {
7261 7261 /*
7262 7262 * failed - so mark the entry
7263 7263 * as invalid again.
7264 7264 */
7265 7265 frtp->fcipr_state =
7266 7266 FCIP_RT_INVALID;
7267 7267
7268 7268 mutex_enter(&fptr->fcip_mutex);
7269 7269 fptr->fcip_flags &=
7270 7270 ~FCIP_RTE_REMOVING;
7271 7271 mutex_exit(&fptr->fcip_mutex);
7272 7272 }
7273 7273 }
7274 7274 frtp = frtp->fcipr_next;
7275 7275 }
7276 7276 }
7277 7277 mutex_exit(&fptr->fcip_rt_mutex);
7278 7278 }
7279 7279
7280 7280 mutex_enter(&fptr->fcip_dest_mutex);
7281 7281
7282 7282 /*
7283 7283 * Now timeout any packets stuck with the transport/FCA for too long
7284 7284 */
7285 7285 for (i = 0; i < FCIP_DEST_HASH_ELEMS; i++) {
7286 7286 fdestp = fptr->fcip_dest[i];
7287 7287 while (fdestp != NULL) {
7288 7288 mutex_enter(&fdestp->fcipd_mutex);
7289 7289 for (fcip_pkt = fdestp->fcipd_head; fcip_pkt != NULL;
7290 7290 fcip_pkt = fcip_pkt->fcip_pkt_next) {
7291 7291 if (fcip_pkt->fcip_pkt_flags &
7292 7292 (FCIP_PKT_RETURNED | FCIP_PKT_IN_TIMEOUT |
7293 7293 FCIP_PKT_IN_ABORT)) {
7294 7294 continue;
7295 7295 }
7296 7296 if (fptr->fcip_timeout_ticks >
7297 7297 fcip_pkt->fcip_pkt_ttl) {
7298 7298 fcip_pkt->fcip_pkt_flags |=
7299 7299 FCIP_PKT_IN_TIMEOUT;
7300 7300
7301 7301 mutex_exit(&fdestp->fcipd_mutex);
7302 7302 if (taskq_dispatch(fptr->fcip_tq,
7303 7303 fcip_pkt_timeout, fcip_pkt,
7304 7304 KM_NOSLEEP) == 0) {
7305 7305 /*
7306 7306 * timeout immediately
7307 7307 */
7308 7308 fcip_pkt_timeout(fcip_pkt);
7309 7309 }
7310 7310 mutex_enter(&fdestp->fcipd_mutex);
7311 7311 /*
7312 7312 * The linked list is altered because
7313 7313 * of one of the following reasons:
7314 7314 * a. Timeout code dequeued a pkt
7315 7315 * b. Pkt completion happened
7316 7316 *
7317 7317 * So restart the spin starting at
7318 7318 * the head again; This is a bit
7319 7319 * excessive, but okay since
7320 7320 * fcip_timeout_ticks isn't incremented
7321 7321 * for this spin, we will skip the
7322 7322 * not-to-be-timedout packets quickly
7323 7323 */
7324 7324 fcip_pkt = fdestp->fcipd_head;
7325 7325 if (fcip_pkt == NULL) {
7326 7326 break;
7327 7327 }
7328 7328 }
7329 7329 }
7330 7330 mutex_exit(&fdestp->fcipd_mutex);
7331 7331 fdestp = fdestp->fcipd_next;
7332 7332 }
7333 7333 }
7334 7334 mutex_exit(&fptr->fcip_dest_mutex);
7335 7335
7336 7336 /*
7337 7337 * reschedule the timeout thread
7338 7338 */
7339 7339 mutex_enter(&fptr->fcip_mutex);
7340 7340
7341 7341 fptr->fcip_timeout_id = timeout(fcip_timeout, fptr,
7342 7342 drv_usectohz(1000000));
7343 7343 fptr->fcip_flags &= ~(FCIP_IN_TIMEOUT);
7344 7344 mutex_exit(&fptr->fcip_mutex);
7345 7345 }
7346 7346
7347 7347
/*
 * This routine is either called from taskq or directly from fcip_timeout
 * does the actual job of aborting the packet
 *
 * A packet reaching here has already been flagged FCIP_PKT_IN_TIMEOUT by
 * fcip_timeout (which dispatched us).  We first attempt to abort the
 * outstanding fc_packet with the transport:
 *
 *  - If fc_ulp_abort() succeeds, the FCA has given the packet back to us;
 *    we dequeue it from the destination's pending list (under fcipd_mutex)
 *    and free the packet and its mblk immediately.
 *
 *  - If the abort fails, the packet is presumably still owned by / in
 *    flight at the FCA.  We clear FCIP_PKT_IN_TIMEOUT and rely on the
 *    normal completion path (fcip_pkt_callback) to clean up -- unless the
 *    FCA has already returned it (FCIP_PKT_RETURNED set), in which case we
 *    dequeue and free it here ourselves.
 *
 * NOTE(review): fdestp is only ASSERTed non-NULL on the success path; the
 * failure path dereferences it too when FCIP_PKT_RETURNED is set --
 * presumably a queued packet always has a valid fcip_pkt_dest, but that
 * invariant is established elsewhere; confirm against the enqueue path.
 */
static void
fcip_pkt_timeout(void *arg)
{
	fcip_pkt_t		*fcip_pkt = (fcip_pkt_t *)arg;
	struct fcip_dest	*fdestp;
	struct fcip		*fptr;
	fc_packet_t		*fc_pkt;
	fcip_port_info_t	*fport;
	int			rval;

	fdestp = fcip_pkt->fcip_pkt_dest;
	fptr = fcip_pkt->fcip_pkt_fptr;
	fport = fptr->fcip_port_info;
	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);

	/*
	 * try to abort the pkt
	 */
	fcip_pkt->fcip_pkt_flags |= FCIP_PKT_IN_ABORT;
	rval = fc_ulp_abort(fport->fcipp_handle, fc_pkt, KM_NOSLEEP);

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "fc_ulp_abort returns: 0x%x", rval));

	if (rval == FC_SUCCESS) {
		ASSERT(fdestp != NULL);

		/*
		 * dequeue the pkt from the dest structure pkt list
		 */
		fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_ABORT;
		mutex_enter(&fdestp->fcipd_mutex);
		rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt);
		ASSERT(rval == 1);
		mutex_exit(&fdestp->fcipd_mutex);

		/*
		 * Now cleanup the pkt and free the mblk
		 */
		fcip_pkt_free(fcip_pkt, 1);
	} else {
		/*
		 * abort failed - just mark the pkt as done and
		 * wait for it to complete in fcip_pkt_callback since
		 * the pkt has already been xmitted by the FCA
		 */
		fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_TIMEOUT;
		if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_RETURNED) {
			/*
			 * FCA already handed the packet back to us; no
			 * completion callback is coming, so dequeue and
			 * free it here instead.
			 */
			fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_ABORT;
			mutex_enter(&fdestp->fcipd_mutex);
			rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt);
			ASSERT(rval == 1);
			mutex_exit(&fdestp->fcipd_mutex);

			fcip_pkt_free(fcip_pkt, 1);
		}
		return;
	}
}
7411 7411
7412 7412
/*
 * Remove a routing table entry marked for deferred removal. This routine
 * unlike fcip_pkt_timeout, is always called from a taskq context
 *
 * fcip_timeout marks stale routing entries FCIP_RT_RETIRED and dispatches
 * this routine (setting FCIP_RTE_REMOVING first).  For each RETIRED entry
 * we look up the matching fcip_dest by port WWN; the entry is only freed
 * when a dest exists for it AND that dest has no outstanding commands
 * (fcipd_ncmds == 0) -- otherwise it is left in place to be revisited on a
 * later pass.  On removal the dest's back pointer (fcipd_rtable) is
 * cleared before the RTE is unlinked and freed.
 *
 * Lock ordering: fcip_rt_mutex is held across the whole walk;
 * fcip_dest_mutex and the per-dest fcipd_mutex are acquired (and dropped)
 * inside it.
 */
static void
fcip_rte_remove_deferred(void *arg)
{
	struct fcip			*fptr = (struct fcip *)arg;
	int				hash_bucket;
	struct fcip_dest		*fdestp;
	la_wwn_t			*pwwn;
	int				index;
	struct fcip_routing_table	*frtp, *frtp_next, *frtp_prev;


	mutex_enter(&fptr->fcip_rt_mutex);
	for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
		frtp = fptr->fcip_rtable[index];
		frtp_prev = NULL;
		while (frtp) {
			/* capture next now; frtp may be freed below */
			frtp_next = frtp->fcipr_next;

			if (frtp->fcipr_state == FCIP_RT_RETIRED) {

				pwwn = &frtp->fcipr_pwwn;
				/*
				 * Get hold of destination pointer
				 */
				mutex_enter(&fptr->fcip_dest_mutex);

				hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn);
				ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS);

				/*
				 * Scan the dest hash chain for a dest whose
				 * port WWN matches this routing entry.
				 */
				fdestp = fptr->fcip_dest[hash_bucket];
				while (fdestp != NULL) {
					mutex_enter(&fdestp->fcipd_mutex);
					if (fdestp->fcipd_rtable) {
						if (fcip_wwn_compare(pwwn,
						    &fdestp->fcipd_pwwn,
						    FCIP_COMPARE_PWWN) == 0) {
							mutex_exit(
							    &fdestp->fcipd_mutex);
							break;
						}
					}
					mutex_exit(&fdestp->fcipd_mutex);
					fdestp = fdestp->fcipd_next;
				}

				mutex_exit(&fptr->fcip_dest_mutex);
				if (fdestp == NULL) {
					/*
					 * No dest for this entry; keep it
					 * and revisit on a later pass.
					 */
					frtp_prev = frtp;
					frtp = frtp_next;
					continue;
				}

				mutex_enter(&fdestp->fcipd_mutex);
				if (fdestp->fcipd_ncmds) {
					/*
					 * Instead of waiting to drain commands
					 * let us revisit this RT entry in
					 * the next pass.
					 */
					mutex_exit(&fdestp->fcipd_mutex);
					frtp_prev = frtp;
					frtp = frtp_next;
					continue;
				}

				/*
				 * We are clean, so remove the RTE
				 */
				fdestp->fcipd_rtable = NULL;
				mutex_exit(&fdestp->fcipd_mutex);

				FCIP_TNF_PROBE_2((fcip_rte_remove_deferred,
				    "fcip io", /* CSTYLED */,
				    tnf_string, msg,
				    "remove retired routing entry",
				    tnf_int, index, index));

				/* unlink the RTE from its hash chain */
				if (frtp_prev == NULL) {
					/* first element */
					fptr->fcip_rtable[index] =
					    frtp->fcipr_next;
				} else {
					frtp_prev->fcipr_next =
					    frtp->fcipr_next;
				}
				kmem_free(frtp,
				    sizeof (struct fcip_routing_table));

				frtp = frtp_next;
			} else {
				frtp_prev = frtp;
				frtp = frtp_next;
			}
		}
	}
	mutex_exit(&fptr->fcip_rt_mutex);
	/*
	 * Clear the RTE_REMOVING flag
	 */
	mutex_enter(&fptr->fcip_mutex);
	fptr->fcip_flags &= ~FCIP_RTE_REMOVING;
	mutex_exit(&fptr->fcip_mutex);
}
7520 7520
7521 7521 /*
7522 7522 * Walk through all the dest hash table entries and count up the total
7523 7523 * no. of packets outstanding against a given port
7524 7524 */
7525 7525 static int
7526 7526 fcip_port_get_num_pkts(struct fcip *fptr)
7527 7527 {
7528 7528 int num_cmds = 0;
7529 7529 int i;
7530 7530 struct fcip_dest *fdestp;
7531 7531
7532 7532 ASSERT(mutex_owned(&fptr->fcip_dest_mutex));
7533 7533
7534 7534 for (i = 0; i < FCIP_DEST_HASH_ELEMS; i++) {
7535 7535 fdestp = fptr->fcip_dest[i];
7536 7536 while (fdestp != NULL) {
7537 7537 mutex_enter(&fdestp->fcipd_mutex);
7538 7538
7539 7539 ASSERT(fdestp->fcipd_ncmds >= 0);
7540 7540
7541 7541 if (fdestp->fcipd_ncmds > 0) {
7542 7542 num_cmds += fdestp->fcipd_ncmds;
7543 7543 }
7544 7544 mutex_exit(&fdestp->fcipd_mutex);
7545 7545 fdestp = fdestp->fcipd_next;
7546 7546 }
7547 7547 }
7548 7548
7549 7549 return (num_cmds);
7550 7550 }
7551 7551
7552 7552
7553 7553 /*
7554 7554 * Walk through the routing table for this state instance and see if there is a
7555 7555 * PLOGI in progress for any of the entries. Return success even if we find one.
7556 7556 */
7557 7557 static int
7558 7558 fcip_plogi_in_progress(struct fcip *fptr)
7559 7559 {
7560 7560 int i;
7561 7561 struct fcip_routing_table *frp;
7562 7562
7563 7563 ASSERT(mutex_owned(&fptr->fcip_rt_mutex));
7564 7564
7565 7565 for (i = 0; i < FCIP_RT_HASH_ELEMS; i++) {
7566 7566 frp = fptr->fcip_rtable[i];
7567 7567 while (frp) {
7568 7568 if (frp->fcipr_state == FCIP_RT_LOGIN_PROGRESS) {
7569 7569 /* Found an entry where PLOGI is in progress */
7570 7570 return (1);
7571 7571 }
7572 7572 frp = frp->fcipr_next;
7573 7573 }
7574 7574 }
7575 7575
7576 7576 return (0);
7577 7577 }
7578 7578
7579 7579 /*
7580 7580 * Walk through the fcip port global list and check if the given port exists in
7581 7581 * the list. Returns "0" if port exists and "1" if otherwise.
7582 7582 */
7583 7583 static int
7584 7584 fcip_check_port_exists(struct fcip *fptr)
7585 7585 {
7586 7586 fcip_port_info_t *cur_fport;
7587 7587 fcip_port_info_t *fport;
7588 7588
7589 7589 mutex_enter(&fcip_global_mutex);
7590 7590 fport = fptr->fcip_port_info;
7591 7591 cur_fport = fcip_port_head;
7592 7592 while (cur_fport != NULL) {
7593 7593 if (cur_fport == fport) {
7594 7594 /* Found */
7595 7595 mutex_exit(&fcip_global_mutex);
7596 7596 return (0);
7597 7597 } else {
7598 7598 cur_fport = cur_fport->fcipp_next;
7599 7599 }
7600 7600 }
7601 7601 mutex_exit(&fcip_global_mutex);
7602 7602
7603 7603 return (1);
7604 7604 }
7605 7605
7606 7606 /*
7607 7607 * Constructor to initialize the sendup elements for callback into
7608 7608 * modules upstream
7609 7609 */
7610 7610
7611 7611 /* ARGSUSED */
7612 7612 static int
7613 7613 fcip_sendup_constructor(void *buf, void *arg, int flags)
7614 7614 {
7615 7615 struct fcip_sendup_elem *msg_elem = (struct fcip_sendup_elem *)buf;
7616 7616 fcip_port_info_t *fport = (fcip_port_info_t *)arg;
7617 7617
7618 7618 ASSERT(fport != NULL);
7619 7619
7620 7620 msg_elem->fcipsu_mp = NULL;
7621 7621 msg_elem->fcipsu_func = NULL;
7622 7622 msg_elem->fcipsu_next = NULL;
7623 7623
7624 7624 return (FCIP_SUCCESS);
7625 7625 }
↓ open down ↓ |
7061 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX