7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/common/io/fibre-channel/ulp/fcp.c
+++ new/usr/src/uts/common/io/fibre-channel/ulp/fcp.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 *
24 24 * Fibre Channel SCSI ULP Mapping driver
25 25 */
26 26
27 27 #include <sys/scsi/scsi.h>
28 28 #include <sys/types.h>
29 29 #include <sys/varargs.h>
30 30 #include <sys/devctl.h>
31 31 #include <sys/thread.h>
32 32 #include <sys/thread.h>
33 33 #include <sys/open.h>
34 34 #include <sys/file.h>
35 35 #include <sys/sunndi.h>
36 36 #include <sys/console.h>
37 37 #include <sys/proc.h>
38 38 #include <sys/time.h>
39 39 #include <sys/utsname.h>
40 40 #include <sys/scsi/impl/scsi_reset_notify.h>
41 41 #include <sys/ndi_impldefs.h>
42 42 #include <sys/byteorder.h>
43 43 #include <sys/fs/dv_node.h>
44 44 #include <sys/ctype.h>
45 45 #include <sys/sunmdi.h>
46 46
47 47 #include <sys/fibre-channel/fc.h>
48 48 #include <sys/fibre-channel/impl/fc_ulpif.h>
49 49 #include <sys/fibre-channel/ulp/fcpvar.h>
50 50
51 51 /*
52 52 * Discovery Process
53 53 * =================
54 54 *
55 55 * The discovery process is a major function of FCP. In order to help
56 56 * understand that function a flow diagram is given here. This diagram
57 57 * doesn't claim to cover all the cases and the events that can occur during
58 58 * the discovery process nor the subtleties of the code. The code paths shown
59 59 * are simplified. Its purpose is to help the reader (and potentially bug
60 60 * fixer) have an overall view of the logic of the code. For that reason the
61 61 * diagram covers the simple case of the line coming up cleanly or of a new
62 62 * port attaching to FCP with the link being up. The reader must keep in mind
63 63 * that:
64 64 *
65 65 * - There are special cases where bringing devices online and offline
66 66 * is driven by Ioctl.
67 67 *
68 68 * - The behavior of the discovery process can be modified through the
69 69 * .conf file.
70 70 *
71 71 * - The line can go down and come back up at any time during the
72 72 * discovery process which explains some of the complexity of the code.
73 73 *
74 74 * ............................................................................
75 75 *
76 76 * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
77 77 *
78 78 *
79 79 * +-------------------------+
80 80 * fp/fctl module --->| fcp_port_attach |
81 81 * +-------------------------+
82 82 * | |
83 83 * | |
84 84 * | v
85 85 * | +-------------------------+
86 86 * | | fcp_handle_port_attach |
87 87 * | +-------------------------+
88 88 * | |
89 89 * | |
90 90 * +--------------------+ |
91 91 * | |
92 92 * v v
93 93 * +-------------------------+
94 94 * | fcp_statec_callback |
95 95 * +-------------------------+
96 96 * |
97 97 * |
98 98 * v
99 99 * +-------------------------+
100 100 * | fcp_handle_devices |
101 101 * +-------------------------+
102 102 * |
103 103 * |
104 104 * v
105 105 * +-------------------------+
106 106 * | fcp_handle_mapflags |
107 107 * +-------------------------+
108 108 * |
109 109 * |
110 110 * v
111 111 * +-------------------------+
112 112 * | fcp_send_els |
113 113 * | |
114 114 * | PLOGI or PRLI To all the|
115 115 * | reachable devices. |
116 116 * +-------------------------+
117 117 *
118 118 *
119 119 * ............................................................................
120 120 *
121 121 * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
122 122 * STEP 1 are called (it is actually the same function).
123 123 *
124 124 *
125 125 * +-------------------------+
126 126 * | fcp_icmd_callback |
127 127 * fp/fctl module --->| |
128 128 * | callback for PLOGI and |
129 129 * | PRLI. |
130 130 * +-------------------------+
131 131 * |
132 132 * |
133 133 * Received PLOGI Accept /-\ Received PRLI Accept
134 134 * _ _ _ _ _ _ / \_ _ _ _ _ _
135 135 * | \ / |
136 136 * | \-/ |
137 137 * | |
138 138 * v v
139 139 * +-------------------------+ +-------------------------+
140 140 * | fcp_send_els | | fcp_send_scsi |
141 141 * | | | |
142 142 * | PRLI | | REPORT_LUN |
143 143 * +-------------------------+ +-------------------------+
144 144 *
145 145 * ............................................................................
146 146 *
147 147 * STEP 3: The callback functions of the SCSI commands issued by FCP are called
148 148 * (It is actually the same function).
149 149 *
150 150 *
151 151 * +-------------------------+
152 152 * fp/fctl module ------->| fcp_scsi_callback |
153 153 * +-------------------------+
154 154 * |
155 155 * |
156 156 * |
157 157 * Receive REPORT_LUN reply /-\ Receive INQUIRY PAGE83 reply
158 158 * _ _ _ _ _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ _ _ _ _
159 159 * | \ / |
160 160 * | \-/ |
161 161 * | | |
162 162 * | Receive INQUIRY reply| |
163 163 * | | |
164 164 * v v v
165 165 * +------------------------+ +----------------------+ +----------------------+
166 166 * | fcp_handle_reportlun | | fcp_handle_inquiry | | fcp_handle_page83 |
167 167 * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
168 168 * +------------------------+ +----------------------+ +----------------------+
169 169 * | | |
170 170 * | | |
171 171 * | | |
172 172 * v v |
173 173 * +-----------------+ +-----------------+ |
174 174 * | fcp_send_scsi | | fcp_send_scsi | |
175 175 * | | | | |
176 176 * | INQUIRY | | INQUIRY PAGE83 | |
177 177 * | (To each LUN) | +-----------------+ |
178 178 * +-----------------+ |
179 179 * |
180 180 * v
181 181 * +------------------------+
182 182 * | fcp_call_finish_init |
183 183 * +------------------------+
184 184 * |
185 185 * v
186 186 * +-----------------------------+
187 187 * | fcp_call_finish_init_held |
188 188 * +-----------------------------+
189 189 * |
190 190 * |
191 191 * All LUNs scanned /-\
192 192 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ / \
193 193 * | \ /
194 194 * | \-/
195 195 * v |
196 196 * +------------------+ |
197 197 * | fcp_finish_tgt | |
198 198 * +------------------+ |
199 199 * | Target Not Offline and |
200 200 * Target Not Offline and | not marked and tgt_node_state |
201 201 * marked /-\ not FCP_TGT_NODE_ON_DEMAND |
202 202 * _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ |
203 203 * | \ / | |
204 204 * | \-/ | |
205 205 * v v |
206 206 * +----------------------------+ +-------------------+ |
207 207 * | fcp_offline_target | | fcp_create_luns | |
208 208 * | | +-------------------+ |
209 209 * | A structure fcp_tgt_elem | | |
210 210 * | is created and queued in | v |
211 211 * | the FCP port list | +-------------------+ |
212 212 * | port_offline_tgts. It | | fcp_pass_to_hp | |
213 213 * | will be unqueued by the | | | |
214 214 * | watchdog timer. | | Called for each | |
215 215 * +----------------------------+ | LUN. Dispatches | |
216 216 * | | fcp_hp_task | |
217 217 * | +-------------------+ |
218 218 * | | |
219 219 * | | |
220 220 * | | |
221 221 * | +---------------->|
222 222 * | |
223 223 * +---------------------------------------------->|
224 224 * |
225 225 * |
226 226 * All the targets (devices) have been scanned /-\
227 227 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ / \
228 228 * | \ /
229 229 * | \-/
230 230 * +-------------------------------------+ |
231 231 * | fcp_finish_init | |
232 232 * | | |
233 233 * | Signal broadcasts the condition | |
234 234 * | variable port_config_cv of the FCP | |
235 235 * | port. One potential code sequence | |
236 236 * | waiting on the condition variable | |
237 237 * | the code sequence handling | |
238 238 * | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER| |
239 239 * | The other is in the function | |
240 240 * | fcp_reconfig_wait which is called | |
241 241 * | in the transmit path preventing IOs | |
242 242 * | from going through till the disco- | |
243 243 * | very process is over. | |
244 244 * +-------------------------------------+ |
245 245 * | |
246 246 * | |
247 247 * +--------------------------------->|
248 248 * |
249 249 * v
250 250 * Return
251 251 *
252 252 * ............................................................................
253 253 *
254 254 * STEP 4: The hot plug task is called (for each fcp_hp_elem).
255 255 *
256 256 *
257 257 * +-------------------------+
258 258 * | fcp_hp_task |
259 259 * +-------------------------+
260 260 * |
261 261 * |
262 262 * v
263 263 * +-------------------------+
264 264 * | fcp_trigger_lun |
265 265 * +-------------------------+
266 266 * |
267 267 * |
268 268 * v
269 269 * Bring offline /-\ Bring online
270 270 * _ _ _ _ _ _ _ _ _/ \_ _ _ _ _ _ _ _ _ _
271 271 * | \ / |
272 272 * | \-/ |
273 273 * v v
274 274 * +---------------------+ +-----------------------+
275 275 * | fcp_offline_child | | fcp_get_cip |
276 276 * +---------------------+ | |
277 277 * | Creates a dev_info_t |
278 278 * | or a mdi_pathinfo_t |
279 279 * | depending on whether |
280 280 * | mpxio is on or off. |
281 281 * +-----------------------+
282 282 * |
283 283 * |
284 284 * v
285 285 * +-----------------------+
286 286 * | fcp_online_child |
287 287 * | |
288 288 * | Set device online |
289 289 * | using NDI or MDI. |
290 290 * +-----------------------+
291 291 *
292 292 * ............................................................................
293 293 *
294 294 * STEP 5: The watchdog timer expires. The watchdog timer does much more than
295 295 * what is described here. We only show the target offline path.
296 296 *
297 297 *
298 298 * +--------------------------+
299 299 * | fcp_watch |
300 300 * +--------------------------+
301 301 * |
302 302 * |
303 303 * v
304 304 * +--------------------------+
305 305 * | fcp_scan_offline_tgts |
306 306 * +--------------------------+
307 307 * |
308 308 * |
309 309 * v
310 310 * +--------------------------+
311 311 * | fcp_offline_target_now |
312 312 * +--------------------------+
313 313 * |
314 314 * |
315 315 * v
316 316 * +--------------------------+
317 317 * | fcp_offline_tgt_luns |
318 318 * +--------------------------+
319 319 * |
320 320 * |
321 321 * v
322 322 * +--------------------------+
323 323 * | fcp_offline_lun |
324 324 * +--------------------------+
325 325 * |
326 326 * |
327 327 * v
328 328 * +----------------------------------+
329 329 * | fcp_offline_lun_now |
330 330 * | |
331 331 * | A request (or two if mpxio) is |
332 332 * | sent to the hot plug task using |
333 333 * | a fcp_hp_elem structure. |
334 334 * +----------------------------------+
335 335 */
336 336
337 337 /*
338 338 * Functions registered with DDI framework
339 339 */
340 340 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
341 341 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
342 342 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
343 343 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
344 344 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
345 345 cred_t *credp, int *rval);
346 346
347 347 /*
348 348 * Functions registered with FC Transport framework
349 349 */
350 350 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
351 351 fc_attach_cmd_t cmd, uint32_t s_id);
352 352 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
353 353 fc_detach_cmd_t cmd);
354 354 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
355 355 int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
356 356 uint32_t claimed);
357 357 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
358 358 fc_unsol_buf_t *buf, uint32_t claimed);
359 359 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
360 360 fc_unsol_buf_t *buf, uint32_t claimed);
361 361 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
362 362 uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
363 363 uint32_t dev_cnt, uint32_t port_sid);
364 364
365 365 /*
366 366 * Functions registered with SCSA framework
367 367 */
368 368 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
369 369 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
370 370 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
371 371 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
372 372 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
373 373 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
374 374 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
375 375 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
376 376 static int fcp_scsi_reset(struct scsi_address *ap, int level);
377 377 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
378 378 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
379 379 int whom);
380 380 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
381 381 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
382 382 void (*callback)(caddr_t), caddr_t arg);
383 383 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
384 384 char *name, ddi_eventcookie_t *event_cookiep);
385 385 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
386 386 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
387 387 ddi_callback_id_t *cb_id);
388 388 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
389 389 ddi_callback_id_t cb_id);
390 390 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
391 391 ddi_eventcookie_t eventid, void *impldata);
392 392 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
393 393 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
394 394 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
395 395 ddi_bus_config_op_t op, void *arg);
396 396
397 397 /*
398 398 * Internal functions
399 399 */
400 400 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
401 401 int mode, int *rval);
402 402
403 403 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
404 404 int mode, int *rval);
405 405 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
406 406 struct fcp_scsi_cmd *fscsi, int mode);
407 407 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
408 408 caddr_t base_addr, int mode);
409 409 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
410 410
411 411 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
412 412 la_wwn_t *pwwn, int *ret_val, int *fc_status, int *fc_pkt_state,
413 413 int *fc_pkt_reason, int *fc_pkt_action);
414 414 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
415 415 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
416 416 static int fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status,
417 417 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
418 418 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
419 419 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
420 420 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
421 421 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
422 422
423 423 static void fcp_handle_devices(struct fcp_port *pptr,
424 424 fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
425 425 fcp_map_tag_t *map_tag, int cause);
426 426 static int fcp_handle_mapflags(struct fcp_port *pptr,
427 427 struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
428 428 int tgt_cnt, int cause);
429 429 static int fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause);
430 430 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
431 431 struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
432 432 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
433 433 int cause);
434 434 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
435 435 uint32_t state);
436 436 static struct fcp_port *fcp_get_port(opaque_t port_handle);
437 437 static void fcp_unsol_callback(fc_packet_t *fpkt);
438 438 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
439 439 uchar_t r_ctl, uchar_t type);
440 440 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
441 441 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
442 442 struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
443 443 int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
444 444 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
445 445 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
446 446 int nodma, int flags);
447 447 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
448 448 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
449 449 uchar_t *wwn);
450 450 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
451 451 uint32_t d_id);
452 452 static void fcp_icmd_callback(fc_packet_t *fpkt);
453 453 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
454 454 int len, int lcount, int tcount, int cause, uint32_t rscn_count);
455 455 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
456 456 static void fcp_scsi_callback(fc_packet_t *fpkt);
457 457 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
458 458 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
459 459 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
460 460 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
461 461 uint16_t lun_num);
462 462 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
463 463 int link_cnt, int tgt_cnt, int cause);
464 464 static void fcp_finish_init(struct fcp_port *pptr);
465 465 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
466 466 int tgt_cnt, int cause);
467 467 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
468 468 int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags);
469 469 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
470 470 int link_cnt, int tgt_cnt, int nowait, int flags);
471 471 static void fcp_offline_target_now(struct fcp_port *pptr,
472 472 struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
473 473 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
474 474 int tgt_cnt, int flags);
475 475 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
476 476 int nowait, int flags);
477 477 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
478 478 int tgt_cnt);
479 479 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
480 480 int tgt_cnt, int flags);
481 481 static void fcp_scan_offline_luns(struct fcp_port *pptr);
482 482 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
483 483 static void fcp_update_offline_flags(struct fcp_lun *plun);
484 484 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
485 485 static void fcp_abort_commands(struct fcp_pkt *head, struct
486 486 fcp_port *pptr);
487 487 static void fcp_cmd_callback(fc_packet_t *fpkt);
488 488 static void fcp_complete_pkt(fc_packet_t *fpkt);
489 489 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
490 490 struct fcp_port *pptr);
491 491 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
492 492 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
493 493 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
494 494 static void fcp_dealloc_lun(struct fcp_lun *plun);
495 495 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
496 496 fc_portmap_t *map_entry, int link_cnt);
497 497 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
498 498 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
499 499 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
500 500 int internal);
501 501 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
502 502 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
503 503 uint32_t s_id, int instance);
504 504 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
505 505 int instance);
506 506 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
507 507 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
508 508 int);
509 509 static void fcp_kmem_cache_destructor(struct scsi_pkt *, scsi_hba_tran_t *);
510 510 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
511 511 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
512 512 int flags);
513 513 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
514 514 static int fcp_reset_target(struct scsi_address *ap, int level);
515 515 static int fcp_commoncap(struct scsi_address *ap, char *cap,
516 516 int val, int tgtonly, int doset);
517 517 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
518 518 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
519 519 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
520 520 int sleep);
521 521 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
522 522 uint32_t s_id, fc_attach_cmd_t cmd, int instance);
523 523 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
524 524 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
525 525 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
526 526 int lcount, int tcount);
527 527 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
528 528 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
529 529 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
530 530 int tgt_cnt);
531 531 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
532 532 dev_info_t *pdip, caddr_t name);
533 533 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
534 534 int lcount, int tcount, int flags, int *circ);
535 535 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
536 536 int lcount, int tcount, int flags, int *circ);
537 537 static void fcp_remove_child(struct fcp_lun *plun);
538 538 static void fcp_watch(void *arg);
539 539 static void fcp_check_reset_delay(struct fcp_port *pptr);
540 540 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
541 541 struct fcp_lun *rlun, int tgt_cnt);
542 542 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
543 543 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
544 544 uchar_t *wwn, uint16_t lun);
545 545 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
546 546 struct fcp_lun *plun);
547 547 static void fcp_post_callback(struct fcp_pkt *cmd);
548 548 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
549 549 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
550 550 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
551 551 child_info_t *cip);
552 552 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
553 553 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
554 554 int tgt_cnt, int flags);
555 555 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
556 556 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
557 557 int tgt_cnt, int flags, int wait);
558 558 static void fcp_retransport_cmd(struct fcp_port *pptr,
559 559 struct fcp_pkt *cmd);
560 560 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
561 561 uint_t statistics);
562 562 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
563 563 static void fcp_update_targets(struct fcp_port *pptr,
564 564 fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
565 565 static int fcp_call_finish_init(struct fcp_port *pptr,
566 566 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
567 567 static int fcp_call_finish_init_held(struct fcp_port *pptr,
568 568 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
569 569 static void fcp_reconfigure_luns(void * tgt_handle);
570 570 static void fcp_free_targets(struct fcp_port *pptr);
571 571 static void fcp_free_target(struct fcp_tgt *ptgt);
572 572 static int fcp_is_retryable(struct fcp_ipkt *icmd);
573 573 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
574 574 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
575 575 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
576 576 static void fcp_print_error(fc_packet_t *fpkt);
577 577 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
578 578 struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
579 579 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
580 580 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
581 581 uint32_t *dev_cnt);
582 582 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
583 583 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
584 584 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
585 585 struct fcp_ioctl *, struct fcp_port **);
586 586 static char *fcp_get_lun_path(struct fcp_lun *plun);
587 587 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
588 588 int *rval);
589 589 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
590 590 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
591 591 static char *fcp_get_lun_path(struct fcp_lun *plun);
592 592 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
593 593 int *rval);
594 594 static void fcp_reconfig_wait(struct fcp_port *pptr);
595 595
596 596 /*
597 597 * New functions added for mpxio support
598 598 */
599 599 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
600 600 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
601 601 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
602 602 int tcount);
603 603 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
604 604 dev_info_t *pdip);
605 605 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
606 606 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
607 607 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
608 608 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
609 609 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
610 610 int what);
611 611 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
612 612 fc_packet_t *fpkt);
613 613 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
614 614
615 615 /*
616 616 * New functions added for lun masking support
617 617 */
618 618 static void fcp_read_blacklist(dev_info_t *dip,
619 619 struct fcp_black_list_entry **pplun_blacklist);
620 620 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
621 621 struct fcp_black_list_entry **pplun_blacklist);
622 622 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
623 623 struct fcp_black_list_entry **pplun_blacklist);
624 624 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
625 625 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
626 626
627 627 /*
628 628 * New functions to support software FCA (like fcoei)
629 629 */
630 630 static struct scsi_pkt *fcp_pseudo_init_pkt(
631 631 struct scsi_address *ap, struct scsi_pkt *pkt,
632 632 struct buf *bp, int cmdlen, int statuslen,
633 633 int tgtlen, int flags, int (*callback)(), caddr_t arg);
634 634 static void fcp_pseudo_destroy_pkt(
635 635 struct scsi_address *ap, struct scsi_pkt *pkt);
636 636 static void fcp_pseudo_sync_pkt(
637 637 struct scsi_address *ap, struct scsi_pkt *pkt);
638 638 static int fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt);
639 639 static void fcp_pseudo_dmafree(
640 640 struct scsi_address *ap, struct scsi_pkt *pkt);
641 641
642 642 extern struct mod_ops mod_driverops;
643 643 /*
644 644 * This variable is defined in modctl.c and set to '1' after the root driver
645 645 * and fs are loaded. It serves as an indication that the root filesystem can
646 646 * be used.
647 647 */
648 648 extern int modrootloaded;
649 649 /*
650 650 * This table contains strings associated with the SCSI sense key codes. It
651 651 * is used by FCP to print a clear explanation of the code returned in the
652 652 * sense information by a device.
653 653 */
654 654 extern char *sense_keys[];
655 655 /*
656 656 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
657 657 * under this device that the paths to a physical device are created when
658 658 * MPxIO is used.
659 659 */
660 660 extern dev_info_t *scsi_vhci_dip;
661 661
662 662 /*
663 663 * Report lun processing
664 664 */
665 665 #define FCP_LUN_ADDRESSING 0x80
666 666 #define FCP_PD_ADDRESSING 0x00
667 667 #define FCP_VOLUME_ADDRESSING 0x40
668 668
669 669 #define FCP_SVE_THROTTLE 0x28 /* Vicom */
670 670 #define MAX_INT_DMA 0x7fffffff
671 671 /*
672 672 * Property definitions
673 673 */
674 674 #define NODE_WWN_PROP (char *)fcp_node_wwn_prop
675 675 #define PORT_WWN_PROP (char *)fcp_port_wwn_prop
676 676 #define TARGET_PROP (char *)fcp_target_prop
677 677 #define LUN_PROP (char *)fcp_lun_prop
678 678 #define SAM_LUN_PROP (char *)fcp_sam_lun_prop
679 679 #define CONF_WWN_PROP (char *)fcp_conf_wwn_prop
680 680 #define OBP_BOOT_WWN (char *)fcp_obp_boot_wwn
681 681 #define MANUAL_CFG_ONLY (char *)fcp_manual_config_only
682 682 #define INIT_PORT_PROP (char *)fcp_init_port_prop
683 683 #define TGT_PORT_PROP (char *)fcp_tgt_port_prop
684 684 #define LUN_BLACKLIST_PROP (char *)fcp_lun_blacklist_prop
685 685 /*
686 686 * Short hand macros.
687 687 */
688 688 #define LUN_PORT (plun->lun_tgt->tgt_port)
689 689 #define LUN_TGT (plun->lun_tgt)
690 690
691 691 /*
692 692 * Driver private macros
693 693 */
694 694 #define FCP_ATOB(x) (((x) >= '0' && (x) <= '9') ? ((x) - '0') : \
695 695 ((x) >= 'a' && (x) <= 'f') ? \
696 696 ((x) - 'a' + 10) : ((x) - 'A' + 10))
697 697
698 698 #define FCP_MAX(a, b) ((a) > (b) ? (a) : (b))
699 699
700 700 #define FCP_N_NDI_EVENTS \
701 701 (sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))
702 702
703 703 #define FCP_LINK_STATE_CHANGED(p, c) \
704 704 ((p)->port_link_cnt != (c)->ipkt_link_cnt)
705 705
706 706 #define FCP_TGT_STATE_CHANGED(t, c) \
707 707 ((t)->tgt_change_cnt != (c)->ipkt_change_cnt)
708 708
709 709 #define FCP_STATE_CHANGED(p, t, c) \
710 710 (FCP_TGT_STATE_CHANGED(t, c))
711 711
712 712 #define FCP_MUST_RETRY(fpkt) \
713 713 ((fpkt)->pkt_state == FC_PKT_LOCAL_BSY || \
714 714 (fpkt)->pkt_state == FC_PKT_LOCAL_RJT || \
715 715 (fpkt)->pkt_state == FC_PKT_TRAN_BSY || \
716 716 (fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS || \
717 717 (fpkt)->pkt_state == FC_PKT_NPORT_BSY || \
718 718 (fpkt)->pkt_state == FC_PKT_FABRIC_BSY || \
719 719 (fpkt)->pkt_state == FC_PKT_PORT_OFFLINE || \
720 720 (fpkt)->pkt_reason == FC_REASON_OFFLINE)
721 721
722 722 #define FCP_SENSE_REPORTLUN_CHANGED(es) \
723 723 ((es)->es_key == KEY_UNIT_ATTENTION && \
724 724 (es)->es_add_code == 0x3f && \
725 725 (es)->es_qual_code == 0x0e)
726 726
727 727 #define FCP_SENSE_NO_LUN(es) \
728 728 ((es)->es_key == KEY_ILLEGAL_REQUEST && \
729 729 (es)->es_add_code == 0x25 && \
730 730 (es)->es_qual_code == 0x0)
731 731
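
	For reference, the additional sense code / qualifier pairs tested by the two
	macros above are the standard SCSI (SPC) values; a short annotation (editorial
	note, not part of the webrev):

		/*
		 * es_add_code/es_qual_code 0x3f/0x0e - REPORTED LUNS DATA HAS CHANGED
		 * es_add_code/es_qual_code 0x25/0x00 - LOGICAL UNIT NOT SUPPORTED
		 */
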
732 732 #define FCP_VERSION "20091208-1.192"
733 733 #define FCP_NAME_VERSION "SunFC FCP v" FCP_VERSION
734 734
735 735 #define FCP_NUM_ELEMENTS(array) \
736 736 (sizeof (array) / sizeof ((array)[0]))
737 737
738 738 /*
739 739 * Debugging, Error reporting, and tracing
740 740 */
741 741 #define FCP_LOG_SIZE 1024 * 1024
742 742
743 743 #define FCP_LEVEL_1 0x00001 /* attach/detach PM CPR */
744 744 #define FCP_LEVEL_2 0x00002 /* failures/Invalid data */
745 745 #define FCP_LEVEL_3 0x00004 /* state change, discovery */
746 746 #define FCP_LEVEL_4 0x00008 /* ULP messages */
747 747 #define FCP_LEVEL_5 0x00010 /* ELS/SCSI cmds */
748 748 #define FCP_LEVEL_6 0x00020 /* Transport failures */
749 749 #define FCP_LEVEL_7 0x00040
750 750 #define FCP_LEVEL_8 0x00080 /* I/O tracing */
751 751 #define FCP_LEVEL_9 0x00100 /* I/O tracing */
752 752
753 753
754 754
755 755 /*
756 756 * Log contents to system messages file
757 757 */
758 758 #define FCP_MSG_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
759 759 #define FCP_MSG_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
760 760 #define FCP_MSG_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
761 761 #define FCP_MSG_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
762 762 #define FCP_MSG_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
763 763 #define FCP_MSG_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
764 764 #define FCP_MSG_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
765 765 #define FCP_MSG_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
766 766 #define FCP_MSG_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_MSG)
767 767
768 768
769 769 /*
770 770 * Log contents to trace buffer
771 771 */
772 772 #define FCP_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
773 773 #define FCP_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
774 774 #define FCP_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
775 775 #define FCP_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
776 776 #define FCP_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
777 777 #define FCP_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
778 778 #define FCP_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
779 779 #define FCP_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
780 780 #define FCP_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF)
781 781
782 782
783 783 /*
784 784 * Log contents to both system messages file and trace buffer
785 785 */
786 786 #define FCP_MSG_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF | \
787 787 FC_TRACE_LOG_MSG)
788 788 #define FCP_MSG_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF | \
789 789 FC_TRACE_LOG_MSG)
790 790 #define FCP_MSG_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF | \
791 791 FC_TRACE_LOG_MSG)
792 792 #define FCP_MSG_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF | \
793 793 FC_TRACE_LOG_MSG)
794 794 #define FCP_MSG_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF | \
795 795 FC_TRACE_LOG_MSG)
796 796 #define FCP_MSG_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF | \
797 797 FC_TRACE_LOG_MSG)
798 798 #define FCP_MSG_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF | \
799 799 FC_TRACE_LOG_MSG)
800 800 #define FCP_MSG_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF | \
801 801 FC_TRACE_LOG_MSG)
802 802 #define FCP_MSG_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF | \
803 803 FC_TRACE_LOG_MSG)
804 804 #ifdef DEBUG
805 805 #define FCP_DTRACE fc_trace_debug
806 806 #else
807 807 #define FCP_DTRACE
808 808 #endif
809 809
810 810 #define FCP_TRACE fc_trace_debug
811 811
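
	A minimal usage sketch (editorial, not part of the webrev): the FCP_LEVEL_n
	masks are OR'ed with FC_TRACE_LOG_BUF and/or FC_TRACE_LOG_MSG and handed to
	fc_trace_debug() through FCP_TRACE/FCP_DTRACE, together with the shared log
	queue fcp_logq and the active mask fcp_trace. The call in fcp_attach() later
	in this file is representative:

		FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
		    FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
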
812 812 static struct cb_ops fcp_cb_ops = {
813 813 fcp_open, /* open */
814 814 fcp_close, /* close */
815 815 nodev, /* strategy */
816 816 nodev, /* print */
817 817 nodev, /* dump */
818 818 nodev, /* read */
819 819 nodev, /* write */
820 820 fcp_ioctl, /* ioctl */
821 821 nodev, /* devmap */
822 822 nodev, /* mmap */
823 823 nodev, /* segmap */
824 824 nochpoll, /* chpoll */
825 825 ddi_prop_op, /* cb_prop_op */
826 826 0, /* streamtab */
827 827 D_NEW | D_MP | D_HOTPLUG, /* cb_flag */
828 828 CB_REV, /* rev */
829 829 nodev, /* aread */
830 830 nodev /* awrite */
831 831 };
832 832
833 833
834 834 static struct dev_ops fcp_ops = {
835 835 DEVO_REV,
836 836 0,
837 837 ddi_getinfo_1to1,
838 838 nulldev, /* identify */
839 839 nulldev, /* probe */
840 840 fcp_attach, /* attach and detach are mandatory */
841 841 fcp_detach,
842 842 nodev, /* reset */
843 843 &fcp_cb_ops, /* cb_ops */
844 844 NULL, /* bus_ops */
845 845 NULL, /* power */
846 846 };
847 847
848 848
849 849 char *fcp_version = FCP_NAME_VERSION;
850 850
851 851 static struct modldrv modldrv = {
852 852 &mod_driverops,
853 853 FCP_NAME_VERSION,
854 854 &fcp_ops
855 855 };
856 856
857 857
858 858 static struct modlinkage modlinkage = {
859 859 MODREV_1,
860 - &modldrv,
861 - NULL
860 + { &modldrv, NULL }
862 861 };
863 862
864 863
865 864 static fc_ulp_modinfo_t fcp_modinfo = {
866 865 &fcp_modinfo, /* ulp_handle */
867 866 FCTL_ULP_MODREV_4, /* ulp_rev */
868 867 FC4_SCSI_FCP, /* ulp_type */
869 868 "fcp", /* ulp_name */
870 869 FCP_STATEC_MASK, /* ulp_statec_mask */
871 870 fcp_port_attach, /* ulp_port_attach */
872 871 fcp_port_detach, /* ulp_port_detach */
873 872 fcp_port_ioctl, /* ulp_port_ioctl */
874 873 fcp_els_callback, /* ulp_els_callback */
875 874 fcp_data_callback, /* ulp_data_callback */
876 875 fcp_statec_callback /* ulp_statec_callback */
877 876 };
878 877
879 878 #ifdef DEBUG
880 879 #define FCP_TRACE_DEFAULT (FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
881 880 FCP_LEVEL_2 | FCP_LEVEL_3 | \
882 881 FCP_LEVEL_4 | FCP_LEVEL_5 | \
883 882 FCP_LEVEL_6 | FCP_LEVEL_7)
884 883 #else
885 884 #define FCP_TRACE_DEFAULT (FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
886 885 FCP_LEVEL_2 | FCP_LEVEL_3 | \
887 886 FCP_LEVEL_4 | FCP_LEVEL_5 | \
888 887 FCP_LEVEL_6 | FCP_LEVEL_7)
889 888 #endif
890 889
891 890 /* FCP global variables */
892 891 int fcp_bus_config_debug = 0;
893 892 static int fcp_log_size = FCP_LOG_SIZE;
894 893 static int fcp_trace = FCP_TRACE_DEFAULT;
895 894 static fc_trace_logq_t *fcp_logq = NULL;
896 895 static struct fcp_black_list_entry *fcp_lun_blacklist = NULL;
897 896 /*
898 897 * The auto-configuration is set by default. The only way of disabling it is
899 898 * through the property MANUAL_CFG_ONLY in the fcp.conf file.
900 899 */
901 900 static int fcp_enable_auto_configuration = 1;
902 901 static int fcp_max_bus_config_retries = 4;
903 902 static int fcp_lun_ready_retry = 300;
904 903 /*
905 904 * The value assigned to the following variable has changed several times due
906 905 * to a problem with the data underruns reporting of some firmware(s). The
907 906 * current value of 50 gives a timeout value of 25 seconds for a max number
908 907 * of 256 LUNs.
909 908 */
910 909 static int fcp_max_target_retries = 50;
911 910 /*
912 911 * Watchdog variables
913 912 * ------------------
914 913 *
915 914 * fcp_watchdog_init
916 915 *
917 916 * Indicates if the watchdog timer is running or not. This is actually
918 917 * a counter of the number of Fibre Channel ports that attached. When
919 918 * the first port attaches the watchdog is started. When the last port
920 919 * detaches the watchdog timer is stopped.
921 920 *
922 921 * fcp_watchdog_time
923 922 *
924 923 * This is the watchdog clock counter. It is incremented by
925 924 * fcp_watchdog_timeout each time the watchdog timer expires.
926 925 *
927 926 * fcp_watchdog_timeout
928 927 *
929 928 * Increment value of the variable fcp_watchdog_time as well as the
930 929 * timeout value of the watchdog timer. The unit is 1 second. It
931 930 * is strange that this is not a #define but a variable since the code
932 931 * never changes this value. The reason why it can be said that the
933 932 * unit is 1 second is because the number of ticks for the watchdog
934 933 * timer is determined like this:
935 934 *
936 935 * fcp_watchdog_tick = fcp_watchdog_timeout *
937 936 * drv_usectohz(1000000);
938 937 *
939 938 * The value 1000000 is hard coded in the code.
940 939 *
941 940 * fcp_watchdog_tick
942 941 *
943 942 * Watchdog timer value in ticks.
944 943 */
945 944 static int fcp_watchdog_init = 0;
946 945 static int fcp_watchdog_time = 0;
947 946 static int fcp_watchdog_timeout = 1;
948 947 static int fcp_watchdog_tick;
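
	To make the relationship between these four variables concrete, here is a
	rough sketch of the arming logic described in the comment above (editorial;
	the actual call sites live elsewhere in fcp.c and may differ in detail):

		/* computed once, when the first port attaches */
		fcp_watchdog_tick = fcp_watchdog_timeout * drv_usectohz(1000000);
		fcp_watchdog_id = timeout(fcp_watch, NULL, fcp_watchdog_tick);

		/* in fcp_watch(), on every expiration */
		fcp_watchdog_time += fcp_watchdog_timeout;
		fcp_watchdog_id = timeout(fcp_watch, NULL, fcp_watchdog_tick);
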
949 948
950 949 /*
951 950 * fcp_offline_delay is a global variable to enable customisation of
952 951 * the timeout on link offlines or RSCNs. The default value is set
953 952 * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
954 953 * specified in FCP4 Chapter 11 (see www.t10.org).
955 954 *
956 955 * The variable fcp_offline_delay is specified in SECONDS.
957 956 *
958 957 * If we made this a static var then the user would not be able to
959 958 * change it. This variable is set in fcp_attach().
960 959 */
961 960 unsigned int fcp_offline_delay = FCP_OFFLINE_DELAY;
962 961
963 962 static void *fcp_softstate = NULL; /* for soft state */
964 963 static uchar_t fcp_oflag = FCP_IDLE; /* open flag */
965 964 static kmutex_t fcp_global_mutex;
966 965 static kmutex_t fcp_ioctl_mutex;
967 966 static dev_info_t *fcp_global_dip = NULL;
968 967 static timeout_id_t fcp_watchdog_id;
969 968 const char *fcp_lun_prop = "lun";
970 969 const char *fcp_sam_lun_prop = "sam-lun";
971 970 const char *fcp_target_prop = "target";
972 971 /*
973 972 * NOTE: consumers of "node-wwn" property include stmsboot in ON
974 973 * consolidation.
975 974 */
976 975 const char *fcp_node_wwn_prop = "node-wwn";
977 976 const char *fcp_port_wwn_prop = "port-wwn";
978 977 const char *fcp_conf_wwn_prop = "fc-port-wwn";
979 978 const char *fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
980 979 const char *fcp_manual_config_only = "manual_configuration_only";
981 980 const char *fcp_init_port_prop = "initiator-port";
982 981 const char *fcp_tgt_port_prop = "target-port";
983 982 const char *fcp_lun_blacklist_prop = "pwwn-lun-blacklist";
984 983
985 984 static struct fcp_port *fcp_port_head = NULL;
986 985 static ddi_eventcookie_t fcp_insert_eid;
987 986 static ddi_eventcookie_t fcp_remove_eid;
988 987
989 988 static ndi_event_definition_t fcp_ndi_event_defs[] = {
990 989 { FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
991 990 { FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
992 991 };
993 992
994 993 /*
995 994 * List of valid commands for the scsi_ioctl call
996 995 */
997 996 static uint8_t scsi_ioctl_list[] = {
998 997 SCMD_INQUIRY,
999 998 SCMD_REPORT_LUN,
1000 999 SCMD_READ_CAPACITY
1001 1000 };
1002 1001
1003 1002 /*
1004 1003 * this is used to dummy up a report lun response for cases
1005 1004 * where the target doesn't support it
1006 1005 */
1007 1006 static uchar_t fcp_dummy_lun[] = {
1008 1007 0x00, /* MSB length (length = no of luns * 8) */
1009 1008 0x00,
1010 1009 0x00,
1011 1010 0x08, /* LSB length */
1012 1011 0x00, /* MSB reserved */
1013 1012 0x00,
1014 1013 0x00,
1015 1014 0x00, /* LSB reserved */
1016 1015 FCP_PD_ADDRESSING,
1017 1016 0x00, /* LUN is ZERO at the first level */
1018 1017 0x00,
1019 1018 0x00, /* second level is zero */
1020 1019 0x00,
1021 1020 0x00, /* third level is zero */
1022 1021 0x00,
1023 1022 0x00 /* fourth level is zero */
1024 1023 };
1025 1024
1026 1025 static uchar_t fcp_alpa_to_switch[] = {
1027 1026 0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
1028 1027 0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
1029 1028 0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
1030 1029 0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
1031 1030 0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
1032 1031 0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
1033 1032 0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
1034 1033 0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
1035 1034 0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
1036 1035 0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
1037 1036 0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
1038 1037 0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
1039 1038 0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
1040 1039 0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
1041 1040 0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
1042 1041 0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
1043 1042 0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
1044 1043 0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
1045 1044 0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
1046 1045 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
1047 1046 0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
1048 1047 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
1049 1048 0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
1050 1049 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1051 1050 };
1052 1051
1053 1052 static caddr_t pid = "SESS01 ";
1054 1053
1055 1054 #if !defined(lint)
1056 1055
1057 1056 _NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
1058 1057 fcp_port::fcp_next fcp_watchdog_id))
1059 1058
1060 1059 _NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))
1061 1060
1062 1061 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1063 1062 fcp_insert_eid
1064 1063 fcp_remove_eid
1065 1064 fcp_watchdog_time))
1066 1065
1067 1066 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1068 1067 fcp_cb_ops
1069 1068 fcp_ops
1070 1069 callb_cpr))
1071 1070
1072 1071 #endif /* lint */
1073 1072
1074 1073 /*
1075 1074 * This table is used to determine whether or not it's safe to copy in
1076 1075 * the target node name for a lun. Since all luns behind the same target
1077 1076 * have the same wwnn, only targets that do not support multiple luns are
1078 1077 * eligible to be enumerated under mpxio if they aren't page83 compliant.
1079 1078 */
1080 1079
1081 1080 char *fcp_symmetric_disk_table[] = {
1082 1081 "SEAGATE ST",
1083 1082 "IBM DDYFT",
1084 1083 "SUNW SUNWGS", /* Daktari enclosure */
1085 1084 "SUN SENA", /* SES device */
1086 1085 "SUN SESS01" /* VICOM SVE box */
1087 1086 };
1088 1087
1089 1088 int fcp_symmetric_disk_table_size =
1090 1089 sizeof (fcp_symmetric_disk_table)/sizeof (char *);
1091 1090
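
	To illustrate how a vendor/product table like this is typically consulted,
	a hypothetical lookup follows (editorial sketch only; the helper name
	fcp_symmetric_table_lookup and its inquiry-string argument are invented for
	illustration and this is not the driver's actual fcp_symmetric_device_probe()
	logic):

		static int
		fcp_symmetric_table_lookup(const char *inq_vid_pid)
		{
			int i;

			/* match the INQUIRY vendor/product prefix against each entry */
			for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
				if (strncmp(inq_vid_pid, fcp_symmetric_disk_table[i],
				    strlen(fcp_symmetric_disk_table[i])) == 0) {
					return (1);
				}
			}
			return (0);
		}
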
1092 1091 /*
1093 1092 * This structure is bogus. scsi_hba_attach_setup() requires this information
1094 1093 * (the kernel will panic if you don't pass it to the routine).
1095 1094 * We still need to determine what impact, if any, providing this
1096 1095 * information has on the system. Since DMA allocation is done in pkt_init it may
1097 1096 * not have any impact. These values are straight from the Writing Device
1098 1097 * Driver manual.
1099 1098 */
1100 1099 static ddi_dma_attr_t pseudo_fca_dma_attr = {
1101 1100 DMA_ATTR_V0, /* ddi_dma_attr version */
1102 1101 0, /* low address */
1103 1102 0xffffffff, /* high address */
1104 1103 0x00ffffff, /* counter upper bound */
1105 1104 1, /* alignment requirements */
1106 1105 0x3f, /* burst sizes */
1107 1106 1, /* minimum DMA access */
1108 1107 0xffffffff, /* maximum DMA access */
1109 1108 (1 << 24) - 1, /* segment boundary restrictions */
1110 1109	1,			/* scatter/gather list length */
1111 1110 512, /* device granularity */
1112 1111 0 /* DMA flags */
1113 1112 };
1114 1113
1115 1114 /*
1116 1115 * The _init(9e) return value should be that of mod_install(9f). Under
1117 1116 * some circumstances, a failure may not be related to mod_install(9f) and
1118 1117 * one would then require a return value to indicate the failure. Looking
1119 1118 * at mod_install(9f), it is expected to return 0 for success and non-zero
1120 1119 * for failure. mod_install(9f) for device drivers, further goes down the
1121 1120 * for failure. mod_install(9f) for device drivers further goes down the
1122 1121 * DDI_SUCCESS and DDI_FAILURE - There are also other functions in the
1123 1122 * calling chain of mod_install(9f) which return values like EINVAL and
1124 1123 * in some cases even return -1.
1125 1124 *
1126 1125 * To work around the vagaries of the mod_install() calling chain, return
1127 1126 * either 0 or ENODEV depending on the success or failure of mod_install()
1128 1127 */
1129 1128 int
1130 1129 _init(void)
1131 1130 {
1132 1131 int rval;
1133 1132
1134 1133 /*
1135 1134 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
1136 1135 * before registering with the transport.
1137 1136 */
1138 1137 if (ddi_soft_state_init(&fcp_softstate,
1139 1138 sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1140 1139 return (EINVAL);
1141 1140 }
1142 1141
1143 1142 mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1144 1143 mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1145 1144
1146 1145 if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1147 1146 cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1148 1147 mutex_destroy(&fcp_global_mutex);
1149 1148 mutex_destroy(&fcp_ioctl_mutex);
1150 1149 ddi_soft_state_fini(&fcp_softstate);
1151 1150 return (ENODEV);
1152 1151 }
1153 1152
1154 1153 fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1155 1154
1156 1155 if ((rval = mod_install(&modlinkage)) != 0) {
1157 1156 fc_trace_free_logq(fcp_logq);
1158 1157 (void) fc_ulp_remove(&fcp_modinfo);
1159 1158 mutex_destroy(&fcp_global_mutex);
1160 1159 mutex_destroy(&fcp_ioctl_mutex);
1161 1160 ddi_soft_state_fini(&fcp_softstate);
1162 1161 rval = ENODEV;
1163 1162 }
1164 1163
1165 1164 return (rval);
1166 1165 }
1167 1166
1168 1167
1169 1168 /*
1170 1169 * the system is done with us as a driver, so clean up
1171 1170 */
1172 1171 int
1173 1172 _fini(void)
1174 1173 {
1175 1174 int rval;
1176 1175
1177 1176 /*
1178 1177 * don't start cleaning up until we know that the module remove
1179 1178 * has worked -- if this works, then we know that each instance
1180 1179 * has successfully been DDI_DETACHed
1181 1180 */
1182 1181 if ((rval = mod_remove(&modlinkage)) != 0) {
1183 1182 return (rval);
1184 1183 }
1185 1184
1186 1185 (void) fc_ulp_remove(&fcp_modinfo);
1187 1186
1188 1187 ddi_soft_state_fini(&fcp_softstate);
1189 1188 mutex_destroy(&fcp_global_mutex);
1190 1189 mutex_destroy(&fcp_ioctl_mutex);
1191 1190 fc_trace_free_logq(fcp_logq);
1192 1191
1193 1192 return (rval);
1194 1193 }
1195 1194
1196 1195
1197 1196 int
1198 1197 _info(struct modinfo *modinfop)
1199 1198 {
1200 1199 return (mod_info(&modlinkage, modinfop));
1201 1200 }
1202 1201
1203 1202
1204 1203 /*
1205 1204 * attach the module
1206 1205 */
1207 1206 static int
1208 1207 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1209 1208 {
1210 1209 int rval = DDI_SUCCESS;
1211 1210
1212 1211 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1213 1212 FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1214 1213
1215 1214 if (cmd == DDI_ATTACH) {
1216 1215 /* The FCP pseudo device is created here. */
1217 1216 mutex_enter(&fcp_global_mutex);
1218 1217 fcp_global_dip = devi;
1219 1218 mutex_exit(&fcp_global_mutex);
1220 1219
1221 1220 if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1222 1221 0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1223 1222 ddi_report_dev(fcp_global_dip);
1224 1223 } else {
1225 1224 cmn_err(CE_WARN, "FCP: Cannot create minor node");
1226 1225 mutex_enter(&fcp_global_mutex);
1227 1226 fcp_global_dip = NULL;
1228 1227 mutex_exit(&fcp_global_mutex);
1229 1228
1230 1229 rval = DDI_FAILURE;
1231 1230 }
1232 1231 /*
1233 1232 * We check the fcp_offline_delay property at this
1234 1233 * point. This variable is global for the driver,
1235 1234 * not specific to an instance.
1236 1235 *
1237 1236 * We do not recommend setting the value to less
1238 1237 * than 10 seconds (RA_TOV_els), or greater than
1239 1238 * 60 seconds.
1240 1239 */
1241 1240 fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1242 1241 devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1243 1242 "fcp_offline_delay", FCP_OFFLINE_DELAY);
1244 1243 if ((fcp_offline_delay < 10) ||
1245 1244 (fcp_offline_delay > 60)) {
1246 1245 cmn_err(CE_WARN, "Setting fcp_offline_delay "
1247 1246 "to %d second(s). This is outside the "
1248 1247 "recommended range of 10..60 seconds.",
1249 1248 fcp_offline_delay);
1250 1249 }
1251 1250 }
1252 1251
1253 1252 return (rval);
1254 1253 }
1255 1254
1256 1255
1257 1256 /*ARGSUSED*/
1258 1257 static int
1259 1258 fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1260 1259 {
1261 1260 int res = DDI_SUCCESS;
1262 1261
1263 1262 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1264 1263 FCP_BUF_LEVEL_8, 0, "module detach: cmd=0x%x", cmd);
1265 1264
1266 1265 if (cmd == DDI_DETACH) {
1267 1266 /*
1268 1267 * Check if there are active ports/threads. If there
1269 1268 * are any, we will fail, else we will succeed (there
1270 1269 * should not be much to clean up)
1271 1270 */
1272 1271 mutex_enter(&fcp_global_mutex);
1273 1272 FCP_DTRACE(fcp_logq, "fcp",
1274 1273 fcp_trace, FCP_BUF_LEVEL_8, 0, "port_head=%p",
1275 1274 (void *) fcp_port_head);
1276 1275
1277 1276 if (fcp_port_head == NULL) {
1278 1277 ddi_remove_minor_node(fcp_global_dip, NULL);
1279 1278 fcp_global_dip = NULL;
1280 1279 mutex_exit(&fcp_global_mutex);
1281 1280 } else {
1282 1281 mutex_exit(&fcp_global_mutex);
1283 1282 res = DDI_FAILURE;
1284 1283 }
1285 1284 }
1286 1285 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1287 1286 FCP_BUF_LEVEL_8, 0, "module detach returning %d", res);
1288 1287
1289 1288 return (res);
1290 1289 }
1291 1290
1292 1291
1293 1292 /* ARGSUSED */
1294 1293 static int
1295 1294 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1296 1295 {
1297 1296 if (otype != OTYP_CHR) {
1298 1297 return (EINVAL);
1299 1298 }
1300 1299
1301 1300 /*
1302 1301 * Allow only root to talk;
1303 1302 */
1304 1303 if (drv_priv(credp)) {
1305 1304 return (EPERM);
1306 1305 }
1307 1306
1308 1307 mutex_enter(&fcp_global_mutex);
1309 1308 if (fcp_oflag & FCP_EXCL) {
1310 1309 mutex_exit(&fcp_global_mutex);
1311 1310 return (EBUSY);
1312 1311 }
1313 1312
1314 1313 if (flag & FEXCL) {
1315 1314 if (fcp_oflag & FCP_OPEN) {
1316 1315 mutex_exit(&fcp_global_mutex);
1317 1316 return (EBUSY);
1318 1317 }
1319 1318 fcp_oflag |= FCP_EXCL;
1320 1319 }
1321 1320 fcp_oflag |= FCP_OPEN;
1322 1321 mutex_exit(&fcp_global_mutex);
1323 1322
1324 1323 return (0);
1325 1324 }
1326 1325
1327 1326
1328 1327 /* ARGSUSED */
1329 1328 static int
1330 1329 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1331 1330 {
1332 1331 if (otype != OTYP_CHR) {
1333 1332 return (EINVAL);
1334 1333 }
1335 1334
1336 1335 mutex_enter(&fcp_global_mutex);
1337 1336 if (!(fcp_oflag & FCP_OPEN)) {
1338 1337 mutex_exit(&fcp_global_mutex);
1339 1338 return (ENODEV);
1340 1339 }
1341 1340 fcp_oflag = FCP_IDLE;
1342 1341 mutex_exit(&fcp_global_mutex);
1343 1342
1344 1343 return (0);
1345 1344 }
1346 1345
1347 1346
1348 1347 /*
1349 1348 * fcp_ioctl
1350 1349 * Entry point for the FCP ioctls
1351 1350 *
1352 1351 * Input:
1353 1352 * See ioctl(9E)
1354 1353 *
1355 1354 * Output:
1356 1355 * See ioctl(9E)
1357 1356 *
1358 1357 * Returns:
1359 1358 * See ioctl(9E)
1360 1359 *
1361 1360 * Context:
1362 1361 * Kernel context.
1363 1362 */
1364 1363 /* ARGSUSED */
1365 1364 static int
1366 1365 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1367 1366 int *rval)
1368 1367 {
1369 1368 int ret = 0;
1370 1369
1371 1370 mutex_enter(&fcp_global_mutex);
1372 1371 if (!(fcp_oflag & FCP_OPEN)) {
1373 1372 mutex_exit(&fcp_global_mutex);
1374 1373 return (ENXIO);
1375 1374 }
1376 1375 mutex_exit(&fcp_global_mutex);
1377 1376
1378 1377 switch (cmd) {
1379 1378 case FCP_TGT_INQUIRY:
1380 1379 case FCP_TGT_CREATE:
1381 1380 case FCP_TGT_DELETE:
1382 1381 ret = fcp_setup_device_data_ioctl(cmd,
1383 1382 (struct fcp_ioctl *)data, mode, rval);
1384 1383 break;
1385 1384
1386 1385 case FCP_TGT_SEND_SCSI:
1387 1386 mutex_enter(&fcp_ioctl_mutex);
1388 1387 ret = fcp_setup_scsi_ioctl(
1389 1388 (struct fcp_scsi_cmd *)data, mode, rval);
1390 1389 mutex_exit(&fcp_ioctl_mutex);
1391 1390 break;
1392 1391
1393 1392 case FCP_STATE_COUNT:
1394 1393 ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1395 1394 mode, rval);
1396 1395 break;
1397 1396 case FCP_GET_TARGET_MAPPINGS:
1398 1397 ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1399 1398 mode, rval);
1400 1399 break;
1401 1400 default:
1402 1401 fcp_log(CE_WARN, NULL,
1403 1402 "!Invalid ioctl opcode = 0x%x", cmd);
1404 1403 ret = EINVAL;
1405 1404 }
1406 1405
1407 1406 return (ret);
1408 1407 }
1409 1408
1410 1409
1411 1410 /*
1412 1411 * fcp_setup_device_data_ioctl
1413 1412 * Setup handler for the "device data" style of
1414 1413 * ioctl for FCP. See "fcp_util.h" for data structure
1415 1414 * definition.
1416 1415 *
1417 1416 * Input:
1418 1417 * cmd = FCP ioctl command
1419 1418 * data = ioctl data
1420 1419 * mode = See ioctl(9E)
1421 1420 *
1422 1421 * Output:
1423 1422 * data = ioctl data
1424 1423 * rval = return value - see ioctl(9E)
1425 1424 *
1426 1425 * Returns:
1427 1426 * See ioctl(9E)
1428 1427 *
1429 1428 * Context:
1430 1429 * Kernel context.
1431 1430 */
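/*
 * Note: "data" points to a struct fcp_ioctl in user space whose "list"
 * member refers to an array of "listlen" struct device_data entries; each
 * entry names a target by its port WWN (dev_pwwn) and receives a per-target
 * status in dev_status in the loop below.
 */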
1432 1431 /* ARGSUSED */
1433 1432 static int
1434 1433 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1435 1434 int *rval)
1436 1435 {
1437 1436 struct fcp_port *pptr;
1438 1437 struct device_data *dev_data;
1439 1438 uint32_t link_cnt;
1440 1439 la_wwn_t *wwn_ptr = NULL;
1441 1440 struct fcp_tgt *ptgt = NULL;
1442 1441 struct fcp_lun *plun = NULL;
1443 1442 int i, error;
1444 1443 struct fcp_ioctl fioctl;
1445 1444
1446 1445 #ifdef _MULTI_DATAMODEL
1447 1446 switch (ddi_model_convert_from(mode & FMODELS)) {
1448 1447 case DDI_MODEL_ILP32: {
1449 1448 struct fcp32_ioctl f32_ioctl;
1450 1449
1451 1450 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1452 1451 sizeof (struct fcp32_ioctl), mode)) {
1453 1452 return (EFAULT);
1454 1453 }
1455 1454 fioctl.fp_minor = f32_ioctl.fp_minor;
1456 1455 fioctl.listlen = f32_ioctl.listlen;
1457 1456 fioctl.list = (caddr_t)(long)f32_ioctl.list;
1458 1457 break;
1459 1458 }
1460 1459 case DDI_MODEL_NONE:
1461 1460 if (ddi_copyin((void *)data, (void *)&fioctl,
1462 1461 sizeof (struct fcp_ioctl), mode)) {
1463 1462 return (EFAULT);
1464 1463 }
1465 1464 break;
1466 1465 }
1467 1466
1468 1467 #else /* _MULTI_DATAMODEL */
1469 1468 if (ddi_copyin((void *)data, (void *)&fioctl,
1470 1469 sizeof (struct fcp_ioctl), mode)) {
1471 1470 return (EFAULT);
1472 1471 }
1473 1472 #endif /* _MULTI_DATAMODEL */
1474 1473
1475 1474 /*
1476 1475 	 * Right now we can assume that the minor number matches
1477 1476 	 * this instance of fp. If this changes, we will need to
1478 1477 * revisit this logic.
1479 1478 */
1480 1479 mutex_enter(&fcp_global_mutex);
1481 1480 pptr = fcp_port_head;
1482 1481 while (pptr) {
1483 1482 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1484 1483 break;
1485 1484 } else {
1486 1485 pptr = pptr->port_next;
1487 1486 }
1488 1487 }
1489 1488 mutex_exit(&fcp_global_mutex);
1490 1489 if (pptr == NULL) {
1491 1490 return (ENXIO);
1492 1491 }
1493 1492 mutex_enter(&pptr->port_mutex);
1494 1493
1495 1494
1496 1495 if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1497 1496 fioctl.listlen, KM_NOSLEEP)) == NULL) {
1498 1497 mutex_exit(&pptr->port_mutex);
1499 1498 return (ENOMEM);
1500 1499 }
1501 1500
1502 1501 if (ddi_copyin(fioctl.list, dev_data,
1503 1502 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1504 1503 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1505 1504 mutex_exit(&pptr->port_mutex);
1506 1505 return (EFAULT);
1507 1506 }
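	/*
	 * Snapshot the link count; the per-target loop further down stops
	 * early if the link bounces (port_link_cnt changes) while we are
	 * walking the list.
	 */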
1508 1507 link_cnt = pptr->port_link_cnt;
1509 1508
1510 1509 if (cmd == FCP_TGT_INQUIRY) {
1511 1510 wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1512 1511 if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1513 1512 sizeof (wwn_ptr->raw_wwn)) == 0) {
1514 1513 /* This ioctl is requesting INQ info of local HBA */
1515 1514 mutex_exit(&pptr->port_mutex);
1516 1515 dev_data[0].dev0_type = DTYPE_UNKNOWN;
1517 1516 dev_data[0].dev_status = 0;
1518 1517 if (ddi_copyout(dev_data, fioctl.list,
1519 1518 (sizeof (struct device_data)) * fioctl.listlen,
1520 1519 mode)) {
1521 1520 kmem_free(dev_data,
1522 1521 sizeof (*dev_data) * fioctl.listlen);
1523 1522 return (EFAULT);
1524 1523 }
1525 1524 kmem_free(dev_data,
1526 1525 sizeof (*dev_data) * fioctl.listlen);
1527 1526 #ifdef _MULTI_DATAMODEL
1528 1527 switch (ddi_model_convert_from(mode & FMODELS)) {
1529 1528 case DDI_MODEL_ILP32: {
1530 1529 struct fcp32_ioctl f32_ioctl;
1531 1530 f32_ioctl.fp_minor = fioctl.fp_minor;
1532 1531 f32_ioctl.listlen = fioctl.listlen;
1533 1532 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1534 1533 if (ddi_copyout((void *)&f32_ioctl,
1535 1534 (void *)data,
1536 1535 sizeof (struct fcp32_ioctl), mode)) {
1537 1536 return (EFAULT);
1538 1537 }
1539 1538 break;
1540 1539 }
1541 1540 case DDI_MODEL_NONE:
1542 1541 if (ddi_copyout((void *)&fioctl, (void *)data,
1543 1542 sizeof (struct fcp_ioctl), mode)) {
1544 1543 return (EFAULT);
1545 1544 }
1546 1545 break;
1547 1546 }
1548 1547 #else /* _MULTI_DATAMODEL */
1549 1548 if (ddi_copyout((void *)&fioctl, (void *)data,
1550 1549 sizeof (struct fcp_ioctl), mode)) {
1551 1550 return (EFAULT);
1552 1551 }
1553 1552 #endif /* _MULTI_DATAMODEL */
1554 1553 return (0);
1555 1554 }
1556 1555 }
1557 1556
1558 1557 if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1559 1558 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1560 1559 mutex_exit(&pptr->port_mutex);
1561 1560 return (ENXIO);
1562 1561 }
1563 1562
1564 1563 for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1565 1564 i++) {
1566 1565 wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1567 1566
1568 1567 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1569 1568
1570 1569
1571 1570 dev_data[i].dev_status = ENXIO;
1572 1571
1573 1572 if ((ptgt = fcp_lookup_target(pptr,
1574 1573 (uchar_t *)wwn_ptr)) == NULL) {
1575 1574 mutex_exit(&pptr->port_mutex);
1576 1575 if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1577 1576 wwn_ptr, &error, 0) == NULL) {
1578 1577 dev_data[i].dev_status = ENODEV;
1579 1578 mutex_enter(&pptr->port_mutex);
1580 1579 continue;
1581 1580 } else {
1582 1581
1583 1582 dev_data[i].dev_status = EAGAIN;
1584 1583
1585 1584 mutex_enter(&pptr->port_mutex);
1586 1585 continue;
1587 1586 }
1588 1587 } else {
1589 1588 mutex_enter(&ptgt->tgt_mutex);
1590 1589 if (ptgt->tgt_state & (FCP_TGT_MARK |
1591 1590 FCP_TGT_BUSY)) {
1592 1591 dev_data[i].dev_status = EAGAIN;
1593 1592 mutex_exit(&ptgt->tgt_mutex);
1594 1593 continue;
1595 1594 }
1596 1595
1597 1596 if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1598 1597 if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1599 1598 dev_data[i].dev_status = ENOTSUP;
1600 1599 } else {
1601 1600 dev_data[i].dev_status = ENXIO;
1602 1601 }
1603 1602 mutex_exit(&ptgt->tgt_mutex);
1604 1603 continue;
1605 1604 }
1606 1605
1607 1606 switch (cmd) {
1608 1607 case FCP_TGT_INQUIRY:
1609 1608 /*
1610 1609 				 * The reason we report the device type
1611 1610 				 * of LUN 0 only, even though in some
1612 1611 				 * cases (like maxstrat) the LUN 0 device
1613 1612 				 * type may be 0x3f (invalid), is that
1614 1613 				 * for bridge boxes the target will appear
1615 1614 				 * as LUNs and the first LUN could be
1616 1615 				 * a device that the utility may not care
1617 1616 				 * about (like a tape device).
1618 1617 */
1619 1618 dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1620 1619 dev_data[i].dev_status = 0;
1621 1620 mutex_exit(&ptgt->tgt_mutex);
1622 1621
1623 1622 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1624 1623 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1625 1624 } else {
1626 1625 dev_data[i].dev0_type = plun->lun_type;
1627 1626 }
1628 1627 mutex_enter(&ptgt->tgt_mutex);
1629 1628 break;
1630 1629
1631 1630 case FCP_TGT_CREATE:
1632 1631 mutex_exit(&ptgt->tgt_mutex);
1633 1632 mutex_exit(&pptr->port_mutex);
1634 1633
1635 1634 /*
1636 1635 				 * Serialize state change callbacks.
1637 1636 				 * Only one callback will be handled
1638 1637 				 * at a time.
1639 1638 */
1640 1639 mutex_enter(&fcp_global_mutex);
1641 1640 if (fcp_oflag & FCP_BUSY) {
1642 1641 mutex_exit(&fcp_global_mutex);
1643 1642 if (dev_data) {
1644 1643 kmem_free(dev_data,
1645 1644 sizeof (*dev_data) *
1646 1645 fioctl.listlen);
1647 1646 }
1648 1647 return (EBUSY);
1649 1648 }
1650 1649 fcp_oflag |= FCP_BUSY;
1651 1650 mutex_exit(&fcp_global_mutex);
1652 1651
1653 1652 dev_data[i].dev_status =
1654 1653 fcp_create_on_demand(pptr,
1655 1654 wwn_ptr->raw_wwn);
1656 1655
1657 1656 if (dev_data[i].dev_status != 0) {
1658 1657 char buf[25];
1659 1658
1660 1659 for (i = 0; i < FC_WWN_SIZE; i++) {
1661 1660 (void) sprintf(&buf[i << 1],
1662 1661 "%02x",
1663 1662 wwn_ptr->raw_wwn[i]);
1664 1663 }
1665 1664
1666 1665 fcp_log(CE_WARN, pptr->port_dip,
1667 1666 "!Failed to create nodes for"
1668 1667 " pwwn=%s; error=%x", buf,
1669 1668 dev_data[i].dev_status);
1670 1669 }
1671 1670
1672 1671 				/* allow state change callbacks again */
1673 1672 mutex_enter(&fcp_global_mutex);
1674 1673 fcp_oflag &= ~FCP_BUSY;
1675 1674 mutex_exit(&fcp_global_mutex);
1676 1675
1677 1676 mutex_enter(&pptr->port_mutex);
1678 1677 mutex_enter(&ptgt->tgt_mutex);
1679 1678
1680 1679 break;
1681 1680
1682 1681 case FCP_TGT_DELETE:
1683 1682 break;
1684 1683
1685 1684 default:
1686 1685 fcp_log(CE_WARN, pptr->port_dip,
1687 1686 "!Invalid device data ioctl "
1688 1687 "opcode = 0x%x", cmd);
1689 1688 }
1690 1689 mutex_exit(&ptgt->tgt_mutex);
1691 1690 }
1692 1691 }
1693 1692 mutex_exit(&pptr->port_mutex);
1694 1693
1695 1694 if (ddi_copyout(dev_data, fioctl.list,
1696 1695 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1697 1696 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1698 1697 return (EFAULT);
1699 1698 }
1700 1699 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1701 1700
1702 1701 #ifdef _MULTI_DATAMODEL
1703 1702 switch (ddi_model_convert_from(mode & FMODELS)) {
1704 1703 case DDI_MODEL_ILP32: {
1705 1704 struct fcp32_ioctl f32_ioctl;
1706 1705
1707 1706 f32_ioctl.fp_minor = fioctl.fp_minor;
1708 1707 f32_ioctl.listlen = fioctl.listlen;
1709 1708 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1710 1709 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1711 1710 sizeof (struct fcp32_ioctl), mode)) {
1712 1711 return (EFAULT);
1713 1712 }
1714 1713 break;
1715 1714 }
1716 1715 case DDI_MODEL_NONE:
1717 1716 if (ddi_copyout((void *)&fioctl, (void *)data,
1718 1717 sizeof (struct fcp_ioctl), mode)) {
1719 1718 return (EFAULT);
1720 1719 }
1721 1720 break;
1722 1721 }
1723 1722 #else /* _MULTI_DATAMODEL */
1724 1723
1725 1724 if (ddi_copyout((void *)&fioctl, (void *)data,
1726 1725 sizeof (struct fcp_ioctl), mode)) {
1727 1726 return (EFAULT);
1728 1727 }
1729 1728 #endif /* _MULTI_DATAMODEL */
1730 1729
1731 1730 return (0);
1732 1731 }
1733 1732
1734 1733 /*
1735 1734 * Fetch the target mappings (path, etc.) for all LUNs
1736 1735 * on this port.
1737 1736 */
1738 1737 /* ARGSUSED */
1739 1738 static int
1740 1739 fcp_get_target_mappings(struct fcp_ioctl *data,
1741 1740 int mode, int *rval)
1742 1741 {
1743 1742 struct fcp_port *pptr;
1744 1743 fc_hba_target_mappings_t *mappings;
1745 1744 fc_hba_mapping_entry_t *map;
1746 1745 struct fcp_tgt *ptgt = NULL;
1747 1746 struct fcp_lun *plun = NULL;
1748 1747 int i, mapIndex, mappingSize;
1749 1748 int listlen;
1750 1749 struct fcp_ioctl fioctl;
1751 1750 char *path;
1752 1751 fcp_ent_addr_t sam_lun_addr;
1753 1752
1754 1753 #ifdef _MULTI_DATAMODEL
1755 1754 switch (ddi_model_convert_from(mode & FMODELS)) {
1756 1755 case DDI_MODEL_ILP32: {
1757 1756 struct fcp32_ioctl f32_ioctl;
1758 1757
1759 1758 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1760 1759 sizeof (struct fcp32_ioctl), mode)) {
1761 1760 return (EFAULT);
1762 1761 }
1763 1762 fioctl.fp_minor = f32_ioctl.fp_minor;
1764 1763 fioctl.listlen = f32_ioctl.listlen;
1765 1764 fioctl.list = (caddr_t)(long)f32_ioctl.list;
1766 1765 break;
1767 1766 }
1768 1767 case DDI_MODEL_NONE:
1769 1768 if (ddi_copyin((void *)data, (void *)&fioctl,
1770 1769 sizeof (struct fcp_ioctl), mode)) {
1771 1770 return (EFAULT);
1772 1771 }
1773 1772 break;
1774 1773 }
1775 1774
1776 1775 #else /* _MULTI_DATAMODEL */
1777 1776 if (ddi_copyin((void *)data, (void *)&fioctl,
1778 1777 sizeof (struct fcp_ioctl), mode)) {
1779 1778 return (EFAULT);
1780 1779 }
1781 1780 #endif /* _MULTI_DATAMODEL */
1782 1781
1783 1782 /*
1784 1783 	 * Right now we can assume that the minor number matches
1785 1784 	 * this instance of fp. If this changes, we will need to
1786 1785 * revisit this logic.
1787 1786 */
1788 1787 mutex_enter(&fcp_global_mutex);
1789 1788 pptr = fcp_port_head;
1790 1789 while (pptr) {
1791 1790 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1792 1791 break;
1793 1792 } else {
1794 1793 pptr = pptr->port_next;
1795 1794 }
1796 1795 }
1797 1796 mutex_exit(&fcp_global_mutex);
1798 1797 if (pptr == NULL) {
1799 1798 cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
1800 1799 fioctl.fp_minor);
1801 1800 return (ENXIO);
1802 1801 }
1803 1802
1804 1803
1805 1804 	/* Here listlen carries the total buffer size in bytes */
1806 1805 mappingSize = fioctl.listlen;
1807 1806
1808 1807 /* Now calculate how many mapping entries will fit */
1809 1808 listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
1810 1809 - sizeof (fc_hba_target_mappings_t);
1811 1810 if (listlen <= 0) {
1812 1811 cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
1813 1812 return (ENXIO);
1814 1813 }
1815 1814 listlen = listlen / sizeof (fc_hba_mapping_entry_t);
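	/*
	 * listlen now holds the number of fc_hba_mapping_entry_t slots that
	 * fit in the user buffer once the fc_hba_target_mappings_t header is
	 * accounted for (the one entry size added back above compensates for
	 * the entry embedded in that header).
	 */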
1816 1815
1817 1816 if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
1818 1817 return (ENOMEM);
1819 1818 }
1820 1819 mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;
1821 1820
1822 1821 /* Now get to work */
1823 1822 mapIndex = 0;
1824 1823
1825 1824 mutex_enter(&pptr->port_mutex);
1826 1825 /* Loop through all targets on this port */
1827 1826 for (i = 0; i < FCP_NUM_HASH; i++) {
1828 1827 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
1829 1828 ptgt = ptgt->tgt_next) {
1830 1829
1831 1830 mutex_enter(&ptgt->tgt_mutex);
1832 1831
1833 1832 /* Loop through all LUNs on this target */
1834 1833 for (plun = ptgt->tgt_lun; plun != NULL;
1835 1834 plun = plun->lun_next) {
1836 1835 if (plun->lun_state & FCP_LUN_OFFLINE) {
1837 1836 continue;
1838 1837 }
1839 1838
1840 1839 path = fcp_get_lun_path(plun);
1841 1840 if (path == NULL) {
1842 1841 continue;
1843 1842 }
1844 1843
1845 1844 if (mapIndex >= listlen) {
1846 1845 mapIndex ++;
1847 1846 kmem_free(path, MAXPATHLEN);
1848 1847 continue;
1849 1848 }
1850 1849 map = &mappings->entries[mapIndex++];
1851 1850 bcopy(path, map->targetDriver,
1852 1851 sizeof (map->targetDriver));
1853 1852 map->d_id = ptgt->tgt_d_id;
1854 1853 map->busNumber = 0;
1855 1854 map->targetNumber = ptgt->tgt_d_id;
1856 1855 map->osLUN = plun->lun_num;
1857 1856
1858 1857 /*
1859 1858 				 * We byte-swapped the LUN when we stored it
1860 1859 				 * in lun_addr. We need to swap it back before
1861 1860 				 * returning it to userland.
1862 1861 */
1863 1862
1864 1863 sam_lun_addr.ent_addr_0 =
1865 1864 BE_16(plun->lun_addr.ent_addr_0);
1866 1865 sam_lun_addr.ent_addr_1 =
1867 1866 BE_16(plun->lun_addr.ent_addr_1);
1868 1867 sam_lun_addr.ent_addr_2 =
1869 1868 BE_16(plun->lun_addr.ent_addr_2);
1870 1869 sam_lun_addr.ent_addr_3 =
1871 1870 BE_16(plun->lun_addr.ent_addr_3);
1872 1871
1873 1872 bcopy(&sam_lun_addr, &map->samLUN,
1874 1873 FCP_LUN_SIZE);
1875 1874 bcopy(ptgt->tgt_node_wwn.raw_wwn,
1876 1875 map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
1877 1876 bcopy(ptgt->tgt_port_wwn.raw_wwn,
1878 1877 map->PortWWN.raw_wwn, sizeof (la_wwn_t));
1879 1878
1880 1879 if (plun->lun_guid) {
1881 1880
1882 1881 /* convert ascii wwn to bytes */
1883 1882 fcp_ascii_to_wwn(plun->lun_guid,
1884 1883 map->guid, sizeof (map->guid));
1885 1884
1886 1885 if ((sizeof (map->guid)) <
1887 1886 plun->lun_guid_size / 2) {
1888 1887 cmn_err(CE_WARN,
1889 1888 					    "fcp_get_target_mappings: "
1890 1889 					    "guid copy space "
1891 1890 					    "insufficient. "
1892 1891 "Copy Truncation - "
1893 1892 "available %d; need %d",
1894 1893 (int)sizeof (map->guid),
1895 1894 (int)
1896 1895 plun->lun_guid_size / 2);
1897 1896 }
1898 1897 }
1899 1898 kmem_free(path, MAXPATHLEN);
1900 1899 }
1901 1900 mutex_exit(&ptgt->tgt_mutex);
1902 1901 }
1903 1902 }
1904 1903 mutex_exit(&pptr->port_mutex);
1905 1904 mappings->numLuns = mapIndex;
1906 1905
1907 1906 if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
1908 1907 kmem_free(mappings, mappingSize);
1909 1908 return (EFAULT);
1910 1909 }
1911 1910 kmem_free(mappings, mappingSize);
1912 1911
1913 1912 #ifdef _MULTI_DATAMODEL
1914 1913 switch (ddi_model_convert_from(mode & FMODELS)) {
1915 1914 case DDI_MODEL_ILP32: {
1916 1915 struct fcp32_ioctl f32_ioctl;
1917 1916
1918 1917 f32_ioctl.fp_minor = fioctl.fp_minor;
1919 1918 f32_ioctl.listlen = fioctl.listlen;
1920 1919 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1921 1920 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1922 1921 sizeof (struct fcp32_ioctl), mode)) {
1923 1922 return (EFAULT);
1924 1923 }
1925 1924 break;
1926 1925 }
1927 1926 case DDI_MODEL_NONE:
1928 1927 if (ddi_copyout((void *)&fioctl, (void *)data,
1929 1928 sizeof (struct fcp_ioctl), mode)) {
1930 1929 return (EFAULT);
1931 1930 }
1932 1931 break;
1933 1932 }
1934 1933 #else /* _MULTI_DATAMODEL */
1935 1934
1936 1935 if (ddi_copyout((void *)&fioctl, (void *)data,
1937 1936 sizeof (struct fcp_ioctl), mode)) {
1938 1937 return (EFAULT);
1939 1938 }
1940 1939 #endif /* _MULTI_DATAMODEL */
1941 1940
1942 1941 return (0);
1943 1942 }
1944 1943
1945 1944 /*
1946 1945 * fcp_setup_scsi_ioctl
1947 1946 * Setup handler for the "scsi passthru" style of
1948 1947 * ioctl for FCP. See "fcp_util.h" for data structure
1949 1948 * definition.
1950 1949 *
1951 1950 * Input:
1952 1951 * u_fscsi = ioctl data (user address space)
1953 1952 * mode = See ioctl(9E)
1954 1953 *
1955 1954 * Output:
1956 1955 * u_fscsi = ioctl data (user address space)
1957 1956 * rval = return value - see ioctl(9E)
1958 1957 *
1959 1958 * Returns:
1960 1959 * 0 = OK
1961 1960 * EAGAIN = See errno.h
1962 1961 * EBUSY = See errno.h
1963 1962 * EFAULT = See errno.h
1964 1963 * EINTR = See errno.h
1965 1964 * EINVAL = See errno.h
1966 1965 * EIO = See errno.h
1967 1966 * ENOMEM = See errno.h
1968 1967 * ENXIO = See errno.h
1969 1968 *
1970 1969 * Context:
1971 1970 * Kernel context.
1972 1971 */
1973 1972 /* ARGSUSED */
1974 1973 static int
1975 1974 fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
1976 1975 int mode, int *rval)
1977 1976 {
1978 1977 int ret = 0;
1979 1978 int temp_ret;
1980 1979 caddr_t k_cdbbufaddr = NULL;
1981 1980 caddr_t k_bufaddr = NULL;
1982 1981 caddr_t k_rqbufaddr = NULL;
1983 1982 caddr_t u_cdbbufaddr;
1984 1983 caddr_t u_bufaddr;
1985 1984 caddr_t u_rqbufaddr;
1986 1985 struct fcp_scsi_cmd k_fscsi;
1987 1986
1988 1987 /*
1989 1988 * Get fcp_scsi_cmd array element from user address space
1990 1989 */
1991 1990 if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
1992 1991 != 0) {
1993 1992 return (ret);
1994 1993 }
1995 1994
1996 1995
1997 1996 /*
1998 1997 	 * Even though kmem_alloc() checks the validity of the
1999 1998 	 * buffer length, this check is needed when KM_NOSLEEP
2000 1999 	 * is set in kmem_flags and a zero buffer length is passed.
2001 2000 */
2002 2001 if ((k_fscsi.scsi_cdblen <= 0) ||
2003 2002 (k_fscsi.scsi_buflen <= 0) ||
2004 2003 (k_fscsi.scsi_rqlen <= 0)) {
2005 2004 return (EINVAL);
2006 2005 }
2007 2006
2008 2007 /*
2009 2008 * Allocate data for fcp_scsi_cmd pointer fields
2010 2009 */
2011 2010 if (ret == 0) {
2012 2011 k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
2013 2012 k_bufaddr = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
2014 2013 k_rqbufaddr = kmem_alloc(k_fscsi.scsi_rqlen, KM_NOSLEEP);
2015 2014
2016 2015 if (k_cdbbufaddr == NULL ||
2017 2016 k_bufaddr == NULL ||
2018 2017 k_rqbufaddr == NULL) {
2019 2018 ret = ENOMEM;
2020 2019 }
2021 2020 }
2022 2021
2023 2022 /*
2024 2023 * Get fcp_scsi_cmd pointer fields from user
2025 2024 * address space
2026 2025 */
2027 2026 if (ret == 0) {
2028 2027 u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
2029 2028 u_bufaddr = k_fscsi.scsi_bufaddr;
2030 2029 u_rqbufaddr = k_fscsi.scsi_rqbufaddr;
2031 2030
2032 2031 if (ddi_copyin(u_cdbbufaddr,
2033 2032 k_cdbbufaddr,
2034 2033 k_fscsi.scsi_cdblen,
2035 2034 mode)) {
2036 2035 ret = EFAULT;
2037 2036 } else if (ddi_copyin(u_bufaddr,
2038 2037 k_bufaddr,
2039 2038 k_fscsi.scsi_buflen,
2040 2039 mode)) {
2041 2040 ret = EFAULT;
2042 2041 } else if (ddi_copyin(u_rqbufaddr,
2043 2042 k_rqbufaddr,
2044 2043 k_fscsi.scsi_rqlen,
2045 2044 mode)) {
2046 2045 ret = EFAULT;
2047 2046 }
2048 2047 }
2049 2048
2050 2049 /*
2051 2050 * Send scsi command (blocking)
2052 2051 */
2053 2052 if (ret == 0) {
2054 2053 /*
2055 2054 * Prior to sending the scsi command, the
2056 2055 * fcp_scsi_cmd data structure must contain kernel,
2057 2056 * not user, addresses.
2058 2057 */
2059 2058 k_fscsi.scsi_cdbbufaddr = k_cdbbufaddr;
2060 2059 k_fscsi.scsi_bufaddr = k_bufaddr;
2061 2060 k_fscsi.scsi_rqbufaddr = k_rqbufaddr;
2062 2061
2063 2062 ret = fcp_send_scsi_ioctl(&k_fscsi);
2064 2063
2065 2064 /*
2066 2065 * After sending the scsi command, the
2067 2066 * fcp_scsi_cmd data structure must contain user,
2068 2067 * not kernel, addresses.
2069 2068 */
2070 2069 k_fscsi.scsi_cdbbufaddr = u_cdbbufaddr;
2071 2070 k_fscsi.scsi_bufaddr = u_bufaddr;
2072 2071 k_fscsi.scsi_rqbufaddr = u_rqbufaddr;
2073 2072 }
2074 2073
2075 2074 /*
2076 2075 * Put fcp_scsi_cmd pointer fields to user address space
2077 2076 */
2078 2077 if (ret == 0) {
2079 2078 if (ddi_copyout(k_cdbbufaddr,
2080 2079 u_cdbbufaddr,
2081 2080 k_fscsi.scsi_cdblen,
2082 2081 mode)) {
2083 2082 ret = EFAULT;
2084 2083 } else if (ddi_copyout(k_bufaddr,
2085 2084 u_bufaddr,
2086 2085 k_fscsi.scsi_buflen,
2087 2086 mode)) {
2088 2087 ret = EFAULT;
2089 2088 } else if (ddi_copyout(k_rqbufaddr,
2090 2089 u_rqbufaddr,
2091 2090 k_fscsi.scsi_rqlen,
2092 2091 mode)) {
2093 2092 ret = EFAULT;
2094 2093 }
2095 2094 }
2096 2095
2097 2096 /*
2098 2097 * Free data for fcp_scsi_cmd pointer fields
2099 2098 */
2100 2099 if (k_cdbbufaddr != NULL) {
2101 2100 kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
2102 2101 }
2103 2102 if (k_bufaddr != NULL) {
2104 2103 kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
2105 2104 }
2106 2105 if (k_rqbufaddr != NULL) {
2107 2106 kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
2108 2107 }
2109 2108
2110 2109 /*
2111 2110 * Put fcp_scsi_cmd array element to user address space
2112 2111 */
2113 2112 temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
2114 2113 if (temp_ret != 0) {
2115 2114 ret = temp_ret;
2116 2115 }
2117 2116
2118 2117 /*
2119 2118 * Return status
2120 2119 */
2121 2120 return (ret);
2122 2121 }
2123 2122
2124 2123
2125 2124 /*
2126 2125 * fcp_copyin_scsi_cmd
2127 2126 * Copy in fcp_scsi_cmd data structure from user address space.
2128 2127 * The data may be in 32 bit or 64 bit modes.
2129 2128 *
2130 2129 * Input:
2131 2130 * base_addr = from address (user address space)
2132 2131 * mode = See ioctl(9E) and ddi_copyin(9F)
2133 2132 *
2134 2133 * Output:
2135 2134 * fscsi = to address (kernel address space)
2136 2135 *
2137 2136 * Returns:
2138 2137 * 0 = OK
2139 2138 * EFAULT = Error
2140 2139 *
2141 2140 * Context:
2142 2141 * Kernel context.
2143 2142 */
2144 2143 static int
2145 2144 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2146 2145 {
2147 2146 #ifdef _MULTI_DATAMODEL
2148 2147 struct fcp32_scsi_cmd f32scsi;
2149 2148
2150 2149 switch (ddi_model_convert_from(mode & FMODELS)) {
2151 2150 case DDI_MODEL_ILP32:
2152 2151 /*
2153 2152 * Copy data from user address space
2154 2153 */
2155 2154 if (ddi_copyin((void *)base_addr,
2156 2155 &f32scsi,
2157 2156 sizeof (struct fcp32_scsi_cmd),
2158 2157 mode)) {
2159 2158 return (EFAULT);
2160 2159 }
2161 2160 /*
2162 2161 * Convert from 32 bit to 64 bit
2163 2162 */
2164 2163 FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2165 2164 break;
2166 2165 case DDI_MODEL_NONE:
2167 2166 /*
2168 2167 * Copy data from user address space
2169 2168 */
2170 2169 if (ddi_copyin((void *)base_addr,
2171 2170 fscsi,
2172 2171 sizeof (struct fcp_scsi_cmd),
2173 2172 mode)) {
2174 2173 return (EFAULT);
2175 2174 }
2176 2175 break;
2177 2176 }
2178 2177 #else /* _MULTI_DATAMODEL */
2179 2178 /*
2180 2179 * Copy data from user address space
2181 2180 */
2182 2181 if (ddi_copyin((void *)base_addr,
2183 2182 fscsi,
2184 2183 sizeof (struct fcp_scsi_cmd),
2185 2184 mode)) {
2186 2185 return (EFAULT);
2187 2186 }
2188 2187 #endif /* _MULTI_DATAMODEL */
2189 2188
2190 2189 return (0);
2191 2190 }
2192 2191
2193 2192
2194 2193 /*
2195 2194 * fcp_copyout_scsi_cmd
2196 2195 * Copy out fcp_scsi_cmd data structure to user address space.
2197 2196 * The data may be in 32 bit or 64 bit modes.
2198 2197 *
2199 2198 * Input:
2200 2199 * fscsi = to address (kernel address space)
2201 2200 * mode = See ioctl(9E) and ddi_copyin(9F)
2202 2201 *
2203 2202 * Output:
2204 2203 * base_addr = from address (user address space)
2205 2204 *
2206 2205 * Returns:
2207 2206 * 0 = OK
2208 2207 * EFAULT = Error
2209 2208 *
2210 2209 * Context:
2211 2210 * Kernel context.
2212 2211 */
2213 2212 static int
2214 2213 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2215 2214 {
2216 2215 #ifdef _MULTI_DATAMODEL
2217 2216 struct fcp32_scsi_cmd f32scsi;
2218 2217
2219 2218 switch (ddi_model_convert_from(mode & FMODELS)) {
2220 2219 case DDI_MODEL_ILP32:
2221 2220 /*
2222 2221 * Convert from 64 bit to 32 bit
2223 2222 */
2224 2223 FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2225 2224 /*
2226 2225 * Copy data to user address space
2227 2226 */
2228 2227 if (ddi_copyout(&f32scsi,
2229 2228 (void *)base_addr,
2230 2229 sizeof (struct fcp32_scsi_cmd),
2231 2230 mode)) {
2232 2231 return (EFAULT);
2233 2232 }
2234 2233 break;
2235 2234 case DDI_MODEL_NONE:
2236 2235 /*
2237 2236 * Copy data to user address space
2238 2237 */
2239 2238 if (ddi_copyout(fscsi,
2240 2239 (void *)base_addr,
2241 2240 sizeof (struct fcp_scsi_cmd),
2242 2241 mode)) {
2243 2242 return (EFAULT);
2244 2243 }
2245 2244 break;
2246 2245 }
2247 2246 #else /* _MULTI_DATAMODEL */
2248 2247 /*
2249 2248 * Copy data to user address space
2250 2249 */
2251 2250 if (ddi_copyout(fscsi,
2252 2251 (void *)base_addr,
2253 2252 sizeof (struct fcp_scsi_cmd),
2254 2253 mode)) {
2255 2254 return (EFAULT);
2256 2255 }
2257 2256 #endif /* _MULTI_DATAMODEL */
2258 2257
2259 2258 return (0);
2260 2259 }
2261 2260
2262 2261
2263 2262 /*
2264 2263 * fcp_send_scsi_ioctl
2265 2264 * Sends the SCSI command in blocking mode.
2266 2265 *
2267 2266 * Input:
2268 2267 * fscsi = SCSI command data structure
2269 2268 *
2270 2269 * Output:
2271 2270 * fscsi = SCSI command data structure
2272 2271 *
2273 2272 * Returns:
2274 2273 * 0 = OK
2275 2274 * EAGAIN = See errno.h
2276 2275 * EBUSY = See errno.h
2277 2276 * EINTR = See errno.h
2278 2277 * EINVAL = See errno.h
2279 2278 * EIO = See errno.h
2280 2279 * ENOMEM = See errno.h
2281 2280 * ENXIO = See errno.h
2282 2281 *
2283 2282 * Context:
2284 2283 * Kernel context.
2285 2284 */
2286 2285 static int
2287 2286 fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
2288 2287 {
2289 2288 struct fcp_lun *plun = NULL;
2290 2289 struct fcp_port *pptr = NULL;
2291 2290 struct fcp_tgt *ptgt = NULL;
2292 2291 fc_packet_t *fpkt = NULL;
2293 2292 struct fcp_ipkt *icmd = NULL;
2294 2293 int target_created = FALSE;
2295 2294 fc_frame_hdr_t *hp;
2296 2295 struct fcp_cmd fcp_cmd;
2297 2296 struct fcp_cmd *fcmd;
2298 2297 union scsi_cdb *scsi_cdb;
2299 2298 la_wwn_t *wwn_ptr;
2300 2299 int nodma;
2301 2300 struct fcp_rsp *rsp;
2302 2301 struct fcp_rsp_info *rsp_info;
2303 2302 caddr_t rsp_sense;
2304 2303 int buf_len;
2305 2304 int info_len;
2306 2305 int sense_len;
2307 2306 struct scsi_extended_sense *sense_to = NULL;
2308 2307 timeout_id_t tid;
2309 2308 uint8_t reconfig_lun = FALSE;
2310 2309 uint8_t reconfig_pending = FALSE;
2311 2310 uint8_t scsi_cmd;
2312 2311 int rsp_len;
2313 2312 int cmd_index;
2314 2313 int fc_status;
2315 2314 int pkt_state;
2316 2315 int pkt_action;
2317 2316 int pkt_reason;
2318 2317 int ret, xport_retval = ~FC_SUCCESS;
2319 2318 int lcount;
2320 2319 int tcount;
2321 2320 int reconfig_status;
2322 2321 int port_busy = FALSE;
2323 2322 uchar_t *lun_string;
2324 2323
2325 2324 /*
2326 2325 * Check valid SCSI command
2327 2326 */
2328 2327 scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
2329 2328 ret = EINVAL;
2330 2329 for (cmd_index = 0;
2331 2330 cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
2332 2331 ret != 0;
2333 2332 cmd_index++) {
2334 2333 /*
2335 2334 * First byte of CDB is the SCSI command
2336 2335 */
2337 2336 if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
2338 2337 ret = 0;
2339 2338 }
2340 2339 }
2341 2340
2342 2341 /*
2343 2342 * Check inputs
2344 2343 */
2345 2344 if (fscsi->scsi_flags != FCP_SCSI_READ) {
2346 2345 ret = EINVAL;
2347 2346 } else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
2348 2347 		/* CDB may be no larger than FCP_CDB_SIZE */
2349 2348 ret = EINVAL;
2350 2349 }
2351 2350
2352 2351
2353 2352 /*
2354 2353 * Find FC port
2355 2354 */
2356 2355 if (ret == 0) {
2357 2356 /*
2358 2357 * Acquire global mutex
2359 2358 */
2360 2359 mutex_enter(&fcp_global_mutex);
2361 2360
2362 2361 pptr = fcp_port_head;
2363 2362 while (pptr) {
2364 2363 if (pptr->port_instance ==
2365 2364 (uint32_t)fscsi->scsi_fc_port_num) {
2366 2365 break;
2367 2366 } else {
2368 2367 pptr = pptr->port_next;
2369 2368 }
2370 2369 }
2371 2370
2372 2371 if (pptr == NULL) {
2373 2372 ret = ENXIO;
2374 2373 } else {
2375 2374 /*
2376 2375 * fc_ulp_busy_port can raise power
2377 2376 			 * fc_ulp_busy_port() can raise the power level,
2378 2377 			 * so we must not hold any mutexes involved in PM.
2379 2378 mutex_exit(&fcp_global_mutex);
2380 2379 ret = fc_ulp_busy_port(pptr->port_fp_handle);
2381 2380 }
2382 2381
2383 2382 if (ret == 0) {
2384 2383
2385 2384 /* remember port is busy, so we will release later */
2386 2385 port_busy = TRUE;
2387 2386
2388 2387 /*
2389 2388 * If there is a reconfiguration in progress, wait
2390 2389 * for it to complete.
2391 2390 */
2392 2391
2393 2392 fcp_reconfig_wait(pptr);
2394 2393
2395 2394 /* reacquire mutexes in order */
2396 2395 mutex_enter(&fcp_global_mutex);
2397 2396 mutex_enter(&pptr->port_mutex);
2398 2397
2399 2398 /*
2400 2399 * Will port accept DMA?
2401 2400 */
2402 2401 nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
2403 2402 ? 1 : 0;
2404 2403
2405 2404 /*
2406 2405 			 * If init or offline, the device is not known.
2407 2406 			 *
2408 2407 			 * If we are discovering (onlining), we obviously
2409 2408 			 * cannot provide reliable data about devices
2410 2409 			 * until discovery is complete.
2411 2410 */
2412 2411 if (pptr->port_state & (FCP_STATE_INIT |
2413 2412 FCP_STATE_OFFLINE)) {
2414 2413 ret = ENXIO;
2415 2414 } else if (pptr->port_state & FCP_STATE_ONLINING) {
2416 2415 ret = EBUSY;
2417 2416 } else {
2418 2417 /*
2419 2418 * Find target from pwwn
2420 2419 *
2421 2420 * The wwn must be put into a local
2422 2421 * variable to ensure alignment.
2423 2422 */
2424 2423 wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2425 2424 ptgt = fcp_lookup_target(pptr,
2426 2425 (uchar_t *)wwn_ptr);
2427 2426
2428 2427 /*
2429 2428 				 * If the target was not found, create it.
2430 2429 */
2431 2430 if (ptgt == NULL) {
2432 2431 /*
2433 2432 * Note: Still have global &
2434 2433 * port mutexes
2435 2434 */
2436 2435 mutex_exit(&pptr->port_mutex);
2437 2436 ptgt = fcp_port_create_tgt(pptr,
2438 2437 wwn_ptr, &ret, &fc_status,
2439 2438 &pkt_state, &pkt_action,
2440 2439 &pkt_reason);
2441 2440 mutex_enter(&pptr->port_mutex);
2442 2441
2443 2442 fscsi->scsi_fc_status = fc_status;
2444 2443 fscsi->scsi_pkt_state =
2445 2444 (uchar_t)pkt_state;
2446 2445 fscsi->scsi_pkt_reason = pkt_reason;
2447 2446 fscsi->scsi_pkt_action =
2448 2447 (uchar_t)pkt_action;
2449 2448
2450 2449 if (ptgt != NULL) {
2451 2450 target_created = TRUE;
2452 2451 } else if (ret == 0) {
2453 2452 ret = ENOMEM;
2454 2453 }
2455 2454 }
2456 2455
2457 2456 if (ret == 0) {
2458 2457 /*
2459 2458 * Acquire target
2460 2459 */
2461 2460 mutex_enter(&ptgt->tgt_mutex);
2462 2461
2463 2462 /*
2464 2463 					 * If the target is marked or busy,
2465 2464 					 * then it cannot be used.
2466 2465 */
2467 2466 if (ptgt->tgt_state &
2468 2467 (FCP_TGT_MARK |
2469 2468 FCP_TGT_BUSY)) {
2470 2469 ret = EBUSY;
2471 2470 } else {
2472 2471 /*
2473 2472 * Mark target as busy
2474 2473 */
2475 2474 ptgt->tgt_state |=
2476 2475 FCP_TGT_BUSY;
2477 2476 }
2478 2477
2479 2478 /*
2480 2479 * Release target
2481 2480 */
2482 2481 lcount = pptr->port_link_cnt;
2483 2482 tcount = ptgt->tgt_change_cnt;
2484 2483 mutex_exit(&ptgt->tgt_mutex);
2485 2484 }
2486 2485 }
2487 2486
2488 2487 /*
2489 2488 * Release port
2490 2489 */
2491 2490 mutex_exit(&pptr->port_mutex);
2492 2491 }
2493 2492
2494 2493 /*
2495 2494 * Release global mutex
2496 2495 */
2497 2496 mutex_exit(&fcp_global_mutex);
2498 2497 }
2499 2498
2500 2499 if (ret == 0) {
2501 2500 uint64_t belun = BE_64(fscsi->scsi_lun);
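		/*
		 * After the BE_64 conversion the first level of the 8-byte
		 * SAM LUN occupies the top 16 bits of belun: bits 63-62 hold
		 * the addressing method and bits 61-48 the LUN number.
		 * (belun << 16) != 0 below therefore means one of the
		 * remaining six LUN bytes is non-zero.
		 */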
2502 2501
2503 2502 /*
2504 2503 		 * If it's a target device, find the LUN from the pwwn.
2505 2504 * The wwn must be put into a local
2506 2505 * variable to ensure alignment.
2507 2506 */
2508 2507 mutex_enter(&pptr->port_mutex);
2509 2508 wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2510 2509 if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
2511 2510 /* this is not a target */
2512 2511 fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
2513 2512 ret = ENXIO;
2514 2513 } else if ((belun << 16) != 0) {
2515 2514 /*
2516 2515 			 * Since fcp only supports PD and LU addressing methods
2517 2516 * so far, the last 6 bytes of a valid LUN are expected
2518 2517 * to be filled with 00h.
2519 2518 */
2520 2519 fscsi->scsi_fc_status = FC_INVALID_LUN;
2521 2520 cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
2522 2521 " method 0x%02x with LUN number 0x%016" PRIx64,
2523 2522 (uint8_t)(belun >> 62), belun);
2524 2523 ret = ENXIO;
2525 2524 } else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
2526 2525 (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
2527 2526 /*
2528 2527 * This is a SCSI target, but no LUN at this
2529 2528 * address.
2530 2529 *
2531 2530 * In the future, we may want to send this to
2532 2531 * the target, and let it respond
2533 2532 * appropriately
2534 2533 */
2535 2534 ret = ENXIO;
2536 2535 }
2537 2536 mutex_exit(&pptr->port_mutex);
2538 2537 }
2539 2538
2540 2539 /*
2541 2540 * Finished grabbing external resources
2542 2541 * Allocate internal packet (icmd)
2543 2542 */
2544 2543 if (ret == 0) {
2545 2544 /*
2546 2545 * Calc rsp len assuming rsp info included
2547 2546 */
2548 2547 rsp_len = sizeof (struct fcp_rsp) +
2549 2548 sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;
2550 2549
2551 2550 icmd = fcp_icmd_alloc(pptr, ptgt,
2552 2551 sizeof (struct fcp_cmd),
2553 2552 rsp_len,
2554 2553 fscsi->scsi_buflen,
2555 2554 nodma,
2556 2555 lcount, /* ipkt_link_cnt */
2557 2556 tcount, /* ipkt_change_cnt */
2558 2557 0, /* cause */
2559 2558 FC_INVALID_RSCN_COUNT); /* invalidate the count */
2560 2559
2561 2560 if (icmd == NULL) {
2562 2561 ret = ENOMEM;
2563 2562 } else {
2564 2563 /*
2565 2564 * Setup internal packet as sema sync
2566 2565 */
2567 2566 fcp_ipkt_sema_init(icmd);
2568 2567 }
2569 2568 }
2570 2569
2571 2570 if (ret == 0) {
2572 2571 /*
2573 2572 * Init fpkt pointer for use.
2574 2573 */
2575 2574
2576 2575 fpkt = icmd->ipkt_fpkt;
2577 2576
2578 2577 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
2579 2578 fpkt->pkt_tran_type = FC_PKT_FCP_READ; /* only rd for now */
2580 2579 fpkt->pkt_timeout = fscsi->scsi_timeout;
2581 2580
2582 2581 /*
2583 2582 * Init fcmd pointer for use by SCSI command
2584 2583 */
2585 2584
2586 2585 if (nodma) {
2587 2586 fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
2588 2587 } else {
2589 2588 fcmd = &fcp_cmd;
2590 2589 }
2591 2590 bzero(fcmd, sizeof (struct fcp_cmd));
2592 2591 ptgt = plun->lun_tgt;
2593 2592
2594 2593 lun_string = (uchar_t *)&fscsi->scsi_lun;
2595 2594
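		/*
		 * Repack the 8-byte LUN supplied through the ioctl into the
		 * four 16-bit entity address words of the FCP_CMND payload,
		 * keeping the bytes in wire (big-endian) order.
		 */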
2596 2595 fcmd->fcp_ent_addr.ent_addr_0 =
2597 2596 BE_16(*(uint16_t *)&(lun_string[0]));
2598 2597 fcmd->fcp_ent_addr.ent_addr_1 =
2599 2598 BE_16(*(uint16_t *)&(lun_string[2]));
2600 2599 fcmd->fcp_ent_addr.ent_addr_2 =
2601 2600 BE_16(*(uint16_t *)&(lun_string[4]));
2602 2601 fcmd->fcp_ent_addr.ent_addr_3 =
2603 2602 BE_16(*(uint16_t *)&(lun_string[6]));
2604 2603
2605 2604 /*
2606 2605 * Setup internal packet(icmd)
2607 2606 */
2608 2607 icmd->ipkt_lun = plun;
2609 2608 icmd->ipkt_restart = 0;
2610 2609 icmd->ipkt_retries = 0;
2611 2610 icmd->ipkt_opcode = 0;
2612 2611
2613 2612 /*
2614 2613 		 * Init the frame header pointer for use.
2615 2614 */
2616 2615 hp = &fpkt->pkt_cmd_fhdr;
2617 2616
2618 2617 hp->s_id = pptr->port_id;
2619 2618 hp->d_id = ptgt->tgt_d_id;
2620 2619 hp->r_ctl = R_CTL_COMMAND;
2621 2620 hp->type = FC_TYPE_SCSI_FCP;
2622 2621 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
2623 2622 hp->rsvd = 0;
2624 2623 hp->seq_id = 0;
2625 2624 hp->seq_cnt = 0;
2626 2625 hp->ox_id = 0xffff;
2627 2626 hp->rx_id = 0xffff;
2628 2627 hp->ro = 0;
2629 2628
2630 2629 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
2631 2630 fcmd->fcp_cntl.cntl_read_data = 1; /* only rd for now */
2632 2631 fcmd->fcp_cntl.cntl_write_data = 0;
2633 2632 fcmd->fcp_data_len = fscsi->scsi_buflen;
2634 2633
2635 2634 scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
2636 2635 bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
2637 2636 fscsi->scsi_cdblen);
2638 2637
2639 2638 if (!nodma) {
2640 2639 FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
2641 2640 fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
2642 2641 }
2643 2642
2644 2643 /*
2645 2644 * Send SCSI command to FC transport
2646 2645 */
2647 2646
2648 2647 if (ret == 0) {
2649 2648 mutex_enter(&ptgt->tgt_mutex);
2650 2649
2651 2650 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
2652 2651 mutex_exit(&ptgt->tgt_mutex);
2653 2652 fscsi->scsi_fc_status = xport_retval =
2654 2653 fc_ulp_transport(pptr->port_fp_handle,
2655 2654 fpkt);
2656 2655 if (fscsi->scsi_fc_status != FC_SUCCESS) {
2657 2656 ret = EIO;
2658 2657 }
2659 2658 } else {
2660 2659 mutex_exit(&ptgt->tgt_mutex);
2661 2660 ret = EBUSY;
2662 2661 }
2663 2662 }
2664 2663 }
2665 2664
2666 2665 /*
2667 2666 * Wait for completion only if fc_ulp_transport was called and it
2668 2667 * returned a success. This is the only time callback will happen.
2669 2668 * Otherwise, there is no point in waiting
2670 2669 */
2671 2670 if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2672 2671 ret = fcp_ipkt_sema_wait(icmd);
2673 2672 }
2674 2673
2675 2674 /*
2676 2675 * Copy data to IOCTL data structures
2677 2676 */
2678 2677 rsp = NULL;
2679 2678 if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2680 2679 rsp = (struct fcp_rsp *)fpkt->pkt_resp;
2681 2680
2682 2681 if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
2683 2682 fcp_log(CE_WARN, pptr->port_dip,
2684 2683 "!SCSI command to d_id=0x%x lun=0x%x"
2685 2684 " failed, Bad FCP response values:"
2686 2685 " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
2687 2686 " sts-rsvd2=%x, rsplen=%x, senselen=%x",
2688 2687 ptgt->tgt_d_id, plun->lun_num,
2689 2688 rsp->reserved_0, rsp->reserved_1,
2690 2689 rsp->fcp_u.fcp_status.reserved_0,
2691 2690 rsp->fcp_u.fcp_status.reserved_1,
2692 2691 rsp->fcp_response_len, rsp->fcp_sense_len);
2693 2692
2694 2693 ret = EIO;
2695 2694 }
2696 2695 }
2697 2696
2698 2697 if ((ret == 0) && (rsp != NULL)) {
2699 2698 /*
2700 2699 * Calc response lengths
2701 2700 */
2702 2701 sense_len = 0;
2703 2702 info_len = 0;
2704 2703
2705 2704 if (rsp->fcp_u.fcp_status.rsp_len_set) {
2706 2705 info_len = rsp->fcp_response_len;
2707 2706 }
2708 2707
2709 2708 rsp_info = (struct fcp_rsp_info *)
2710 2709 ((uint8_t *)rsp + sizeof (struct fcp_rsp));
2711 2710
2712 2711 /*
2713 2712 * Get SCSI status
2714 2713 */
2715 2714 fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
2716 2715 /*
2717 2716 * If a lun was just added or removed and the next command
2718 2717 * comes through this interface, we need to capture the check
2719 2718 * condition so we can discover the new topology.
2720 2719 */
2721 2720 if (fscsi->scsi_bufstatus != STATUS_GOOD &&
2722 2721 rsp->fcp_u.fcp_status.sense_len_set) {
2723 2722 sense_len = rsp->fcp_sense_len;
2724 2723 rsp_sense = (caddr_t)((uint8_t *)rsp_info + info_len);
2725 2724 sense_to = (struct scsi_extended_sense *)rsp_sense;
2726 2725 if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
2727 2726 (FCP_SENSE_NO_LUN(sense_to))) {
2728 2727 reconfig_lun = TRUE;
2729 2728 }
2730 2729 }
2731 2730
2732 2731 if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
2733 2732 (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
2734 2733 if (reconfig_lun == FALSE) {
2735 2734 reconfig_status =
2736 2735 fcp_is_reconfig_needed(ptgt, fpkt);
2737 2736 }
2738 2737
2739 2738 if ((reconfig_lun == TRUE) ||
2740 2739 (reconfig_status == TRUE)) {
2741 2740 mutex_enter(&ptgt->tgt_mutex);
2742 2741 if (ptgt->tgt_tid == NULL) {
2743 2742 /*
2744 2743 * Either we've been notified the
2745 2744 * REPORT_LUN data has changed, or
2746 2745 * we've determined on our own that
2747 2746 * we're out of date. Kick off
2748 2747 * rediscovery.
2749 2748 */
2750 2749 tid = timeout(fcp_reconfigure_luns,
2751 2750 (caddr_t)ptgt, drv_usectohz(1));
2752 2751
2753 2752 ptgt->tgt_tid = tid;
2754 2753 ptgt->tgt_state |= FCP_TGT_BUSY;
2755 2754 ret = EBUSY;
2756 2755 reconfig_pending = TRUE;
2757 2756 }
2758 2757 mutex_exit(&ptgt->tgt_mutex);
2759 2758 }
2760 2759 }
2761 2760
2762 2761 /*
2763 2762 * Calc residuals and buffer lengths
2764 2763 */
2765 2764
2766 2765 if (ret == 0) {
2767 2766 buf_len = fscsi->scsi_buflen;
2768 2767 fscsi->scsi_bufresid = 0;
2769 2768 if (rsp->fcp_u.fcp_status.resid_under) {
2770 2769 if (rsp->fcp_resid <= fscsi->scsi_buflen) {
2771 2770 fscsi->scsi_bufresid = rsp->fcp_resid;
2772 2771 } else {
2773 2772 cmn_err(CE_WARN, "fcp: bad residue %x "
2774 2773 "for txfer len %x", rsp->fcp_resid,
2775 2774 fscsi->scsi_buflen);
2776 2775 fscsi->scsi_bufresid =
2777 2776 fscsi->scsi_buflen;
2778 2777 }
2779 2778 buf_len -= fscsi->scsi_bufresid;
2780 2779 }
2781 2780 if (rsp->fcp_u.fcp_status.resid_over) {
2782 2781 fscsi->scsi_bufresid = -rsp->fcp_resid;
2783 2782 }
2784 2783
2785 2784 fscsi->scsi_rqresid = fscsi->scsi_rqlen - sense_len;
2786 2785 if (fscsi->scsi_rqlen < sense_len) {
2787 2786 sense_len = fscsi->scsi_rqlen;
2788 2787 }
2789 2788
2790 2789 fscsi->scsi_fc_rspcode = 0;
2791 2790 if (rsp->fcp_u.fcp_status.rsp_len_set) {
2792 2791 fscsi->scsi_fc_rspcode = rsp_info->rsp_code;
2793 2792 }
2794 2793 fscsi->scsi_pkt_state = fpkt->pkt_state;
2795 2794 fscsi->scsi_pkt_action = fpkt->pkt_action;
2796 2795 fscsi->scsi_pkt_reason = fpkt->pkt_reason;
2797 2796
2798 2797 /*
2799 2798 * Copy data and request sense
2800 2799 *
2801 2800 * Data must be copied by using the FCP_CP_IN macro.
2802 2801 * This will ensure the proper byte order since the data
2803 2802 * is being copied directly from the memory mapped
2804 2803 * device register.
2805 2804 *
2806 2805 * The response (and request sense) will be in the
2807 2806 * correct byte order. No special copy is necessary.
2808 2807 */
2809 2808
2810 2809 if (buf_len) {
2811 2810 FCP_CP_IN(fpkt->pkt_data,
2812 2811 fscsi->scsi_bufaddr,
2813 2812 fpkt->pkt_data_acc,
2814 2813 buf_len);
2815 2814 }
2816 2815 bcopy((void *)rsp_sense,
2817 2816 (void *)fscsi->scsi_rqbufaddr,
2818 2817 sense_len);
2819 2818 }
2820 2819 }
2821 2820
2822 2821 /*
2823 2822 	 * Clean up transport data structures if icmd was allocated,
2824 2823 	 * so cleanup happens in the same thread that allocated icmd.
2825 2824 */
2826 2825 if (icmd != NULL) {
2827 2826 fcp_ipkt_sema_cleanup(icmd);
2828 2827 }
2829 2828
2830 2829 /* restore pm busy/idle status */
2831 2830 if (port_busy) {
2832 2831 fc_ulp_idle_port(pptr->port_fp_handle);
2833 2832 }
2834 2833
2835 2834 /*
2836 2835 	 * Clean up the target. If a reconfig is pending, don't clear the BUSY
2837 2836 * flag, it'll be cleared when the reconfig is complete.
2838 2837 */
2839 2838 if ((ptgt != NULL) && !reconfig_pending) {
2840 2839 /*
2841 2840 * If target was created,
2842 2841 */
2843 2842 if (target_created) {
2844 2843 mutex_enter(&ptgt->tgt_mutex);
2845 2844 ptgt->tgt_state &= ~FCP_TGT_BUSY;
2846 2845 mutex_exit(&ptgt->tgt_mutex);
2847 2846 } else {
2848 2847 /*
2849 2848 * De-mark target as busy
2850 2849 */
2851 2850 mutex_enter(&ptgt->tgt_mutex);
2852 2851 ptgt->tgt_state &= ~FCP_TGT_BUSY;
2853 2852 mutex_exit(&ptgt->tgt_mutex);
2854 2853 }
2855 2854 }
2856 2855 return (ret);
2857 2856 }
2858 2857
2859 2858
2860 2859 static int
2861 2860 fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
2862 2861 fc_packet_t *fpkt)
2863 2862 {
2864 2863 uchar_t *lun_string;
2865 2864 uint16_t lun_num, i;
2866 2865 int num_luns;
2867 2866 int actual_luns;
2868 2867 int num_masked_luns;
2869 2868 int lun_buflen;
2870 2869 struct fcp_lun *plun = NULL;
2871 2870 struct fcp_reportlun_resp *report_lun;
2872 2871 uint8_t reconfig_needed = FALSE;
2873 2872 uint8_t lun_exists = FALSE;
2874 2873 fcp_port_t *pptr = ptgt->tgt_port;
2875 2874
2876 2875 report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);
2877 2876
2878 2877 FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
2879 2878 fpkt->pkt_datalen);
2880 2879
2881 2880 /* get number of luns (which is supplied as LUNS * 8) */
2882 2881 num_luns = BE_32(report_lun->num_lun) >> 3;
2883 2882
2884 2883 /*
2885 2884 * Figure out exactly how many lun strings our response buffer
2886 2885 * can hold.
2887 2886 */
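	/*
	 * The REPORT LUNS response carries an 8-byte header (the LUN list
	 * length word plus a reserved word) followed by 8-byte LUN entries,
	 * hence the arithmetic below.
	 */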
2888 2887 lun_buflen = (fpkt->pkt_datalen -
2889 2888 2 * sizeof (uint32_t)) / sizeof (longlong_t);
2890 2889
2891 2890 /*
2892 2891 * Is our response buffer full or not? We don't want to
2893 2892 * potentially walk beyond the number of luns we have.
2894 2893 */
2895 2894 if (num_luns <= lun_buflen) {
2896 2895 actual_luns = num_luns;
2897 2896 } else {
2898 2897 actual_luns = lun_buflen;
2899 2898 }
2900 2899
2901 2900 mutex_enter(&ptgt->tgt_mutex);
2902 2901
2903 2902 /* Scan each lun to see if we have masked it. */
2904 2903 num_masked_luns = 0;
2905 2904 if (fcp_lun_blacklist != NULL) {
2906 2905 for (i = 0; i < actual_luns; i++) {
2907 2906 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2908 2907 switch (lun_string[0] & 0xC0) {
2909 2908 case FCP_LUN_ADDRESSING:
2910 2909 case FCP_PD_ADDRESSING:
2911 2910 case FCP_VOLUME_ADDRESSING:
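				/*
				 * Strip the two addressing-method bits from
				 * the first LUN byte and combine it with the
				 * second byte to form the 14-bit LUN number.
				 */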
2912 2911 lun_num = ((lun_string[0] & 0x3F) << 8)
2913 2912 | lun_string[1];
2914 2913 if (fcp_should_mask(&ptgt->tgt_port_wwn,
2915 2914 lun_num) == TRUE) {
2916 2915 num_masked_luns++;
2917 2916 }
2918 2917 break;
2919 2918 default:
2920 2919 break;
2921 2920 }
2922 2921 }
2923 2922 }
2924 2923
2925 2924 /*
2926 2925 * The quick and easy check. If the number of LUNs reported
2927 2926 * doesn't match the number we currently know about, we need
2928 2927 * to reconfigure.
2929 2928 */
2930 2929 if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
2931 2930 mutex_exit(&ptgt->tgt_mutex);
2932 2931 kmem_free(report_lun, fpkt->pkt_datalen);
2933 2932 return (TRUE);
2934 2933 }
2935 2934
2936 2935 /*
2937 2936 * If the quick and easy check doesn't turn up anything, we walk
2938 2937 * the list of luns from the REPORT_LUN response and look for
2939 2938 * any luns we don't know about. If we find one, we know we need
2940 2939 * to reconfigure. We will skip LUNs that are masked because of the
2941 2940 * blacklist.
2942 2941 */
2943 2942 for (i = 0; i < actual_luns; i++) {
2944 2943 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2945 2944 lun_exists = FALSE;
2946 2945 switch (lun_string[0] & 0xC0) {
2947 2946 case FCP_LUN_ADDRESSING:
2948 2947 case FCP_PD_ADDRESSING:
2949 2948 case FCP_VOLUME_ADDRESSING:
2950 2949 lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
2951 2950
2952 2951 if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
2953 2952 &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
2954 2953 lun_exists = TRUE;
2955 2954 break;
2956 2955 }
2957 2956
2958 2957 for (plun = ptgt->tgt_lun; plun;
2959 2958 plun = plun->lun_next) {
2960 2959 if (plun->lun_num == lun_num) {
2961 2960 lun_exists = TRUE;
2962 2961 break;
2963 2962 }
2964 2963 }
2965 2964 break;
2966 2965 default:
2967 2966 break;
2968 2967 }
2969 2968
2970 2969 if (lun_exists == FALSE) {
2971 2970 reconfig_needed = TRUE;
2972 2971 break;
2973 2972 }
2974 2973 }
2975 2974
2976 2975 mutex_exit(&ptgt->tgt_mutex);
2977 2976 kmem_free(report_lun, fpkt->pkt_datalen);
2978 2977
2979 2978 return (reconfig_needed);
2980 2979 }
2981 2980
2982 2981 /*
2983 2982 * This function is called by fcp_handle_page83 and uses inquiry response data
2984 2983 * stored in plun->lun_inq to determine whether or not a device is a member of
2985 2984  * the table fcp_symmetric_disk_table. We return 0 if it is in the table,
2986 2985 * otherwise 1.
2987 2986 */
2988 2987 static int
2989 2988 fcp_symmetric_device_probe(struct fcp_lun *plun)
2990 2989 {
2991 2990 struct scsi_inquiry *stdinq = &plun->lun_inq;
2992 2991 char *devidptr;
2993 2992 int i, len;
2994 2993
2995 2994 for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
2996 2995 devidptr = fcp_symmetric_disk_table[i];
2997 2996 len = (int)strlen(devidptr);
2998 2997
2999 2998 if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
3000 2999 return (0);
3001 3000 }
3002 3001 }
3003 3002 return (1);
3004 3003 }
3005 3004
3006 3005
3007 3006 /*
3008 3007  * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl.
3009 3008  * It basically returns the current count of state change callbacks,
3010 3009  * i.e. the value of the port's link count (port_link_cnt).
3011 3010 *
3012 3011 * INPUT:
3013 3012 * fcp_ioctl.fp_minor -> The minor # of the fp port
3014 3013 * fcp_ioctl.listlen -> 1
3015 3014 * fcp_ioctl.list -> Pointer to a 32 bit integer
3016 3015 */
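/*
 * A consumer typically passes listlen = 1 and a pointer to a uint32_t that
 * receives the current count; a difference between two successive calls
 * indicates intervening state change activity on the port.
 */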
3017 3016 /*ARGSUSED2*/
3018 3017 static int
3019 3018 fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
3020 3019 {
3021 3020 int ret;
3022 3021 uint32_t link_cnt;
3023 3022 struct fcp_ioctl fioctl;
3024 3023 struct fcp_port *pptr = NULL;
3025 3024
3026 3025 if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
3027 3026 &pptr)) != 0) {
3028 3027 return (ret);
3029 3028 }
3030 3029
3031 3030 ASSERT(pptr != NULL);
3032 3031
3033 3032 if (fioctl.listlen != 1) {
3034 3033 return (EINVAL);
3035 3034 }
3036 3035
3037 3036 mutex_enter(&pptr->port_mutex);
3038 3037 if (pptr->port_state & FCP_STATE_OFFLINE) {
3039 3038 mutex_exit(&pptr->port_mutex);
3040 3039 return (ENXIO);
3041 3040 }
3042 3041
3043 3042 /*
3044 3043 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
3045 3044 	 * When fcp initially attaches to the port and there is nothing
3046 3045 	 * hanging off of the port, or if there was a repeat offline state change
3047 3046 * callback (refer fcp_statec_callback() FC_STATE_OFFLINE case).
3048 3047 * In the latter case, port_tmp_cnt will be non-zero and that is how we
3049 3048 * will differentiate the 2 cases.
3050 3049 */
3051 3050 if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
3052 3051 mutex_exit(&pptr->port_mutex);
3053 3052 return (ENXIO);
3054 3053 }
3055 3054
3056 3055 link_cnt = pptr->port_link_cnt;
3057 3056 mutex_exit(&pptr->port_mutex);
3058 3057
3059 3058 if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
3060 3059 return (EFAULT);
3061 3060 }
3062 3061
3063 3062 #ifdef _MULTI_DATAMODEL
3064 3063 switch (ddi_model_convert_from(mode & FMODELS)) {
3065 3064 case DDI_MODEL_ILP32: {
3066 3065 struct fcp32_ioctl f32_ioctl;
3067 3066
3068 3067 f32_ioctl.fp_minor = fioctl.fp_minor;
3069 3068 f32_ioctl.listlen = fioctl.listlen;
3070 3069 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
3071 3070 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
3072 3071 sizeof (struct fcp32_ioctl), mode)) {
3073 3072 return (EFAULT);
3074 3073 }
3075 3074 break;
3076 3075 }
3077 3076 case DDI_MODEL_NONE:
3078 3077 if (ddi_copyout((void *)&fioctl, (void *)data,
3079 3078 sizeof (struct fcp_ioctl), mode)) {
3080 3079 return (EFAULT);
3081 3080 }
3082 3081 break;
3083 3082 }
3084 3083 #else /* _MULTI_DATAMODEL */
3085 3084
3086 3085 if (ddi_copyout((void *)&fioctl, (void *)data,
3087 3086 sizeof (struct fcp_ioctl), mode)) {
3088 3087 return (EFAULT);
3089 3088 }
3090 3089 #endif /* _MULTI_DATAMODEL */
3091 3090
3092 3091 return (0);
3093 3092 }
3094 3093
3095 3094 /*
3096 3095  * This function copies the fcp_ioctl structure passed in from userland
3097 3096  * into the kernel. Handles 32-bit applications.
3098 3097 */
3099 3098 /*ARGSUSED*/
3100 3099 static int
3101 3100 fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
3102 3101 struct fcp_ioctl *fioctl, struct fcp_port **pptr)
3103 3102 {
3104 3103 struct fcp_port *t_pptr;
3105 3104
3106 3105 #ifdef _MULTI_DATAMODEL
3107 3106 switch (ddi_model_convert_from(mode & FMODELS)) {
3108 3107 case DDI_MODEL_ILP32: {
3109 3108 struct fcp32_ioctl f32_ioctl;
3110 3109
3111 3110 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
3112 3111 sizeof (struct fcp32_ioctl), mode)) {
3113 3112 return (EFAULT);
3114 3113 }
3115 3114 fioctl->fp_minor = f32_ioctl.fp_minor;
3116 3115 fioctl->listlen = f32_ioctl.listlen;
3117 3116 fioctl->list = (caddr_t)(long)f32_ioctl.list;
3118 3117 break;
3119 3118 }
3120 3119 case DDI_MODEL_NONE:
3121 3120 if (ddi_copyin((void *)data, (void *)fioctl,
3122 3121 sizeof (struct fcp_ioctl), mode)) {
3123 3122 return (EFAULT);
3124 3123 }
3125 3124 break;
3126 3125 }
3127 3126
3128 3127 #else /* _MULTI_DATAMODEL */
3129 3128 if (ddi_copyin((void *)data, (void *)fioctl,
3130 3129 sizeof (struct fcp_ioctl), mode)) {
3131 3130 return (EFAULT);
3132 3131 }
3133 3132 #endif /* _MULTI_DATAMODEL */
3134 3133
3135 3134 /*
3136 3135 	 * Right now we can assume that the minor number matches
3137 3136 	 * this instance of fp. If this changes, we will need to
3138 3137 * revisit this logic.
3139 3138 */
3140 3139 mutex_enter(&fcp_global_mutex);
3141 3140 t_pptr = fcp_port_head;
3142 3141 while (t_pptr) {
3143 3142 if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
3144 3143 break;
3145 3144 } else {
3146 3145 t_pptr = t_pptr->port_next;
3147 3146 }
3148 3147 }
3149 3148 *pptr = t_pptr;
3150 3149 mutex_exit(&fcp_global_mutex);
3151 3150 if (t_pptr == NULL) {
3152 3151 return (ENXIO);
3153 3152 }
3154 3153
3155 3154 return (0);
3156 3155 }
3157 3156
3158 3157 /*
3159 3158 * Function: fcp_port_create_tgt
3160 3159 *
3161 3160  * Description: As the name suggests, this function creates the target context
3162 3161  *		specified by the WWN provided by the caller. If the
3163 3162  *		creation goes well and the target is known by fp/fctl, a PLOGI
3164 3163  *		followed by a PRLI is issued.
3165 3164 *
3166 3165 * Argument: pptr fcp port structure
3167 3166 * pwwn WWN of the target
3168 3167 * ret_val Address of the return code. It could be:
3169 3168 * EIO, ENOMEM or 0.
3170 3169 * fc_status PLOGI or PRLI status completion
3171 3170 * fc_pkt_state PLOGI or PRLI state completion
3172 3171 * fc_pkt_reason PLOGI or PRLI reason completion
3173 3172 * fc_pkt_action PLOGI or PRLI action completion
3174 3173 *
3175 3174 * Return Value: NULL if it failed
3176 3175 * Target structure address if it succeeds
3177 3176 */
3178 3177 static struct fcp_tgt *
3179 3178 fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
3180 3179 int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
3181 3180 {
3182 3181 struct fcp_tgt *ptgt = NULL;
3183 3182 fc_portmap_t devlist;
3184 3183 int lcount;
3185 3184 int error;
3186 3185
3187 3186 *ret_val = 0;
3188 3187
3189 3188 /*
3190 3189 * Check FC port device & get port map
3191 3190 */
3192 3191 if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
3193 3192 &error, 1) == NULL) {
3194 3193 *ret_val = EIO;
3195 3194 } else {
3196 3195 if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
3197 3196 &devlist) != FC_SUCCESS) {
3198 3197 *ret_val = EIO;
3199 3198 }
3200 3199 }
3201 3200
3202 3201 /* Set port map flags */
3203 3202 devlist.map_type = PORT_DEVICE_USER_CREATE;
3204 3203
3205 3204 /* Allocate target */
3206 3205 if (*ret_val == 0) {
3207 3206 lcount = pptr->port_link_cnt;
3208 3207 ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
3209 3208 if (ptgt == NULL) {
3210 3209 fcp_log(CE_WARN, pptr->port_dip,
3211 3210 "!FC target allocation failed");
3212 3211 *ret_val = ENOMEM;
3213 3212 } else {
3214 3213 /* Setup target */
3215 3214 mutex_enter(&ptgt->tgt_mutex);
3216 3215
3217 3216 ptgt->tgt_statec_cause = FCP_CAUSE_TGT_CHANGE;
3218 3217 ptgt->tgt_tmp_cnt = 1;
3219 3218 ptgt->tgt_d_id = devlist.map_did.port_id;
3220 3219 ptgt->tgt_hard_addr =
3221 3220 devlist.map_hard_addr.hard_addr;
3222 3221 ptgt->tgt_pd_handle = devlist.map_pd;
3223 3222 ptgt->tgt_fca_dev = NULL;
3224 3223
3225 3224 bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
3226 3225 FC_WWN_SIZE);
3227 3226 bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
3228 3227 FC_WWN_SIZE);
3229 3228
3230 3229 mutex_exit(&ptgt->tgt_mutex);
3231 3230 }
3232 3231 }
3233 3232
3234 3233 /* Release global mutex for PLOGI and PRLI */
3235 3234 mutex_exit(&fcp_global_mutex);
3236 3235
3237 3236 /* Send PLOGI (If necessary) */
3238 3237 if (*ret_val == 0) {
3239 3238 *ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
3240 3239 fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3241 3240 }
3242 3241
3243 3242 /* Send PRLI (If necessary) */
3244 3243 if (*ret_val == 0) {
3245 3244 *ret_val = fcp_tgt_send_prli(ptgt, fc_status,
3246 3245 fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3247 3246 }
3248 3247
3249 3248 mutex_enter(&fcp_global_mutex);
3250 3249
3251 3250 return (ptgt);
3252 3251 }
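
fcp_port_create_tgt() reports failure through *ret_val and only performs each
later stage (port-map lookup, target allocation, PLOGI, PRLI) while that
accumulated status is still 0, so the first failure short-circuits everything
after it. The following is a minimal user-space sketch of that staged-status
pattern in isolation; the three step functions are hypothetical stand-ins and
are not driver code.

#include <stdio.h>
#include <errno.h>

/*
 * Each stage runs only while the accumulated status is still 0,
 * mirroring the "if (*ret_val == 0)" chaining above.
 */
static int step_lookup(void) { return (0); }    /* pretend lookup worked */
static int step_alloc(void)  { return (0); }    /* pretend alloc worked */
static int step_login(void)  { return (EIO); }  /* pretend login failed */

int
main(void)
{
    int ret = 0;

    if (ret == 0)
        ret = step_lookup();
    if (ret == 0)
        ret = step_alloc();
    if (ret == 0)
        ret = step_login();     /* fails, so nothing further would run */

    printf("final status: %d\n", ret);
    return (ret == 0 ? 0 : 1);
}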
3253 3252
3254 3253 /*
3255 3254 * Function: fcp_tgt_send_plogi
3256 3255 *
3257 3256 * Description: This function sends a PLOGI to the target specified by the
3258 3257 * caller and waits until it completes.
3259 3258 *
3260 3259 * Argument: ptgt Target to send the plogi to.
3261 3260 * fc_status Status returned by fp/fctl in the PLOGI request.
3262 3261 * fc_pkt_state State returned by fp/fctl in the PLOGI request.
3263 3262 * fc_pkt_reason Reason returned by fp/fctl in the PLOGI request.
3264 3263 * fc_pkt_action Action returned by fp/fctl in the PLOGI request.
3265 3264 *
3266 3265 * Return Value: 0
3267 3266 * ENOMEM
3268 3267 * EIO
3269 3268 *
3270 3269 * Context: User context.
3271 3270 */
3272 3271 static int
3273 3272 fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3274 3273 int *fc_pkt_reason, int *fc_pkt_action)
3275 3274 {
3276 3275 struct fcp_port *pptr;
3277 3276 struct fcp_ipkt *icmd;
3278 3277 struct fc_packet *fpkt;
3279 3278 fc_frame_hdr_t *hp;
3280 3279 struct la_els_logi logi;
3281 3280 int tcount;
3282 3281 int lcount;
3283 3282 int ret, login_retval = ~FC_SUCCESS;
3284 3283
3285 3284 ret = 0;
3286 3285
3287 3286 pptr = ptgt->tgt_port;
3288 3287
3289 3288 lcount = pptr->port_link_cnt;
3290 3289 tcount = ptgt->tgt_change_cnt;
3291 3290
3292 3291 /* Alloc internal packet */
3293 3292 icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
3294 3293 sizeof (la_els_logi_t), 0,
3295 3294 pptr->port_state & FCP_STATE_FCA_IS_NODMA,
3296 3295 lcount, tcount, 0, FC_INVALID_RSCN_COUNT);
3297 3296
3298 3297 if (icmd == NULL) {
3299 3298 ret = ENOMEM;
3300 3299 } else {
3301 3300 /*
3302 3301 * Setup internal packet as sema sync
3303 3302 */
3304 3303 fcp_ipkt_sema_init(icmd);
3305 3304
3306 3305 /*
3307 3306 * Setup internal packet (icmd)
3308 3307 */
3309 3308 icmd->ipkt_lun = NULL;
3310 3309 icmd->ipkt_restart = 0;
3311 3310 icmd->ipkt_retries = 0;
3312 3311 icmd->ipkt_opcode = LA_ELS_PLOGI;
3313 3312
3314 3313 /*
3315 3314 * Setup fc_packet
3316 3315 */
3317 3316 fpkt = icmd->ipkt_fpkt;
3318 3317
3319 3318 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
3320 3319 fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
3321 3320 fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
3322 3321
3323 3322 /*
3324 3323 * Setup FC frame header
3325 3324 */
3326 3325 hp = &fpkt->pkt_cmd_fhdr;
3327 3326
3328 3327 hp->s_id = pptr->port_id; /* source ID */
3329 3328 hp->d_id = ptgt->tgt_d_id; /* dest ID */
3330 3329 hp->r_ctl = R_CTL_ELS_REQ;
3331 3330 hp->type = FC_TYPE_EXTENDED_LS;
3332 3331 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3333 3332 hp->seq_id = 0;
3334 3333 hp->rsvd = 0;
3335 3334 hp->df_ctl = 0;
3336 3335 hp->seq_cnt = 0;
3337 3336 hp->ox_id = 0xffff; /* i.e. none */
3338 3337 hp->rx_id = 0xffff; /* i.e. none */
3339 3338 hp->ro = 0;
3340 3339
3341 3340 /*
3342 3341 * Setup PLOGI
3343 3342 */
3344 3343 bzero(&logi, sizeof (struct la_els_logi));
3345 3344 logi.ls_code.ls_code = LA_ELS_PLOGI;
3346 3345
3347 3346 FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
3348 3347 fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
3349 3348
3350 3349 /*
3351 3350 * Send PLOGI
3352 3351 */
3353 3352 *fc_status = login_retval =
3354 3353 fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
3355 3354 if (*fc_status != FC_SUCCESS) {
3356 3355 ret = EIO;
3357 3356 }
3358 3357 }
3359 3358
3360 3359 /*
3361 3360 * Wait for completion
3362 3361 */
3363 3362 if ((ret == 0) && (login_retval == FC_SUCCESS)) {
3364 3363 ret = fcp_ipkt_sema_wait(icmd);
3365 3364
3366 3365 *fc_pkt_state = fpkt->pkt_state;
3367 3366 *fc_pkt_reason = fpkt->pkt_reason;
3368 3367 *fc_pkt_action = fpkt->pkt_action;
3369 3368 }
3370 3369
3371 3370 /*
3372 3371 * Cleanup transport data structures if icmd was alloc-ed AND if there
3373 3372 * is going to be no callback (i.e if fc_ulp_login() failed).
3374 3373 * Otherwise, cleanup happens in callback routine.
3375 3374 */
3376 3375 if (icmd != NULL) {
3377 3376 fcp_ipkt_sema_cleanup(icmd);
3378 3377 }
3379 3378
3380 3379 return (ret);
3381 3380 }
3382 3381
3383 3382 /*
3384 3383 * Function: fcp_tgt_send_prli
3385 3384 *
3386 3385 * Description: Does nothing as of today.
3387 3386 *
3388 3387 * Argument: ptgt Target to send the prli to.
3389 3388 * fc_status Status returned by fp/fctl in the PRLI request.
3390 3389 * fc_pkt_state State returned by fp/fctl in the PRLI request.
3391 3390 * fc_pkt_reason Reason returned by fp/fctl in the PRLI request.
3392 3391 * fc_pkt_action Action returned by fp/fctl in the PRLI request.
3393 3392 *
3394 3393 * Return Value: 0
3395 3394 */
3396 3395 /*ARGSUSED*/
3397 3396 static int
3398 3397 fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3399 3398 int *fc_pkt_reason, int *fc_pkt_action)
3400 3399 {
3401 3400 return (0);
3402 3401 }
3403 3402
3404 3403 /*
3405 3404 * Function: fcp_ipkt_sema_init
3406 3405 *
3407 3406 * Description: Initializes the semaphore contained in the internal packet.
3408 3407 *
3409 3408 * Argument: icmd Internal packet the semaphore of which must be
3410 3409 * initialized.
3411 3410 *
3412 3411 * Return Value: None
3413 3412 *
3414 3413 * Context: User context only.
3415 3414 */
3416 3415 static void
3417 3416 fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
3418 3417 {
3419 3418 struct fc_packet *fpkt;
3420 3419
3421 3420 fpkt = icmd->ipkt_fpkt;
3422 3421
3423 3422 /* Create semaphore for sync */
3424 3423 sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);
3425 3424
3426 3425 /* Setup the completion callback */
3427 3426 fpkt->pkt_comp = fcp_ipkt_sema_callback;
3428 3427 }
3429 3428
3430 3429 /*
3431 3430 * Function: fcp_ipkt_sema_wait
3432 3431 *
3433 3432 * Description: Wait on the semaphore embedded in the internal packet. The
3434 3433 * semaphore is released in the callback.
3435 3434 *
3436 3435 * Argument: icmd Internal packet to wait on for completion.
3437 3436 *
3438 3437 * Return Value: 0
3439 3438 * EIO
3440 3439 * EBUSY
3441 3440 * EAGAIN
3442 3441 *
3443 3442 * Context: User context only.
3444 3443 *
3445 3444 * This function converts the pkt_state field of the fc_packet embedded in the
3446 3445 * internal packet (icmd) into the error code it returns.
3447 3446 */
3448 3447 static int
3449 3448 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3450 3449 {
3451 3450 struct fc_packet *fpkt;
3452 3451 int ret;
3453 3452
3454 3453 ret = EIO;
3455 3454 fpkt = icmd->ipkt_fpkt;
3456 3455
3457 3456 /*
3458 3457 * Wait on semaphore
3459 3458 */
3460 3459 sema_p(&(icmd->ipkt_sema));
3461 3460
3462 3461 /*
3463 3462 * Check the status of the FC packet
3464 3463 */
3465 3464 switch (fpkt->pkt_state) {
3466 3465 case FC_PKT_SUCCESS:
3467 3466 ret = 0;
3468 3467 break;
3469 3468 case FC_PKT_LOCAL_RJT:
3470 3469 switch (fpkt->pkt_reason) {
3471 3470 case FC_REASON_SEQ_TIMEOUT:
3472 3471 case FC_REASON_RX_BUF_TIMEOUT:
3473 3472 ret = EAGAIN;
3474 3473 break;
3475 3474 case FC_REASON_PKT_BUSY:
3476 3475 ret = EBUSY;
3477 3476 break;
3478 3477 }
3479 3478 break;
3480 3479 case FC_PKT_TIMEOUT:
3481 3480 ret = EAGAIN;
3482 3481 break;
3483 3482 case FC_PKT_LOCAL_BSY:
3484 3483 case FC_PKT_TRAN_BSY:
3485 3484 case FC_PKT_NPORT_BSY:
3486 3485 case FC_PKT_FABRIC_BSY:
3487 3486 ret = EBUSY;
3488 3487 break;
3489 3488 case FC_PKT_LS_RJT:
3490 3489 case FC_PKT_BA_RJT:
3491 3490 switch (fpkt->pkt_reason) {
3492 3491 case FC_REASON_LOGICAL_BSY:
3493 3492 ret = EBUSY;
3494 3493 break;
3495 3494 }
3496 3495 break;
3497 3496 case FC_PKT_FS_RJT:
3498 3497 switch (fpkt->pkt_reason) {
3499 3498 case FC_REASON_FS_LOGICAL_BUSY:
3500 3499 ret = EBUSY;
3501 3500 break;
3502 3501 }
3503 3502 break;
3504 3503 }
3505 3504
3506 3505 return (ret);
3507 3506 }
3508 3507
3509 3508 /*
3510 3509 * Function: fcp_ipkt_sema_callback
3511 3510 *
3512 3511 * Description: Registered as the completion callback function for the FC
3513 3512 * transport when the ipkt semaphore is used for sync. This will
3514 3513 * clean up the used data structures, if necessary, and wake up
3515 3514 * the user thread to complete the transaction.
3516 3515 *
3517 3516 * Argument: fpkt FC packet (points to the icmd)
3518 3517 *
3519 3518 * Return Value: None
3520 3519 *
3521 3520 * Context: User context only
3522 3521 */
3523 3522 static void
3524 3523 fcp_ipkt_sema_callback(struct fc_packet *fpkt)
3525 3524 {
3526 3525 struct fcp_ipkt *icmd;
3527 3526
3528 3527 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
3529 3528
3530 3529 /*
3531 3530 * Wake up user thread
3532 3531 */
3533 3532 sema_v(&(icmd->ipkt_sema));
3534 3533 }
3535 3534
3536 3535 /*
3537 3536 * Function: fcp_ipkt_sema_cleanup
3538 3537 *
3539 3538 * Description: Called to cleanup (if necessary) the data structures used
3540 3539 * when ipkt sema is used for sync. This function will detect
3541 3540 * whether the caller is the last thread (via counter) and
3542 3541 * cleanup only if necessary.
3543 3542 *
3544 3543 * Argument: icmd Internal command packet
3545 3544 *
3546 3545 * Return Value: None
3547 3546 *
3548 3547 * Context: User context only
3549 3548 */
3550 3549 static void
3551 3550 fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
3552 3551 {
3553 3552 struct fcp_tgt *ptgt;
3554 3553 struct fcp_port *pptr;
3555 3554
3556 3555 ptgt = icmd->ipkt_tgt;
3557 3556 pptr = icmd->ipkt_port;
3558 3557
3559 3558 /*
3560 3559 * Acquire data structure
3561 3560 */
3562 3561 mutex_enter(&ptgt->tgt_mutex);
3563 3562
3564 3563 /*
3565 3564 * Destroy semaphore
3566 3565 */
3567 3566 sema_destroy(&(icmd->ipkt_sema));
3568 3567
3569 3568 /*
3570 3569 * Cleanup internal packet
3571 3570 */
3572 3571 mutex_exit(&ptgt->tgt_mutex);
3573 3572 fcp_icmd_free(pptr, icmd);
3574 3573 }
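
The fcp_ipkt_sema_init()/fcp_ipkt_sema_wait()/fcp_ipkt_sema_callback() trio
implements a simple issue-and-block pattern: the caller initializes a
semaphore at 0, the transport's completion callback posts it, and the waiting
thread then inspects the completion status. Below is a minimal user-space
sketch of that pattern, assuming POSIX semaphores and a pthread stand in for
the kernel sema_*() routines and the transport callback; the fake packet
structure and names are illustrative only.

#include <stdio.h>
#include <pthread.h>
#include <semaphore.h>
#include <unistd.h>

/* Illustrative stand-in for an internal packet with an embedded semaphore. */
struct fake_ipkt {
    sem_t   ip_sema;    /* posted by the "completion callback" */
    int     ip_status;  /* filled in before the post */
};

/* Plays the role of the transport completing the request asynchronously. */
static void *
fake_transport(void *arg)
{
    struct fake_ipkt *ip = arg;

    sleep(1);               /* pretend the ELS is on the wire */
    ip->ip_status = 0;      /* pretend it succeeded */
    sem_post(&ip->ip_sema); /* analogous to sema_v() in the callback */
    return (NULL);
}

int
main(void)
{
    struct fake_ipkt ip;
    pthread_t tid;

    (void) sem_init(&ip.ip_sema, 0, 0);     /* analogous to sema_init() */
    (void) pthread_create(&tid, NULL, fake_transport, &ip);

    sem_wait(&ip.ip_sema);                  /* analogous to sema_p() */
    printf("request completed with status %d\n", ip.ip_status);

    (void) pthread_join(tid, NULL);
    (void) sem_destroy(&ip.ip_sema);        /* analogous to sema_destroy() */
    return (0);
}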
3575 3574
3576 3575 /*
3577 3576 * Function: fcp_port_attach
3578 3577 *
3579 3578 * Description: Called by the transport framework to resume, suspend or
3580 3579 * attach a new port.
3581 3580 *
3582 3581 * Argument: ulph Port handle
3583 3582 * *pinfo Port information
3584 3583 * cmd Command
3585 3584 * s_id Port ID
3586 3585 *
3587 3586 * Return Value: FC_FAILURE or FC_SUCCESS
3588 3587 */
3589 3588 /*ARGSUSED*/
3590 3589 static int
3591 3590 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3592 3591 fc_attach_cmd_t cmd, uint32_t s_id)
3593 3592 {
3594 3593 int instance;
3595 3594 int res = FC_FAILURE; /* default result */
3596 3595
3597 3596 ASSERT(pinfo != NULL);
3598 3597
3599 3598 instance = ddi_get_instance(pinfo->port_dip);
3600 3599
3601 3600 switch (cmd) {
3602 3601 case FC_CMD_ATTACH:
3603 3602 /*
3604 3603 * this port instance attaching for the first time (or after
3605 3604 * being detached before)
3606 3605 */
3607 3606 if (fcp_handle_port_attach(ulph, pinfo, s_id,
3608 3607 instance) == DDI_SUCCESS) {
3609 3608 res = FC_SUCCESS;
3610 3609 } else {
3611 3610 ASSERT(ddi_get_soft_state(fcp_softstate,
3612 3611 instance) == NULL);
3613 3612 }
3614 3613 break;
3615 3614
3616 3615 case FC_CMD_RESUME:
3617 3616 case FC_CMD_POWER_UP:
3618 3617 /*
3619 3618 * this port instance was attached and then suspended and
3620 3619 * will now be resumed
3621 3620 */
3622 3621 if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3623 3622 instance) == DDI_SUCCESS) {
3624 3623 res = FC_SUCCESS;
3625 3624 }
3626 3625 break;
3627 3626
3628 3627 default:
3629 3628 /* shouldn't happen */
3630 3629 FCP_TRACE(fcp_logq, "fcp",
3631 3630 fcp_trace, FCP_BUF_LEVEL_2, 0,
3632 3631 "port_attach: unknown command: %d", cmd);
3633 3632 break;
3634 3633 }
3635 3634
3636 3635 /* return result */
3637 3636 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3638 3637 FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3639 3638
3640 3639 return (res);
3641 3640 }
3642 3641
3643 3642
3644 3643 /*
3645 3644 * detach or suspend this port instance
3646 3645 *
3647 3646 * acquires and releases the global mutex
3648 3647 *
3649 3648 * acquires and releases the mutex for this port
3650 3649 *
3651 3650 * acquires and releases the hotplug mutex for this port
3652 3651 */
3653 3652 /*ARGSUSED*/
3654 3653 static int
3655 3654 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
3656 3655 fc_detach_cmd_t cmd)
3657 3656 {
3658 3657 int flag;
3659 3658 int instance;
3660 3659 struct fcp_port *pptr;
3661 3660
3662 3661 instance = ddi_get_instance(info->port_dip);
3663 3662 pptr = ddi_get_soft_state(fcp_softstate, instance);
3664 3663
3665 3664 switch (cmd) {
3666 3665 case FC_CMD_SUSPEND:
3667 3666 FCP_DTRACE(fcp_logq, "fcp",
3668 3667 fcp_trace, FCP_BUF_LEVEL_8, 0,
3669 3668 "port suspend called for port %d", instance);
3670 3669 flag = FCP_STATE_SUSPENDED;
3671 3670 break;
3672 3671
3673 3672 case FC_CMD_POWER_DOWN:
3674 3673 FCP_DTRACE(fcp_logq, "fcp",
3675 3674 fcp_trace, FCP_BUF_LEVEL_8, 0,
3676 3675 "port power down called for port %d", instance);
3677 3676 flag = FCP_STATE_POWER_DOWN;
3678 3677 break;
3679 3678
3680 3679 case FC_CMD_DETACH:
3681 3680 FCP_DTRACE(fcp_logq, "fcp",
3682 3681 fcp_trace, FCP_BUF_LEVEL_8, 0,
3683 3682 "port detach called for port %d", instance);
3684 3683 flag = FCP_STATE_DETACHING;
3685 3684 break;
3686 3685
3687 3686 default:
3688 3687 /* shouldn't happen */
3689 3688 return (FC_FAILURE);
3690 3689 }
3691 3690 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3692 3691 FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");
3693 3692
3694 3693 return (fcp_handle_port_detach(pptr, flag, instance));
3695 3694 }
3696 3695
3697 3696
3698 3697 /*
3699 3698 * called for ioctls on the transport's devctl interface, and the transport
3700 3699 * has passed it to us
3701 3700 *
3702 3701 * this will only be called for device control ioctls (i.e. hotplugging stuff)
3703 3702 *
3704 3703 * return FC_SUCCESS if we decide to claim the ioctl,
3705 3704 * else return FC_UNCLAIMED
3706 3705 *
3707 3706 * *rval is set iff we decide to claim the ioctl
3708 3707 */
3709 3708 /*ARGSUSED*/
3710 3709 static int
3711 3710 fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
3712 3711 intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
3713 3712 {
3714 3713 int retval = FC_UNCLAIMED; /* return value */
3715 3714 struct fcp_port *pptr = NULL; /* our soft state */
3716 3715 struct devctl_iocdata *dcp = NULL; /* for devctl */
3717 3716 dev_info_t *cdip;
3718 3717 mdi_pathinfo_t *pip = NULL;
3719 3718 char *ndi_nm; /* NDI name */
3720 3719 char *ndi_addr; /* NDI addr */
3721 3720 int is_mpxio, circ;
3722 3721 int devi_entered = 0;
3723 3722 clock_t end_time;
3724 3723
3725 3724 ASSERT(rval != NULL);
3726 3725
3727 3726 FCP_DTRACE(fcp_logq, "fcp",
3728 3727 fcp_trace, FCP_BUF_LEVEL_8, 0,
3729 3728 "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);
3730 3729
3731 3730 /* if already claimed then forget it */
3732 3731 if (claimed) {
3733 3732 /*
3734 3733 * for now, if this ioctl has already been claimed, then
3735 3734 * we just ignore it
3736 3735 */
3737 3736 return (retval);
3738 3737 }
3739 3738
3740 3739 /* get our port info */
3741 3740 if ((pptr = fcp_get_port(port_handle)) == NULL) {
3742 3741 fcp_log(CE_WARN, NULL,
3743 3742 "!fcp:Invalid port handle in ioctl");
3744 3743 *rval = ENXIO;
3745 3744 return (retval);
3746 3745 }
3747 3746 is_mpxio = pptr->port_mpxio;
3748 3747
3749 3748 switch (cmd) {
3750 3749 case DEVCTL_BUS_GETSTATE:
3751 3750 case DEVCTL_BUS_QUIESCE:
3752 3751 case DEVCTL_BUS_UNQUIESCE:
3753 3752 case DEVCTL_BUS_RESET:
3754 3753 case DEVCTL_BUS_RESETALL:
3755 3754
3756 3755 case DEVCTL_BUS_DEV_CREATE:
3757 3756 if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3758 3757 return (retval);
3759 3758 }
3760 3759 break;
3761 3760
3762 3761 case DEVCTL_DEVICE_GETSTATE:
3763 3762 case DEVCTL_DEVICE_OFFLINE:
3764 3763 case DEVCTL_DEVICE_ONLINE:
3765 3764 case DEVCTL_DEVICE_REMOVE:
3766 3765 case DEVCTL_DEVICE_RESET:
3767 3766 if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3768 3767 return (retval);
3769 3768 }
3770 3769
3771 3770 ASSERT(dcp != NULL);
3772 3771
3773 3772 /* ensure we have a name and address */
3774 3773 if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
3775 3774 ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
3776 3775 FCP_TRACE(fcp_logq, pptr->port_instbuf,
3777 3776 fcp_trace, FCP_BUF_LEVEL_2, 0,
3778 3777 "ioctl: can't get name (%s) or addr (%s)",
3779 3778 ndi_nm ? ndi_nm : "<null ptr>",
3780 3779 ndi_addr ? ndi_addr : "<null ptr>");
3781 3780 ndi_dc_freehdl(dcp);
3782 3781 return (retval);
3783 3782 }
3784 3783
3785 3784
3786 3785 /* get our child's DIP */
3787 3786 ASSERT(pptr != NULL);
3788 3787 if (is_mpxio) {
3789 3788 mdi_devi_enter(pptr->port_dip, &circ);
3790 3789 } else {
3791 3790 ndi_devi_enter(pptr->port_dip, &circ);
3792 3791 }
3793 3792 devi_entered = 1;
3794 3793
3795 3794 if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
3796 3795 ndi_addr)) == NULL) {
3797 3796 /* Look for virtually enumerated devices. */
3798 3797 pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
3799 3798 if (pip == NULL ||
3800 3799 ((cdip = mdi_pi_get_client(pip)) == NULL)) {
3801 3800 *rval = ENXIO;
3802 3801 goto out;
3803 3802 }
3804 3803 }
3805 3804 break;
3806 3805
3807 3806 default:
3808 3807 *rval = ENOTTY;
3809 3808 return (retval);
3810 3809 }
3811 3810
3812 3811 /* this ioctl is ours -- process it */
3813 3812
3814 3813 retval = FC_SUCCESS; /* just means we claim the ioctl */
3815 3814
3816 3815 /* we assume it will be a success; else we'll set error value */
3817 3816 *rval = 0;
3818 3817
3819 3818
3820 3819 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
3821 3820 fcp_trace, FCP_BUF_LEVEL_8, 0,
3822 3821 "ioctl: claiming this one");
3823 3822
3824 3823 /* handle ioctls now */
3825 3824 switch (cmd) {
3826 3825 case DEVCTL_DEVICE_GETSTATE:
3827 3826 ASSERT(cdip != NULL);
3828 3827 ASSERT(dcp != NULL);
3829 3828 if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
3830 3829 *rval = EFAULT;
3831 3830 }
3832 3831 break;
3833 3832
3834 3833 case DEVCTL_DEVICE_REMOVE:
3835 3834 case DEVCTL_DEVICE_OFFLINE: {
3836 3835 int flag = 0;
3837 3836 int lcount;
3838 3837 int tcount;
3839 3838 struct fcp_pkt *head = NULL;
3840 3839 struct fcp_lun *plun;
3841 3840 child_info_t *cip = CIP(cdip);
3842 3841 int all = 1;
3843 3842 struct fcp_lun *tplun;
3844 3843 struct fcp_tgt *ptgt;
3845 3844
3846 3845 ASSERT(pptr != NULL);
3847 3846 ASSERT(cdip != NULL);
3848 3847
3849 3848 mutex_enter(&pptr->port_mutex);
3850 3849 if (pip != NULL) {
3851 3850 cip = CIP(pip);
3852 3851 }
3853 3852 if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3854 3853 mutex_exit(&pptr->port_mutex);
3855 3854 *rval = ENXIO;
3856 3855 break;
3857 3856 }
3858 3857
3859 3858 head = fcp_scan_commands(plun);
3860 3859 if (head != NULL) {
3861 3860 fcp_abort_commands(head, LUN_PORT);
3862 3861 }
3863 3862 lcount = pptr->port_link_cnt;
3864 3863 tcount = plun->lun_tgt->tgt_change_cnt;
3865 3864 mutex_exit(&pptr->port_mutex);
3866 3865
3867 3866 if (cmd == DEVCTL_DEVICE_REMOVE) {
3868 3867 flag = NDI_DEVI_REMOVE;
3869 3868 }
3870 3869
3871 3870 if (is_mpxio) {
3872 3871 mdi_devi_exit(pptr->port_dip, circ);
3873 3872 } else {
3874 3873 ndi_devi_exit(pptr->port_dip, circ);
3875 3874 }
3876 3875 devi_entered = 0;
3877 3876
3878 3877 *rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3879 3878 FCP_OFFLINE, lcount, tcount, flag);
3880 3879
3881 3880 if (*rval != NDI_SUCCESS) {
3882 3881 *rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
3883 3882 break;
3884 3883 }
3885 3884
3886 3885 fcp_update_offline_flags(plun);
3887 3886
3888 3887 ptgt = plun->lun_tgt;
3889 3888 mutex_enter(&ptgt->tgt_mutex);
3890 3889 for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
3891 3890 tplun->lun_next) {
3892 3891 mutex_enter(&tplun->lun_mutex);
3893 3892 if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
3894 3893 all = 0;
3895 3894 }
3896 3895 mutex_exit(&tplun->lun_mutex);
3897 3896 }
3898 3897
3899 3898 if (all) {
3900 3899 ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
3901 3900 /*
3902 3901 * The user is unconfiguring/offlining the device.
3903 3902 * If fabric and the auto configuration is set
3904 3903 * then make sure the user is the only one who
3905 3904 * can reconfigure the device.
3906 3905 */
3907 3906 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
3908 3907 fcp_enable_auto_configuration) {
3909 3908 ptgt->tgt_manual_config_only = 1;
3910 3909 }
3911 3910 }
3912 3911 mutex_exit(&ptgt->tgt_mutex);
3913 3912 break;
3914 3913 }
3915 3914
3916 3915 case DEVCTL_DEVICE_ONLINE: {
3917 3916 int lcount;
3918 3917 int tcount;
3919 3918 struct fcp_lun *plun;
3920 3919 child_info_t *cip = CIP(cdip);
3921 3920
3922 3921 ASSERT(cdip != NULL);
3923 3922 ASSERT(pptr != NULL);
3924 3923
3925 3924 mutex_enter(&pptr->port_mutex);
3926 3925 if (pip != NULL) {
3927 3926 cip = CIP(pip);
3928 3927 }
3929 3928 if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3930 3929 mutex_exit(&pptr->port_mutex);
3931 3930 *rval = ENXIO;
3932 3931 break;
3933 3932 }
3934 3933 lcount = pptr->port_link_cnt;
3935 3934 tcount = plun->lun_tgt->tgt_change_cnt;
3936 3935 mutex_exit(&pptr->port_mutex);
3937 3936
3938 3937 /*
3939 3938 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
3940 3939 * to allow the device attach to occur when the device is
3941 3940 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
3942 3941 * from the scsi_probe()).
3943 3942 */
3944 3943 mutex_enter(&LUN_TGT->tgt_mutex);
3945 3944 plun->lun_state |= FCP_LUN_ONLINING;
3946 3945 mutex_exit(&LUN_TGT->tgt_mutex);
3947 3946
3948 3947 if (is_mpxio) {
3949 3948 mdi_devi_exit(pptr->port_dip, circ);
3950 3949 } else {
3951 3950 ndi_devi_exit(pptr->port_dip, circ);
3952 3951 }
3953 3952 devi_entered = 0;
3954 3953
3955 3954 *rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3956 3955 FCP_ONLINE, lcount, tcount, 0);
3957 3956
3958 3957 if (*rval != NDI_SUCCESS) {
3959 3958 /* Reset the FCP_LUN_ONLINING bit */
3960 3959 mutex_enter(&LUN_TGT->tgt_mutex);
3961 3960 plun->lun_state &= ~FCP_LUN_ONLINING;
3962 3961 mutex_exit(&LUN_TGT->tgt_mutex);
3963 3962 *rval = EIO;
3964 3963 break;
3965 3964 }
3966 3965 mutex_enter(&LUN_TGT->tgt_mutex);
3967 3966 plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
3968 3967 FCP_LUN_ONLINING);
3969 3968 mutex_exit(&LUN_TGT->tgt_mutex);
3970 3969 break;
3971 3970 }
3972 3971
3973 3972 case DEVCTL_BUS_DEV_CREATE: {
3974 3973 uchar_t *bytes = NULL;
3975 3974 uint_t nbytes;
3976 3975 struct fcp_tgt *ptgt = NULL;
3977 3976 struct fcp_lun *plun = NULL;
3978 3977 dev_info_t *useless_dip = NULL;
3979 3978
3980 3979 *rval = ndi_dc_devi_create(dcp, pptr->port_dip,
3981 3980 DEVCTL_CONSTRUCT, &useless_dip);
3982 3981 if (*rval != 0 || useless_dip == NULL) {
3983 3982 break;
3984 3983 }
3985 3984
3986 3985 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
3987 3986 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
3988 3987 &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
3989 3988 *rval = EINVAL;
3990 3989 (void) ndi_devi_free(useless_dip);
3991 3990 if (bytes != NULL) {
3992 3991 ddi_prop_free(bytes);
3993 3992 }
3994 3993 break;
3995 3994 }
3996 3995
3997 3996 *rval = fcp_create_on_demand(pptr, bytes);
3998 3997 if (*rval == 0) {
3999 3998 mutex_enter(&pptr->port_mutex);
4000 3999 ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
4001 4000 if (ptgt) {
4002 4001 /*
4003 4002 * We now have a pointer to the target that
4004 4003 * was created. Let's point to the first LUN on
4005 4004 * this new target.
4006 4005 */
4007 4006 mutex_enter(&ptgt->tgt_mutex);
4008 4007
4009 4008 plun = ptgt->tgt_lun;
4010 4009 /*
4011 4010 * There may be stale/offline LUN entries on
4012 4011 * this list (this is by design) and so we have
4013 4012 * to make sure we point to the first online
4014 4013 * LUN
4015 4014 */
4016 4015 while (plun &&
4017 4016 plun->lun_state & FCP_LUN_OFFLINE) {
4018 4017 plun = plun->lun_next;
4019 4018 }
4020 4019
4021 4020 mutex_exit(&ptgt->tgt_mutex);
4022 4021 }
4023 4022 mutex_exit(&pptr->port_mutex);
4024 4023 }
4025 4024
4026 4025 if (*rval == 0 && ptgt && plun) {
4027 4026 mutex_enter(&plun->lun_mutex);
4028 4027 /*
4029 4028 * Allow up to fcp_lun_ready_retry seconds to
4030 4029 * configure all the luns behind the target.
4031 4030 *
4032 4031 * The intent here is to allow targets with long
4033 4032 * reboot/reset-recovery times to become available
4034 4033 * while limiting the maximum wait time for an
4035 4034 * unresponsive target.
4036 4035 */
4037 4036 end_time = ddi_get_lbolt() +
4038 4037 SEC_TO_TICK(fcp_lun_ready_retry);
4039 4038
4040 4039 while (ddi_get_lbolt() < end_time) {
4041 4040 retval = FC_SUCCESS;
4042 4041
4043 4042 /*
4044 4043 * The new ndi interfaces for on-demand creation
4045 4044 * are inflexible. Do some more work to pass on
4046 4045 * a path name of some LUN (design is broken!)
4047 4046 */
4048 4047 if (plun->lun_cip) {
4049 4048 if (plun->lun_mpxio == 0) {
4050 4049 cdip = DIP(plun->lun_cip);
4051 4050 } else {
4052 4051 cdip = mdi_pi_get_client(
4053 4052 PIP(plun->lun_cip));
4054 4053 }
4055 4054 if (cdip == NULL) {
4056 4055 *rval = ENXIO;
4057 4056 break;
4058 4057 }
4059 4058
4060 4059 if (!i_ddi_devi_attached(cdip)) {
4061 4060 mutex_exit(&plun->lun_mutex);
4062 4061 delay(drv_usectohz(1000000));
4063 4062 mutex_enter(&plun->lun_mutex);
4064 4063 } else {
4065 4064 /*
4066 4065 * This LUN is ready; let's
4067 4066 * check the next one.
4068 4067 */
4069 4068 mutex_exit(&plun->lun_mutex);
4070 4069 plun = plun->lun_next;
4071 4070 while (plun && (plun->lun_state
4072 4071 & FCP_LUN_OFFLINE)) {
4073 4072 plun = plun->lun_next;
4074 4073 }
4075 4074 if (!plun) {
4076 4075 break;
4077 4076 }
4078 4077 mutex_enter(&plun->lun_mutex);
4079 4078 }
4080 4079 } else {
4081 4080 /*
4082 4081 * lun_cip field for a valid lun
4083 4082 * should never be NULL. Fail the
4084 4083 * command.
4085 4084 */
4086 4085 *rval = ENXIO;
4087 4086 break;
4088 4087 }
4089 4088 }
4090 4089 if (plun) {
4091 4090 mutex_exit(&plun->lun_mutex);
4092 4091 } else {
4093 4092 char devnm[MAXNAMELEN];
4094 4093 int nmlen;
4095 4094
4096 4095 nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
4097 4096 ddi_node_name(cdip),
4098 4097 ddi_get_name_addr(cdip));
4099 4098
4100 4099 if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
4101 4100 0) {
4102 4101 *rval = EFAULT;
4103 4102 }
4104 4103 }
4105 4104 } else {
4106 4105 int i;
4107 4106 char buf[25];
4108 4107
4109 4108 for (i = 0; i < FC_WWN_SIZE; i++) {
4110 4109 (void) sprintf(&buf[i << 1], "%02x", bytes[i]);
4111 4110 }
4112 4111
4113 4112 fcp_log(CE_WARN, pptr->port_dip,
4114 4113 "!Failed to create nodes for pwwn=%s; error=%x",
4115 4114 buf, *rval);
4116 4115 }
4117 4116
4118 4117 (void) ndi_devi_free(useless_dip);
4119 4118 ddi_prop_free(bytes);
4120 4119 break;
4121 4120 }
4122 4121
4123 4122 case DEVCTL_DEVICE_RESET: {
4124 4123 struct fcp_lun *plun;
4125 4124 child_info_t *cip = CIP(cdip);
4126 4125
4127 4126 ASSERT(cdip != NULL);
4128 4127 ASSERT(pptr != NULL);
4129 4128 mutex_enter(&pptr->port_mutex);
4130 4129 if (pip != NULL) {
4131 4130 cip = CIP(pip);
4132 4131 }
4133 4132 if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
4134 4133 mutex_exit(&pptr->port_mutex);
4135 4134 *rval = ENXIO;
4136 4135 break;
4137 4136 }
4138 4137 mutex_exit(&pptr->port_mutex);
4139 4138
4140 4139 mutex_enter(&plun->lun_tgt->tgt_mutex);
4141 4140 if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
4142 4141 mutex_exit(&plun->lun_tgt->tgt_mutex);
4143 4142
4144 4143 *rval = ENXIO;
4145 4144 break;
4146 4145 }
4147 4146
4148 4147 if (plun->lun_sd == NULL) {
4149 4148 mutex_exit(&plun->lun_tgt->tgt_mutex);
4150 4149
4151 4150 *rval = ENXIO;
4152 4151 break;
4153 4152 }
4154 4153 mutex_exit(&plun->lun_tgt->tgt_mutex);
4155 4154
4156 4155 /*
4157 4156 * set up ap so that fcp_reset can figure out
4158 4157 * which target to reset
4159 4158 */
4160 4159 if (fcp_scsi_reset(&plun->lun_sd->sd_address,
4161 4160 RESET_TARGET) == FALSE) {
4162 4161 *rval = EIO;
4163 4162 }
4164 4163 break;
4165 4164 }
4166 4165
4167 4166 case DEVCTL_BUS_GETSTATE:
4168 4167 ASSERT(dcp != NULL);
4169 4168 ASSERT(pptr != NULL);
4170 4169 ASSERT(pptr->port_dip != NULL);
4171 4170 if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
4172 4171 NDI_SUCCESS) {
4173 4172 *rval = EFAULT;
4174 4173 }
4175 4174 break;
4176 4175
4177 4176 case DEVCTL_BUS_QUIESCE:
4178 4177 case DEVCTL_BUS_UNQUIESCE:
4179 4178 *rval = ENOTSUP;
4180 4179 break;
4181 4180
4182 4181 case DEVCTL_BUS_RESET:
4183 4182 case DEVCTL_BUS_RESETALL:
4184 4183 ASSERT(pptr != NULL);
4185 4184 (void) fcp_linkreset(pptr, NULL, KM_SLEEP);
4186 4185 break;
4187 4186
4188 4187 default:
4189 4188 ASSERT(dcp != NULL);
4190 4189 *rval = ENOTTY;
4191 4190 break;
4192 4191 }
4193 4192
4194 4193 /* all done -- clean up and return */
4195 4194 out: if (devi_entered) {
4196 4195 if (is_mpxio) {
4197 4196 mdi_devi_exit(pptr->port_dip, circ);
4198 4197 } else {
4199 4198 ndi_devi_exit(pptr->port_dip, circ);
4200 4199 }
4201 4200 }
4202 4201
4203 4202 if (dcp != NULL) {
4204 4203 ndi_dc_freehdl(dcp);
4205 4204 }
4206 4205
4207 4206 return (retval);
4208 4207 }
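
The DEVCTL_BUS_DEV_CREATE case above bounds its wait for the newly created
LUNs with a deadline derived from ddi_get_lbolt() plus
SEC_TO_TICK(fcp_lun_ready_retry), sleeping one second between checks. Below
is a minimal user-space sketch of the same bounded-polling idea, assuming
clock_gettime(CLOCK_MONOTONIC) and sleep() stand in for the DDI lbolt and
delay() routines; condition_is_ready() and the 10-second budget are
hypothetical.

#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <stdbool.h>

/* Hypothetical readiness check standing in for i_ddi_devi_attached(). */
static bool
condition_is_ready(void)
{
    static int calls;
    return (++calls >= 3);  /* pretend it becomes ready on the 3rd poll */
}

int
main(void)
{
    struct timespec now;
    time_t deadline;
    bool ready = false;

    (void) clock_gettime(CLOCK_MONOTONIC, &now);
    deadline = now.tv_sec + 10;     /* analogous to lbolt + SEC_TO_TICK(n) */

    for (;;) {
        (void) clock_gettime(CLOCK_MONOTONIC, &now);
        if (now.tv_sec >= deadline)
            break;                  /* budget exhausted */
        if ((ready = condition_is_ready()))
            break;
        (void) sleep(1);            /* analogous to delay(drv_usectohz(1000000)) */
    }

    printf("%s\n", ready ? "device ready" : "timed out");
    return (ready ? 0 : 1);
}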
4209 4208
4210 4209
4211 4210 /*ARGSUSED*/
4212 4211 static int
4213 4212 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4214 4213 uint32_t claimed)
4215 4214 {
4216 4215 uchar_t r_ctl;
4217 4216 uchar_t ls_code;
4218 4217 struct fcp_port *pptr;
4219 4218
4220 4219 if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4221 4220 return (FC_UNCLAIMED);
4222 4221 }
4223 4222
4224 4223 mutex_enter(&pptr->port_mutex);
4225 4224 if (pptr->port_state & (FCP_STATE_DETACHING |
4226 4225 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4227 4226 mutex_exit(&pptr->port_mutex);
4228 4227 return (FC_UNCLAIMED);
4229 4228 }
4230 4229 mutex_exit(&pptr->port_mutex);
4231 4230
4232 4231 r_ctl = buf->ub_frame.r_ctl;
4233 4232
4234 4233 switch (r_ctl & R_CTL_ROUTING) {
4235 4234 case R_CTL_EXTENDED_SVC:
4236 4235 if (r_ctl == R_CTL_ELS_REQ) {
4237 4236 ls_code = buf->ub_buffer[0];
4238 4237
4239 4238 switch (ls_code) {
4240 4239 case LA_ELS_PRLI:
4241 4240 /*
4242 4241 * We really don't care if something fails.
4243 4242 * If the PRLI was not sent out, then the
4244 4243 * other end will time it out.
4245 4244 */
4246 4245 if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4247 4246 return (FC_SUCCESS);
4248 4247 }
4249 4248 return (FC_UNCLAIMED);
4250 4249 /* NOTREACHED */
4251 4250
4252 4251 default:
4253 4252 break;
4254 4253 }
4255 4254 }
4256 4255 /* FALLTHROUGH */
4257 4256
4258 4257 default:
4259 4258 return (FC_UNCLAIMED);
4260 4259 }
4261 4260 }
4262 4261
4263 4262
4264 4263 /*ARGSUSED*/
4265 4264 static int
4266 4265 fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4267 4266 uint32_t claimed)
4268 4267 {
4269 4268 return (FC_UNCLAIMED);
4270 4269 }
4271 4270
4272 4271 /*
4273 4272 * Function: fcp_statec_callback
4274 4273 *
4275 4274 * Description: The purpose of this function is to handle a port state change.
4276 4275 * It is called from fp/fctl and, in a few instances, internally.
4277 4276 *
4278 4277 * Argument: ulph fp/fctl port handle
4279 4278 * port_handle fcp_port structure
4280 4279 * port_state Physical state of the port
4281 4280 * port_top Topology
4282 4281 * *devlist Pointer to the first entry of a table
4283 4282 * containing the remote ports that can be
4284 4283 * reached.
4285 4284 * dev_cnt Number of entries pointed by devlist.
4286 4285 * port_sid Port ID of the local port.
4287 4286 *
4288 4287 * Return Value: None
4289 4288 */
4290 4289 /*ARGSUSED*/
4291 4290 static void
4292 4291 fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
4293 4292 uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
4294 4293 uint32_t dev_cnt, uint32_t port_sid)
4295 4294 {
4296 4295 uint32_t link_count;
4297 4296 int map_len = 0;
4298 4297 struct fcp_port *pptr;
4299 4298 fcp_map_tag_t *map_tag = NULL;
4300 4299
4301 4300 if ((pptr = fcp_get_port(port_handle)) == NULL) {
4302 4301 fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
4303 4302 return; /* nothing to work with! */
4304 4303 }
4305 4304
4306 4305 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4307 4306 fcp_trace, FCP_BUF_LEVEL_2, 0,
4308 4307 "fcp_statec_callback: port state/dev_cnt/top ="
4309 4308 "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
4310 4309 dev_cnt, port_top);
4311 4310
4312 4311 mutex_enter(&pptr->port_mutex);
4313 4312
4314 4313 /*
4315 4314 * If a thread is in detach, don't do anything.
4316 4315 */
4317 4316 if (pptr->port_state & (FCP_STATE_DETACHING |
4318 4317 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4319 4318 mutex_exit(&pptr->port_mutex);
4320 4319 return;
4321 4320 }
4322 4321
4323 4322 /*
4324 4323 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
4325 4324 * init_pkt is called, it knows whether or not the target's status
4326 4325 * (or pd) might be changing.
4327 4326 */
4328 4327
4329 4328 if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
4330 4329 pptr->port_state |= FCP_STATE_IN_CB_DEVC;
4331 4330 }
4332 4331
4333 4332 /*
4334 4333 * the transport doesn't allocate or probe unless it is
4335 4334 * asked to by either the applications or the ULPs
4336 4335 *
4337 4336 * in cases where the port is OFFLINE at the time of the port
4338 4337 * attach callback and the link comes ONLINE later, the
4339 4338 * following is helpful for easier automatic node creation
4340 4339 * (i.e. without having to go out and run the utility to
4341 4340 * perform LOGINs)
4342 4341 */
4343 4342 pptr->port_phys_state = port_state;
4344 4343
4345 4344 if (dev_cnt) {
4346 4345 mutex_exit(&pptr->port_mutex);
4347 4346
4348 4347 map_len = sizeof (*map_tag) * dev_cnt;
4349 4348 map_tag = kmem_alloc(map_len, KM_NOSLEEP);
4350 4349 if (map_tag == NULL) {
4351 4350 fcp_log(CE_WARN, pptr->port_dip,
4352 4351 "!fcp%d: failed to allocate for map tags; "
4353 4352 " state change will not be processed",
4354 4353 pptr->port_instance);
4355 4354
4356 4355 mutex_enter(&pptr->port_mutex);
4357 4356 pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4358 4357 mutex_exit(&pptr->port_mutex);
4359 4358
4360 4359 return;
4361 4360 }
4362 4361
4363 4362 mutex_enter(&pptr->port_mutex);
4364 4363 }
4365 4364
4366 4365 if (pptr->port_id != port_sid) {
4367 4366 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4368 4367 fcp_trace, FCP_BUF_LEVEL_3, 0,
4369 4368 "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
4370 4369 port_sid);
4371 4370 /*
4372 4371 * The local port changed ID. It is the first time a port ID
4373 4372 * is assigned or something drastic happened. We might have
4374 4373 * been unplugged and replugged on another loop or fabric port
4375 4374 * or somebody grabbed the AL_PA we had or somebody rezoned
4376 4375 * the fabric we were plugged into.
4377 4376 */
4378 4377 pptr->port_id = port_sid;
4379 4378 }
4380 4379
4381 4380 switch (FC_PORT_STATE_MASK(port_state)) {
4382 4381 case FC_STATE_OFFLINE:
4383 4382 case FC_STATE_RESET_REQUESTED:
4384 4383 /*
4385 4384 * link has gone from online to offline -- just update the
4386 4385 * state of this port to BUSY and MARKed to go offline
4387 4386 */
4388 4387 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4389 4388 fcp_trace, FCP_BUF_LEVEL_3, 0,
4390 4389 "link went offline");
4391 4390 if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
4392 4391 /*
4393 4392 * We were offline a while ago and this one
4394 4393 * seems to indicate that the loop has gone
4395 4394 * dead forever.
4396 4395 */
4397 4396 pptr->port_tmp_cnt += dev_cnt;
4398 4397 pptr->port_state &= ~FCP_STATE_OFFLINE;
4399 4398 pptr->port_state |= FCP_STATE_INIT;
4400 4399 link_count = pptr->port_link_cnt;
4401 4400 fcp_handle_devices(pptr, devlist, dev_cnt,
4402 4401 link_count, map_tag, FCP_CAUSE_LINK_DOWN);
4403 4402 } else {
4404 4403 pptr->port_link_cnt++;
4405 4404 ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
4406 4405 fcp_update_state(pptr, (FCP_LUN_BUSY |
4407 4406 FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
4408 4407 if (pptr->port_mpxio) {
4409 4408 fcp_update_mpxio_path_verifybusy(pptr);
4410 4409 }
4411 4410 pptr->port_state |= FCP_STATE_OFFLINE;
4412 4411 pptr->port_state &=
4413 4412 ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
4414 4413 pptr->port_tmp_cnt = 0;
4415 4414 }
4416 4415 mutex_exit(&pptr->port_mutex);
4417 4416 break;
4418 4417
4419 4418 case FC_STATE_ONLINE:
4420 4419 case FC_STATE_LIP:
4421 4420 case FC_STATE_LIP_LBIT_SET:
4422 4421 /*
4423 4422 * link has gone from offline to online
4424 4423 */
4425 4424 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4426 4425 fcp_trace, FCP_BUF_LEVEL_3, 0,
4427 4426 "link went online");
4428 4427
4429 4428 pptr->port_link_cnt++;
4430 4429
4431 4430 while (pptr->port_ipkt_cnt) {
4432 4431 mutex_exit(&pptr->port_mutex);
4433 4432 delay(drv_usectohz(1000000));
4434 4433 mutex_enter(&pptr->port_mutex);
4435 4434 }
4436 4435
4437 4436 pptr->port_topology = port_top;
4438 4437
4439 4438 /*
4440 4439 * The state of the targets and luns accessible through this
4441 4440 * port is updated.
4442 4441 */
4443 4442 fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
4444 4443 FCP_CAUSE_LINK_CHANGE);
4445 4444
4446 4445 pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
4447 4446 pptr->port_state |= FCP_STATE_ONLINING;
4448 4447 pptr->port_tmp_cnt = dev_cnt;
4449 4448 link_count = pptr->port_link_cnt;
4450 4449
4451 4450 pptr->port_deadline = fcp_watchdog_time +
4452 4451 FCP_ICMD_DEADLINE;
4453 4452
4454 4453 if (!dev_cnt) {
4455 4454 /*
4456 4455 * We go directly to the online state if no remote
4457 4456 * ports were discovered.
4458 4457 */
4459 4458 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4460 4459 fcp_trace, FCP_BUF_LEVEL_3, 0,
4461 4460 "No remote ports discovered");
4462 4461
4463 4462 pptr->port_state &= ~FCP_STATE_ONLINING;
4464 4463 pptr->port_state |= FCP_STATE_ONLINE;
4465 4464 }
4466 4465
4467 4466 switch (port_top) {
4468 4467 case FC_TOP_FABRIC:
4469 4468 case FC_TOP_PUBLIC_LOOP:
4470 4469 case FC_TOP_PRIVATE_LOOP:
4471 4470 case FC_TOP_PT_PT:
4472 4471
4473 4472 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4474 4473 fcp_retry_ns_registry(pptr, port_sid);
4475 4474 }
4476 4475
4477 4476 fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
4478 4477 map_tag, FCP_CAUSE_LINK_CHANGE);
4479 4478 break;
4480 4479
4481 4480 default:
4482 4481 /*
4483 4482 * We got here because we were provided with an unknown
4484 4483 * topology.
4485 4484 */
4486 4485 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4487 4486 pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
4488 4487 }
4489 4488
4490 4489 pptr->port_tmp_cnt -= dev_cnt;
4491 4490 fcp_log(CE_WARN, pptr->port_dip,
4492 4491 "!unknown/unsupported topology (0x%x)", port_top);
4493 4492 break;
4494 4493 }
4495 4494 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4496 4495 fcp_trace, FCP_BUF_LEVEL_3, 0,
4497 4496 "Notify ssd of the reset to reinstate the reservations");
4498 4497
4499 4498 scsi_hba_reset_notify_callback(&pptr->port_mutex,
4500 4499 &pptr->port_reset_notify_listf);
4501 4500
4502 4501 mutex_exit(&pptr->port_mutex);
4503 4502
4504 4503 break;
4505 4504
4506 4505 case FC_STATE_RESET:
4507 4506 ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
4508 4507 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4509 4508 fcp_trace, FCP_BUF_LEVEL_3, 0,
4510 4509 "RESET state, waiting for Offline/Online state_cb");
4511 4510 mutex_exit(&pptr->port_mutex);
4512 4511 break;
4513 4512
4514 4513 case FC_STATE_DEVICE_CHANGE:
4515 4514 /*
4516 4515 * We come here when an application has requested
4517 4516 * Dynamic node creation/deletion in Fabric connectivity.
4518 4517 */
4519 4518 if (pptr->port_state & (FCP_STATE_OFFLINE |
4520 4519 FCP_STATE_INIT)) {
4521 4520 /*
4522 4521 * This case can happen when the FCTL is in the
4523 4522 * process of giving us an online and the host on
4524 4523 * the other side issues a PLOGI/PLOGO. Ideally
4525 4524 * the state changes should be serialized unless
4526 4525 * they are opposite (online-offline).
4527 4526 * The transport will give us a final state change
4528 4527 * so we can ignore this for the time being.
4529 4528 */
4530 4529 pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4531 4530 mutex_exit(&pptr->port_mutex);
4532 4531 break;
4533 4532 }
4534 4533
4535 4534 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4536 4535 fcp_retry_ns_registry(pptr, port_sid);
4537 4536 }
4538 4537
4539 4538 /*
4540 4539 * Extend the deadline under steady state conditions
4541 4540 * to provide more time for the device-change-commands
4542 4541 */
4543 4542 if (!pptr->port_ipkt_cnt) {
4544 4543 pptr->port_deadline = fcp_watchdog_time +
4545 4544 FCP_ICMD_DEADLINE;
4546 4545 }
4547 4546
4548 4547 /*
4549 4548 * There is another race condition here, where if we were
4550 4549 * in ONLINING state and a device in the map logs out,
4551 4550 * fp will give another state change as DEVICE_CHANGE
4552 4551 * and OLD. This will result in that target being offlined.
4553 4552 * The pd_handle is freed. If from the first statec callback
4554 4553 * we were going to fire a PLOGI/PRLI, the system will
4555 4554 * panic in fc_ulp_transport with invalid pd_handle.
4556 4555 * The fix is to check for the link_cnt before issuing
4557 4556 * any command down.
4558 4557 */
4559 4558 fcp_update_targets(pptr, devlist, dev_cnt,
4560 4559 FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);
4561 4560
4562 4561 link_count = pptr->port_link_cnt;
4563 4562
4564 4563 fcp_handle_devices(pptr, devlist, dev_cnt,
4565 4564 link_count, map_tag, FCP_CAUSE_TGT_CHANGE);
4566 4565
4567 4566 pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4568 4567
4569 4568 mutex_exit(&pptr->port_mutex);
4570 4569 break;
4571 4570
4572 4571 case FC_STATE_TARGET_PORT_RESET:
4573 4572 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4574 4573 fcp_retry_ns_registry(pptr, port_sid);
4575 4574 }
4576 4575
4577 4576 /* Do nothing else */
4578 4577 mutex_exit(&pptr->port_mutex);
4579 4578 break;
4580 4579
4581 4580 default:
4582 4581 fcp_log(CE_WARN, pptr->port_dip,
4583 4582 "!Invalid state change=0x%x", port_state);
4584 4583 mutex_exit(&pptr->port_mutex);
4585 4584 break;
4586 4585 }
4587 4586
4588 4587 if (map_tag) {
4589 4588 kmem_free(map_tag, map_len);
4590 4589 }
4591 4590 }
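
A recurring defensive pattern in this callback is the link generation
counter: port_link_cnt is incremented on every link transition, the value is
snapshotted before lengthy work starts, and downstream code (for example the
loops in fcp_handle_devices()) keeps comparing the snapshot against the live
counter so that work belonging to a stale link generation is quietly dropped.
The following is a minimal single-threaded sketch of that idea; the names and
the simulated "link bounce" are illustrative only.

#include <stdio.h>

/* Illustrative port state: only the generation counter matters here. */
struct fake_port {
    int link_cnt;   /* bumped on every link up/down transition */
};

/* Pretend the link bounced while we were busy processing entry 2. */
static void
maybe_bounce_link(struct fake_port *pp, int entry)
{
    if (entry == 2)
        pp->link_cnt++;
}

int
main(void)
{
    struct fake_port port = { .link_cnt = 7 };
    int snapshot = port.link_cnt;   /* taken when the work was queued */
    int i;

    for (i = 0; i < 5 && port.link_cnt == snapshot; i++) {
        printf("processing device %d for generation %d\n", i, snapshot);
        maybe_bounce_link(&port, i);
    }

    if (port.link_cnt != snapshot)
        printf("link changed (gen %d -> %d); remaining work dropped\n",
            snapshot, port.link_cnt);
    return (0);
}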
4592 4591
4593 4592 /*
4594 4593 * Function: fcp_handle_devices
4595 4594 *
4596 4595 * Description: This function updates the devices currently known by
4597 4596 * walking the list provided by the caller. The list passed
4598 4597 * by the caller is supposed to be the list of reachable
4599 4598 * devices.
4600 4599 *
4601 4600 * Argument: *pptr Fcp port structure.
4602 4601 * *devlist Pointer to the first entry of a table
4603 4602 * containing the remote ports that can be
4604 4603 * reached.
4605 4604 * dev_cnt Number of entries pointed by devlist.
4606 4605 * link_cnt Link state count.
4607 4606 * *map_tag Array of fcp_map_tag_t structures.
4608 4607 * cause What caused this function to be called.
4609 4608 *
4610 4609 * Return Value: None
4611 4610 *
4612 4611 * Notes: The pptr->port_mutex must be held.
4613 4612 */
4614 4613 static void
4615 4614 fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
4616 4615 uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
4617 4616 {
4618 4617 int i;
4619 4618 int check_finish_init = 0;
4620 4619 fc_portmap_t *map_entry;
4621 4620 struct fcp_tgt *ptgt = NULL;
4622 4621
4623 4622 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4624 4623 fcp_trace, FCP_BUF_LEVEL_3, 0,
4625 4624 "fcp_handle_devices: called for %d dev(s)", dev_cnt);
4626 4625
4627 4626 if (dev_cnt) {
4628 4627 ASSERT(map_tag != NULL);
4629 4628 }
4630 4629
4631 4630 /*
4632 4631 * The following code goes through the list of remote ports that are
4633 4632 * accessible through this (pptr) local port (The list walked is the
4634 4633 * one provided by the caller which is the list of the remote ports
4635 4634 * currently reachable). It checks if any of them was already
4636 4635 * known by looking for the corresponding target structure based on
4637 4636 * the world wide name. If a target is part of the list it is tagged
4638 4637 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
4639 4638 *
4640 4639 * Old comment
4641 4640 * -----------
4642 4641 * Before we drop the port mutex, we MUST get the tags updated. This
4643 4642 * two-step process is somewhat slow, but more reliable.
4644 4643 */
4645 4644 for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4646 4645 map_entry = &(devlist[i]);
4647 4646
4648 4647 /*
4649 4648 * get ptr to this map entry in our port's
4650 4649 * list (if any)
4651 4650 */
4652 4651 ptgt = fcp_lookup_target(pptr,
4653 4652 (uchar_t *)&(map_entry->map_pwwn));
4654 4653
4655 4654 if (ptgt) {
4656 4655 map_tag[i] = ptgt->tgt_change_cnt;
4657 4656 if (cause == FCP_CAUSE_LINK_CHANGE) {
4658 4657 ptgt->tgt_aux_state = FCP_TGT_TAGGED;
4659 4658 }
4660 4659 }
4661 4660 }
4662 4661
4663 4662 /*
4664 4663 * At this point we know which devices of the new list were already
4665 4664 * known (The field tgt_aux_state of the target structure has been
4666 4665 * set to FCP_TGT_TAGGED).
4667 4666 *
4668 4667 * The following code goes through the list of targets currently known
4669 4668 * by the local port (the list is actually a hashing table). If a
4670 4669 * target is found and is not tagged, it means the target cannot
4671 4670 * be reached anymore through the local port (pptr). It is offlined.
4672 4671 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
4673 4672 */
4674 4673 for (i = 0; i < FCP_NUM_HASH; i++) {
4675 4674 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
4676 4675 ptgt = ptgt->tgt_next) {
4677 4676 mutex_enter(&ptgt->tgt_mutex);
4678 4677 if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
4679 4678 (cause == FCP_CAUSE_LINK_CHANGE) &&
4680 4679 !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4681 4680 fcp_offline_target_now(pptr, ptgt,
4682 4681 link_cnt, ptgt->tgt_change_cnt, 0);
4683 4682 }
4684 4683 mutex_exit(&ptgt->tgt_mutex);
4685 4684 }
4686 4685 }
4687 4686
4688 4687 /*
4689 4688 * At this point, the devices that were known but cannot be reached
4690 4689 * anymore, have most likely been offlined.
4691 4690 *
4692 4691 * The following section of code seems to go through the list of
4693 4692 * remote ports that can now be reached. For every single one it
4694 4693 * checks if it is already known or if it is a new port.
4695 4694 */
4696 4695 for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4697 4696
4698 4697 if (check_finish_init) {
4699 4698 ASSERT(i > 0);
4700 4699 (void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4701 4700 map_tag[i - 1], cause);
4702 4701 check_finish_init = 0;
4703 4702 }
4704 4703
4705 4704 /* get a pointer to this map entry */
4706 4705 map_entry = &(devlist[i]);
4707 4706
4708 4707 /*
4709 4708 * Check for the duplicate map entry flag. If we have marked
4710 4709 * this entry as a duplicate we skip it since the correct
4711 4710 * (perhaps even same) state change will be encountered
4712 4711 * later in the list.
4713 4712 */
4714 4713 if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
4715 4714 continue;
4716 4715 }
4717 4716
4718 4717 /* get ptr to this map entry in our port's list (if any) */
4719 4718 ptgt = fcp_lookup_target(pptr,
4720 4719 (uchar_t *)&(map_entry->map_pwwn));
4721 4720
4722 4721 if (ptgt) {
4723 4722 /*
4724 4723 * This device was already known. The field
4725 4724 * tgt_aux_state is reset (was probably set to
4726 4725 * FCP_TGT_TAGGED previously in this routine).
4727 4726 */
4728 4727 ptgt->tgt_aux_state = 0;
4729 4728 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4730 4729 fcp_trace, FCP_BUF_LEVEL_3, 0,
4731 4730 "handle_devices: map did/state/type/flags = "
4732 4731 "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
4733 4732 "tgt_state=%d",
4734 4733 map_entry->map_did.port_id, map_entry->map_state,
4735 4734 map_entry->map_type, map_entry->map_flags,
4736 4735 ptgt->tgt_d_id, ptgt->tgt_state);
4737 4736 }
4738 4737
4739 4738 if (map_entry->map_type == PORT_DEVICE_OLD ||
4740 4739 map_entry->map_type == PORT_DEVICE_NEW ||
4741 4740 map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED ||
4742 4741 map_entry->map_type == PORT_DEVICE_CHANGED) {
4743 4742 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4744 4743 fcp_trace, FCP_BUF_LEVEL_2, 0,
4745 4744 "map_type=%x, did = %x",
4746 4745 map_entry->map_type,
4747 4746 map_entry->map_did.port_id);
4748 4747 }
4749 4748
4750 4749 switch (map_entry->map_type) {
4751 4750 case PORT_DEVICE_NOCHANGE:
4752 4751 case PORT_DEVICE_USER_CREATE:
4753 4752 case PORT_DEVICE_USER_LOGIN:
4754 4753 case PORT_DEVICE_NEW:
4755 4754 case PORT_DEVICE_REPORTLUN_CHANGED:
4756 4755 FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);
4757 4756
4758 4757 if (fcp_handle_mapflags(pptr, ptgt, map_entry,
4759 4758 link_cnt, (ptgt) ? map_tag[i] : 0,
4760 4759 cause) == TRUE) {
4761 4760
4762 4761 FCP_TGT_TRACE(ptgt, map_tag[i],
4763 4762 FCP_TGT_TRACE_2);
4764 4763 check_finish_init++;
4765 4764 }
4766 4765 break;
4767 4766
4768 4767 case PORT_DEVICE_OLD:
4769 4768 if (ptgt != NULL) {
4770 4769 FCP_TGT_TRACE(ptgt, map_tag[i],
4771 4770 FCP_TGT_TRACE_3);
4772 4771
4773 4772 mutex_enter(&ptgt->tgt_mutex);
4774 4773 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4775 4774 /*
4776 4775 * Must do an in-line wait for I/Os
4777 4776 * to get drained
4778 4777 */
4779 4778 mutex_exit(&ptgt->tgt_mutex);
4780 4779 mutex_exit(&pptr->port_mutex);
4781 4780
4782 4781 mutex_enter(&ptgt->tgt_mutex);
4783 4782 while (ptgt->tgt_ipkt_cnt ||
4784 4783 fcp_outstanding_lun_cmds(ptgt)
4785 4784 == FC_SUCCESS) {
4786 4785 mutex_exit(&ptgt->tgt_mutex);
4787 4786 delay(drv_usectohz(1000000));
4788 4787 mutex_enter(&ptgt->tgt_mutex);
4789 4788 }
4790 4789 mutex_exit(&ptgt->tgt_mutex);
4791 4790
4792 4791 mutex_enter(&pptr->port_mutex);
4793 4792 mutex_enter(&ptgt->tgt_mutex);
4794 4793
4795 4794 (void) fcp_offline_target(pptr, ptgt,
4796 4795 link_cnt, map_tag[i], 0, 0);
4797 4796 }
4798 4797 mutex_exit(&ptgt->tgt_mutex);
4799 4798 }
4800 4799 check_finish_init++;
4801 4800 break;
4802 4801
4803 4802 case PORT_DEVICE_USER_DELETE:
4804 4803 case PORT_DEVICE_USER_LOGOUT:
4805 4804 if (ptgt != NULL) {
4806 4805 FCP_TGT_TRACE(ptgt, map_tag[i],
4807 4806 FCP_TGT_TRACE_4);
4808 4807
4809 4808 mutex_enter(&ptgt->tgt_mutex);
4810 4809 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4811 4810 (void) fcp_offline_target(pptr, ptgt,
4812 4811 link_cnt, map_tag[i], 1, 0);
4813 4812 }
4814 4813 mutex_exit(&ptgt->tgt_mutex);
4815 4814 }
4816 4815 check_finish_init++;
4817 4816 break;
4818 4817
4819 4818 case PORT_DEVICE_CHANGED:
4820 4819 if (ptgt != NULL) {
4821 4820 FCP_TGT_TRACE(ptgt, map_tag[i],
4822 4821 FCP_TGT_TRACE_5);
4823 4822
4824 4823 if (fcp_device_changed(pptr, ptgt,
4825 4824 map_entry, link_cnt, map_tag[i],
4826 4825 cause) == TRUE) {
4827 4826 check_finish_init++;
4828 4827 }
4829 4828 } else {
4830 4829 if (fcp_handle_mapflags(pptr, ptgt,
4831 4830 map_entry, link_cnt, 0, cause) == TRUE) {
4832 4831 check_finish_init++;
4833 4832 }
4834 4833 }
4835 4834 break;
4836 4835
4837 4836 default:
4838 4837 fcp_log(CE_WARN, pptr->port_dip,
4839 4838 "!Invalid map_type=0x%x", map_entry->map_type);
4840 4839 check_finish_init++;
4841 4840 break;
4842 4841 }
4843 4842 }
4844 4843
4845 4844 if (check_finish_init && pptr->port_link_cnt == link_cnt) {
4846 4845 ASSERT(i > 0);
4847 4846 (void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4848 4847 map_tag[i-1], cause);
4849 4848 } else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
4850 4849 fcp_offline_all(pptr, link_cnt, cause);
4851 4850 }
4852 4851 }
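
fcp_handle_devices() reconciles the set of known targets against the newly
reported reachable set with a tag-and-sweep: every target found in the new
list is tagged (FCP_TGT_TAGGED), then the full hash table is walked and
anything left untagged is offlined. Below is a minimal user-space sketch of
that reconciliation over plain arrays; the data layout and names are
illustrative and deliberately ignore the locking, change counts, and hashing
the driver uses.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define NKNOWN  4

/* Illustrative known-target table: a name plus a "tagged" mark. */
struct fake_tgt {
    const char  *wwn;
    bool        tagged;
    bool        offline;
};

int
main(void)
{
    struct fake_tgt known[NKNOWN] = {
        { "wwn-a" }, { "wwn-b" }, { "wwn-c" }, { "wwn-d" }
    };
    const char *reachable[] = { "wwn-b", "wwn-d" };     /* the new map */
    int i, j;

    /* Pass 1: tag every known target that appears in the new map. */
    for (j = 0; j < (int)(sizeof (reachable) / sizeof (reachable[0])); j++)
        for (i = 0; i < NKNOWN; i++)
            if (strcmp(known[i].wwn, reachable[j]) == 0)
                known[i].tagged = true;

    /* Pass 2: sweep the table and offline whatever was not tagged. */
    for (i = 0; i < NKNOWN; i++) {
        if (!known[i].tagged)
            known[i].offline = true;
        printf("%s: %s\n", known[i].wwn,
            known[i].offline ? "offlined" : "still reachable");
    }
    return (0);
}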
4853 4852
4854 4853 static int
4855 4854 fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause)
4856 4855 {
4857 4856 struct fcp_lun *plun;
4858 4857 struct fcp_port *pptr;
4859 4858 int rscn_count;
4860 4859 int lun0_newalloc;
4861 4860 int ret = TRUE;
4862 4861
4863 4862 ASSERT(ptgt);
4864 4863 pptr = ptgt->tgt_port;
4865 4864 lun0_newalloc = 0;
4866 4865 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
4867 4866 /*
4868 4867 * no LUN struct for LUN 0 yet exists,
4869 4868 * so create one
4870 4869 */
4871 4870 plun = fcp_alloc_lun(ptgt);
4872 4871 if (plun == NULL) {
4873 4872 fcp_log(CE_WARN, pptr->port_dip,
4874 4873 "!Failed to allocate lun 0 for"
4875 4874 " D_ID=%x", ptgt->tgt_d_id);
4876 4875 return (ret);
4877 4876 }
4878 4877 lun0_newalloc = 1;
4879 4878 }
4880 4879
4881 4880 mutex_enter(&ptgt->tgt_mutex);
4882 4881 /*
4883 4882 * consider lun 0 as device not connected if it is
4884 4883 * offlined or newly allocated
4885 4884 */
4886 4885 if ((plun->lun_state & FCP_LUN_OFFLINE) || lun0_newalloc) {
4887 4886 plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
4888 4887 }
4889 4888 plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
4890 4889 plun->lun_state &= ~FCP_LUN_OFFLINE;
4891 4890 ptgt->tgt_lun_cnt = 1;
4892 4891 ptgt->tgt_report_lun_cnt = 0;
4893 4892 mutex_exit(&ptgt->tgt_mutex);
4894 4893
4895 4894 rscn_count = fc_ulp_get_rscn_count(pptr->port_fp_handle);
4896 4895 if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
4897 4896 sizeof (struct fcp_reportlun_resp), pptr->port_link_cnt,
4898 4897 ptgt->tgt_change_cnt, cause, rscn_count) != DDI_SUCCESS) {
4899 4898 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4900 4899 fcp_trace, FCP_BUF_LEVEL_3, 0, "!Failed to send REPORTLUN "
4901 4900 "to D_ID=%x", ptgt->tgt_d_id);
4902 4901 } else {
4903 4902 ret = FALSE;
4904 4903 }
4905 4904
4906 4905 return (ret);
4907 4906 }
4908 4907
4909 4908 /*
4910 4909 * Function: fcp_handle_mapflags
4911 4910 *
4912 4911 * Description: This function creates a target structure if the ptgt passed
4913 4912 * is NULL. It also kicks off the PLOGI if we are not logged
4914 4913 * into the target yet or the PRLI if we are logged into the
4915 4914 * target already. The rest of the treatment is done in the
4916 4915 * callbacks of the PLOGI or PRLI.
4917 4916 *
4918 4917 * Argument: *pptr FCP Port structure.
4919 4918 * *ptgt Target structure.
4920 4919 * *map_entry Array of fc_portmap_t structures.
4921 4920 * link_cnt Link state count.
4922 4921 * tgt_cnt Target state count.
4923 4922 * cause What caused this function to be called.
4924 4923 *
4925 4924 * Return Value: TRUE Failed
4926 4925 * FALSE Succeeded
4927 4926 *
4928 4927 * Notes: pptr->port_mutex must be owned.
4929 4928 */
4930 4929 static int
4931 4930 fcp_handle_mapflags(struct fcp_port *pptr, struct fcp_tgt *ptgt,
4932 4931 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
4933 4932 {
4934 4933 int lcount;
4935 4934 int tcount;
4936 4935 int ret = TRUE;
4937 4936 int alloc;
4938 4937 struct fcp_ipkt *icmd;
4939 4938 struct fcp_lun *pseq_lun = NULL;
4940 4939 uchar_t opcode;
4941 4940 int valid_ptgt_was_passed = FALSE;
4942 4941
4943 4942 ASSERT(mutex_owned(&pptr->port_mutex));
4944 4943
4945 4944 /*
4946 4945 * This case is possible where the FCTL has come up and done discovery
4947 4946 * before FCP was loaded and attached. FCTL would have discovered the
4948 4947 * devices and later the ULP came online. In this case ULPs would get
4949 4948 * PORT_DEVICE_NOCHANGE but the target would be NULL.
4950 4949 */
4951 4950 if (ptgt == NULL) {
4952 4951 /* don't already have a target */
4953 4952 mutex_exit(&pptr->port_mutex);
4954 4953 ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
4955 4954 mutex_enter(&pptr->port_mutex);
4956 4955
4957 4956 if (ptgt == NULL) {
4958 4957 fcp_log(CE_WARN, pptr->port_dip,
4959 4958 "!FC target allocation failed");
4960 4959 return (ret);
4961 4960 }
4962 4961 mutex_enter(&ptgt->tgt_mutex);
4963 4962 ptgt->tgt_statec_cause = cause;
4964 4963 ptgt->tgt_tmp_cnt = 1;
4965 4964 mutex_exit(&ptgt->tgt_mutex);
4966 4965 } else {
4967 4966 valid_ptgt_was_passed = TRUE;
4968 4967 }
4969 4968
4970 4969 /*
4971 4970 * Copy in the target parameters
4972 4971 */
4973 4972 mutex_enter(&ptgt->tgt_mutex);
4974 4973 ptgt->tgt_d_id = map_entry->map_did.port_id;
4975 4974 ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
4976 4975 ptgt->tgt_pd_handle = map_entry->map_pd;
4977 4976 ptgt->tgt_fca_dev = NULL;
4978 4977
4979 4978 /* Copy port and node WWNs */
4980 4979 bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
4981 4980 FC_WWN_SIZE);
4982 4981 bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
4983 4982 FC_WWN_SIZE);
4984 4983
4985 4984 if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
4986 4985 (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
4987 4986 (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
4988 4987 valid_ptgt_was_passed) {
4989 4988 /*
4990 4989 * determine if there are any tape LUNs on this target
4991 4990 */
4992 4991 for (pseq_lun = ptgt->tgt_lun;
4993 4992 pseq_lun != NULL;
4994 4993 pseq_lun = pseq_lun->lun_next) {
4995 4994 if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
4996 4995 !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
4997 4996 fcp_update_tgt_state(ptgt, FCP_RESET,
4998 4997 FCP_LUN_MARK);
4999 4998 mutex_exit(&ptgt->tgt_mutex);
5000 4999 return (ret);
5001 5000 }
5002 5001 }
5003 5002 }
5004 5003
5005 5004 /*
5006 5005	 * if a REPORT_LUN_CHANGED unit attention was received,
5007 5006	 * send out REPORT LUN promptly and skip the PLOGI/PRLI process
5008 5007 */
5009 5008 if (map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED) {
5010 5009 ptgt->tgt_state &= ~(FCP_TGT_OFFLINE | FCP_TGT_MARK);
5011 5010 mutex_exit(&ptgt->tgt_mutex);
5012 5011 mutex_exit(&pptr->port_mutex);
5013 5012
5014 5013 ret = fcp_handle_reportlun_changed(ptgt, cause);
5015 5014
5016 5015 mutex_enter(&pptr->port_mutex);
5017 5016 return (ret);
5018 5017 }
5019 5018
5020 5019 /*
5021 5020 * If ptgt was NULL when this function was entered, then tgt_node_state
5022 5021 * was never specifically initialized but zeroed out which means
5023 5022 * FCP_TGT_NODE_NONE.
5024 5023 */
5025 5024 switch (ptgt->tgt_node_state) {
5026 5025 case FCP_TGT_NODE_NONE:
5027 5026 case FCP_TGT_NODE_ON_DEMAND:
5028 5027 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
5029 5028 !fcp_enable_auto_configuration &&
5030 5029 map_entry->map_type != PORT_DEVICE_USER_CREATE) {
5031 5030 ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
5032 5031 } else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
5033 5032 fcp_enable_auto_configuration &&
5034 5033 (ptgt->tgt_manual_config_only == 1) &&
5035 5034 map_entry->map_type != PORT_DEVICE_USER_CREATE) {
5036 5035 /*
5037 5036 * If auto configuration is set and
5038 5037 * the tgt_manual_config_only flag is set then
5039 5038 * we only want the user to be able to change
5040 5039 * the state through create_on_demand.
5041 5040 */
5042 5041 ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
5043 5042 } else {
5044 5043 ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
5045 5044 }
5046 5045 break;
5047 5046
5048 5047 case FCP_TGT_NODE_PRESENT:
5049 5048 break;
5050 5049 }
5051 5050 /*
5052 5051 * If we are booting from a fabric device, make sure we
5053 5052 * mark the node state appropriately for this target to be
5054 5053 * enumerated
5055 5054 */
5056 5055 if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
5057 5056 if (bcmp((caddr_t)pptr->port_boot_wwn,
5058 5057 (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
5059 5058 sizeof (ptgt->tgt_port_wwn)) == 0) {
5060 5059 ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
5061 5060 }
5062 5061 }
5063 5062 mutex_exit(&ptgt->tgt_mutex);
5064 5063
5065 5064 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5066 5065 fcp_trace, FCP_BUF_LEVEL_3, 0,
5067 5066 "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
5068 5067 map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
5069 5068 map_entry->map_rscn_info.ulp_rscn_count);
5070 5069
5071 5070 mutex_enter(&ptgt->tgt_mutex);
5072 5071
5073 5072 /*
5074 5073 * Reset target OFFLINE state and mark the target BUSY
5075 5074 */
5076 5075 ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
5077 5076 ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);
5078 5077
5079 5078 tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
5080 5079 lcount = link_cnt;
5081 5080
5082 5081 mutex_exit(&ptgt->tgt_mutex);
5083 5082 mutex_exit(&pptr->port_mutex);
5084 5083
5085 5084 /*
5086 5085 * if we are already logged in, then we do a PRLI, else
5087 5086 * we do a PLOGI first (to get logged in)
5088 5087 *
5089 5088 * We will not check if we are the PLOGI initiator
5090 5089 */
5091 5090 opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
5092 5091 map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;
5093 5092
5094 5093 alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));
5095 5094
5096 5095 icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
5097 5096 pptr->port_state & FCP_STATE_FCA_IS_NODMA, lcount, tcount,
5098 5097 cause, map_entry->map_rscn_info.ulp_rscn_count);
5099 5098
5100 5099 if (icmd == NULL) {
5101 5100 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
5102 5101 /*
5103 5102 * We've exited port_mutex before calling fcp_icmd_alloc,
5104 5103 * we need to make sure we reacquire it before returning.
5105 5104 */
5106 5105 mutex_enter(&pptr->port_mutex);
5107 5106 return (FALSE);
5108 5107 }
5109 5108
5110 5109	/* TRUE is only returned when the target is intentionally skipped */
5111 5110 ret = FALSE;
5112 5111 /* discover info about this target */
5113 5112 if ((fcp_send_els(pptr, ptgt, icmd, opcode,
5114 5113 lcount, tcount, cause)) == DDI_SUCCESS) {
5115 5114 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
5116 5115 } else {
5117 5116 fcp_icmd_free(pptr, icmd);
5118 5117 ret = TRUE;
5119 5118 }
5120 5119 mutex_enter(&pptr->port_mutex);
5121 5120
5122 5121 return (ret);
5123 5122 }
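
fcp_handle_mapflags sizes one internal packet for either ELS by taking the larger of the PLOGI and PRLI payload sizes, then picks the opcode from the current login state. A minimal user-space sketch of that sizing and selection idea, using hypothetical stand-in payload structs (els_logi_t, els_prli_t) instead of the real la_els_* definitions:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for the la_els_logi_t / la_els_prli_t payloads. */
typedef struct { unsigned char payload[116]; } els_logi_t;
typedef struct { unsigned char payload[20]; } els_prli_t;

#define	ELS_MAX(a, b)	(((a) > (b)) ? (a) : (b))

enum { ELS_PLOGI, ELS_PRLI };

/* Already logged in and holding a port handle: PRLI; otherwise PLOGI first. */
static int
pick_opcode(int logged_in, void *pd_handle)
{
	return ((logged_in && pd_handle != NULL) ? ELS_PRLI : ELS_PLOGI);
}

int
main(void)
{
	/* One buffer sized for whichever ELS payload is larger. */
	size_t alloc = ELS_MAX(sizeof (els_logi_t), sizeof (els_prli_t));
	int opcode = pick_opcode(0, NULL);

	(void) printf("alloc=%zu bytes, opcode=%s\n", alloc,
	    opcode == ELS_PRLI ? "PRLI" : "PLOGI");
	return (0);
}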
5124 5123
5125 5124 /*
5126 5125 * Function: fcp_send_els
5127 5126 *
5128 5127 * Description: Sends an ELS to the target specified by the caller. Supports
5129 5128 * PLOGI and PRLI.
5130 5129 *
5131 5130 * Argument: *pptr Fcp port.
5132 5131 * *ptgt Target to send the ELS to.
5133 5132 * *icmd Internal packet
5134 5133 * opcode ELS opcode
5135 5134 * lcount Link state change counter
5136 5135 * tcount Target state change counter
5137 5136 * cause What caused the call
5138 5137 *
5139 5138 * Return Value: DDI_SUCCESS
5140 5139 * Others
5141 5140 */
5142 5141 static int
5143 5142 fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
5144 5143 struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
5145 5144 {
5146 5145 fc_packet_t *fpkt;
5147 5146 fc_frame_hdr_t *hp;
5148 5147 int internal = 0;
5149 5148 int alloc;
5150 5149 int cmd_len;
5151 5150 int resp_len;
5152 5151 int res = DDI_FAILURE; /* default result */
5153 5152 int rval = DDI_FAILURE;
5154 5153
5155 5154 ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
5156 5155 ASSERT(ptgt->tgt_port == pptr);
5157 5156
5158 5157 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5159 5158 fcp_trace, FCP_BUF_LEVEL_5, 0,
5160 5159 "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
5161 5160 (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");
5162 5161
5163 5162 if (opcode == LA_ELS_PLOGI) {
5164 5163 cmd_len = sizeof (la_els_logi_t);
5165 5164 resp_len = sizeof (la_els_logi_t);
5166 5165 } else {
5167 5166 ASSERT(opcode == LA_ELS_PRLI);
5168 5167 cmd_len = sizeof (la_els_prli_t);
5169 5168 resp_len = sizeof (la_els_prli_t);
5170 5169 }
5171 5170
5172 5171 if (icmd == NULL) {
5173 5172 alloc = FCP_MAX(sizeof (la_els_logi_t),
5174 5173 sizeof (la_els_prli_t));
5175 5174 icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
5176 5175 pptr->port_state & FCP_STATE_FCA_IS_NODMA,
5177 5176 lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
5178 5177 if (icmd == NULL) {
5179 5178 FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
5180 5179 return (res);
5181 5180 }
5182 5181 internal++;
5183 5182 }
5184 5183 fpkt = icmd->ipkt_fpkt;
5185 5184
5186 5185 fpkt->pkt_cmdlen = cmd_len;
5187 5186 fpkt->pkt_rsplen = resp_len;
5188 5187 fpkt->pkt_datalen = 0;
5189 5188 icmd->ipkt_retries = 0;
5190 5189
5191 5190 /* fill in fpkt info */
5192 5191 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5193 5192 fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
5194 5193 fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5195 5194
5196 5195 /* get ptr to frame hdr in fpkt */
5197 5196 hp = &fpkt->pkt_cmd_fhdr;
5198 5197
5199 5198 /*
5200 5199 * fill in frame hdr
5201 5200 */
5202 5201 hp->r_ctl = R_CTL_ELS_REQ;
5203 5202 hp->s_id = pptr->port_id; /* source ID */
5204 5203 hp->d_id = ptgt->tgt_d_id; /* dest ID */
5205 5204 hp->type = FC_TYPE_EXTENDED_LS;
5206 5205 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
5207 5206 hp->seq_id = 0;
5208 5207 hp->rsvd = 0;
5209 5208 hp->df_ctl = 0;
5210 5209 hp->seq_cnt = 0;
5211 5210 hp->ox_id = 0xffff; /* i.e. none */
5212 5211 hp->rx_id = 0xffff; /* i.e. none */
5213 5212 hp->ro = 0;
5214 5213
5215 5214 /*
5216 5215 * at this point we have a filled in cmd pkt
5217 5216 *
5218 5217 * fill in the respective info, then use the transport to send
5219 5218 * the packet
5220 5219 *
5221 5220 * for a PLOGI call fc_ulp_login(), and
5222 5221 * for a PRLI call fc_ulp_issue_els()
5223 5222 */
5224 5223 switch (opcode) {
5225 5224 case LA_ELS_PLOGI: {
5226 5225 struct la_els_logi logi;
5227 5226
5228 5227 bzero(&logi, sizeof (struct la_els_logi));
5229 5228
5230 5229 hp = &fpkt->pkt_cmd_fhdr;
5231 5230 hp->r_ctl = R_CTL_ELS_REQ;
5232 5231 logi.ls_code.ls_code = LA_ELS_PLOGI;
5233 5232 logi.ls_code.mbz = 0;
5234 5233
5235 5234 FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
5236 5235 fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
5237 5236
5238 5237 icmd->ipkt_opcode = LA_ELS_PLOGI;
5239 5238
5240 5239 mutex_enter(&pptr->port_mutex);
5241 5240 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5242 5241
5243 5242 mutex_exit(&pptr->port_mutex);
5244 5243
5245 5244 rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
5246 5245 if (rval == FC_SUCCESS) {
5247 5246 res = DDI_SUCCESS;
5248 5247 break;
5249 5248 }
5250 5249
5251 5250 FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);
5252 5251
5253 5252 res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5254 5253 rval, "PLOGI");
5255 5254 } else {
5256 5255 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5257 5256 fcp_trace, FCP_BUF_LEVEL_5, 0,
5258 5257			    "fcp_send_els1: state change occurred"
5259 5258 " for D_ID=0x%x", ptgt->tgt_d_id);
5260 5259 mutex_exit(&pptr->port_mutex);
5261 5260 FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
5262 5261 }
5263 5262 break;
5264 5263 }
5265 5264
5266 5265 case LA_ELS_PRLI: {
5267 5266 struct la_els_prli prli;
5268 5267 struct fcp_prli *fprli;
5269 5268
5270 5269 bzero(&prli, sizeof (struct la_els_prli));
5271 5270
5272 5271 hp = &fpkt->pkt_cmd_fhdr;
5273 5272 hp->r_ctl = R_CTL_ELS_REQ;
5274 5273
5275 5274 /* fill in PRLI cmd ELS fields */
5276 5275 prli.ls_code = LA_ELS_PRLI;
5277 5276 prli.page_length = 0x10; /* huh? */
5278 5277 prli.payload_length = sizeof (struct la_els_prli);
5279 5278
5280 5279 icmd->ipkt_opcode = LA_ELS_PRLI;
5281 5280
5282 5281 /* get ptr to PRLI service params */
5283 5282 fprli = (struct fcp_prli *)prli.service_params;
5284 5283
5285 5284 /* fill in service params */
5286 5285 fprli->type = 0x08;
5287 5286 fprli->resvd1 = 0;
5288 5287 fprli->orig_process_assoc_valid = 0;
5289 5288 fprli->resp_process_assoc_valid = 0;
5290 5289 fprli->establish_image_pair = 1;
5291 5290 fprli->resvd2 = 0;
5292 5291 fprli->resvd3 = 0;
5293 5292 fprli->obsolete_1 = 0;
5294 5293 fprli->obsolete_2 = 0;
5295 5294 fprli->data_overlay_allowed = 0;
5296 5295 fprli->initiator_fn = 1;
5297 5296 fprli->confirmed_compl_allowed = 1;
5298 5297
5299 5298 if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5300 5299 fprli->target_fn = 1;
5301 5300 } else {
5302 5301 fprli->target_fn = 0;
5303 5302 }
5304 5303
5305 5304 fprli->retry = 1;
5306 5305 fprli->read_xfer_rdy_disabled = 1;
5307 5306 fprli->write_xfer_rdy_disabled = 0;
5308 5307
5309 5308 FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5310 5309 fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5311 5310
5312 5311 /* issue the PRLI request */
5313 5312
5314 5313 mutex_enter(&pptr->port_mutex);
5315 5314 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5316 5315
5317 5316 mutex_exit(&pptr->port_mutex);
5318 5317
5319 5318 rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
5320 5319 if (rval == FC_SUCCESS) {
5321 5320 res = DDI_SUCCESS;
5322 5321 break;
5323 5322 }
5324 5323
5325 5324 FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);
5326 5325
5327 5326 res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5328 5327 rval, "PRLI");
5329 5328 } else {
5330 5329 mutex_exit(&pptr->port_mutex);
5331 5330 FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
5332 5331 }
5333 5332 break;
5334 5333 }
5335 5334
5336 5335 default:
5337 5336 fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
5338 5337 break;
5339 5338 }
5340 5339
5341 5340 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5342 5341 fcp_trace, FCP_BUF_LEVEL_5, 0,
5343 5342 "fcp_send_els: returning %d", res);
5344 5343
5345 5344 if (res != DDI_SUCCESS) {
5346 5345 if (internal) {
5347 5346 fcp_icmd_free(pptr, icmd);
5348 5347 }
5349 5348 }
5350 5349
5351 5350 return (res);
5352 5351 }
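
fcp_send_els either uses the icmd handed in by the caller or allocates its own, and the internal flag ensures a failure path frees only packets this function allocated. A small sketch of that ownership pattern, with a hypothetical ipkt_t stand-in (the real code hands the packet to the transport and frees it from a callback):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the internal packet. */
typedef struct { int opcode; } ipkt_t;

/*
 * Use the caller's packet if one was passed in, otherwise allocate one and
 * remember that, so the failure path frees only what this function owns.
 */
static int
send_els(ipkt_t *icmd, int opcode, int simulate_failure)
{
	int internal = 0;

	if (icmd == NULL) {
		if ((icmd = malloc(sizeof (*icmd))) == NULL)
			return (-1);
		internal = 1;
	}
	icmd->opcode = opcode;

	if (simulate_failure) {
		if (internal)		/* only free packets allocated here */
			free(icmd);
		return (-1);
	}
	/* ... hand the packet to the transport here ... */
	if (internal)
		free(icmd);		/* sketch only: fcp frees in a callback */
	return (0);
}

int
main(void)
{
	ipkt_t caller_owned = { 0 };

	(void) printf("caller-owned, failing: %d\n",
	    send_els(&caller_owned, 1, 1));
	(void) printf("self-allocated, ok:    %d\n", send_els(NULL, 1, 0));
	return (0);
}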
5353 5352
5354 5353
5355 5354 /*
5356 5355 * called internally to update the state of all of the tgts and each LUN
5357 5356 * for this port (i.e. each target known to be attached to this port)
5358 5357 * if they are not already offline
5359 5358 *
5360 5359 * must be called with the port mutex owned
5361 5360 *
5362 5361 * acquires and releases the target mutexes for each target attached
5363 5362 * to this port
5364 5363 */
5365 5364 void
5366 5365 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5367 5366 {
5368 5367 int i;
5369 5368 struct fcp_tgt *ptgt;
5370 5369
5371 5370 ASSERT(mutex_owned(&pptr->port_mutex));
5372 5371
5373 5372 for (i = 0; i < FCP_NUM_HASH; i++) {
5374 5373 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5375 5374 ptgt = ptgt->tgt_next) {
5376 5375 mutex_enter(&ptgt->tgt_mutex);
5377 5376 fcp_update_tgt_state(ptgt, FCP_SET, state);
5378 5377 ptgt->tgt_change_cnt++;
5379 5378 ptgt->tgt_statec_cause = cause;
5380 5379 ptgt->tgt_tmp_cnt = 1;
5381 5380 ptgt->tgt_done = 0;
5382 5381 mutex_exit(&ptgt->tgt_mutex);
5383 5382 }
5384 5383 }
5385 5384 }
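
fcp_update_state visits every bucket of the port's fixed-size target hash table and every target chained in each bucket. A minimal sketch of that walk, assuming a simplified tgt node and a small NUM_HASH stand-in for FCP_NUM_HASH (the real code also takes the per-target mutex around each update):

#include <stdio.h>

#define	NUM_HASH	4	/* stand-in for FCP_NUM_HASH */

/* Hypothetical, simplified target node. */
struct tgt {
	unsigned int	state;
	struct tgt	*next;
};

/* Visit every target in every bucket and OR in the requested state bits. */
static void
update_all(struct tgt *table[NUM_HASH], unsigned int state)
{
	for (int i = 0; i < NUM_HASH; i++) {
		for (struct tgt *t = table[i]; t != NULL; t = t->next)
			t->state |= state;
	}
}

int
main(void)
{
	struct tgt a = { 0, NULL }, b = { 0, &a }, c = { 0, NULL };
	struct tgt *table[NUM_HASH] = { &b, NULL, &c, NULL };

	update_all(table, 0x4);		/* e.g. a BUSY-style bit */
	(void) printf("a=0x%x b=0x%x c=0x%x\n", a.state, b.state, c.state);
	return (0);
}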
5386 5385
5387 5386
5388 5387 static void
5389 5388 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5390 5389 {
5391 5390 int i;
5392 5391 int ndevs;
5393 5392 struct fcp_tgt *ptgt;
5394 5393
5395 5394 ASSERT(mutex_owned(&pptr->port_mutex));
5396 5395
5397 5396 for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5398 5397 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5399 5398 ptgt = ptgt->tgt_next) {
5400 5399 ndevs++;
5401 5400 }
5402 5401 }
5403 5402
5404 5403 if (ndevs == 0) {
5405 5404 return;
5406 5405 }
5407 5406 pptr->port_tmp_cnt = ndevs;
5408 5407
5409 5408 for (i = 0; i < FCP_NUM_HASH; i++) {
5410 5409 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5411 5410 ptgt = ptgt->tgt_next) {
5412 5411 (void) fcp_call_finish_init_held(pptr, ptgt,
5413 5412 lcount, ptgt->tgt_change_cnt, cause);
5414 5413 }
5415 5414 }
5416 5415 }
5417 5416
5418 5417 /*
5419 5418 * Function: fcp_update_tgt_state
5420 5419 *
5421 5420 * Description: This function updates the field tgt_state of a target. That
5422 5421  *		field is a bitmap whose bits can be set or reset
5423 5422  *		individually. The action applied to the target state is also
5424 5423  *		applied to all the LUNs belonging to the target (provided the
5425 5424  *		LUN is not offline). A side effect of applying the state
5426 5425  *		modification to the target and the LUNs is that the target's
5427 5426  *		tgt_trace and the LUNs' lun_trace fields are set to zero.
5428 5427 *
5429 5428 *
5430 5429 * Argument: *ptgt Target structure.
5431 5430  *		flag		Flag indicating what action to apply (set/reset).
5432 5431 * state State bits to update.
5433 5432 *
5434 5433 * Return Value: None
5435 5434 *
5436 5435 * Context: Interrupt, Kernel or User context.
5437 5436 * The mutex of the target (ptgt->tgt_mutex) must be owned when
5438 5437 * calling this function.
5439 5438 */
5440 5439 void
5441 5440 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5442 5441 {
5443 5442 struct fcp_lun *plun;
5444 5443
5445 5444 ASSERT(mutex_owned(&ptgt->tgt_mutex));
5446 5445
5447 5446 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5448 5447 /* The target is not offline. */
5449 5448 if (flag == FCP_SET) {
5450 5449 ptgt->tgt_state |= state;
5451 5450 ptgt->tgt_trace = 0;
5452 5451 } else {
5453 5452 ptgt->tgt_state &= ~state;
5454 5453 }
5455 5454
5456 5455 for (plun = ptgt->tgt_lun; plun != NULL;
5457 5456 plun = plun->lun_next) {
5458 5457 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5459 5458 /* The LUN is not offline. */
5460 5459 if (flag == FCP_SET) {
5461 5460 plun->lun_state |= state;
5462 5461 plun->lun_trace = 0;
5463 5462 } else {
5464 5463 plun->lun_state &= ~state;
5465 5464 }
5466 5465 }
5467 5466 }
5468 5467 }
5469 5468 }
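
The set/reset behavior described above is the usual bitmap idiom: OR the bits in for FCP_SET, AND them out otherwise. A runnable sketch with hypothetical ST_* bits standing in for the FCP_TGT_* state flags:

#include <stdio.h>

#define	ST_BUSY		0x02
#define	ST_MARK		0x04

#define	OP_SET		1
#define	OP_RESET	0

/* Apply a set or reset operation to a state bitmap. */
static unsigned int
apply(unsigned int state, int op, unsigned int bits)
{
	return ((op == OP_SET) ? (state | bits) : (state & ~bits));
}

int
main(void)
{
	unsigned int tgt_state = 0;

	tgt_state = apply(tgt_state, OP_SET, ST_BUSY | ST_MARK);
	(void) printf("after set:   0x%x\n", tgt_state);	/* 0x6 */

	tgt_state = apply(tgt_state, OP_RESET, ST_MARK);
	(void) printf("after reset: 0x%x\n", tgt_state);	/* 0x2 */
	return (0);
}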
5470 5469
5471 5470 /*
5472 5471  * Function: fcp_update_lun_state
5473 5472  *
5474 5473  * Description: This function updates the field lun_state of a LUN. That
5475 5474  *		field is a bitmap whose bits can be set or reset
5476 5475  *		individually.
5477 5476 *
5478 5477 * Argument: *plun LUN structure.
5479 5478  *		flag		Flag indicating what action to apply (set/reset).
5480 5479 * state State bits to update.
5481 5480 *
5482 5481 * Return Value: None
5483 5482 *
5484 5483 * Context: Interrupt, Kernel or User context.
5485 5484 * The mutex of the target (ptgt->tgt_mutex) must be owned when
5486 5485 * calling this function.
5487 5486 */
5488 5487 void
5489 5488 fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
5490 5489 {
5491 5490 struct fcp_tgt *ptgt = plun->lun_tgt;
5492 5491
5493 5492 ASSERT(mutex_owned(&ptgt->tgt_mutex));
5494 5493
5495 5494 if (!(plun->lun_state & FCP_TGT_OFFLINE)) {
5496 5495 if (flag == FCP_SET) {
5497 5496 plun->lun_state |= state;
5498 5497 } else {
5499 5498 plun->lun_state &= ~state;
5500 5499 }
5501 5500 }
5502 5501 }
5503 5502
5504 5503 /*
5505 5504 * Function: fcp_get_port
5506 5505 *
5507 5506 * Description: This function returns the fcp_port structure from the opaque
5508 5507 * handle passed by the caller. That opaque handle is the handle
5509 5508 * used by fp/fctl to identify a particular local port. That
5510 5509 * handle has been stored in the corresponding fcp_port
5511 5510 * structure. This function is going to walk the global list of
5512 5511 * fcp_port structures till one has a port_fp_handle that matches
5513 5512 * the handle passed by the caller. This function enters the
5514 5513 * mutex fcp_global_mutex while walking the global list and then
5515 5514 * releases it.
5516 5515 *
5517 5516 * Argument: port_handle Opaque handle that fp/fctl uses to identify a
5518 5517 * particular port.
5519 5518 *
5520 5519 * Return Value: NULL Not found.
5521 5520 * Not NULL Pointer to the fcp_port structure.
5522 5521 *
5523 5522 * Context: Interrupt, Kernel or User context.
5524 5523 */
5525 5524 static struct fcp_port *
5526 5525 fcp_get_port(opaque_t port_handle)
5527 5526 {
5528 5527 struct fcp_port *pptr;
5529 5528
5530 5529 ASSERT(port_handle != NULL);
5531 5530
5532 5531 mutex_enter(&fcp_global_mutex);
5533 5532 for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5534 5533 if (pptr->port_fp_handle == port_handle) {
5535 5534 break;
5536 5535 }
5537 5536 }
5538 5537 mutex_exit(&fcp_global_mutex);
5539 5538
5540 5539 return (pptr);
5541 5540 }
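
fcp_get_port walks a global singly linked list under a global mutex until a node's stored handle matches the caller's opaque handle. A user-space sketch of the same lookup, using a pthread mutex in place of fcp_global_mutex and a simplified port node:

#include <stdio.h>
#include <pthread.h>

/* Hypothetical, simplified port node keyed by an opaque handle. */
struct port {
	void		*handle;
	struct port	*next;
};

static pthread_mutex_t	list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct port	*port_head;

/* Walk the global list under the lock; return the matching port or NULL. */
static struct port *
get_port(void *handle)
{
	struct port *p;

	(void) pthread_mutex_lock(&list_lock);
	for (p = port_head; p != NULL; p = p->next) {
		if (p->handle == handle)
			break;
	}
	(void) pthread_mutex_unlock(&list_lock);
	return (p);
}

int
main(void)
{
	int h1, h2, h3;
	struct port b = { &h2, NULL }, a = { &h1, &b };

	port_head = &a;
	(void) printf("h2 found: %s\n", get_port(&h2) == &b ? "yes" : "no");
	(void) printf("h3 found: %s\n", get_port(&h3) ? "yes" : "no");
	return (0);
}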
5542 5541
5543 5542
5544 5543 static void
5545 5544 fcp_unsol_callback(fc_packet_t *fpkt)
5546 5545 {
5547 5546 struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5548 5547 struct fcp_port *pptr = icmd->ipkt_port;
5549 5548
5550 5549 if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5551 5550 caddr_t state, reason, action, expln;
5552 5551
5553 5552 (void) fc_ulp_pkt_error(fpkt, &state, &reason,
5554 5553 &action, &expln);
5555 5554
5556 5555 fcp_log(CE_WARN, pptr->port_dip,
5557 5556 "!couldn't post response to unsolicited request: "
5558 5557		    " state=%s reason=%s ox_id=%x rx_id=%x",
5559 5558 state, reason, fpkt->pkt_cmd_fhdr.ox_id,
5560 5559 fpkt->pkt_cmd_fhdr.rx_id);
5561 5560 }
5562 5561 fcp_icmd_free(pptr, icmd);
5563 5562 }
5564 5563
5565 5564
5566 5565 /*
5567 5566 * Perform general purpose preparation of a response to an unsolicited request
5568 5567 */
5569 5568 static void
5570 5569 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
5571 5570 uchar_t r_ctl, uchar_t type)
5572 5571 {
5573 5572 pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
5574 5573 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
5575 5574 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
5576 5575 pkt->pkt_cmd_fhdr.type = type;
5577 5576 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
5578 5577 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
5579 5578 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl;
5580 5579 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
5581 5580 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
5582 5581 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
5583 5582 pkt->pkt_cmd_fhdr.ro = 0;
5584 5583 pkt->pkt_cmd_fhdr.rsvd = 0;
5585 5584 pkt->pkt_comp = fcp_unsol_callback;
5586 5585 pkt->pkt_pd = NULL;
5587 5586 pkt->pkt_ub_resp_token = (opaque_t)buf;
5588 5587 }
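
When answering an unsolicited frame, the response header above swaps the received source and destination IDs and echoes the exchange identifiers. A small sketch of that header derivation with a hypothetical, trimmed-down frame_hdr:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical, trimmed-down frame header. */
struct frame_hdr {
	uint32_t	s_id, d_id;
	uint16_t	ox_id, rx_id;
};

/*
 * Build a response header from a received request: swap the source and
 * destination IDs and echo the exchange identifiers.
 */
static struct frame_hdr
make_response(const struct frame_hdr *req)
{
	struct frame_hdr rsp;

	rsp.s_id = req->d_id;	/* answer from the address they targeted */
	rsp.d_id = req->s_id;	/* back to whoever sent the request */
	rsp.ox_id = req->ox_id;	/* keep the exchange identifiers */
	rsp.rx_id = req->rx_id;
	return (rsp);
}

int
main(void)
{
	struct frame_hdr req = { 0x010203, 0x040506, 0x1234, 0xffff };
	struct frame_hdr rsp = make_response(&req);

	(void) printf("rsp s_id=%x d_id=%x ox_id=%x rx_id=%x\n",
	    rsp.s_id, rsp.d_id, rsp.ox_id, rsp.rx_id);
	return (0);
}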
5589 5588
5590 5589
5591 5590 /*ARGSUSED*/
5592 5591 static int
5593 5592 fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
5594 5593 {
5595 5594 fc_packet_t *fpkt;
5596 5595 struct la_els_prli prli;
5597 5596 struct fcp_prli *fprli;
5598 5597 struct fcp_ipkt *icmd;
5599 5598 struct la_els_prli *from;
5600 5599 struct fcp_prli *orig;
5601 5600 struct fcp_tgt *ptgt;
5602 5601 int tcount = 0;
5603 5602 int lcount;
5604 5603
5605 5604 from = (struct la_els_prli *)buf->ub_buffer;
5606 5605 orig = (struct fcp_prli *)from->service_params;
5607 5606 if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
5608 5607 NULL) {
5609 5608 mutex_enter(&ptgt->tgt_mutex);
5610 5609 tcount = ptgt->tgt_change_cnt;
5611 5610 mutex_exit(&ptgt->tgt_mutex);
5612 5611 }
5613 5612
5614 5613 mutex_enter(&pptr->port_mutex);
5615 5614 lcount = pptr->port_link_cnt;
5616 5615 mutex_exit(&pptr->port_mutex);
5617 5616
5618 5617 if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
5619 5618 sizeof (la_els_prli_t), 0,
5620 5619 pptr->port_state & FCP_STATE_FCA_IS_NODMA,
5621 5620 lcount, tcount, 0, FC_INVALID_RSCN_COUNT)) == NULL) {
5622 5621 return (FC_FAILURE);
5623 5622 }
5624 5623
5625 5624 fpkt = icmd->ipkt_fpkt;
5626 5625 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5627 5626 fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
5628 5627 fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5629 5628 fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
5630 5629 fpkt->pkt_rsplen = 0;
5631 5630 fpkt->pkt_datalen = 0;
5632 5631
5633 5632 icmd->ipkt_opcode = LA_ELS_PRLI;
5634 5633
5635 5634 bzero(&prli, sizeof (struct la_els_prli));
5636 5635 fprli = (struct fcp_prli *)prli.service_params;
5637 5636 prli.ls_code = LA_ELS_ACC;
5638 5637 prli.page_length = 0x10;
5639 5638 prli.payload_length = sizeof (struct la_els_prli);
5640 5639
5641 5640 /* fill in service params */
5642 5641 fprli->type = 0x08;
5643 5642 fprli->resvd1 = 0;
5644 5643 fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
5645 5644 fprli->orig_process_associator = orig->orig_process_associator;
5646 5645 fprli->resp_process_assoc_valid = 0;
5647 5646 fprli->establish_image_pair = 1;
5648 5647 fprli->resvd2 = 0;
5649 5648 fprli->resvd3 = 0;
5650 5649 fprli->obsolete_1 = 0;
5651 5650 fprli->obsolete_2 = 0;
5652 5651 fprli->data_overlay_allowed = 0;
5653 5652 fprli->initiator_fn = 1;
5654 5653 fprli->confirmed_compl_allowed = 1;
5655 5654
5656 5655 if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5657 5656 fprli->target_fn = 1;
5658 5657 } else {
5659 5658 fprli->target_fn = 0;
5660 5659 }
5661 5660
5662 5661 fprli->retry = 1;
5663 5662 fprli->read_xfer_rdy_disabled = 1;
5664 5663 fprli->write_xfer_rdy_disabled = 0;
5665 5664
5666 5665 /* save the unsol prli payload first */
5667 5666 FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
5668 5667 fpkt->pkt_resp_acc, sizeof (struct la_els_prli));
5669 5668
5670 5669 FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5671 5670 fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5672 5671
5673 5672 fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
5674 5673
5675 5674 mutex_enter(&pptr->port_mutex);
5676 5675 if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
5677 5676 int rval;
5678 5677 mutex_exit(&pptr->port_mutex);
5679 5678
5680 5679 if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
5681 5680 FC_SUCCESS) {
5682 5681 if ((rval == FC_STATEC_BUSY || rval == FC_OFFLINE) &&
5683 5682 ptgt != NULL) {
5684 5683 fcp_queue_ipkt(pptr, fpkt);
5685 5684 return (FC_SUCCESS);
5686 5685 }
5687 5686 /* Let it timeout */
5688 5687 fcp_icmd_free(pptr, icmd);
5689 5688 return (FC_FAILURE);
5690 5689 }
5691 5690 } else {
5692 5691 mutex_exit(&pptr->port_mutex);
5693 5692 fcp_icmd_free(pptr, icmd);
5694 5693 return (FC_FAILURE);
5695 5694 }
5696 5695
5697 5696 (void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);
5698 5697
5699 5698 return (FC_SUCCESS);
5700 5699 }
5701 5700
5702 5701 /*
5703 5702 * Function: fcp_icmd_alloc
5704 5703 *
5705 5704  * Description: This function allocates a fcp_ipkt structure. The pkt_comp
5706 5705 * field is initialized to fcp_icmd_callback. Sometimes it is
5707 5706 * modified by the caller (such as fcp_send_scsi). The
5708 5707 * structure is also tied to the state of the line and of the
5709 5708 * target at a particular time. That link is established by
5710 5709 * setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
5711 5710 * and tcount which came respectively from pptr->link_cnt and
5712 5711 * ptgt->tgt_change_cnt.
5713 5712 *
5714 5713 * Argument: *pptr Fcp port.
5715 5714 * *ptgt Target (destination of the command).
5716 5715 * cmd_len Length of the command.
5717 5716 * resp_len Length of the expected response.
5718 5717 * data_len Length of the data.
5719 5718  *		nodma		Indicates whether the command and response
5720 5719  *				will be transferred through DMA or not.
5721 5720 * lcount Link state change counter.
5722 5721 * tcount Target state change counter.
5723 5722  *		cause		Reason that led to this call.
5724 5723 *
5725 5724 * Return Value: NULL Failed.
5726 5725 * Not NULL Internal packet address.
5727 5726 */
5728 5727 static struct fcp_ipkt *
5729 5728 fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
5730 5729 int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
5731 5730 uint32_t rscn_count)
5732 5731 {
5733 5732 int dma_setup = 0;
5734 5733 fc_packet_t *fpkt;
5735 5734 struct fcp_ipkt *icmd = NULL;
5736 5735
5737 5736 icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
5738 5737 pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
5739 5738 KM_NOSLEEP);
5740 5739 if (icmd == NULL) {
5741 5740 fcp_log(CE_WARN, pptr->port_dip,
5742 5741 "!internal packet allocation failed");
5743 5742 return (NULL);
5744 5743 }
5745 5744
5746 5745 /*
5747 5746 * initialize the allocated packet
5748 5747 */
5749 5748 icmd->ipkt_nodma = nodma;
5750 5749 icmd->ipkt_next = icmd->ipkt_prev = NULL;
5751 5750 icmd->ipkt_lun = NULL;
5752 5751
5753 5752 icmd->ipkt_link_cnt = lcount;
5754 5753 icmd->ipkt_change_cnt = tcount;
5755 5754 icmd->ipkt_cause = cause;
5756 5755
5757 5756 mutex_enter(&pptr->port_mutex);
5758 5757 icmd->ipkt_port = pptr;
5759 5758 mutex_exit(&pptr->port_mutex);
5760 5759
5761 5760 /* keep track of amt of data to be sent in pkt */
5762 5761 icmd->ipkt_cmdlen = cmd_len;
5763 5762 icmd->ipkt_resplen = resp_len;
5764 5763 icmd->ipkt_datalen = data_len;
5765 5764
5766 5765 /* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
5767 5766 icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);
5768 5767
5769 5768 /* set pkt's private ptr to point to cmd pkt */
5770 5769 icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;
5771 5770
5772 5771 /* set FCA private ptr to memory just beyond */
5773 5772 icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
5774 5773 ((char *)icmd + sizeof (struct fcp_ipkt) +
5775 5774 pptr->port_dmacookie_sz);
5776 5775
5777 5776 /* get ptr to fpkt substruct and fill it in */
5778 5777 fpkt = icmd->ipkt_fpkt;
5779 5778 fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
5780 5779 sizeof (struct fcp_ipkt));
5781 5780
5782 5781 if (ptgt != NULL) {
5783 5782 icmd->ipkt_tgt = ptgt;
5784 5783 fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
5785 5784 }
5786 5785
5787 5786 fpkt->pkt_comp = fcp_icmd_callback;
5788 5787 fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
5789 5788 fpkt->pkt_cmdlen = cmd_len;
5790 5789 fpkt->pkt_rsplen = resp_len;
5791 5790 fpkt->pkt_datalen = data_len;
5792 5791
5793 5792 /*
5794 5793 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
5795 5794	 * rscn_count, as fcp knows it, down to the transport. If a valid count was
5796 5795 * passed into this function, we allocate memory to actually pass down
5797 5796 * this info.
5798 5797 *
5799 5798 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
5800 5799 * basically mean that fcp will not be able to help transport
5801 5800 * distinguish if a new RSCN has come after fcp was last informed about
5802 5801 * it. In such cases, it might lead to the problem mentioned in CR/bug #
5803 5802 * 5068068 where the device might end up going offline in case of RSCN
5804 5803 * storms.
5805 5804 */
5806 5805 fpkt->pkt_ulp_rscn_infop = NULL;
5807 5806 if (rscn_count != FC_INVALID_RSCN_COUNT) {
5808 5807 fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
5809 5808 sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
5810 5809 if (fpkt->pkt_ulp_rscn_infop == NULL) {
5811 5810 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5812 5811 fcp_trace, FCP_BUF_LEVEL_6, 0,
5813 5812 "Failed to alloc memory to pass rscn info");
5814 5813 }
5815 5814 }
5816 5815
5817 5816 if (fpkt->pkt_ulp_rscn_infop != NULL) {
5818 5817 fc_ulp_rscn_info_t *rscnp;
5819 5818
5820 5819 rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
5821 5820 rscnp->ulp_rscn_count = rscn_count;
5822 5821 }
5823 5822
5824 5823 if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
5825 5824 goto fail;
5826 5825 }
5827 5826 dma_setup++;
5828 5827
5829 5828 /*
5830 5829 * Must hold target mutex across setting of pkt_pd and call to
5831 5830 * fc_ulp_init_packet to ensure the handle to the target doesn't go
5832 5831 * away while we're not looking.
5833 5832 */
5834 5833 if (ptgt != NULL) {
5835 5834 mutex_enter(&ptgt->tgt_mutex);
5836 5835 fpkt->pkt_pd = ptgt->tgt_pd_handle;
5837 5836
5838 5837 /* ask transport to do its initialization on this pkt */
5839 5838 if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5840 5839 != FC_SUCCESS) {
5841 5840 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5842 5841 fcp_trace, FCP_BUF_LEVEL_6, 0,
5843 5842 "fc_ulp_init_packet failed");
5844 5843 mutex_exit(&ptgt->tgt_mutex);
5845 5844 goto fail;
5846 5845 }
5847 5846 mutex_exit(&ptgt->tgt_mutex);
5848 5847 } else {
5849 5848 if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5850 5849 != FC_SUCCESS) {
5851 5850 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5852 5851 fcp_trace, FCP_BUF_LEVEL_6, 0,
5853 5852 "fc_ulp_init_packet failed");
5854 5853 goto fail;
5855 5854 }
5856 5855 }
5857 5856
5858 5857 mutex_enter(&pptr->port_mutex);
5859 5858 if (pptr->port_state & (FCP_STATE_DETACHING |
5860 5859 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
5861 5860 int rval;
5862 5861
5863 5862 mutex_exit(&pptr->port_mutex);
5864 5863
5865 5864 rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
5866 5865 ASSERT(rval == FC_SUCCESS);
5867 5866
5868 5867 goto fail;
5869 5868 }
5870 5869
5871 5870 if (ptgt != NULL) {
5872 5871 mutex_enter(&ptgt->tgt_mutex);
5873 5872 ptgt->tgt_ipkt_cnt++;
5874 5873 mutex_exit(&ptgt->tgt_mutex);
5875 5874 }
5876 5875
5877 5876 pptr->port_ipkt_cnt++;
5878 5877
5879 5878 mutex_exit(&pptr->port_mutex);
5880 5879
5881 5880 return (icmd);
5882 5881
5883 5882 fail:
5884 5883 if (fpkt->pkt_ulp_rscn_infop != NULL) {
5885 5884 kmem_free(fpkt->pkt_ulp_rscn_infop,
5886 5885 sizeof (fc_ulp_rscn_info_t));
5887 5886 fpkt->pkt_ulp_rscn_infop = NULL;
5888 5887 }
5889 5888
5890 5889 if (dma_setup) {
5891 5890 fcp_free_dma(pptr, icmd);
5892 5891 }
5893 5892 kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5894 5893 (size_t)pptr->port_dmacookie_sz);
5895 5894
5896 5895 return (NULL);
5897 5896 }
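
fcp_icmd_alloc makes one zeroed allocation and carves it into the ipkt itself, the DMA cookie area, and the FCA private area, locating the latter two by pointer arithmetic past the header. A minimal sketch of that single-allocation layout, with made-up sizes in place of port_dmacookie_sz and port_priv_pkt_len:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical sizes standing in for the driver's per-port values. */
#define	COOKIE_AREA	64	/* would be port_dmacookie_sz */
#define	FCA_PRIV_LEN	128	/* would be port_priv_pkt_len */

/* Simplified internal-packet header. */
struct ipkt {
	void	*cookies;	/* points just past this struct */
	void	*fca_private;	/* points past the cookie area */
};

/* One zeroed allocation, three regions, located by pointer arithmetic. */
static struct ipkt *
ipkt_alloc(void)
{
	struct ipkt *icmd;

	icmd = calloc(1, sizeof (struct ipkt) + COOKIE_AREA + FCA_PRIV_LEN);
	if (icmd == NULL)
		return (NULL);
	icmd->cookies = (char *)icmd + sizeof (struct ipkt);
	icmd->fca_private = (char *)icmd + sizeof (struct ipkt) + COOKIE_AREA;
	return (icmd);
}

int
main(void)
{
	struct ipkt *icmd = ipkt_alloc();

	if (icmd != NULL) {
		(void) printf("ipkt=%p cookies=%p fca_private=%p\n",
		    (void *)icmd, icmd->cookies, icmd->fca_private);
		free(icmd);
	}
	return (0);
}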
5898 5897
5899 5898 /*
5900 5899 * Function: fcp_icmd_free
5901 5900 *
5902 5901 * Description: Frees the internal command passed by the caller.
5903 5902 *
5904 5903 * Argument: *pptr Fcp port.
5905 5904 * *icmd Internal packet to free.
5906 5905 *
5907 5906 * Return Value: None
5908 5907 */
5909 5908 static void
5910 5909 fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5911 5910 {
5912 5911 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
5913 5912
5914 5913 /* Let the underlying layers do their cleanup. */
5915 5914 (void) fc_ulp_uninit_packet(pptr->port_fp_handle,
5916 5915 icmd->ipkt_fpkt);
5917 5916
5918 5917 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
5919 5918 kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
5920 5919 sizeof (fc_ulp_rscn_info_t));
5921 5920 }
5922 5921
5923 5922 fcp_free_dma(pptr, icmd);
5924 5923
5925 5924 kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5926 5925 (size_t)pptr->port_dmacookie_sz);
5927 5926
5928 5927 mutex_enter(&pptr->port_mutex);
5929 5928
5930 5929 if (ptgt) {
5931 5930 mutex_enter(&ptgt->tgt_mutex);
5932 5931 ptgt->tgt_ipkt_cnt--;
5933 5932 mutex_exit(&ptgt->tgt_mutex);
5934 5933 }
5935 5934
5936 5935 pptr->port_ipkt_cnt--;
5937 5936 mutex_exit(&pptr->port_mutex);
5938 5937 }
5939 5938
5940 5939 /*
5941 5940 * Function: fcp_alloc_dma
5942 5941 *
5943 5942  * Description: Allocates the DMA resources required for the internal
5944 5943 * packet.
5945 5944 *
5946 5945 * Argument: *pptr FCP port.
5947 5946 * *icmd Internal FCP packet.
5948 5947 * nodma Indicates if the Cmd and Resp will be DMAed.
5949 5948 * flags Allocation flags (Sleep or NoSleep).
5950 5949 *
5951 5950 * Return Value: FC_SUCCESS
5952 5951 * FC_NOMEM
5953 5952 */
5954 5953 static int
5955 5954 fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
5956 5955 int nodma, int flags)
5957 5956 {
5958 5957 int rval;
5959 5958 size_t real_size;
5960 5959 uint_t ccount;
5961 5960 int bound = 0;
5962 5961 int cmd_resp = 0;
5963 5962 fc_packet_t *fpkt;
5964 5963 ddi_dma_cookie_t pkt_data_cookie;
5965 5964 ddi_dma_cookie_t *cp;
5966 5965 uint32_t cnt;
5967 5966
5968 5967 fpkt = &icmd->ipkt_fc_packet;
5969 5968
5970 5969 ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
5971 5970 fpkt->pkt_resp_dma == NULL);
5972 5971
5973 5972 icmd->ipkt_nodma = nodma;
5974 5973
5975 5974 if (nodma) {
5976 5975 fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
5977 5976 if (fpkt->pkt_cmd == NULL) {
5978 5977 goto fail;
5979 5978 }
5980 5979
5981 5980 fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
5982 5981 if (fpkt->pkt_resp == NULL) {
5983 5982 goto fail;
5984 5983 }
5985 5984 } else {
5986 5985 ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);
5987 5986
5988 5987 rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
5989 5988 if (rval == FC_FAILURE) {
5990 5989 ASSERT(fpkt->pkt_cmd_dma == NULL &&
5991 5990 fpkt->pkt_resp_dma == NULL);
5992 5991 goto fail;
5993 5992 }
5994 5993 cmd_resp++;
5995 5994 }
5996 5995
5997 5996 if ((fpkt->pkt_datalen != 0) &&
5998 5997 !(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
5999 5998 /*
6000 5999 * set up DMA handle and memory for the data in this packet
6001 6000 */
6002 6001 if (ddi_dma_alloc_handle(pptr->port_dip,
6003 6002 &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
6004 6003 NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
6005 6004 goto fail;
6006 6005 }
6007 6006
6008 6007 if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
6009 6008 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
6010 6009 DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
6011 6010 &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
6012 6011 goto fail;
6013 6012 }
6014 6013
6015 6014 /* was DMA mem size gotten < size asked for/needed ?? */
6016 6015 if (real_size < fpkt->pkt_datalen) {
6017 6016 goto fail;
6018 6017 }
6019 6018
6020 6019 /* bind DMA address and handle together */
6021 6020 if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
6022 6021 NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
6023 6022 DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
6024 6023 &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
6025 6024 goto fail;
6026 6025 }
6027 6026 bound++;
6028 6027
6029 6028 if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
6030 6029 goto fail;
6031 6030 }
6032 6031
6033 6032 fpkt->pkt_data_cookie_cnt = ccount;
6034 6033
6035 6034 cp = fpkt->pkt_data_cookie;
6036 6035 *cp = pkt_data_cookie;
6037 6036 cp++;
6038 6037
6039 6038 for (cnt = 1; cnt < ccount; cnt++, cp++) {
6040 6039 ddi_dma_nextcookie(fpkt->pkt_data_dma,
6041 6040 &pkt_data_cookie);
6042 6041 *cp = pkt_data_cookie;
6043 6042 }
6044 6043
6045 6044 } else if (fpkt->pkt_datalen != 0) {
6046 6045 /*
6047 6046 * If it's a pseudo FCA, then it can't support DMA even in
6048 6047 * SCSI data phase.
6049 6048 */
6050 6049 fpkt->pkt_data = kmem_alloc(fpkt->pkt_datalen, flags);
6051 6050 if (fpkt->pkt_data == NULL) {
6052 6051 goto fail;
6053 6052 }
6054 6053
6055 6054 }
6056 6055
6057 6056 return (FC_SUCCESS);
6058 6057
6059 6058 fail:
6060 6059 if (bound) {
6061 6060 (void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6062 6061 }
6063 6062
6064 6063 if (fpkt->pkt_data_dma) {
6065 6064 if (fpkt->pkt_data) {
6066 6065 ddi_dma_mem_free(&fpkt->pkt_data_acc);
6067 6066 }
6068 6067 ddi_dma_free_handle(&fpkt->pkt_data_dma);
6069 6068 } else {
6070 6069 if (fpkt->pkt_data) {
6071 6070 kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6072 6071 }
6073 6072 }
6074 6073
6075 6074 if (nodma) {
6076 6075 if (fpkt->pkt_cmd) {
6077 6076 kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
6078 6077 }
6079 6078 if (fpkt->pkt_resp) {
6080 6079 kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
6081 6080 }
6082 6081 } else {
6083 6082 if (cmd_resp) {
6084 6083 fcp_free_cmd_resp(pptr, fpkt);
6085 6084 }
6086 6085 }
6087 6086
6088 6087 return (FC_NOMEM);
6089 6088 }
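
fcp_alloc_dma acquires its resources in stages and unwinds them from a single fail: label, using flags such as bound and cmd_resp to know which teardown calls apply. A simplified user-space sketch of that goto-fail pattern (plain malloc/free here, so no flags are needed because free(NULL) is harmless; the DDI handles need them since unbind and handle/memory free are distinct calls):

#include <stdio.h>
#include <stdlib.h>

/*
 * Acquire resources in stages; on any failure, jump to one cleanup label
 * and release whatever was actually acquired.
 */
static int
setup(int fail_at)
{
	char *cmd = NULL, *resp = NULL, *data = NULL;

	if ((cmd = malloc(32)) == NULL || fail_at == 1)
		goto fail;
	if ((resp = malloc(64)) == NULL || fail_at == 2)
		goto fail;
	if ((data = malloc(128)) == NULL || fail_at == 3)
		goto fail;

	/* ... use the buffers here ... */
	free(data);
	free(resp);
	free(cmd);
	return (0);

fail:
	free(data);		/* free(NULL) is harmless */
	free(resp);
	free(cmd);
	return (-1);
}

int
main(void)
{
	for (int i = 0; i <= 3; i++)
		(void) printf("fail_at=%d -> %d\n", i, setup(i));
	return (0);
}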
6090 6089
6091 6090
6092 6091 static void
6093 6092 fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
6094 6093 {
6095 6094 fc_packet_t *fpkt = icmd->ipkt_fpkt;
6096 6095
6097 6096 if (fpkt->pkt_data_dma) {
6098 6097 (void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6099 6098 if (fpkt->pkt_data) {
6100 6099 ddi_dma_mem_free(&fpkt->pkt_data_acc);
6101 6100 }
6102 6101 ddi_dma_free_handle(&fpkt->pkt_data_dma);
6103 6102 } else {
6104 6103 if (fpkt->pkt_data) {
6105 6104 kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6106 6105 }
6107 6106 /*
6108 6107 * Need we reset pkt_* to zero???
6109 6108 */
6110 6109 }
6111 6110
6112 6111 if (icmd->ipkt_nodma) {
6113 6112 if (fpkt->pkt_cmd) {
6114 6113 kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
6115 6114 }
6116 6115 if (fpkt->pkt_resp) {
6117 6116 kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
6118 6117 }
6119 6118 } else {
6120 6119 ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
6121 6120
6122 6121 fcp_free_cmd_resp(pptr, fpkt);
6123 6122 }
6124 6123 }
6125 6124
6126 6125 /*
6127 6126 * Function: fcp_lookup_target
6128 6127 *
6129 6128 * Description: Finds a target given a WWN.
6130 6129 *
6131 6130 * Argument: *pptr FCP port.
6132 6131 * *wwn World Wide Name of the device to look for.
6133 6132 *
6134 6133 * Return Value: NULL No target found
6135 6134 * Not NULL Target structure
6136 6135 *
6137 6136 * Context: Interrupt context.
6138 6137 * The mutex pptr->port_mutex must be owned.
6139 6138 */
6140 6139 /* ARGSUSED */
6141 6140 static struct fcp_tgt *
6142 6141 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
6143 6142 {
6144 6143 int hash;
6145 6144 struct fcp_tgt *ptgt;
6146 6145
6147 6146 ASSERT(mutex_owned(&pptr->port_mutex));
6148 6147
6149 6148 hash = FCP_HASH(wwn);
6150 6149
6151 6150 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
6152 6151 ptgt = ptgt->tgt_next) {
6153 6152 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
6154 6153 bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
6155 6154 sizeof (ptgt->tgt_port_wwn)) == 0) {
6156 6155 break;
6157 6156 }
6158 6157 }
6159 6158
6160 6159 return (ptgt);
6161 6160 }
6162 6161
6163 6162
6164 6163 /*
6165 6164 * Find target structure given a port identifier
6166 6165 */
6167 6166 static struct fcp_tgt *
6168 6167 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6169 6168 {
6170 6169 fc_portid_t port_id;
6171 6170 la_wwn_t pwwn;
6172 6171 struct fcp_tgt *ptgt = NULL;
6173 6172
6174 6173 port_id.priv_lilp_posit = 0;
6175 6174 port_id.port_id = d_id;
6176 6175 if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6177 6176 &pwwn) == FC_SUCCESS) {
6178 6177 mutex_enter(&pptr->port_mutex);
6179 6178 ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6180 6179 mutex_exit(&pptr->port_mutex);
6181 6180 }
6182 6181
6183 6182 return (ptgt);
6184 6183 }
6185 6184
6186 6185
6187 6186 /*
6188 6187 * the packet completion callback routine for info cmd pkts
6189 6188 *
6190 6189 * this means fpkt points to a response to either a PLOGI or a PRLI
6191 6190 *
6192 6191 * if there is an error an attempt is made to call a routine to resend
6193 6192 * the command that failed
6194 6193 */
6195 6194 static void
6196 6195 fcp_icmd_callback(fc_packet_t *fpkt)
6197 6196 {
6198 6197 struct fcp_ipkt *icmd;
6199 6198 struct fcp_port *pptr;
6200 6199 struct fcp_tgt *ptgt;
6201 6200 struct la_els_prli *prli;
6202 6201 struct la_els_prli prli_s;
6203 6202 struct fcp_prli *fprli;
6204 6203 struct fcp_lun *plun;
6205 6204 int free_pkt = 1;
6206 6205 int rval;
6207 6206 ls_code_t resp;
6208 6207 uchar_t prli_acc = 0;
6209 6208 uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
6210 6209 int lun0_newalloc;
6211 6210
6212 6211 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
6213 6212
6214 6213 /* get ptrs to the port and target structs for the cmd */
6215 6214 pptr = icmd->ipkt_port;
6216 6215 ptgt = icmd->ipkt_tgt;
6217 6216
6218 6217 FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));
6219 6218
6220 6219 if (icmd->ipkt_opcode == LA_ELS_PRLI) {
6221 6220 FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
6222 6221 sizeof (prli_s));
6223 6222 prli_acc = (prli_s.ls_code == LA_ELS_ACC);
6224 6223 }
6225 6224
6226 6225 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6227 6226 fcp_trace, FCP_BUF_LEVEL_2, 0,
6228 6227 "ELS (%x) callback state=0x%x reason=0x%x for %x",
6229 6228 icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
6230 6229 ptgt->tgt_d_id);
6231 6230
6232 6231 if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
6233 6232 ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {
6234 6233
6235 6234 mutex_enter(&ptgt->tgt_mutex);
6236 6235 if (ptgt->tgt_pd_handle == NULL) {
6237 6236 /*
6238 6237 * in a fabric environment the port device handles
6239 6238 * get created only after successful LOGIN into the
6240 6239 * transport, so the transport makes this port
6241 6240 * device (pd) handle available in this packet, so
6242 6241 * save it now
6243 6242 */
6244 6243 ASSERT(fpkt->pkt_pd != NULL);
6245 6244 ptgt->tgt_pd_handle = fpkt->pkt_pd;
6246 6245 }
6247 6246 mutex_exit(&ptgt->tgt_mutex);
6248 6247
6249 6248 /* which ELS cmd is this response for ?? */
6250 6249 switch (icmd->ipkt_opcode) {
6251 6250 case LA_ELS_PLOGI:
6252 6251 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6253 6252 fcp_trace, FCP_BUF_LEVEL_5, 0,
6254 6253 "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
6255 6254 ptgt->tgt_d_id,
6256 6255 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
6257 6256 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));
6258 6257
6259 6258 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6260 6259 FCP_TGT_TRACE_15);
6261 6260
6262 6261 /* Note that we are not allocating a new icmd */
6263 6262 if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
6264 6263 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6265 6264 icmd->ipkt_cause) != DDI_SUCCESS) {
6266 6265 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6267 6266 FCP_TGT_TRACE_16);
6268 6267 goto fail;
6269 6268 }
6270 6269 break;
6271 6270
6272 6271 case LA_ELS_PRLI:
6273 6272 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6274 6273 fcp_trace, FCP_BUF_LEVEL_5, 0,
6275 6274 "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);
6276 6275
6277 6276 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6278 6277 FCP_TGT_TRACE_17);
6279 6278
6280 6279 prli = &prli_s;
6281 6280
6282 6281 FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
6283 6282 sizeof (prli_s));
6284 6283
6285 6284 fprli = (struct fcp_prli *)prli->service_params;
6286 6285
6287 6286 mutex_enter(&ptgt->tgt_mutex);
6288 6287 ptgt->tgt_icap = fprli->initiator_fn;
6289 6288 ptgt->tgt_tcap = fprli->target_fn;
6290 6289 mutex_exit(&ptgt->tgt_mutex);
6291 6290
6292 6291 if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
6293 6292 /*
6294 6293 * this FCP device does not support target mode
6295 6294 */
6296 6295 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6297 6296 FCP_TGT_TRACE_18);
6298 6297 goto fail;
6299 6298 }
6300 6299 if (fprli->retry == 1) {
6301 6300 fc_ulp_disable_relogin(pptr->port_fp_handle,
6302 6301 &ptgt->tgt_port_wwn);
6303 6302 }
6304 6303
6305 6304 /* target is no longer offline */
6306 6305 mutex_enter(&pptr->port_mutex);
6307 6306 mutex_enter(&ptgt->tgt_mutex);
6308 6307 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6309 6308 ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
6310 6309 FCP_TGT_MARK);
6311 6310 } else {
6312 6311 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6313 6312 fcp_trace, FCP_BUF_LEVEL_2, 0,
6314 6313 "fcp_icmd_callback,1: state change "
6315 6314				    " occurred for D_ID=0x%x", ptgt->tgt_d_id);
6316 6315 mutex_exit(&ptgt->tgt_mutex);
6317 6316 mutex_exit(&pptr->port_mutex);
6318 6317 goto fail;
6319 6318 }
6320 6319 mutex_exit(&ptgt->tgt_mutex);
6321 6320 mutex_exit(&pptr->port_mutex);
6322 6321
6323 6322 /*
6324 6323 * lun 0 should always respond to inquiry, so
6325 6324 * get the LUN struct for LUN 0
6326 6325 *
6327 6326 * Currently we deal with first level of addressing.
6328 6327 * If / when we start supporting 0x device types
6329 6328 * (DTYPE_ARRAY_CTRL, i.e. array controllers)
6330 6329 * this logic will need revisiting.
6331 6330 */
6332 6331 lun0_newalloc = 0;
6333 6332 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
6334 6333 /*
6335 6334 * no LUN struct for LUN 0 yet exists,
6336 6335 * so create one
6337 6336 */
6338 6337 plun = fcp_alloc_lun(ptgt);
6339 6338 if (plun == NULL) {
6340 6339 fcp_log(CE_WARN, pptr->port_dip,
6341 6340 "!Failed to allocate lun 0 for"
6342 6341 " D_ID=%x", ptgt->tgt_d_id);
6343 6342 goto fail;
6344 6343 }
6345 6344 lun0_newalloc = 1;
6346 6345 }
6347 6346
6348 6347 /* fill in LUN info */
6349 6348 mutex_enter(&ptgt->tgt_mutex);
6350 6349 /*
6351 6350 * consider lun 0 as device not connected if it is
6352 6351 * offlined or newly allocated
6353 6352 */
6354 6353 if ((plun->lun_state & FCP_LUN_OFFLINE) ||
6355 6354 lun0_newalloc) {
6356 6355 plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
6357 6356 }
6358 6357 plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
6359 6358 plun->lun_state &= ~FCP_LUN_OFFLINE;
6360 6359 ptgt->tgt_lun_cnt = 1;
6361 6360 ptgt->tgt_report_lun_cnt = 0;
6362 6361 mutex_exit(&ptgt->tgt_mutex);
6363 6362
6364 6363 /* Retrieve the rscn count (if a valid one exists) */
6365 6364 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
6366 6365 rscn_count = ((fc_ulp_rscn_info_t *)
6367 6366 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
6368 6367 ->ulp_rscn_count;
6369 6368 } else {
6370 6369 rscn_count = FC_INVALID_RSCN_COUNT;
6371 6370 }
6372 6371
6373 6372 /* send Report Lun request to target */
6374 6373 if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
6375 6374 sizeof (struct fcp_reportlun_resp),
6376 6375 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6377 6376 icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
6378 6377 mutex_enter(&pptr->port_mutex);
6379 6378 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6380 6379 fcp_log(CE_WARN, pptr->port_dip,
6381 6380 "!Failed to send REPORT LUN to"
6382 6381 " D_ID=%x", ptgt->tgt_d_id);
6383 6382 } else {
6384 6383 FCP_TRACE(fcp_logq,
6385 6384 pptr->port_instbuf, fcp_trace,
6386 6385 FCP_BUF_LEVEL_5, 0,
6387 6386 "fcp_icmd_callback,2:state change"
6388 6387					    " occurred for D_ID=0x%x",
6389 6388 ptgt->tgt_d_id);
6390 6389 }
6391 6390 mutex_exit(&pptr->port_mutex);
6392 6391
6393 6392 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6394 6393 FCP_TGT_TRACE_19);
6395 6394
6396 6395 goto fail;
6397 6396 } else {
6398 6397 free_pkt = 0;
6399 6398 fcp_icmd_free(pptr, icmd);
6400 6399 }
6401 6400 break;
6402 6401
6403 6402 default:
6404 6403 fcp_log(CE_WARN, pptr->port_dip,
6405 6404 "!fcp_icmd_callback Invalid opcode");
6406 6405 goto fail;
6407 6406 }
6408 6407
6409 6408 return;
6410 6409 }
6411 6410
6412 6411
6413 6412 /*
6414 6413 * Other PLOGI failures are not retried as the
6415 6414 * transport does it already
6416 6415 */
6417 6416 if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
6418 6417 if (fcp_is_retryable(icmd) &&
6419 6418 icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6420 6419
6421 6420 if (FCP_MUST_RETRY(fpkt)) {
6422 6421 fcp_queue_ipkt(pptr, fpkt);
6423 6422 return;
6424 6423 }
6425 6424
6426 6425 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6427 6426 fcp_trace, FCP_BUF_LEVEL_2, 0,
6428 6427 "ELS PRLI is retried for d_id=0x%x, state=%x,"
6429 6428 " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
6430 6429 fpkt->pkt_reason);
6431 6430
6432 6431 /*
6433 6432 * Retry by recalling the routine that
6434 6433 * originally queued this packet
6435 6434 */
6436 6435 mutex_enter(&pptr->port_mutex);
6437 6436 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6438 6437 caddr_t msg;
6439 6438
6440 6439 mutex_exit(&pptr->port_mutex);
6441 6440
6442 6441 ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);
6443 6442
6444 6443 if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
6445 6444 fpkt->pkt_timeout +=
6446 6445 FCP_TIMEOUT_DELTA;
6447 6446 }
6448 6447
6449 6448 rval = fc_ulp_issue_els(pptr->port_fp_handle,
6450 6449 fpkt);
6451 6450 if (rval == FC_SUCCESS) {
6452 6451 return;
6453 6452 }
6454 6453
6455 6454 if (rval == FC_STATEC_BUSY ||
6456 6455 rval == FC_OFFLINE) {
6457 6456 fcp_queue_ipkt(pptr, fpkt);
6458 6457 return;
6459 6458 }
6460 6459 (void) fc_ulp_error(rval, &msg);
6461 6460
6462 6461 fcp_log(CE_NOTE, pptr->port_dip,
6463 6462 "!ELS 0x%x failed to d_id=0x%x;"
6464 6463 " %s", icmd->ipkt_opcode,
6465 6464 ptgt->tgt_d_id, msg);
6466 6465 } else {
6467 6466 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6468 6467 fcp_trace, FCP_BUF_LEVEL_2, 0,
6469 6468 "fcp_icmd_callback,3: state change "
6470 6469				    " occurred for D_ID=0x%x", ptgt->tgt_d_id);
6471 6470 mutex_exit(&pptr->port_mutex);
6472 6471 }
6473 6472 }
6474 6473 } else {
6475 6474 if (fcp_is_retryable(icmd) &&
6476 6475 icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6477 6476 if (FCP_MUST_RETRY(fpkt)) {
6478 6477 fcp_queue_ipkt(pptr, fpkt);
6479 6478 return;
6480 6479 }
6481 6480 }
6482 6481 mutex_enter(&pptr->port_mutex);
6483 6482 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
6484 6483 fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
6485 6484 mutex_exit(&pptr->port_mutex);
6486 6485 fcp_print_error(fpkt);
6487 6486 } else {
6488 6487 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6489 6488 fcp_trace, FCP_BUF_LEVEL_2, 0,
6490 6489			    "fcp_icmd_callback,4: state change occurred"
6491 6490 " for D_ID=0x%x", ptgt->tgt_d_id);
6492 6491 mutex_exit(&pptr->port_mutex);
6493 6492 }
6494 6493 }
6495 6494
6496 6495 fail:
6497 6496 if (free_pkt) {
6498 6497 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6499 6498 icmd->ipkt_change_cnt, icmd->ipkt_cause);
6500 6499 fcp_icmd_free(pptr, icmd);
6501 6500 }
6502 6501 }
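
The retry logic above is a bounded retry: ipkt_retries advances and the ELS is reissued only while the failure looks retryable and the FCP_MAX_RETRIES budget lasts. A stripped-down sketch of that idea, with a stand-in issue() that reports busy twice before succeeding (the real code also requeues on FC_STATEC_BUSY/FC_OFFLINE and rechecks link and target state first):

#include <stdio.h>

#define	MAX_RETRIES	3	/* stand-in for FCP_MAX_RETRIES */

enum result { R_SUCCESS, R_BUSY, R_ERROR };

/* Stand-in for reissuing the ELS: busy twice, then success. */
static enum result
issue(int attempt)
{
	return ((attempt < 2) ? R_BUSY : R_SUCCESS);
}

/*
 * Bounded retry: keep reissuing while the failure is worth retrying and
 * the retry budget lasts; give up on hard errors or exhaustion.
 */
static int
send_with_retry(void)
{
	int retries = 0;

	for (;;) {
		enum result r = issue(retries);

		if (r == R_SUCCESS)
			return (0);
		if (r != R_BUSY || retries++ >= MAX_RETRIES)
			return (-1);
		(void) printf("busy, retry %d of %d\n", retries, MAX_RETRIES);
	}
}

int
main(void)
{
	(void) printf("result=%d\n", send_with_retry());
	return (0);
}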
6503 6502
6504 6503
6505 6504 /*
6506 6505 * called internally to send an info cmd using the transport
6507 6506 *
6508 6507 * sends either an INQ or a REPORT_LUN
6509 6508 *
6510 6509 * when the packet is completed fcp_scsi_callback is called
6511 6510 */
6512 6511 static int
6513 6512 fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
6514 6513 int lcount, int tcount, int cause, uint32_t rscn_count)
6515 6514 {
6516 6515 int nodma;
6517 6516 struct fcp_ipkt *icmd;
6518 6517 struct fcp_tgt *ptgt;
6519 6518 struct fcp_port *pptr;
6520 6519 fc_frame_hdr_t *hp;
6521 6520 fc_packet_t *fpkt;
6522 6521 struct fcp_cmd fcp_cmd;
6523 6522 struct fcp_cmd *fcmd;
6524 6523 union scsi_cdb *scsi_cdb;
6525 6524
6526 6525 ASSERT(plun != NULL);
6527 6526
6528 6527 ptgt = plun->lun_tgt;
6529 6528 ASSERT(ptgt != NULL);
6530 6529
6531 6530 pptr = ptgt->tgt_port;
6532 6531 ASSERT(pptr != NULL);
6533 6532
6534 6533 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6535 6534 fcp_trace, FCP_BUF_LEVEL_5, 0,
6536 6535 "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);
6537 6536
6538 6537 nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
6539 6538 icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
6540 6539 FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
6541 6540 rscn_count);
6542 6541
6543 6542 if (icmd == NULL) {
6544 6543 return (DDI_FAILURE);
6545 6544 }
6546 6545
6547 6546 fpkt = icmd->ipkt_fpkt;
6548 6547 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
6549 6548 icmd->ipkt_retries = 0;
6550 6549 icmd->ipkt_opcode = opcode;
6551 6550 icmd->ipkt_lun = plun;
6552 6551
6553 6552 if (nodma) {
6554 6553 fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
6555 6554 } else {
6556 6555 fcmd = &fcp_cmd;
6557 6556 }
6558 6557 bzero(fcmd, sizeof (struct fcp_cmd));
6559 6558
6560 6559 fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;
6561 6560
6562 6561 hp = &fpkt->pkt_cmd_fhdr;
6563 6562
6564 6563 hp->s_id = pptr->port_id;
6565 6564 hp->d_id = ptgt->tgt_d_id;
6566 6565 hp->r_ctl = R_CTL_COMMAND;
6567 6566 hp->type = FC_TYPE_SCSI_FCP;
6568 6567 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
6569 6568 hp->rsvd = 0;
6570 6569 hp->seq_id = 0;
6571 6570 hp->seq_cnt = 0;
6572 6571 hp->ox_id = 0xffff;
6573 6572 hp->rx_id = 0xffff;
6574 6573 hp->ro = 0;
6575 6574
6576 6575 bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);
6577 6576
6578 6577 /*
6579 6578 * Request SCSI target for expedited processing
6580 6579 */
6581 6580
6582 6581 /*
6583 6582 * Set up for untagged queuing because we do not
6584 6583 * know if the fibre device supports queuing.
6585 6584 */
6586 6585 fcmd->fcp_cntl.cntl_reserved_0 = 0;
6587 6586 fcmd->fcp_cntl.cntl_reserved_1 = 0;
6588 6587 fcmd->fcp_cntl.cntl_reserved_2 = 0;
6589 6588 fcmd->fcp_cntl.cntl_reserved_3 = 0;
6590 6589 fcmd->fcp_cntl.cntl_reserved_4 = 0;
6591 6590 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
6592 6591 scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
6593 6592
6594 6593 switch (opcode) {
6595 6594 case SCMD_INQUIRY_PAGE83:
6596 6595 /*
6597 6596 * Prepare to get the Inquiry VPD page 83 information
6598 6597 */
6599 6598 fcmd->fcp_cntl.cntl_read_data = 1;
6600 6599 fcmd->fcp_cntl.cntl_write_data = 0;
6601 6600 fcmd->fcp_data_len = alloc_len;
6602 6601
6603 6602 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6604 6603 fpkt->pkt_comp = fcp_scsi_callback;
6605 6604
6606 6605 scsi_cdb->scc_cmd = SCMD_INQUIRY;
6607 6606 scsi_cdb->g0_addr2 = 0x01;
6608 6607 scsi_cdb->g0_addr1 = 0x83;
6609 6608 scsi_cdb->g0_count0 = (uchar_t)alloc_len;
6610 6609 break;
6611 6610
6612 6611 case SCMD_INQUIRY:
6613 6612 fcmd->fcp_cntl.cntl_read_data = 1;
6614 6613 fcmd->fcp_cntl.cntl_write_data = 0;
6615 6614 fcmd->fcp_data_len = alloc_len;
6616 6615
6617 6616 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6618 6617 fpkt->pkt_comp = fcp_scsi_callback;
6619 6618
6620 6619 scsi_cdb->scc_cmd = SCMD_INQUIRY;
6621 6620 scsi_cdb->g0_count0 = SUN_INQSIZE;
6622 6621 break;
6623 6622
6624 6623 case SCMD_REPORT_LUN: {
6625 6624 fc_portid_t d_id;
6626 6625 opaque_t fca_dev;
6627 6626
6628 6627 ASSERT(alloc_len >= 16);
6629 6628
6630 6629 d_id.priv_lilp_posit = 0;
6631 6630 d_id.port_id = ptgt->tgt_d_id;
6632 6631
6633 6632 fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);
6634 6633
6635 6634 mutex_enter(&ptgt->tgt_mutex);
6636 6635 ptgt->tgt_fca_dev = fca_dev;
6637 6636 mutex_exit(&ptgt->tgt_mutex);
6638 6637
6639 6638 fcmd->fcp_cntl.cntl_read_data = 1;
6640 6639 fcmd->fcp_cntl.cntl_write_data = 0;
6641 6640 fcmd->fcp_data_len = alloc_len;
6642 6641
6643 6642 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6644 6643 fpkt->pkt_comp = fcp_scsi_callback;
6645 6644
6646 6645 scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
6647 6646 scsi_cdb->scc5_count0 = alloc_len & 0xff;
6648 6647 scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
6649 6648 scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
6650 6649 scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
6651 6650 break;
6652 6651 }
6653 6652
6654 6653 default:
6655 6654 fcp_log(CE_WARN, pptr->port_dip,
6656 6655 "!fcp_send_scsi Invalid opcode");
6657 6656 break;
6658 6657 }
6659 6658
6660 6659 if (!nodma) {
6661 6660 FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
6662 6661 fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
6663 6662 }
6664 6663
6665 6664 mutex_enter(&pptr->port_mutex);
6666 6665 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6667 6666
6668 6667 mutex_exit(&pptr->port_mutex);
6669 6668 if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
6670 6669 FC_SUCCESS) {
6671 6670 fcp_icmd_free(pptr, icmd);
6672 6671 return (DDI_FAILURE);
6673 6672 }
6674 6673 return (DDI_SUCCESS);
6675 6674 } else {
6676 6675 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6677 6676 fcp_trace, FCP_BUF_LEVEL_2, 0,
6678 6677		    "fcp_send_scsi,1: state change occurred"
6679 6678 " for D_ID=0x%x", ptgt->tgt_d_id);
6680 6679 mutex_exit(&pptr->port_mutex);
6681 6680 fcp_icmd_free(pptr, icmd);
6682 6681 return (DDI_FAILURE);
6683 6682 }
6684 6683 }
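
For SCMD_REPORT_LUN the function spreads the 32-bit allocation length across the four scc5_count bytes of the CDB, with count3 carrying the most significant byte. A small sketch of that byte split and its inverse (the example length assumes the usual REPORT LUNS layout of an 8-byte header plus 8 bytes per LUN entry):

#include <stdio.h>
#include <stdint.h>

/* Split a 32-bit REPORT LUNS allocation length into its four CDB bytes. */
static void
split_alloc_len(uint32_t alloc_len, uint8_t b[4])
{
	b[0] = alloc_len & 0xff;		/* scc5_count0: least significant */
	b[1] = (alloc_len >> 8) & 0xff;
	b[2] = (alloc_len >> 16) & 0xff;
	b[3] = (alloc_len >> 24) & 0xff;	/* scc5_count3: most significant */
}

int
main(void)
{
	uint8_t b[4];
	uint32_t len = 8 + 16 * 8;	/* 8-byte header + 16 LUN entries */
	uint32_t back;

	split_alloc_len(len, b);
	(void) printf("alloc_len=%u -> %02x %02x %02x %02x (MSB first)\n",
	    len, b[3], b[2], b[1], b[0]);

	/* Reassembling the bytes gives back the original length. */
	back = ((uint32_t)b[3] << 24) | ((uint32_t)b[2] << 16) |
	    ((uint32_t)b[1] << 8) | b[0];
	(void) printf("reassembled=%u\n", back);
	return (0);
}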
6685 6684
6686 6685
6687 6686 /*
6688 6687  * called by fcp_scsi_callback to handle the case where
6689 6688 * REPORT_LUN returns ILLEGAL REQUEST or a UNIT ATTENTION
6690 6689 */
6691 6690 static int
6692 6691 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
6693 6692 {
6694 6693 uchar_t rqlen;
6695 6694 int rval = DDI_FAILURE;
6696 6695 struct scsi_extended_sense sense_info, *sense;
6697 6696 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
6698 6697 fpkt->pkt_ulp_private;
6699 6698 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
6700 6699 struct fcp_port *pptr = ptgt->tgt_port;
6701 6700
6702 6701 ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6703 6702
6704 6703 if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6705 6704 /*
6706 6705 * SCSI-II Reserve Release support. Some older FC drives return
6707 6706 		 * a Reservation Conflict for the Report Luns command.
6708 6707 */
6709 6708 if (icmd->ipkt_nodma) {
6710 6709 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6711 6710 rsp->fcp_u.fcp_status.sense_len_set = 0;
6712 6711 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6713 6712 } else {
6714 6713 fcp_rsp_t new_resp;
6715 6714
6716 6715 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6717 6716 fpkt->pkt_resp_acc, sizeof (new_resp));
6718 6717
6719 6718 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6720 6719 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6721 6720 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6722 6721
6723 6722 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6724 6723 fpkt->pkt_resp_acc, sizeof (new_resp));
6725 6724 }
6726 6725
6727 6726 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6728 6727 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6729 6728
6730 6729 return (DDI_SUCCESS);
6731 6730 }
6732 6731
6733 6732 sense = &sense_info;
6734 6733 if (!rsp->fcp_u.fcp_status.sense_len_set) {
6735 6734 /* no need to continue if sense length is not set */
6736 6735 return (rval);
6737 6736 }
6738 6737
6739 6738 /* casting 64-bit integer to 8-bit */
6740 6739 rqlen = (uchar_t)min(rsp->fcp_sense_len,
6741 6740 sizeof (struct scsi_extended_sense));
6742 6741
6743 6742 if (rqlen < 14) {
6744 6743 /* no need to continue if request length isn't long enough */
6745 6744 return (rval);
6746 6745 }
6747 6746
6748 6747 if (icmd->ipkt_nodma) {
6749 6748 /*
6750 6749 * We can safely use fcp_response_len here since the
6751 6750 * only path that calls fcp_check_reportlun,
6752 6751 * fcp_scsi_callback, has already called
6753 6752 * fcp_validate_fcp_response.
6754 6753 */
6755 6754 sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6756 6755 sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6757 6756 } else {
6758 6757 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6759 6758 rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6760 6759 sizeof (struct scsi_extended_sense));
6761 6760 }
6762 6761
6763 6762 if (!FCP_SENSE_NO_LUN(sense)) {
6764 6763 mutex_enter(&ptgt->tgt_mutex);
6765 6764 /* clear the flag if any */
6766 6765 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6767 6766 mutex_exit(&ptgt->tgt_mutex);
6768 6767 }
6769 6768
6770 6769 if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6771 6770 (sense->es_add_code == 0x20)) {
6772 6771 if (icmd->ipkt_nodma) {
6773 6772 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6774 6773 rsp->fcp_u.fcp_status.sense_len_set = 0;
6775 6774 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6776 6775 } else {
6777 6776 fcp_rsp_t new_resp;
6778 6777
6779 6778 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6780 6779 fpkt->pkt_resp_acc, sizeof (new_resp));
6781 6780
6782 6781 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6783 6782 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6784 6783 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6785 6784
6786 6785 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6787 6786 fpkt->pkt_resp_acc, sizeof (new_resp));
6788 6787 }
6789 6788
6790 6789 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6791 6790 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6792 6791
6793 6792 return (DDI_SUCCESS);
6794 6793 }
6795 6794
6796 6795 /*
6797 6796 	 * This is for the STK library, which returns a check condition to
6798 6797 	 * indicate the device is not ready and manual assistance is needed.
6799 6798 	 * This happens in response to a report lun command when the door is open.
6800 6799 */
6801 6800 if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6802 6801 if (icmd->ipkt_nodma) {
6803 6802 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6804 6803 rsp->fcp_u.fcp_status.sense_len_set = 0;
6805 6804 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6806 6805 } else {
6807 6806 fcp_rsp_t new_resp;
6808 6807
6809 6808 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6810 6809 fpkt->pkt_resp_acc, sizeof (new_resp));
6811 6810
6812 6811 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6813 6812 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6814 6813 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6815 6814
6816 6815 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6817 6816 fpkt->pkt_resp_acc, sizeof (new_resp));
6818 6817 }
6819 6818
6820 6819 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6821 6820 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6822 6821
6823 6822 return (DDI_SUCCESS);
6824 6823 }
6825 6824
6826 6825 if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6827 6826 (FCP_SENSE_NO_LUN(sense))) {
6828 6827 mutex_enter(&ptgt->tgt_mutex);
6829 6828 if ((FCP_SENSE_NO_LUN(sense)) &&
6830 6829 (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6831 6830 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6832 6831 mutex_exit(&ptgt->tgt_mutex);
6833 6832 /*
6834 6833 			 * reconfig was triggered by ILLEGAL REQUEST but
6835 6834 * got ILLEGAL REQUEST again
6836 6835 */
6837 6836 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6838 6837 fcp_trace, FCP_BUF_LEVEL_3, 0,
6839 6838 "!FCP: Unable to obtain Report Lun data"
6840 6839 " target=%x", ptgt->tgt_d_id);
6841 6840 } else {
6842 6841 if (ptgt->tgt_tid == NULL) {
6843 6842 timeout_id_t tid;
6844 6843 /*
6845 6844 * REPORT LUN data has changed. Kick off
6846 6845 * rediscovery
6847 6846 */
6848 6847 tid = timeout(fcp_reconfigure_luns,
6849 6848 (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6850 6849
6851 6850 ptgt->tgt_tid = tid;
6852 6851 ptgt->tgt_state |= FCP_TGT_BUSY;
6853 6852 }
6854 6853 if (FCP_SENSE_NO_LUN(sense)) {
6855 6854 ptgt->tgt_state |= FCP_TGT_ILLREQ;
6856 6855 }
6857 6856 mutex_exit(&ptgt->tgt_mutex);
6858 6857 if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6859 6858 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6860 6859 fcp_trace, FCP_BUF_LEVEL_3, 0,
6861 6860 "!FCP:Report Lun Has Changed"
6862 6861 " target=%x", ptgt->tgt_d_id);
6863 6862 } else if (FCP_SENSE_NO_LUN(sense)) {
6864 6863 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6865 6864 fcp_trace, FCP_BUF_LEVEL_3, 0,
6866 6865 "!FCP:LU Not Supported"
6867 6866 " target=%x", ptgt->tgt_d_id);
6868 6867 }
6869 6868 }
6870 6869 rval = DDI_SUCCESS;
6871 6870 }
6872 6871
6873 6872 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6874 6873 fcp_trace, FCP_BUF_LEVEL_5, 0,
6875 6874 "D_ID=%x, sense=%x, status=%x",
6876 6875 fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6877 6876 rsp->fcp_u.fcp_status.scsi_status);
6878 6877
6879 6878 return (rval);
6880 6879 }
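
fcp_check_reportlun() above keys off a handful of sense key / additional
sense code pairs (the RESERVATION CONFLICT status case is handled separately,
before the sense data is parsed). Below is a hedged user-space classifier
restating those checks, assuming the standard numeric values for the sense
keys and ASCs involved; the enum and helper names are illustrative only, and
FCP_SENSE_REPORTLUN_CHANGED / FCP_SENSE_NO_LUN are taken here to mean
UNIT ATTENTION with ASC 0x3F and ILLEGAL REQUEST with ASC 0x25, which is my
reading rather than a quote of the driver headers.

	/* Hedged illustration of the sense checks, not the driver's code. */
	#include <stdint.h>

	enum rl_action {
		RL_PASS_THROUGH,	/* let the normal error path handle it */
		RL_FAKE_ONE_LUN,	/* fake STATUS_GOOD + single-LUN data */
		RL_RECONFIGURE		/* kick off LUN rediscovery */
	};

	static enum rl_action
	classify_reportlun_sense(uint8_t key, uint8_t asc)
	{
		if (key == 0x05 && asc == 0x20)	/* ILLEGAL REQUEST, bad opcode */
			return (RL_FAKE_ONE_LUN);
		if (key == 0x02 && asc == 0x04)	/* NOT READY, e.g. door open */
			return (RL_FAKE_ONE_LUN);
		if (key == 0x06 && asc == 0x3f)	/* UNIT ATTN, luns changed */
			return (RL_RECONFIGURE);
		if (key == 0x05 && asc == 0x25)	/* LU not supported */
			return (RL_RECONFIGURE);
		return (RL_PASS_THROUGH);
	}

	int
	main(void)
	{
		/* invalid opcode should be faked up as a single-LUN answer */
		return (classify_reportlun_sense(0x05, 0x20) ==
		    RL_FAKE_ONE_LUN ? 0 : 1);
	}
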
6881 6880
6882 6881 /*
6883 6882 * Function: fcp_scsi_callback
6884 6883 *
6885 6884 * Description: This is the callback routine set by fcp_send_scsi() after
6886 6885  *		it calls fcp_icmd_alloc(). The SCSI commands completed here,
6887 6886  *		all autogenerated by FCP, are: REPORT_LUN, INQUIRY and
6888 6887 * INQUIRY_PAGE83.
6889 6888 *
6890 6889 * Argument: *fpkt FC packet used to convey the command
6891 6890 *
6892 6891 * Return Value: None
6893 6892 */
6894 6893 static void
6895 6894 fcp_scsi_callback(fc_packet_t *fpkt)
6896 6895 {
6897 6896 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
6898 6897 fpkt->pkt_ulp_private;
6899 6898 struct fcp_rsp_info fcp_rsp_err, *bep;
6900 6899 struct fcp_port *pptr;
6901 6900 struct fcp_tgt *ptgt;
6902 6901 struct fcp_lun *plun;
6903 6902 struct fcp_rsp response, *rsp;
6904 6903
6905 6904 ptgt = icmd->ipkt_tgt;
6906 6905 pptr = ptgt->tgt_port;
6907 6906 plun = icmd->ipkt_lun;
6908 6907
6909 6908 if (icmd->ipkt_nodma) {
6910 6909 rsp = (struct fcp_rsp *)fpkt->pkt_resp;
6911 6910 } else {
6912 6911 rsp = &response;
6913 6912 FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
6914 6913 sizeof (struct fcp_rsp));
6915 6914 }
6916 6915
6917 6916 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6918 6917 fcp_trace, FCP_BUF_LEVEL_2, 0,
6919 6918 "SCSI callback state=0x%x for %x, op_code=0x%x, "
6920 6919 "status=%x, lun num=%x",
6921 6920 fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
6922 6921 rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);
6923 6922
6924 6923 /*
6925 6924 * Pre-init LUN GUID with NWWN if it is not a device that
6926 6925 * supports multiple luns and we know it's not page83
6927 6926 * compliant. Although using a NWWN is not lun unique,
6928 6927 	 * we will be fine since there is only one lun behind the target
6929 6928 * in this case.
6930 6929 */
6931 6930 if ((plun->lun_guid_size == 0) &&
6932 6931 (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6933 6932 (fcp_symmetric_device_probe(plun) == 0)) {
6934 6933
6935 6934 char ascii_wwn[FC_WWN_SIZE*2+1];
6936 6935 fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
6937 6936 (void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
6938 6937 }
6939 6938
6940 6939 /*
6941 6940 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
6942 6941 	 * when they have more data than what is asked for in the CDB. An
6943 6942 	 * overrun really means that FCP_DL is smaller than the data length
6944 6943 	 * in the CDB. In this case we know that the REPORT LUN command we
6945 6944 	 * formed within this binary has the correct FCP_DL, so this OVERRUN
6946 6945 	 * is due to bad device behavior. In reality this is FC_SUCCESS.
6947 6946 */
6948 6947 if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
6949 6948 (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
6950 6949 (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
6951 6950 fpkt->pkt_state = FC_PKT_SUCCESS;
6952 6951 }
6953 6952
6954 6953 if (fpkt->pkt_state != FC_PKT_SUCCESS) {
6955 6954 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6956 6955 fcp_trace, FCP_BUF_LEVEL_2, 0,
6957 6956 "icmd failed with state=0x%x for %x", fpkt->pkt_state,
6958 6957 ptgt->tgt_d_id);
6959 6958
6960 6959 if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
6961 6960 /*
6962 6961 * Inquiry VPD page command on A5K SES devices would
6963 6962 * result in data CRC errors.
6964 6963 */
6965 6964 if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
6966 6965 (void) fcp_handle_page83(fpkt, icmd, 1);
6967 6966 return;
6968 6967 }
6969 6968 }
6970 6969 if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
6971 6970 FCP_MUST_RETRY(fpkt)) {
6972 6971 fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
6973 6972 fcp_retry_scsi_cmd(fpkt);
6974 6973 return;
6975 6974 }
6976 6975
6977 6976 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6978 6977 FCP_TGT_TRACE_20);
6979 6978
6980 6979 mutex_enter(&pptr->port_mutex);
6981 6980 mutex_enter(&ptgt->tgt_mutex);
6982 6981 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6983 6982 mutex_exit(&ptgt->tgt_mutex);
6984 6983 mutex_exit(&pptr->port_mutex);
6985 6984 fcp_print_error(fpkt);
6986 6985 } else {
6987 6986 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6988 6987 fcp_trace, FCP_BUF_LEVEL_2, 0,
6989 6988 			    "fcp_scsi_callback,1: state change occurred"
6990 6989 " for D_ID=0x%x", ptgt->tgt_d_id);
6991 6990 mutex_exit(&ptgt->tgt_mutex);
6992 6991 mutex_exit(&pptr->port_mutex);
6993 6992 }
6994 6993 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6995 6994 icmd->ipkt_change_cnt, icmd->ipkt_cause);
6996 6995 fcp_icmd_free(pptr, icmd);
6997 6996 return;
6998 6997 }
6999 6998
7000 6999 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);
7001 7000
7002 7001 mutex_enter(&pptr->port_mutex);
7003 7002 mutex_enter(&ptgt->tgt_mutex);
7004 7003 if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7005 7004 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7006 7005 fcp_trace, FCP_BUF_LEVEL_2, 0,
7007 7006 		    "fcp_scsi_callback,2: state change occurred"
7008 7007 " for D_ID=0x%x", ptgt->tgt_d_id);
7009 7008 mutex_exit(&ptgt->tgt_mutex);
7010 7009 mutex_exit(&pptr->port_mutex);
7011 7010 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7012 7011 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7013 7012 fcp_icmd_free(pptr, icmd);
7014 7013 return;
7015 7014 }
7016 7015 ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7017 7016
7018 7017 mutex_exit(&ptgt->tgt_mutex);
7019 7018 mutex_exit(&pptr->port_mutex);
7020 7019
7021 7020 if (icmd->ipkt_nodma) {
7022 7021 bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
7023 7022 sizeof (struct fcp_rsp));
7024 7023 } else {
7025 7024 bep = &fcp_rsp_err;
7026 7025 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
7027 7026 fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
7028 7027 }
7029 7028
7030 7029 if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
7031 7030 fcp_retry_scsi_cmd(fpkt);
7032 7031 return;
7033 7032 }
7034 7033
7035 7034 if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
7036 7035 FCP_NO_FAILURE) {
7037 7036 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7038 7037 fcp_trace, FCP_BUF_LEVEL_2, 0,
7039 7038 "rsp_code=0x%x, rsp_len_set=0x%x",
7040 7039 bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
7041 7040 fcp_retry_scsi_cmd(fpkt);
7042 7041 return;
7043 7042 }
7044 7043
7045 7044 if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
7046 7045 rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
7047 7046 fcp_queue_ipkt(pptr, fpkt);
7048 7047 return;
7049 7048 }
7050 7049
7051 7050 /*
7052 7051 	 * Devices that do not support INQUIRY_PAGE83 return a check condition
7053 7052 	 * with illegal request, as per the SCSI spec.
7054 7053 	 * Crossbridge is one such device and Daktari's SES node is another.
7055 7054 	 * We ideally want to enumerate these devices as non-mpxio devices.
7056 7055 * SES nodes (Daktari only currently) are an exception to this.
7057 7056 */
7058 7057 if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
7059 7058 (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {
7060 7059
7061 7060 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7062 7061 fcp_trace, FCP_BUF_LEVEL_3, 0,
7063 7062 "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
7064 7063 "check condition. May enumerate as non-mpxio device",
7065 7064 ptgt->tgt_d_id, plun->lun_type);
7066 7065
7067 7066 /*
7068 7067 * If we let Daktari's SES be enumerated as a non-mpxio
7069 7068 		 * device, there will be a discrepancy in that the other
7070 7069 * internal FC disks will get enumerated as mpxio devices.
7071 7070 * Applications like luxadm expect this to be consistent.
7072 7071 *
7073 7072 * So, we put in a hack here to check if this is an SES device
7074 7073 * and handle it here.
7075 7074 */
7076 7075 if (plun->lun_type == DTYPE_ESI) {
7077 7076 /*
7078 7077 			 * Since pkt_state is actually FC_PKT_SUCCESS
7079 7078 			 * at this stage, we fake a failure here so that
7080 7079 			 * fcp_handle_page83 will create a device path using
7081 7080 			 * the WWN instead of the GUID, which is not there anyway.
7082 7081 */
7083 7082 fpkt->pkt_state = FC_PKT_LOCAL_RJT;
7084 7083 (void) fcp_handle_page83(fpkt, icmd, 1);
7085 7084 return;
7086 7085 }
7087 7086
7088 7087 mutex_enter(&ptgt->tgt_mutex);
7089 7088 plun->lun_state &= ~(FCP_LUN_OFFLINE |
7090 7089 FCP_LUN_MARK | FCP_LUN_BUSY);
7091 7090 mutex_exit(&ptgt->tgt_mutex);
7092 7091
7093 7092 (void) fcp_call_finish_init(pptr, ptgt,
7094 7093 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7095 7094 icmd->ipkt_cause);
7096 7095 fcp_icmd_free(pptr, icmd);
7097 7096 return;
7098 7097 }
7099 7098
7100 7099 if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7101 7100 int rval = DDI_FAILURE;
7102 7101
7103 7102 /*
7104 7103 * handle cases where report lun isn't supported
7105 7104 * by faking up our own REPORT_LUN response or
7106 7105 * UNIT ATTENTION
7107 7106 */
7108 7107 if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7109 7108 rval = fcp_check_reportlun(rsp, fpkt);
7110 7109
7111 7110 /*
7112 7111 * fcp_check_reportlun might have modified the
7113 7112 * FCP response. Copy it in again to get an updated
7114 7113 * FCP response
7115 7114 */
7116 7115 if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
7117 7116 rsp = &response;
7118 7117
7119 7118 FCP_CP_IN(fpkt->pkt_resp, rsp,
7120 7119 fpkt->pkt_resp_acc,
7121 7120 sizeof (struct fcp_rsp));
7122 7121 }
7123 7122 }
7124 7123
7125 7124 if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7126 7125 if (rval == DDI_SUCCESS) {
7127 7126 (void) fcp_call_finish_init(pptr, ptgt,
7128 7127 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7129 7128 icmd->ipkt_cause);
7130 7129 fcp_icmd_free(pptr, icmd);
7131 7130 } else {
7132 7131 fcp_retry_scsi_cmd(fpkt);
7133 7132 }
7134 7133
7135 7134 return;
7136 7135 }
7137 7136 } else {
7138 7137 if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7139 7138 mutex_enter(&ptgt->tgt_mutex);
7140 7139 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
7141 7140 mutex_exit(&ptgt->tgt_mutex);
7142 7141 }
7143 7142 }
7144 7143
7145 7144 ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
7146 7145 if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
7147 7146 (void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0,
7148 7147 DDI_DMA_SYNC_FORCPU);
7149 7148 }
7150 7149
7151 7150 switch (icmd->ipkt_opcode) {
7152 7151 case SCMD_INQUIRY:
7153 7152 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
7154 7153 fcp_handle_inquiry(fpkt, icmd);
7155 7154 break;
7156 7155
7157 7156 case SCMD_REPORT_LUN:
7158 7157 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
7159 7158 FCP_TGT_TRACE_22);
7160 7159 fcp_handle_reportlun(fpkt, icmd);
7161 7160 break;
7162 7161
7163 7162 case SCMD_INQUIRY_PAGE83:
7164 7163 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
7165 7164 (void) fcp_handle_page83(fpkt, icmd, 0);
7166 7165 break;
7167 7166
7168 7167 default:
7169 7168 fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
7170 7169 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7171 7170 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7172 7171 fcp_icmd_free(pptr, icmd);
7173 7172 break;
7174 7173 }
7175 7174 }
7176 7175
7177 7176
7178 7177 static void
7179 7178 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7180 7179 {
7181 7180 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
7182 7181 fpkt->pkt_ulp_private;
7183 7182 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
7184 7183 struct fcp_port *pptr = ptgt->tgt_port;
7185 7184
7186 7185 if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7187 7186 fcp_is_retryable(icmd)) {
7188 7187 mutex_enter(&pptr->port_mutex);
7189 7188 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7190 7189 mutex_exit(&pptr->port_mutex);
7191 7190 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7192 7191 fcp_trace, FCP_BUF_LEVEL_3, 0,
7193 7192 "Retrying %s to %x; state=%x, reason=%x",
7194 7193 (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7195 7194 "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7196 7195 fpkt->pkt_state, fpkt->pkt_reason);
7197 7196
7198 7197 fcp_queue_ipkt(pptr, fpkt);
7199 7198 } else {
7200 7199 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7201 7200 fcp_trace, FCP_BUF_LEVEL_3, 0,
7202 7201 			    "fcp_retry_scsi_cmd,1: state change occurred"
7203 7202 " for D_ID=0x%x", ptgt->tgt_d_id);
7204 7203 mutex_exit(&pptr->port_mutex);
7205 7204 (void) fcp_call_finish_init(pptr, ptgt,
7206 7205 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7207 7206 icmd->ipkt_cause);
7208 7207 fcp_icmd_free(pptr, icmd);
7209 7208 }
7210 7209 } else {
7211 7210 fcp_print_error(fpkt);
7212 7211 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7213 7212 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7214 7213 fcp_icmd_free(pptr, icmd);
7215 7214 }
7216 7215 }
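
fcp_retry_scsi_cmd() retries only while the retry budget (FCP_MAX_RETRIES)
has not been exhausted, the command is considered retryable, and the target
state has not changed since the command was issued; otherwise it reports the
error and frees the packet. A minimal sketch of that decision, with
hypothetical stand-in names:

	/* Hedged sketch of the bounded-retry policy, not the driver's code. */
	#include <stdbool.h>

	#define	MY_MAX_RETRIES	3	/* stand-in for FCP_MAX_RETRIES */

	struct my_cmd {
		int	retries;	/* attempts so far */
		int	snapshot_cnt;	/* target change count when issued */
	};

	/* requeue if retryable and the world hasn't changed; else give up */
	static bool
	should_retry(const struct my_cmd *cmd, int current_tgt_cnt,
	    bool retryable)
	{
		if (cmd->retries >= MY_MAX_RETRIES || !retryable)
			return (false);
		return (cmd->snapshot_cnt == current_tgt_cnt);
	}

	int
	main(void)
	{
		struct my_cmd cmd = { .retries = 1, .snapshot_cnt = 42 };

		/* same generation, budget left, retryable -> retry */
		return (should_retry(&cmd, 42, true) ? 0 : 1);
	}
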
7217 7216
7218 7217 /*
7219 7218 * Function: fcp_handle_page83
7220 7219 *
7221 7220 * Description: Treats the response to INQUIRY_PAGE83.
7222 7221 *
7223 7222 * Argument: *fpkt FC packet used to convey the command.
7224 7223 * *icmd Original fcp_ipkt structure.
7225 7224 * ignore_page83_data
7226 7225  *		 	if it's 1, that means it's a special device's
7227 7226  *			page83 response; it should be enumerated under mpxio
7228 7227 *
7229 7228 * Return Value: None
7230 7229 */
7231 7230 static void
7232 7231 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7233 7232 int ignore_page83_data)
7234 7233 {
7235 7234 struct fcp_port *pptr;
7236 7235 struct fcp_lun *plun;
7237 7236 struct fcp_tgt *ptgt;
7238 7237 uchar_t dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7239 7238 int fail = 0;
7240 7239 ddi_devid_t devid;
7241 7240 char *guid = NULL;
7242 7241 int ret;
7243 7242
7244 7243 ASSERT(icmd != NULL && fpkt != NULL);
7245 7244
7246 7245 pptr = icmd->ipkt_port;
7247 7246 ptgt = icmd->ipkt_tgt;
7248 7247 plun = icmd->ipkt_lun;
7249 7248
7250 7249 if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7251 7250 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7252 7251
7253 7252 FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7254 7253 SCMD_MAX_INQUIRY_PAGE83_SIZE);
7255 7254
7256 7255 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7257 7256 fcp_trace, FCP_BUF_LEVEL_5, 0,
7258 7257 "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7259 7258 "dtype=0x%x, lun num=%x",
7260 7259 pptr->port_instance, ptgt->tgt_d_id,
7261 7260 dev_id_page[0], plun->lun_num);
7262 7261
7263 7262 ret = ddi_devid_scsi_encode(
7264 7263 DEVID_SCSI_ENCODE_VERSION_LATEST,
7265 7264 NULL, /* driver name */
7266 7265 (unsigned char *) &plun->lun_inq, /* standard inquiry */
7267 7266 sizeof (plun->lun_inq), /* size of standard inquiry */
7268 7267 NULL, /* page 80 data */
7269 7268 0, /* page 80 len */
7270 7269 dev_id_page, /* page 83 data */
7271 7270 SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7272 7271 &devid);
7273 7272
7274 7273 if (ret == DDI_SUCCESS) {
7275 7274
7276 7275 guid = ddi_devid_to_guid(devid);
7277 7276
7278 7277 if (guid) {
7279 7278 /*
7280 7279 * Check our current guid. If it's non null
7281 7280 * and it has changed, we need to copy it into
7282 7281 * lun_old_guid since we might still need it.
7283 7282 */
7284 7283 if (plun->lun_guid &&
7285 7284 strcmp(guid, plun->lun_guid)) {
7286 7285 unsigned int len;
7287 7286
7288 7287 /*
7289 7288 * If the guid of the LUN changes,
7290 7289 * reconfiguration should be triggered
7291 7290 * to reflect the changes.
7292 7291 * i.e. we should offline the LUN with
7293 7292 * the old guid, and online the LUN with
7294 7293 * the new guid.
7295 7294 */
7296 7295 plun->lun_state |= FCP_LUN_CHANGED;
7297 7296
7298 7297 if (plun->lun_old_guid) {
7299 7298 kmem_free(plun->lun_old_guid,
7300 7299 plun->lun_old_guid_size);
7301 7300 }
7302 7301
7303 7302 len = plun->lun_guid_size;
7304 7303 plun->lun_old_guid_size = len;
7305 7304
7306 7305 plun->lun_old_guid = kmem_zalloc(len,
7307 7306 KM_NOSLEEP);
7308 7307
7309 7308 if (plun->lun_old_guid) {
7310 7309 /*
7311 7310 					 * The alloc was successful, so
7312 7311 * let's do the copy.
7313 7312 */
7314 7313 bcopy(plun->lun_guid,
7315 7314 plun->lun_old_guid, len);
7316 7315 } else {
7317 7316 fail = 1;
7318 7317 plun->lun_old_guid_size = 0;
7319 7318 }
7320 7319 }
7321 7320 if (!fail) {
7322 7321 if (fcp_copy_guid_2_lun_block(
7323 7322 plun, guid)) {
7324 7323 fail = 1;
7325 7324 }
7326 7325 }
7327 7326 ddi_devid_free_guid(guid);
7328 7327
7329 7328 } else {
7330 7329 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7331 7330 fcp_trace, FCP_BUF_LEVEL_2, 0,
7332 7331 "fcp_handle_page83: unable to create "
7333 7332 "GUID");
7334 7333
7335 7334 /* couldn't create good guid from devid */
7336 7335 fail = 1;
7337 7336 }
7338 7337 ddi_devid_free(devid);
7339 7338
7340 7339 } else if (ret == DDI_NOT_WELL_FORMED) {
7341 7340 /* NULL filled data for page 83 */
7342 7341 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7343 7342 fcp_trace, FCP_BUF_LEVEL_2, 0,
7344 7343 "fcp_handle_page83: retry GUID");
7345 7344
7346 7345 icmd->ipkt_retries = 0;
7347 7346 fcp_retry_scsi_cmd(fpkt);
7348 7347 return;
7349 7348 } else {
7350 7349 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7351 7350 fcp_trace, FCP_BUF_LEVEL_2, 0,
7352 7351 "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7353 7352 ret);
7354 7353 /*
7355 7354 			 * Since the page83 validation was
7356 7355 			 * introduced late, we are being
7357 7356 			 * tolerant of existing devices
7358 7357 			 * that were already found to be working
7359 7358 			 * under mpxio, like the A5200's SES device.
7360 7359 			 * Its page83 response will not be standard-compliant,
7361 7360 			 * but we still want it to be enumerated under mpxio.
7362 7361 */
7363 7362 if (fcp_symmetric_device_probe(plun) != 0) {
7364 7363 fail = 1;
7365 7364 }
7366 7365 }
7367 7366
7368 7367 } else {
7369 7368 /* bad packet state */
7370 7369 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7371 7370
7372 7371 /*
7373 7372 		 * Some special devices (A5K SES and Daktari's SES devices)
7374 7373 		 * should be enumerated under mpxio,
7375 7374 		 * or "luxadm dis" will fail
7376 7375 */
7377 7376 if (ignore_page83_data) {
7378 7377 fail = 0;
7379 7378 } else {
7380 7379 fail = 1;
7381 7380 }
7382 7381 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7383 7382 fcp_trace, FCP_BUF_LEVEL_2, 0,
7384 7383 "!Devid page cmd failed. "
7385 7384 		    "fpkt_state: %x fpkt_reason: %x "
7386 7385 "ignore_page83: %d",
7387 7386 fpkt->pkt_state, fpkt->pkt_reason,
7388 7387 ignore_page83_data);
7389 7388 }
7390 7389
7391 7390 mutex_enter(&pptr->port_mutex);
7392 7391 mutex_enter(&plun->lun_mutex);
7393 7392 /*
7394 7393 	 * If lun_cip is not NULL, we don't update lun_mpxio; this avoids a
7395 7394 	 * mismatch between lun_cip and lun_mpxio.
7396 7395 */
7397 7396 if (plun->lun_cip == NULL) {
7398 7397 /*
7399 7398 * If we don't have a guid for this lun it's because we were
7400 7399 * unable to glean one from the page 83 response. Set the
7401 7400 * control flag to 0 here to make sure that we don't attempt to
7402 7401 * enumerate it under mpxio.
7403 7402 */
7404 7403 if (fail || pptr->port_mpxio == 0) {
7405 7404 plun->lun_mpxio = 0;
7406 7405 } else {
7407 7406 plun->lun_mpxio = 1;
7408 7407 }
7409 7408 }
7410 7409 mutex_exit(&plun->lun_mutex);
7411 7410 mutex_exit(&pptr->port_mutex);
7412 7411
7413 7412 mutex_enter(&ptgt->tgt_mutex);
7414 7413 plun->lun_state &=
7415 7414 ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7416 7415 mutex_exit(&ptgt->tgt_mutex);
7417 7416
7418 7417 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7419 7418 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7420 7419
7421 7420 fcp_icmd_free(pptr, icmd);
7422 7421 }
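
The interesting bookkeeping in fcp_handle_page83() is what happens when the
GUID derived from page 83 differs from the one already recorded: the old GUID
is stashed (it may still be needed to offline the old path) and the LUN is
flagged as changed so the old path is offlined and the new one onlined. A
user-space sketch of that logic under illustrative names; struct my_lun and
update_lun_guid() are not the driver's structures or functions.

	/* Hedged sketch of the GUID-change bookkeeping only. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <stdbool.h>

	struct my_lun {
		char	*guid;		/* current GUID, NUL-terminated */
		char	*old_guid;	/* previous GUID, kept for offlining */
		bool	changed;	/* reconfiguration needed */
	};

	static int
	update_lun_guid(struct my_lun *lun, const char *new_guid)
	{
		if (lun->guid != NULL && strcmp(lun->guid, new_guid) != 0) {
			free(lun->old_guid);
			lun->old_guid = strdup(lun->guid);	/* may still need it */
			if (lun->old_guid == NULL)
				return (-1);
			lun->changed = true;	/* offline old, online new */
		}
		free(lun->guid);
		lun->guid = strdup(new_guid);
		return (lun->guid == NULL ? -1 : 0);
	}

	int
	main(void)
	{
		struct my_lun lun = { NULL, NULL, false };

		(void) update_lun_guid(&lun, "guid-one");
		(void) update_lun_guid(&lun, "guid-two");	/* triggers the stash */
		printf("current=%s old=%s changed=%d\n",
		    lun.guid, lun.old_guid, (int)lun.changed);
		return (0);
	}
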
7423 7422
7424 7423 /*
7425 7424 * Function: fcp_handle_inquiry
7426 7425 *
7427 7426 * Description: Called by fcp_scsi_callback to handle the response to an
7428 7427 * INQUIRY request.
7429 7428 *
7430 7429 * Argument: *fpkt FC packet used to convey the command.
7431 7430 * *icmd Original fcp_ipkt structure.
7432 7431 *
7433 7432 * Return Value: None
7434 7433 */
7435 7434 static void
7436 7435 fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7437 7436 {
7438 7437 struct fcp_port *pptr;
7439 7438 struct fcp_lun *plun;
7440 7439 struct fcp_tgt *ptgt;
7441 7440 uchar_t dtype;
7442 7441 uchar_t pqual;
7443 7442 uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
7444 7443
7445 7444 ASSERT(icmd != NULL && fpkt != NULL);
7446 7445
7447 7446 pptr = icmd->ipkt_port;
7448 7447 ptgt = icmd->ipkt_tgt;
7449 7448 plun = icmd->ipkt_lun;
7450 7449
7451 7450 FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
7452 7451 sizeof (struct scsi_inquiry));
7453 7452
7454 7453 dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
7455 7454 pqual = plun->lun_inq.inq_dtype >> 5;
7456 7455
7457 7456 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7458 7457 fcp_trace, FCP_BUF_LEVEL_5, 0,
7459 7458 "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
7460 7459 "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
7461 7460 plun->lun_num, dtype, pqual);
7462 7461
7463 7462 if (pqual != 0) {
7464 7463 /*
7465 7464 * Non-zero peripheral qualifier
7466 7465 */
7467 7466 fcp_log(CE_CONT, pptr->port_dip,
7468 7467 "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7469 7468 "Device type=0x%x Peripheral qual=0x%x\n",
7470 7469 ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7471 7470
7472 7471 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7473 7472 fcp_trace, FCP_BUF_LEVEL_5, 0,
7474 7473 "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7475 7474 "Device type=0x%x Peripheral qual=0x%x\n",
7476 7475 ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7477 7476
7478 7477 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);
7479 7478
7480 7479 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7481 7480 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7482 7481 fcp_icmd_free(pptr, icmd);
7483 7482 return;
7484 7483 }
7485 7484
7486 7485 /*
7487 7486 * If the device is already initialized, check the dtype
7488 7487 * for a change. If it has changed then update the flags
7489 7488 	 * so that fcp_create_luns will offline the old device and
7490 7489 * create the new device. Refer to bug: 4764752
7491 7490 */
7492 7491 if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
7493 7492 plun->lun_state |= FCP_LUN_CHANGED;
7494 7493 }
7495 7494 plun->lun_type = plun->lun_inq.inq_dtype;
7496 7495
7497 7496 /*
7498 7497 * This code is setting/initializing the throttling in the FCA
7499 7498 * driver.
7500 7499 */
7501 7500 mutex_enter(&pptr->port_mutex);
7502 7501 if (!pptr->port_notify) {
7503 7502 if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
7504 7503 uint32_t cmd = 0;
7505 7504 cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
7506 7505 ((cmd & 0xFFFFFF00 >> 8) |
7507 7506 FCP_SVE_THROTTLE << 8));
7508 7507 pptr->port_notify = 1;
7509 7508 mutex_exit(&pptr->port_mutex);
7510 7509 (void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
7511 7510 mutex_enter(&pptr->port_mutex);
7512 7511 }
7513 7512 }
7514 7513
7515 7514 if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7516 7515 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7517 7516 fcp_trace, FCP_BUF_LEVEL_2, 0,
7518 7517 		    "fcp_handle_inquiry,1: state change occurred"
7519 7518 " for D_ID=0x%x", ptgt->tgt_d_id);
7520 7519 mutex_exit(&pptr->port_mutex);
7521 7520
7522 7521 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
7523 7522 (void) fcp_call_finish_init(pptr, ptgt,
7524 7523 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7525 7524 icmd->ipkt_cause);
7526 7525 fcp_icmd_free(pptr, icmd);
7527 7526 return;
7528 7527 }
7529 7528 ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7530 7529 mutex_exit(&pptr->port_mutex);
7531 7530
7532 7531 /* Retrieve the rscn count (if a valid one exists) */
7533 7532 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7534 7533 rscn_count = ((fc_ulp_rscn_info_t *)
7535 7534 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
7536 7535 } else {
7537 7536 rscn_count = FC_INVALID_RSCN_COUNT;
7538 7537 }
7539 7538
7540 7539 if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
7541 7540 SCMD_MAX_INQUIRY_PAGE83_SIZE,
7542 7541 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7543 7542 icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7544 7543 fcp_log(CE_WARN, NULL, "!failed to send page 83");
7545 7544 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
7546 7545 (void) fcp_call_finish_init(pptr, ptgt,
7547 7546 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7548 7547 icmd->ipkt_cause);
7549 7548 }
7550 7549
7551 7550 /*
7552 7551 * Read Inquiry VPD Page 0x83 to uniquely
7553 7552 * identify this logical unit.
7554 7553 */
7555 7554 fcp_icmd_free(pptr, icmd);
7556 7555 }
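
fcp_handle_inquiry() splits inq_dtype into the peripheral device type (low 5
bits, DTYPE_MASK) and the peripheral qualifier (top 3 bits); only a qualifier
of zero is treated as a connected LUN. A tiny worked example of that decode
(stand-alone, with the standard 0x1f mask assumed for DTYPE_MASK):

	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		uint8_t inq_dtype = 0x20;		/* qualifier 1, type 0 */
		uint8_t dtype = inq_dtype & 0x1f;	/* DTYPE_MASK */
		uint8_t pqual = inq_dtype >> 5;

		/* 0x20 -> pqual=1 (LUN not connected), dtype=0 (disk) */
		printf("dtype=0x%x pqual=0x%x\n", dtype, pqual);
		return (0);
	}
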
7557 7556
7558 7557 /*
7559 7558 * Function: fcp_handle_reportlun
7560 7559 *
7561 7560 * Description: Called by fcp_scsi_callback to handle the response to a
7562 7561 * REPORT_LUN request.
7563 7562 *
7564 7563 * Argument: *fpkt FC packet used to convey the command.
7565 7564 * *icmd Original fcp_ipkt structure.
7566 7565 *
7567 7566 * Return Value: None
7568 7567 */
7569 7568 static void
7570 7569 fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7571 7570 {
7572 7571 int i;
7573 7572 int nluns_claimed;
7574 7573 int nluns_bufmax;
7575 7574 int len;
7576 7575 uint16_t lun_num;
7577 7576 uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
7578 7577 struct fcp_port *pptr;
7579 7578 struct fcp_tgt *ptgt;
7580 7579 struct fcp_lun *plun;
7581 7580 struct fcp_reportlun_resp *report_lun;
7582 7581
7583 7582 pptr = icmd->ipkt_port;
7584 7583 ptgt = icmd->ipkt_tgt;
7585 7584 len = fpkt->pkt_datalen;
7586 7585
7587 7586 if ((len < FCP_LUN_HEADER) ||
7588 7587 ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
7589 7588 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7590 7589 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7591 7590 fcp_icmd_free(pptr, icmd);
7592 7591 return;
7593 7592 }
7594 7593
7595 7594 FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
7596 7595 fpkt->pkt_datalen);
7597 7596
7598 7597 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7599 7598 fcp_trace, FCP_BUF_LEVEL_5, 0,
7600 7599 "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
7601 7600 pptr->port_instance, ptgt->tgt_d_id);
7602 7601
7603 7602 /*
7604 7603 	 * Get the number of luns the device claims it has (the field is
7605 7604 	 * supplied as the LUN list length in bytes, i.e. LUNs * 8).
7606 7605 */
7607 7606 nluns_claimed = BE_32(report_lun->num_lun) >> 3;
7608 7607
7609 7608 /*
7610 7609 * Get the maximum number of luns the buffer submitted can hold.
7611 7610 */
7612 7611 nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;
7613 7612
7614 7613 /*
7615 7614 * Due to limitations of certain hardware, we support only 16 bit LUNs
7616 7615 */
7617 7616 if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
7618 7617 kmem_free(report_lun, len);
7619 7618
7620 7619 fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
7621 7620 " 0x%x number of LUNs for target=%x", nluns_claimed,
7622 7621 ptgt->tgt_d_id);
7623 7622
7624 7623 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7625 7624 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7626 7625 fcp_icmd_free(pptr, icmd);
7627 7626 return;
7628 7627 }
7629 7628
7630 7629 /*
7631 7630 * If there are more LUNs than we have allocated memory for,
7632 7631 * allocate more space and send down yet another report lun if
7633 7632 * the maximum number of attempts hasn't been reached.
7634 7633 */
7635 7634 mutex_enter(&ptgt->tgt_mutex);
7636 7635
7637 7636 if ((nluns_claimed > nluns_bufmax) &&
7638 7637 (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {
7639 7638
7640 7639 struct fcp_lun *plun;
7641 7640
7642 7641 ptgt->tgt_report_lun_cnt++;
7643 7642 plun = ptgt->tgt_lun;
7644 7643 ASSERT(plun != NULL);
7645 7644 mutex_exit(&ptgt->tgt_mutex);
7646 7645
7647 7646 kmem_free(report_lun, len);
7648 7647
7649 7648 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7650 7649 fcp_trace, FCP_BUF_LEVEL_5, 0,
7651 7650 "!Dynamically discovered %d LUNs for D_ID=%x",
7652 7651 nluns_claimed, ptgt->tgt_d_id);
7653 7652
7654 7653 /* Retrieve the rscn count (if a valid one exists) */
7655 7654 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7656 7655 rscn_count = ((fc_ulp_rscn_info_t *)
7657 7656 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7658 7657 ulp_rscn_count;
7659 7658 } else {
7660 7659 rscn_count = FC_INVALID_RSCN_COUNT;
7661 7660 }
7662 7661
7663 7662 if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
7664 7663 FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
7665 7664 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7666 7665 icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7667 7666 (void) fcp_call_finish_init(pptr, ptgt,
7668 7667 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7669 7668 icmd->ipkt_cause);
7670 7669 }
7671 7670
7672 7671 fcp_icmd_free(pptr, icmd);
7673 7672 return;
7674 7673 }
7675 7674
7676 7675 if (nluns_claimed > nluns_bufmax) {
7677 7676 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7678 7677 fcp_trace, FCP_BUF_LEVEL_5, 0,
7679 7678 "Target=%x:%x:%x:%x:%x:%x:%x:%x"
7680 7679 " Number of LUNs lost=%x",
7681 7680 ptgt->tgt_port_wwn.raw_wwn[0],
7682 7681 ptgt->tgt_port_wwn.raw_wwn[1],
7683 7682 ptgt->tgt_port_wwn.raw_wwn[2],
7684 7683 ptgt->tgt_port_wwn.raw_wwn[3],
7685 7684 ptgt->tgt_port_wwn.raw_wwn[4],
7686 7685 ptgt->tgt_port_wwn.raw_wwn[5],
7687 7686 ptgt->tgt_port_wwn.raw_wwn[6],
7688 7687 ptgt->tgt_port_wwn.raw_wwn[7],
7689 7688 nluns_claimed - nluns_bufmax);
7690 7689
7691 7690 nluns_claimed = nluns_bufmax;
7692 7691 }
7693 7692 ptgt->tgt_lun_cnt = nluns_claimed;
7694 7693
7695 7694 /*
7696 7695 * Identify missing LUNs and print warning messages
7697 7696 */
7698 7697 for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
7699 7698 int offline;
7700 7699 int exists = 0;
7701 7700
7702 7701 offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;
7703 7702
7704 7703 for (i = 0; i < nluns_claimed && exists == 0; i++) {
7705 7704 uchar_t *lun_string;
7706 7705
7707 7706 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7708 7707
7709 7708 switch (lun_string[0] & 0xC0) {
7710 7709 case FCP_LUN_ADDRESSING:
7711 7710 case FCP_PD_ADDRESSING:
7712 7711 case FCP_VOLUME_ADDRESSING:
7713 7712 lun_num = ((lun_string[0] & 0x3F) << 8) |
7714 7713 lun_string[1];
7715 7714 if (plun->lun_num == lun_num) {
7716 7715 exists++;
7717 7716 break;
7718 7717 }
7719 7718 break;
7720 7719
7721 7720 default:
7722 7721 break;
7723 7722 }
7724 7723 }
7725 7724
7726 7725 if (!exists && !offline) {
7727 7726 mutex_exit(&ptgt->tgt_mutex);
7728 7727
7729 7728 mutex_enter(&pptr->port_mutex);
7730 7729 mutex_enter(&ptgt->tgt_mutex);
7731 7730 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7732 7731 /*
7733 7732 * set disappear flag when device was connected
7734 7733 */
7735 7734 if (!(plun->lun_state &
7736 7735 FCP_LUN_DEVICE_NOT_CONNECTED)) {
7737 7736 plun->lun_state |= FCP_LUN_DISAPPEARED;
7738 7737 }
7739 7738 mutex_exit(&ptgt->tgt_mutex);
7740 7739 mutex_exit(&pptr->port_mutex);
7741 7740 if (!(plun->lun_state &
7742 7741 FCP_LUN_DEVICE_NOT_CONNECTED)) {
7743 7742 fcp_log(CE_NOTE, pptr->port_dip,
7744 7743 "!Lun=%x for target=%x disappeared",
7745 7744 plun->lun_num, ptgt->tgt_d_id);
7746 7745 }
7747 7746 mutex_enter(&ptgt->tgt_mutex);
7748 7747 } else {
7749 7748 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7750 7749 fcp_trace, FCP_BUF_LEVEL_5, 0,
7751 7750 "fcp_handle_reportlun,1: state change"
7752 7751 				    " occurred for D_ID=0x%x", ptgt->tgt_d_id);
7753 7752 mutex_exit(&ptgt->tgt_mutex);
7754 7753 mutex_exit(&pptr->port_mutex);
7755 7754 kmem_free(report_lun, len);
7756 7755 (void) fcp_call_finish_init(pptr, ptgt,
7757 7756 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7758 7757 icmd->ipkt_cause);
7759 7758 fcp_icmd_free(pptr, icmd);
7760 7759 return;
7761 7760 }
7762 7761 } else if (exists) {
7763 7762 /*
7764 7763 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
7765 7764 * actually exists in REPORT_LUN response
7766 7765 */
7767 7766 if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
7768 7767 plun->lun_state &=
7769 7768 ~FCP_LUN_DEVICE_NOT_CONNECTED;
7770 7769 }
7771 7770 if (offline || plun->lun_num == 0) {
7772 7771 if (plun->lun_state & FCP_LUN_DISAPPEARED) {
7773 7772 plun->lun_state &= ~FCP_LUN_DISAPPEARED;
7774 7773 mutex_exit(&ptgt->tgt_mutex);
7775 7774 fcp_log(CE_NOTE, pptr->port_dip,
7776 7775 "!Lun=%x for target=%x reappeared",
7777 7776 plun->lun_num, ptgt->tgt_d_id);
7778 7777 mutex_enter(&ptgt->tgt_mutex);
7779 7778 }
7780 7779 }
7781 7780 }
7782 7781 }
7783 7782
7784 7783 ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
7785 7784 mutex_exit(&ptgt->tgt_mutex);
7786 7785
7787 7786 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7788 7787 fcp_trace, FCP_BUF_LEVEL_5, 0,
7789 7788 "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
7790 7789 pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);
7791 7790
7792 7791 /* scan each lun */
7793 7792 for (i = 0; i < nluns_claimed; i++) {
7794 7793 uchar_t *lun_string;
7795 7794
7796 7795 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7797 7796
7798 7797 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7799 7798 fcp_trace, FCP_BUF_LEVEL_5, 0,
7800 7799 "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
7801 7800 " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
7802 7801 lun_string[0]);
7803 7802
7804 7803 switch (lun_string[0] & 0xC0) {
7805 7804 case FCP_LUN_ADDRESSING:
7806 7805 case FCP_PD_ADDRESSING:
7807 7806 case FCP_VOLUME_ADDRESSING:
7808 7807 lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
7809 7808
7810 7809 /* We will skip masked LUNs because of the blacklist. */
7811 7810 if (fcp_lun_blacklist != NULL) {
7812 7811 mutex_enter(&ptgt->tgt_mutex);
7813 7812 if (fcp_should_mask(&ptgt->tgt_port_wwn,
7814 7813 lun_num) == TRUE) {
7815 7814 ptgt->tgt_lun_cnt--;
7816 7815 mutex_exit(&ptgt->tgt_mutex);
7817 7816 break;
7818 7817 }
7819 7818 mutex_exit(&ptgt->tgt_mutex);
7820 7819 }
7821 7820
7822 7821 /* see if this LUN is already allocated */
7823 7822 if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
7824 7823 plun = fcp_alloc_lun(ptgt);
7825 7824 if (plun == NULL) {
7826 7825 fcp_log(CE_NOTE, pptr->port_dip,
7827 7826 "!Lun allocation failed"
7828 7827 " target=%x lun=%x",
7829 7828 ptgt->tgt_d_id, lun_num);
7830 7829 break;
7831 7830 }
7832 7831 }
7833 7832
7834 7833 mutex_enter(&plun->lun_tgt->tgt_mutex);
7835 7834 /* convert to LUN */
7836 7835 plun->lun_addr.ent_addr_0 =
7837 7836 BE_16(*(uint16_t *)&(lun_string[0]));
7838 7837 plun->lun_addr.ent_addr_1 =
7839 7838 BE_16(*(uint16_t *)&(lun_string[2]));
7840 7839 plun->lun_addr.ent_addr_2 =
7841 7840 BE_16(*(uint16_t *)&(lun_string[4]));
7842 7841 plun->lun_addr.ent_addr_3 =
7843 7842 BE_16(*(uint16_t *)&(lun_string[6]));
7844 7843
7845 7844 plun->lun_num = lun_num;
7846 7845 plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
7847 7846 plun->lun_state &= ~FCP_LUN_OFFLINE;
7848 7847 mutex_exit(&plun->lun_tgt->tgt_mutex);
7849 7848
7850 7849 /* Retrieve the rscn count (if a valid one exists) */
7851 7850 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7852 7851 rscn_count = ((fc_ulp_rscn_info_t *)
7853 7852 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7854 7853 ulp_rscn_count;
7855 7854 } else {
7856 7855 rscn_count = FC_INVALID_RSCN_COUNT;
7857 7856 }
7858 7857
7859 7858 if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
7860 7859 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7861 7860 icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7862 7861 mutex_enter(&pptr->port_mutex);
7863 7862 mutex_enter(&plun->lun_tgt->tgt_mutex);
7864 7863 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7865 7864 fcp_log(CE_NOTE, pptr->port_dip,
7866 7865 "!failed to send INQUIRY"
7867 7866 " target=%x lun=%x",
7868 7867 ptgt->tgt_d_id, plun->lun_num);
7869 7868 } else {
7870 7869 FCP_TRACE(fcp_logq,
7871 7870 pptr->port_instbuf, fcp_trace,
7872 7871 FCP_BUF_LEVEL_5, 0,
7873 7872 "fcp_handle_reportlun,2: state"
7874 7873 					    " change occurred for D_ID=0x%x",
7875 7874 ptgt->tgt_d_id);
7876 7875 }
7877 7876 mutex_exit(&plun->lun_tgt->tgt_mutex);
7878 7877 mutex_exit(&pptr->port_mutex);
7879 7878 } else {
7880 7879 continue;
7881 7880 }
7882 7881 break;
7883 7882
7884 7883 default:
7885 7884 fcp_log(CE_WARN, NULL,
7886 7885 "!Unsupported LUN Addressing method %x "
7887 7886 "in response to REPORT_LUN", lun_string[0]);
7888 7887 break;
7889 7888 }
7890 7889
7891 7890 /*
7892 7891 * each time through this loop we should decrement
7893 7892 * the tmp_cnt by one -- since we go through this loop
7894 7893 * one time for each LUN, the tmp_cnt should never be <=0
7895 7894 */
7896 7895 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7897 7896 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7898 7897 }
7899 7898
7900 7899 if (i == 0) {
7901 7900 fcp_log(CE_WARN, pptr->port_dip,
7902 7901 "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
7903 7902 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7904 7903 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7905 7904 }
7906 7905
7907 7906 kmem_free(report_lun, len);
7908 7907 fcp_icmd_free(pptr, icmd);
7909 7908 }
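
fcp_handle_reportlun() treats the 4-byte header field of the REPORT LUNS
response as the LUN list length in bytes (hence the >> 3), and for the
addressing methods it supports it builds the LUN number from the low 6 bits
of byte 0 plus byte 1 of each 8-byte entry. A stand-alone sketch of that
parsing follows; the mapping of the 0x00/0x40/0x80 selectors to
FCP_PD_ADDRESSING, FCP_VOLUME_ADDRESSING and FCP_LUN_ADDRESSING is my reading
of the standard addressing methods, not taken from the driver headers.

	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		/* header: list length = 16 bytes (2 LUNs), two 8-byte entries */
		uint8_t resp[8 + 16] = {
			0x00, 0x00, 0x00, 0x10, 0, 0, 0, 0,	/* header */
			0x00, 0x00, 0, 0, 0, 0, 0, 0,		/* LUN 0 */
			0x00, 0x05, 0, 0, 0, 0, 0, 0		/* LUN 5 */
		};
		uint32_t list_len = ((uint32_t)resp[0] << 24) |
		    (resp[1] << 16) | (resp[2] << 8) | resp[3];
		int nluns = list_len >> 3;		/* bytes / 8 */
		int i;

		for (i = 0; i < nluns; i++) {
			const uint8_t *ent = &resp[8 + 8 * i];

			/* top two bits select the addressing method */
			switch (ent[0] & 0xC0) {
			case 0x00:	/* peripheral device addressing */
			case 0x40:	/* flat space addressing */
			case 0x80:	/* logical unit addressing */
				printf("LUN %d\n",
				    ((ent[0] & 0x3F) << 8) | ent[1]);
				break;
			default:
				printf("unsupported addressing 0x%x\n", ent[0]);
				break;
			}
		}
		return (0);
	}
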
7910 7909
7911 7910
7912 7911 /*
7913 7912 * called internally to return a LUN given a target and a LUN number
7914 7913 */
7915 7914 static struct fcp_lun *
7916 7915 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7917 7916 {
7918 7917 struct fcp_lun *plun;
7919 7918
7920 7919 mutex_enter(&ptgt->tgt_mutex);
7921 7920 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7922 7921 if (plun->lun_num == lun_num) {
7923 7922 mutex_exit(&ptgt->tgt_mutex);
7924 7923 return (plun);
7925 7924 }
7926 7925 }
7927 7926 mutex_exit(&ptgt->tgt_mutex);
7928 7927
7929 7928 return (NULL);
7930 7929 }
7931 7930
7932 7931
7933 7932 /*
7934 7933 * handle finishing one target for fcp_finish_init
7935 7934 *
7936 7935 * return true (non-zero) if we want finish_init to continue with the
7937 7936 * next target
7938 7937 *
7939 7938 * called with the port mutex held
7940 7939 */
7941 7940 /*ARGSUSED*/
7942 7941 static int
7943 7942 fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
7944 7943 int link_cnt, int tgt_cnt, int cause)
7945 7944 {
7946 7945 int rval = 1;
7947 7946 ASSERT(pptr != NULL);
7948 7947 ASSERT(ptgt != NULL);
7949 7948
7950 7949 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7951 7950 fcp_trace, FCP_BUF_LEVEL_5, 0,
7952 7951 "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
7953 7952 ptgt->tgt_state);
7954 7953
7955 7954 ASSERT(mutex_owned(&pptr->port_mutex));
7956 7955
7957 7956 if ((pptr->port_link_cnt != link_cnt) ||
7958 7957 (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
7959 7958 /*
7960 7959 * oh oh -- another link reset or target change
7961 7960 * must have occurred while we are in here
7962 7961 */
7963 7962 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);
7964 7963
7965 7964 return (0);
7966 7965 } else {
7967 7966 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
7968 7967 }
7969 7968
7970 7969 mutex_enter(&ptgt->tgt_mutex);
7971 7970
7972 7971 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
7973 7972 /*
7974 7973 * tgt is not offline -- is it marked (i.e. needs
7975 7974 * to be offlined) ??
7976 7975 */
7977 7976 if (ptgt->tgt_state & FCP_TGT_MARK) {
7978 7977 /*
7979 7978 * this target not offline *and*
7980 7979 * marked
7981 7980 */
7982 7981 ptgt->tgt_state &= ~FCP_TGT_MARK;
7983 7982 rval = fcp_offline_target(pptr, ptgt, link_cnt,
7984 7983 tgt_cnt, 0, 0);
7985 7984 } else {
7986 7985 ptgt->tgt_state &= ~FCP_TGT_BUSY;
7987 7986
7988 7987 /* create the LUNs */
7989 7988 if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
7990 7989 ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
7991 7990 fcp_create_luns(ptgt, link_cnt, tgt_cnt,
7992 7991 cause);
7993 7992 ptgt->tgt_device_created = 1;
7994 7993 } else {
7995 7994 fcp_update_tgt_state(ptgt, FCP_RESET,
7996 7995 FCP_LUN_BUSY);
7997 7996 }
7998 7997 }
7999 7998 }
8000 7999
8001 8000 mutex_exit(&ptgt->tgt_mutex);
8002 8001
8003 8002 return (rval);
8004 8003 }
8005 8004
8006 8005
8007 8006 /*
8008 8007 * this routine is called to finish port initialization
8009 8008 *
8010 8009 * Each port has a "temp" counter -- when a state change happens (e.g.
8011 8010 * port online), the temp count is set to the number of devices in the map.
8012 8011 * Then, as each device gets "discovered", the temp counter is decremented
8013 8012 * by one. When this count reaches zero we know that all of the devices
8014 8013 * in the map have been discovered (or an error has occurred), so we can
8015 8014 * then finish initialization -- which is done by this routine (well, this
8016 8015  * and fcp_finish_tgt())
8017 8016 *
8018 8017 * acquires and releases the global mutex
8019 8018 *
8020 8019 * called with the port mutex owned
8021 8020 */
8022 8021 static void
8023 8022 fcp_finish_init(struct fcp_port *pptr)
8024 8023 {
8025 8024 #ifdef DEBUG
8026 8025 bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
8027 8026 pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
8028 8027 FCP_STACK_DEPTH);
8029 8028 #endif /* DEBUG */
8030 8029
8031 8030 ASSERT(mutex_owned(&pptr->port_mutex));
8032 8031
8033 8032 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8034 8033 fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
8035 8034 " entering; ipkt count=%d", pptr->port_ipkt_cnt);
8036 8035
8037 8036 if ((pptr->port_state & FCP_STATE_ONLINING) &&
8038 8037 !(pptr->port_state & (FCP_STATE_SUSPENDED |
8039 8038 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
8040 8039 pptr->port_state &= ~FCP_STATE_ONLINING;
8041 8040 pptr->port_state |= FCP_STATE_ONLINE;
8042 8041 }
8043 8042
8044 8043 /* Wake up threads waiting on config done */
8045 8044 cv_broadcast(&pptr->port_config_cv);
8046 8045 }
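
The temp-counter scheme described in the comment above fcp_finish_init() is
essentially a countdown: set the counter to the number of devices in the map,
decrement it as each device finishes discovery, and wake the waiters when it
reaches zero. A generic pthreads analogue of that pattern is sketched below;
it is not the driver's implementation (which uses its tgt_tmp_cnt counters
and the port_config_cv condition variable), just an illustration of the idea.

	#include <pthread.h>

	struct discovery {
		pthread_mutex_t	lock;
		pthread_cond_t	done_cv;
		int		tmp_cnt;	/* devices still outstanding */
	};

	/* called once per device as its discovery completes (or fails) */
	static void
	discovery_finish_one(struct discovery *d)
	{
		(void) pthread_mutex_lock(&d->lock);
		if (--d->tmp_cnt == 0)
			(void) pthread_cond_broadcast(&d->done_cv);
		(void) pthread_mutex_unlock(&d->lock);
	}

	/* wait until every device in the map has been processed */
	static void
	discovery_wait_all(struct discovery *d)
	{
		(void) pthread_mutex_lock(&d->lock);
		while (d->tmp_cnt > 0)
			(void) pthread_cond_wait(&d->done_cv, &d->lock);
		(void) pthread_mutex_unlock(&d->lock);
	}

	int
	main(void)
	{
		struct discovery d = {
			PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 1
		};

		discovery_finish_one(&d);	/* the only device completes */
		discovery_wait_all(&d);		/* returns immediately */
		return (0);
	}
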
8047 8046
8048 8047
8049 8048 /*
8050 8049 * called from fcp_finish_init to create the LUNs for a target
8051 8050 *
8052 8051 * called with the port mutex owned
8053 8052 */
8054 8053 static void
8055 8054 fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
8056 8055 {
8057 8056 struct fcp_lun *plun;
8058 8057 struct fcp_port *pptr;
8059 8058 child_info_t *cip = NULL;
8060 8059
8061 8060 ASSERT(ptgt != NULL);
8062 8061 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8063 8062
8064 8063 pptr = ptgt->tgt_port;
8065 8064
8066 8065 ASSERT(pptr != NULL);
8067 8066
8068 8067 /* scan all LUNs for this target */
8069 8068 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8070 8069 if (plun->lun_state & FCP_LUN_OFFLINE) {
8071 8070 continue;
8072 8071 }
8073 8072
8074 8073 if (plun->lun_state & FCP_LUN_MARK) {
8075 8074 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8076 8075 fcp_trace, FCP_BUF_LEVEL_2, 0,
8077 8076 "fcp_create_luns: offlining marked LUN!");
8078 8077 fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
8079 8078 continue;
8080 8079 }
8081 8080
8082 8081 plun->lun_state &= ~FCP_LUN_BUSY;
8083 8082
8084 8083 /*
8085 8084 		 * There are conditions in which the FCP_LUN_INIT flag is cleared
8086 8085 		 * but we have a valid plun->lun_cip. To cover this case, also
8087 8086 		 * CLEAR_BUSY whenever we have a valid lun_cip.
8088 8087 */
8089 8088 if (plun->lun_mpxio && plun->lun_cip &&
8090 8089 (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
8091 8090 FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8092 8091 0, 0))) {
8093 8092 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8094 8093 fcp_trace, FCP_BUF_LEVEL_2, 0,
8095 8094 "fcp_create_luns: enable lun %p failed!",
8096 8095 plun);
8097 8096 }
8098 8097
8099 8098 if (plun->lun_state & FCP_LUN_INIT &&
8100 8099 !(plun->lun_state & FCP_LUN_CHANGED)) {
8101 8100 continue;
8102 8101 }
8103 8102
8104 8103 if (cause == FCP_CAUSE_USER_CREATE) {
8105 8104 continue;
8106 8105 }
8107 8106
8108 8107 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8109 8108 fcp_trace, FCP_BUF_LEVEL_6, 0,
8110 8109 "create_luns: passing ONLINE elem to HP thread");
8111 8110
8112 8111 /*
8113 8112 * If lun has changed, prepare for offlining the old path.
8114 8113 		 * Do not offline the old path right now, since it may
8115 8114 		 * still be open.
8116 8115 */
8117 8116 if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
8118 8117 fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8119 8118 }
8120 8119
8121 8120 /* pass an ONLINE element to the hotplug thread */
8122 8121 if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
8123 8122 link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {
8124 8123
8125 8124 /*
8126 8125 			 * We cannot attach synchronously (i.e. pass
8127 8126 			 * NDI_ONLINE_ATTACH) here, as we might be
8128 8127 * coming from an interrupt or callback
8129 8128 * thread.
8130 8129 */
8131 8130 if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
8132 8131 link_cnt, tgt_cnt, 0, 0)) {
8133 8132 fcp_log(CE_CONT, pptr->port_dip,
8134 8133 "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
8135 8134 plun->lun_tgt->tgt_d_id, plun->lun_num);
8136 8135 }
8137 8136 }
8138 8137 }
8139 8138 }
8140 8139
8141 8140
8142 8141 /*
8143 8142 * function to online/offline devices
8144 8143 */
8145 8144 static int
8146 8145 fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
8147 8146 int online, int lcount, int tcount, int flags)
8148 8147 {
8149 8148 int rval = NDI_FAILURE;
8150 8149 int circ;
8151 8150 child_info_t *ccip;
8152 8151 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
8153 8152 int is_mpxio = pptr->port_mpxio;
8154 8153 dev_info_t *cdip, *pdip;
8155 8154 char *devname;
8156 8155
8157 8156 if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
8158 8157 /*
8159 8158 * When this event gets serviced, lun_cip and lun_mpxio
8160 8159 		 * have changed, so it should be invalidated now.
8161 8160 */
8162 8161 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
8163 8162 FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
8164 8163 "plun: %p, cip: %p, what:%d", plun, cip, online);
8165 8164 return (rval);
8166 8165 }
8167 8166
8168 8167 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8169 8168 fcp_trace, FCP_BUF_LEVEL_2, 0,
8170 8169 "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
8171 8170 "flags=%x mpxio=%x\n",
8172 8171 plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
8173 8172 plun->lun_mpxio);
8174 8173
8175 8174 /*
8176 8175 * lun_mpxio needs checking here because we can end up in a race
8177 8176 * condition where this task has been dispatched while lun_mpxio is
8178 8177 * set, but an earlier FCP_ONLINE task for the same LUN tried to
8179 8178 * enable MPXIO for the LUN, but was unable to, and hence cleared
8180 8179 * the flag. We rely on the serialization of the tasks here. We return
8181 8180 * NDI_SUCCESS so any callers continue without reporting spurious
8182 8181  * errors, and they still think we're an MPXIO LUN.
8183 8182 */
8184 8183
8185 8184 if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
8186 8185 online == FCP_MPXIO_PATH_SET_BUSY) {
8187 8186 if (plun->lun_mpxio) {
8188 8187 rval = fcp_update_mpxio_path(plun, cip, online);
8189 8188 } else {
8190 8189 rval = NDI_SUCCESS;
8191 8190 }
8192 8191 return (rval);
8193 8192 }
8194 8193
8195 8194 /*
8196 8195 * Explicit devfs_clean() due to ndi_devi_offline() not
8197 8196 * executing devfs_clean() if parent lock is held.
8198 8197 */
8199 8198 ASSERT(!servicing_interrupt());
8200 8199 if (online == FCP_OFFLINE) {
8201 8200 if (plun->lun_mpxio == 0) {
8202 8201 if (plun->lun_cip == cip) {
8203 8202 cdip = DIP(plun->lun_cip);
8204 8203 } else {
8205 8204 cdip = DIP(cip);
8206 8205 }
8207 8206 } else if ((plun->lun_cip == cip) && plun->lun_cip) {
8208 8207 cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8209 8208 } else if ((plun->lun_cip != cip) && cip) {
8210 8209 /*
8211 8210 			 * This means a DTYPE/GUID change; we shall get the
8212 8211 * dip of the old cip instead of the current lun_cip.
8213 8212 */
8214 8213 cdip = mdi_pi_get_client(PIP(cip));
8215 8214 }
8216 8215 if (cdip) {
8217 8216 if (i_ddi_devi_attached(cdip)) {
8218 8217 pdip = ddi_get_parent(cdip);
8219 8218 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
8220 8219 ndi_devi_enter(pdip, &circ);
8221 8220 (void) ddi_deviname(cdip, devname);
8222 8221 ndi_devi_exit(pdip, circ);
8223 8222 /*
8224 8223 * Release parent lock before calling
8225 8224 * devfs_clean().
8226 8225 */
8227 8226 rval = devfs_clean(pdip, devname + 1,
8228 8227 DV_CLEAN_FORCE);
8229 8228 kmem_free(devname, MAXNAMELEN + 1);
8230 8229 /*
8231 8230 * Return if devfs_clean() fails for
8232 8231 * non-MPXIO case.
8233 8232 * For MPXIO case, another path could be
8234 8233 * offlined.
8235 8234 */
8236 8235 if (rval && plun->lun_mpxio == 0) {
8237 8236 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8238 8237 fcp_trace, FCP_BUF_LEVEL_3, 0,
8239 8238 "fcp_trigger_lun: devfs_clean "
8240 8239 "failed rval=%x dip=%p",
8241 8240 rval, pdip);
8242 8241 return (NDI_FAILURE);
8243 8242 }
8244 8243 }
8245 8244 }
8246 8245 }
8247 8246
8248 8247 if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
8249 8248 return (NDI_FAILURE);
8250 8249 }
8251 8250
8252 8251 if (is_mpxio) {
8253 8252 mdi_devi_enter(pptr->port_dip, &circ);
8254 8253 } else {
8255 8254 ndi_devi_enter(pptr->port_dip, &circ);
8256 8255 }
8257 8256
8258 8257 mutex_enter(&pptr->port_mutex);
8259 8258 mutex_enter(&plun->lun_mutex);
8260 8259
8261 8260 if (online == FCP_ONLINE) {
8262 8261 ccip = fcp_get_cip(plun, cip, lcount, tcount);
8263 8262 if (ccip == NULL) {
8264 8263 goto fail;
8265 8264 }
8266 8265 } else {
8267 8266 if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
8268 8267 goto fail;
8269 8268 }
8270 8269 ccip = cip;
8271 8270 }
8272 8271
8273 8272 if (online == FCP_ONLINE) {
8274 8273 rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
8275 8274 &circ);
8276 8275 fc_ulp_log_device_event(pptr->port_fp_handle,
8277 8276 FC_ULP_DEVICE_ONLINE);
8278 8277 } else {
8279 8278 rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
8280 8279 &circ);
8281 8280 fc_ulp_log_device_event(pptr->port_fp_handle,
8282 8281 FC_ULP_DEVICE_OFFLINE);
8283 8282 }
8284 8283
8285 8284 fail: mutex_exit(&plun->lun_mutex);
8286 8285 mutex_exit(&pptr->port_mutex);
8287 8286
8288 8287 if (is_mpxio) {
8289 8288 mdi_devi_exit(pptr->port_dip, circ);
8290 8289 } else {
8291 8290 ndi_devi_exit(pptr->port_dip, circ);
8292 8291 }
8293 8292
8294 8293 fc_ulp_idle_port(pptr->port_fp_handle);
8295 8294
8296 8295 return (rval);
8297 8296 }
8298 8297
8299 8298
8300 8299 /*
8301 8300 * take a target offline by taking all of its LUNs offline
8302 8301 */
8303 8302 /*ARGSUSED*/
8304 8303 static int
8305 8304 fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8306 8305 int link_cnt, int tgt_cnt, int nowait, int flags)
8307 8306 {
8308 8307 struct fcp_tgt_elem *elem;
8309 8308
8310 8309 ASSERT(mutex_owned(&pptr->port_mutex));
8311 8310 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8312 8311
8313 8312 ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));
8314 8313
8315 8314 if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
8316 8315 ptgt->tgt_change_cnt)) {
8317 8316 mutex_exit(&ptgt->tgt_mutex);
8318 8317 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
8319 8318 mutex_enter(&ptgt->tgt_mutex);
8320 8319
8321 8320 return (0);
8322 8321 }
8323 8322
8324 8323 ptgt->tgt_pd_handle = NULL;
8325 8324 mutex_exit(&ptgt->tgt_mutex);
8326 8325 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
8327 8326 mutex_enter(&ptgt->tgt_mutex);
8328 8327
8329 8328 tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
8330 8329
8331 8330 if (ptgt->tgt_tcap &&
8332 8331 (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8333 8332 elem->flags = flags;
8334 8333 elem->time = fcp_watchdog_time;
8335 8334 if (nowait == 0) {
8336 8335 elem->time += fcp_offline_delay;
8337 8336 }
8338 8337 elem->ptgt = ptgt;
8339 8338 elem->link_cnt = link_cnt;
8340 8339 elem->tgt_cnt = tgt_cnt;
8341 8340 elem->next = pptr->port_offline_tgts;
8342 8341 pptr->port_offline_tgts = elem;
8343 8342 } else {
8344 8343 fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
8345 8344 }
8346 8345
8347 8346 return (1);
8348 8347 }
8349 8348
8350 8349
8351 8350 static void
8352 8351 fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8353 8352 int link_cnt, int tgt_cnt, int flags)
8354 8353 {
8355 8354 ASSERT(mutex_owned(&pptr->port_mutex));
8356 8355 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8357 8356
8358 8357 fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
8359 8358 ptgt->tgt_state = FCP_TGT_OFFLINE;
8360 8359 ptgt->tgt_pd_handle = NULL;
8361 8360 fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
8362 8361 }
8363 8362
8364 8363
8365 8364 static void
8366 8365 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
8367 8366 int flags)
8368 8367 {
8369 8368 struct fcp_lun *plun;
8370 8369
8371 8370 ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
8372 8371 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8373 8372
8374 8373 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8375 8374 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
8376 8375 fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
8377 8376 }
8378 8377 }
8379 8378 }
8380 8379
8381 8380
8382 8381 /*
8383 8382 * take a LUN offline
8384 8383 *
8385 8384 * enters and leaves with the target mutex held, releasing it in the process
8386 8385 *
8387 8386 * allocates memory in non-sleep mode
8388 8387 */
8389 8388 static void
8390 8389 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8391 8390 int nowait, int flags)
8392 8391 {
8393 8392 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
8394 8393 struct fcp_lun_elem *elem;
8395 8394
8396 8395 ASSERT(plun != NULL);
8397 8396 ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8398 8397
8399 8398 if (nowait) {
8400 8399 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8401 8400 return;
8402 8401 }
8403 8402
8404 8403 if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8405 8404 elem->flags = flags;
8406 8405 elem->time = fcp_watchdog_time;
8407 8406 if (nowait == 0) {
8408 8407 elem->time += fcp_offline_delay;
8409 8408 }
8410 8409 elem->plun = plun;
8411 8410 elem->link_cnt = link_cnt;
8412 8411 elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
8413 8412 elem->next = pptr->port_offline_luns;
8414 8413 pptr->port_offline_luns = elem;
8415 8414 } else {
8416 8415 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8417 8416 }
8418 8417 }
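
/*
 * A short sketch of the deferred-offline path above: when nowait is
 * zero the LUN is not offlined immediately; an fcp_lun_elem is queued
 * on port_offline_luns with
 *
 *	elem->time = fcp_watchdog_time + fcp_offline_delay
 *
 * and fcp_scan_offline_luns(), below, performs the offline once
 * fcp_watchdog_time has caught up with elem->time.  If the KM_NOSLEEP
 * allocation of the element fails, the LUN is offlined right away as
 * a fallback.
 */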
8419 8418
8420 8419
8421 8420 static void
8422 8421 fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
8423 8422 {
8424 8423 struct fcp_pkt *head = NULL;
8425 8424
8426 8425 ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8427 8426
8428 8427 mutex_exit(&LUN_TGT->tgt_mutex);
8429 8428
8430 8429 head = fcp_scan_commands(plun);
8431 8430 if (head != NULL) {
8432 8431 fcp_abort_commands(head, LUN_PORT);
8433 8432 }
8434 8433
8435 8434 mutex_enter(&LUN_TGT->tgt_mutex);
8436 8435
8437 8436 if (plun->lun_cip && plun->lun_mpxio) {
8438 8437 /*
8439 8438 		 * Inform MPxIO that the LUN busy condition is cleared
8440 8439 */
8441 8440 if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
8442 8441 FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8443 8442 0, 0)) {
8444 8443 fcp_log(CE_NOTE, LUN_PORT->port_dip,
8445 8444 "Can not ENABLE LUN; D_ID=%x, LUN=%x",
8446 8445 LUN_TGT->tgt_d_id, plun->lun_num);
8447 8446 }
8448 8447 /*
8449 8448 		 * Inform MPxIO that the lun is now marked for offline
8450 8449 */
8451 8450 mutex_exit(&LUN_TGT->tgt_mutex);
8452 8451 (void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
8453 8452 mutex_enter(&LUN_TGT->tgt_mutex);
8454 8453 }
8455 8454 }
8456 8455
8457 8456 static void
8458 8457 fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8459 8458 int flags)
8460 8459 {
8461 8460 ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8462 8461
8463 8462 mutex_exit(&LUN_TGT->tgt_mutex);
8464 8463 fcp_update_offline_flags(plun);
8465 8464 mutex_enter(&LUN_TGT->tgt_mutex);
8466 8465
8467 8466 fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8468 8467
8469 8468 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
8470 8469 fcp_trace, FCP_BUF_LEVEL_4, 0,
8471 8470 "offline_lun: passing OFFLINE elem to HP thread");
8472 8471
8473 8472 if (plun->lun_cip) {
8474 8473 fcp_log(CE_NOTE, LUN_PORT->port_dip,
8475 8474 "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
8476 8475 plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
8477 8476 LUN_TGT->tgt_trace);
8478 8477
8479 8478 if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
8480 8479 link_cnt, tgt_cnt, flags, 0)) {
8481 8480 fcp_log(CE_CONT, LUN_PORT->port_dip,
8482 8481 "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
8483 8482 LUN_TGT->tgt_d_id, plun->lun_num);
8484 8483 }
8485 8484 }
8486 8485 }
8487 8486
8488 8487 static void
8489 8488 fcp_scan_offline_luns(struct fcp_port *pptr)
8490 8489 {
8491 8490 struct fcp_lun_elem *elem;
8492 8491 struct fcp_lun_elem *prev;
8493 8492 struct fcp_lun_elem *next;
8494 8493
8495 8494 ASSERT(MUTEX_HELD(&pptr->port_mutex));
8496 8495
8497 8496 prev = NULL;
8498 8497 elem = pptr->port_offline_luns;
8499 8498 while (elem) {
8500 8499 next = elem->next;
8501 8500 if (elem->time <= fcp_watchdog_time) {
8502 8501 int changed = 1;
8503 8502 struct fcp_tgt *ptgt = elem->plun->lun_tgt;
8504 8503
8505 8504 mutex_enter(&ptgt->tgt_mutex);
8506 8505 if (pptr->port_link_cnt == elem->link_cnt &&
8507 8506 ptgt->tgt_change_cnt == elem->tgt_cnt) {
8508 8507 changed = 0;
8509 8508 }
8510 8509
8511 8510 if (!changed &&
8512 8511 !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
8513 8512 fcp_offline_lun_now(elem->plun,
8514 8513 elem->link_cnt, elem->tgt_cnt, elem->flags);
8515 8514 }
8516 8515 mutex_exit(&ptgt->tgt_mutex);
8517 8516
8518 8517 kmem_free(elem, sizeof (*elem));
8519 8518
8520 8519 if (prev) {
8521 8520 prev->next = next;
8522 8521 } else {
8523 8522 pptr->port_offline_luns = next;
8524 8523 }
8525 8524 } else {
8526 8525 prev = elem;
8527 8526 }
8528 8527 elem = next;
8529 8528 }
8530 8529 }
8531 8530
8532 8531
8533 8532 static void
8534 8533 fcp_scan_offline_tgts(struct fcp_port *pptr)
8535 8534 {
8536 8535 struct fcp_tgt_elem *elem;
8537 8536 struct fcp_tgt_elem *prev;
8538 8537 struct fcp_tgt_elem *next;
8539 8538
8540 8539 ASSERT(MUTEX_HELD(&pptr->port_mutex));
8541 8540
8542 8541 prev = NULL;
8543 8542 elem = pptr->port_offline_tgts;
8544 8543 while (elem) {
8545 8544 next = elem->next;
8546 8545 if (elem->time <= fcp_watchdog_time) {
8547 8546 int outdated = 1;
8548 8547 struct fcp_tgt *ptgt = elem->ptgt;
8549 8548
8550 8549 mutex_enter(&ptgt->tgt_mutex);
8551 8550
8552 8551 if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
8553 8552 /* No change on tgt since elem was created. */
8554 8553 outdated = 0;
8555 8554 } else if (ptgt->tgt_change_cnt == elem->tgt_cnt + 1 &&
8556 8555 pptr->port_link_cnt == elem->link_cnt + 1 &&
8557 8556 ptgt->tgt_statec_cause == FCP_CAUSE_LINK_DOWN) {
8558 8557 /*
8559 8558 * Exactly one thing happened to the target
8560 8559 				 * in between: the local port went offline.
8561 8560 * For fp the remote port is already gone so
8562 8561 * it will not tell us again to offline the
8563 8562 * target. We must offline it now.
8564 8563 */
8565 8564 outdated = 0;
8566 8565 }
8567 8566
8568 8567 if (!outdated && !(ptgt->tgt_state &
8569 8568 FCP_TGT_OFFLINE)) {
8570 8569 fcp_offline_target_now(pptr,
8571 8570 ptgt, elem->link_cnt, elem->tgt_cnt,
8572 8571 elem->flags);
8573 8572 }
8574 8573
8575 8574 mutex_exit(&ptgt->tgt_mutex);
8576 8575
8577 8576 kmem_free(elem, sizeof (*elem));
8578 8577
8579 8578 if (prev) {
8580 8579 prev->next = next;
8581 8580 } else {
8582 8581 pptr->port_offline_tgts = next;
8583 8582 }
8584 8583 } else {
8585 8584 prev = elem;
8586 8585 }
8587 8586 elem = next;
8588 8587 }
8589 8588 }
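
/*
 * Both scan routines above apply the same staleness test before acting
 * on an expired element: the deferred offline is carried out only if
 * the link and target change counters still match the values recorded
 * when the element was queued (fcp_scan_offline_tgts additionally
 * accepts the single link-down transition described in the comment
 * above).  Expired elements are removed from their list and freed
 * whether or not the offline is performed.
 */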
8590 8589
8591 8590
8592 8591 static void
8593 8592 fcp_update_offline_flags(struct fcp_lun *plun)
8594 8593 {
8595 8594 struct fcp_port *pptr = LUN_PORT;
8596 8595 ASSERT(plun != NULL);
8597 8596
8598 8597 mutex_enter(&LUN_TGT->tgt_mutex);
8599 8598 plun->lun_state |= FCP_LUN_OFFLINE;
8600 8599 plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);
8601 8600
8602 8601 mutex_enter(&plun->lun_mutex);
8603 8602 if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
8604 8603 dev_info_t *cdip = NULL;
8605 8604
8606 8605 mutex_exit(&LUN_TGT->tgt_mutex);
8607 8606
8608 8607 if (plun->lun_mpxio == 0) {
8609 8608 cdip = DIP(plun->lun_cip);
8610 8609 } else if (plun->lun_cip) {
8611 8610 cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8612 8611 }
8613 8612
8614 8613 mutex_exit(&plun->lun_mutex);
8615 8614 if (cdip) {
8616 8615 (void) ndi_event_retrieve_cookie(
8617 8616 pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
8618 8617 &fcp_remove_eid, NDI_EVENT_NOPASS);
8619 8618 (void) ndi_event_run_callbacks(
8620 8619 pptr->port_ndi_event_hdl, cdip,
8621 8620 fcp_remove_eid, NULL);
8622 8621 }
8623 8622 } else {
8624 8623 mutex_exit(&plun->lun_mutex);
8625 8624 mutex_exit(&LUN_TGT->tgt_mutex);
8626 8625 }
8627 8626 }
8628 8627
8629 8628
8630 8629 /*
8631 8630 * Scan all of the command pkts for this port, moving pkts that
8632 8631 * match our LUN onto our own list (headed by "head")
8633 8632 */
8634 8633 static struct fcp_pkt *
8635 8634 fcp_scan_commands(struct fcp_lun *plun)
8636 8635 {
8637 8636 struct fcp_port *pptr = LUN_PORT;
8638 8637
8639 8638 struct fcp_pkt *cmd = NULL; /* pkt cmd ptr */
8640 8639 struct fcp_pkt *ncmd = NULL; /* next pkt ptr */
8641 8640 struct fcp_pkt *pcmd = NULL; /* the previous command */
8642 8641
8643 8642 struct fcp_pkt *head = NULL; /* head of our list */
8644 8643 struct fcp_pkt *tail = NULL; /* tail of our list */
8645 8644
8646 8645 int cmds_found = 0;
8647 8646
8648 8647 mutex_enter(&pptr->port_pkt_mutex);
8649 8648 for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
8650 8649 struct fcp_lun *tlun =
8651 8650 ADDR2LUN(&cmd->cmd_pkt->pkt_address);
8652 8651
8653 8652 ncmd = cmd->cmd_next; /* set next command */
8654 8653
8655 8654 /*
8656 8655 * if this pkt is for a different LUN or the
8657 8656 * command is sent down, skip it.
8658 8657 */
8659 8658 if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
8660 8659 (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
8661 8660 pcmd = cmd;
8662 8661 continue;
8663 8662 }
8664 8663 cmds_found++;
8665 8664 if (pcmd != NULL) {
8666 8665 ASSERT(pptr->port_pkt_head != cmd);
8667 8666 pcmd->cmd_next = cmd->cmd_next;
8668 8667 } else {
8669 8668 ASSERT(cmd == pptr->port_pkt_head);
8670 8669 pptr->port_pkt_head = cmd->cmd_next;
8671 8670 }
8672 8671
8673 8672 if (cmd == pptr->port_pkt_tail) {
8674 8673 pptr->port_pkt_tail = pcmd;
8675 8674 if (pcmd) {
8676 8675 pcmd->cmd_next = NULL;
8677 8676 }
8678 8677 }
8679 8678
8680 8679 if (head == NULL) {
8681 8680 head = tail = cmd;
8682 8681 } else {
8683 8682 ASSERT(tail != NULL);
8684 8683
8685 8684 tail->cmd_next = cmd;
8686 8685 tail = cmd;
8687 8686 }
8688 8687 cmd->cmd_next = NULL;
8689 8688 }
8690 8689 mutex_exit(&pptr->port_pkt_mutex);
8691 8690
8692 8691 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8693 8692 fcp_trace, FCP_BUF_LEVEL_8, 0,
8694 8693 "scan commands: %d cmd(s) found", cmds_found);
8695 8694
8696 8695 return (head);
8697 8696 }
8698 8697
8699 8698
8700 8699 /*
8701 8700 * Abort all the commands in the command queue
8702 8701 */
8703 8702 static void
8704 8703 fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
8705 8704 {
8706 8705 struct fcp_pkt *cmd = NULL; /* pkt cmd ptr */
8707 8706 struct fcp_pkt *ncmd = NULL; /* next pkt ptr */
8708 8707
8709 8708 ASSERT(mutex_owned(&pptr->port_mutex));
8710 8709
8711 8710 	/* scan through the pkts and invalidate them */
8712 8711 for (cmd = head; cmd != NULL; cmd = ncmd) {
8713 8712 struct scsi_pkt *pkt = cmd->cmd_pkt;
8714 8713
8715 8714 ncmd = cmd->cmd_next;
8716 8715 ASSERT(pkt != NULL);
8717 8716
8718 8717 /*
8719 8718 		 * The lun is going to be marked offline. Tell
8720 8719 * the target driver not to requeue or retry this command
8721 8720 * as the device is going to be offlined pretty soon.
8722 8721 */
8723 8722 pkt->pkt_reason = CMD_DEV_GONE;
8724 8723 pkt->pkt_statistics = 0;
8725 8724 pkt->pkt_state = 0;
8726 8725
8727 8726 /* reset cmd flags/state */
8728 8727 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
8729 8728 cmd->cmd_state = FCP_PKT_IDLE;
8730 8729
8731 8730 /*
8732 8731 * ensure we have a packet completion routine,
8733 8732 * then call it.
8734 8733 */
8735 8734 ASSERT(pkt->pkt_comp != NULL);
8736 8735
8737 8736 mutex_exit(&pptr->port_mutex);
8738 8737 fcp_post_callback(cmd);
8739 8738 mutex_enter(&pptr->port_mutex);
8740 8739 }
8741 8740 }
8742 8741
8743 8742
8744 8743 /*
8745 8744 * the pkt_comp callback for command packets
8746 8745 */
8747 8746 static void
8748 8747 fcp_cmd_callback(fc_packet_t *fpkt)
8749 8748 {
8750 8749 struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
8751 8750 struct scsi_pkt *pkt = cmd->cmd_pkt;
8752 8751 struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
8753 8752
8754 8753 ASSERT(cmd->cmd_state != FCP_PKT_IDLE);
8755 8754
8756 8755 if (cmd->cmd_state == FCP_PKT_IDLE) {
8757 8756 cmn_err(CE_PANIC, "Packet already completed %p",
8758 8757 (void *)cmd);
8759 8758 }
8760 8759
8761 8760 /*
8762 8761 	 * The watch thread should be freeing the packet; ignore it.
8763 8762 */
8764 8763 if (cmd->cmd_state == FCP_PKT_ABORTING) {
8765 8764 fcp_log(CE_CONT, pptr->port_dip,
8766 8765 "!FCP: Pkt completed while aborting\n");
8767 8766 return;
8768 8767 }
8769 8768 cmd->cmd_state = FCP_PKT_IDLE;
8770 8769
8771 8770 fcp_complete_pkt(fpkt);
8772 8771
8773 8772 #ifdef DEBUG
8774 8773 mutex_enter(&pptr->port_pkt_mutex);
8775 8774 pptr->port_npkts--;
8776 8775 mutex_exit(&pptr->port_pkt_mutex);
8777 8776 #endif /* DEBUG */
8778 8777
8779 8778 fcp_post_callback(cmd);
8780 8779 }
8781 8780
8782 8781
8783 8782 static void
8784 8783 fcp_complete_pkt(fc_packet_t *fpkt)
8785 8784 {
8786 8785 int error = 0;
8787 8786 struct fcp_pkt *cmd = (struct fcp_pkt *)
8788 8787 fpkt->pkt_ulp_private;
8789 8788 struct scsi_pkt *pkt = cmd->cmd_pkt;
8790 8789 struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
8791 8790 struct fcp_lun *plun;
8792 8791 struct fcp_tgt *ptgt;
8793 8792 struct fcp_rsp *rsp;
8794 8793 struct scsi_address save;
8795 8794
8796 8795 #ifdef DEBUG
8797 8796 save = pkt->pkt_address;
8798 8797 #endif /* DEBUG */
8799 8798
8800 8799 rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
8801 8800
8802 8801 if (fpkt->pkt_state == FC_PKT_SUCCESS) {
8803 8802 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8804 8803 FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
8805 8804 sizeof (struct fcp_rsp));
8806 8805 }
8807 8806
8808 8807 pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
8809 8808 STATE_SENT_CMD | STATE_GOT_STATUS;
8810 8809
8811 8810 pkt->pkt_resid = 0;
8812 8811
8813 8812 if (fpkt->pkt_datalen) {
8814 8813 pkt->pkt_state |= STATE_XFERRED_DATA;
8815 8814 if (fpkt->pkt_data_resid) {
8816 8815 error++;
8817 8816 }
8818 8817 }
8819 8818
8820 8819 if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
8821 8820 rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
8822 8821 /*
8823 8822 * The next two checks make sure that if there
8824 8823 			 * is neither sense data nor a valid response and
8825 8824 			 * the command came back with a check condition,
8826 8825 * the command should be retried.
8827 8826 */
8828 8827 if (!rsp->fcp_u.fcp_status.rsp_len_set &&
8829 8828 !rsp->fcp_u.fcp_status.sense_len_set) {
8830 8829 pkt->pkt_state &= ~STATE_XFERRED_DATA;
8831 8830 pkt->pkt_resid = cmd->cmd_dmacount;
8832 8831 }
8833 8832 }
8834 8833
8835 8834 if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
8836 8835 return;
8837 8836 }
8838 8837
8839 8838 plun = ADDR2LUN(&pkt->pkt_address);
8840 8839 ptgt = plun->lun_tgt;
8841 8840 ASSERT(ptgt != NULL);
8842 8841
8843 8842 /*
8844 8843 * Update the transfer resid, if appropriate
8845 8844 */
8846 8845 if (rsp->fcp_u.fcp_status.resid_over ||
8847 8846 rsp->fcp_u.fcp_status.resid_under) {
8848 8847 pkt->pkt_resid = rsp->fcp_resid;
8849 8848 }
8850 8849
8851 8850 /*
8852 8851 * First see if we got a FCP protocol error.
8853 8852 		 * First see if we got an FCP protocol error.
8854 8853 if (rsp->fcp_u.fcp_status.rsp_len_set) {
8855 8854 struct fcp_rsp_info *bep;
8856 8855 bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
8857 8856 sizeof (struct fcp_rsp));
8858 8857
8859 8858 if (fcp_validate_fcp_response(rsp, pptr) !=
8860 8859 FC_SUCCESS) {
8861 8860 pkt->pkt_reason = CMD_CMPLT;
8862 8861 *(pkt->pkt_scbp) = STATUS_CHECK;
8863 8862
8864 8863 fcp_log(CE_WARN, pptr->port_dip,
8865 8864 "!SCSI command to d_id=0x%x lun=0x%x"
8866 8865 " failed, Bad FCP response values:"
8867 8866 " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8868 8867 " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8869 8868 ptgt->tgt_d_id, plun->lun_num,
8870 8869 rsp->reserved_0, rsp->reserved_1,
8871 8870 rsp->fcp_u.fcp_status.reserved_0,
8872 8871 rsp->fcp_u.fcp_status.reserved_1,
8873 8872 rsp->fcp_response_len, rsp->fcp_sense_len);
8874 8873
8875 8874 return;
8876 8875 }
8877 8876
8878 8877 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8879 8878 FCP_CP_IN(fpkt->pkt_resp +
8880 8879 sizeof (struct fcp_rsp), bep,
8881 8880 fpkt->pkt_resp_acc,
8882 8881 sizeof (struct fcp_rsp_info));
8883 8882 }
8884 8883
8885 8884 if (bep->rsp_code != FCP_NO_FAILURE) {
8886 8885 child_info_t *cip;
8887 8886
8888 8887 pkt->pkt_reason = CMD_TRAN_ERR;
8889 8888
8890 8889 mutex_enter(&plun->lun_mutex);
8891 8890 cip = plun->lun_cip;
8892 8891 mutex_exit(&plun->lun_mutex);
8893 8892
8894 8893 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8895 8894 fcp_trace, FCP_BUF_LEVEL_2, 0,
8896 8895 "FCP response error on cmd=%p"
8897 8896 " target=0x%x, cip=%p", cmd,
8898 8897 ptgt->tgt_d_id, cip);
8899 8898 }
8900 8899 }
8901 8900
8902 8901 /*
8903 8902 * See if we got a SCSI error with sense data
8904 8903 */
8905 8904 if (rsp->fcp_u.fcp_status.sense_len_set) {
8906 8905 uchar_t rqlen;
8907 8906 caddr_t sense_from;
8908 8907 child_info_t *cip;
8909 8908 timeout_id_t tid;
8910 8909 struct scsi_arq_status *arq;
8911 8910 struct scsi_extended_sense *sense_to;
8912 8911
8913 8912 arq = (struct scsi_arq_status *)pkt->pkt_scbp;
8914 8913 sense_to = &arq->sts_sensedata;
8915 8914
8916 8915 rqlen = (uchar_t)min(rsp->fcp_sense_len,
8917 8916 sizeof (struct scsi_extended_sense));
8918 8917
8919 8918 sense_from = (caddr_t)fpkt->pkt_resp +
8920 8919 sizeof (struct fcp_rsp) + rsp->fcp_response_len;
8921 8920
8922 8921 if (fcp_validate_fcp_response(rsp, pptr) !=
8923 8922 FC_SUCCESS) {
8924 8923 pkt->pkt_reason = CMD_CMPLT;
8925 8924 *(pkt->pkt_scbp) = STATUS_CHECK;
8926 8925
8927 8926 fcp_log(CE_WARN, pptr->port_dip,
8928 8927 "!SCSI command to d_id=0x%x lun=0x%x"
8929 8928 " failed, Bad FCP response values:"
8930 8929 " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8931 8930 " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8932 8931 ptgt->tgt_d_id, plun->lun_num,
8933 8932 rsp->reserved_0, rsp->reserved_1,
8934 8933 rsp->fcp_u.fcp_status.reserved_0,
8935 8934 rsp->fcp_u.fcp_status.reserved_1,
8936 8935 rsp->fcp_response_len, rsp->fcp_sense_len);
8937 8936
8938 8937 return;
8939 8938 }
8940 8939
8941 8940 /*
8942 8941 * copy in sense information
8943 8942 */
8944 8943 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8945 8944 FCP_CP_IN(sense_from, sense_to,
8946 8945 fpkt->pkt_resp_acc, rqlen);
8947 8946 } else {
8948 8947 bcopy(sense_from, sense_to, rqlen);
8949 8948 }
8950 8949
8951 8950 if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
8952 8951 (FCP_SENSE_NO_LUN(sense_to))) {
8953 8952 mutex_enter(&ptgt->tgt_mutex);
8954 8953 if (ptgt->tgt_tid == NULL) {
8955 8954 /*
8956 8955 * Kick off rediscovery
8957 8956 */
8958 8957 tid = timeout(fcp_reconfigure_luns,
8959 8958 (caddr_t)ptgt, drv_usectohz(1));
8960 8959
8961 8960 ptgt->tgt_tid = tid;
8962 8961 ptgt->tgt_state |= FCP_TGT_BUSY;
8963 8962 }
8964 8963 mutex_exit(&ptgt->tgt_mutex);
8965 8964 if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
8966 8965 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8967 8966 fcp_trace, FCP_BUF_LEVEL_3, 0,
8968 8967 "!FCP: Report Lun Has Changed"
8969 8968 " target=%x", ptgt->tgt_d_id);
8970 8969 } else if (FCP_SENSE_NO_LUN(sense_to)) {
8971 8970 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8972 8971 fcp_trace, FCP_BUF_LEVEL_3, 0,
8973 8972 "!FCP: LU Not Supported"
8974 8973 " target=%x", ptgt->tgt_d_id);
8975 8974 }
8976 8975 }
8977 8976 ASSERT(pkt->pkt_scbp != NULL);
8978 8977
8979 8978 pkt->pkt_state |= STATE_ARQ_DONE;
8980 8979
8981 8980 arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;
8982 8981
8983 8982 *((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
8984 8983 arq->sts_rqpkt_reason = 0;
8985 8984 arq->sts_rqpkt_statistics = 0;
8986 8985
8987 8986 arq->sts_rqpkt_state = STATE_GOT_BUS |
8988 8987 STATE_GOT_TARGET | STATE_SENT_CMD |
8989 8988 STATE_GOT_STATUS | STATE_ARQ_DONE |
8990 8989 STATE_XFERRED_DATA;
8991 8990
8992 8991 mutex_enter(&plun->lun_mutex);
8993 8992 cip = plun->lun_cip;
8994 8993 mutex_exit(&plun->lun_mutex);
8995 8994
8996 8995 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8997 8996 fcp_trace, FCP_BUF_LEVEL_8, 0,
8998 8997 "SCSI Check condition on cmd=%p target=0x%x"
8999 8998 " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
9000 8999 " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
9001 9000 cmd->cmd_fcp_cmd.fcp_cdb[0],
9002 9001 rsp->fcp_u.fcp_status.scsi_status,
9003 9002 sense_to->es_key, sense_to->es_add_code,
9004 9003 sense_to->es_qual_code);
9005 9004 }
9006 9005 } else {
9007 9006 plun = ADDR2LUN(&pkt->pkt_address);
9008 9007 ptgt = plun->lun_tgt;
9009 9008 ASSERT(ptgt != NULL);
9010 9009
9011 9010 /*
9012 9011 * Work harder to translate errors into target driver
9013 9012 * understandable ones. Note with despair that the target
9014 9013 	 * drivers don't decode pkt_state and pkt_reason exhaustively.
9015 9014 	 * They resort to using the big hammer most often, which
9016 9015 	 * may not get fixed in the lifetime of this driver.
9017 9016 */
9018 9017 pkt->pkt_state = 0;
9019 9018 pkt->pkt_statistics = 0;
9020 9019
9021 9020 switch (fpkt->pkt_state) {
9022 9021 case FC_PKT_TRAN_ERROR:
9023 9022 switch (fpkt->pkt_reason) {
9024 9023 case FC_REASON_OVERRUN:
9025 9024 pkt->pkt_reason = CMD_CMD_OVR;
9026 9025 pkt->pkt_statistics |= STAT_ABORTED;
9027 9026 break;
9028 9027
9029 9028 case FC_REASON_XCHG_BSY: {
9030 9029 caddr_t ptr;
9031 9030
9032 9031 pkt->pkt_reason = CMD_CMPLT; /* Lie */
9033 9032
9034 9033 ptr = (caddr_t)pkt->pkt_scbp;
9035 9034 if (ptr) {
9036 9035 *ptr = STATUS_BUSY;
9037 9036 }
9038 9037 break;
9039 9038 }
9040 9039
9041 9040 case FC_REASON_ABORTED:
9042 9041 pkt->pkt_reason = CMD_TRAN_ERR;
9043 9042 pkt->pkt_statistics |= STAT_ABORTED;
9044 9043 break;
9045 9044
9046 9045 case FC_REASON_ABORT_FAILED:
9047 9046 pkt->pkt_reason = CMD_ABORT_FAIL;
9048 9047 break;
9049 9048
9050 9049 case FC_REASON_NO_SEQ_INIT:
9051 9050 case FC_REASON_CRC_ERROR:
9052 9051 pkt->pkt_reason = CMD_TRAN_ERR;
9053 9052 pkt->pkt_statistics |= STAT_ABORTED;
9054 9053 break;
9055 9054 default:
9056 9055 pkt->pkt_reason = CMD_TRAN_ERR;
9057 9056 break;
9058 9057 }
9059 9058 break;
9060 9059
9061 9060 case FC_PKT_PORT_OFFLINE: {
9062 9061 dev_info_t *cdip = NULL;
9063 9062 caddr_t ptr;
9064 9063
9065 9064 if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
9066 9065 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9067 9066 fcp_trace, FCP_BUF_LEVEL_8, 0,
9068 9067 "SCSI cmd; LOGIN REQUIRED from FCA for %x",
9069 9068 ptgt->tgt_d_id);
9070 9069 }
9071 9070
9072 9071 mutex_enter(&plun->lun_mutex);
9073 9072 if (plun->lun_mpxio == 0) {
9074 9073 cdip = DIP(plun->lun_cip);
9075 9074 } else if (plun->lun_cip) {
9076 9075 cdip = mdi_pi_get_client(PIP(plun->lun_cip));
9077 9076 }
9078 9077
9079 9078 mutex_exit(&plun->lun_mutex);
9080 9079
9081 9080 if (cdip) {
9082 9081 (void) ndi_event_retrieve_cookie(
9083 9082 pptr->port_ndi_event_hdl, cdip,
9084 9083 FCAL_REMOVE_EVENT, &fcp_remove_eid,
9085 9084 NDI_EVENT_NOPASS);
9086 9085 (void) ndi_event_run_callbacks(
9087 9086 pptr->port_ndi_event_hdl, cdip,
9088 9087 fcp_remove_eid, NULL);
9089 9088 }
9090 9089
9091 9090 /*
9092 9091 			 * If the link goes off-line for a LIP,
9093 9092 			 * this will cause an error to the ST, SG and
9094 9093 			 * SGEN drivers. By setting BUSY we will
9095 9094 			 * give the drivers the chance to retry
9096 9095 			 * before it blows off the job. ST will
9097 9096 * remember how many times it has retried.
9098 9097 */
9099 9098
9100 9099 if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
9101 9100 (plun->lun_type == DTYPE_CHANGER)) {
9102 9101 pkt->pkt_reason = CMD_CMPLT; /* Lie */
9103 9102 ptr = (caddr_t)pkt->pkt_scbp;
9104 9103 if (ptr) {
9105 9104 *ptr = STATUS_BUSY;
9106 9105 }
9107 9106 } else {
9108 9107 pkt->pkt_reason = CMD_TRAN_ERR;
9109 9108 pkt->pkt_statistics |= STAT_BUS_RESET;
9110 9109 }
9111 9110 break;
9112 9111 }
9113 9112
9114 9113 case FC_PKT_TRAN_BSY:
9115 9114 /*
9116 9115 * Use the ssd Qfull handling here.
9117 9116 */
9118 9117 *pkt->pkt_scbp = STATUS_INTERMEDIATE;
9119 9118 pkt->pkt_state = STATE_GOT_BUS;
9120 9119 break;
9121 9120
9122 9121 case FC_PKT_TIMEOUT:
9123 9122 pkt->pkt_reason = CMD_TIMEOUT;
9124 9123 if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
9125 9124 pkt->pkt_statistics |= STAT_TIMEOUT;
9126 9125 } else {
9127 9126 pkt->pkt_statistics |= STAT_ABORTED;
9128 9127 }
9129 9128 break;
9130 9129
9131 9130 case FC_PKT_LOCAL_RJT:
9132 9131 switch (fpkt->pkt_reason) {
9133 9132 case FC_REASON_OFFLINE: {
9134 9133 dev_info_t *cdip = NULL;
9135 9134
9136 9135 mutex_enter(&plun->lun_mutex);
9137 9136 if (plun->lun_mpxio == 0) {
9138 9137 cdip = DIP(plun->lun_cip);
9139 9138 } else if (plun->lun_cip) {
9140 9139 cdip = mdi_pi_get_client(
9141 9140 PIP(plun->lun_cip));
9142 9141 }
9143 9142 mutex_exit(&plun->lun_mutex);
9144 9143
9145 9144 if (cdip) {
9146 9145 (void) ndi_event_retrieve_cookie(
9147 9146 pptr->port_ndi_event_hdl, cdip,
9148 9147 FCAL_REMOVE_EVENT,
9149 9148 &fcp_remove_eid,
9150 9149 NDI_EVENT_NOPASS);
9151 9150 (void) ndi_event_run_callbacks(
9152 9151 pptr->port_ndi_event_hdl,
9153 9152 cdip, fcp_remove_eid, NULL);
9154 9153 }
9155 9154
9156 9155 pkt->pkt_reason = CMD_TRAN_ERR;
9157 9156 pkt->pkt_statistics |= STAT_BUS_RESET;
9158 9157
9159 9158 break;
9160 9159 }
9161 9160
9162 9161 case FC_REASON_NOMEM:
9163 9162 case FC_REASON_QFULL: {
9164 9163 caddr_t ptr;
9165 9164
9166 9165 pkt->pkt_reason = CMD_CMPLT; /* Lie */
9167 9166 ptr = (caddr_t)pkt->pkt_scbp;
9168 9167 if (ptr) {
9169 9168 *ptr = STATUS_BUSY;
9170 9169 }
9171 9170 break;
9172 9171 }
9173 9172
9174 9173 case FC_REASON_DMA_ERROR:
9175 9174 pkt->pkt_reason = CMD_DMA_DERR;
9176 9175 pkt->pkt_statistics |= STAT_ABORTED;
9177 9176 break;
9178 9177
9179 9178 case FC_REASON_CRC_ERROR:
9180 9179 case FC_REASON_UNDERRUN: {
9181 9180 uchar_t status;
9182 9181 /*
9183 9182 * Work around for Bugid: 4240945.
9184 9183 * IB on A5k doesn't set the Underrun bit
9185 9184 				 * in the fcp status when it is transferring
9186 9185 				 * less than the requested amount of data. Work
9187 9186 * around the ses problem to keep luxadm
9188 9187 * happy till ibfirmware is fixed.
9189 9188 */
9190 9189 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
9191 9190 FCP_CP_IN(fpkt->pkt_resp, rsp,
9192 9191 fpkt->pkt_resp_acc,
9193 9192 sizeof (struct fcp_rsp));
9194 9193 }
9195 9194 status = rsp->fcp_u.fcp_status.scsi_status;
9196 9195 if (((plun->lun_type & DTYPE_MASK) ==
9197 9196 DTYPE_ESI) && (status == STATUS_GOOD)) {
9198 9197 pkt->pkt_reason = CMD_CMPLT;
9199 9198 *pkt->pkt_scbp = status;
9200 9199 pkt->pkt_resid = 0;
9201 9200 } else {
9202 9201 pkt->pkt_reason = CMD_TRAN_ERR;
9203 9202 pkt->pkt_statistics |= STAT_ABORTED;
9204 9203 }
9205 9204 break;
9206 9205 }
9207 9206
9208 9207 case FC_REASON_NO_CONNECTION:
9209 9208 case FC_REASON_UNSUPPORTED:
9210 9209 case FC_REASON_ILLEGAL_REQ:
9211 9210 case FC_REASON_BAD_SID:
9212 9211 case FC_REASON_DIAG_BUSY:
9213 9212 case FC_REASON_FCAL_OPN_FAIL:
9214 9213 case FC_REASON_BAD_XID:
9215 9214 default:
9216 9215 pkt->pkt_reason = CMD_TRAN_ERR;
9217 9216 pkt->pkt_statistics |= STAT_ABORTED;
9218 9217 break;
9219 9218
9220 9219 }
9221 9220 break;
9222 9221
9223 9222 case FC_PKT_NPORT_RJT:
9224 9223 case FC_PKT_FABRIC_RJT:
9225 9224 case FC_PKT_NPORT_BSY:
9226 9225 case FC_PKT_FABRIC_BSY:
9227 9226 default:
9228 9227 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9229 9228 fcp_trace, FCP_BUF_LEVEL_8, 0,
9230 9229 "FC Status 0x%x, reason 0x%x",
9231 9230 fpkt->pkt_state, fpkt->pkt_reason);
9232 9231 pkt->pkt_reason = CMD_TRAN_ERR;
9233 9232 pkt->pkt_statistics |= STAT_ABORTED;
9234 9233 break;
9235 9234 }
9236 9235
9237 9236 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9238 9237 fcp_trace, FCP_BUF_LEVEL_9, 0,
9239 9238 "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
9240 9239 " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
9241 9240 fpkt->pkt_reason);
9242 9241 }
9243 9242
9244 9243 ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
9245 9244 }
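
/*
 * A rough summary of the translation above for the non-success cases:
 * conditions that the target driver is expected to retry on its own
 * (FC_REASON_XCHG_BSY, FC_REASON_NOMEM, FC_REASON_QFULL, and a port
 * offline seen by sequential or changer devices) are reported as a
 * completed command with SCSI STATUS_BUSY, FC_PKT_TRAN_BSY becomes
 * STATUS_INTERMEDIATE, timeouts become CMD_TIMEOUT, and most of the
 * remaining failures are collapsed into CMD_TRAN_ERR with STAT_ABORTED
 * or STAT_BUS_RESET set in pkt_statistics.
 */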
9246 9245
9247 9246
9248 9247 static int
9249 9248 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9250 9249 {
9251 9250 if (rsp->reserved_0 || rsp->reserved_1 ||
9252 9251 rsp->fcp_u.fcp_status.reserved_0 ||
9253 9252 rsp->fcp_u.fcp_status.reserved_1) {
9254 9253 /*
9255 9254 * These reserved fields should ideally be zero. FCP-2 does say
9256 9255 * that the recipient need not check for reserved fields to be
9257 9256 * zero. If they are not zero, we will not make a fuss about it
9258 9257 * - just log it (in debug to both trace buffer and messages
9259 9258 * file and to trace buffer only in non-debug) and move on.
9260 9259 *
9261 9260 * Non-zero reserved fields were seen with minnows.
9262 9261 *
9263 9262 * qlc takes care of some of this but we cannot assume that all
9264 9263 * FCAs will do so.
9265 9264 */
9266 9265 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9267 9266 FCP_BUF_LEVEL_5, 0,
9268 9267 "Got fcp response packet with non-zero reserved fields "
9269 9268 "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9270 9269 "status.reserved_0:0x%x, status.reserved_1:0x%x",
9271 9270 rsp->reserved_0, rsp->reserved_1,
9272 9271 rsp->fcp_u.fcp_status.reserved_0,
9273 9272 rsp->fcp_u.fcp_status.reserved_1);
9274 9273 }
9275 9274
9276 9275 if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9277 9276 (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9278 9277 return (FC_FAILURE);
9279 9278 }
9280 9279
9281 9280 if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9282 9281 (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9283 9282 sizeof (struct fcp_rsp))) {
9284 9283 return (FC_FAILURE);
9285 9284 }
9286 9285
9287 9286 return (FC_SUCCESS);
9288 9287 }
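
/*
 * The two length checks above reject FCP_RSP IUs whose rsp_info or
 * sense data would run past the response buffer: fcp_response_len must
 * fit within FCP_MAX_RSP_IU_SIZE minus the fixed fcp_rsp header, and
 * fcp_sense_len must fit within what remains after the header plus the
 * rsp_info area.  As a worked example, assuming FCP_MAX_RSP_IU_SIZE is
 * 256 and sizeof (struct fcp_rsp) is 24, fcp_response_len may be at
 * most 232 and fcp_sense_len at most 232 - fcp_response_len.
 */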
9289 9288
9290 9289
9291 9290 /*
9292 9291  * This is called when there is a change in the device state. The case we're
9293 9292  * handling here is: if the d_id does not match, offline this tgt and online
9294 9293  * a new tgt with the new d_id. Called from fcp_handle_devices with
9295 9294 * port_mutex held.
9296 9295 */
9297 9296 static int
9298 9297 fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
9299 9298 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
9300 9299 {
9301 9300 ASSERT(mutex_owned(&pptr->port_mutex));
9302 9301
9303 9302 FCP_TRACE(fcp_logq, pptr->port_instbuf,
9304 9303 fcp_trace, FCP_BUF_LEVEL_3, 0,
9305 9304 "Starting fcp_device_changed...");
9306 9305
9307 9306 /*
9308 9307 	 * The two cases where port_device_changed is called are
9309 9308 	 * when the target changes its d_id or its hard address.
9310 9309 */
9311 9310 if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
9312 9311 (FC_TOP_EXTERNAL(pptr->port_topology) &&
9313 9312 (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {
9314 9313
9315 9314 /* offline this target */
9316 9315 mutex_enter(&ptgt->tgt_mutex);
9317 9316 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
9318 9317 (void) fcp_offline_target(pptr, ptgt, link_cnt,
9319 9318 0, 1, NDI_DEVI_REMOVE);
9320 9319 }
9321 9320 mutex_exit(&ptgt->tgt_mutex);
9322 9321
9323 9322 fcp_log(CE_NOTE, pptr->port_dip,
9324 9323 "Change in target properties: Old D_ID=%x New D_ID=%x"
9325 9324 " Old HA=%x New HA=%x", ptgt->tgt_d_id,
9326 9325 map_entry->map_did.port_id, ptgt->tgt_hard_addr,
9327 9326 map_entry->map_hard_addr.hard_addr);
9328 9327 }
9329 9328
9330 9329 return (fcp_handle_mapflags(pptr, ptgt, map_entry,
9331 9330 link_cnt, tgt_cnt, cause));
9332 9331 }
9333 9332
9334 9333 /*
9335 9334 * Function: fcp_alloc_lun
9336 9335 *
9337 9336 * Description: Creates a new lun structure and adds it to the list
9338 9337 * of luns of the target.
9339 9338 *
9340 9339 * Argument: ptgt Target the lun will belong to.
9341 9340 *
9342 9341 * Return Value: NULL Failed
9343 9342 * Not NULL Succeeded
9344 9343 *
9345 9344 * Context: Kernel context
9346 9345 */
9347 9346 static struct fcp_lun *
9348 9347 fcp_alloc_lun(struct fcp_tgt *ptgt)
9349 9348 {
9350 9349 struct fcp_lun *plun;
9351 9350
9352 9351 plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9353 9352 if (plun != NULL) {
9354 9353 /*
9355 9354 		 * Initialize the mutex before putting it in the target list,
9356 9355 		 * especially before releasing the target mutex.
9357 9356 */
9358 9357 mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9359 9358 plun->lun_tgt = ptgt;
9360 9359
9361 9360 mutex_enter(&ptgt->tgt_mutex);
9362 9361 plun->lun_next = ptgt->tgt_lun;
9363 9362 ptgt->tgt_lun = plun;
9364 9363 plun->lun_old_guid = NULL;
9365 9364 plun->lun_old_guid_size = 0;
9366 9365 mutex_exit(&ptgt->tgt_mutex);
9367 9366 }
9368 9367
9369 9368 return (plun);
9370 9369 }
9371 9370
9372 9371 /*
9373 9372 * Function: fcp_dealloc_lun
9374 9373 *
9375 9374 * Description: Frees the LUN structure passed by the caller.
9376 9375 *
9377 9376 * Argument: plun LUN structure to free.
9378 9377 *
9379 9378 * Return Value: None
9380 9379 *
9381 9380 * Context: Kernel context.
9382 9381 */
9383 9382 static void
9384 9383 fcp_dealloc_lun(struct fcp_lun *plun)
9385 9384 {
9386 9385 mutex_enter(&plun->lun_mutex);
9387 9386 if (plun->lun_cip) {
9388 9387 fcp_remove_child(plun);
9389 9388 }
9390 9389 mutex_exit(&plun->lun_mutex);
9391 9390
9392 9391 mutex_destroy(&plun->lun_mutex);
9393 9392 if (plun->lun_guid) {
9394 9393 kmem_free(plun->lun_guid, plun->lun_guid_size);
9395 9394 }
9396 9395 if (plun->lun_old_guid) {
9397 9396 kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
9398 9397 }
9399 9398 kmem_free(plun, sizeof (*plun));
9400 9399 }
9401 9400
9402 9401 /*
9403 9402 * Function: fcp_alloc_tgt
9404 9403 *
9405 9404 * Description: Creates a new target structure and adds it to the port
9406 9405 * hash list.
9407 9406 *
9408 9407 * Argument: pptr fcp port structure
9409 9408 * *map_entry entry describing the target to create
9410 9409 * link_cnt Link state change counter
9411 9410 *
9412 9411 * Return Value: NULL Failed
9413 9412 * Not NULL Succeeded
9414 9413 *
9415 9414 * Context: Kernel context.
9416 9415 */
9417 9416 static struct fcp_tgt *
9418 9417 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
9419 9418 {
9420 9419 int hash;
9421 9420 uchar_t *wwn;
9422 9421 struct fcp_tgt *ptgt;
9423 9422
9424 9423 ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
9425 9424 if (ptgt != NULL) {
9426 9425 mutex_enter(&pptr->port_mutex);
9427 9426 if (link_cnt != pptr->port_link_cnt) {
9428 9427 /*
9429 9428 * oh oh -- another link reset
9430 9429 * in progress -- give up
9431 9430 */
9432 9431 mutex_exit(&pptr->port_mutex);
9433 9432 kmem_free(ptgt, sizeof (*ptgt));
9434 9433 ptgt = NULL;
9435 9434 } else {
9436 9435 /*
9437 9436 			 * initialize the mutex before putting it in the port
9438 9437 * wwn list, especially before releasing the port
9439 9438 * mutex.
9440 9439 */
9441 9440 mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);
9442 9441
9443 9442 /* add new target entry to the port's hash list */
9444 9443 wwn = (uchar_t *)&map_entry->map_pwwn;
9445 9444 hash = FCP_HASH(wwn);
9446 9445
9447 9446 ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
9448 9447 pptr->port_tgt_hash_table[hash] = ptgt;
9449 9448
9450 9449 /* save cross-ptr */
9451 9450 ptgt->tgt_port = pptr;
9452 9451
9453 9452 ptgt->tgt_change_cnt = 1;
9454 9453
9455 9454 /* initialize the target manual_config_only flag */
9456 9455 if (fcp_enable_auto_configuration) {
9457 9456 ptgt->tgt_manual_config_only = 0;
9458 9457 } else {
9459 9458 ptgt->tgt_manual_config_only = 1;
9460 9459 }
9461 9460
9462 9461 mutex_exit(&pptr->port_mutex);
9463 9462 }
9464 9463 }
9465 9464
9466 9465 return (ptgt);
9467 9466 }
9468 9467
9469 9468 /*
9470 9469 * Function: fcp_dealloc_tgt
9471 9470 *
9472 9471 * Description: Frees the target structure passed by the caller.
9473 9472 *
9474 9473 * Argument: ptgt Target structure to free.
9475 9474 *
9476 9475 * Return Value: None
9477 9476 *
9478 9477 * Context: Kernel context.
9479 9478 */
9480 9479 static void
9481 9480 fcp_dealloc_tgt(struct fcp_tgt *ptgt)
9482 9481 {
9483 9482 mutex_destroy(&ptgt->tgt_mutex);
9484 9483 kmem_free(ptgt, sizeof (*ptgt));
9485 9484 }
9486 9485
9487 9486
9488 9487 /*
9489 9488 * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry
9490 9489 *
9491 9490  * Device discovery commands will not be retried forever as
9492 9491 * this will have repercussions on other devices that need to
9493 9492 * be submitted to the hotplug thread. After a quick glance
9494 9493 * at the SCSI-3 spec, it was found that the spec doesn't
9495 9494 * mandate a forever retry, rather recommends a delayed retry.
9496 9495 *
9497 9496 * Since Photon IB is single threaded, STATUS_BUSY is common
9498 9497 * in a 4+initiator environment. Make sure the total time
9499 9498  * spent on retries (including the command timeout) does not
9500 9499  * exceed 60 seconds.
9501 9500 */
9502 9501 static void
9503 9502 fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
9504 9503 {
9505 9504 struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9506 9505 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
9507 9506
9508 9507 mutex_enter(&pptr->port_mutex);
9509 9508 mutex_enter(&ptgt->tgt_mutex);
9510 9509 if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
9511 9510 FCP_TRACE(fcp_logq, pptr->port_instbuf,
9512 9511 fcp_trace, FCP_BUF_LEVEL_2, 0,
9513 9512 		    "fcp_queue_ipkt,1:state change occurred"
9514 9513 " for D_ID=0x%x", ptgt->tgt_d_id);
9515 9514 mutex_exit(&ptgt->tgt_mutex);
9516 9515 mutex_exit(&pptr->port_mutex);
9517 9516 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
9518 9517 icmd->ipkt_change_cnt, icmd->ipkt_cause);
9519 9518 fcp_icmd_free(pptr, icmd);
9520 9519 return;
9521 9520 }
9522 9521 mutex_exit(&ptgt->tgt_mutex);
9523 9522
9524 9523 icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;
9525 9524
9526 9525 if (pptr->port_ipkt_list != NULL) {
9527 9526 /* add pkt to front of doubly-linked list */
9528 9527 pptr->port_ipkt_list->ipkt_prev = icmd;
9529 9528 icmd->ipkt_next = pptr->port_ipkt_list;
9530 9529 pptr->port_ipkt_list = icmd;
9531 9530 icmd->ipkt_prev = NULL;
9532 9531 } else {
9533 9532 /* this is the first/only pkt on the list */
9534 9533 pptr->port_ipkt_list = icmd;
9535 9534 icmd->ipkt_next = NULL;
9536 9535 icmd->ipkt_prev = NULL;
9537 9536 }
9538 9537 mutex_exit(&pptr->port_mutex);
9539 9538 }
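
/*
 * A worked example of the restart computation above, assuming a
 * one-second watchdog period (fcp_watchdog_timeout of 1): the first
 * time an internal packet is requeued, ipkt_restart is set to the
 * current fcp_watchdog_time (retry count 0), the second time to
 * fcp_watchdog_time + 1, the third to fcp_watchdog_time + 2, and so
 * on, so the delay between successive retries of the same discovery
 * command grows linearly.
 */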
9540 9539
9541 9540 /*
9542 9541 * Function: fcp_transport
9543 9542 *
9544 9543  * Description: This function submits the Fibre Channel packet to the transport
9545 9544 * layer by calling fc_ulp_transport(). If fc_ulp_transport()
9546 9545 * fails the submission, the treatment depends on the value of
9547 9546 * the variable internal.
9548 9547 *
9549 9548 * Argument: port_handle fp/fctl port handle.
9550 9549 * *fpkt Packet to submit to the transport layer.
9551 9550 * internal Not zero when it's an internal packet.
9552 9551 *
9553 9552 * Return Value: FC_TRAN_BUSY
9554 9553 * FC_STATEC_BUSY
9555 9554 * FC_OFFLINE
9556 9555 * FC_LOGINREQ
9557 9556 * FC_DEVICE_BUSY
9558 9557 * FC_SUCCESS
9559 9558 */
9560 9559 static int
9561 9560 fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
9562 9561 {
9563 9562 int rval;
9564 9563
9565 9564 rval = fc_ulp_transport(port_handle, fpkt);
9566 9565 if (rval == FC_SUCCESS) {
9567 9566 return (rval);
9568 9567 }
9569 9568
9570 9569 /*
9571 9570 	 * The LUN isn't marked BUSY or OFFLINE, so we got here to transport
9572 9571 	 * a command. If the underlying modules see that there is a state
9573 9572 	 * change, or that a port is OFFLINE, that state change
9574 9573 	 * hasn't reached FCP yet, so re-queue the command for deferred
9575 9574 	 * submission.
9576 9575 */
9577 9576 if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
9578 9577 (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
9579 9578 (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
9580 9579 /*
9581 9580 * Defer packet re-submission. Life hang is possible on
9582 9581 * internal commands if the port driver sends FC_STATEC_BUSY
9583 9582 		 * forever, but that shouldn't happen in a good environment.
9584 9583 		 * Limiting re-transport for internal commands is probably a
9585 9584 		 * good idea.
9586 9585 		 * A race condition can happen when a port sees a barrage of
9587 9586 * link transitions offline to online. If the FCTL has
9588 9587 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
9589 9588 * internal commands should be queued to do the discovery.
9590 9589 * The race condition is when an online comes and FCP starts
9591 9590 * its internal discovery and the link goes offline. It is
9592 9591 * possible that the statec_callback has not reached FCP
9593 9592 * and FCP is carrying on with its internal discovery.
9594 9593 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
9595 9594 * that the link has gone offline. At this point FCP should
9596 9595 * drop all the internal commands and wait for the
9597 9596 * statec_callback. It will be facilitated by incrementing
9598 9597 * port_link_cnt.
9599 9598 *
9600 9599 * For external commands, the (FC)pkt_timeout is decremented
9601 9600 		 * by the QUEUE delay added by our driver. Care is taken to
9602 9601 		 * ensure that it doesn't become zero (zero means no timeout).
9603 9602 		 * If the time expires right inside the driver queue itself,
9604 9603 		 * the watch thread will return it to the original caller
9605 9604 		 * indicating that the command has timed out.
9606 9605 */
9607 9606 if (internal) {
9608 9607 char *op;
9609 9608 struct fcp_ipkt *icmd;
9610 9609
9611 9610 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9612 9611 switch (icmd->ipkt_opcode) {
9613 9612 case SCMD_REPORT_LUN:
9614 9613 op = "REPORT LUN";
9615 9614 break;
9616 9615
9617 9616 case SCMD_INQUIRY:
9618 9617 op = "INQUIRY";
9619 9618 break;
9620 9619
9621 9620 case SCMD_INQUIRY_PAGE83:
9622 9621 op = "INQUIRY-83";
9623 9622 break;
9624 9623
9625 9624 default:
9626 9625 op = "Internal SCSI COMMAND";
9627 9626 break;
9628 9627 }
9629 9628
9630 9629 if (fcp_handle_ipkt_errors(icmd->ipkt_port,
9631 9630 icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
9632 9631 rval = FC_SUCCESS;
9633 9632 }
9634 9633 } else {
9635 9634 struct fcp_pkt *cmd;
9636 9635 struct fcp_port *pptr;
9637 9636
9638 9637 cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
9639 9638 cmd->cmd_state = FCP_PKT_IDLE;
9640 9639 pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);
9641 9640
9642 9641 if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
9643 9642 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9644 9643 fcp_trace, FCP_BUF_LEVEL_9, 0,
9645 9644 "fcp_transport: xport busy for pkt %p",
9646 9645 cmd->cmd_pkt);
9647 9646 rval = FC_TRAN_BUSY;
9648 9647 } else {
9649 9648 fcp_queue_pkt(pptr, cmd);
9650 9649 rval = FC_SUCCESS;
9651 9650 }
9652 9651 }
9653 9652 }
9654 9653
9655 9654 return (rval);
9656 9655 }
9657 9656
9658 9657 /*VARARGS3*/
9659 9658 static void
9660 9659 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9661 9660 {
9662 9661 char buf[256];
9663 9662 va_list ap;
9664 9663
9665 9664 if (dip == NULL) {
9666 9665 dip = fcp_global_dip;
9667 9666 }
9668 9667
9669 9668 va_start(ap, fmt);
9670 9669 (void) vsprintf(buf, fmt, ap);
9671 9670 va_end(ap);
9672 9671
9673 9672 scsi_log(dip, "fcp", level, buf);
9674 9673 }
9675 9674
9676 9675 /*
9677 9676  * This function retries the NS registration of the FC4 type.
9678 9677  * It assumes that the port_mutex is held.
9679 9678  * The function does nothing if the topology is not fabric,
9680 9679  * so the topology has to be set before this function can be called.
9681 9680 */
9682 9681 static void
9683 9682 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9684 9683 {
9685 9684 int rval;
9686 9685
9687 9686 ASSERT(MUTEX_HELD(&pptr->port_mutex));
9688 9687
9689 9688 if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9690 9689 ((pptr->port_topology != FC_TOP_FABRIC) &&
9691 9690 (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9692 9691 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9693 9692 pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9694 9693 }
9695 9694 return;
9696 9695 }
9697 9696 mutex_exit(&pptr->port_mutex);
9698 9697 rval = fcp_do_ns_registry(pptr, s_id);
9699 9698 mutex_enter(&pptr->port_mutex);
9700 9699
9701 9700 if (rval == 0) {
9702 9701 /* Registry successful. Reset flag */
9703 9702 pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9704 9703 }
9705 9704 }
9706 9705
9707 9706 /*
9708 9707  * This function registers the ULP with the switch by calling the transport i/f.
9709 9708 */
9710 9709 static int
9711 9710 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9712 9711 {
9713 9712 fc_ns_cmd_t ns_cmd;
9714 9713 ns_rfc_type_t rfc;
9715 9714 uint32_t types[8];
9716 9715
9717 9716 /*
9718 9717 * Prepare the Name server structure to
9719 9718 * register with the transport in case of
9720 9719 * Fabric configuration.
9721 9720 */
9722 9721 bzero(&rfc, sizeof (rfc));
9723 9722 bzero(types, sizeof (types));
9724 9723
9725 9724 types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9726 9725 (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
9727 9726
9728 9727 rfc.rfc_port_id.port_id = s_id;
9729 9728 bcopy(types, rfc.rfc_types, sizeof (types));
9730 9729
9731 9730 ns_cmd.ns_flags = 0;
9732 9731 ns_cmd.ns_cmd = NS_RFT_ID;
9733 9732 ns_cmd.ns_req_len = sizeof (rfc);
9734 9733 ns_cmd.ns_req_payload = (caddr_t)&rfc;
9735 9734 ns_cmd.ns_resp_len = 0;
9736 9735 ns_cmd.ns_resp_payload = NULL;
9737 9736
9738 9737 /*
9739 9738 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9740 9739 */
9741 9740 if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9742 9741 fcp_log(CE_WARN, pptr->port_dip,
9743 9742 "!ns_registry: failed name server registration");
9744 9743 return (1);
9745 9744 }
9746 9745
9747 9746 return (0);
9748 9747 }
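
/*
 * The RFT_ID payload built above is an 8-word (256-bit) bitmap of
 * supported FC-4 types, one bit per FC-4 type code.  Only the bit for
 * FC_TYPE_SCSI_FCP (presumably the standard SCSI-FCP type code 0x08)
 * is set, via the FC4_TYPE_WORD_POS/FC4_TYPE_BIT_POS macros, before
 * the request is handed to fc_ulp_port_ns().
 */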
9749 9748
9750 9749 /*
9751 9750 * Function: fcp_handle_port_attach
9752 9751 *
9753 9752 * Description: This function is called from fcp_port_attach() to attach a
9754 9753 * new port. This routine does the following:
9755 9754 *
9756 9755 * 1) Allocates an fcp_port structure and initializes it.
9757 9756  *		 2) Tries to register the new FC-4 (FCP) capability with the name
9758 9757 * server.
9759 9758 * 3) Kicks off the enumeration of the targets/luns visible
9760 9759 * through this new port. That is done by calling
9761 9760 * fcp_statec_callback() if the port is online.
9762 9761 *
9763 9762 * Argument: ulph fp/fctl port handle.
9764 9763 * *pinfo Port information.
9765 9764 * s_id Port ID.
9766 9765 * instance Device instance number for the local port
9767 9766 * (returned by ddi_get_instance()).
9768 9767 *
9769 9768 * Return Value: DDI_SUCCESS
9770 9769 * DDI_FAILURE
9771 9770 *
9772 9771 * Context: User and Kernel context.
9773 9772 */
9774 9773 /*ARGSUSED*/
9775 9774 int
9776 9775 fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
9777 9776 uint32_t s_id, int instance)
9778 9777 {
9779 9778 int res = DDI_FAILURE;
9780 9779 scsi_hba_tran_t *tran;
9781 9780 int mutex_initted = FALSE;
9782 9781 int hba_attached = FALSE;
9783 9782 int soft_state_linked = FALSE;
9784 9783 int event_bind = FALSE;
9785 9784 struct fcp_port *pptr;
9786 9785 fc_portmap_t *tmp_list = NULL;
9787 9786 uint32_t max_cnt, alloc_cnt;
9788 9787 uchar_t *boot_wwn = NULL;
9789 9788 uint_t nbytes;
9790 9789 int manual_cfg;
9791 9790
9792 9791 /*
9793 9792 	 * this port instance is attaching for the first time (or after
9794 9793 * being detached before)
9795 9794 */
9796 9795 FCP_TRACE(fcp_logq, "fcp", fcp_trace,
9797 9796 FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);
9798 9797
9799 9798 if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
9800 9799 		cmn_err(CE_WARN, "fcp: Softstate struct alloc failed "
9801 9800 "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
9802 9801 instance);
9803 9802 return (res);
9804 9803 }
9805 9804
9806 9805 if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
9807 9806 /* this shouldn't happen */
9808 9807 ddi_soft_state_free(fcp_softstate, instance);
9809 9808 cmn_err(CE_WARN, "fcp: bad soft state");
9810 9809 return (res);
9811 9810 }
9812 9811
9813 9812 (void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);
9814 9813
9815 9814 /*
9816 9815 * Make a copy of ulp_port_info as fctl allocates
9817 9816 * a temp struct.
9818 9817 */
9819 9818 (void) fcp_cp_pinfo(pptr, pinfo);
9820 9819
9821 9820 /*
9822 9821 * Check for manual_configuration_only property.
9823 9822 	 * Enable manual configuration if the property is
9824 9823 * set to 1, otherwise disable manual configuration.
9825 9824 */
9826 9825 if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
9827 9826 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9828 9827 MANUAL_CFG_ONLY,
9829 9828 -1)) != -1) {
9830 9829 if (manual_cfg == 1) {
9831 9830 char *pathname;
9832 9831 pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
9833 9832 (void) ddi_pathname(pptr->port_dip, pathname);
9834 9833 cmn_err(CE_NOTE,
9835 9834 "%s (%s%d) %s is enabled via %s.conf.",
9836 9835 pathname,
9837 9836 ddi_driver_name(pptr->port_dip),
9838 9837 ddi_get_instance(pptr->port_dip),
9839 9838 MANUAL_CFG_ONLY,
9840 9839 ddi_driver_name(pptr->port_dip));
9841 9840 fcp_enable_auto_configuration = 0;
9842 9841 kmem_free(pathname, MAXPATHLEN);
9843 9842 }
9844 9843 }
9845 9844 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9846 9845 pptr->port_link_cnt = 1;
9847 9846 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9848 9847 pptr->port_id = s_id;
9849 9848 pptr->port_instance = instance;
9850 9849 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state));
9851 9850 pptr->port_state = FCP_STATE_INIT;
9852 9851 if (pinfo->port_acc_attr == NULL) {
9853 9852 /*
9854 9853 * The corresponding FCA doesn't support DMA at all
9855 9854 */
9856 9855 pptr->port_state |= FCP_STATE_FCA_IS_NODMA;
9857 9856 }
9858 9857
9859 9858 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state));
9860 9859
9861 9860 if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
9862 9861 /*
9863 9862 		 * If the FCA supports DMA in the SCSI data phase, we need to
9864 9863 		 * preallocate dma cookies, so stash the cookie size
9865 9864 */
9866 9865 pptr->port_dmacookie_sz = sizeof (ddi_dma_cookie_t) *
9867 9866 pptr->port_data_dma_attr.dma_attr_sgllen;
9868 9867 }
9869 9868
9870 9869 /*
9871 9870 * The two mutexes of fcp_port are initialized. The variable
9872 9871 * mutex_initted is incremented to remember that fact. That variable
9873 9872 * is checked when the routine fails and the mutexes have to be
9874 9873 * destroyed.
9875 9874 */
9876 9875 mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
9877 9876 mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
9878 9877 mutex_initted++;
9879 9878
9880 9879 /*
9881 9880 	 * The SCSI tran structure is allocated and initialized now.
9882 9881 */
9883 9882 if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
9884 9883 fcp_log(CE_WARN, pptr->port_dip,
9885 9884 "!fcp%d: scsi_hba_tran_alloc failed", instance);
9886 9885 goto fail;
9887 9886 }
9888 9887
9889 9888 /* link in the transport structure then fill it in */
9890 9889 pptr->port_tran = tran;
9891 9890 tran->tran_hba_private = pptr;
9892 9891 tran->tran_tgt_init = fcp_scsi_tgt_init;
9893 9892 tran->tran_tgt_probe = NULL;
9894 9893 tran->tran_tgt_free = fcp_scsi_tgt_free;
9895 9894 tran->tran_start = fcp_scsi_start;
9896 9895 tran->tran_reset = fcp_scsi_reset;
9897 9896 tran->tran_abort = fcp_scsi_abort;
9898 9897 tran->tran_getcap = fcp_scsi_getcap;
9899 9898 tran->tran_setcap = fcp_scsi_setcap;
9900 9899 tran->tran_init_pkt = NULL;
9901 9900 tran->tran_destroy_pkt = NULL;
9902 9901 tran->tran_dmafree = NULL;
9903 9902 tran->tran_sync_pkt = NULL;
9904 9903 tran->tran_reset_notify = fcp_scsi_reset_notify;
9905 9904 tran->tran_get_bus_addr = fcp_scsi_get_bus_addr;
9906 9905 tran->tran_get_name = fcp_scsi_get_name;
9907 9906 tran->tran_clear_aca = NULL;
9908 9907 tran->tran_clear_task_set = NULL;
9909 9908 tran->tran_terminate_task = NULL;
9910 9909 tran->tran_get_eventcookie = fcp_scsi_bus_get_eventcookie;
9911 9910 tran->tran_add_eventcall = fcp_scsi_bus_add_eventcall;
9912 9911 tran->tran_remove_eventcall = fcp_scsi_bus_remove_eventcall;
9913 9912 tran->tran_post_event = fcp_scsi_bus_post_event;
9914 9913 tran->tran_quiesce = NULL;
9915 9914 tran->tran_unquiesce = NULL;
9916 9915 tran->tran_bus_reset = NULL;
9917 9916 tran->tran_bus_config = fcp_scsi_bus_config;
9918 9917 tran->tran_bus_unconfig = fcp_scsi_bus_unconfig;
9919 9918 tran->tran_bus_power = NULL;
9920 9919 tran->tran_interconnect_type = INTERCONNECT_FABRIC;
9921 9920
9922 9921 tran->tran_pkt_constructor = fcp_kmem_cache_constructor;
9923 9922 tran->tran_pkt_destructor = fcp_kmem_cache_destructor;
9924 9923 tran->tran_setup_pkt = fcp_pkt_setup;
9925 9924 tran->tran_teardown_pkt = fcp_pkt_teardown;
9926 9925 tran->tran_hba_len = pptr->port_priv_pkt_len +
9927 9926 sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
9928 9927 if (pptr->port_state & FCP_STATE_FCA_IS_NODMA) {
9929 9928 /*
9930 9929 		 * If the FCA doesn't support DMA, then we use different vectors to
9931 9930 		 * minimize the effects on the DMA code flow path
9932 9931 */
9933 9932 tran->tran_start = fcp_pseudo_start;
9934 9933 tran->tran_init_pkt = fcp_pseudo_init_pkt;
9935 9934 tran->tran_destroy_pkt = fcp_pseudo_destroy_pkt;
9936 9935 tran->tran_sync_pkt = fcp_pseudo_sync_pkt;
9937 9936 tran->tran_dmafree = fcp_pseudo_dmafree;
9938 9937 tran->tran_setup_pkt = NULL;
9939 9938 tran->tran_teardown_pkt = NULL;
9940 9939 tran->tran_pkt_constructor = NULL;
9941 9940 tran->tran_pkt_destructor = NULL;
9942 9941 pptr->port_data_dma_attr = pseudo_fca_dma_attr;
9943 9942 }
9944 9943
9945 9944 /*
9946 9945 * Allocate an ndi event handle
9947 9946 */
9948 9947 pptr->port_ndi_event_defs = (ndi_event_definition_t *)
9949 9948 kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);
9950 9949
9951 9950 bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
9952 9951 sizeof (fcp_ndi_event_defs));
9953 9952
9954 9953 (void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
9955 9954 &pptr->port_ndi_event_hdl, NDI_SLEEP);
9956 9955
9957 9956 pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
9958 9957 pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
9959 9958 pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;
9960 9959
9961 9960 if (DEVI_IS_ATTACHING(pptr->port_dip) &&
9962 9961 (ndi_event_bind_set(pptr->port_ndi_event_hdl,
9963 9962 &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
9964 9963 goto fail;
9965 9964 }
9966 9965 event_bind++; /* Checked in fail case */
9967 9966
9968 9967 if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
9969 9968 tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
9970 9969 != DDI_SUCCESS) {
9971 9970 fcp_log(CE_WARN, pptr->port_dip,
9972 9971 "!fcp%d: scsi_hba_attach_setup failed", instance);
9973 9972 goto fail;
9974 9973 }
9975 9974 hba_attached++; /* Checked in fail case */
9976 9975
9977 9976 pptr->port_mpxio = 0;
9978 9977 if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
9979 9978 MDI_SUCCESS) {
9980 9979 pptr->port_mpxio++;
9981 9980 }
9982 9981
9983 9982 /*
9984 9983 	 * The following code puts the new port structure in the global
9985 9984 	 * list of ports and, if it is the first port to attach, starts the
9986 9985 	 * fcp_watchdog_tick.
9987 9986 	 *
9988 9987 	 * Why put this new port in the global list before we are done
9989 9988 	 * attaching it?  We are making the structure globally known before
9990 9989 	 * we are done attaching it because of the code that
9991 9990 	 * follows.  At this point the resources to handle the port are
9992 9991 	 * allocated.  This function is now going to do the following:
9993 9992 	 *
9994 9993 	 * 1) It is going to try to register with the name server, advertising
9995 9994 	 *    the new FCP capability of the port.
9996 9995 	 * 2) It is going to play the role of the fp/fctl layer by building
9997 9996 	 *    a list of worldwide names reachable through this port and calling
9998 9997 	 *    itself on fcp_statec_callback().  That requires the port to
9999 9998 	 *    be part of the global list.
10000 9999 */
10001 10000 mutex_enter(&fcp_global_mutex);
10002 10001 if (fcp_port_head == NULL) {
10003 10002 fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
10004 10003 }
10005 10004 pptr->port_next = fcp_port_head;
10006 10005 fcp_port_head = pptr;
10007 10006 soft_state_linked++;
10008 10007
10009 10008 if (fcp_watchdog_init++ == 0) {
10010 10009 fcp_watchdog_tick = fcp_watchdog_timeout *
10011 10010 drv_usectohz(1000000);
10012 10011 fcp_watchdog_id = timeout(fcp_watch, NULL,
10013 10012 fcp_watchdog_tick);
10014 10013 }
10015 10014 mutex_exit(&fcp_global_mutex);
10016 10015
10017 10016 /*
10018 10017 	 * Here an attempt is made to register the new FCP capability with
10019 10018 	 * the name server.  That is done by sending an RFT_ID request to
10020 10019 	 * the name server, synchronously.  The function fcp_do_ns_registry()
10021 10020 	 * doesn't return until the name server has responded.
10022 10021 	 * On failure, just ignore it for now; it will get retried during
10023 10022 	 * state change callbacks.  We set a flag to record the failure.
10024 10023 */
10025 10024 if (fcp_do_ns_registry(pptr, s_id)) {
10026 10025 mutex_enter(&pptr->port_mutex);
10027 10026 pptr->port_state |= FCP_STATE_NS_REG_FAILED;
10028 10027 mutex_exit(&pptr->port_mutex);
10029 10028 } else {
10030 10029 mutex_enter(&pptr->port_mutex);
10031 10030 pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
10032 10031 mutex_exit(&pptr->port_mutex);
10033 10032 }
10034 10033
10035 10034 /*
10036 10035 	 * Look up the boot WWN property
10037 10036 */
10038 10037 if (modrootloaded != 1) {
10039 10038 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
10040 10039 ddi_get_parent(pinfo->port_dip),
10041 10040 DDI_PROP_DONTPASS, OBP_BOOT_WWN,
10042 10041 &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
10043 10042 (nbytes == FC_WWN_SIZE)) {
10044 10043 bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
10045 10044 }
10046 10045 if (boot_wwn) {
10047 10046 ddi_prop_free(boot_wwn);
10048 10047 }
10049 10048 }
10050 10049
10051 10050 /*
10052 10051 * Handle various topologies and link states.
10053 10052 */
10054 10053 switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
10055 10054 case FC_STATE_OFFLINE:
10056 10055
10057 10056 /*
10058 10057 * we're attaching a port where the link is offline
10059 10058 *
10060 10059 * Wait for ONLINE, at which time a state
10061 10060 * change will cause a statec_callback
10062 10061 *
10063 10062 		 * in the meantime, do not do anything
10064 10063 */
10065 10064 res = DDI_SUCCESS;
10066 10065 pptr->port_state |= FCP_STATE_OFFLINE;
10067 10066 break;
10068 10067
10069 10068 case FC_STATE_ONLINE: {
10070 10069 if (pptr->port_topology == FC_TOP_UNKNOWN) {
10071 10070 (void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
10072 10071 res = DDI_SUCCESS;
10073 10072 break;
10074 10073 }
10075 10074 /*
10076 10075 * discover devices and create nodes (a private
10077 10076 * loop or point-to-point)
10078 10077 */
10079 10078 ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
10080 10079
10081 10080 /*
10082 10081 * At this point we are going to build a list of all the ports
10083 10082 * that can be reached through this local port. It looks like
10084 10083 * we cannot handle more than FCP_MAX_DEVICES per local port
10085 10084 * (128).
10086 10085 */
10087 10086 if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
10088 10087 sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
10089 10088 KM_NOSLEEP)) == NULL) {
10090 10089 fcp_log(CE_WARN, pptr->port_dip,
10091 10090 "!fcp%d: failed to allocate portmap",
10092 10091 instance);
10093 10092 goto fail;
10094 10093 }
10095 10094
10096 10095 /*
10097 10096 		 * fc_ulp_getportmap() is going to provide us with the list of
10098 10097 		 * remote ports in the buffer we just allocated.  The way the
10099 10098 		 * list is retrieved depends on the topology.
10100 10099 		 * However, if we are connected to a fabric, a name server
10101 10100 		 * request may be sent to get the list of FCP capable ports.
10102 10101 		 * It should be noted that in that case the request is
10103 10102 		 * synchronous.  This means we are stuck here until the name
10104 10103 		 * server replies.  A lot of things can change during that
10105 10104 		 * time, including, possibly, being called back on
10106 10105 		 * fcp_statec_callback() for various reasons.  It is not clear
10107 10106 		 * the code can handle that.
10108 10107 */
10109 10108 max_cnt = FCP_MAX_DEVICES;
10110 10109 alloc_cnt = FCP_MAX_DEVICES;
10111 10110 if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
10112 10111 &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
10113 10112 FC_SUCCESS) {
10114 10113 caddr_t msg;
10115 10114
10116 10115 (void) fc_ulp_error(res, &msg);
10117 10116
10118 10117 /*
10119 10118 			 * this just means the transport is
10120 10119 			 * busy, perhaps building a portmap; so,
10121 10120 			 * for now, succeed this port attach.
10122 10121 			 * When the transport has a new map,
10123 10122 			 * it'll send us a state change.
10124 10123 */
10125 10124 fcp_log(CE_WARN, pptr->port_dip,
10126 10125 "!failed to get port map : %s", msg);
10127 10126
10128 10127 res = DDI_SUCCESS;
10129 10128 break; /* go return result */
10130 10129 }
10131 10130 if (max_cnt > alloc_cnt) {
10132 10131 alloc_cnt = max_cnt;
10133 10132 }
10134 10133
10135 10134 /*
10136 10135 * We are now going to call fcp_statec_callback() ourselves.
10137 10136 * By issuing this call we are trying to kick off the enumera-
10138 10137 * tion process.
10139 10138 */
10140 10139 /*
10141 10140 * let the state change callback do the SCSI device
10142 10141 * discovery and create the devinfos
10143 10142 */
10144 10143 fcp_statec_callback(ulph, pptr->port_fp_handle,
10145 10144 pptr->port_phys_state, pptr->port_topology, tmp_list,
10146 10145 max_cnt, pptr->port_id);
10147 10146
10148 10147 res = DDI_SUCCESS;
10149 10148 break;
10150 10149 }
10151 10150
10152 10151 default:
10153 10152 /* unknown port state */
10154 10153 fcp_log(CE_WARN, pptr->port_dip,
10155 10154 "!fcp%d: invalid port state at attach=0x%x",
10156 10155 instance, pptr->port_phys_state);
10157 10156
10158 10157 mutex_enter(&pptr->port_mutex);
10159 10158 pptr->port_phys_state = FCP_STATE_OFFLINE;
10160 10159 mutex_exit(&pptr->port_mutex);
10161 10160
10162 10161 res = DDI_SUCCESS;
10163 10162 break;
10164 10163 }
10165 10164
10166 10165 /* free temp list if used */
10167 10166 if (tmp_list != NULL) {
10168 10167 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10169 10168 }
10170 10169
10171 10170 /* note the attach time */
10172 10171 pptr->port_attach_time = ddi_get_lbolt64();
10173 10172
10174 10173 /* all done */
10175 10174 return (res);
10176 10175
10177 10176 /* a failure we have to clean up after */
10178 10177 fail:
10179 10178 fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");
10180 10179
10181 10180 if (soft_state_linked) {
10182 10181 /* remove this fcp_port from the linked list */
10183 10182 (void) fcp_soft_state_unlink(pptr);
10184 10183 }
10185 10184
10186 10185 /* unbind and free event set */
10187 10186 if (pptr->port_ndi_event_hdl) {
10188 10187 if (event_bind) {
10189 10188 (void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10190 10189 &pptr->port_ndi_events, NDI_SLEEP);
10191 10190 }
10192 10191 (void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10193 10192 }
10194 10193
10195 10194 if (pptr->port_ndi_event_defs) {
10196 10195 (void) kmem_free(pptr->port_ndi_event_defs,
10197 10196 sizeof (fcp_ndi_event_defs));
10198 10197 }
10199 10198
10200 10199 /*
10201 10200 * Clean up mpxio stuff
10202 10201 */
10203 10202 if (pptr->port_mpxio) {
10204 10203 (void) mdi_phci_unregister(pptr->port_dip, 0);
10205 10204 pptr->port_mpxio--;
10206 10205 }
10207 10206
10208 10207 /* undo SCSI HBA setup */
10209 10208 if (hba_attached) {
10210 10209 (void) scsi_hba_detach(pptr->port_dip);
10211 10210 }
10212 10211 if (pptr->port_tran != NULL) {
10213 10212 scsi_hba_tran_free(pptr->port_tran);
10214 10213 }
10215 10214
10216 10215 mutex_enter(&fcp_global_mutex);
10217 10216
10218 10217 /*
10219 10218 	 * We check soft_state_linked because it is incremented right before
10220 10219 	 * we increment fcp_watchdog_init.  Therefore, we know that if
10221 10220 	 * soft_state_linked is still zero, we must not decrement
10222 10221 	 * fcp_watchdog_init or possibly call untimeout.
10223 10222 */
10224 10223
10225 10224 if (soft_state_linked) {
10226 10225 if (--fcp_watchdog_init == 0) {
10227 10226 timeout_id_t tid = fcp_watchdog_id;
10228 10227
10229 10228 mutex_exit(&fcp_global_mutex);
10230 10229 (void) untimeout(tid);
10231 10230 } else {
10232 10231 mutex_exit(&fcp_global_mutex);
10233 10232 }
10234 10233 } else {
10235 10234 mutex_exit(&fcp_global_mutex);
10236 10235 }
10237 10236
10238 10237 if (mutex_initted) {
10239 10238 mutex_destroy(&pptr->port_mutex);
10240 10239 mutex_destroy(&pptr->port_pkt_mutex);
10241 10240 }
10242 10241
10243 10242 if (tmp_list != NULL) {
10244 10243 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10245 10244 }
10246 10245
10247 10246 /* this makes pptr invalid */
10248 10247 ddi_soft_state_free(fcp_softstate, instance);
10249 10248
10250 10249 return (DDI_FAILURE);
10251 10250 }
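/*
 * Editor's note: fcp_handle_port_attach() above relies on the classic
 * "progress counter + goto fail" idiom: event_bind, hba_attached and
 * soft_state_linked record how far the attach got, and the fail path
 * undoes only those steps, in reverse order.  The sketch below is a
 * minimal, standalone illustration of that idiom only; xx_attach(),
 * xx_alloc_a()/xx_alloc_b() and xx_free_a()/xx_free_b() are
 * hypothetical names standing in for steps such as ndi_event_bind_set()
 * and scsi_hba_attach_setup(), and are not part of this driver.
 */
static int xx_alloc_a(void) { return (0); }	/* first resource step */
static int xx_alloc_b(void) { return (0); }	/* second resource step */
static void xx_free_a(void) { }
static void xx_free_b(void) { }

static int
xx_attach(void)
{
	int a_done = 0, b_done = 0;

	if (xx_alloc_a() != 0)
		goto fail;
	a_done++;			/* checked in fail case */

	if (xx_alloc_b() != 0)
		goto fail;
	b_done++;			/* checked in fail case */

	/* later steps would goto fail after this point */
	return (0);			/* success */

fail:
	/* undo only what actually completed, in reverse order */
	if (b_done)
		xx_free_b();
	if (a_done)
		xx_free_a();
	return (-1);			/* failure */
}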
10252 10251
10253 10252
10254 10253 static int
10255 10254 fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
10256 10255 {
10257 10256 int count = 0;
10258 10257
10259 10258 mutex_enter(&pptr->port_mutex);
10260 10259
10261 10260 /*
10262 10261 * if the port is powered down or suspended, nothing else
10263 10262 * to do; just return.
10264 10263 */
10265 10264 if (flag != FCP_STATE_DETACHING) {
10266 10265 if (pptr->port_state & (FCP_STATE_POWER_DOWN |
10267 10266 FCP_STATE_SUSPENDED)) {
10268 10267 pptr->port_state |= flag;
10269 10268 mutex_exit(&pptr->port_mutex);
10270 10269 return (FC_SUCCESS);
10271 10270 }
10272 10271 }
10273 10272
10274 10273 if (pptr->port_state & FCP_STATE_IN_MDI) {
10275 10274 mutex_exit(&pptr->port_mutex);
10276 10275 return (FC_FAILURE);
10277 10276 }
10278 10277
10279 10278 FCP_TRACE(fcp_logq, pptr->port_instbuf,
10280 10279 fcp_trace, FCP_BUF_LEVEL_2, 0,
10281 10280 "fcp_handle_port_detach: port is detaching");
10282 10281
10283 10282 pptr->port_state |= flag;
10284 10283
10285 10284 /*
10286 10285 	 * Wait for any ongoing reconfig/ipkt to complete; that
10287 10286 	 * ensures that freeing the targets/luns is safe.
10288 10287 	 * No more references to this port should come from statec/ioctl
10289 10288 	 * after that, as it was removed from the global port list.
10290 10289 */
10291 10290 while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10292 10291 (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10293 10292 /*
10294 10293 * Let's give sufficient time for reconfig/ipkt
10295 10294 * to complete.
10296 10295 */
10297 10296 if (count++ >= FCP_ICMD_DEADLINE) {
10298 10297 break;
10299 10298 }
10300 10299 mutex_exit(&pptr->port_mutex);
10301 10300 delay(drv_usectohz(1000000));
10302 10301 mutex_enter(&pptr->port_mutex);
10303 10302 }
10304 10303
10305 10304 /*
10306 10305 * if the driver is still busy then fail to
10307 10306 * suspend/power down.
10308 10307 */
10309 10308 if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10310 10309 (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10311 10310 pptr->port_state &= ~flag;
10312 10311 mutex_exit(&pptr->port_mutex);
10313 10312 return (FC_FAILURE);
10314 10313 }
10315 10314
10316 10315 if (flag == FCP_STATE_DETACHING) {
10317 10316 pptr = fcp_soft_state_unlink(pptr);
10318 10317 ASSERT(pptr != NULL);
10319 10318 }
10320 10319
10321 10320 pptr->port_link_cnt++;
10322 10321 pptr->port_state |= FCP_STATE_OFFLINE;
10323 10322 pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
10324 10323
10325 10324 fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
10326 10325 FCP_CAUSE_LINK_DOWN);
10327 10326 mutex_exit(&pptr->port_mutex);
10328 10327
10329 10328 	/* kill the watchdog timer if we're the last user */
10330 10329 mutex_enter(&fcp_global_mutex);
10331 10330 if (--fcp_watchdog_init == 0) {
10332 10331 timeout_id_t tid = fcp_watchdog_id;
10333 10332 mutex_exit(&fcp_global_mutex);
10334 10333 (void) untimeout(tid);
10335 10334 } else {
10336 10335 mutex_exit(&fcp_global_mutex);
10337 10336 }
10338 10337
10339 10338 /* clean up the port structures */
10340 10339 if (flag == FCP_STATE_DETACHING) {
10341 10340 fcp_cleanup_port(pptr, instance);
10342 10341 }
10343 10342
10344 10343 return (FC_SUCCESS);
10345 10344 }
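/*
 * Editor's note: fcp_handle_port_attach() and fcp_handle_port_detach()
 * above share one watchdog timer, reference-counted by fcp_watchdog_init
 * under fcp_global_mutex, and the mutex is always dropped before
 * untimeout() is called so teardown cannot deadlock against a tick that
 * is already firing.  Below is a minimal sketch of that pattern;
 * xx_mutex, xx_refcnt, xx_tid and xx_tick are hypothetical names, and
 * xx_mutex is assumed to have been initialized elsewhere.
 */
static kmutex_t		xx_mutex;
static int		xx_refcnt;
static timeout_id_t	xx_tid;

/* ARGSUSED */
static void
xx_tick(void *arg)
{
	/* a real handler would do its periodic work and re-arm itself */
}

static void
xx_timer_hold(void)
{
	mutex_enter(&xx_mutex);
	if (xx_refcnt++ == 0) {
		/* first user: arm a one-second tick */
		xx_tid = timeout(xx_tick, NULL, drv_usectohz(1000000));
	}
	mutex_exit(&xx_mutex);
}

static void
xx_timer_rele(void)
{
	mutex_enter(&xx_mutex);
	if (--xx_refcnt == 0) {
		timeout_id_t tid = xx_tid;

		/* drop the lock before untimeout(), as the driver does */
		mutex_exit(&xx_mutex);
		(void) untimeout(tid);
	} else {
		mutex_exit(&xx_mutex);
	}
}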
10346 10345
10347 10346
10348 10347 static void
10349 10348 fcp_cleanup_port(struct fcp_port *pptr, int instance)
10350 10349 {
10351 10350 ASSERT(pptr != NULL);
10352 10351
10353 10352 /* unbind and free event set */
10354 10353 if (pptr->port_ndi_event_hdl) {
10355 10354 (void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10356 10355 &pptr->port_ndi_events, NDI_SLEEP);
10357 10356 (void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10358 10357 }
10359 10358
10360 10359 if (pptr->port_ndi_event_defs) {
10361 10360 (void) kmem_free(pptr->port_ndi_event_defs,
10362 10361 sizeof (fcp_ndi_event_defs));
10363 10362 }
10364 10363
10365 10364 /* free the lun/target structures and devinfos */
10366 10365 fcp_free_targets(pptr);
10367 10366
10368 10367 /*
10369 10368 * Clean up mpxio stuff
10370 10369 */
10371 10370 if (pptr->port_mpxio) {
10372 10371 (void) mdi_phci_unregister(pptr->port_dip, 0);
10373 10372 pptr->port_mpxio--;
10374 10373 }
10375 10374
10376 10375 /* clean up SCSA stuff */
10377 10376 (void) scsi_hba_detach(pptr->port_dip);
10378 10377 if (pptr->port_tran != NULL) {
10379 10378 scsi_hba_tran_free(pptr->port_tran);
10380 10379 }
10381 10380
10382 10381 #ifdef KSTATS_CODE
10383 10382 /* clean up kstats */
10384 10383 if (pptr->fcp_ksp != NULL) {
10385 10384 kstat_delete(pptr->fcp_ksp);
10386 10385 }
10387 10386 #endif
10388 10387
10389 10388 /* clean up soft state mutexes/condition variables */
10390 10389 mutex_destroy(&pptr->port_mutex);
10391 10390 mutex_destroy(&pptr->port_pkt_mutex);
10392 10391
10393 10392 /* all done with soft state */
10394 10393 ddi_soft_state_free(fcp_softstate, instance);
10395 10394 }
10396 10395
10397 10396 /*
10398 10397 * Function: fcp_kmem_cache_constructor
10399 10398 *
10400 10399 * Description: This function allocates and initializes the resources required
10401 10400 	 *		to build a scsi_pkt structure for the target driver.  The result
10402 10401 * of the allocation and initialization will be cached in the
10403 10402 * memory cache. As DMA resources may be allocated here, that
10404 10403 * means DMA resources will be tied up in the cache manager.
10405 10404 * This is a tradeoff that has been made for performance reasons.
10406 10405 *
10407 10406 	 * Argument:	*pkt		scsi_pkt to preinitialize.
10408 10407 	 *		*tran		SCSI HBA transport; tran_hba_private
10409 10408 	 *				holds the FCP port structure (fcp_port).
10410 10409 	 *		kmflags		Value passed to kmem_cache_alloc().
10411 10410 *
10412 10411 * Return Value: 0 Allocation/Initialization was successful.
10413 10412 * -1 Allocation or Initialization failed.
10414 10413 *
10415 10414 *
10416 10415 * If the returned value is 0, the buffer is initialized like this:
10417 10416 *
10418 10417 * +================================+
10419 10418 * +----> | struct scsi_pkt |
10420 10419 * | | |
10421 10420 * | +--- | pkt_ha_private |
10422 10421 * | | | |
10423 10422 * | | +================================+
10424 10423 * | |
10425 10424 * | | +================================+
10426 10425 * | +--> | struct fcp_pkt | <---------+
10427 10426 * | | | |
10428 10427 * +----- | cmd_pkt | |
10429 10428 * | cmd_fp_pkt | ---+ |
10430 10429 * +-------->| cmd_fcp_rsp[] | | |
10431 10430 * | +--->| cmd_fcp_cmd[] | | |
10432 10431 * | | |--------------------------------| | |
10433 10432 * | | | struct fc_packet | <--+ |
10434 10433 * | | | | |
10435 10434 * | | | pkt_ulp_private | ----------+
10436 10435 * | | | pkt_fca_private | -----+
10437 10436 * | | | pkt_data_cookie | ---+ |
10438 10437 * | | | pkt_cmdlen | | |
10439 10438 * | |(a) | pkt_rsplen | | |
10440 10439 * | +----| .......... pkt_cmd ........... | ---|-|---------------+
10441 10440 * | (b) | pkt_cmd_cookie | ---|-|----------+ |
10442 10441 * +---------| .......... pkt_resp .......... | ---|-|------+ | |
10443 10442 * | pkt_resp_cookie | ---|-|--+ | | |
10444 10443 * | pkt_cmd_dma | | | | | | |
10445 10444 * | pkt_cmd_acc | | | | | | |
10446 10445 * +================================+ | | | | | |
10447 10446 * | dma_cookies | <--+ | | | | |
10448 10447 * | | | | | | |
10449 10448 * +================================+ | | | | |
10450 10449 * | fca_private | <----+ | | | |
10451 10450 * | | | | | |
10452 10451 * +================================+ | | | |
10453 10452 * | | | |
10454 10453 * | | | |
10455 10454 * +================================+ (d) | | | |
10456 10455 * | fcp_resp cookies | <-------+ | | |
10457 10456 * | | | | |
10458 10457 * +================================+ | | |
10459 10458 * | | |
10460 10459 * +================================+ (d) | | |
10461 10460 * | fcp_resp | <-----------+ | |
10462 10461 * | (DMA resources associated) | | |
10463 10462 * +================================+ | |
10464 10463 * | |
10465 10464 * | |
10466 10465 * | |
10467 10466 * +================================+ (c) | |
10468 10467 * | fcp_cmd cookies | <---------------+ |
10469 10468 * | | |
10470 10469 * +================================+ |
10471 10470 * |
10472 10471 * +================================+ (c) |
10473 10472 * | fcp_cmd | <--------------------+
10474 10473 * | (DMA resources associated) |
10475 10474 * +================================+
10476 10475 *
10477 10476 * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10478 10477 * (b) Only if DMA is NOT used for the FCP_RESP buffer
10479 10478 * (c) Only if DMA is used for the FCP_CMD buffer.
10480 10479 * (d) Only if DMA is used for the FCP_RESP buffer
10481 10480 */
10482 10481 static int
10483 10482 fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
10484 10483 int kmflags)
10485 10484 {
10486 10485 struct fcp_pkt *cmd;
10487 10486 struct fcp_port *pptr;
10488 10487 fc_packet_t *fpkt;
10489 10488
10490 10489 pptr = (struct fcp_port *)tran->tran_hba_private;
10491 10490 cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
10492 10491 bzero(cmd, tran->tran_hba_len);
10493 10492
10494 10493 cmd->cmd_pkt = pkt;
10495 10494 pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
10496 10495 fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
10497 10496 cmd->cmd_fp_pkt = fpkt;
10498 10497
10499 10498 cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
10500 10499 cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
10501 10500 cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
10502 10501 sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);
10503 10502
10504 10503 fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
10505 10504 sizeof (struct fcp_pkt));
10506 10505
10507 10506 fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
10508 10507 fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
10509 10508
10510 10509 if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
10511 10510 /*
10512 10511 * The underlying HBA doesn't want to DMA the fcp_cmd or
10513 10512 * fcp_resp. The transfer of information will be done by
10514 10513 * bcopy.
10515 10514 		 * The naming of the flag (which is actually a value) is
10516 10515 * unfortunate. FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
10517 10516 * DMA" but instead "NO DMA".
10518 10517 */
10519 10518 fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
10520 10519 fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
10521 10520 fpkt->pkt_resp = cmd->cmd_fcp_rsp;
10522 10521 } else {
10523 10522 /*
10524 10523 		 * The underlying HBA will DMA the fcp_cmd and fcp_resp
10525 10524 		 * buffers.  A buffer is allocated for each one using the
10526 10525 		 * ddi_dma_* interfaces.
10527 10526 */
10528 10527 if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
10529 10528 return (-1);
10530 10529 }
10531 10530 }
10532 10531
10533 10532 return (0);
10534 10533 }
10535 10534
10536 10535 /*
10537 10536 * Function: fcp_kmem_cache_destructor
10538 10537 *
10539 10538 * Description: Called by the destructor of the cache managed by SCSA.
10540 10539 * All the resources pre-allocated in fcp_pkt_constructor
10541 10540 	 *		All the resources pre-allocated in the packet constructor,
10542 10541 	 *		and the data pre-initialized there, are freed and
10543 10542 	 *		uninitialized here.
10544 10543 	 * Argument:	*pkt		scsi_pkt to uninitialize.
10545 10544 	 *		*tran		SCSI HBA transport handle.
10546 10545 *
10547 10546 * Return Value: None
10548 10547 *
10549 10548 * Context: kernel
10550 10549 */
10551 10550 static void
10552 10551 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10553 10552 {
10554 10553 struct fcp_pkt *cmd;
10555 10554 struct fcp_port *pptr;
10556 10555
10557 10556 pptr = (struct fcp_port *)(tran->tran_hba_private);
10558 10557 cmd = pkt->pkt_ha_private;
10559 10558
10560 10559 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10561 10560 /*
10562 10561 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10563 10562 * buffer and DMA resources allocated to do so are released.
10564 10563 */
10565 10564 fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10566 10565 }
10567 10566 }
10568 10567
10569 10568 /*
10570 10569 * Function: fcp_alloc_cmd_resp
10571 10570 *
10572 10571 	 * Description: This function allocates an FCP_CMD and an FCP_RESP buffer
10573 10572 	 *		that will be DMAed by the HBA.  The buffers are allocated
10574 10573 	 *		according to the DMA requirements of the HBA and are also
10575 10574 	 *		bound.  DMA resources are allocated in the process.
10576 10575 	 *		They will be released by fcp_free_cmd_resp().
10577 10576 *
10578 10577 * Argument: *pptr FCP port.
10579 10578 * *fpkt fc packet for which the cmd and resp packet should be
10580 10579 * allocated.
10581 10580 * flags Allocation flags.
10582 10581 *
10583 10582 * Return Value: FC_FAILURE
10584 10583 * FC_SUCCESS
10585 10584 *
10586 10585 * Context: User or Kernel context only if flags == KM_SLEEP.
10587 10586 	 *		Interrupt context if KM_SLEEP is not specified.
10588 10587 */
10589 10588 static int
10590 10589 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10591 10590 {
10592 10591 int rval;
10593 10592 int cmd_len;
10594 10593 int resp_len;
10595 10594 ulong_t real_len;
10596 10595 int (*cb) (caddr_t);
10597 10596 ddi_dma_cookie_t pkt_cookie;
10598 10597 ddi_dma_cookie_t *cp;
10599 10598 uint32_t cnt;
10600 10599
10601 10600 cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10602 10601
10603 10602 cmd_len = fpkt->pkt_cmdlen;
10604 10603 resp_len = fpkt->pkt_rsplen;
10605 10604
10606 10605 ASSERT(fpkt->pkt_cmd_dma == NULL);
10607 10606
10608 10607 /* Allocation of a DMA handle used in subsequent calls. */
10609 10608 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10610 10609 cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10611 10610 return (FC_FAILURE);
10612 10611 }
10613 10612
10614 10613 /* A buffer is allocated that satisfies the DMA requirements. */
10615 10614 rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10616 10615 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10617 10616 (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10618 10617
10619 10618 if (rval != DDI_SUCCESS) {
10620 10619 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10621 10620 return (FC_FAILURE);
10622 10621 }
10623 10622
10624 10623 if (real_len < cmd_len) {
10625 10624 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10626 10625 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10627 10626 return (FC_FAILURE);
10628 10627 }
10629 10628
10630 10629 /* The buffer allocated is DMA bound. */
10631 10630 rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10632 10631 fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10633 10632 cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10634 10633
10635 10634 if (rval != DDI_DMA_MAPPED) {
10636 10635 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10637 10636 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10638 10637 return (FC_FAILURE);
10639 10638 }
10640 10639
10641 10640 if (fpkt->pkt_cmd_cookie_cnt >
10642 10641 pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10643 10642 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10644 10643 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10645 10644 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10646 10645 return (FC_FAILURE);
10647 10646 }
10648 10647
10649 10648 ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10650 10649
10651 10650 /*
10652 10651 * The buffer where the scatter/gather list is going to be built is
10653 10652 * allocated.
10654 10653 */
10655 10654 cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10656 10655 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10657 10656 KM_NOSLEEP);
10658 10657
10659 10658 if (cp == NULL) {
10660 10659 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10661 10660 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10662 10661 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10663 10662 return (FC_FAILURE);
10664 10663 }
10665 10664
10666 10665 /*
10667 10666 * The scatter/gather list for the buffer we just allocated is built
10668 10667 * here.
10669 10668 */
10670 10669 *cp = pkt_cookie;
10671 10670 cp++;
10672 10671
10673 10672 for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10674 10673 ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10675 10674 &pkt_cookie);
10676 10675 *cp = pkt_cookie;
10677 10676 }
10678 10677
10679 10678 ASSERT(fpkt->pkt_resp_dma == NULL);
10680 10679 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10681 10680 cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10682 10681 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10683 10682 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10684 10683 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10685 10684 return (FC_FAILURE);
10686 10685 }
10687 10686
10688 10687 rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10689 10688 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10690 10689 (caddr_t *)&fpkt->pkt_resp, &real_len,
10691 10690 &fpkt->pkt_resp_acc);
10692 10691
10693 10692 if (rval != DDI_SUCCESS) {
10694 10693 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10695 10694 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10696 10695 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10697 10696 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10698 10697 kmem_free(fpkt->pkt_cmd_cookie,
10699 10698 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10700 10699 return (FC_FAILURE);
10701 10700 }
10702 10701
10703 10702 if (real_len < resp_len) {
10704 10703 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10705 10704 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10706 10705 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10707 10706 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10708 10707 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10709 10708 kmem_free(fpkt->pkt_cmd_cookie,
10710 10709 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10711 10710 return (FC_FAILURE);
10712 10711 }
10713 10712
10714 10713 rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10715 10714 fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10716 10715 cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10717 10716
10718 10717 if (rval != DDI_DMA_MAPPED) {
10719 10718 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10720 10719 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10721 10720 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10722 10721 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10723 10722 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10724 10723 kmem_free(fpkt->pkt_cmd_cookie,
10725 10724 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10726 10725 return (FC_FAILURE);
10727 10726 }
10728 10727
10729 10728 if (fpkt->pkt_resp_cookie_cnt >
10730 10729 pptr->port_resp_dma_attr.dma_attr_sgllen) {
10731 10730 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10732 10731 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10733 10732 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10734 10733 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10735 10734 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10736 10735 kmem_free(fpkt->pkt_cmd_cookie,
10737 10736 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10738 10737 return (FC_FAILURE);
10739 10738 }
10740 10739
10741 10740 ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10742 10741
10743 10742 cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10744 10743 fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10745 10744 KM_NOSLEEP);
10746 10745
10747 10746 if (cp == NULL) {
10748 10747 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10749 10748 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10750 10749 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10751 10750 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10752 10751 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10753 10752 kmem_free(fpkt->pkt_cmd_cookie,
10754 10753 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10755 10754 return (FC_FAILURE);
10756 10755 }
10757 10756
10758 10757 *cp = pkt_cookie;
10759 10758 cp++;
10760 10759
10761 10760 for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10762 10761 ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10763 10762 &pkt_cookie);
10764 10763 *cp = pkt_cookie;
10765 10764 }
10766 10765
10767 10766 return (FC_SUCCESS);
10768 10767 }
10769 10768
10770 10769 /*
10771 10770 * Function: fcp_free_cmd_resp
10772 10771 *
10773 10772 	 * Description: This function releases the FCP_CMD and FCP_RESP buffers
10774 10773 	 *		allocated by fcp_alloc_cmd_resp() and all the resources
10775 10774 	 *		associated with them.  That includes the DMA resources and
10776 10775 	 *		the buffers allocated for the cookies of each of them.
10777 10776 *
10778 10777 * Argument: *pptr FCP port context.
10779 10778 * *fpkt fc packet containing the cmd and resp packet
10780 10779 * to be released.
10781 10780 *
10782 10781 * Return Value: None
10783 10782 *
10784 10783 * Context: Interrupt, User and Kernel context.
10785 10784 */
10786 10785 /* ARGSUSED */
10787 10786 static void
10788 10787 fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
10789 10788 {
10790 10789 ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
10791 10790
10792 10791 if (fpkt->pkt_resp_dma) {
10793 10792 (void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
10794 10793 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10795 10794 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10796 10795 }
10797 10796
10798 10797 if (fpkt->pkt_resp_cookie) {
10799 10798 kmem_free(fpkt->pkt_resp_cookie,
10800 10799 fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
10801 10800 fpkt->pkt_resp_cookie = NULL;
10802 10801 }
10803 10802
10804 10803 if (fpkt->pkt_cmd_dma) {
10805 10804 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10806 10805 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10807 10806 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10808 10807 }
10809 10808
10810 10809 if (fpkt->pkt_cmd_cookie) {
10811 10810 kmem_free(fpkt->pkt_cmd_cookie,
10812 10811 fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
10813 10812 fpkt->pkt_cmd_cookie = NULL;
10814 10813 }
10815 10814 }
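/*
 * Editor's note: fcp_alloc_cmd_resp() and fcp_free_cmd_resp() above
 * follow the standard DDI DMA lifecycle for a small consistent buffer:
 * ddi_dma_alloc_handle() -> ddi_dma_mem_alloc() ->
 * ddi_dma_addr_bind_handle() -> cookie walk, undone in exactly the
 * reverse order.  The sketch below condenses that lifecycle for a
 * single buffer; xx_dma_buf_alloc()/xx_dma_buf_free() and their
 * attr/acc parameters are hypothetical, and the usual <sys/ddi.h> and
 * <sys/sunddi.h> headers are assumed.
 */
static int
xx_dma_buf_alloc(dev_info_t *dip, ddi_dma_attr_t *attr,
    ddi_device_acc_attr_t *acc, size_t len, ddi_dma_handle_t *dmap,
    ddi_acc_handle_t *accp, caddr_t *kaddrp, ddi_dma_cookie_t *first_cookie,
    uint_t *ccountp)
{
	size_t real_len;

	/* 1) handle, 2) memory, 3) bind; unwind on any failure */
	if (ddi_dma_alloc_handle(dip, attr, DDI_DMA_SLEEP, NULL,
	    dmap) != DDI_SUCCESS)
		return (-1);

	if (ddi_dma_mem_alloc(*dmap, len, acc, DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP, NULL, kaddrp, &real_len, accp) != DDI_SUCCESS) {
		ddi_dma_free_handle(dmap);
		return (-1);
	}

	if (ddi_dma_addr_bind_handle(*dmap, NULL, *kaddrp, real_len,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    first_cookie, ccountp) != DDI_DMA_MAPPED) {
		ddi_dma_mem_free(accp);
		ddi_dma_free_handle(dmap);
		return (-1);
	}

	/* remaining cookies, if any, are fetched with ddi_dma_nextcookie() */
	return (0);
}

static void
xx_dma_buf_free(ddi_dma_handle_t *dmap, ddi_acc_handle_t *accp)
{
	/* teardown is the exact reverse of the allocation order */
	(void) ddi_dma_unbind_handle(*dmap);
	ddi_dma_mem_free(accp);
	ddi_dma_free_handle(dmap);
}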
10816 10815
10817 10816
10818 10817 /*
10819 10818 * called by the transport to do our own target initialization
10820 10819 *
10821 10820 * can acquire and release the global mutex
10822 10821 */
10823 10822 /* ARGSUSED */
10824 10823 static int
10825 10824 fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10826 10825 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10827 10826 {
10828 10827 uchar_t *bytes;
10829 10828 uint_t nbytes;
10830 10829 uint16_t lun_num;
10831 10830 struct fcp_tgt *ptgt;
10832 10831 struct fcp_lun *plun;
10833 10832 struct fcp_port *pptr = (struct fcp_port *)
10834 10833 hba_tran->tran_hba_private;
10835 10834
10836 10835 ASSERT(pptr != NULL);
10837 10836
10838 10837 FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10839 10838 FCP_BUF_LEVEL_8, 0,
10840 10839 "fcp_phys_tgt_init: called for %s (instance %d)",
10841 10840 ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10842 10841
10843 10842 /* get our port WWN property */
10844 10843 bytes = NULL;
10845 10844 if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10846 10845 PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10847 10846 (nbytes != FC_WWN_SIZE)) {
10848 10847 /* no port WWN property */
10849 10848 FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10850 10849 FCP_BUF_LEVEL_8, 0,
10851 10850 "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
10852 10851 " for %s (instance %d): bytes=%p nbytes=%x",
10853 10852 ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
10854 10853 nbytes);
10855 10854
10856 10855 if (bytes != NULL) {
10857 10856 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10858 10857 }
10859 10858
10860 10859 return (DDI_NOT_WELL_FORMED);
10861 10860 }
10862 10861 ASSERT(bytes != NULL);
10863 10862
10864 10863 lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10865 10864 LUN_PROP, 0xFFFF);
10866 10865 if (lun_num == 0xFFFF) {
10867 10866 FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10868 10867 FCP_BUF_LEVEL_8, 0,
10869 10868 "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
10870 10869 " for %s (instance %d)", ddi_get_name(tgt_dip),
10871 10870 ddi_get_instance(tgt_dip));
10872 10871
10873 10872 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10874 10873 return (DDI_NOT_WELL_FORMED);
10875 10874 }
10876 10875
10877 10876 mutex_enter(&pptr->port_mutex);
10878 10877 if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10879 10878 mutex_exit(&pptr->port_mutex);
10880 10879 FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10881 10880 FCP_BUF_LEVEL_8, 0,
10882 10881 "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
10883 10882 " for %s (instance %d)", ddi_get_name(tgt_dip),
10884 10883 ddi_get_instance(tgt_dip));
10885 10884
10886 10885 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10887 10886 return (DDI_FAILURE);
10888 10887 }
10889 10888
10890 10889 ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10891 10890 FC_WWN_SIZE) == 0);
10892 10891 ASSERT(plun->lun_num == lun_num);
10893 10892
10894 10893 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10895 10894
10896 10895 ptgt = plun->lun_tgt;
10897 10896
10898 10897 mutex_enter(&ptgt->tgt_mutex);
10899 10898 plun->lun_tgt_count++;
10900 10899 scsi_device_hba_private_set(sd, plun);
10901 10900 plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10902 10901 plun->lun_sd = sd;
10903 10902 mutex_exit(&ptgt->tgt_mutex);
10904 10903 mutex_exit(&pptr->port_mutex);
10905 10904
10906 10905 return (DDI_SUCCESS);
10907 10906 }
10908 10907
10909 10908 /*ARGSUSED*/
10910 10909 static int
10911 10910 fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10912 10911 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10913 10912 {
10914 10913 uchar_t *bytes;
10915 10914 uint_t nbytes;
10916 10915 uint16_t lun_num;
10917 10916 struct fcp_tgt *ptgt;
10918 10917 struct fcp_lun *plun;
10919 10918 struct fcp_port *pptr = (struct fcp_port *)
10920 10919 hba_tran->tran_hba_private;
10921 10920 child_info_t *cip;
10922 10921
10923 10922 ASSERT(pptr != NULL);
10924 10923
10925 10924 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10926 10925 fcp_trace, FCP_BUF_LEVEL_8, 0,
10927 10926 "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
10928 10927 " (tgt_dip %p)", ddi_get_name(tgt_dip),
10929 10928 ddi_get_instance(tgt_dip), hba_dip, tgt_dip);
10930 10929
10931 10930 cip = (child_info_t *)sd->sd_pathinfo;
10932 10931 if (cip == NULL) {
10933 10932 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10934 10933 fcp_trace, FCP_BUF_LEVEL_8, 0,
10935 10934 "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
10936 10935 " for %s (instance %d)", ddi_get_name(tgt_dip),
10937 10936 ddi_get_instance(tgt_dip));
10938 10937
10939 10938 return (DDI_NOT_WELL_FORMED);
10940 10939 }
10941 10940
10942 10941 /* get our port WWN property */
10943 10942 bytes = NULL;
10944 10943 if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10945 10944 PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10946 10945 (nbytes != FC_WWN_SIZE)) {
10947 10946 if (bytes) {
10948 10947 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10949 10948 }
10950 10949 return (DDI_NOT_WELL_FORMED);
10951 10950 }
10952 10951
10953 10952 ASSERT(bytes != NULL);
10954 10953
10955 10954 lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10956 10955 LUN_PROP, 0xFFFF);
10957 10956 if (lun_num == 0xFFFF) {
10958 10957 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10959 10958 fcp_trace, FCP_BUF_LEVEL_8, 0,
10960 10959 "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
10961 10960 " for %s (instance %d)", ddi_get_name(tgt_dip),
10962 10961 ddi_get_instance(tgt_dip));
10963 10962
10964 10963 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10965 10964 return (DDI_NOT_WELL_FORMED);
10966 10965 }
10967 10966
10968 10967 mutex_enter(&pptr->port_mutex);
10969 10968 if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10970 10969 mutex_exit(&pptr->port_mutex);
10971 10970 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10972 10971 fcp_trace, FCP_BUF_LEVEL_8, 0,
10973 10972 "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
10974 10973 " for %s (instance %d)", ddi_get_name(tgt_dip),
10975 10974 ddi_get_instance(tgt_dip));
10976 10975
10977 10976 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10978 10977 return (DDI_FAILURE);
10979 10978 }
10980 10979
10981 10980 ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10982 10981 FC_WWN_SIZE) == 0);
10983 10982 ASSERT(plun->lun_num == lun_num);
10984 10983
10985 10984 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10986 10985
10987 10986 ptgt = plun->lun_tgt;
10988 10987
10989 10988 mutex_enter(&ptgt->tgt_mutex);
10990 10989 plun->lun_tgt_count++;
10991 10990 scsi_device_hba_private_set(sd, plun);
10992 10991 plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10993 10992 plun->lun_sd = sd;
10994 10993 mutex_exit(&ptgt->tgt_mutex);
10995 10994 mutex_exit(&pptr->port_mutex);
10996 10995
10997 10996 return (DDI_SUCCESS);
10998 10997 }
10999 10998
11000 10999
11001 11000 /*
11002 11001 * called by the transport to do our own target initialization
11003 11002 *
11004 11003 * can acquire and release the global mutex
11005 11004 */
11006 11005 /* ARGSUSED */
11007 11006 static int
11008 11007 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
11009 11008 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
11010 11009 {
11011 11010 struct fcp_port *pptr = (struct fcp_port *)
11012 11011 hba_tran->tran_hba_private;
11013 11012 int rval;
11014 11013
11015 11014 ASSERT(pptr != NULL);
11016 11015
11017 11016 /*
11018 11017 * Child node is getting initialized. Look at the mpxio component
11019 11018 * type on the child device to see if this device is mpxio managed
11020 11019 * or not.
11021 11020 */
11022 11021 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
11023 11022 rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11024 11023 } else {
11025 11024 rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11026 11025 }
11027 11026
11028 11027 return (rval);
11029 11028 }
11030 11029
11031 11030
11032 11031 /* ARGSUSED */
11033 11032 static void
11034 11033 fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
11035 11034 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
11036 11035 {
11037 11036 struct fcp_lun *plun = scsi_device_hba_private_get(sd);
11038 11037 struct fcp_tgt *ptgt;
11039 11038
11040 11039 FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
11041 11040 fcp_trace, FCP_BUF_LEVEL_8, 0,
11042 11041 "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
11043 11042 ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
11044 11043 ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
11045 11044
11046 11045 if (plun == NULL) {
11047 11046 return;
11048 11047 }
11049 11048 ptgt = plun->lun_tgt;
11050 11049
11051 11050 ASSERT(ptgt != NULL);
11052 11051
11053 11052 mutex_enter(&ptgt->tgt_mutex);
11054 11053 ASSERT(plun->lun_tgt_count > 0);
11055 11054
11056 11055 if (--plun->lun_tgt_count == 0) {
11057 11056 plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
11058 11057 }
11059 11058 plun->lun_sd = NULL;
11060 11059 mutex_exit(&ptgt->tgt_mutex);
11061 11060 }
11062 11061
11063 11062 /*
11064 11063 * Function: fcp_scsi_start
11065 11064 *
11066 11065 * Description: This function is called by the target driver to request a
11067 11066 * command to be sent.
11068 11067 *
11069 11068 * Argument: *ap SCSI address of the device.
11070 11069 * *pkt SCSI packet containing the cmd to send.
11071 11070 *
11072 11071 * Return Value: TRAN_ACCEPT
11073 11072 * TRAN_BUSY
11074 11073 * TRAN_BADPKT
11075 11074 * TRAN_FATAL_ERROR
11076 11075 */
11077 11076 static int
11078 11077 fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
11079 11078 {
11080 11079 struct fcp_port *pptr = ADDR2FCP(ap);
11081 11080 struct fcp_lun *plun = ADDR2LUN(ap);
11082 11081 struct fcp_pkt *cmd = PKT2CMD(pkt);
11083 11082 struct fcp_tgt *ptgt = plun->lun_tgt;
11084 11083 int rval;
11085 11084
11086 11085 /* ensure command isn't already issued */
11087 11086 ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
11088 11087
11089 11088 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11090 11089 fcp_trace, FCP_BUF_LEVEL_9, 0,
11091 11090 "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);
11092 11091
11093 11092 /*
11094 11093 * It is strange that we enter the fcp_port mutex and the target
11095 11094 * mutex to check the lun state (which has a mutex of its own).
11096 11095 */
11097 11096 mutex_enter(&pptr->port_mutex);
11098 11097 mutex_enter(&ptgt->tgt_mutex);
11099 11098
11100 11099 /*
11101 11100 * If the device is offline and is not in the process of coming
11102 11101 * online, fail the request.
11103 11102 */
11104 11103
11105 11104 if ((plun->lun_state & FCP_LUN_OFFLINE) &&
11106 11105 !(plun->lun_state & FCP_LUN_ONLINING)) {
11107 11106 mutex_exit(&ptgt->tgt_mutex);
11108 11107 mutex_exit(&pptr->port_mutex);
11109 11108
11110 11109 if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
11111 11110 pkt->pkt_reason = CMD_DEV_GONE;
11112 11111 }
11113 11112
11114 11113 return (TRAN_FATAL_ERROR);
11115 11114 }
11116 11115 cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;
11117 11116
11118 11117 /*
11119 11118 	 * If we are suspended, the kernel is trying to dump, so don't
11120 11119 	 * block, fail or defer requests; send them down right away.
11121 11120 	 * NOTE: If we are in panic (i.e. trying to dump), we can't
11122 11121 	 * assume we have been suspended.  There is hardware such as
11123 11122 	 * the v880 that doesn't do PM.  Thus, the check for
11124 11123 	 * ddi_in_panic.
11125 11124 	 *
11126 11125 	 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
11127 11126 	 * of changing.  So, if we can queue the packet, do it.  Eventually,
11128 11127 	 * either the device will have gone away or changed and we can fail
11129 11128 	 * the request, or we can proceed if the device didn't change.
11130 11129 	 *
11131 11130 	 * If the pd in the target or the packet is NULL, it's probably
11132 11131 	 * because the device has gone away; we allow the request to be
11133 11132 	 * put on the internal queue here in case the device comes back within
11134 11133 	 * the offline timeout.  fctl will fix up the pd's if the tgt_pd_handle
11135 11134 	 * has gone NULL, while fcp deals with cases where pkt_pd is NULL.
11136 11135 	 * pkt_pd could be NULL because the device was disappearing during or
11137 11136 	 * since packet initialization.
11138 11137 */
11139 11138
11140 11139 if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
11141 11140 FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
11142 11141 (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
11143 11142 (ptgt->tgt_pd_handle == NULL) ||
11144 11143 (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
11145 11144 /*
11146 11145 * If ((LUN is busy AND
11147 11146 * LUN not suspended AND
11148 11147 * The system is not in panic state) OR
11149 11148 * (The port is coming up))
11150 11149 *
11151 11150 		 * We check to see if any of the flags FLAG_NOINTR or
11152 11151 		 * FLAG_NOQUEUE is set.  If one of them is set, the value
11153 11152 		 * returned will be TRAN_BUSY.  If not, the request is queued.
11154 11153 */
11155 11154 mutex_exit(&ptgt->tgt_mutex);
11156 11155 mutex_exit(&pptr->port_mutex);
11157 11156
11158 11157 		/* see if using interrupts is allowed (so queueing will work) */
11159 11158 if (pkt->pkt_flags & FLAG_NOINTR) {
11160 11159 pkt->pkt_resid = 0;
11161 11160 return (TRAN_BUSY);
11162 11161 }
11163 11162 if (pkt->pkt_flags & FLAG_NOQUEUE) {
11164 11163 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11165 11164 fcp_trace, FCP_BUF_LEVEL_9, 0,
11166 11165 "fcp_scsi_start: lun busy for pkt %p", pkt);
11167 11166 return (TRAN_BUSY);
11168 11167 }
11169 11168 #ifdef DEBUG
11170 11169 mutex_enter(&pptr->port_pkt_mutex);
11171 11170 pptr->port_npkts++;
11172 11171 mutex_exit(&pptr->port_pkt_mutex);
11173 11172 #endif /* DEBUG */
11174 11173
11175 11174 		/* go queue up the pkt for later */
11176 11175 fcp_queue_pkt(pptr, cmd);
11177 11176 return (TRAN_ACCEPT);
11178 11177 }
11179 11178 cmd->cmd_state = FCP_PKT_ISSUED;
11180 11179
11181 11180 mutex_exit(&ptgt->tgt_mutex);
11182 11181 mutex_exit(&pptr->port_mutex);
11183 11182
11184 11183 /*
11185 11184 * Now that we released the mutexes, what was protected by them can
11186 11185 * change.
11187 11186 */
11188 11187
11189 11188 /*
11190 11189 * If there is a reconfiguration in progress, wait for it to complete.
11191 11190 */
11192 11191 fcp_reconfig_wait(pptr);
11193 11192
11194 11193 cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
11195 11194 pkt->pkt_time : 0;
11196 11195
11197 11196 /* prepare the packet */
11198 11197
11199 11198 fcp_prepare_pkt(pptr, cmd, plun);
11200 11199
11201 11200 if (cmd->cmd_pkt->pkt_time) {
11202 11201 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11203 11202 } else {
11204 11203 cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11205 11204 }
11206 11205
11207 11206 /*
11208 11207 * if interrupts aren't allowed (e.g. at dump time) then we'll
11209 11208 * have to do polled I/O
11210 11209 */
11211 11210 if (pkt->pkt_flags & FLAG_NOINTR) {
11212 11211 cmd->cmd_state &= ~FCP_PKT_ISSUED;
11213 11212 return (fcp_dopoll(pptr, cmd));
11214 11213 }
11215 11214
11216 11215 #ifdef DEBUG
11217 11216 mutex_enter(&pptr->port_pkt_mutex);
11218 11217 pptr->port_npkts++;
11219 11218 mutex_exit(&pptr->port_pkt_mutex);
11220 11219 #endif /* DEBUG */
11221 11220
11222 11221 rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
11223 11222 if (rval == FC_SUCCESS) {
11224 11223 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11225 11224 fcp_trace, FCP_BUF_LEVEL_9, 0,
11226 11225 "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
11227 11226 return (TRAN_ACCEPT);
11228 11227 }
11229 11228
11230 11229 cmd->cmd_state = FCP_PKT_IDLE;
11231 11230
11232 11231 #ifdef DEBUG
11233 11232 mutex_enter(&pptr->port_pkt_mutex);
11234 11233 pptr->port_npkts--;
11235 11234 mutex_exit(&pptr->port_pkt_mutex);
11236 11235 #endif /* DEBUG */
11237 11236
11238 11237 /*
11239 11238 * For lack of clearer definitions, choose
11240 11239 * between TRAN_BUSY and TRAN_FATAL_ERROR.
11241 11240 */
11242 11241
11243 11242 if (rval == FC_TRAN_BUSY) {
11244 11243 pkt->pkt_resid = 0;
11245 11244 rval = TRAN_BUSY;
11246 11245 } else {
11247 11246 mutex_enter(&ptgt->tgt_mutex);
11248 11247 if (plun->lun_state & FCP_LUN_OFFLINE) {
11249 11248 child_info_t *cip;
11250 11249
11251 11250 mutex_enter(&plun->lun_mutex);
11252 11251 cip = plun->lun_cip;
11253 11252 mutex_exit(&plun->lun_mutex);
11254 11253
11255 11254 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11256 11255 fcp_trace, FCP_BUF_LEVEL_6, 0,
11257 11256 "fcp_transport failed 2 for %x: %x; dip=%p",
11258 11257 plun->lun_tgt->tgt_d_id, rval, cip);
11259 11258
11260 11259 rval = TRAN_FATAL_ERROR;
11261 11260 } else {
11262 11261 if (pkt->pkt_flags & FLAG_NOQUEUE) {
11263 11262 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11264 11263 fcp_trace, FCP_BUF_LEVEL_9, 0,
11265 11264 "fcp_scsi_start: FC_BUSY for pkt %p",
11266 11265 pkt);
11267 11266 rval = TRAN_BUSY;
11268 11267 } else {
11269 11268 rval = TRAN_ACCEPT;
11270 11269 fcp_queue_pkt(pptr, cmd);
11271 11270 }
11272 11271 }
11273 11272 mutex_exit(&ptgt->tgt_mutex);
11274 11273 }
11275 11274
11276 11275 return (rval);
11277 11276 }
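/*
 * Editor's note: the TRAN_* values returned by fcp_scsi_start() above
 * are what a SCSI target driver sees from scsi_transport(9F).  A
 * minimal, hypothetical caller-side sketch of the usual handling
 * (xx_start_cmd() is not part of this driver):
 */
static int
xx_start_cmd(struct scsi_pkt *pkt)
{
	switch (scsi_transport(pkt)) {
	case TRAN_ACCEPT:
		return (0);	/* pkt_comp callback will report completion */
	case TRAN_BUSY:
		return (1);	/* back off and retry later */
	default:
		return (-1);	/* TRAN_BADPKT or TRAN_FATAL_ERROR */
	}
}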
11278 11277
11279 11278 /*
11280 11279 * called by the transport to abort a packet
11281 11280 */
11282 11281 /*ARGSUSED*/
11283 11282 static int
11284 11283 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11285 11284 {
11286 11285 int tgt_cnt;
11287 11286 struct fcp_port *pptr = ADDR2FCP(ap);
11288 11287 struct fcp_lun *plun = ADDR2LUN(ap);
11289 11288 struct fcp_tgt *ptgt = plun->lun_tgt;
11290 11289
11291 11290 if (pkt == NULL) {
11292 11291 if (ptgt) {
11293 11292 mutex_enter(&ptgt->tgt_mutex);
11294 11293 tgt_cnt = ptgt->tgt_change_cnt;
11295 11294 mutex_exit(&ptgt->tgt_mutex);
11296 11295 fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11297 11296 return (TRUE);
11298 11297 }
11299 11298 }
11300 11299 return (FALSE);
11301 11300 }
11302 11301
11303 11302
11304 11303 /*
11305 11304 * Perform reset
11306 11305 */
11307 11306 int
11308 11307 fcp_scsi_reset(struct scsi_address *ap, int level)
11309 11308 {
11310 11309 int rval = 0;
11311 11310 struct fcp_port *pptr = ADDR2FCP(ap);
11312 11311 struct fcp_lun *plun = ADDR2LUN(ap);
11313 11312 struct fcp_tgt *ptgt = plun->lun_tgt;
11314 11313
11315 11314 if (level == RESET_ALL) {
11316 11315 if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
11317 11316 rval = 1;
11318 11317 }
11319 11318 } else if (level == RESET_TARGET || level == RESET_LUN) {
11320 11319 /*
11321 11320 * If we are in the middle of discovery, return
11322 11321 * SUCCESS as this target will be rediscovered
11323 11322 * anyway
11324 11323 */
11325 11324 mutex_enter(&ptgt->tgt_mutex);
11326 11325 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11327 11326 mutex_exit(&ptgt->tgt_mutex);
11328 11327 return (1);
11329 11328 }
11330 11329 mutex_exit(&ptgt->tgt_mutex);
11331 11330
11332 11331 if (fcp_reset_target(ap, level) == FC_SUCCESS) {
11333 11332 rval = 1;
11334 11333 }
11335 11334 }
11336 11335 return (rval);
11337 11336 }
11338 11337
11339 11338
11340 11339 /*
11341 11340 * called by the framework to get a SCSI capability
11342 11341 */
11343 11342 static int
11344 11343 fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
11345 11344 {
11346 11345 return (fcp_commoncap(ap, cap, 0, whom, 0));
11347 11346 }
11348 11347
11349 11348
11350 11349 /*
11351 11350 * called by the framework to set a SCSI capability
11352 11351 */
11353 11352 static int
11354 11353 fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
11355 11354 {
11356 11355 return (fcp_commoncap(ap, cap, value, whom, 1));
11357 11356 }
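/*
 * Editor's note: fcp_scsi_getcap()/fcp_scsi_setcap() above are reached
 * through the scsi_ifgetcap(9F)/scsi_ifsetcap(9F) interfaces that SCSI
 * target drivers call; both funnel into fcp_commoncap().  A minimal,
 * hypothetical caller-side sketch (xx_tune_caps() is not part of this
 * driver; "tagged-qing" and "auto-rqsense" are standard SCSA
 * capability strings):
 */
static void
xx_tune_caps(struct scsi_device *sd)
{
	struct scsi_address *ap = &sd->sd_address;

	/* query tagged queuing support for this particular LUN (whom == 0) */
	if (scsi_ifgetcap(ap, "tagged-qing", 0) == 1) {
		/* try to enable automatic request sense, again per-LUN */
		(void) scsi_ifsetcap(ap, "auto-rqsense", 1, 0);
	}
}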
11358 11357
11359 11358 /*
11360 11359 * Function: fcp_pkt_setup
11361 11360 *
11362 11361 	 * Description: This function sets up the scsi_pkt structure passed by the
11363 11362 	 *		caller.  This function assumes fcp_pkt_constructor has been
11364 11363 	 *		called previously for the packet passed by the caller.  If
11365 11364 	 *		successful, this call will have the following results:
11366 11365 	 *
11367 11366 	 *		- The resources needed that will be constant throughout
11368 11367 	 *		  the whole transaction are allocated.
11369 11368 	 *		- The fields that will be constant throughout the whole
11370 11369 	 *		  transaction are initialized.
11371 11370 	 *		- The scsi packet will be linked to the LUN structure
11372 11371 	 *		  addressed by the transaction.
11373 11372 *
11374 11373 * Argument:
11375 11374 * *pkt Pointer to a scsi_pkt structure.
11376 11375 * callback
11377 11376 * arg
11378 11377 *
11379 11378 * Return Value: 0 Success
11380 11379 * !0 Failure
11381 11380 *
11382 11381 * Context: Kernel context or interrupt context
11383 11382 */
11384 11383 /* ARGSUSED */
11385 11384 static int
11386 11385 fcp_pkt_setup(struct scsi_pkt *pkt,
11387 11386 int (*callback)(caddr_t arg),
11388 11387 caddr_t arg)
11389 11388 {
11390 11389 struct fcp_pkt *cmd;
11391 11390 struct fcp_port *pptr;
11392 11391 struct fcp_lun *plun;
11393 11392 struct fcp_tgt *ptgt;
11394 11393 int kf;
11395 11394 fc_packet_t *fpkt;
11396 11395 fc_frame_hdr_t *hp;
11397 11396
11398 11397 pptr = ADDR2FCP(&pkt->pkt_address);
11399 11398 plun = ADDR2LUN(&pkt->pkt_address);
11400 11399 ptgt = plun->lun_tgt;
11401 11400
11402 11401 cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11403 11402 fpkt = cmd->cmd_fp_pkt;
11404 11403
11405 11404 /*
11406 11405 * this request is for dma allocation only
11407 11406 */
11408 11407 /*
11409 11408 * First step of fcp_scsi_init_pkt: pkt allocation
11410 11409 * We determine if the caller is willing to wait for the
11411 11410 * resources.
11412 11411 */
11413 11412 kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
11414 11413
11415 11414 /*
11416 11415 * Selective zeroing of the pkt.
11417 11416 */
11418 11417 cmd->cmd_back = NULL;
11419 11418 cmd->cmd_next = NULL;
11420 11419
11421 11420 /*
11422 11421 * Zero out fcp command
11423 11422 */
11424 11423 bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));
11425 11424
11426 11425 cmd->cmd_state = FCP_PKT_IDLE;
11427 11426
11428 11427 fpkt = cmd->cmd_fp_pkt;
11429 11428 fpkt->pkt_data_acc = NULL;
11430 11429
11431 11430 /*
11432 11431 * When port_state is FCP_STATE_OFFLINE, remote_port (tgt_pd_handle)
11433 11432 	 * could be destroyed.  We need to fail pkt_setup.
11434 11433 */
11435 11434 if (pptr->port_state & FCP_STATE_OFFLINE) {
11436 11435 return (-1);
11437 11436 }
11438 11437
11439 11438 mutex_enter(&ptgt->tgt_mutex);
11440 11439 fpkt->pkt_pd = ptgt->tgt_pd_handle;
11441 11440
11442 11441 if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
11443 11442 != FC_SUCCESS) {
11444 11443 mutex_exit(&ptgt->tgt_mutex);
11445 11444 return (-1);
11446 11445 }
11447 11446
11448 11447 mutex_exit(&ptgt->tgt_mutex);
11449 11448
11450 11449 	/* Fill in the Fibre Channel frame header */
11451 11450 hp = &fpkt->pkt_cmd_fhdr;
11452 11451 hp->r_ctl = R_CTL_COMMAND;
11453 11452 hp->rsvd = 0;
11454 11453 hp->type = FC_TYPE_SCSI_FCP;
11455 11454 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
11456 11455 hp->seq_id = 0;
11457 11456 hp->df_ctl = 0;
11458 11457 hp->seq_cnt = 0;
11459 11458 hp->ox_id = 0xffff;
11460 11459 hp->rx_id = 0xffff;
11461 11460 hp->ro = 0;
11462 11461
11463 11462 /*
11464 11463 * A doubly linked list (cmd_forw, cmd_back) is built
11465 11464 * out of every allocated packet on a per-lun basis
11466 11465 *
11467 11466 * The packets are maintained in the list so as to satisfy
11468 11467 * scsi_abort() requests. At present (which is unlikely to
11469 11468 * change in the future) nobody performs a real scsi_abort
11470 11469 * in the SCSI target drivers (as they don't keep the packets
11471 11470 * after doing scsi_transport - so they don't know how to
11472 11471 * abort a packet other than sending a NULL to abort all
11473 11472 * outstanding packets)
11474 11473 */
11475 11474 mutex_enter(&plun->lun_mutex);
11476 11475 if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
11477 11476 plun->lun_pkt_head->cmd_back = cmd;
11478 11477 } else {
11479 11478 plun->lun_pkt_tail = cmd;
11480 11479 }
11481 11480 plun->lun_pkt_head = cmd;
11482 11481 mutex_exit(&plun->lun_mutex);
11483 11482 return (0);
11484 11483 }
11485 11484
11486 11485 /*
11487 11486 * Function: fcp_pkt_teardown
11488 11487 *
11489 11488 * Description: This function releases a scsi_pkt structure and all the
11490 11489 * resources attached to it.
11491 11490 *
11492 11491 * Argument: *pkt Pointer to a scsi_pkt structure.
11493 11492 *
11494 11493 * Return Value: None
11495 11494 *
11496 11495 * Context: User, Kernel or Interrupt context.
11497 11496 */
11498 11497 static void
11499 11498 fcp_pkt_teardown(struct scsi_pkt *pkt)
11500 11499 {
11501 11500 struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
11502 11501 struct fcp_lun *plun = ADDR2LUN(&pkt->pkt_address);
11503 11502 struct fcp_pkt *cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11504 11503
11505 11504 /*
11506 11505 * Remove the packet from the per-lun list
11507 11506 */
11508 11507 mutex_enter(&plun->lun_mutex);
11509 11508 if (cmd->cmd_back) {
11510 11509 ASSERT(cmd != plun->lun_pkt_head);
11511 11510 cmd->cmd_back->cmd_forw = cmd->cmd_forw;
11512 11511 } else {
11513 11512 ASSERT(cmd == plun->lun_pkt_head);
11514 11513 plun->lun_pkt_head = cmd->cmd_forw;
11515 11514 }
11516 11515
11517 11516 if (cmd->cmd_forw) {
11518 11517 cmd->cmd_forw->cmd_back = cmd->cmd_back;
11519 11518 } else {
11520 11519 ASSERT(cmd == plun->lun_pkt_tail);
11521 11520 plun->lun_pkt_tail = cmd->cmd_back;
11522 11521 }
11523 11522
11524 11523 mutex_exit(&plun->lun_mutex);
11525 11524
11526 11525 (void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
11527 11526 }
11528 11527
11529 11528 /*
11530 11529 * Routine for reset notification setup, to register or cancel.
11531 11530 * This function is called by SCSA
11532 11531 */
11533 11532 /*ARGSUSED*/
11534 11533 static int
11535 11534 fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
11536 11535 void (*callback)(caddr_t), caddr_t arg)
11537 11536 {
11538 11537 struct fcp_port *pptr = ADDR2FCP(ap);
11539 11538
11540 11539 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
11541 11540 &pptr->port_mutex, &pptr->port_reset_notify_listf));
11542 11541 }
11543 11542
11544 11543
11545 11544 static int
11546 11545 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11547 11546 ddi_eventcookie_t *event_cookiep)
11548 11547 {
11549 11548 struct fcp_port *pptr = fcp_dip2port(dip);
11550 11549
11551 11550 if (pptr == NULL) {
11552 11551 return (DDI_FAILURE);
11553 11552 }
11554 11553
11555 11554 return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11556 11555 event_cookiep, NDI_EVENT_NOPASS));
11557 11556 }
11558 11557
11559 11558
11560 11559 static int
11561 11560 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11562 11561 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11563 11562 ddi_callback_id_t *cb_id)
11564 11563 {
11565 11564 struct fcp_port *pptr = fcp_dip2port(dip);
11566 11565
11567 11566 if (pptr == NULL) {
11568 11567 return (DDI_FAILURE);
11569 11568 }
11570 11569
11571 11570 return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11572 11571 eventid, callback, arg, NDI_SLEEP, cb_id));
11573 11572 }
11574 11573
11575 11574
11576 11575 static int
11577 11576 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11578 11577 {
11579 11578
11580 11579 struct fcp_port *pptr = fcp_dip2port(dip);
11581 11580
11582 11581 if (pptr == NULL) {
11583 11582 return (DDI_FAILURE);
11584 11583 }
11585 11584 return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11586 11585 }
11587 11586
11588 11587
11589 11588 /*
11590 11589 * called by the transport to post an event
11591 11590 */
11592 11591 static int
11593 11592 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11594 11593 ddi_eventcookie_t eventid, void *impldata)
11595 11594 {
11596 11595 struct fcp_port *pptr = fcp_dip2port(dip);
11597 11596
11598 11597 if (pptr == NULL) {
11599 11598 return (DDI_FAILURE);
11600 11599 }
11601 11600
11602 11601 return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11603 11602 eventid, impldata));
11604 11603 }
11605 11604
11606 11605
11607 11606 /*
11608 11607  * In Fibre Channel a target usually has a one-to-one relation with a
11609 11608  * port identifier (also known as the D_ID, or the AL_PA on a private
11610 11609  * loop). On Fibre Channel-to-SCSI bridge boxes a target reset will most
11611 11610  * likely reset all LUNs, which means a reset occurs on every SCSI
11612 11611  * device connected at the other end of the bridge. What the right
11613 11612  * behavior should be remains a matter of debate.
11614 11613  *
11615 11614  * Briefly, the problems are:
11616 11615  *
11617 11616  *	    SCSA doesn't define RESET_LUN; it defines RESET_TARGET, and
11618 11617  *	    target drivers use RESET_TARGET even though their instance is
11619 11618  *	    bound to a LUN.
11620 11619  *
11621 11620  *	    FCP (the current spec) only defines RESET TARGET in the
11622 11621  *	    control fields of an FCP_CMND structure. A LUN-level reset
11623 11622  *	    there would have given initiators the flexibility to limit
11624 11623  *	    the havoc caused by resetting a whole target.
11625 11624  */
11629 11628 static int
11630 11629 fcp_reset_target(struct scsi_address *ap, int level)
11631 11630 {
11632 11631 int rval = FC_FAILURE;
11633 11632 char lun_id[25];
11634 11633 struct fcp_port *pptr = ADDR2FCP(ap);
11635 11634 struct fcp_lun *plun = ADDR2LUN(ap);
11636 11635 struct fcp_tgt *ptgt = plun->lun_tgt;
11637 11636 struct scsi_pkt *pkt;
11638 11637 struct fcp_pkt *cmd;
11639 11638 struct fcp_rsp *rsp;
11640 11639 uint32_t tgt_cnt;
11641 11640 struct fcp_rsp_info *rsp_info;
11642 11641 struct fcp_reset_elem *p;
11643 11642 int bval;
11644 11643
11645 11644 if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
11646 11645 KM_NOSLEEP)) == NULL) {
11647 11646 return (rval);
11648 11647 }
11649 11648
11650 11649 mutex_enter(&ptgt->tgt_mutex);
11651 11650 if (level == RESET_TARGET) {
11652 11651 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11653 11652 mutex_exit(&ptgt->tgt_mutex);
11654 11653 kmem_free(p, sizeof (struct fcp_reset_elem));
11655 11654 return (rval);
11656 11655 }
11657 11656 fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
11658 11657 (void) strcpy(lun_id, " ");
11659 11658 } else {
11660 11659 if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
11661 11660 mutex_exit(&ptgt->tgt_mutex);
11662 11661 kmem_free(p, sizeof (struct fcp_reset_elem));
11663 11662 return (rval);
11664 11663 }
11665 11664 fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
11666 11665
11667 11666 (void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
11668 11667 }
11669 11668 tgt_cnt = ptgt->tgt_change_cnt;
11670 11669
11671 11670 mutex_exit(&ptgt->tgt_mutex);
11672 11671
11673 11672 if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
11674 11673 0, 0, NULL, 0)) == NULL) {
11675 11674 kmem_free(p, sizeof (struct fcp_reset_elem));
11676 11675 mutex_enter(&ptgt->tgt_mutex);
11677 11676 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11678 11677 mutex_exit(&ptgt->tgt_mutex);
11679 11678 return (rval);
11680 11679 }
11681 11680 pkt->pkt_time = FCP_POLL_TIMEOUT;
11682 11681
11683 11682 /* fill in cmd part of packet */
11684 11683 cmd = PKT2CMD(pkt);
11685 11684 if (level == RESET_TARGET) {
11686 11685 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
11687 11686 } else {
11688 11687 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
11689 11688 }
11690 11689 cmd->cmd_fp_pkt->pkt_comp = NULL;
11691 11690 cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
11692 11691
11693 11692 /* prepare a packet for transport */
11694 11693 fcp_prepare_pkt(pptr, cmd, plun);
11695 11694
11696 11695 if (cmd->cmd_pkt->pkt_time) {
11697 11696 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11698 11697 } else {
11699 11698 cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11700 11699 }
11701 11700
11702 11701 (void) fc_ulp_busy_port(pptr->port_fp_handle);
11703 11702 bval = fcp_dopoll(pptr, cmd);
11704 11703 fc_ulp_idle_port(pptr->port_fp_handle);
11705 11704
11706 11705 	/* examine the outcome of the polled command */
11707 11706 if (bval == TRAN_ACCEPT) {
11708 11707 int error = 3;
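		/*
		 * error selects the message logged below: 0 = reset succeeded,
		 * 1 = the FCP rsp_code reported a failure, 2 = the FCP response
		 * failed validation, 3 = no usable response was returned.
		 */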
11709 11708
11710 11709 rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
11711 11710 rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
11712 11711 sizeof (struct fcp_rsp));
11713 11712
11714 11713 if (rsp->fcp_u.fcp_status.rsp_len_set) {
11715 11714 if (fcp_validate_fcp_response(rsp, pptr) ==
11716 11715 FC_SUCCESS) {
11717 11716 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
11718 11717 FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
11719 11718 sizeof (struct fcp_rsp), rsp_info,
11720 11719 cmd->cmd_fp_pkt->pkt_resp_acc,
11721 11720 sizeof (struct fcp_rsp_info));
11722 11721 }
11723 11722 if (rsp_info->rsp_code == FCP_NO_FAILURE) {
11724 11723 rval = FC_SUCCESS;
11725 11724 error = 0;
11726 11725 } else {
11727 11726 error = 1;
11728 11727 }
11729 11728 } else {
11730 11729 error = 2;
11731 11730 }
11732 11731 }
11733 11732
11734 11733 switch (error) {
11735 11734 case 0:
11736 11735 fcp_log(CE_WARN, pptr->port_dip,
11737 11736 "!FCP: WWN 0x%08x%08x %s reset successfully",
11738 11737 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11739 11738 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11740 11739 break;
11741 11740
11742 11741 case 1:
11743 11742 fcp_log(CE_WARN, pptr->port_dip,
11744 11743 "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11745 11744 " response code=%x",
11746 11745 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11747 11746 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11748 11747 rsp_info->rsp_code);
11749 11748 break;
11750 11749
11751 11750 case 2:
11752 11751 fcp_log(CE_WARN, pptr->port_dip,
11753 11752 "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11754 11753 " Bad FCP response values: rsvd1=%x,"
11755 11754 " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
11756 11755 " rsplen=%x, senselen=%x",
11757 11756 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11758 11757 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11759 11758 rsp->reserved_0, rsp->reserved_1,
11760 11759 rsp->fcp_u.fcp_status.reserved_0,
11761 11760 rsp->fcp_u.fcp_status.reserved_1,
11762 11761 rsp->fcp_response_len, rsp->fcp_sense_len);
11763 11762 break;
11764 11763
11765 11764 default:
11766 11765 fcp_log(CE_WARN, pptr->port_dip,
11767 11766 "!FCP: Reset to WWN 0x%08x%08x %s failed",
11768 11767 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11769 11768 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11770 11769 break;
11771 11770 }
11772 11771 }
11773 11772 scsi_destroy_pkt(pkt);
11774 11773
11775 11774 if (rval == FC_FAILURE) {
11776 11775 mutex_enter(&ptgt->tgt_mutex);
11777 11776 if (level == RESET_TARGET) {
11778 11777 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11779 11778 } else {
11780 11779 fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
11781 11780 }
11782 11781 mutex_exit(&ptgt->tgt_mutex);
11783 11782 kmem_free(p, sizeof (struct fcp_reset_elem));
11784 11783 return (rval);
11785 11784 }
11786 11785
11787 11786 mutex_enter(&pptr->port_mutex);
11788 11787 if (level == RESET_TARGET) {
11789 11788 p->tgt = ptgt;
11790 11789 p->lun = NULL;
11791 11790 } else {
11792 11791 p->tgt = NULL;
11793 11792 p->lun = plun;
11794 11793 }
11795 11794 p->tgt = ptgt;
11796 11795 p->tgt_cnt = tgt_cnt;
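	/*
	 * Queue this element on the port reset list; the watchdog (fcp_watch)
	 * clears the BUSY state set above once FCP_RESET_DELAY has elapsed.
	 */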
11797 11796 p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
11798 11797 p->next = pptr->port_reset_list;
11799 11798 pptr->port_reset_list = p;
11800 11799
11801 11800 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11802 11801 fcp_trace, FCP_BUF_LEVEL_3, 0,
11803 11802 "Notify ssd of the reset to reinstate the reservations");
11804 11803
11805 11804 scsi_hba_reset_notify_callback(&pptr->port_mutex,
11806 11805 &pptr->port_reset_notify_listf);
11807 11806
11808 11807 mutex_exit(&pptr->port_mutex);
11809 11808
11810 11809 return (rval);
11811 11810 }
11812 11811
11813 11812
11814 11813 /*
11815 11814 * called by fcp_getcap and fcp_setcap to get and set (respectively)
11816 11815 * SCSI capabilities
11817 11816 */
11818 11817 /* ARGSUSED */
11819 11818 static int
11820 11819 fcp_commoncap(struct scsi_address *ap, char *cap,
11821 11820 int val, int tgtonly, int doset)
11822 11821 {
11823 11822 struct fcp_port *pptr = ADDR2FCP(ap);
11824 11823 struct fcp_lun *plun = ADDR2LUN(ap);
11825 11824 struct fcp_tgt *ptgt = plun->lun_tgt;
11826 11825 int cidx;
11827 11826 int rval = FALSE;
11828 11827
11829 11828 if (cap == (char *)0) {
11830 11829 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11831 11830 fcp_trace, FCP_BUF_LEVEL_3, 0,
11832 11831 "fcp_commoncap: invalid arg");
11833 11832 return (rval);
11834 11833 }
11835 11834
11836 11835 if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
11837 11836 return (UNDEFINED);
11838 11837 }
11839 11838
11840 11839 /*
11841 11840 * Process setcap request.
11842 11841 */
11843 11842 if (doset) {
11844 11843 /*
11845 11844 * At present, we can only set binary (0/1) values
11846 11845 */
11847 11846 switch (cidx) {
11848 11847 case SCSI_CAP_ARQ:
11849 11848 if (val == 0) {
11850 11849 rval = FALSE;
11851 11850 } else {
11852 11851 rval = TRUE;
11853 11852 }
11854 11853 break;
11855 11854
11856 11855 case SCSI_CAP_LUN_RESET:
11857 11856 if (val) {
11858 11857 plun->lun_cap |= FCP_LUN_CAP_RESET;
11859 11858 } else {
11860 11859 plun->lun_cap &= ~FCP_LUN_CAP_RESET;
11861 11860 }
11862 11861 rval = TRUE;
11863 11862 break;
11864 11863
11865 11864 case SCSI_CAP_SECTOR_SIZE:
11866 11865 rval = TRUE;
11867 11866 break;
11868 11867 default:
11869 11868 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11870 11869 fcp_trace, FCP_BUF_LEVEL_4, 0,
11871 11870 "fcp_setcap: unsupported %d", cidx);
11872 11871 rval = UNDEFINED;
11873 11872 break;
11874 11873 }
11875 11874
11876 11875 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11877 11876 fcp_trace, FCP_BUF_LEVEL_5, 0,
11878 11877 "set cap: cap=%s, val/tgtonly/doset/rval = "
11879 11878 "0x%x/0x%x/0x%x/%d",
11880 11879 cap, val, tgtonly, doset, rval);
11881 11880
11882 11881 } else {
11883 11882 /*
11884 11883 * Process getcap request.
11885 11884 */
11886 11885 switch (cidx) {
11887 11886 case SCSI_CAP_DMA_MAX:
11888 11887 rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;
11889 11888
11890 11889 			/*
11891 11890 			 * An adjustment is needed here: qlc reports
11892 11891 			 * dma_attr_maxxfer as a 64-bit value while st
11893 11892 			 * treats this capability as an int, so clamp
11894 11893 			 * the value here rather than touching those
11895 11894 			 * drivers. That still allows a maximum single
11896 11895 			 * block length of 2GB, which should last.
11897 11896 			 */
11897 11896
11898 11897 if (rval == -1) {
11899 11898 rval = MAX_INT_DMA;
11900 11899 }
11901 11900
11902 11901 break;
11903 11902
11904 11903 case SCSI_CAP_INITIATOR_ID:
11905 11904 rval = pptr->port_id;
11906 11905 break;
11907 11906
11908 11907 case SCSI_CAP_ARQ:
11909 11908 case SCSI_CAP_RESET_NOTIFICATION:
11910 11909 case SCSI_CAP_TAGGED_QING:
11911 11910 rval = TRUE;
11912 11911 break;
11913 11912
11914 11913 case SCSI_CAP_SCSI_VERSION:
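			/* this driver reports SCSI-3 compliance */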
11915 11914 rval = 3;
11916 11915 break;
11917 11916
11918 11917 case SCSI_CAP_INTERCONNECT_TYPE:
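			/*
			 * Loop targets with a hard address report FIBRE; fabric
			 * topologies and targets without a hard address report
			 * FABRIC.
			 */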
11919 11918 if (FC_TOP_EXTERNAL(pptr->port_topology) ||
11920 11919 (ptgt->tgt_hard_addr == 0)) {
11921 11920 rval = INTERCONNECT_FABRIC;
11922 11921 } else {
11923 11922 rval = INTERCONNECT_FIBRE;
11924 11923 }
11925 11924 break;
11926 11925
11927 11926 case SCSI_CAP_LUN_RESET:
11928 11927 rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
11929 11928 TRUE : FALSE;
11930 11929 break;
11931 11930
11932 11931 default:
11933 11932 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11934 11933 fcp_trace, FCP_BUF_LEVEL_4, 0,
11935 11934 "fcp_getcap: unsupported %d", cidx);
11936 11935 rval = UNDEFINED;
11937 11936 break;
11938 11937 }
11939 11938
11940 11939 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11941 11940 fcp_trace, FCP_BUF_LEVEL_8, 0,
11942 11941 "get cap: cap=%s, val/tgtonly/doset/rval = "
11943 11942 "0x%x/0x%x/0x%x/%d",
11944 11943 cap, val, tgtonly, doset, rval);
11945 11944 }
11946 11945
11947 11946 return (rval);
11948 11947 }
11949 11948
11950 11949 /*
11951 11950 * called by the transport to get the port-wwn and lun
11952 11951 * properties of this device, and to create a "name" based on them
11953 11952 *
11954 11953 * these properties don't exist on sun4m
11955 11954 *
11956 11955 * return 1 for success else return 0
11957 11956 */
11958 11957 /* ARGSUSED */
11959 11958 static int
11960 11959 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11961 11960 {
11962 11961 int i;
11963 11962 int *lun;
11964 11963 int numChars;
11965 11964 uint_t nlun;
11966 11965 uint_t count;
11967 11966 uint_t nbytes;
11968 11967 uchar_t *bytes;
11969 11968 uint16_t lun_num;
11970 11969 uint32_t tgt_id;
11971 11970 char **conf_wwn;
11972 11971 char tbuf[(FC_WWN_SIZE << 1) + 1];
11973 11972 uchar_t barray[FC_WWN_SIZE];
11974 11973 dev_info_t *tgt_dip;
11975 11974 struct fcp_tgt *ptgt;
11976 11975 struct fcp_port *pptr;
11977 11976 struct fcp_lun *plun;
11978 11977
11979 11978 ASSERT(sd != NULL);
11980 11979 ASSERT(name != NULL);
11981 11980
11982 11981 tgt_dip = sd->sd_dev;
11983 11982 pptr = ddi_get_soft_state(fcp_softstate,
11984 11983 ddi_get_instance(ddi_get_parent(tgt_dip)));
11985 11984 if (pptr == NULL) {
11986 11985 return (0);
11987 11986 }
11988 11987
11989 11988 ASSERT(tgt_dip != NULL);
11990 11989
11991 11990 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11992 11991 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11993 11992 LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11994 11993 name[0] = '\0';
11995 11994 return (0);
11996 11995 }
11997 11996
11998 11997 if (nlun == 0) {
11999 11998 ddi_prop_free(lun);
12000 11999 return (0);
12001 12000 }
12002 12001
12003 12002 lun_num = lun[0];
12004 12003 ddi_prop_free(lun);
12005 12004
12006 12005 /*
12007 12006 	 * Look up the .conf WWN property.
12008 12007 */
12009 12008 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
12010 12009 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
12011 12010 &conf_wwn, &count) == DDI_PROP_SUCCESS) {
12012 12011 ASSERT(count >= 1);
12013 12012
12014 12013 fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
12015 12014 ddi_prop_free(conf_wwn);
12016 12015 mutex_enter(&pptr->port_mutex);
12017 12016 if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
12018 12017 mutex_exit(&pptr->port_mutex);
12019 12018 return (0);
12020 12019 }
12021 12020 ptgt = plun->lun_tgt;
12022 12021 mutex_exit(&pptr->port_mutex);
12023 12022
12024 12023 (void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
12025 12024 tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
12026 12025
12027 12026 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12028 12027 ptgt->tgt_hard_addr != 0) {
12029 12028 tgt_id = (uint32_t)fcp_alpa_to_switch[
12030 12029 ptgt->tgt_hard_addr];
12031 12030 } else {
12032 12031 tgt_id = ptgt->tgt_d_id;
12033 12032 }
12034 12033
12035 12034 (void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
12036 12035 TARGET_PROP, tgt_id);
12037 12036 }
12038 12037
12039 12038 	/* get our port-wwn property */
12040 12039 bytes = NULL;
12041 12040 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
12042 12041 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12043 12042 &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
12044 12043 if (bytes != NULL) {
12045 12044 ddi_prop_free(bytes);
12046 12045 }
12047 12046 return (0);
12048 12047 }
12049 12048
12050 12049 for (i = 0; i < FC_WWN_SIZE; i++) {
12051 12050 (void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
12052 12051 }
12053 12052
12054 12053 /* Stick in the address of the form "wWWN,LUN" */
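	/* e.g. "w2100001122334455,1": a hypothetical port WWN plus the LUN in hex */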
12055 12054 numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
12056 12055
12057 12056 ASSERT(numChars < len);
12058 12057 if (numChars >= len) {
12059 12058 fcp_log(CE_WARN, pptr->port_dip,
12060 12059 "!fcp_scsi_get_name: "
12061 12060 "name parameter length too small, it needs to be %d",
12062 12061 numChars+1);
12063 12062 }
12064 12063
12065 12064 ddi_prop_free(bytes);
12066 12065
12067 12066 return (1);
12068 12067 }
12069 12068
12070 12069
12071 12070 /*
12072 12071 * called by the transport to get the SCSI target id value, returning
12073 12072 * it in "name"
12074 12073 *
12075 12074 * this isn't needed/used on sun4m
12076 12075 *
12077 12076 * return 1 for success else return 0
12078 12077 */
12079 12078 /* ARGSUSED */
12080 12079 static int
12081 12080 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
12082 12081 {
12083 12082 struct fcp_lun *plun = ADDR2LUN(&sd->sd_address);
12084 12083 struct fcp_tgt *ptgt;
12085 12084 int numChars;
12086 12085
12087 12086 if (plun == NULL) {
12088 12087 return (0);
12089 12088 }
12090 12089
12091 12090 if ((ptgt = plun->lun_tgt) == NULL) {
12092 12091 return (0);
12093 12092 }
12094 12093
12095 12094 numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
12096 12095
12097 12096 ASSERT(numChars < len);
12098 12097 if (numChars >= len) {
12099 12098 fcp_log(CE_WARN, NULL,
12100 12099 "!fcp_scsi_get_bus_addr: "
12101 12100 "name parameter length too small, it needs to be %d",
12102 12101 numChars+1);
12103 12102 }
12104 12103
12105 12104 return (1);
12106 12105 }
12107 12106
12108 12107
12109 12108 /*
12110 12109 * called internally to reset the link where the specified port lives
12111 12110 */
12112 12111 static int
12113 12112 fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
12114 12113 {
12115 12114 la_wwn_t wwn;
12116 12115 struct fcp_lun *plun;
12117 12116 struct fcp_tgt *ptgt;
12118 12117
12119 12118 /* disable restart of lip if we're suspended */
12120 12119 mutex_enter(&pptr->port_mutex);
12121 12120
12122 12121 if (pptr->port_state & (FCP_STATE_SUSPENDED |
12123 12122 FCP_STATE_POWER_DOWN)) {
12124 12123 mutex_exit(&pptr->port_mutex);
12125 12124 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12126 12125 fcp_trace, FCP_BUF_LEVEL_2, 0,
12127 12126 "fcp_linkreset, fcp%d: link reset "
12128 12127 "disabled due to DDI_SUSPEND",
12129 12128 ddi_get_instance(pptr->port_dip));
12130 12129 return (FC_FAILURE);
12131 12130 }
12132 12131
12133 12132 if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
12134 12133 mutex_exit(&pptr->port_mutex);
12135 12134 return (FC_SUCCESS);
12136 12135 }
12137 12136
12138 12137 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
12139 12138 fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");
12140 12139
12141 12140 /*
12142 12141 * If ap == NULL assume local link reset.
12143 12142 */
12144 12143 if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
12145 12144 plun = ADDR2LUN(ap);
12146 12145 ptgt = plun->lun_tgt;
12147 12146 bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
12148 12147 } else {
12149 12148 bzero((caddr_t)&wwn, sizeof (wwn));
12150 12149 }
12151 12150 mutex_exit(&pptr->port_mutex);
12152 12151
12153 12152 return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
12154 12153 }
12155 12154
12156 12155
12157 12156 /*
12158 12157 * called from fcp_port_attach() to resume a port
12159 12158 * return DDI_* success/failure status
12160 12159 * acquires and releases the global mutex
12161 12160 * acquires and releases the port mutex
12162 12161 */
12163 12162 /*ARGSUSED*/
12164 12163
12165 12164 static int
12166 12165 fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
12167 12166 uint32_t s_id, fc_attach_cmd_t cmd, int instance)
12168 12167 {
12169 12168 int res = DDI_FAILURE; /* default result */
12170 12169 struct fcp_port *pptr; /* port state ptr */
12171 12170 uint32_t alloc_cnt;
12172 12171 uint32_t max_cnt;
12173 12172 fc_portmap_t *tmp_list = NULL;
12174 12173
12175 12174 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
12176 12175 FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
12177 12176 instance);
12178 12177
12179 12178 if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
12180 12179 cmn_err(CE_WARN, "fcp: bad soft state");
12181 12180 return (res);
12182 12181 }
12183 12182
12184 12183 mutex_enter(&pptr->port_mutex);
12185 12184 switch (cmd) {
12186 12185 case FC_CMD_RESUME:
12187 12186 ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
12188 12187 pptr->port_state &= ~FCP_STATE_SUSPENDED;
12189 12188 break;
12190 12189
12191 12190 case FC_CMD_POWER_UP:
12192 12191 /*
12193 12192 * If the port is DDI_SUSPENded, defer rediscovery
12194 12193 * until DDI_RESUME occurs
12195 12194 */
12196 12195 if (pptr->port_state & FCP_STATE_SUSPENDED) {
12197 12196 pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12198 12197 mutex_exit(&pptr->port_mutex);
12199 12198 return (DDI_SUCCESS);
12200 12199 }
12201 12200 pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12202 12201 }
12203 12202 pptr->port_id = s_id;
12204 12203 pptr->port_state = FCP_STATE_INIT;
12205 12204 mutex_exit(&pptr->port_mutex);
12206 12205
12207 12206 /*
12208 12207 * Make a copy of ulp_port_info as fctl allocates
12209 12208 * a temp struct.
12210 12209 */
12211 12210 (void) fcp_cp_pinfo(pptr, pinfo);
12212 12211
12213 12212 mutex_enter(&fcp_global_mutex);
12214 12213 if (fcp_watchdog_init++ == 0) {
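		/*
		 * fcp_watchdog_timeout is in seconds; drv_usectohz(1000000)
		 * yields the number of clock ticks per second.
		 */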
12215 12214 fcp_watchdog_tick = fcp_watchdog_timeout *
12216 12215 drv_usectohz(1000000);
12217 12216 fcp_watchdog_id = timeout(fcp_watch,
12218 12217 NULL, fcp_watchdog_tick);
12219 12218 }
12220 12219 mutex_exit(&fcp_global_mutex);
12221 12220
12222 12221 /*
12223 12222 * Handle various topologies and link states.
12224 12223 */
12225 12224 switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
12226 12225 case FC_STATE_OFFLINE:
12227 12226 /*
12228 12227 * Wait for ONLINE, at which time a state
12229 12228 * change will cause a statec_callback
12230 12229 */
12231 12230 res = DDI_SUCCESS;
12232 12231 break;
12233 12232
12234 12233 case FC_STATE_ONLINE:
12235 12234
12236 12235 if (pptr->port_topology == FC_TOP_UNKNOWN) {
12237 12236 (void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
12238 12237 res = DDI_SUCCESS;
12239 12238 break;
12240 12239 }
12241 12240
12242 12241 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
12243 12242 !fcp_enable_auto_configuration) {
12244 12243 tmp_list = fcp_construct_map(pptr, &alloc_cnt);
12245 12244 if (tmp_list == NULL) {
12246 12245 if (!alloc_cnt) {
12247 12246 res = DDI_SUCCESS;
12248 12247 }
12249 12248 break;
12250 12249 }
12251 12250 max_cnt = alloc_cnt;
12252 12251 } else {
12253 12252 ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
12254 12253
12255 12254 alloc_cnt = FCP_MAX_DEVICES;
12256 12255
12257 12256 if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
12258 12257 (sizeof (fc_portmap_t)) * alloc_cnt,
12259 12258 KM_NOSLEEP)) == NULL) {
12260 12259 fcp_log(CE_WARN, pptr->port_dip,
12261 12260 "!fcp%d: failed to allocate portmap",
12262 12261 instance);
12263 12262 break;
12264 12263 }
12265 12264
12266 12265 max_cnt = alloc_cnt;
12267 12266 if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
12268 12267 &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
12269 12268 FC_SUCCESS) {
12270 12269 caddr_t msg;
12271 12270
12272 12271 (void) fc_ulp_error(res, &msg);
12273 12272
12274 12273 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12275 12274 fcp_trace, FCP_BUF_LEVEL_2, 0,
12276 12275 "resume failed getportmap: reason=0x%x",
12277 12276 res);
12278 12277
12279 12278 fcp_log(CE_WARN, pptr->port_dip,
12280 12279 "!failed to get port map : %s", msg);
12281 12280 break;
12282 12281 }
12283 12282 if (max_cnt > alloc_cnt) {
12284 12283 alloc_cnt = max_cnt;
12285 12284 }
12286 12285 }
12287 12286
12288 12287 /*
12289 12288 * do the SCSI device discovery and create
12290 12289 * the devinfos
12291 12290 */
12292 12291 fcp_statec_callback(ulph, pptr->port_fp_handle,
12293 12292 pptr->port_phys_state, pptr->port_topology, tmp_list,
12294 12293 max_cnt, pptr->port_id);
12295 12294
12296 12295 res = DDI_SUCCESS;
12297 12296 break;
12298 12297
12299 12298 default:
12300 12299 fcp_log(CE_WARN, pptr->port_dip,
12301 12300 "!fcp%d: invalid port state at attach=0x%x",
12302 12301 instance, pptr->port_phys_state);
12303 12302
12304 12303 mutex_enter(&pptr->port_mutex);
12305 12304 pptr->port_phys_state = FCP_STATE_OFFLINE;
12306 12305 mutex_exit(&pptr->port_mutex);
12307 12306 res = DDI_SUCCESS;
12308 12307
12309 12308 break;
12310 12309 }
12311 12310
12312 12311 if (tmp_list != NULL) {
12313 12312 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
12314 12313 }
12315 12314
12316 12315 return (res);
12317 12316 }
12318 12317
12319 12318
12320 12319 static void
12321 12320 fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
12322 12321 {
12323 12322 pptr->port_fp_modlinkage = *pinfo->port_linkage;
12324 12323 pptr->port_dip = pinfo->port_dip;
12325 12324 pptr->port_fp_handle = pinfo->port_handle;
12326 12325 if (pinfo->port_acc_attr != NULL) {
12327 12326 /*
12328 12327 * FCA supports DMA
12329 12328 */
12330 12329 pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
12331 12330 pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
12332 12331 pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
12333 12332 pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
12334 12333 }
12335 12334 pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
12336 12335 pptr->port_max_exch = pinfo->port_fca_max_exch;
12337 12336 pptr->port_phys_state = pinfo->port_state;
12338 12337 pptr->port_topology = pinfo->port_flags;
12339 12338 pptr->port_reset_action = pinfo->port_reset_action;
12340 12339 pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
12341 12340 pptr->port_fcp_dma = pinfo->port_fcp_dma;
12342 12341 bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
12343 12342 bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
12344 12343
12345 12344 /* Clear FMA caps to avoid fm-capability ereport */
12346 12345 if (pptr->port_cmd_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12347 12346 pptr->port_cmd_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12348 12347 if (pptr->port_data_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12349 12348 pptr->port_data_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12350 12349 if (pptr->port_resp_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12351 12350 pptr->port_resp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12352 12351 }
12353 12352
12354 12353 /*
12355 12354  * If the element's wait field is set to 1, another thread is waiting
12356 12355  * for the operation to complete; once it is complete, the waiting
12357 12356  * thread is signaled and frees the element. If the element's wait
12358 12357  * field is set to 0, the element is freed here.
12359 12358  */
12361 12360 static void
12362 12361 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12363 12362 {
12364 12363 ASSERT(elem != NULL);
12365 12364 mutex_enter(&elem->mutex);
12366 12365 elem->result = result;
12367 12366 if (elem->wait) {
12368 12367 elem->wait = 0;
12369 12368 cv_signal(&elem->cv);
12370 12369 mutex_exit(&elem->mutex);
12371 12370 } else {
12372 12371 mutex_exit(&elem->mutex);
12373 12372 cv_destroy(&elem->cv);
12374 12373 mutex_destroy(&elem->mutex);
12375 12374 kmem_free(elem, sizeof (struct fcp_hp_elem));
12376 12375 }
12377 12376 }
12378 12377
12379 12378 /*
12380 12379 * This function is invoked from the taskq thread to allocate
12381 12380 * devinfo nodes and to online/offline them.
12382 12381 */
12383 12382 static void
12384 12383 fcp_hp_task(void *arg)
12385 12384 {
12386 12385 struct fcp_hp_elem *elem = (struct fcp_hp_elem *)arg;
12387 12386 struct fcp_lun *plun = elem->lun;
12388 12387 struct fcp_port *pptr = elem->port;
12389 12388 int result;
12390 12389
12391 12390 ASSERT(elem->what == FCP_ONLINE ||
12392 12391 elem->what == FCP_OFFLINE ||
12393 12392 elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
12394 12393 elem->what == FCP_MPXIO_PATH_SET_BUSY);
12395 12394
12396 12395 mutex_enter(&pptr->port_mutex);
12397 12396 mutex_enter(&plun->lun_mutex);
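	/*
	 * Bail out if a newer online/offline event has superseded this one,
	 * or if the port is suspended, detaching, or powering down.
	 */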
12398 12397 if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
12399 12398 plun->lun_event_count != elem->event_cnt) ||
12400 12399 pptr->port_state & (FCP_STATE_SUSPENDED |
12401 12400 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
12402 12401 mutex_exit(&plun->lun_mutex);
12403 12402 mutex_exit(&pptr->port_mutex);
12404 12403 fcp_process_elem(elem, NDI_FAILURE);
12405 12404 return;
12406 12405 }
12407 12406 mutex_exit(&plun->lun_mutex);
12408 12407 mutex_exit(&pptr->port_mutex);
12409 12408
12410 12409 result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
12411 12410 elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
12412 12411 fcp_process_elem(elem, result);
12413 12412 }
12414 12413
12415 12414
12416 12415 static child_info_t *
12417 12416 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12418 12417 int tcount)
12419 12418 {
12420 12419 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12421 12420
12422 12421 if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12423 12422 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12424 12423
12425 12424 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12426 12425 /*
12427 12426 * Child has not been created yet. Create the child device
12428 12427 * based on the per-Lun flags.
12429 12428 		 * based on the per-LUN flags.
12430 12429 if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12431 12430 plun->lun_cip =
12432 12431 CIP(fcp_create_dip(plun, lcount, tcount));
12433 12432 plun->lun_mpxio = 0;
12434 12433 } else {
12435 12434 plun->lun_cip =
12436 12435 CIP(fcp_create_pip(plun, lcount, tcount));
12437 12436 plun->lun_mpxio = 1;
12438 12437 }
12439 12438 } else {
12440 12439 plun->lun_cip = cip;
12441 12440 }
12442 12441
12443 12442 return (plun->lun_cip);
12444 12443 }
12445 12444
12446 12445
12447 12446 static int
12448 12447 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12449 12448 {
12450 12449 int rval = FC_FAILURE;
12451 12450 dev_info_t *pdip;
12452 12451 struct dev_info *dip;
12453 12452 int circular;
12454 12453
12455 12454 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12456 12455
12457 12456 pdip = plun->lun_tgt->tgt_port->port_dip;
12458 12457
12459 12458 if (plun->lun_cip == NULL) {
12460 12459 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12461 12460 fcp_trace, FCP_BUF_LEVEL_3, 0,
12462 12461 "fcp_is_dip_present: plun->lun_cip is NULL: "
12463 12462 "plun: %p lun state: %x num: %d target state: %x",
12464 12463 plun, plun->lun_state, plun->lun_num,
12465 12464 plun->lun_tgt->tgt_port->port_state);
12466 12465 return (rval);
12467 12466 }
12468 12467 ndi_devi_enter(pdip, &circular);
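	/* walk the children of the port dip under ndi_devi_enter() looking for cdip */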
12469 12468 dip = DEVI(pdip)->devi_child;
12470 12469 while (dip) {
12471 12470 if (dip == DEVI(cdip)) {
12472 12471 rval = FC_SUCCESS;
12473 12472 break;
12474 12473 }
12475 12474 dip = dip->devi_sibling;
12476 12475 }
12477 12476 ndi_devi_exit(pdip, circular);
12478 12477 return (rval);
12479 12478 }
12480 12479
12481 12480 static int
12482 12481 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12483 12482 {
12484 12483 int rval = FC_FAILURE;
12485 12484
12486 12485 ASSERT(plun != NULL);
12487 12486 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12488 12487
12489 12488 if (plun->lun_mpxio == 0) {
12490 12489 rval = fcp_is_dip_present(plun, DIP(cip));
12491 12490 } else {
12492 12491 rval = fcp_is_pip_present(plun, PIP(cip));
12493 12492 }
12494 12493
12495 12494 return (rval);
12496 12495 }
12497 12496
12498 12497 /*
12499 12498 * Function: fcp_create_dip
12500 12499 *
12501 12500 * Description: Creates a dev_info_t structure for the LUN specified by the
12502 12501 * caller.
12503 12502 *
12504 12503 * Argument: plun Lun structure
12505 12504 * link_cnt Link state count.
12506 12505 * tgt_cnt Target state change count.
12507 12506 *
12508 12507 * Return Value: NULL if it failed
12509 12508 * dev_info_t structure address if it succeeded
12510 12509 *
12511 12510 * Context: Kernel context
12512 12511 */
12513 12512 static dev_info_t *
12514 12513 fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
12515 12514 {
12516 12515 int failure = 0;
12517 12516 uint32_t tgt_id;
12518 12517 uint64_t sam_lun;
12519 12518 struct fcp_tgt *ptgt = plun->lun_tgt;
12520 12519 struct fcp_port *pptr = ptgt->tgt_port;
12521 12520 dev_info_t *pdip = pptr->port_dip;
12522 12521 dev_info_t *cdip = NULL;
12523 12522 dev_info_t *old_dip = DIP(plun->lun_cip);
12524 12523 char *nname = NULL;
12525 12524 char **compatible = NULL;
12526 12525 int ncompatible;
12527 12526 char *scsi_binding_set;
12528 12527 char t_pwwn[17];
12529 12528
12530 12529 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12531 12530 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12532 12531
12533 12532 /* get the 'scsi-binding-set' property */
12534 12533 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
12535 12534 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
12536 12535 &scsi_binding_set) != DDI_PROP_SUCCESS) {
12537 12536 scsi_binding_set = NULL;
12538 12537 }
12539 12538
12540 12539 /* determine the node name and compatible */
12541 12540 scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12542 12541 plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12543 12542 if (scsi_binding_set) {
12544 12543 ddi_prop_free(scsi_binding_set);
12545 12544 }
12546 12545
12547 12546 if (nname == NULL) {
12548 12547 #ifdef DEBUG
12549 12548 cmn_err(CE_WARN, "%s%d: no driver for "
12550 12549 "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12551 12550 " compatible: %s",
12552 12551 ddi_driver_name(pdip), ddi_get_instance(pdip),
12553 12552 ptgt->tgt_port_wwn.raw_wwn[0],
12554 12553 ptgt->tgt_port_wwn.raw_wwn[1],
12555 12554 ptgt->tgt_port_wwn.raw_wwn[2],
12556 12555 ptgt->tgt_port_wwn.raw_wwn[3],
12557 12556 ptgt->tgt_port_wwn.raw_wwn[4],
12558 12557 ptgt->tgt_port_wwn.raw_wwn[5],
12559 12558 ptgt->tgt_port_wwn.raw_wwn[6],
12560 12559 ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12561 12560 *compatible);
12562 12561 #endif /* DEBUG */
12563 12562 failure++;
12564 12563 goto end_of_fcp_create_dip;
12565 12564 }
12566 12565
12567 12566 cdip = fcp_find_existing_dip(plun, pdip, nname);
12568 12567
12569 12568 	/*
12570 12569 	 * If the old_dip does not match the cdip, some property has
12571 12570 	 * changed; since we will be using the cdip, the old_dip must be
12572 12571 	 * offlined. If the state contains FCP_LUN_CHANGED, the dtype for
12573 12572 	 * the device has been updated: offline the old device and create
12574 12573 	 * a new device with the new device type.
12575 12574 	 * Refer to bug: 4764752
12576 12575 	 */
12577 12576 if (old_dip && (cdip != old_dip ||
12578 12577 plun->lun_state & FCP_LUN_CHANGED)) {
12579 12578 plun->lun_state &= ~(FCP_LUN_INIT);
12580 12579 mutex_exit(&plun->lun_mutex);
12581 12580 mutex_exit(&pptr->port_mutex);
12582 12581
12583 12582 mutex_enter(&ptgt->tgt_mutex);
12584 12583 (void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
12585 12584 link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
12586 12585 mutex_exit(&ptgt->tgt_mutex);
12587 12586
12588 12587 #ifdef DEBUG
12589 12588 if (cdip != NULL) {
12590 12589 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12591 12590 fcp_trace, FCP_BUF_LEVEL_2, 0,
12592 12591 "Old dip=%p; New dip=%p don't match", old_dip,
12593 12592 cdip);
12594 12593 } else {
12595 12594 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12596 12595 fcp_trace, FCP_BUF_LEVEL_2, 0,
12597 12596 "Old dip=%p; New dip=NULL don't match", old_dip);
12598 12597 }
12599 12598 #endif
12600 12599
12601 12600 mutex_enter(&pptr->port_mutex);
12602 12601 mutex_enter(&plun->lun_mutex);
12603 12602 }
12604 12603
12605 12604 if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12606 12605 plun->lun_state &= ~(FCP_LUN_CHANGED);
12607 12606 if (ndi_devi_alloc(pptr->port_dip, nname,
12608 12607 DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
12609 12608 failure++;
12610 12609 goto end_of_fcp_create_dip;
12611 12610 }
12612 12611 }
12613 12612
12614 12613 /*
12615 12614 * Previously all the properties for the devinfo were destroyed here
12616 12615 * with a call to ndi_prop_remove_all(). Since this may cause loss of
12617 12616 * the devid property (and other properties established by the target
12618 12617 * driver or framework) which the code does not always recreate, this
12619 12618 * call was removed.
12620 12619 * This opens a theoretical possibility that we may return with a
12621 12620 * stale devid on the node if the scsi entity behind the fibre channel
12622 12621 * lun has changed.
12623 12622 */
12624 12623
12625 12624 /* decorate the node with compatible */
12626 12625 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
12627 12626 "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
12628 12627 failure++;
12629 12628 goto end_of_fcp_create_dip;
12630 12629 }
12631 12630
12632 12631 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
12633 12632 ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12634 12633 failure++;
12635 12634 goto end_of_fcp_create_dip;
12636 12635 }
12637 12636
12638 12637 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
12639 12638 ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12640 12639 failure++;
12641 12640 goto end_of_fcp_create_dip;
12642 12641 }
12643 12642
12644 12643 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12645 12644 t_pwwn[16] = '\0';
12646 12645 if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
12647 12646 != DDI_PROP_SUCCESS) {
12648 12647 failure++;
12649 12648 goto end_of_fcp_create_dip;
12650 12649 }
12651 12650
12652 12651 	/*
12653 12652 	 * If there is no hard address we may have to fall back to the
12654 12653 	 * WWN. It is important to recognize this early so ssd can be
12655 12654 	 * informed of the right interconnect type.
12656 12655 	 */
12658 12657 if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
12659 12658 tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12660 12659 } else {
12661 12660 tgt_id = ptgt->tgt_d_id;
12662 12661 }
12663 12662
12664 12663 if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
12665 12664 tgt_id) != DDI_PROP_SUCCESS) {
12666 12665 failure++;
12667 12666 goto end_of_fcp_create_dip;
12668 12667 }
12669 12668
12670 12669 if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
12671 12670 (int)plun->lun_num) != DDI_PROP_SUCCESS) {
12672 12671 failure++;
12673 12672 goto end_of_fcp_create_dip;
12674 12673 }
12675 12674 bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12676 12675 if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
12677 12676 sam_lun) != DDI_PROP_SUCCESS) {
12678 12677 failure++;
12679 12678 goto end_of_fcp_create_dip;
12680 12679 }
12681 12680
12682 12681 end_of_fcp_create_dip:
12683 12682 scsi_hba_nodename_compatible_free(nname, compatible);
12684 12683
12685 12684 if (cdip != NULL && failure) {
12686 12685 (void) ndi_prop_remove_all(cdip);
12687 12686 (void) ndi_devi_free(cdip);
12688 12687 cdip = NULL;
12689 12688 }
12690 12689
12691 12690 return (cdip);
12692 12691 }
12693 12692
12694 12693 /*
12695 12694 * Function: fcp_create_pip
12696 12695 *
12697 12696 * Description: Creates a Path Id for the LUN specified by the caller.
12698 12697 *
12699 12698 * Argument: plun Lun structure
12700 12699 * link_cnt Link state count.
12701 12700 * tgt_cnt Target state count.
12702 12701 *
12703 12702 * Return Value: NULL if it failed
12704 12703 * mdi_pathinfo_t structure address if it succeeded
12705 12704 *
12706 12705 * Context: Kernel context
12707 12706 */
12708 12707 static mdi_pathinfo_t *
12709 12708 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12710 12709 {
12711 12710 int i;
12712 12711 char buf[MAXNAMELEN];
12713 12712 char uaddr[MAXNAMELEN];
12714 12713 int failure = 0;
12715 12714 uint32_t tgt_id;
12716 12715 uint64_t sam_lun;
12717 12716 struct fcp_tgt *ptgt = plun->lun_tgt;
12718 12717 struct fcp_port *pptr = ptgt->tgt_port;
12719 12718 dev_info_t *pdip = pptr->port_dip;
12720 12719 mdi_pathinfo_t *pip = NULL;
12721 12720 mdi_pathinfo_t *old_pip = PIP(plun->lun_cip);
12722 12721 char *nname = NULL;
12723 12722 char **compatible = NULL;
12724 12723 int ncompatible;
12725 12724 char *scsi_binding_set;
12726 12725 char t_pwwn[17];
12727 12726
12728 12727 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12729 12728 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12730 12729
12731 12730 scsi_binding_set = "vhci";
12732 12731
12733 12732 /* determine the node name and compatible */
12734 12733 scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12735 12734 plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12736 12735
12737 12736 if (nname == NULL) {
12738 12737 #ifdef DEBUG
12739 12738 cmn_err(CE_WARN, "fcp_create_dip: %s%d: no driver for "
12740 12739 "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12741 12740 " compatible: %s",
12742 12741 ddi_driver_name(pdip), ddi_get_instance(pdip),
12743 12742 ptgt->tgt_port_wwn.raw_wwn[0],
12744 12743 ptgt->tgt_port_wwn.raw_wwn[1],
12745 12744 ptgt->tgt_port_wwn.raw_wwn[2],
12746 12745 ptgt->tgt_port_wwn.raw_wwn[3],
12747 12746 ptgt->tgt_port_wwn.raw_wwn[4],
12748 12747 ptgt->tgt_port_wwn.raw_wwn[5],
12749 12748 ptgt->tgt_port_wwn.raw_wwn[6],
12750 12749 ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12751 12750 *compatible);
12752 12751 #endif /* DEBUG */
12753 12752 failure++;
12754 12753 goto end_of_fcp_create_pip;
12755 12754 }
12756 12755
12757 12756 pip = fcp_find_existing_pip(plun, pdip);
12758 12757
12759 12758 	/*
12760 12759 	 * If the old_pip does not match the pip, some property has
12761 12760 	 * changed; since we will be using the pip, the old_pip must be
12762 12761 	 * offlined. If the state contains FCP_LUN_CHANGED, the dtype for
12763 12762 	 * the device has been updated: offline the old path and create
12764 12763 	 * a new one with the new device type.
12765 12764 	 * Refer to bug: 4764752
12766 12765 	 */
12767 12766 if (old_pip && (pip != old_pip ||
12768 12767 plun->lun_state & FCP_LUN_CHANGED)) {
12769 12768 plun->lun_state &= ~(FCP_LUN_INIT);
12770 12769 mutex_exit(&plun->lun_mutex);
12771 12770 mutex_exit(&pptr->port_mutex);
12772 12771
12773 12772 mutex_enter(&ptgt->tgt_mutex);
12774 12773 (void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12775 12774 FCP_OFFLINE, lcount, tcount,
12776 12775 NDI_DEVI_REMOVE, 0);
12777 12776 mutex_exit(&ptgt->tgt_mutex);
12778 12777
12779 12778 if (pip != NULL) {
12780 12779 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12781 12780 fcp_trace, FCP_BUF_LEVEL_2, 0,
12782 12781 "Old pip=%p; New pip=%p don't match",
12783 12782 old_pip, pip);
12784 12783 } else {
12785 12784 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12786 12785 fcp_trace, FCP_BUF_LEVEL_2, 0,
12787 12786 "Old pip=%p; New pip=NULL don't match",
12788 12787 old_pip);
12789 12788 }
12790 12789
12791 12790 mutex_enter(&pptr->port_mutex);
12792 12791 mutex_enter(&plun->lun_mutex);
12793 12792 }
12794 12793
12795 12794 	/*
12796 12795 	 * Since FC_WWN_SIZE is 8 bytes and, unlike lun_guid_size, does not
12797 12796 	 * depend on the target, the same truncation should not happen here
12798 12797 	 * unless the standards change FC_WWN_SIZE to something larger than
12799 12798 	 * MAXNAMELEN (currently 255 bytes).
12800 12799 	 */
12802 12801
12803 12802 for (i = 0; i < FC_WWN_SIZE; i++) {
12804 12803 (void) sprintf(&buf[i << 1], "%02x",
12805 12804 ptgt->tgt_port_wwn.raw_wwn[i]);
12806 12805 }
12807 12806
12808 12807 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12809 12808 buf, plun->lun_num);
12810 12809
12811 12810 if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12812 12811 /*
12813 12812 * Release the locks before calling into
12814 12813 * mdi_pi_alloc_compatible() since this can result in a
12815 12814 * callback into fcp which can result in a deadlock
12816 12815 * (see bug # 4870272).
12817 12816 *
12818 12817 * Basically, what we are trying to avoid is the scenario where
12819 12818 * one thread does ndi_devi_enter() and tries to grab
12820 12819 * fcp_mutex and another does it the other way round.
12821 12820 *
12822 12821 * But before we do that, make sure that nobody releases the
12823 12822 * port in the meantime. We can do this by setting a flag.
12824 12823 */
12825 12824 plun->lun_state &= ~(FCP_LUN_CHANGED);
12826 12825 pptr->port_state |= FCP_STATE_IN_MDI;
12827 12826 mutex_exit(&plun->lun_mutex);
12828 12827 mutex_exit(&pptr->port_mutex);
12829 12828 if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12830 12829 uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12831 12830 			fcp_log(CE_WARN, pptr->port_dip,
12832 12831 			    "!path alloc failed: %p", (void *)plun);
12833 12832 mutex_enter(&pptr->port_mutex);
12834 12833 mutex_enter(&plun->lun_mutex);
12835 12834 pptr->port_state &= ~FCP_STATE_IN_MDI;
12836 12835 failure++;
12837 12836 goto end_of_fcp_create_pip;
12838 12837 }
12839 12838 mutex_enter(&pptr->port_mutex);
12840 12839 mutex_enter(&plun->lun_mutex);
12841 12840 pptr->port_state &= ~FCP_STATE_IN_MDI;
12842 12841 } else {
12843 12842 (void) mdi_prop_remove(pip, NULL);
12844 12843 }
12845 12844
12846 12845 mdi_pi_set_phci_private(pip, (caddr_t)plun);
12847 12846
12848 12847 if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12849 12848 ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12850 12849 != DDI_PROP_SUCCESS) {
12851 12850 failure++;
12852 12851 goto end_of_fcp_create_pip;
12853 12852 }
12854 12853
12855 12854 if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12856 12855 ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12857 12856 != DDI_PROP_SUCCESS) {
12858 12857 failure++;
12859 12858 goto end_of_fcp_create_pip;
12860 12859 }
12861 12860
12862 12861 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12863 12862 t_pwwn[16] = '\0';
12864 12863 if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12865 12864 != DDI_PROP_SUCCESS) {
12866 12865 failure++;
12867 12866 goto end_of_fcp_create_pip;
12868 12867 }
12869 12868
12870 12869 	/*
12871 12870 	 * If there is no hard address we may have to fall back to the
12872 12871 	 * WWN. It is important to recognize this early so ssd can be
12873 12872 	 * informed of the right interconnect type.
12874 12873 	 */
12876 12875 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12877 12876 ptgt->tgt_hard_addr != 0) {
12878 12877 tgt_id = (uint32_t)
12879 12878 fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12880 12879 } else {
12881 12880 tgt_id = ptgt->tgt_d_id;
12882 12881 }
12883 12882
12884 12883 if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12885 12884 != DDI_PROP_SUCCESS) {
12886 12885 failure++;
12887 12886 goto end_of_fcp_create_pip;
12888 12887 }
12889 12888
12890 12889 if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12891 12890 != DDI_PROP_SUCCESS) {
12892 12891 failure++;
12893 12892 goto end_of_fcp_create_pip;
12894 12893 }
12895 12894 bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12896 12895 if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12897 12896 != DDI_PROP_SUCCESS) {
12898 12897 failure++;
12899 12898 goto end_of_fcp_create_pip;
12900 12899 }
12901 12900
12902 12901 end_of_fcp_create_pip:
12903 12902 scsi_hba_nodename_compatible_free(nname, compatible);
12904 12903
12905 12904 if (pip != NULL && failure) {
12906 12905 (void) mdi_prop_remove(pip, NULL);
12907 12906 mutex_exit(&plun->lun_mutex);
12908 12907 mutex_exit(&pptr->port_mutex);
12909 12908 (void) mdi_pi_free(pip, 0);
12910 12909 mutex_enter(&pptr->port_mutex);
12911 12910 mutex_enter(&plun->lun_mutex);
12912 12911 pip = NULL;
12913 12912 }
12914 12913
12915 12914 return (pip);
12916 12915 }
12917 12916
12918 12917 static dev_info_t *
12919 12918 fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
12920 12919 {
12921 12920 uint_t nbytes;
12922 12921 uchar_t *bytes;
12923 12922 uint_t nwords;
12924 12923 uint32_t tgt_id;
12925 12924 int *words;
12926 12925 dev_info_t *cdip;
12927 12926 dev_info_t *ndip;
12928 12927 struct fcp_tgt *ptgt = plun->lun_tgt;
12929 12928 struct fcp_port *pptr = ptgt->tgt_port;
12930 12929 int circular;
12931 12930
12932 12931 ndi_devi_enter(pdip, &circular);
12933 12932
12934 12933 ndip = (dev_info_t *)DEVI(pdip)->devi_child;
12935 12934 while ((cdip = ndip) != NULL) {
12936 12935 ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;
12937 12936
12938 12937 if (strcmp(DEVI(cdip)->devi_node_name, name)) {
12939 12938 continue;
12940 12939 }
12941 12940
12942 12941 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12943 12942 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
12944 12943 &nbytes) != DDI_PROP_SUCCESS) {
12945 12944 continue;
12946 12945 }
12947 12946
12948 12947 if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12949 12948 if (bytes != NULL) {
12950 12949 ddi_prop_free(bytes);
12951 12950 }
12952 12951 continue;
12953 12952 }
12954 12953 ASSERT(bytes != NULL);
12955 12954
12956 12955 if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
12957 12956 ddi_prop_free(bytes);
12958 12957 continue;
12959 12958 }
12960 12959
12961 12960 ddi_prop_free(bytes);
12962 12961
12963 12962 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12964 12963 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12965 12964 &nbytes) != DDI_PROP_SUCCESS) {
12966 12965 continue;
12967 12966 }
12968 12967
12969 12968 if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12970 12969 if (bytes != NULL) {
12971 12970 ddi_prop_free(bytes);
12972 12971 }
12973 12972 continue;
12974 12973 }
12975 12974 ASSERT(bytes != NULL);
12976 12975
12977 12976 if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
12978 12977 ddi_prop_free(bytes);
12979 12978 continue;
12980 12979 }
12981 12980
12982 12981 ddi_prop_free(bytes);
12983 12982
12984 12983 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12985 12984 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
12986 12985 &nwords) != DDI_PROP_SUCCESS) {
12987 12986 continue;
12988 12987 }
12989 12988
12990 12989 if (nwords != 1 || words == NULL) {
12991 12990 if (words != NULL) {
12992 12991 ddi_prop_free(words);
12993 12992 }
12994 12993 continue;
12995 12994 }
12996 12995 ASSERT(words != NULL);
12997 12996
12998 12997 		/*
12999 12998 		 * If there is no hard address we may have to fall back to
13000 12999 		 * the WWN. It is important to recognize this early so ssd
13001 13000 		 * can be informed of the right interconnect type.
13002 13001 		 */
13004 13003 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
13005 13004 ptgt->tgt_hard_addr != 0) {
13006 13005 tgt_id =
13007 13006 (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
13008 13007 } else {
13009 13008 tgt_id = ptgt->tgt_d_id;
13010 13009 }
13011 13010
13012 13011 if (tgt_id != (uint32_t)*words) {
13013 13012 ddi_prop_free(words);
13014 13013 continue;
13015 13014 }
13016 13015 ddi_prop_free(words);
13017 13016
13018 13017 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
13019 13018 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
13020 13019 &nwords) != DDI_PROP_SUCCESS) {
13021 13020 continue;
13022 13021 }
13023 13022
13024 13023 if (nwords != 1 || words == NULL) {
13025 13024 if (words != NULL) {
13026 13025 ddi_prop_free(words);
13027 13026 }
13028 13027 continue;
13029 13028 }
13030 13029 ASSERT(words != NULL);
13031 13030
13032 13031 if (plun->lun_num == (uint16_t)*words) {
13033 13032 ddi_prop_free(words);
13034 13033 break;
13035 13034 }
13036 13035 ddi_prop_free(words);
13037 13036 }
13038 13037 ndi_devi_exit(pdip, circular);
13039 13038
13040 13039 return (cdip);
13041 13040 }
13042 13041
13043 13042
13044 13043 static int
13045 13044 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
13046 13045 {
13047 13046 dev_info_t *pdip;
13048 13047 char buf[MAXNAMELEN];
13049 13048 char uaddr[MAXNAMELEN];
13050 13049 int rval = FC_FAILURE;
13051 13050
13052 13051 ASSERT(MUTEX_HELD(&plun->lun_mutex));
13053 13052
13054 13053 pdip = plun->lun_tgt->tgt_port->port_dip;
13055 13054
13056 13055 	/*
13057 13056 	 * Check whether pip (and not plun->lun_cip) is NULL. plun->lun_cip
13058 13057 	 * can be non-NULL even when the LUN is gone, as in the case where a
13059 13058 	 * LUN is configured and then deleted on the device end (the T3/T4
13060 13059 	 * case). In such cases, pip will be NULL.
13061 13060 	 *
13062 13061 	 * If the device generates an RSCN, the LUN gets offlined when it
13063 13062 	 * disappears and a new LUN is created when it is rediscovered on the
13064 13063 	 * device. If we checked lun_cip here instead, the LUN would never be
13065 13064 	 * onlined because this function would return FC_SUCCESS.
13066 13065 	 *
13067 13066 	 * The behavior differs on other devices. On an HDS, for instance, no
13068 13067 	 * RSCN was generated by the device, but the next I/O returned a check
13069 13068 	 * condition and rediscovery was triggered that way; in such cases
13070 13069 	 * this path is not exercised.
13071 13070 	 */
13073 13072 if (pip == NULL) {
13074 13073 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
13075 13074 fcp_trace, FCP_BUF_LEVEL_4, 0,
13076 13075 "fcp_is_pip_present: plun->lun_cip is NULL: "
13077 13076 "plun: %p lun state: %x num: %d target state: %x",
13078 13077 plun, plun->lun_state, plun->lun_num,
13079 13078 plun->lun_tgt->tgt_port->port_state);
13080 13079 return (rval);
13081 13080 }
13082 13081
13083 13082 fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
13084 13083
13085 13084 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13086 13085
13087 13086 if (plun->lun_old_guid) {
13088 13087 if (mdi_pi_find(pdip, plun->lun_old_guid, uaddr) == pip) {
13089 13088 rval = FC_SUCCESS;
13090 13089 }
13091 13090 } else {
13092 13091 if (mdi_pi_find(pdip, plun->lun_guid, uaddr) == pip) {
13093 13092 rval = FC_SUCCESS;
13094 13093 }
13095 13094 }
13096 13095 return (rval);
13097 13096 }
13098 13097
13099 13098 static mdi_pathinfo_t *
13100 13099 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
13101 13100 {
13102 13101 char buf[MAXNAMELEN];
13103 13102 char uaddr[MAXNAMELEN];
13104 13103 mdi_pathinfo_t *pip;
13105 13104 struct fcp_tgt *ptgt = plun->lun_tgt;
13106 13105 struct fcp_port *pptr = ptgt->tgt_port;
13107 13106
13108 13107 ASSERT(MUTEX_HELD(&pptr->port_mutex));
13109 13108
13110 13109 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
13111 13110 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13112 13111
13113 13112 pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
13114 13113
13115 13114 return (pip);
13116 13115 }
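
/*
 * A minimal sketch (hypothetical helper, not in this driver) of how the
 * MPxIO unit address used by fcp_is_pip_present() and
 * fcp_find_existing_pip() above is composed: "w<port-wwn>,<lun>" in hex,
 * e.g. "w21000020372b6e1f,2", which is what mdi_pi_find() matches against.
 */
static void
example_build_unit_address(struct fcp_lun *plun, char *uaddr, size_t len)
{
	char wwn[MAXNAMELEN];

	/* ASCII-ify the target port WWN, then append the LUN number in hex */
	fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, wwn);
	(void) snprintf(uaddr, len, "w%s,%x", wwn, plun->lun_num);
}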
13117 13116
13118 13117
13119 13118 static int
13120 13119 fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
13121 13120 int tcount, int flags, int *circ)
13122 13121 {
13123 13122 int rval;
13124 13123 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
13125 13124 struct fcp_tgt *ptgt = plun->lun_tgt;
13126 13125 dev_info_t *cdip = NULL;
13127 13126
13128 13127 ASSERT(MUTEX_HELD(&pptr->port_mutex));
13129 13128 ASSERT(MUTEX_HELD(&plun->lun_mutex));
13130 13129
13131 13130 if (plun->lun_cip == NULL) {
13132 13131 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13133 13132 fcp_trace, FCP_BUF_LEVEL_3, 0,
13134 13133 "fcp_online_child: plun->lun_cip is NULL: "
13135 13134 "plun: %p state: %x num: %d target state: %x",
13136 13135 plun, plun->lun_state, plun->lun_num,
13137 13136 plun->lun_tgt->tgt_port->port_state);
13138 13137 return (NDI_FAILURE);
13139 13138 }
13140 13139 again:
13141 13140 if (plun->lun_mpxio == 0) {
13142 13141 cdip = DIP(cip);
13143 13142 mutex_exit(&plun->lun_mutex);
13144 13143 mutex_exit(&pptr->port_mutex);
13145 13144
13146 13145 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13147 13146 fcp_trace, FCP_BUF_LEVEL_3, 0,
13148 13147 "!Invoking ndi_devi_online for %s: target=%x lun=%x",
13149 13148 ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13150 13149
13151 13150 /*
13152 13151 		 * We could check for FCP_LUN_INIT here, but the chances
13153 13152 		 * of getting here when the LUN is already in FCP_LUN_INIT
13154 13153 		 * are slim, and a duplicate ndi_devi_online wouldn't
13155 13154 		 * hurt either (as the node would already have been
13156 13155 		 * in CF2).
13157 13156 */
13158 13157 if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
13159 13158 rval = ndi_devi_bind_driver(cdip, flags);
13160 13159 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13161 13160 fcp_trace, FCP_BUF_LEVEL_3, 0,
13162 13161 "!Invoking ndi_devi_bind_driver: rval=%d", rval);
13163 13162 } else {
13164 13163 rval = ndi_devi_online(cdip, flags);
13165 13164 }
13166 13165
13167 13166 /*
13168 13167 		 * We log the message into the trace buffer if the device
13169 13168 		 * is "ses" and into syslog for any other device
13170 13169 		 * type.  This prevents the ndi_devi_online failure
13171 13170 		 * message that appears for V880/A5K ses devices.
13172 13171 */
13173 13172 if (rval == NDI_SUCCESS) {
13174 13173 mutex_enter(&ptgt->tgt_mutex);
13175 13174 plun->lun_state |= FCP_LUN_INIT;
13176 13175 mutex_exit(&ptgt->tgt_mutex);
13177 13176 } else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
13178 13177 fcp_log(CE_NOTE, pptr->port_dip,
13179 13178 "!ndi_devi_online:"
13180 13179 " failed for %s: target=%x lun=%x %x",
13181 13180 ddi_get_name(cdip), ptgt->tgt_d_id,
13182 13181 plun->lun_num, rval);
13183 13182 } else {
13184 13183 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13185 13184 fcp_trace, FCP_BUF_LEVEL_3, 0,
13186 13185 " !ndi_devi_online:"
13187 13186 " failed for %s: target=%x lun=%x %x",
13188 13187 ddi_get_name(cdip), ptgt->tgt_d_id,
13189 13188 plun->lun_num, rval);
13190 13189 }
13191 13190 } else {
13192 13191 cdip = mdi_pi_get_client(PIP(cip));
13193 13192 mutex_exit(&plun->lun_mutex);
13194 13193 mutex_exit(&pptr->port_mutex);
13195 13194
13196 13195 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13197 13196 fcp_trace, FCP_BUF_LEVEL_3, 0,
13198 13197 "!Invoking mdi_pi_online for %s: target=%x lun=%x",
13199 13198 ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13200 13199
13201 13200 /*
13202 13201 * Hold path and exit phci to avoid deadlock with power
13203 13202 * management code during mdi_pi_online.
13204 13203 */
13205 13204 mdi_hold_path(PIP(cip));
13206 13205 mdi_devi_exit_phci(pptr->port_dip, *circ);
13207 13206
13208 13207 rval = mdi_pi_online(PIP(cip), flags);
13209 13208
13210 13209 mdi_devi_enter_phci(pptr->port_dip, circ);
13211 13210 mdi_rele_path(PIP(cip));
13212 13211
13213 13212 if (rval == MDI_SUCCESS) {
13214 13213 mutex_enter(&ptgt->tgt_mutex);
13215 13214 plun->lun_state |= FCP_LUN_INIT;
13216 13215 mutex_exit(&ptgt->tgt_mutex);
13217 13216
13218 13217 /*
13219 13218 * Clear MPxIO path permanent disable in case
13220 13219 * fcp hotplug dropped the offline event.
13221 13220 */
13222 13221 (void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13223 13222
13224 13223 } else if (rval == MDI_NOT_SUPPORTED) {
13225 13224 child_info_t *old_cip = cip;
13226 13225
13227 13226 /*
13228 13227 * MPxIO does not support this device yet.
13229 13228 * Enumerate in legacy mode.
13230 13229 */
13231 13230 mutex_enter(&pptr->port_mutex);
13232 13231 mutex_enter(&plun->lun_mutex);
13233 13232 plun->lun_mpxio = 0;
13234 13233 plun->lun_cip = NULL;
13235 13234 cdip = fcp_create_dip(plun, lcount, tcount);
13236 13235 plun->lun_cip = cip = CIP(cdip);
13237 13236 if (cip == NULL) {
13238 13237 fcp_log(CE_WARN, pptr->port_dip,
13239 13238 "!fcp_online_child: "
13240 13239 "Create devinfo failed for LU=%p", plun);
13241 13240 mutex_exit(&plun->lun_mutex);
13242 13241
13243 13242 mutex_enter(&ptgt->tgt_mutex);
13244 13243 plun->lun_state |= FCP_LUN_OFFLINE;
13245 13244 mutex_exit(&ptgt->tgt_mutex);
13246 13245
13247 13246 mutex_exit(&pptr->port_mutex);
13248 13247
13249 13248 /*
13250 13249 * free the mdi_pathinfo node
13251 13250 */
13252 13251 (void) mdi_pi_free(PIP(old_cip), 0);
13253 13252 } else {
13254 13253 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13255 13254 fcp_trace, FCP_BUF_LEVEL_3, 0,
13256 13255 "fcp_online_child: creating devinfo "
13257 13256 "node 0x%p for plun 0x%p",
13258 13257 cip, plun);
13259 13258 mutex_exit(&plun->lun_mutex);
13260 13259 mutex_exit(&pptr->port_mutex);
13261 13260 /*
13262 13261 * free the mdi_pathinfo node
13263 13262 */
13264 13263 (void) mdi_pi_free(PIP(old_cip), 0);
13265 13264 mutex_enter(&pptr->port_mutex);
13266 13265 mutex_enter(&plun->lun_mutex);
13267 13266 goto again;
13268 13267 }
13269 13268 } else {
13270 13269 if (cdip) {
13271 13270 fcp_log(CE_NOTE, pptr->port_dip,
13272 13271 "!fcp_online_child: mdi_pi_online:"
13273 13272 " failed for %s: target=%x lun=%x %x",
13274 13273 ddi_get_name(cdip), ptgt->tgt_d_id,
13275 13274 plun->lun_num, rval);
13276 13275 }
13277 13276 }
13278 13277 rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13279 13278 }
13280 13279
13281 13280 if (rval == NDI_SUCCESS) {
13282 13281 if (cdip) {
13283 13282 (void) ndi_event_retrieve_cookie(
13284 13283 pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
13285 13284 &fcp_insert_eid, NDI_EVENT_NOPASS);
13286 13285 (void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
13287 13286 cdip, fcp_insert_eid, NULL);
13288 13287 }
13289 13288 }
13290 13289 mutex_enter(&pptr->port_mutex);
13291 13290 mutex_enter(&plun->lun_mutex);
13292 13291 return (rval);
13293 13292 }
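
/*
 * A minimal sketch (hypothetical helper, not in this driver) of the
 * hold/exit-phci bracketing used above around mdi_pi_online() and
 * mdi_pi_offline() to avoid deadlocking with the power management code.
 */
static int
example_pi_online(dev_info_t *phci_dip, mdi_pathinfo_t *pip, int flags,
    int *circ)
{
	int rval;

	mdi_hold_path(pip);			/* pin the pathinfo node */
	mdi_devi_exit_phci(phci_dip, *circ);	/* release the pHCI lock */

	rval = mdi_pi_online(pip, flags);

	mdi_devi_enter_phci(phci_dip, circ);	/* re-acquire the pHCI lock */
	mdi_rele_path(pip);

	return (rval);
}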
13294 13293
13295 13294 /* ARGSUSED */
13296 13295 static int
13297 13296 fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
13298 13297 int tcount, int flags, int *circ)
13299 13298 {
13300 13299 int rval;
13301 13300 int lun_mpxio;
13302 13301 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
13303 13302 struct fcp_tgt *ptgt = plun->lun_tgt;
13304 13303 dev_info_t *cdip;
13305 13304
13306 13305 ASSERT(MUTEX_HELD(&plun->lun_mutex));
13307 13306 ASSERT(MUTEX_HELD(&pptr->port_mutex));
13308 13307
13309 13308 if (plun->lun_cip == NULL) {
13310 13309 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13311 13310 fcp_trace, FCP_BUF_LEVEL_3, 0,
13312 13311 "fcp_offline_child: plun->lun_cip is NULL: "
13313 13312 "plun: %p lun state: %x num: %d target state: %x",
13314 13313 plun, plun->lun_state, plun->lun_num,
13315 13314 plun->lun_tgt->tgt_port->port_state);
13316 13315 return (NDI_FAILURE);
13317 13316 }
13318 13317
13319 13318 /*
13320 13319 * We will use this value twice. Make a copy to be sure we use
13321 13320 * the same value in both places.
13322 13321 */
13323 13322 lun_mpxio = plun->lun_mpxio;
13324 13323
13325 13324 if (lun_mpxio == 0) {
13326 13325 cdip = DIP(cip);
13327 13326 mutex_exit(&plun->lun_mutex);
13328 13327 mutex_exit(&pptr->port_mutex);
13329 13328 rval = ndi_devi_offline(DIP(cip), flags);
13330 13329 if (rval != NDI_SUCCESS) {
13331 13330 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13332 13331 fcp_trace, FCP_BUF_LEVEL_3, 0,
13333 13332 "fcp_offline_child: ndi_devi_offline failed "
13334 13333 "rval=%x cip=%p", rval, cip);
13335 13334 }
13336 13335 } else {
13337 13336 cdip = mdi_pi_get_client(PIP(cip));
13338 13337 mutex_exit(&plun->lun_mutex);
13339 13338 mutex_exit(&pptr->port_mutex);
13340 13339
13341 13340 /*
13342 13341 * Exit phci to avoid deadlock with power management code
13343 13342 * during mdi_pi_offline
13344 13343 */
13345 13344 mdi_hold_path(PIP(cip));
13346 13345 mdi_devi_exit_phci(pptr->port_dip, *circ);
13347 13346
13348 13347 rval = mdi_pi_offline(PIP(cip), flags);
13349 13348
13350 13349 mdi_devi_enter_phci(pptr->port_dip, circ);
13351 13350 mdi_rele_path(PIP(cip));
13352 13351
13353 13352 rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13354 13353 }
13355 13354
13356 13355 mutex_enter(&ptgt->tgt_mutex);
13357 13356 plun->lun_state &= ~FCP_LUN_INIT;
13358 13357 mutex_exit(&ptgt->tgt_mutex);
13359 13358
13360 13359 if (rval == NDI_SUCCESS) {
13361 13360 cdip = NULL;
13362 13361 if (flags & NDI_DEVI_REMOVE) {
13363 13362 mutex_enter(&plun->lun_mutex);
13364 13363 /*
13365 13364 			 * If the guid of the LUN changes, lun_cip will not
13366 13365 			 * be equal to cip; after offlining the LUN with the
13367 13366 			 * old guid, we should keep lun_cip since it is the cip
13368 13367 			 * of the LUN with the new guid.
13369 13368 			 * Otherwise remove our reference to the child node.
13370 13369 *
13371 13370 * This must be done before the child node is freed,
13372 13371 * otherwise other threads could see a stale lun_cip
13373 13372 * pointer.
13374 13373 */
13375 13374 if (plun->lun_cip == cip) {
13376 13375 plun->lun_cip = NULL;
13377 13376 }
13378 13377 if (plun->lun_old_guid) {
13379 13378 kmem_free(plun->lun_old_guid,
13380 13379 plun->lun_old_guid_size);
13381 13380 plun->lun_old_guid = NULL;
13382 13381 plun->lun_old_guid_size = 0;
13383 13382 }
13384 13383 mutex_exit(&plun->lun_mutex);
13385 13384 }
13386 13385 }
13387 13386
13388 13387 if (lun_mpxio != 0) {
13389 13388 if (rval == NDI_SUCCESS) {
13390 13389 /*
13391 13390 * Clear MPxIO path permanent disable as the path is
13392 13391 * already offlined.
13393 13392 */
13394 13393 (void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13395 13394
13396 13395 if (flags & NDI_DEVI_REMOVE) {
13397 13396 (void) mdi_pi_free(PIP(cip), 0);
13398 13397 }
13399 13398 } else {
13400 13399 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13401 13400 fcp_trace, FCP_BUF_LEVEL_3, 0,
13402 13401 "fcp_offline_child: mdi_pi_offline failed "
13403 13402 "rval=%x cip=%p", rval, cip);
13404 13403 }
13405 13404 }
13406 13405
13407 13406 mutex_enter(&pptr->port_mutex);
13408 13407 mutex_enter(&plun->lun_mutex);
13409 13408
13410 13409 if (cdip) {
13411 13410 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13412 13411 fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
13413 13412 " target=%x lun=%x", "ndi_offline",
13414 13413 ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13415 13414 }
13416 13415
13417 13416 return (rval);
13418 13417 }
13419 13418
13420 13419 static void
13421 13420 fcp_remove_child(struct fcp_lun *plun)
13422 13421 {
13423 13422 child_info_t *cip;
13424 13423 int circ;
13425 13424
13426 13425 ASSERT(MUTEX_HELD(&plun->lun_mutex));
13427 13426
13428 13427 if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
13429 13428 if (plun->lun_mpxio == 0) {
13430 13429 (void) ndi_prop_remove_all(DIP(plun->lun_cip));
13431 13430 (void) ndi_devi_free(DIP(plun->lun_cip));
13432 13431 plun->lun_cip = NULL;
13433 13432 } else {
13434 13433 /*
13435 13434 * Clear reference to the child node in the lun.
13436 13435 * This must be done before freeing it with mdi_pi_free
13437 13436 			 * see either a valid lun_cip or NULL when holding
13438 13437 * see either valid lun_cip or NULL when holding
13439 13438 * lun_mutex. We keep a copy in cip.
13440 13439 */
13441 13440 cip = plun->lun_cip;
13442 13441 plun->lun_cip = NULL;
13443 13442
13444 13443 mutex_exit(&plun->lun_mutex);
13445 13444 mutex_exit(&plun->lun_tgt->tgt_mutex);
13446 13445 mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);
13447 13446
13448 13447 mdi_devi_enter(
13449 13448 plun->lun_tgt->tgt_port->port_dip, &circ);
13450 13449
13451 13450 /*
13452 13451 * Exit phci to avoid deadlock with power management
13453 13452 * code during mdi_pi_offline
13454 13453 */
13455 13454 mdi_hold_path(PIP(cip));
13456 13455 mdi_devi_exit_phci(
13457 13456 plun->lun_tgt->tgt_port->port_dip, circ);
13458 13457 (void) mdi_pi_offline(PIP(cip),
13459 13458 NDI_DEVI_REMOVE);
13460 13459 mdi_devi_enter_phci(
13461 13460 plun->lun_tgt->tgt_port->port_dip, &circ);
13462 13461 mdi_rele_path(PIP(cip));
13463 13462
13464 13463 mdi_devi_exit(
13465 13464 plun->lun_tgt->tgt_port->port_dip, circ);
13466 13465
13467 13466 FCP_TRACE(fcp_logq,
13468 13467 plun->lun_tgt->tgt_port->port_instbuf,
13469 13468 fcp_trace, FCP_BUF_LEVEL_3, 0,
13470 13469 "lun=%p pip freed %p", plun, cip);
13471 13470
13472 13471 (void) mdi_prop_remove(PIP(cip), NULL);
13473 13472 (void) mdi_pi_free(PIP(cip), 0);
13474 13473
13475 13474 mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
13476 13475 mutex_enter(&plun->lun_tgt->tgt_mutex);
13477 13476 mutex_enter(&plun->lun_mutex);
13478 13477 }
13479 13478 } else {
13480 13479 plun->lun_cip = NULL;
13481 13480 }
13482 13481 }
13483 13482
13484 13483 /*
13485 13484 * called when a timeout occurs
13486 13485 *
13487 13486 * can be scheduled during an attach or resume (if not already running)
13488 13487 *
13489 13488 * one timeout is set up for all ports
13490 13489 *
13491 13490 * acquires and releases the global mutex
13492 13491 */
13493 13492 /*ARGSUSED*/
13494 13493 static void
13495 13494 fcp_watch(void *arg)
13496 13495 {
13497 13496 struct fcp_port *pptr;
13498 13497 struct fcp_ipkt *icmd;
13499 13498 struct fcp_ipkt *nicmd;
13500 13499 struct fcp_pkt *cmd;
13501 13500 struct fcp_pkt *ncmd;
13502 13501 struct fcp_pkt *tail;
13503 13502 struct fcp_pkt *pcmd;
13504 13503 struct fcp_pkt *save_head;
13505 13504 struct fcp_port *save_port;
13506 13505
13507 13506 /* increment global watchdog time */
13508 13507 fcp_watchdog_time += fcp_watchdog_timeout;
13509 13508
13510 13509 mutex_enter(&fcp_global_mutex);
13511 13510
13512 13511 /* scan each port in our list */
13513 13512 for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
13514 13513 save_port = fcp_port_head;
13515 13514 pptr->port_state |= FCP_STATE_IN_WATCHDOG;
13516 13515 mutex_exit(&fcp_global_mutex);
13517 13516
13518 13517 mutex_enter(&pptr->port_mutex);
13519 13518 if (pptr->port_ipkt_list == NULL &&
13520 13519 (pptr->port_state & (FCP_STATE_SUSPENDED |
13521 13520 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
13522 13521 pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13523 13522 mutex_exit(&pptr->port_mutex);
13524 13523 mutex_enter(&fcp_global_mutex);
13525 13524 goto end_of_watchdog;
13526 13525 }
13527 13526
13528 13527 /*
13529 13528 		 * We check if a list of targets needs to be offlined.
13530 13529 */
13531 13530 if (pptr->port_offline_tgts) {
13532 13531 fcp_scan_offline_tgts(pptr);
13533 13532 }
13534 13533
13535 13534 /*
13536 13535 * We check if a list of luns need to be offlined.
13537 13536 		 * We check if a list of luns needs to be offlined.
13538 13537 if (pptr->port_offline_luns) {
13539 13538 fcp_scan_offline_luns(pptr);
13540 13539 }
13541 13540
13542 13541 /*
13543 13542 * We check if a list of targets or luns need to be reset.
13544 13543 		 * We check if a list of targets or luns needs to be reset.
13545 13544 if (pptr->port_reset_list) {
13546 13545 fcp_check_reset_delay(pptr);
13547 13546 }
13548 13547
13549 13548 mutex_exit(&pptr->port_mutex);
13550 13549
13551 13550 /*
13552 13551 * This is where the pending commands (pkt) are checked for
13553 13552 * timeout.
13554 13553 */
13555 13554 mutex_enter(&pptr->port_pkt_mutex);
13556 13555 tail = pptr->port_pkt_tail;
13557 13556
13558 13557 for (pcmd = NULL, cmd = pptr->port_pkt_head;
13559 13558 cmd != NULL; cmd = ncmd) {
13560 13559 ncmd = cmd->cmd_next;
13561 13560 /*
13562 13561 * If a command is in this queue the bit CFLAG_IN_QUEUE
13563 13562 * must be set.
13564 13563 */
13565 13564 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
13566 13565 /*
13567 13566 			 * FCP_INVALID_TIMEOUT will be set for those
13568 13567 			 * commands that need to be failed, mostly those
13569 13568 			 * that could not be queued down within their
13570 13569 			 * "timeout" value.  cmd->cmd_timeout is used
13571 13570 			 * to try and requeue the command regularly.
13572 13571 */
13573 13572 if (cmd->cmd_timeout >= fcp_watchdog_time) {
13574 13573 /*
13575 13574 * This command hasn't timed out yet. Let's
13576 13575 * go to the next one.
13577 13576 */
13578 13577 pcmd = cmd;
13579 13578 goto end_of_loop;
13580 13579 }
13581 13580
13582 13581 if (cmd == pptr->port_pkt_head) {
13583 13582 ASSERT(pcmd == NULL);
13584 13583 pptr->port_pkt_head = cmd->cmd_next;
13585 13584 } else {
13586 13585 ASSERT(pcmd != NULL);
13587 13586 pcmd->cmd_next = cmd->cmd_next;
13588 13587 }
13589 13588
13590 13589 if (cmd == pptr->port_pkt_tail) {
13591 13590 ASSERT(cmd->cmd_next == NULL);
13592 13591 pptr->port_pkt_tail = pcmd;
13593 13592 if (pcmd) {
13594 13593 pcmd->cmd_next = NULL;
13595 13594 }
13596 13595 }
13597 13596 cmd->cmd_next = NULL;
13598 13597
13599 13598 /*
13600 13599 			 * Save the current head before dropping the
13601 13600 			 * mutex.  If the head doesn't remain the
13602 13601 			 * same after re-acquiring the mutex, just
13603 13602 			 * bail out and revisit on the next tick.
13604 13603 			 *
13605 13604 			 * PS: The tail pointer can change as the commands
13606 13605 			 * get requeued after a failure to retransport.
13607 13606 */
13608 13607 save_head = pptr->port_pkt_head;
13609 13608 mutex_exit(&pptr->port_pkt_mutex);
13610 13609
13611 13610 if (cmd->cmd_fp_pkt->pkt_timeout ==
13612 13611 FCP_INVALID_TIMEOUT) {
13613 13612 struct scsi_pkt *pkt = cmd->cmd_pkt;
13614 13613 struct fcp_lun *plun;
13615 13614 struct fcp_tgt *ptgt;
13616 13615
13617 13616 plun = ADDR2LUN(&pkt->pkt_address);
13618 13617 ptgt = plun->lun_tgt;
13619 13618
13620 13619 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13621 13620 fcp_trace, FCP_BUF_LEVEL_2, 0,
13622 13621 "SCSI cmd 0x%x to D_ID=%x timed out",
13623 13622 pkt->pkt_cdbp[0], ptgt->tgt_d_id);
13624 13623
13625 13624 cmd->cmd_state == FCP_PKT_ABORTING ?
13626 13625 fcp_fail_cmd(cmd, CMD_RESET,
13627 13626 STAT_DEV_RESET) : fcp_fail_cmd(cmd,
13628 13627 CMD_TIMEOUT, STAT_ABORTED);
13629 13628 } else {
13630 13629 fcp_retransport_cmd(pptr, cmd);
13631 13630 }
13632 13631 mutex_enter(&pptr->port_pkt_mutex);
13633 13632 if (save_head && save_head != pptr->port_pkt_head) {
13634 13633 /*
13635 13634 				 * Looks like the linked list got changed (this
13636 13635 				 * mostly happens when the OFFLINE LUN code starts
13637 13636 				 * returning overflow queue commands in
13638 13637 				 * parallel).  So bail out and revisit during
13639 13638 				 * the next tick.
13640 13639 */
13641 13640 break;
13642 13641 }
13643 13642 end_of_loop:
13644 13643 /*
13645 13644 			 * Scan only up to the previously known tail pointer
13646 13645 * to avoid excessive processing - lots of new packets
13647 13646 * could have been added to the tail or the old ones
13648 13647 * re-queued.
13649 13648 */
13650 13649 if (cmd == tail) {
13651 13650 break;
13652 13651 }
13653 13652 }
13654 13653 mutex_exit(&pptr->port_pkt_mutex);
13655 13654
13656 13655 mutex_enter(&pptr->port_mutex);
13657 13656 for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
13658 13657 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
13659 13658
13660 13659 nicmd = icmd->ipkt_next;
13661 13660 if ((icmd->ipkt_restart != 0) &&
13662 13661 (icmd->ipkt_restart >= fcp_watchdog_time)) {
13663 13662 /* packet has not timed out */
13664 13663 continue;
13665 13664 }
13666 13665
13667 13666 /* time for packet re-transport */
13668 13667 if (icmd == pptr->port_ipkt_list) {
13669 13668 pptr->port_ipkt_list = icmd->ipkt_next;
13670 13669 if (pptr->port_ipkt_list) {
13671 13670 pptr->port_ipkt_list->ipkt_prev =
13672 13671 NULL;
13673 13672 }
13674 13673 } else {
13675 13674 icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
13676 13675 if (icmd->ipkt_next) {
13677 13676 icmd->ipkt_next->ipkt_prev =
13678 13677 icmd->ipkt_prev;
13679 13678 }
13680 13679 }
13681 13680 icmd->ipkt_next = NULL;
13682 13681 icmd->ipkt_prev = NULL;
13683 13682 mutex_exit(&pptr->port_mutex);
13684 13683
13685 13684 if (fcp_is_retryable(icmd)) {
13686 13685 fc_ulp_rscn_info_t *rscnp =
13687 13686 (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
13688 13687 pkt_ulp_rscn_infop;
13689 13688
13690 13689 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13691 13690 fcp_trace, FCP_BUF_LEVEL_2, 0,
13692 13691 "%x to D_ID=%x Retrying..",
13693 13692 icmd->ipkt_opcode,
13694 13693 icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);
13695 13694
13696 13695 /*
13697 13696 * Update the RSCN count in the packet
13698 13697 * before resending.
13699 13698 */
13700 13699
13701 13700 if (rscnp != NULL) {
13702 13701 rscnp->ulp_rscn_count =
13703 13702 fc_ulp_get_rscn_count(pptr->
13704 13703 port_fp_handle);
13705 13704 }
13706 13705
13707 13706 mutex_enter(&pptr->port_mutex);
13708 13707 mutex_enter(&ptgt->tgt_mutex);
13709 13708 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
13710 13709 mutex_exit(&ptgt->tgt_mutex);
13711 13710 mutex_exit(&pptr->port_mutex);
13712 13711 switch (icmd->ipkt_opcode) {
13713 13712 int rval;
13714 13713 case LA_ELS_PLOGI:
13715 13714 if ((rval = fc_ulp_login(
13716 13715 pptr->port_fp_handle,
13717 13716 &icmd->ipkt_fpkt, 1)) ==
13718 13717 FC_SUCCESS) {
13719 13718 mutex_enter(
13720 13719 &pptr->port_mutex);
13721 13720 continue;
13722 13721 }
13723 13722 if (fcp_handle_ipkt_errors(
13724 13723 pptr, ptgt, icmd, rval,
13725 13724 "PLOGI") == DDI_SUCCESS) {
13726 13725 mutex_enter(
13727 13726 &pptr->port_mutex);
13728 13727 continue;
13729 13728 }
13730 13729 break;
13731 13730
13732 13731 case LA_ELS_PRLI:
13733 13732 if ((rval = fc_ulp_issue_els(
13734 13733 pptr->port_fp_handle,
13735 13734 icmd->ipkt_fpkt)) ==
13736 13735 FC_SUCCESS) {
13737 13736 mutex_enter(
13738 13737 &pptr->port_mutex);
13739 13738 continue;
13740 13739 }
13741 13740 if (fcp_handle_ipkt_errors(
13742 13741 pptr, ptgt, icmd, rval,
13743 13742 "PRLI") == DDI_SUCCESS) {
13744 13743 mutex_enter(
13745 13744 &pptr->port_mutex);
13746 13745 continue;
13747 13746 }
13748 13747 break;
13749 13748
13750 13749 default:
13751 13750 if ((rval = fcp_transport(
13752 13751 pptr->port_fp_handle,
13753 13752 icmd->ipkt_fpkt, 1)) ==
13754 13753 FC_SUCCESS) {
13755 13754 mutex_enter(
13756 13755 &pptr->port_mutex);
13757 13756 continue;
13758 13757 }
13759 13758 if (fcp_handle_ipkt_errors(
13760 13759 pptr, ptgt, icmd, rval,
13761 13760 "PRLI") == DDI_SUCCESS) {
13762 13761 mutex_enter(
13763 13762 &pptr->port_mutex);
13764 13763 continue;
13765 13764 }
13766 13765 break;
13767 13766 }
13768 13767 } else {
13769 13768 mutex_exit(&ptgt->tgt_mutex);
13770 13769 mutex_exit(&pptr->port_mutex);
13771 13770 }
13772 13771 } else {
13773 13772 fcp_print_error(icmd->ipkt_fpkt);
13774 13773 }
13775 13774
13776 13775 (void) fcp_call_finish_init(pptr, ptgt,
13777 13776 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
13778 13777 icmd->ipkt_cause);
13779 13778 fcp_icmd_free(pptr, icmd);
13780 13779 mutex_enter(&pptr->port_mutex);
13781 13780 }
13782 13781
13783 13782 pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13784 13783 mutex_exit(&pptr->port_mutex);
13785 13784 mutex_enter(&fcp_global_mutex);
13786 13785
13787 13786 end_of_watchdog:
13788 13787 /*
13789 13788 * Bail out early before getting into trouble
13790 13789 */
13791 13790 if (save_port != fcp_port_head) {
13792 13791 break;
13793 13792 }
13794 13793 }
13795 13794
13796 13795 if (fcp_watchdog_init > 0) {
13797 13796 /* reschedule timeout to go again */
13798 13797 fcp_watchdog_id =
13799 13798 timeout(fcp_watch, NULL, fcp_watchdog_tick);
13800 13799 }
13801 13800 mutex_exit(&fcp_global_mutex);
13802 13801 }
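
/*
 * A minimal sketch (hypothetical names, not in this driver) of the
 * self-rescheduling timeout(9F) pattern that fcp_watch() follows: the
 * callback does its periodic work and then re-arms itself for the next
 * tick as long as the watchdog is enabled.
 */
static volatile int	example_watch_enabled;	/* protected by example_mutex */
static timeout_id_t	example_watch_id;
static kmutex_t		example_mutex;

static void
example_watch(void *arg)
{
	/* ... periodic housekeeping would go here ... */

	mutex_enter(&example_mutex);
	if (example_watch_enabled) {
		/* re-arm for the next one-second tick */
		example_watch_id = timeout(example_watch, NULL,
		    drv_usectohz(1000000));
	}
	mutex_exit(&example_mutex);
}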
13803 13802
13804 13803
13805 13804 static void
13806 13805 fcp_check_reset_delay(struct fcp_port *pptr)
13807 13806 {
13808 13807 uint32_t tgt_cnt;
13809 13808 int level;
13810 13809 struct fcp_tgt *ptgt;
13811 13810 struct fcp_lun *plun;
13812 13811 struct fcp_reset_elem *cur = NULL;
13813 13812 struct fcp_reset_elem *next = NULL;
13814 13813 struct fcp_reset_elem *prev = NULL;
13815 13814
13816 13815 ASSERT(mutex_owned(&pptr->port_mutex));
13817 13816
13818 13817 next = pptr->port_reset_list;
13819 13818 while ((cur = next) != NULL) {
13820 13819 next = cur->next;
13821 13820
13822 13821 if (cur->timeout < fcp_watchdog_time) {
13823 13822 prev = cur;
13824 13823 continue;
13825 13824 }
13826 13825
13827 13826 ptgt = cur->tgt;
13828 13827 plun = cur->lun;
13829 13828 tgt_cnt = cur->tgt_cnt;
13830 13829
13831 13830 if (ptgt) {
13832 13831 level = RESET_TARGET;
13833 13832 } else {
13834 13833 ASSERT(plun != NULL);
13835 13834 level = RESET_LUN;
13836 13835 ptgt = plun->lun_tgt;
13837 13836 }
13838 13837 if (prev) {
13839 13838 prev->next = next;
13840 13839 } else {
13841 13840 /*
13842 13841 			 * Because we drop the port mutex while doing aborts for
13843 13842 			 * packets, we can't rely on reset_list pointing to
13844 13843 			 * our head.
13845 13844 */
13846 13845 if (cur == pptr->port_reset_list) {
13847 13846 pptr->port_reset_list = next;
13848 13847 } else {
13849 13848 struct fcp_reset_elem *which;
13850 13849
13851 13850 which = pptr->port_reset_list;
13852 13851 while (which && which->next != cur) {
13853 13852 which = which->next;
13854 13853 }
13855 13854 ASSERT(which != NULL);
13856 13855
13857 13856 which->next = next;
13858 13857 prev = which;
13859 13858 }
13860 13859 }
13861 13860
13862 13861 kmem_free(cur, sizeof (*cur));
13863 13862
13864 13863 if (tgt_cnt == ptgt->tgt_change_cnt) {
13865 13864 mutex_enter(&ptgt->tgt_mutex);
13866 13865 if (level == RESET_TARGET) {
13867 13866 fcp_update_tgt_state(ptgt,
13868 13867 FCP_RESET, FCP_LUN_BUSY);
13869 13868 } else {
13870 13869 fcp_update_lun_state(plun,
13871 13870 FCP_RESET, FCP_LUN_BUSY);
13872 13871 }
13873 13872 mutex_exit(&ptgt->tgt_mutex);
13874 13873
13875 13874 mutex_exit(&pptr->port_mutex);
13876 13875 fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
13877 13876 mutex_enter(&pptr->port_mutex);
13878 13877 }
13879 13878 }
13880 13879 }
13881 13880
13882 13881
13883 13882 static void
13884 13883 fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
13885 13884 struct fcp_lun *rlun, int tgt_cnt)
13886 13885 {
13887 13886 int rval;
13888 13887 struct fcp_lun *tlun, *nlun;
13889 13888 struct fcp_pkt *pcmd = NULL, *ncmd = NULL,
13890 13889 *cmd = NULL, *head = NULL,
13891 13890 *tail = NULL;
13892 13891
13893 13892 mutex_enter(&pptr->port_pkt_mutex);
13894 13893 for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
13895 13894 struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
13896 13895 struct fcp_tgt *ptgt = plun->lun_tgt;
13897 13896
13898 13897 ncmd = cmd->cmd_next;
13899 13898
13900 13899 if (ptgt != ttgt && plun != rlun) {
13901 13900 pcmd = cmd;
13902 13901 continue;
13903 13902 }
13904 13903
13905 13904 if (pcmd != NULL) {
13906 13905 ASSERT(pptr->port_pkt_head != cmd);
13907 13906 pcmd->cmd_next = ncmd;
13908 13907 } else {
13909 13908 ASSERT(cmd == pptr->port_pkt_head);
13910 13909 pptr->port_pkt_head = ncmd;
13911 13910 }
13912 13911 if (pptr->port_pkt_tail == cmd) {
13913 13912 ASSERT(cmd->cmd_next == NULL);
13914 13913 pptr->port_pkt_tail = pcmd;
13915 13914 if (pcmd != NULL) {
13916 13915 pcmd->cmd_next = NULL;
13917 13916 }
13918 13917 }
13919 13918
13920 13919 if (head == NULL) {
13921 13920 head = tail = cmd;
13922 13921 } else {
13923 13922 ASSERT(tail != NULL);
13924 13923 tail->cmd_next = cmd;
13925 13924 tail = cmd;
13926 13925 }
13927 13926 cmd->cmd_next = NULL;
13928 13927 }
13929 13928 mutex_exit(&pptr->port_pkt_mutex);
13930 13929
13931 13930 for (cmd = head; cmd != NULL; cmd = ncmd) {
13932 13931 struct scsi_pkt *pkt = cmd->cmd_pkt;
13933 13932
13934 13933 ncmd = cmd->cmd_next;
13935 13934 ASSERT(pkt != NULL);
13936 13935
13937 13936 mutex_enter(&pptr->port_mutex);
13938 13937 if (ttgt->tgt_change_cnt == tgt_cnt) {
13939 13938 mutex_exit(&pptr->port_mutex);
13940 13939 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
13941 13940 pkt->pkt_reason = CMD_RESET;
13942 13941 pkt->pkt_statistics |= STAT_DEV_RESET;
13943 13942 cmd->cmd_state = FCP_PKT_IDLE;
13944 13943 fcp_post_callback(cmd);
13945 13944 } else {
13946 13945 mutex_exit(&pptr->port_mutex);
13947 13946 }
13948 13947 }
13949 13948
13950 13949 /*
13951 13950 * If the FCA will return all the commands in its queue then our
13952 13951 * work is easy, just return.
13953 13952 */
13954 13953
13955 13954 if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
13956 13955 return;
13957 13956 }
13958 13957
13959 13958 /*
13960 13959 	 * For RESET_LUN, get hold of the target pointer.
13961 13960 */
13962 13961 if (ttgt == NULL) {
13963 13962 ASSERT(rlun != NULL);
13964 13963
13965 13964 ttgt = rlun->lun_tgt;
13966 13965
13967 13966 ASSERT(ttgt != NULL);
13968 13967 }
13969 13968
13970 13969 /*
13971 13970 * There are some severe race conditions here.
13972 13971 	 * While we are trying to abort the pkt, it might be completing,
13973 13972 	 * so mark it as aborting; if the abort does not succeed,
13974 13973 	 * handle it in the watch thread.
13975 13974 */
13976 13975 mutex_enter(&ttgt->tgt_mutex);
13977 13976 nlun = ttgt->tgt_lun;
13978 13977 mutex_exit(&ttgt->tgt_mutex);
13979 13978 while ((tlun = nlun) != NULL) {
13980 13979 int restart = 0;
13981 13980 if (rlun && rlun != tlun) {
13982 13981 mutex_enter(&ttgt->tgt_mutex);
13983 13982 nlun = tlun->lun_next;
13984 13983 mutex_exit(&ttgt->tgt_mutex);
13985 13984 continue;
13986 13985 }
13987 13986 mutex_enter(&tlun->lun_mutex);
13988 13987 cmd = tlun->lun_pkt_head;
13989 13988 while (cmd != NULL) {
13990 13989 if (cmd->cmd_state == FCP_PKT_ISSUED) {
13991 13990 struct scsi_pkt *pkt;
13992 13991
13993 13992 restart = 1;
13994 13993 cmd->cmd_state = FCP_PKT_ABORTING;
13995 13994 mutex_exit(&tlun->lun_mutex);
13996 13995 rval = fc_ulp_abort(pptr->port_fp_handle,
13997 13996 cmd->cmd_fp_pkt, KM_SLEEP);
13998 13997 if (rval == FC_SUCCESS) {
13999 13998 pkt = cmd->cmd_pkt;
14000 13999 pkt->pkt_reason = CMD_RESET;
14001 14000 pkt->pkt_statistics |= STAT_DEV_RESET;
14002 14001 cmd->cmd_state = FCP_PKT_IDLE;
14003 14002 fcp_post_callback(cmd);
14004 14003 } else {
14005 14004 caddr_t msg;
14006 14005
14007 14006 (void) fc_ulp_error(rval, &msg);
14008 14007
14009 14008 /*
14010 14009 * This part is tricky. The abort
14011 14010 * failed and now the command could
14012 14011 * be completing. The cmd_state ==
14013 14012 * FCP_PKT_ABORTING should save
14014 14013 					 * us in fcp_cmd_callback.  If we
14015 14014 					 * are already aborting, ignore the
14016 14015 					 * command in fcp_cmd_callback.
14017 14016 * Here we leave this packet for 20
14018 14017 * sec to be aborted in the
14019 14018 * fcp_watch thread.
14020 14019 */
14021 14020 fcp_log(CE_WARN, pptr->port_dip,
14022 14021 "!Abort failed after reset %s",
14023 14022 msg);
14024 14023
14025 14024 cmd->cmd_timeout =
14026 14025 fcp_watchdog_time +
14027 14026 cmd->cmd_pkt->pkt_time +
14028 14027 FCP_FAILED_DELAY;
14029 14028
14030 14029 cmd->cmd_fp_pkt->pkt_timeout =
14031 14030 FCP_INVALID_TIMEOUT;
14032 14031 /*
14033 14032 					 * This is a hack: cmd is put in the
14034 14033 					 * overflow queue so that it can
14035 14034 					 * eventually be timed out.
14036 14035 */
14037 14036 cmd->cmd_flags |= CFLAG_IN_QUEUE;
14038 14037
14039 14038 mutex_enter(&pptr->port_pkt_mutex);
14040 14039 if (pptr->port_pkt_head) {
14041 14040 ASSERT(pptr->port_pkt_tail
14042 14041 != NULL);
14043 14042 pptr->port_pkt_tail->cmd_next
14044 14043 = cmd;
14045 14044 pptr->port_pkt_tail = cmd;
14046 14045 } else {
14047 14046 ASSERT(pptr->port_pkt_tail
14048 14047 == NULL);
14049 14048 pptr->port_pkt_head =
14050 14049 pptr->port_pkt_tail
14051 14050 = cmd;
14052 14051 }
14053 14052 cmd->cmd_next = NULL;
14054 14053 mutex_exit(&pptr->port_pkt_mutex);
14055 14054 }
14056 14055 mutex_enter(&tlun->lun_mutex);
14057 14056 cmd = tlun->lun_pkt_head;
14058 14057 } else {
14059 14058 cmd = cmd->cmd_forw;
14060 14059 }
14061 14060 }
14062 14061 mutex_exit(&tlun->lun_mutex);
14063 14062
14064 14063 mutex_enter(&ttgt->tgt_mutex);
14065 14064 restart == 1 ? (nlun = ttgt->tgt_lun) : (nlun = tlun->lun_next);
14066 14065 mutex_exit(&ttgt->tgt_mutex);
14067 14066
14068 14067 mutex_enter(&pptr->port_mutex);
14069 14068 if (tgt_cnt != ttgt->tgt_change_cnt) {
14070 14069 mutex_exit(&pptr->port_mutex);
14071 14070 return;
14072 14071 } else {
14073 14072 mutex_exit(&pptr->port_mutex);
14074 14073 }
14075 14074 }
14076 14075 }
14077 14076
14078 14077
14079 14078 /*
14080 14079 * unlink the soft state, returning the soft state found (if any)
14081 14080 *
14082 14081 * acquires and releases the global mutex
14083 14082 */
14084 14083 struct fcp_port *
14085 14084 fcp_soft_state_unlink(struct fcp_port *pptr)
14086 14085 {
14087 14086 struct fcp_port *hptr; /* ptr index */
14088 14087 struct fcp_port *tptr; /* prev hptr */
14089 14088
14090 14089 mutex_enter(&fcp_global_mutex);
14091 14090 for (hptr = fcp_port_head, tptr = NULL;
14092 14091 hptr != NULL;
14093 14092 tptr = hptr, hptr = hptr->port_next) {
14094 14093 if (hptr == pptr) {
14095 14094 /* we found a match -- remove this item */
14096 14095 if (tptr == NULL) {
14097 14096 /* we're at the head of the list */
14098 14097 fcp_port_head = hptr->port_next;
14099 14098 } else {
14100 14099 tptr->port_next = hptr->port_next;
14101 14100 }
14102 14101 break; /* success */
14103 14102 }
14104 14103 }
14105 14104 if (fcp_port_head == NULL) {
14106 14105 fcp_cleanup_blacklist(&fcp_lun_blacklist);
14107 14106 }
14108 14107 mutex_exit(&fcp_global_mutex);
14109 14108 return (hptr);
14110 14109 }
14111 14110
14112 14111
14113 14112 /*
14114 14113 * called by fcp_scsi_hba_tgt_init to find a LUN given a
14115 14114 * WWN and a LUN number
14116 14115 */
14117 14116 /* ARGSUSED */
14118 14117 static struct fcp_lun *
14119 14118 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
14120 14119 {
14121 14120 int hash;
14122 14121 struct fcp_tgt *ptgt;
14123 14122 struct fcp_lun *plun;
14124 14123
14125 14124 ASSERT(mutex_owned(&pptr->port_mutex));
14126 14125
14127 14126 hash = FCP_HASH(wwn);
14128 14127 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
14129 14128 ptgt = ptgt->tgt_next) {
14130 14129 if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
14131 14130 sizeof (ptgt->tgt_port_wwn)) == 0) {
14132 14131 mutex_enter(&ptgt->tgt_mutex);
14133 14132 for (plun = ptgt->tgt_lun;
14134 14133 plun != NULL;
14135 14134 plun = plun->lun_next) {
14136 14135 if (plun->lun_num == lun) {
14137 14136 mutex_exit(&ptgt->tgt_mutex);
14138 14137 return (plun);
14139 14138 }
14140 14139 }
14141 14140 mutex_exit(&ptgt->tgt_mutex);
14142 14141 return (NULL);
14143 14142 }
14144 14143 }
14145 14144 return (NULL);
14146 14145 }
14147 14146
14148 14147 /*
14149 14148 * Function: fcp_prepare_pkt
14150 14149 *
14151 14150 * Description: This function prepares the SCSI cmd pkt, passed by the caller,
14152 14151 * for fcp_start(). It binds the data or partially maps it.
14153 14152 * Builds the FCP header and starts the initialization of the
14154 14153 * Fibre Channel header.
14155 14154 *
14156 14155 * Argument: *pptr FCP port.
14157 14156 * *cmd FCP packet.
14158 14157 * *plun LUN the command will be sent to.
14159 14158 *
14160 14159 * Context: User, Kernel and Interrupt context.
14161 14160 */
14162 14161 static void
14163 14162 fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
14164 14163 struct fcp_lun *plun)
14165 14164 {
14166 14165 fc_packet_t *fpkt = cmd->cmd_fp_pkt;
14167 14166 struct fcp_tgt *ptgt = plun->lun_tgt;
14168 14167 struct fcp_cmd *fcmd = &cmd->cmd_fcp_cmd;
14169 14168
14170 14169 ASSERT(cmd->cmd_pkt->pkt_comp ||
14171 14170 (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));
14172 14171
14173 14172 if (cmd->cmd_pkt->pkt_numcookies) {
14174 14173 if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
14175 14174 fcmd->fcp_cntl.cntl_read_data = 1;
14176 14175 fcmd->fcp_cntl.cntl_write_data = 0;
14177 14176 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
14178 14177 } else {
14179 14178 fcmd->fcp_cntl.cntl_read_data = 0;
14180 14179 fcmd->fcp_cntl.cntl_write_data = 1;
14181 14180 fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
14182 14181 }
14183 14182
14184 14183 fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;
14185 14184
14186 14185 fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
14187 14186 ASSERT(fpkt->pkt_data_cookie_cnt <=
14188 14187 pptr->port_data_dma_attr.dma_attr_sgllen);
14189 14188
14190 14189 cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;
14191 14190
14192 14191 /* FCA needs pkt_datalen to be set */
14193 14192 fpkt->pkt_datalen = cmd->cmd_dmacount;
14194 14193 fcmd->fcp_data_len = cmd->cmd_dmacount;
14195 14194 } else {
14196 14195 fcmd->fcp_cntl.cntl_read_data = 0;
14197 14196 fcmd->fcp_cntl.cntl_write_data = 0;
14198 14197 fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
14199 14198 fpkt->pkt_datalen = 0;
14200 14199 fcmd->fcp_data_len = 0;
14201 14200 }
14202 14201
14203 14202 /* set up the Tagged Queuing type */
14204 14203 if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
14205 14204 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
14206 14205 } else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
14207 14206 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
14208 14207 } else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
14209 14208 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
14210 14209 } else {
14211 14210 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
14212 14211 }
14213 14212
14214 14213 fcmd->fcp_ent_addr = plun->lun_addr;
14215 14214
14216 14215 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
14217 14216 FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
14218 14217 fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
14219 14218 } else {
14220 14219 ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
14221 14220 }
14222 14221
14223 14222 cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
14224 14223 cmd->cmd_pkt->pkt_state = 0;
14225 14224 cmd->cmd_pkt->pkt_statistics = 0;
14226 14225 cmd->cmd_pkt->pkt_resid = 0;
14227 14226
14228 14227 cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;
14229 14228
14230 14229 if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
14231 14230 fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
14232 14231 fpkt->pkt_comp = NULL;
14233 14232 } else {
14234 14233 fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
14235 14234 if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
14236 14235 fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
14237 14236 }
14238 14237 fpkt->pkt_comp = fcp_cmd_callback;
14239 14238 }
14240 14239
14241 14240 mutex_enter(&pptr->port_mutex);
14242 14241 if (pptr->port_state & FCP_STATE_SUSPENDED) {
14243 14242 fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
14244 14243 }
14245 14244 mutex_exit(&pptr->port_mutex);
14246 14245
14247 14246 fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
14248 14247 fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
14249 14248
14250 14249 /*
14251 14250 * Save a few kernel cycles here
14252 14251 */
14253 14252 #ifndef __lock_lint
14254 14253 fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
14255 14254 #endif /* __lock_lint */
14256 14255 }
14257 14256
14258 14257 static void
14259 14258 fcp_post_callback(struct fcp_pkt *cmd)
14260 14259 {
14261 14260 scsi_hba_pkt_comp(cmd->cmd_pkt);
14262 14261 }
14263 14262
14264 14263
14265 14264 /*
14266 14265 * called to do polled I/O by fcp_start()
14267 14266 *
14268 14267  * return a transport status value, i.e. TRAN_ACCEPT for success
14269 14268 */
14270 14269 static int
14271 14270 fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
14272 14271 {
14273 14272 int rval;
14274 14273
14275 14274 #ifdef DEBUG
14276 14275 mutex_enter(&pptr->port_pkt_mutex);
14277 14276 pptr->port_npkts++;
14278 14277 mutex_exit(&pptr->port_pkt_mutex);
14279 14278 #endif /* DEBUG */
14280 14279
14281 14280 if (cmd->cmd_fp_pkt->pkt_timeout) {
14282 14281 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
14283 14282 } else {
14284 14283 cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
14285 14284 }
14286 14285
14287 14286 ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);
14288 14287
14289 14288 cmd->cmd_state = FCP_PKT_ISSUED;
14290 14289
14291 14290 rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);
14292 14291
14293 14292 #ifdef DEBUG
14294 14293 mutex_enter(&pptr->port_pkt_mutex);
14295 14294 pptr->port_npkts--;
14296 14295 mutex_exit(&pptr->port_pkt_mutex);
14297 14296 #endif /* DEBUG */
14298 14297
14299 14298 cmd->cmd_state = FCP_PKT_IDLE;
14300 14299
14301 14300 switch (rval) {
14302 14301 case FC_SUCCESS:
14303 14302 if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
14304 14303 fcp_complete_pkt(cmd->cmd_fp_pkt);
14305 14304 rval = TRAN_ACCEPT;
14306 14305 } else {
14307 14306 rval = TRAN_FATAL_ERROR;
14308 14307 }
14309 14308 break;
14310 14309
14311 14310 case FC_TRAN_BUSY:
14312 14311 rval = TRAN_BUSY;
14313 14312 cmd->cmd_pkt->pkt_resid = 0;
14314 14313 break;
14315 14314
14316 14315 case FC_BADPACKET:
14317 14316 rval = TRAN_BADPKT;
14318 14317 break;
14319 14318
14320 14319 default:
14321 14320 rval = TRAN_FATAL_ERROR;
14322 14321 break;
14323 14322 }
14324 14323
14325 14324 return (rval);
14326 14325 }
14327 14326
14328 14327
14329 14328 /*
14330 14329 * called by some of the following transport-called routines to convert
14331 14330 * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14332 14331 */
14333 14332 static struct fcp_port *
14334 14333 fcp_dip2port(dev_info_t *dip)
14335 14334 {
14336 14335 int instance;
14337 14336
14338 14337 instance = ddi_get_instance(dip);
14339 14338 return (ddi_get_soft_state(fcp_softstate, instance));
14340 14339 }
14341 14340
14342 14341
14343 14342 /*
14344 14343 * called internally to return a LUN given a dip
14345 14344 */
14346 14345 struct fcp_lun *
14347 14346 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14348 14347 {
14349 14348 struct fcp_tgt *ptgt;
14350 14349 struct fcp_lun *plun;
14351 14350 int i;
14352 14351
14353 14352
14354 14353 ASSERT(mutex_owned(&pptr->port_mutex));
14355 14354
14356 14355 for (i = 0; i < FCP_NUM_HASH; i++) {
14357 14356 for (ptgt = pptr->port_tgt_hash_table[i];
14358 14357 ptgt != NULL;
14359 14358 ptgt = ptgt->tgt_next) {
14360 14359 mutex_enter(&ptgt->tgt_mutex);
14361 14360 for (plun = ptgt->tgt_lun; plun != NULL;
14362 14361 plun = plun->lun_next) {
14363 14362 mutex_enter(&plun->lun_mutex);
14364 14363 if (plun->lun_cip == cip) {
14365 14364 mutex_exit(&plun->lun_mutex);
14366 14365 mutex_exit(&ptgt->tgt_mutex);
14367 14366 return (plun); /* match found */
14368 14367 }
14369 14368 mutex_exit(&plun->lun_mutex);
14370 14369 }
14371 14370 mutex_exit(&ptgt->tgt_mutex);
14372 14371 }
14373 14372 }
14374 14373 return (NULL); /* no LUN found */
14375 14374 }
14376 14375
14377 14376 /*
14378 14377 * pass an element to the hotplug list, kick the hotplug thread
14379 14378 * and wait for the element to get processed by the hotplug thread.
14380 14379 * on return the element is freed.
14381 14380 *
14382 14381  * return zero on success and non-zero on failure
14383 14382 *
14384 14383 * acquires/releases the target mutex
14385 14384 *
14386 14385 */
14387 14386 static int
14388 14387 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14389 14388 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14390 14389 {
14391 14390 struct fcp_hp_elem *elem;
14392 14391 int rval;
14393 14392
14394 14393 mutex_enter(&plun->lun_tgt->tgt_mutex);
14395 14394 if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14396 14395 what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14397 14396 mutex_exit(&plun->lun_tgt->tgt_mutex);
14398 14397 fcp_log(CE_CONT, pptr->port_dip,
14399 14398 "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14400 14399 what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14401 14400 return (NDI_FAILURE);
14402 14401 }
14403 14402 mutex_exit(&plun->lun_tgt->tgt_mutex);
14404 14403 mutex_enter(&elem->mutex);
14405 14404 if (elem->wait) {
14406 14405 while (elem->wait) {
14407 14406 cv_wait(&elem->cv, &elem->mutex);
14408 14407 }
14409 14408 }
14410 14409 rval = (elem->result);
14411 14410 mutex_exit(&elem->mutex);
14412 14411 mutex_destroy(&elem->mutex);
14413 14412 cv_destroy(&elem->cv);
14414 14413 kmem_free(elem, sizeof (struct fcp_hp_elem));
14415 14414 return (rval);
14416 14415 }
14417 14416
14418 14417 /*
14419 14418 * pass an element to the hotplug list, and then
14420 14419 * kick the hotplug thread
14421 14420 *
14422 14421 * return Boolean success, i.e. non-zero if all goes well, else zero on error
14423 14422 *
14424 14423 * acquires/releases the hotplug mutex
14425 14424 *
14426 14425 * called with the target mutex owned
14427 14426 *
14428 14427 * memory acquired in NOSLEEP mode
14429 14428  * NOTE: if wait is set to 1 then the caller is responsible for waiting
14430 14429  *	 for the hp daemon to process the request and is responsible for
14431 14430 * freeing the element
14432 14431 */
14433 14432 static struct fcp_hp_elem *
14434 14433 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14435 14434 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14436 14435 {
14437 14436 struct fcp_hp_elem *elem;
14438 14437 dev_info_t *pdip;
14439 14438
14440 14439 ASSERT(pptr != NULL);
14441 14440 ASSERT(plun != NULL);
14442 14441 ASSERT(plun->lun_tgt != NULL);
14443 14442 ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14444 14443
14445 14444 /* create space for a hotplug element */
14446 14445 if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14447 14446 == NULL) {
14448 14447 fcp_log(CE_WARN, NULL,
14449 14448 "!can't allocate memory for hotplug element");
14450 14449 return (NULL);
14451 14450 }
14452 14451
14453 14452 /* fill in hotplug element */
14454 14453 elem->port = pptr;
14455 14454 elem->lun = plun;
14456 14455 elem->cip = cip;
14457 14456 elem->old_lun_mpxio = plun->lun_mpxio;
14458 14457 elem->what = what;
14459 14458 elem->flags = flags;
14460 14459 elem->link_cnt = link_cnt;
14461 14460 elem->tgt_cnt = tgt_cnt;
14462 14461 elem->wait = wait;
14463 14462 mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14464 14463 cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14465 14464
14466 14465 /* schedule the hotplug task */
14467 14466 pdip = pptr->port_dip;
14468 14467 mutex_enter(&plun->lun_mutex);
14469 14468 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14470 14469 plun->lun_event_count++;
14471 14470 elem->event_cnt = plun->lun_event_count;
14472 14471 }
14473 14472 mutex_exit(&plun->lun_mutex);
14474 14473 if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14475 14474 (void *)elem, KM_NOSLEEP) == NULL) {
14476 14475 mutex_enter(&plun->lun_mutex);
14477 14476 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14478 14477 plun->lun_event_count--;
14479 14478 }
14480 14479 mutex_exit(&plun->lun_mutex);
14481 14480 kmem_free(elem, sizeof (*elem));
14482 14481 return (0);
14483 14482 }
14484 14483
14485 14484 return (elem);
14486 14485 }
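
/*
 * A minimal sketch (hypothetical, not in this driver) of the completion
 * side of the handshake that fcp_pass_to_hp_and_wait() blocks on: once the
 * hotplug worker has processed the element, it stores the result, clears
 * the wait flag and signals the condition variable.
 */
static void
example_complete_hp_elem(struct fcp_hp_elem *elem, int result)
{
	mutex_enter(&elem->mutex);
	elem->result = result;		/* hand the result back to the waiter */
	elem->wait = 0;			/* satisfies the cv_wait() loop */
	cv_signal(&elem->cv);
	mutex_exit(&elem->mutex);
}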
14487 14486
14488 14487
14489 14488 static void
14490 14489 fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
14491 14490 {
14492 14491 int rval;
14493 14492 struct scsi_address *ap;
14494 14493 struct fcp_lun *plun;
14495 14494 struct fcp_tgt *ptgt;
14496 14495 fc_packet_t *fpkt;
14497 14496
14498 14497 ap = &cmd->cmd_pkt->pkt_address;
14499 14498 plun = ADDR2LUN(ap);
14500 14499 ptgt = plun->lun_tgt;
14501 14500
14502 14501 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14503 14502
14504 14503 cmd->cmd_state = FCP_PKT_IDLE;
14505 14504
14506 14505 mutex_enter(&pptr->port_mutex);
14507 14506 mutex_enter(&ptgt->tgt_mutex);
14508 14507 if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
14509 14508 (!(pptr->port_state & FCP_STATE_ONLINING))) {
14510 14509 fc_ulp_rscn_info_t *rscnp;
14511 14510
14512 14511 cmd->cmd_state = FCP_PKT_ISSUED;
14513 14512
14514 14513 /*
14515 14514 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
14516 14515 * originally NULL, hence we try to set it to the pd pointed
14517 14516 * to by the SCSI device we're trying to get to.
14518 14517 */
14519 14518
14520 14519 fpkt = cmd->cmd_fp_pkt;
14521 14520 if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
14522 14521 fpkt->pkt_pd = ptgt->tgt_pd_handle;
14523 14522 /*
14524 14523 * We need to notify the transport that we now have a
14525 14524 * reference to the remote port handle.
14526 14525 */
14527 14526 fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
14528 14527 }
14529 14528
14530 14529 mutex_exit(&ptgt->tgt_mutex);
14531 14530 mutex_exit(&pptr->port_mutex);
14532 14531
14533 14532 ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);
14534 14533
14535 14534 /* prepare the packet */
14536 14535
14537 14536 fcp_prepare_pkt(pptr, cmd, plun);
14538 14537
14539 14538 rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
14540 14539 pkt_ulp_rscn_infop;
14541 14540
14542 14541 cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
14543 14542 fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;
14544 14543
14545 14544 if (rscnp != NULL) {
14546 14545 rscnp->ulp_rscn_count =
14547 14546 fc_ulp_get_rscn_count(pptr->
14548 14547 port_fp_handle);
14549 14548 }
14550 14549
14551 14550 rval = fcp_transport(pptr->port_fp_handle,
14552 14551 cmd->cmd_fp_pkt, 0);
14553 14552
14554 14553 if (rval == FC_SUCCESS) {
14555 14554 return;
14556 14555 }
14557 14556 cmd->cmd_state &= ~FCP_PKT_ISSUED;
14558 14557 } else {
14559 14558 mutex_exit(&ptgt->tgt_mutex);
14560 14559 mutex_exit(&pptr->port_mutex);
14561 14560 }
14562 14561
14563 14562 fcp_queue_pkt(pptr, cmd);
14564 14563 }
14565 14564
14566 14565
14567 14566 static void
14568 14567 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14569 14568 {
14570 14569 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14571 14570
14572 14571 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14573 14572 cmd->cmd_state = FCP_PKT_IDLE;
14574 14573
14575 14574 cmd->cmd_pkt->pkt_reason = reason;
14576 14575 cmd->cmd_pkt->pkt_state = 0;
14577 14576 cmd->cmd_pkt->pkt_statistics = statistics;
14578 14577
14579 14578 fcp_post_callback(cmd);
14580 14579 }
14581 14580
14582 14581 /*
14583 14582 * Function: fcp_queue_pkt
14584 14583 *
14585 14584 * Description: This function queues the packet passed by the caller into
14586 14585 * the list of packets of the FCP port.
14587 14586 *
14588 14587 * Argument: *pptr FCP port.
14589 14588 * *cmd FCP packet to queue.
14590 14589 *
14591 14590 * Return Value: None
14592 14591 *
14593 14592 * Context: User, Kernel and Interrupt context.
14594 14593 */
14595 14594 static void
14596 14595 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14597 14596 {
14598 14597 ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == NULL);
14599 14598
14600 14599 mutex_enter(&pptr->port_pkt_mutex);
14601 14600 cmd->cmd_flags |= CFLAG_IN_QUEUE;
14602 14601 ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14603 14602 cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14604 14603
14605 14604 /*
14606 14605 	 * zero pkt_time means hang around forever
14607 14606 */
14608 14607 if (cmd->cmd_pkt->pkt_time) {
14609 14608 if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14610 14609 cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14611 14610 } else {
14612 14611 /*
14613 14612 			 * Tell the watch thread to fail the command by
14614 14613 			 * setting its timeout to the highest value.
14615 14614 */
14616 14615 cmd->cmd_timeout = fcp_watchdog_time;
14617 14616 cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14618 14617 }
14619 14618 }
14620 14619
14621 14620 if (pptr->port_pkt_head) {
14622 14621 ASSERT(pptr->port_pkt_tail != NULL);
14623 14622
14624 14623 pptr->port_pkt_tail->cmd_next = cmd;
14625 14624 pptr->port_pkt_tail = cmd;
14626 14625 } else {
14627 14626 ASSERT(pptr->port_pkt_tail == NULL);
14628 14627
14629 14628 pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14630 14629 }
14631 14630 cmd->cmd_next = NULL;
14632 14631 mutex_exit(&pptr->port_pkt_mutex);
14633 14632 }
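
/*
 * Worked example (illustrative, with an assumed FCP_QUEUE_DELAY of 4
 * seconds): a packet with 10 seconds of pkt_timeout remaining is requeued
 * with pkt_timeout = 6 and can still be retried, while a packet with only
 * 3 seconds left is tagged with FCP_INVALID_TIMEOUT and its cmd_timeout
 * set to the current fcp_watchdog_time, so fcp_watch() fails it on the
 * next tick.
 */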
14634 14633
14635 14634 /*
14636 14635 * Function: fcp_update_targets
14637 14636 *
14638 14637 * Description: This function applies the specified change of state to all
14639 14638 * the targets listed. The operation applied is 'set'.
14640 14639 *
14641 14640 * Argument: *pptr FCP port.
14642 14641 * *dev_list Array of fc_portmap_t structures.
14643 14642 * count Length of dev_list.
14644 14643 * state State bits to update.
14645 14644 * cause Reason for the update.
14646 14645 *
14647 14646 * Return Value: None
14648 14647 *
14649 14648 * Context: User, Kernel and Interrupt context.
14650 14649 * The mutex pptr->port_mutex must be held.
14651 14650 */
14652 14651 static void
14653 14652 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14654 14653 uint32_t count, uint32_t state, int cause)
14655 14654 {
14656 14655 fc_portmap_t *map_entry;
14657 14656 struct fcp_tgt *ptgt;
14658 14657
14659 14658 ASSERT(MUTEX_HELD(&pptr->port_mutex));
14660 14659
14661 14660 while (count--) {
14662 14661 map_entry = &(dev_list[count]);
14663 14662 ptgt = fcp_lookup_target(pptr,
14664 14663 (uchar_t *)&(map_entry->map_pwwn));
14665 14664 if (ptgt == NULL) {
14666 14665 continue;
14667 14666 }
14668 14667
14669 14668 mutex_enter(&ptgt->tgt_mutex);
14670 14669 ptgt->tgt_trace = 0;
14671 14670 ptgt->tgt_change_cnt++;
14672 14671 ptgt->tgt_statec_cause = cause;
14673 14672 ptgt->tgt_tmp_cnt = 1;
14674 14673 fcp_update_tgt_state(ptgt, FCP_SET, state);
14675 14674 mutex_exit(&ptgt->tgt_mutex);
14676 14675 }
14677 14676 }
14678 14677
14679 14678 static int
14680 14679 fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14681 14680 int lcount, int tcount, int cause)
14682 14681 {
14683 14682 int rval;
14684 14683
14685 14684 mutex_enter(&pptr->port_mutex);
14686 14685 rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
14687 14686 mutex_exit(&pptr->port_mutex);
14688 14687
14689 14688 return (rval);
14690 14689 }
14691 14690
14692 14691
14693 14692 static int
14694 14693 fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14695 14694 int lcount, int tcount, int cause)
14696 14695 {
14697 14696 int finish_init = 0;
14698 14697 int finish_tgt = 0;
14699 14698 int do_finish_init = 0;
14700 14699 int rval = FCP_NO_CHANGE;
14701 14700
14702 14701 if (cause == FCP_CAUSE_LINK_CHANGE ||
14703 14702 cause == FCP_CAUSE_LINK_DOWN) {
14704 14703 do_finish_init = 1;
14705 14704 }
14706 14705
14707 14706 if (ptgt != NULL) {
14708 14707 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14709 14708 FCP_BUF_LEVEL_2, 0,
14710 14709 "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
14711 14710 " cause = %d, d_id = 0x%x, tgt_done = %d",
14712 14711 pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
14713 14712 pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
14714 14713 ptgt->tgt_d_id, ptgt->tgt_done);
14715 14714
14716 14715 mutex_enter(&ptgt->tgt_mutex);
14717 14716
14718 14717 if (tcount && (ptgt->tgt_change_cnt != tcount)) {
14719 14718 rval = FCP_DEV_CHANGE;
14720 14719 if (do_finish_init && ptgt->tgt_done == 0) {
14721 14720 ptgt->tgt_done++;
14722 14721 finish_init = 1;
14723 14722 }
14724 14723 } else {
14725 14724 if (--ptgt->tgt_tmp_cnt <= 0) {
14726 14725 ptgt->tgt_tmp_cnt = 0;
14727 14726 finish_tgt = 1;
14728 14727
14729 14728 if (do_finish_init) {
14730 14729 finish_init = 1;
14731 14730 }
14732 14731 }
14733 14732 }
14734 14733 mutex_exit(&ptgt->tgt_mutex);
14735 14734 } else {
14736 14735 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14737 14736 FCP_BUF_LEVEL_2, 0,
14738 14737 "Call Finish Init for NO target");
14739 14738
14740 14739 if (do_finish_init) {
14741 14740 finish_init = 1;
14742 14741 }
14743 14742 }
14744 14743
14745 14744 if (finish_tgt) {
14746 14745 ASSERT(ptgt != NULL);
14747 14746
14748 14747 mutex_enter(&ptgt->tgt_mutex);
14749 14748 #ifdef DEBUG
14750 14749 bzero(ptgt->tgt_tmp_cnt_stack,
14751 14750 sizeof (ptgt->tgt_tmp_cnt_stack));
14752 14751
14753 14752 ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
14754 14753 FCP_STACK_DEPTH);
14755 14754 #endif /* DEBUG */
14756 14755 mutex_exit(&ptgt->tgt_mutex);
14757 14756
14758 14757 (void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
14759 14758 }
14760 14759
14761 14760 if (finish_init && lcount == pptr->port_link_cnt) {
14762 14761 ASSERT(pptr->port_tmp_cnt > 0);
14763 14762 if (--pptr->port_tmp_cnt == 0) {
14764 14763 fcp_finish_init(pptr);
14765 14764 }
14766 14765 } else if (lcount != pptr->port_link_cnt) {
14767 14766 FCP_TRACE(fcp_logq, pptr->port_instbuf,
14768 14767 fcp_trace, FCP_BUF_LEVEL_2, 0,
14769 14768 "fcp_call_finish_init_held,1: state change occurred"
14770 14769 " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
14771 14770 }
14772 14771
14773 14772 return (rval);
14774 14773 }
14775 14774
14776 14775 static void
14777 14776 fcp_reconfigure_luns(void * tgt_handle)
14778 14777 {
14779 14778 uint32_t dev_cnt;
14780 14779 fc_portmap_t *devlist;
14781 14780 struct fcp_tgt *ptgt = (struct fcp_tgt *)tgt_handle;
14782 14781 struct fcp_port *pptr = ptgt->tgt_port;
14783 14782
14784 14783 /*
14785 14784 * If the timer that fires this off got canceled too late, the
14786 14785 * target could have been destroyed.
14787 14786 */
14788 14787
14789 14788 if (ptgt->tgt_tid == NULL) {
14790 14789 return;
14791 14790 }
14792 14791
14793 14792 devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
14794 14793 if (devlist == NULL) {
14795 14794 fcp_log(CE_WARN, pptr->port_dip,
14796 14795 "!fcp%d: failed to allocate for portmap",
14797 14796 pptr->port_instance);
14798 14797 return;
14799 14798 }
14800 14799
14801 14800 dev_cnt = 1;
14802 14801 devlist->map_pd = ptgt->tgt_pd_handle;
14803 14802 devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
14804 14803 devlist->map_did.port_id = ptgt->tgt_d_id;
14805 14804
14806 14805 bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
14807 14806 bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);
14808 14807
14809 14808 devlist->map_state = PORT_DEVICE_LOGGED_IN;
14810 14809 devlist->map_type = PORT_DEVICE_REPORTLUN_CHANGED;
14811 14810 devlist->map_flags = 0;
14812 14811
14813 14812 fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
14814 14813 pptr->port_topology, devlist, dev_cnt, pptr->port_id);
14815 14814
14816 14815 /*
14817 14816 * Clear tgt_tid now that there are no more references to
14818 14817 * the fcp_tgt.
14819 14818 */
14820 14819 mutex_enter(&ptgt->tgt_mutex);
14821 14820 ptgt->tgt_tid = NULL;
14822 14821 mutex_exit(&ptgt->tgt_mutex);
14823 14822
14824 14823 kmem_free(devlist, sizeof (*devlist));
14825 14824 }
14826 14825
14827 14826
14828 14827 static void
14829 14828 fcp_free_targets(struct fcp_port *pptr)
14830 14829 {
14831 14830 int i;
14832 14831 struct fcp_tgt *ptgt;
14833 14832
14834 14833 mutex_enter(&pptr->port_mutex);
14835 14834 for (i = 0; i < FCP_NUM_HASH; i++) {
14836 14835 ptgt = pptr->port_tgt_hash_table[i];
14837 14836 while (ptgt != NULL) {
14838 14837 struct fcp_tgt *next_tgt = ptgt->tgt_next;
14839 14838
14840 14839 fcp_free_target(ptgt);
14841 14840 ptgt = next_tgt;
14842 14841 }
14843 14842 }
14844 14843 mutex_exit(&pptr->port_mutex);
14845 14844 }
14846 14845
14847 14846
14848 14847 static void
14849 14848 fcp_free_target(struct fcp_tgt *ptgt)
14850 14849 {
14851 14850 struct fcp_lun *plun;
14852 14851 timeout_id_t tid;
14853 14852
14854 14853 mutex_enter(&ptgt->tgt_mutex);
14855 14854 tid = ptgt->tgt_tid;
14856 14855
14857 14856 /*
14858 14857 * Cancel any pending timeouts for this target.
14859 14858 */
14860 14859
14861 14860 if (tid != NULL) {
14862 14861 /*
14863 14862 * Set tgt_tid to NULL first to avoid a race in the callback.
14864 14863 * If tgt_tid is NULL, the callback will simply return.
14865 14864 */
14866 14865 ptgt->tgt_tid = NULL;
14867 14866 mutex_exit(&ptgt->tgt_mutex);
14868 14867 (void) untimeout(tid);
14869 14868 mutex_enter(&ptgt->tgt_mutex);
14870 14869 }
14871 14870
14872 14871 plun = ptgt->tgt_lun;
14873 14872 while (plun != NULL) {
14874 14873 struct fcp_lun *next_lun = plun->lun_next;
14875 14874
14876 14875 fcp_dealloc_lun(plun);
14877 14876 plun = next_lun;
14878 14877 }
14879 14878
14880 14879 mutex_exit(&ptgt->tgt_mutex);
14881 14880 fcp_dealloc_tgt(ptgt);
14882 14881 }
14883 14882
14884 14883 /*
14885 14884 * Function: fcp_is_retryable
14886 14885 *
14887 14886 * Description: Indicates if the internal packet is retryable.
14888 14887 *
14889 14888 * Argument: *icmd FCP internal packet.
14890 14889 *
14891 14890 * Return Value: 0 Not retryable
14892 14891 * 1 Retryable
14893 14892 *
14894 14893 * Context: User, Kernel and Interrupt context
14895 14894 */
14896 14895 static int
14897 14896 fcp_is_retryable(struct fcp_ipkt *icmd)
14898 14897 {
14899 14898 if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14900 14899 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
14901 14900 return (0);
14902 14901 }
14903 14902
14904 14903 return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14905 14904 icmd->ipkt_port->port_deadline) ? 1 : 0);
14906 14905 }
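
The test above is a time-budget check rather than a retry counter: an internal packet is retryable only while the port is not suspended, detaching or powering down, and only while "now plus the packet timeout" still falls before the port's discovery deadline (port_deadline is set from fcp_watchdog_time in fcp_create_on_demand() and fcp_handle_ipkt_errors()). A minimal userland sketch of that comparison, with made-up tick values; nothing below is driver code.

#include <stdio.h>

/* same comparison as fcp_is_retryable(), on plain integers */
static int
is_retryable(unsigned int watchdog_time, unsigned int pkt_timeout,
    unsigned int port_deadline)
{
	return (((watchdog_time + pkt_timeout) < port_deadline) ? 1 : 0);
}

int
main(void)
{
	unsigned int deadline = 100 + 60;	/* discovery began at tick 100 */

	(void) printf("%d\n", is_retryable(120, 12, deadline));	/* 1: 132 < 160 */
	(void) printf("%d\n", is_retryable(155, 12, deadline));	/* 0: 167 >= 160 */
	return (0);
}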
14907 14906
14908 14907 /*
14909 14908 * Function: fcp_create_on_demand
14910 14909 *
14911 14910 * Argument: *pptr FCP port.
14912 14911 * *pwwn Port WWN.
14913 14912 *
14914 14913 * Return Value: 0 Success
14915 14914 * EIO
14916 14915 * ENOMEM
14917 14916 * EBUSY
14918 14917 * EINVAL
14919 14918 *
14920 14919 * Context: User and Kernel context
14921 14920 */
14922 14921 static int
14923 14922 fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
14924 14923 {
14925 14924 int wait_ms;
14926 14925 int tcount;
14927 14926 int lcount;
14928 14927 int ret;
14929 14928 int error;
14930 14929 int rval = EIO;
14931 14930 int ntries;
14932 14931 fc_portmap_t *devlist;
14933 14932 opaque_t pd;
14934 14933 struct fcp_lun *plun;
14935 14934 struct fcp_tgt *ptgt;
14936 14935 int old_manual = 0;
14937 14936
14938 14937 /* Allocates the fc_portmap_t structure. */
14939 14938 devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);
14940 14939
14941 14940 /*
14942 14941 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown
14943 14942 * in the commented statement below:
14944 14943 *
14945 14944 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
14946 14945 *
14947 14946 * Below, the deadline for the discovery process is set.
14948 14947 */
14949 14948 mutex_enter(&pptr->port_mutex);
14950 14949 pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
14951 14950 mutex_exit(&pptr->port_mutex);
14952 14951
14953 14952 /*
14954 14953 * We try to find the remote port based on the WWN provided by the
14955 14954 * caller. We actually ask fp/fctl if it has it.
14956 14955 */
14957 14956 pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
14958 14957 (la_wwn_t *)pwwn, &error, 1);
14959 14958
14960 14959 if (pd == NULL) {
14961 14960 kmem_free(devlist, sizeof (*devlist));
14962 14961 return (rval);
14963 14962 }
14964 14963
14965 14964 /*
14966 14965 * The remote port was found. We ask fp/fctl to update our
14967 14966 * fc_portmap_t structure.
14968 14967 */
14969 14968 ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
14970 14969 (la_wwn_t *)pwwn, devlist);
14971 14970 if (ret != FC_SUCCESS) {
14972 14971 kmem_free(devlist, sizeof (*devlist));
14973 14972 return (rval);
14974 14973 }
14975 14974
14976 14975 /*
14977 14976 * The map type field is set to indicate that the creation is being
14978 14977 * done at the user's request (ioctl, probably luxadm or cfgadm).
14979 14978 */
14980 14979 devlist->map_type = PORT_DEVICE_USER_CREATE;
14981 14980
14982 14981 mutex_enter(&pptr->port_mutex);
14983 14982
14984 14983 /*
14985 14984 * We check to see if fcp already has a target that describes the
14986 14985 * device being created. If not, one is created.
14987 14986 */
14988 14987 ptgt = fcp_lookup_target(pptr, pwwn);
14989 14988 if (ptgt == NULL) {
14990 14989 lcount = pptr->port_link_cnt;
14991 14990 mutex_exit(&pptr->port_mutex);
14992 14991
14993 14992 ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
14994 14993 if (ptgt == NULL) {
14995 14994 fcp_log(CE_WARN, pptr->port_dip,
14996 14995 "!FC target allocation failed");
14997 14996 return (ENOMEM);
14998 14997 }
14999 14998
15000 14999 mutex_enter(&pptr->port_mutex);
15001 15000 }
15002 15001
15003 15002 mutex_enter(&ptgt->tgt_mutex);
15004 15003 ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
15005 15004 ptgt->tgt_tmp_cnt = 1;
15006 15005 ptgt->tgt_device_created = 0;
15007 15006 /*
15008 15007 * If the topology is fabric and auto configuration is enabled,
15009 15008 * but the target was manually unconfigured, then reset
15010 15009 * manual_config_only to 0 so the device will get configured.
15011 15010 */
15012 15011 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15013 15012 fcp_enable_auto_configuration &&
15014 15013 ptgt->tgt_manual_config_only == 1) {
15015 15014 old_manual = 1;
15016 15015 ptgt->tgt_manual_config_only = 0;
15017 15016 }
15018 15017 mutex_exit(&ptgt->tgt_mutex);
15019 15018
15020 15019 fcp_update_targets(pptr, devlist, 1,
15021 15020 FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);
15022 15021
15023 15022 lcount = pptr->port_link_cnt;
15024 15023 tcount = ptgt->tgt_change_cnt;
15025 15024
15026 15025 if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
15027 15026 tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
15028 15027 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15029 15028 fcp_enable_auto_configuration && old_manual) {
15030 15029 mutex_enter(&ptgt->tgt_mutex);
15031 15030 ptgt->tgt_manual_config_only = 1;
15032 15031 mutex_exit(&ptgt->tgt_mutex);
15033 15032 }
15034 15033
15035 15034 if (pptr->port_link_cnt != lcount ||
15036 15035 ptgt->tgt_change_cnt != tcount) {
15037 15036 rval = EBUSY;
15038 15037 }
15039 15038 mutex_exit(&pptr->port_mutex);
15040 15039
15041 15040 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15042 15041 FCP_BUF_LEVEL_3, 0,
15043 15042 "fcp_create_on_demand: mapflags ptgt=%x, "
15044 15043 "lcount=%x::port_link_cnt=%x, "
15045 15044 "tcount=%x: tgt_change_cnt=%x, rval=%x",
15046 15045 ptgt, lcount, pptr->port_link_cnt,
15047 15046 tcount, ptgt->tgt_change_cnt, rval);
15048 15047 return (rval);
15049 15048 }
15050 15049
15051 15050 /*
15052 15051 * Due to the lack of synchronization mechanisms, we perform
15053 15052 * periodic monitoring of our request. Because requests get
15054 15053 * dropped when another one supersedes them (either because
15055 15054 * of a link change or a target change), it is difficult to
15056 15055 * provide a clean synchronization mechanism (such as a
15057 15056 * semaphore or a condition variable) without exhaustively
15058 15057 * rewriting the mainline discovery code of this driver.
15059 15058 */
15060 15059 wait_ms = 500;
15061 15060
15062 15061 ntries = fcp_max_target_retries;
15063 15062
15064 15063 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15065 15064 FCP_BUF_LEVEL_3, 0,
15066 15065 "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
15067 15066 "lcount=%x::port_link_cnt=%x, "
15068 15067 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15069 15068 "tgt_tmp_cnt =%x",
15070 15069 ntries, ptgt, lcount, pptr->port_link_cnt,
15071 15070 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15072 15071 ptgt->tgt_tmp_cnt);
15073 15072
15074 15073 mutex_enter(&ptgt->tgt_mutex);
15075 15074 while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
15076 15075 ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
15077 15076 mutex_exit(&ptgt->tgt_mutex);
15078 15077 mutex_exit(&pptr->port_mutex);
15079 15078
15080 15079 delay(drv_usectohz(wait_ms * 1000));
15081 15080
15082 15081 mutex_enter(&pptr->port_mutex);
15083 15082 mutex_enter(&ptgt->tgt_mutex);
15084 15083 }
15085 15084
15086 15085
15087 15086 if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
15088 15087 rval = EBUSY;
15089 15088 } else {
15090 15089 if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
15091 15090 FCP_TGT_NODE_PRESENT) {
15092 15091 rval = 0;
15093 15092 }
15094 15093 }
15095 15094
15096 15095 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15097 15096 FCP_BUF_LEVEL_3, 0,
15098 15097 "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
15099 15098 "lcount=%x::port_link_cnt=%x, "
15100 15099 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15101 15100 "tgt_tmp_cnt =%x",
15102 15101 ntries, ptgt, lcount, pptr->port_link_cnt,
15103 15102 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15104 15103 ptgt->tgt_tmp_cnt);
15105 15104
15106 15105 if (rval) {
15107 15106 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15108 15107 fcp_enable_auto_configuration && old_manual) {
15109 15108 ptgt->tgt_manual_config_only = 1;
15110 15109 }
15111 15110 mutex_exit(&ptgt->tgt_mutex);
15112 15111 mutex_exit(&pptr->port_mutex);
15113 15112 kmem_free(devlist, sizeof (*devlist));
15114 15113
15115 15114 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15116 15115 FCP_BUF_LEVEL_3, 0,
15117 15116 "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
15118 15117 "lcount=%x::port_link_cnt=%x, "
15119 15118 "tcount=%x::tgt_change_cnt=%x, rval=%x, "
15120 15119 "tgt_device_created=%x, tgt D_ID=%x",
15121 15120 ntries, ptgt, lcount, pptr->port_link_cnt,
15122 15121 tcount, ptgt->tgt_change_cnt, rval,
15123 15122 ptgt->tgt_device_created, ptgt->tgt_d_id);
15124 15123 return (rval);
15125 15124 }
15126 15125
15127 15126 if ((plun = ptgt->tgt_lun) != NULL) {
15128 15127 tcount = plun->lun_tgt->tgt_change_cnt;
15129 15128 } else {
15130 15129 rval = EINVAL;
15131 15130 }
15132 15131 lcount = pptr->port_link_cnt;
15133 15132
15134 15133 /*
15135 15134 * Configuring the target with no LUNs will fail. We
15136 15135 * should reset the node state so that it is not
15137 15136 * automatically configured when the LUNs are added
15138 15137 * to this target.
15139 15138 */
15140 15139 if (ptgt->tgt_lun_cnt == 0) {
15141 15140 ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
15142 15141 }
15143 15142 mutex_exit(&ptgt->tgt_mutex);
15144 15143 mutex_exit(&pptr->port_mutex);
15145 15144
15146 15145 while (plun) {
15147 15146 child_info_t *cip;
15148 15147
15149 15148 mutex_enter(&plun->lun_mutex);
15150 15149 cip = plun->lun_cip;
15151 15150 mutex_exit(&plun->lun_mutex);
15152 15151
15153 15152 mutex_enter(&ptgt->tgt_mutex);
15154 15153 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
15155 15154 mutex_exit(&ptgt->tgt_mutex);
15156 15155
15157 15156 rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
15158 15157 FCP_ONLINE, lcount, tcount,
15159 15158 NDI_ONLINE_ATTACH);
15160 15159 if (rval != NDI_SUCCESS) {
15161 15160 FCP_TRACE(fcp_logq,
15162 15161 pptr->port_instbuf, fcp_trace,
15163 15162 FCP_BUF_LEVEL_3, 0,
15164 15163 "fcp_create_on_demand: "
15165 15164 "pass_to_hp_and_wait failed "
15166 15165 "rval=%x", rval);
15167 15166 rval = EIO;
15168 15167 } else {
15169 15168 mutex_enter(&LUN_TGT->tgt_mutex);
15170 15169 plun->lun_state &= ~(FCP_LUN_OFFLINE |
15171 15170 FCP_LUN_BUSY);
15172 15171 mutex_exit(&LUN_TGT->tgt_mutex);
15173 15172 }
15174 15173 mutex_enter(&ptgt->tgt_mutex);
15175 15174 }
15176 15175
15177 15176 plun = plun->lun_next;
15178 15177 mutex_exit(&ptgt->tgt_mutex);
15179 15178 }
15180 15179
15181 15180 kmem_free(devlist, sizeof (*devlist));
15182 15181
15183 15182 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15184 15183 fcp_enable_auto_configuration && old_manual) {
15185 15184 mutex_enter(&ptgt->tgt_mutex);
15186 15185 /* if successful then set manual to 0 */
15187 15186 if (rval == 0) {
15188 15187 ptgt->tgt_manual_config_only = 0;
15189 15188 } else {
15190 15189 /* reset to 1 so the user has to do the config */
15191 15190 ptgt->tgt_manual_config_only = 1;
15192 15191 }
15193 15192 mutex_exit(&ptgt->tgt_mutex);
15194 15193 }
15195 15194
15196 15195 return (rval);
15197 15196 }
15198 15197
15199 15198
15200 15199 static void
15201 15200 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
15202 15201 {
15203 15202 int count;
15204 15203 uchar_t byte;
15205 15204
15206 15205 count = 0;
15207 15206 while (*string) {
15208 15207 byte = FCP_ATOB(*string); string++;
15209 15208 byte = byte << 4 | FCP_ATOB(*string); string++;
15210 15209 bytes[count++] = byte;
15211 15210
15212 15211 if (count >= byte_len) {
15213 15212 break;
15214 15213 }
15215 15214 }
15216 15215 }
15217 15216
15218 15217 static void
15219 15218 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
15220 15219 {
15221 15220 int i;
15222 15221
15223 15222 for (i = 0; i < FC_WWN_SIZE; i++) {
15224 15223 (void) sprintf(string + (i * 2),
15225 15224 "%02x", wwn[i]);
15226 15225 }
15227 15226
15228 15227 }
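
fcp_ascii_to_wwn() packs two hex digits into each output byte with the driver's FCP_ATOB() macro, and fcp_wwn_to_ascii() is its inverse. A self-contained userland sketch of the same round trip; atob() stands in for FCP_ATOB() and WWN_SIZE for FC_WWN_SIZE, neither of which is used here.

#include <stdio.h>

#define	WWN_SIZE	8

/* hex digit to its value; stand-in for FCP_ATOB() */
static unsigned char
atob(char c)
{
	if (c >= '0' && c <= '9')
		return (c - '0');
	if (c >= 'a' && c <= 'f')
		return (c - 'a' + 10);
	if (c >= 'A' && c <= 'F')
		return (c - 'A' + 10);
	return (0);
}

static void
ascii_to_wwn(const char *s, unsigned char *bytes, unsigned int byte_len)
{
	unsigned int count;

	/* two ASCII hex digits per output byte */
	for (count = 0; count < byte_len && s[0] != '\0' && s[1] != '\0';
	    count++, s += 2) {
		bytes[count] = (unsigned char)((atob(s[0]) << 4) | atob(s[1]));
	}
}

static void
wwn_to_ascii(const unsigned char wwn[], char *s)
{
	int i;

	for (i = 0; i < WWN_SIZE; i++) {
		(void) sprintf(s + (i * 2), "%02x", wwn[i]);
	}
}

int
main(void)
{
	unsigned char wwn[WWN_SIZE];
	char buf[WWN_SIZE * 2 + 1];

	ascii_to_wwn("510000f010fd92a1", wwn, WWN_SIZE);
	wwn_to_ascii(wwn, buf);
	(void) printf("%s\n", buf);	/* prints 510000f010fd92a1 */
	return (0);
}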
15229 15228
15230 15229 static void
15231 15230 fcp_print_error(fc_packet_t *fpkt)
15232 15231 {
15233 15232 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
15234 15233 fpkt->pkt_ulp_private;
15235 15234 struct fcp_port *pptr;
15236 15235 struct fcp_tgt *ptgt;
15237 15236 struct fcp_lun *plun;
15238 15237 caddr_t buf;
15239 15238 int scsi_cmd = 0;
15240 15239
15241 15240 ptgt = icmd->ipkt_tgt;
15242 15241 plun = icmd->ipkt_lun;
15243 15242 pptr = ptgt->tgt_port;
15244 15243
15245 15244 buf = kmem_zalloc(256, KM_NOSLEEP);
15246 15245 if (buf == NULL) {
15247 15246 return;
15248 15247 }
15249 15248
15250 15249 switch (icmd->ipkt_opcode) {
15251 15250 case SCMD_REPORT_LUN:
15252 15251 (void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
15253 15252 " lun=0x%%x failed");
15254 15253 scsi_cmd++;
15255 15254 break;
15256 15255
15257 15256 case SCMD_INQUIRY_PAGE83:
15258 15257 (void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
15259 15258 " lun=0x%%x failed");
15260 15259 scsi_cmd++;
15261 15260 break;
15262 15261
15263 15262 case SCMD_INQUIRY:
15264 15263 (void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
15265 15264 " lun=0x%%x failed");
15266 15265 scsi_cmd++;
15267 15266 break;
15268 15267
15269 15268 case LA_ELS_PLOGI:
15270 15269 (void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
15271 15270 break;
15272 15271
15273 15272 case LA_ELS_PRLI:
15274 15273 (void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
15275 15274 break;
15276 15275 }
15277 15276
15278 15277 if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
15279 15278 struct fcp_rsp response, *rsp;
15280 15279 uchar_t asc, ascq;
15281 15280 caddr_t sense_key = NULL;
15282 15281 struct fcp_rsp_info fcp_rsp_err, *bep;
15283 15282
15284 15283 if (icmd->ipkt_nodma) {
15285 15284 rsp = (struct fcp_rsp *)fpkt->pkt_resp;
15286 15285 bep = (struct fcp_rsp_info *)((caddr_t)rsp +
15287 15286 sizeof (struct fcp_rsp));
15288 15287 } else {
15289 15288 rsp = &response;
15290 15289 bep = &fcp_rsp_err;
15291 15290
15292 15291 FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
15293 15292 sizeof (struct fcp_rsp));
15294 15293
15295 15294 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
15296 15295 bep, fpkt->pkt_resp_acc,
15297 15296 sizeof (struct fcp_rsp_info));
15298 15297 }
15299 15298
15300 15299
15301 15300 if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
15302 15301 (void) sprintf(buf + strlen(buf),
15303 15302 " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
15304 15303 " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
15305 15304 " senselen=%%x. Giving up");
15306 15305
15307 15306 fcp_log(CE_WARN, pptr->port_dip, buf,
15308 15307 ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
15309 15308 rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
15310 15309 rsp->fcp_u.fcp_status.reserved_1,
15311 15310 rsp->fcp_response_len, rsp->fcp_sense_len);
15312 15311
15313 15312 kmem_free(buf, 256);
15314 15313 return;
15315 15314 }
15316 15315
15317 15316 if (rsp->fcp_u.fcp_status.rsp_len_set &&
15318 15317 bep->rsp_code != FCP_NO_FAILURE) {
15319 15318 (void) sprintf(buf + strlen(buf),
15320 15319 " FCP Response code = 0x%x", bep->rsp_code);
15321 15320 }
15322 15321
15323 15322 if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
15324 15323 struct scsi_extended_sense sense_info, *sense_ptr;
15325 15324
15326 15325 if (icmd->ipkt_nodma) {
15327 15326 sense_ptr = (struct scsi_extended_sense *)
15328 15327 ((caddr_t)fpkt->pkt_resp +
15329 15328 sizeof (struct fcp_rsp) +
15330 15329 rsp->fcp_response_len);
15331 15330 } else {
15332 15331 sense_ptr = &sense_info;
15333 15332
15334 15333 FCP_CP_IN(fpkt->pkt_resp +
15335 15334 sizeof (struct fcp_rsp) +
15336 15335 rsp->fcp_response_len, &sense_info,
15337 15336 fpkt->pkt_resp_acc,
15338 15337 sizeof (struct scsi_extended_sense));
15339 15338 }
15340 15339
15341 15340 if (sense_ptr->es_key < NUM_SENSE_KEYS +
15342 15341 NUM_IMPL_SENSE_KEYS) {
15343 15342 sense_key = sense_keys[sense_ptr->es_key];
15344 15343 } else {
15345 15344 sense_key = "Undefined";
15346 15345 }
15347 15346
15348 15347 asc = sense_ptr->es_add_code;
15349 15348 ascq = sense_ptr->es_qual_code;
15350 15349
15351 15350 (void) sprintf(buf + strlen(buf),
15352 15351 ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
15353 15352 " Giving up");
15354 15353
15355 15354 fcp_log(CE_WARN, pptr->port_dip, buf,
15356 15355 ptgt->tgt_d_id, plun->lun_num, sense_key,
15357 15356 asc, ascq);
15358 15357 } else {
15359 15358 (void) sprintf(buf + strlen(buf),
15360 15359 " : SCSI status=%%x. Giving up");
15361 15360
15362 15361 fcp_log(CE_WARN, pptr->port_dip, buf,
15363 15362 ptgt->tgt_d_id, plun->lun_num,
15364 15363 rsp->fcp_u.fcp_status.scsi_status);
15365 15364 }
15366 15365 } else {
15367 15366 caddr_t state, reason, action, expln;
15368 15367
15369 15368 (void) fc_ulp_pkt_error(fpkt, &state, &reason,
15370 15369 &action, &expln);
15371 15370
15372 15371 (void) sprintf(buf + strlen(buf), ": State:%%s,"
15373 15372 " Reason:%%s. Giving up");
15374 15373
15375 15374 if (scsi_cmd) {
15376 15375 fcp_log(CE_WARN, pptr->port_dip, buf,
15377 15376 ptgt->tgt_d_id, plun->lun_num, state, reason);
15378 15377 } else {
15379 15378 fcp_log(CE_WARN, pptr->port_dip, buf,
15380 15379 ptgt->tgt_d_id, state, reason);
15381 15380 }
15382 15381 }
15383 15382
15384 15383 kmem_free(buf, 256);
15385 15384 }
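
fcp_print_error() assembles its message in two stages: the sprintf() calls above deliberately escape the conversion specifications ("%%x", "%%s") so that buf ends up holding literal "%x"/"%s" sequences, and buf is then handed to fcp_log() as the format string together with the run-time D_ID, LUN and sense values. A small userland sketch of the same two-stage formatting; the message text mirrors the strings above, and the numeric values are made up.

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char buf[256];

	/* stage 1: "%%x" writes a literal "%x" into the template */
	(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x lun=0x%%x failed");
	(void) sprintf(buf + strlen(buf), " : SCSI status=%%x. Giving up");

	/*
	 * Stage 2: buf itself becomes the format string (this is what
	 * fcp_log() does with the driver's buf).  Compilers may warn
	 * about the non-literal format string here.
	 */
	(void) printf(buf, 0x10500, 0x2, 0x8);
	(void) printf("\n");
	return (0);
}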
15386 15385
15387 15386
15388 15387 static int
15389 15388 fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
15390 15389 struct fcp_ipkt *icmd, int rval, caddr_t op)
15391 15390 {
15392 15391 int ret = DDI_FAILURE;
15393 15392 char *error;
15394 15393
15395 15394 switch (rval) {
15396 15395 case FC_DEVICE_BUSY_NEW_RSCN:
15397 15396 /*
15398 15397 * This means that there was a new RSCN that the transport
15399 15398 * knows about (which the ULP *may* know about too) but the
15400 15399 * pkt that was sent down was related to an older RSCN. So, we
15401 15400 * are just going to reset the retry count and deadline and
15402 15401 * continue to retry. The idea is that transport is currently
15403 15402 * working on the new RSCN and will soon let the ULPs know
15404 15403 * about it and when it does the existing logic will kick in
15405 15404 * where it will change the tcount to indicate that something
15406 15405 * changed on the target. So, rediscovery will start and there
15407 15406 * will not be an infinite retry.
15408 15407 *
15409 15408 * For a full flow of how the RSCN info is transferred back and
15410 15409 * forth, see fp.c
15411 15410 */
15412 15411 icmd->ipkt_retries = 0;
15413 15412 icmd->ipkt_port->port_deadline = fcp_watchdog_time +
15414 15413 FCP_ICMD_DEADLINE;
15415 15414
15416 15415 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15417 15416 FCP_BUF_LEVEL_3, 0,
15418 15417 "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
15419 15418 rval, ptgt->tgt_d_id);
15420 15419 /* FALLTHROUGH */
15421 15420
15422 15421 case FC_STATEC_BUSY:
15423 15422 case FC_DEVICE_BUSY:
15424 15423 case FC_PBUSY:
15425 15424 case FC_FBUSY:
15426 15425 case FC_TRAN_BUSY:
15427 15426 case FC_OFFLINE:
15428 15427 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15429 15428 FCP_BUF_LEVEL_3, 0,
15430 15429 "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
15431 15430 rval, ptgt->tgt_d_id);
15432 15431 if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15433 15432 fcp_is_retryable(icmd)) {
15434 15433 fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15435 15434 ret = DDI_SUCCESS;
15436 15435 }
15437 15436 break;
15438 15437
15439 15438 case FC_LOGINREQ:
15440 15439 /*
15441 15440 * FC_LOGINREQ used to be handled just like all the cases
15442 15441 * above. It has been changed so that a PRLI that fails
15443 15442 * with FC_LOGINREQ is handled differently from other ipkts that fail
15444 15443 * with FC_LOGINREQ. If a PRLI fails with FC_LOGINREQ it is
15445 15444 * a simple matter to turn it into a PLOGI instead, so that's
15446 15445 * exactly what we do here.
15447 15446 */
15448 15447 if (icmd->ipkt_opcode == LA_ELS_PRLI) {
15449 15448 ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
15450 15449 icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
15451 15450 icmd->ipkt_change_cnt, icmd->ipkt_cause);
15452 15451 } else {
15453 15452 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15454 15453 FCP_BUF_LEVEL_3, 0,
15455 15454 "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
15456 15455 rval, ptgt->tgt_d_id);
15457 15456 if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15458 15457 fcp_is_retryable(icmd)) {
15459 15458 fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15460 15459 ret = DDI_SUCCESS;
15461 15460 }
15462 15461 }
15463 15462 break;
15464 15463
15465 15464 default:
15466 15465 mutex_enter(&pptr->port_mutex);
15467 15466 mutex_enter(&ptgt->tgt_mutex);
15468 15467 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
15469 15468 mutex_exit(&ptgt->tgt_mutex);
15470 15469 mutex_exit(&pptr->port_mutex);
15471 15470
15472 15471 (void) fc_ulp_error(rval, &error);
15473 15472 fcp_log(CE_WARN, pptr->port_dip,
15474 15473 "!Failed to send %s to D_ID=%x error=%s",
15475 15474 op, ptgt->tgt_d_id, error);
15476 15475 } else {
15477 15476 FCP_TRACE(fcp_logq, pptr->port_instbuf,
15478 15477 fcp_trace, FCP_BUF_LEVEL_2, 0,
15479 15478 "fcp_handle_ipkt_errors,1: state change occurred"
15480 15479 " for D_ID=0x%x", ptgt->tgt_d_id);
15481 15480 mutex_exit(&ptgt->tgt_mutex);
15482 15481 mutex_exit(&pptr->port_mutex);
15483 15482 }
15484 15483 break;
15485 15484 }
15486 15485
15487 15486 return (ret);
15488 15487 }
15489 15488
15490 15489
15491 15490 /*
15492 15491 * Check for outstanding commands on any LUN of this target
15493 15492 */
15494 15493 static int
15495 15494 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15496 15495 {
15497 15496 struct fcp_lun *plun;
15498 15497 struct fcp_pkt *cmd;
15499 15498
15500 15499 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15501 15500 mutex_enter(&plun->lun_mutex);
15502 15501 for (cmd = plun->lun_pkt_head; cmd != NULL;
15503 15502 cmd = cmd->cmd_forw) {
15504 15503 if (cmd->cmd_state == FCP_PKT_ISSUED) {
15505 15504 mutex_exit(&plun->lun_mutex);
15506 15505 return (FC_SUCCESS);
15507 15506 }
15508 15507 }
15509 15508 mutex_exit(&plun->lun_mutex);
15510 15509 }
15511 15510
15512 15511 return (FC_FAILURE);
15513 15512 }
15514 15513
15515 15514 static fc_portmap_t *
15516 15515 fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
15517 15516 {
15518 15517 int i;
15519 15518 fc_portmap_t *devlist;
15520 15519 fc_portmap_t *devptr = NULL;
15521 15520 struct fcp_tgt *ptgt;
15522 15521
15523 15522 mutex_enter(&pptr->port_mutex);
15524 15523 for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
15525 15524 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15526 15525 ptgt = ptgt->tgt_next) {
15527 15526 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15528 15527 ++*dev_cnt;
15529 15528 }
15530 15529 }
15531 15530 }
15532 15531
15533 15532 devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
15534 15533 KM_NOSLEEP);
15535 15534 if (devlist == NULL) {
15536 15535 mutex_exit(&pptr->port_mutex);
15537 15536 fcp_log(CE_WARN, pptr->port_dip,
15538 15537 "!fcp%d: failed to allocate for portmap for construct map",
15539 15538 pptr->port_instance);
15540 15539 return (devptr);
15541 15540 }
15542 15541
15543 15542 for (i = 0; i < FCP_NUM_HASH; i++) {
15544 15543 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15545 15544 ptgt = ptgt->tgt_next) {
15546 15545 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15547 15546 int ret;
15548 15547
15549 15548 ret = fc_ulp_pwwn_to_portmap(
15550 15549 pptr->port_fp_handle,
15551 15550 (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
15552 15551 devlist);
15553 15552
15554 15553 if (ret == FC_SUCCESS) {
15555 15554 devlist++;
15556 15555 continue;
15557 15556 }
15558 15557
15559 15558 devlist->map_pd = NULL;
15560 15559 devlist->map_did.port_id = ptgt->tgt_d_id;
15561 15560 devlist->map_hard_addr.hard_addr =
15562 15561 ptgt->tgt_hard_addr;
15563 15562
15564 15563 devlist->map_state = PORT_DEVICE_INVALID;
15565 15564 devlist->map_type = PORT_DEVICE_OLD;
15566 15565
15567 15566 bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
15568 15567 &devlist->map_nwwn, FC_WWN_SIZE);
15569 15568
15570 15569 bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
15571 15570 &devlist->map_pwwn, FC_WWN_SIZE);
15572 15571
15573 15572 devlist++;
15574 15573 }
15575 15574 }
15576 15575 }
15577 15576
15578 15577 mutex_exit(&pptr->port_mutex);
15579 15578
15580 15579 return (devptr);
15581 15580 }
15582 15581 /*
15583 15582 * Notify MPxIO that the LUN is busy and cannot accept regular IO
15584 15583 */
15585 15584 static void
15586 15585 fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
15587 15586 {
15588 15587 int i;
15589 15588 struct fcp_tgt *ptgt;
15590 15589 struct fcp_lun *plun;
15591 15590
15592 15591 for (i = 0; i < FCP_NUM_HASH; i++) {
15593 15592 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15594 15593 ptgt = ptgt->tgt_next) {
15595 15594 mutex_enter(&ptgt->tgt_mutex);
15596 15595 for (plun = ptgt->tgt_lun; plun != NULL;
15597 15596 plun = plun->lun_next) {
15598 15597 if (plun->lun_mpxio &&
15599 15598 plun->lun_state & FCP_LUN_BUSY) {
15600 15599 if (!fcp_pass_to_hp(pptr, plun,
15601 15600 plun->lun_cip,
15602 15601 FCP_MPXIO_PATH_SET_BUSY,
15603 15602 pptr->port_link_cnt,
15604 15603 ptgt->tgt_change_cnt, 0, 0)) {
15605 15604 FCP_TRACE(fcp_logq,
15606 15605 pptr->port_instbuf,
15607 15606 fcp_trace,
15608 15607 FCP_BUF_LEVEL_2, 0,
15609 15608 "path_verifybusy: "
15610 15609 "disable lun %p failed!",
15611 15610 plun);
15612 15611 }
15613 15612 }
15614 15613 }
15615 15614 mutex_exit(&ptgt->tgt_mutex);
15616 15615 }
15617 15616 }
15618 15617 }
15619 15618
15620 15619 static int
15621 15620 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
15622 15621 {
15623 15622 dev_info_t *cdip = NULL;
15624 15623 dev_info_t *pdip = NULL;
15625 15624
15626 15625 ASSERT(plun);
15627 15626
15628 15627 mutex_enter(&plun->lun_mutex);
15629 15628 if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
15630 15629 mutex_exit(&plun->lun_mutex);
15631 15630 return (NDI_FAILURE);
15632 15631 }
15633 15632 mutex_exit(&plun->lun_mutex);
15634 15633 cdip = mdi_pi_get_client(PIP(cip));
15635 15634 pdip = mdi_pi_get_phci(PIP(cip));
15636 15635
15637 15636 ASSERT(cdip != NULL);
15638 15637 ASSERT(pdip != NULL);
15639 15638
15640 15639 if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
15641 15640 /* LUN ready for IO */
15642 15641 (void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15643 15642 } else {
15644 15643 /* LUN is busy and cannot accept IO */
15645 15644 (void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15646 15645 }
15647 15646 return (NDI_SUCCESS);
15648 15647 }
15649 15648
15650 15649 /*
15651 15650 * The caller must free the returned string, which is MAXPATHLEN
15652 15651 * bytes long. If the device is offline (instance number -1),
15653 15652 * NULL will be returned.
15654 15653 */
15655 15654 static char *
15656 15655 fcp_get_lun_path(struct fcp_lun *plun) {
15657 15656 dev_info_t *dip = NULL;
15658 15657 char *path = NULL;
15659 15658 mdi_pathinfo_t *pip = NULL;
15660 15659
15661 15660 if (plun == NULL) {
15662 15661 return (NULL);
15663 15662 }
15664 15663
15665 15664 mutex_enter(&plun->lun_mutex);
15666 15665 if (plun->lun_mpxio == 0) {
15667 15666 dip = DIP(plun->lun_cip);
15668 15667 mutex_exit(&plun->lun_mutex);
15669 15668 } else {
15670 15669 /*
15671 15670 * lun_cip must be accessed with lun_mutex held. Here
15672 15671 * plun->lun_cip either points to a valid node or it is NULL.
15673 15672 * Make a copy so that we can release lun_mutex.
15674 15673 */
15675 15674 pip = PIP(plun->lun_cip);
15676 15675
15677 15676 /*
15678 15677 * Increase ref count on the path so that we can release
15679 15678 * lun_mutex and still be sure that the pathinfo node (and thus
15680 15679 * also the client) is not deallocated. If pip is NULL, this
15681 15680 * has no effect.
15682 15681 */
15683 15682 mdi_hold_path(pip);
15684 15683
15685 15684 mutex_exit(&plun->lun_mutex);
15686 15685
15687 15686 /* Get the client. If pip is NULL, we get NULL. */
15688 15687 dip = mdi_pi_get_client(pip);
15689 15688 }
15690 15689
15691 15690 if (dip == NULL)
15692 15691 goto out;
15693 15692 if (ddi_get_instance(dip) < 0)
15694 15693 goto out;
15695 15694
15696 15695 path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
15697 15696 if (path == NULL)
15698 15697 goto out;
15699 15698
15700 15699 (void) ddi_pathname(dip, path);
15701 15700
15702 15701 /* Clean up. */
15703 15702 out:
15704 15703 if (pip != NULL)
15705 15704 mdi_rele_path(pip);
15706 15705
15707 15706 /*
15708 15707 * In reality, the user wants a fully valid path (one they can open)
15709 15708 * but this string is lacking the mount point, and the minor node.
15710 15709 * It would be nice if we could "figure these out" somehow
15711 15710 * and fill them in. Otherwise, the userland code has to understand
15712 15711 * driver specific details of which minor node is the "best" or
15713 15712 * "right" one to expose. (Ex: which slice is the whole disk, or
15714 15713 * which tape doesn't rewind)
15715 15714 */
15716 15715 return (path);
15717 15716 }
15718 15717
15719 15718 static int
15720 15719 fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
15721 15720 ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
15722 15721 {
15723 15722 int64_t reset_delay;
15724 15723 int rval, retry = 0;
15725 15724 struct fcp_port *pptr = fcp_dip2port(parent);
15726 15725
15727 15726 reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15728 15727 (ddi_get_lbolt64() - pptr->port_attach_time);
15729 15728 if (reset_delay < 0) {
15730 15729 reset_delay = 0;
15731 15730 }
15732 15731
15733 15732 if (fcp_bus_config_debug) {
15734 15733 flag |= NDI_DEVI_DEBUG;
15735 15734 }
15736 15735
15737 15736 switch (op) {
15738 15737 case BUS_CONFIG_ONE:
15739 15738 /*
15740 15739 * Retry the command since we need to ensure
15741 15740 * the fabric devices are available for root
15742 15741 */
15743 15742 while (retry++ < fcp_max_bus_config_retries) {
15744 15743 rval = (ndi_busop_bus_config(parent,
15745 15744 flag | NDI_MDI_FALLBACK, op,
15746 15745 arg, childp, (clock_t)reset_delay));
15747 15746 if (rval == 0) {
15748 15747 return (rval);
15749 15748 }
15750 15749 }
15751 15750
15752 15751 /*
15753 15752 * drain taskq to make sure nodes are created and then
15754 15753 * try again.
15755 15754 */
15756 15755 taskq_wait(DEVI(parent)->devi_taskq);
15757 15756 return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
15758 15757 op, arg, childp, 0));
15759 15758
15760 15759 case BUS_CONFIG_DRIVER:
15761 15760 case BUS_CONFIG_ALL: {
15762 15761 /*
15763 15762 * delay till all devices report in (port_tmp_cnt == 0)
15764 15763 * or FCP_INIT_WAIT_TIMEOUT
15765 15764 */
15766 15765 mutex_enter(&pptr->port_mutex);
15767 15766 while ((reset_delay > 0) && pptr->port_tmp_cnt) {
15768 15767 (void) cv_timedwait(&pptr->port_config_cv,
15769 15768 &pptr->port_mutex,
15770 15769 ddi_get_lbolt() + (clock_t)reset_delay);
15771 15770 reset_delay =
15772 15771 (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15773 15772 (ddi_get_lbolt64() - pptr->port_attach_time);
15774 15773 }
15775 15774 mutex_exit(&pptr->port_mutex);
15776 15775 /* drain taskq to make sure nodes are created */
15777 15776 taskq_wait(DEVI(parent)->devi_taskq);
15778 15777 return (ndi_busop_bus_config(parent, flag, op,
15779 15778 arg, childp, 0));
15780 15779 }
15781 15780
15782 15781 default:
15783 15782 return (NDI_FAILURE);
15784 15783 }
15785 15784 /*NOTREACHED*/
15786 15785 }
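
The BUS_CONFIG paths above budget their wait as whatever is left of FCP_INIT_WAIT_TIMEOUT since the port attached, clamped at zero, so a port that has been up for a while does not pay the full timeout on every bus config. A tiny sketch of that arithmetic with made-up tick values; USEC_TO_TICK() and the driver field names are only mirrored here, not used.

#include <stdio.h>

int
main(void)
{
	/* all values in clock ticks */
	long long init_wait_ticks = 9000;	/* USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT) */
	long long port_attach_time = 1000;	/* when the port attached */
	long long now = 4000;			/* ddi_get_lbolt64() */
	long long reset_delay;

	reset_delay = init_wait_ticks - (now - port_attach_time);
	if (reset_delay < 0)
		reset_delay = 0;
	(void) printf("wait up to %lld more ticks\n", reset_delay);	/* 6000 */
	return (0);
}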
15787 15786
15788 15787 static int
15789 15788 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15790 15789 ddi_bus_config_op_t op, void *arg)
15791 15790 {
15792 15791 if (fcp_bus_config_debug) {
15793 15792 flag |= NDI_DEVI_DEBUG;
15794 15793 }
15795 15794
15796 15795 return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15797 15796 }
15798 15797
15799 15798
15800 15799 /*
15801 15800 * Routine to copy GUID into the lun structure.
15802 15801 * Returns 0 if the copy was successful and 1 if it encountered a
15803 15802 * failure and did not copy the guid.
15804 15803 */
15805 15804 static int
15806 15805 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
15807 15806 {
15808 15807
15809 15808 int retval = 0;
15810 15809 unsigned int len;
15811 15810
15812 15811 if ((guidp == NULL) || (plun == NULL)) {
15813 15812 return (1);
15814 15813 }
15815 15814 /* add one for the null terminator */
15816 15815 len = strlen(guidp) + 1;
15817 15816
15818 15817 /*
15819 15818 * If plun->lun_guid has already been allocated,
15820 15819 * then check the size. If the size is exact, reuse
15821 15820 * it; if not, free it and allocate the required size.
15822 15821 * The reallocation should NOT typically happen
15823 15822 * unless the GUID reported changes between passes.
15824 15823 * We free up and allocate again even if the
15825 15824 * size was more than required. This is due to the
15826 15825 * fact that the field lun_guid_size serves the
15827 15826 * dual role of indicating the size of the GUID
15828 15827 * and ALSO the allocation size.
15829 15828 */
15830 15829 if (plun->lun_guid) {
15831 15830 if (plun->lun_guid_size != len) {
15832 15831 /*
15833 15832 * free the allocated memory and
15834 15833 * initialize the field
15835 15834 * lun_guid_size to 0.
15836 15835 */
15837 15836 kmem_free(plun->lun_guid, plun->lun_guid_size);
15838 15837 plun->lun_guid = NULL;
15839 15838 plun->lun_guid_size = 0;
15840 15839 }
15841 15840 }
15842 15841 /*
15843 15842 * alloc only if not already done.
15844 15843 */
15845 15844 if (plun->lun_guid == NULL) {
15846 15845 plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15847 15846 if (plun->lun_guid == NULL) {
15848 15847 cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block: "
15849 15848 "unable to allocate "
15850 15849 "memory for GUID!!! size %d", len);
15851 15850 retval = 1;
15852 15851 } else {
15853 15852 plun->lun_guid_size = len;
15854 15853 }
15855 15854 }
15856 15855 if (plun->lun_guid) {
15857 15856 /*
15858 15857 * now copy the GUID
15859 15858 */
15860 15859 bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15861 15860 }
15862 15861 return (retval);
15863 15862 }
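
The block comment above notes that lun_guid_size doubles as both the stored string length (terminator included) and the allocation size, which is why a size mismatch forces a free-and-reallocate instead of an in-place overwrite. A userland sketch of the same reuse-or-reallocate pattern; guid_cache and store_guid() are illustrative names, not driver interfaces.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct guid_cache {
	char	*buf;	/* plays the role of lun_guid */
	size_t	size;	/* plays the role of lun_guid_size */
};

static int
store_guid(struct guid_cache *cache, const char *guid)
{
	size_t len = strlen(guid) + 1;	/* add one for the terminator */

	/* wrong-sized buffer: free it and start over */
	if (cache->buf != NULL && cache->size != len) {
		free(cache->buf);
		cache->buf = NULL;
		cache->size = 0;
	}
	/* allocate only if not already done */
	if (cache->buf == NULL) {
		if ((cache->buf = calloc(1, len)) == NULL)
			return (1);
		cache->size = len;
	}
	(void) memcpy(cache->buf, guid, cache->size);
	return (0);
}

int
main(void)
{
	struct guid_cache c = { NULL, 0 };

	(void) store_guid(&c, "600a0b80001fabcd");	/* first allocation */
	(void) store_guid(&c, "600a0b80001fefff");	/* same size: reused */
	(void) store_guid(&c, "naa.600a0b80001fabcd");	/* resized */
	(void) printf("%s (%zu bytes)\n", c.buf, c.size);
	free(c.buf);
	return (0);
}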
15864 15863
15865 15864 /*
15866 15865 * fcp_reconfig_wait
15867 15866 *
15868 15867 * Wait for a rediscovery/reconfiguration to complete before continuing.
15869 15868 */
15870 15869
15871 15870 static void
15872 15871 fcp_reconfig_wait(struct fcp_port *pptr)
15873 15872 {
15874 15873 clock_t reconfig_start, wait_timeout;
15875 15874
15876 15875 /*
15877 15876 * Quick check. If pptr->port_tmp_cnt is 0, there is no
15878 15877 * reconfiguration in progress.
15879 15878 */
15880 15879
15881 15880 mutex_enter(&pptr->port_mutex);
15882 15881 if (pptr->port_tmp_cnt == 0) {
15883 15882 mutex_exit(&pptr->port_mutex);
15884 15883 return;
15885 15884 }
15886 15885 mutex_exit(&pptr->port_mutex);
15887 15886
15888 15887 /*
15889 15888 * If we cause a reconfig by raising power, delay until all devices
15890 15889 * report in (port_tmp_cnt returns to 0)
15891 15890 */
15892 15891
15893 15892 reconfig_start = ddi_get_lbolt();
15894 15893 wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);
15895 15894
15896 15895 mutex_enter(&pptr->port_mutex);
15897 15896
15898 15897 while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
15899 15898 pptr->port_tmp_cnt) {
15900 15899
15901 15900 (void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
15902 15901 reconfig_start + wait_timeout);
15903 15902 }
15904 15903
15905 15904 mutex_exit(&pptr->port_mutex);
15906 15905
15907 15906 /*
15908 15907 * Even if port_tmp_cnt isn't 0, continue without error. The port
15909 15908 * we want may still be ok. If not, it will error out later.
15910 15909 */
15911 15910 }
15912 15911
15913 15912 /*
15914 15913 * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
15915 15914 * We rely on the fcp_global_mutex to provide protection against changes to
15916 15915 * the fcp_lun_blacklist.
15917 15916 *
15918 15917 * You can describe a list of target port WWNs and LUN numbers which will
15919 15918 * not be configured. LUN numbers will be interpreted as decimal. White
15920 15919 * spaces and ',' can be used in the list of LUN numbers.
15921 15920 *
15922 15921 * To prevent LUNs 1 and 2 from being configured for target
15923 15922 * port 510000f010fd92a1 and target port 510000e012079df1, set:
15924 15923 *
15925 15924 * pwwn-lun-blacklist=
15926 15925 * "510000f010fd92a1,1,2",
15927 15926 * "510000e012079df1,1,2";
15928 15927 */
15929 15928 static void
15930 15929 fcp_read_blacklist(dev_info_t *dip,
15931 15930 struct fcp_black_list_entry **pplun_blacklist) {
15932 15931 char **prop_array = NULL;
15933 15932 char *curr_pwwn = NULL;
15934 15933 char *curr_lun = NULL;
15935 15934 uint32_t prop_item = 0;
15936 15935 int idx = 0;
15937 15936 int len = 0;
15938 15937
15939 15938 ASSERT(mutex_owned(&fcp_global_mutex));
15940 15939 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
15941 15940 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
15942 15941 LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
15943 15942 return;
15944 15943 }
15945 15944
15946 15945 for (idx = 0; idx < prop_item; idx++) {
15947 15946
15948 15947 curr_pwwn = prop_array[idx];
15949 15948 while (*curr_pwwn == ' ') {
15950 15949 curr_pwwn++;
15951 15950 }
15952 15951 if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
15953 15952 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15954 15953 ", please check.", curr_pwwn);
15955 15954 continue;
15956 15955 }
15957 15956 if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
15958 15957 (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
15959 15958 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15960 15959 ", please check.", curr_pwwn);
15961 15960 continue;
15962 15961 }
15963 15962 for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
15964 15963 if (isxdigit(curr_pwwn[len]) != TRUE) {
15965 15964 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
15966 15965 "blacklist, please check.", curr_pwwn);
15967 15966 break;
15968 15967 }
15969 15968 }
15970 15969 if (len != sizeof (la_wwn_t) * 2) {
15971 15970 continue;
15972 15971 }
15973 15972
15974 15973 curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
15975 15974 *(curr_lun - 1) = '\0';
15976 15975 fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
15977 15976 }
15978 15977
15979 15978 ddi_prop_free(prop_array);
15980 15979 }
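
A pwwn-lun-blacklist entry is a single string holding a 16-digit hex port WWN followed by a comma- or space-separated list of decimal LUN numbers; fcp_read_blacklist() above validates the WWN part and fcp_mask_pwwn_lun() below splits out the LUN numbers by hand. The userland sketch below performs the same validation and splitting, using strtok_r() and strtoul() as stand-ins for the driver's own scanning and ddi_strtoul().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>

#define	WWN_ASCII_LEN	16	/* sizeof (la_wwn_t) * 2 in the driver */

int
main(void)
{
	/* one pwwn-lun-blacklist entry as it would appear in fp.conf */
	char entry[] = "510000f010fd92a1,1,2";
	char *luns, *tok, *last, *pend;
	int i;

	/* the WWN must be exactly 16 hex digits followed by ',' or ' ' */
	if (strlen(entry) <= WWN_ASCII_LEN + 1 ||
	    (entry[WWN_ASCII_LEN] != ',' && entry[WWN_ASCII_LEN] != ' ')) {
		(void) fprintf(stderr, "invalid entry\n");
		return (1);
	}
	for (i = 0; i < WWN_ASCII_LEN; i++) {
		if (!isxdigit((unsigned char)entry[i])) {
			(void) fprintf(stderr, "invalid WWN\n");
			return (1);
		}
	}

	/* split the WWN from the LUN list, then parse each LUN in decimal */
	entry[WWN_ASCII_LEN] = '\0';
	luns = entry + WWN_ASCII_LEN + 1;
	for (tok = strtok_r(luns, ", ", &last); tok != NULL;
	    tok = strtok_r(NULL, ", ", &last)) {
		unsigned long lun = strtoul(tok, &pend, 10);

		if (*pend != '\0') {
			(void) fprintf(stderr, "bad LUN %s\n", tok);
			continue;
		}
		(void) printf("mask WWN %s LUN %lu\n", entry, lun);
	}
	return (0);
}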
15981 15980
15982 15981 /*
15983 15982 * Get the masking info about one remote target port designated by wwn.
15984 15983 * Lun ids could be separated by ',' or white spaces.
15985 15984 */
15986 15985 static void
15987 15986 fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
15988 15987 struct fcp_black_list_entry **pplun_blacklist) {
15989 15988 int idx = 0;
15990 15989 uint32_t offset = 0;
15991 15990 unsigned long lun_id = 0;
15992 15991 char lunid_buf[16];
15993 15992 char *pend = NULL;
15994 15993 int illegal_digit = 0;
15995 15994
15996 15995 while (offset < strlen(curr_lun)) {
15997 15996 while ((curr_lun[offset + idx] != ',') &&
15998 15997 (curr_lun[offset + idx] != '\0') &&
15999 15998 (curr_lun[offset + idx] != ' ')) {
16000 15999 if (isdigit(curr_lun[offset + idx]) == 0) {
16001 16000 illegal_digit++;
16002 16001 }
16003 16002 idx++;
16004 16003 }
16005 16004 if (illegal_digit > 0) {
16006 16005 offset += (idx+1); /* To the start of next lun */
16007 16006 idx = 0;
16008 16007 illegal_digit = 0;
16009 16008 fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
16010 16009 "the blacklist, please check digits.",
16011 16010 curr_lun, curr_pwwn);
16012 16011 continue;
16013 16012 }
16014 16013 if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
16015 16014 fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
16016 16015 "the blacklist, please check the length of LUN#.",
16017 16016 curr_lun, curr_pwwn);
16018 16017 break;
16019 16018 }
16020 16019 if (idx == 0) { /* ignore ' ' or ',' or '\0' */
16021 16020 offset++;
16022 16021 continue;
16023 16022 }
16024 16023
16025 16024 bcopy(curr_lun + offset, lunid_buf, idx);
16026 16025 lunid_buf[idx] = '\0';
16027 16026 if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
16028 16027 fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
16029 16028 } else {
16030 16029 fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
16031 16030 "the blacklist, please check %s.",
16032 16031 curr_lun, curr_pwwn, lunid_buf);
16033 16032 }
16034 16033 offset += (idx+1); /* To the start of next lun */
16035 16034 idx = 0;
16036 16035 }
16037 16036 }
16038 16037
16039 16038 /*
16040 16039 * Add one masking record
16041 16040 */
16042 16041 static void
16043 16042 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
16044 16043 struct fcp_black_list_entry **pplun_blacklist) {
16045 16044 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
16046 16045 struct fcp_black_list_entry *new_entry = NULL;
16047 16046 la_wwn_t wwn;
16048 16047
16049 16048 fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
16050 16049 while (tmp_entry) {
16051 16050 if ((bcmp(&tmp_entry->wwn, &wwn,
16052 16051 sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
16053 16052 return;
16054 16053 }
16055 16054
16056 16055 tmp_entry = tmp_entry->next;
16057 16056 }
16058 16057
16059 16058 /* add to black list */
16060 16059 new_entry = (struct fcp_black_list_entry *)kmem_zalloc
16061 16060 (sizeof (struct fcp_black_list_entry), KM_SLEEP);
16062 16061 bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
16063 16062 new_entry->lun = lun_id;
16064 16063 new_entry->masked = 0;
16065 16064 new_entry->next = *pplun_blacklist;
16066 16065 *pplun_blacklist = new_entry;
16067 16066 }
16068 16067
16069 16068 /*
16070 16069 * Check if we should mask the specified lun of this fcp_tgt
16071 16070 */
16072 16071 static int
16073 16072 fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id) {
16074 16073 struct fcp_black_list_entry *remote_port;
16075 16074
16076 16075 remote_port = fcp_lun_blacklist;
16077 16076 while (remote_port != NULL) {
16078 16077 if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
16079 16078 if (remote_port->lun == lun_id) {
16080 16079 remote_port->masked++;
16081 16080 if (remote_port->masked == 1) {
16082 16081 fcp_log(CE_NOTE, NULL, "LUN %d of port "
16083 16082 "%02x%02x%02x%02x%02x%02x%02x%02x "
16084 16083 "is masked due to black listing.\n",
16085 16084 lun_id, wwn->raw_wwn[0],
16086 16085 wwn->raw_wwn[1], wwn->raw_wwn[2],
16087 16086 wwn->raw_wwn[3], wwn->raw_wwn[4],
16088 16087 wwn->raw_wwn[5], wwn->raw_wwn[6],
16089 16088 wwn->raw_wwn[7]);
16090 16089 }
16091 16090 return (TRUE);
16092 16091 }
16093 16092 }
16094 16093 remote_port = remote_port->next;
16095 16094 }
16096 16095 return (FALSE);
16097 16096 }
16098 16097
16099 16098 /*
16100 16099 * Release all allocated resources
16101 16100 */
16102 16101 static void
16103 16102 fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist) {
16104 16103 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
16105 16104 struct fcp_black_list_entry *current_entry = NULL;
16106 16105
16107 16106 ASSERT(mutex_owned(&fcp_global_mutex));
16108 16107 /*
16109 16108 * Traverse all luns
16110 16109 */
16111 16110 while (tmp_entry) {
16112 16111 current_entry = tmp_entry;
16113 16112 tmp_entry = tmp_entry->next;
16114 16113 kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
16115 16114 }
16116 16115 *pplun_blacklist = NULL;
16117 16116 }
16118 16117
16119 16118 /*
16120 16119 * In fcp module,
16121 16120 * pkt@scsi_pkt, cmd@fcp_pkt, icmd@fcp_ipkt, fpkt@fc_packet, pptr@fcp_port
16122 16121 */
16123 16122 static struct scsi_pkt *
16124 16123 fcp_pseudo_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
16125 16124 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
16126 16125 int flags, int (*callback)(), caddr_t arg)
16127 16126 {
16128 16127 fcp_port_t *pptr = ADDR2FCP(ap);
16129 16128 fcp_pkt_t *cmd = NULL;
16130 16129 fc_frame_hdr_t *hp;
16131 16130
16132 16131 /*
16133 16132 * First step: get the packet
16134 16133 */
16135 16134 if (pkt == NULL) {
16136 16135 pkt = scsi_hba_pkt_alloc(pptr->port_dip, ap, cmdlen, statuslen,
16137 16136 tgtlen, sizeof (fcp_pkt_t) + pptr->port_priv_pkt_len,
16138 16137 callback, arg);
16139 16138 if (pkt == NULL) {
16140 16139 return (NULL);
16141 16140 }
16142 16141
16143 16142 /*
16144 16143 * All fields in scsi_pkt will be initialized properly or
16145 16144 * set to zero. We need to do nothing for scsi_pkt.
16146 16145 */
16147 16146 /*
16148 16147 * But it's our responsibility to link the other related data
16149 16148 * structures. Their initialization will be done just
16150 16149 * before the scsi_pkt is sent to the FCA.
16151 16150 */
16152 16151 cmd = PKT2CMD(pkt);
16153 16152 cmd->cmd_pkt = pkt;
16154 16153 cmd->cmd_fp_pkt = &cmd->cmd_fc_packet;
16155 16154 /*
16156 16155 * fc_packet_t
16157 16156 */
16158 16157 cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
16159 16158 cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
16160 16159 sizeof (struct fcp_pkt));
16161 16160 cmd->cmd_fp_pkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
16162 16161 cmd->cmd_fp_pkt->pkt_cmdlen = sizeof (struct fcp_cmd);
16163 16162 cmd->cmd_fp_pkt->pkt_resp = cmd->cmd_fcp_rsp;
16164 16163 cmd->cmd_fp_pkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
16165 16164 /*
16166 16165 * Fill in the Fibre Channel frame header
16167 16166 */
16168 16167 hp = &cmd->cmd_fp_pkt->pkt_cmd_fhdr;
16169 16168 hp->r_ctl = R_CTL_COMMAND;
16170 16169 hp->rsvd = 0;
16171 16170 hp->type = FC_TYPE_SCSI_FCP;
16172 16171 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
16173 16172 hp->seq_id = 0;
16174 16173 hp->df_ctl = 0;
16175 16174 hp->seq_cnt = 0;
16176 16175 hp->ox_id = 0xffff;
16177 16176 hp->rx_id = 0xffff;
16178 16177 hp->ro = 0;
16179 16178 } else {
16180 16179 /*
16181 16180 * We need to think about whether we should reset any elements
16182 16181 * in the related data structures.
16183 16182 */
16184 16183 FCP_TRACE(fcp_logq, pptr->port_instbuf,
16185 16184 fcp_trace, FCP_BUF_LEVEL_6, 0,
16186 16185 "reusing pkt, flags %d", flags);
16187 16186 cmd = PKT2CMD(pkt);
16188 16187 if (cmd->cmd_fp_pkt->pkt_pd) {
16189 16188 cmd->cmd_fp_pkt->pkt_pd = NULL;
16190 16189 }
16191 16190 }
16192 16191
16193 16192 /*
16194 16193 * Second step: dma allocation/move
16195 16194 */
16196 16195 if (bp && bp->b_bcount != 0) {
16197 16196 /*
16198 16197 * Mark if it's read or write
16199 16198 */
16200 16199 if (bp->b_flags & B_READ) {
16201 16200 cmd->cmd_flags |= CFLAG_IS_READ;
16202 16201 } else {
16203 16202 cmd->cmd_flags &= ~CFLAG_IS_READ;
16204 16203 }
16205 16204
16206 16205 bp_mapin(bp);
16207 16206 cmd->cmd_fp_pkt->pkt_data = bp->b_un.b_addr;
16208 16207 cmd->cmd_fp_pkt->pkt_datalen = bp->b_bcount;
16209 16208 cmd->cmd_fp_pkt->pkt_data_resid = 0;
16210 16209 } else {
16211 16210 /*
16212 16211 * It seldom happens, except when CLUSTER or SCSI_VHCI wants
16213 16212 * to send zero-length read/write.
16214 16213 */
16215 16214 cmd->cmd_fp_pkt->pkt_data = NULL;
16216 16215 cmd->cmd_fp_pkt->pkt_datalen = 0;
16217 16216 }
16218 16217
16219 16218 return (pkt);
16220 16219 }
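
When fcp_pseudo_init_pkt() allocates a fresh packet, it asks scsi_hba_pkt_alloc() for a single HBA-private area of sizeof (fcp_pkt_t) plus port_priv_pkt_len bytes and then carves it up: the fcp_pkt sits at the front, and pkt_fca_private points just past it. A toy userland sketch of that carving; toy_fcp_pkt and the sizes are made up, and only the pointer arithmetic mirrors the driver.

#include <stdio.h>
#include <stdlib.h>

/* toy stand-in for struct fcp_pkt; the fields are arbitrary */
struct toy_fcp_pkt {
	void	*cmd_pkt;
	void	*cmd_fp_pkt;
	char	cmd_fcp_cmd[32];
};

int
main(void)
{
	size_t fca_priv_len = 64;	/* stand-in for port_priv_pkt_len */
	char *priv;
	struct toy_fcp_pkt *cmd;
	void *fca_private;

	/* one contiguous private area, as scsi_hba_pkt_alloc() provides */
	priv = calloc(1, sizeof (struct toy_fcp_pkt) + fca_priv_len);
	if (priv == NULL)
		return (1);

	cmd = (struct toy_fcp_pkt *)priv;
	fca_private = priv + sizeof (struct toy_fcp_pkt);	/* FCA's part */

	(void) printf("fcp_pkt at %p, FCA private at %p (+%zu bytes)\n",
	    (void *)cmd, fca_private, sizeof (struct toy_fcp_pkt));
	free(priv);
	return (0);
}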
16221 16220
16222 16221 static void
16223 16222 fcp_pseudo_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16224 16223 {
16225 16224 fcp_port_t *pptr = ADDR2FCP(ap);
16226 16225
16227 16226 /*
16228 16227 * First we let the FCA uninitialize the private part.
16229 16228 */
16230 16229 (void) fc_ulp_uninit_packet(pptr->port_fp_handle,
16231 16230 PKT2CMD(pkt)->cmd_fp_pkt);
16232 16231
16233 16232 /*
16234 16233 * Then we uninitialize fc_packet.
16235 16234 */
16236 16235
16237 16236 /*
16238 16237 * Thirdly, we uninitialize fcp_pkt.
16239 16238 */
16240 16239
16241 16240 /*
16242 16241 * In the end, we free scsi_pkt.
16243 16242 */
16244 16243 scsi_hba_pkt_free(ap, pkt);
16245 16244 }
16246 16245
16247 16246 static int
16248 16247 fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt)
16249 16248 {
16250 16249 fcp_port_t *pptr = ADDR2FCP(ap);
16251 16250 fcp_lun_t *plun = ADDR2LUN(ap);
16252 16251 fcp_tgt_t *ptgt = plun->lun_tgt;
16253 16252 fcp_pkt_t *cmd = PKT2CMD(pkt);
16254 16253 fcp_cmd_t *fcmd = &cmd->cmd_fcp_cmd;
16255 16254 fc_packet_t *fpkt = cmd->cmd_fp_pkt;
16256 16255 int rval;
16257 16256
16258 16257 fpkt->pkt_pd = ptgt->tgt_pd_handle;
16259 16258 (void) fc_ulp_init_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt, 1);
16260 16259
16261 16260 /*
16262 16261 * Firstly, we need to initialize fcp_pkt_t.
16263 16262 * Secondly, we need to initialize fcp_cmd_t.
16264 16263 */
16265 16264 bcopy(pkt->pkt_cdbp, fcmd->fcp_cdb, pkt->pkt_cdblen);
16266 16265 fcmd->fcp_data_len = fpkt->pkt_datalen;
16267 16266 fcmd->fcp_ent_addr = plun->lun_addr;
16268 16267 if (pkt->pkt_flags & FLAG_HTAG) {
16269 16268 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
16270 16269 } else if (pkt->pkt_flags & FLAG_OTAG) {
16271 16270 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
16272 16271 } else if (pkt->pkt_flags & FLAG_STAG) {
16273 16272 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
16274 16273 } else {
16275 16274 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
16276 16275 }
16277 16276
16278 16277 if (cmd->cmd_flags & CFLAG_IS_READ) {
16279 16278 fcmd->fcp_cntl.cntl_read_data = 1;
16280 16279 fcmd->fcp_cntl.cntl_write_data = 0;
16281 16280 } else {
16282 16281 fcmd->fcp_cntl.cntl_read_data = 0;
16283 16282 fcmd->fcp_cntl.cntl_write_data = 1;
16284 16283 }
16285 16284
16286 16285 /*
16287 16286 * Then we need to initialize fc_packet_t too.
16288 16287 */
16289 16288 fpkt->pkt_timeout = pkt->pkt_time + 2;
16290 16289 fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
16291 16290 fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
16292 16291 if (cmd->cmd_flags & CFLAG_IS_READ) {
16293 16292 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
16294 16293 } else {
16295 16294 fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
16296 16295 }
16297 16296
16298 16297 if (pkt->pkt_flags & FLAG_NOINTR) {
16299 16298 fpkt->pkt_comp = NULL;
16300 16299 fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
16301 16300 } else {
16302 16301 fpkt->pkt_comp = fcp_cmd_callback;
16303 16302 fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
16304 16303 if (pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
16305 16304 fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
16306 16305 }
16307 16306 }
16308 16307
16309 16308 /*
16310 16309 * Lastly, we need to initialize the scsi_pkt.
16311 16310 */
16312 16311 pkt->pkt_reason = CMD_CMPLT;
16313 16312 pkt->pkt_state = 0;
16314 16313 pkt->pkt_statistics = 0;
16315 16314 pkt->pkt_resid = 0;
16316 16315
16317 16316 /*
16318 16317 * if interrupts aren't allowed (e.g. at dump time) then we'll
16319 16318 * have to do polled I/O
16320 16319 */
16321 16320 if (pkt->pkt_flags & FLAG_NOINTR) {
16322 16321 return (fcp_dopoll(pptr, cmd));
16323 16322 }
16324 16323
16325 16324 cmd->cmd_state = FCP_PKT_ISSUED;
16326 16325 rval = fcp_transport(pptr->port_fp_handle, fpkt, 0);
16327 16326 if (rval == FC_SUCCESS) {
16328 16327 return (TRAN_ACCEPT);
16329 16328 }
16330 16329
16331 16330 /*
16332 16331 * Need more consideration
16333 16332 *
16334 16333 * pkt->pkt_flags & FLAG_NOQUEUE could abort other pkt
16335 16334 */
16336 16335 cmd->cmd_state = FCP_PKT_IDLE;
16337 16336 if (rval == FC_TRAN_BUSY) {
16338 16337 return (TRAN_BUSY);
16339 16338 } else {
16340 16339 return (TRAN_FATAL_ERROR);
16341 16340 }
16342 16341 }
16343 16342
16344 16343 /*
16345 16344 * scsi_poll will always call tran_sync_pkt for pseudo FC-HBAs
16346 16345 * SCSA will initialize it to scsi_sync_cache_pkt for physical FC-HBAs
16347 16346 */
16348 16347 static void
16349 16348 fcp_pseudo_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16350 16349 {
16351 16350 FCP_TRACE(fcp_logq, "fcp_pseudo_sync_pkt", fcp_trace,
16352 16351 FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16353 16352 }
16354 16353
16355 16354 /*
16356 16355 * scsi_dmafree will always call tran_dmafree, when STATE_ARQ_DONE
16357 16356 */
16358 16357 static void
16359 16358 fcp_pseudo_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
16360 16359 {
16361 16360 FCP_TRACE(fcp_logq, "fcp_pseudo_dmafree", fcp_trace,
16362 16361 FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16363 16362 }
15492 lines elided