9702 HBA drivers don't need the redundant devfs_clean step
Reviewed by: Dan Fields <dan.fields@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
--- old/usr/src/uts/common/io/fibre-channel/ulp/fcp.c
+++ new/usr/src/uts/common/io/fibre-channel/ulp/fcp.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 +
21 22 /*
22 23 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
23 - *
24 + */
25 +
26 +/*
27 + * Copyright 2018 Nexenta Systems, Inc.
28 + */
29 +
30 +/*
24 31 * Fibre Channel SCSI ULP Mapping driver
25 32 */
26 33
27 34 #include <sys/scsi/scsi.h>
28 35 #include <sys/types.h>
29 36 #include <sys/varargs.h>
30 37 #include <sys/devctl.h>
31 38 #include <sys/thread.h>
32 39 #include <sys/thread.h>
33 40 #include <sys/open.h>
34 41 #include <sys/file.h>
35 42 #include <sys/sunndi.h>
36 43 #include <sys/console.h>
37 44 #include <sys/proc.h>
38 45 #include <sys/time.h>
39 46 #include <sys/utsname.h>
40 47 #include <sys/scsi/impl/scsi_reset_notify.h>
41 48 #include <sys/ndi_impldefs.h>
42 49 #include <sys/byteorder.h>
43 -#include <sys/fs/dv_node.h>
44 50 #include <sys/ctype.h>
45 51 #include <sys/sunmdi.h>
46 52
47 53 #include <sys/fibre-channel/fc.h>
48 54 #include <sys/fibre-channel/impl/fc_ulpif.h>
49 55 #include <sys/fibre-channel/ulp/fcpvar.h>
50 56
51 57 /*
52 58 * Discovery Process
53 59 * =================
54 60 *
55 61 * The discovery process is a major function of FCP. In order to help
56 62 * understand that function a flow diagram is given here. This diagram
57 63 * doesn't claim to cover all the cases and the events that can occur during
58 64 * the discovery process nor the subtleties of the code. The code paths shown
59 65 * are simplified. Its purpose is to help the reader (and potentially bug
60 66 * fixer) have an overall view of the logic of the code. For that reason the
61 67 * diagram covers the simple case of the line coming up cleanly or of a new
62 68 * port attaching to FCP while the link is up. The reader must keep in mind
63 69 * that:
64 70 *
65 71 * - There are special cases where bringing devices online and offline
66 72 * is driven by Ioctl.
67 73 *
68 74 * - The behavior of the discovery process can be modified through the
69 75 * .conf file.
70 76 *
71 77 * - The line can go down and come back up at any time during the
72 78 * discovery process which explains some of the complexity of the code.
73 79 *
74 80 * ............................................................................
75 81 *
76 82 * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
77 83 *
78 84 *
79 85 * +-------------------------+
80 86 * fp/fctl module --->| fcp_port_attach |
81 87 * +-------------------------+
82 88 * | |
83 89 * | |
84 90 * | v
85 91 * | +-------------------------+
86 92 * | | fcp_handle_port_attach |
87 93 * | +-------------------------+
88 94 * | |
89 95 * | |
90 96 * +--------------------+ |
91 97 * | |
92 98 * v v
93 99 * +-------------------------+
94 100 * | fcp_statec_callback |
95 101 * +-------------------------+
96 102 * |
97 103 * |
98 104 * v
99 105 * +-------------------------+
100 106 * | fcp_handle_devices |
101 107 * +-------------------------+
102 108 * |
103 109 * |
104 110 * v
105 111 * +-------------------------+
106 112 * | fcp_handle_mapflags |
107 113 * +-------------------------+
108 114 * |
109 115 * |
110 116 * v
111 117 * +-------------------------+
112 118 * | fcp_send_els |
113 119 * | |
114 120 * | PLOGI or PRLI To all the|
115 121 * | reachable devices. |
116 122 * +-------------------------+
117 123 *
118 124 *
119 125 * ............................................................................
120 126 *
121 127 * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
122 128 * STEP 1 are called (it is actually the same function).
123 129 *
124 130 *
125 131 * +-------------------------+
126 132 * | fcp_icmd_callback |
127 133 * fp/fctl module --->| |
128 134 * | callback for PLOGI and |
129 135 * | PRLI. |
130 136 * +-------------------------+
131 137 * |
132 138 * |
133 139 * Received PLOGI Accept /-\ Received PRLI Accept
134 140 * _ _ _ _ _ _ / \_ _ _ _ _ _
135 141 * | \ / |
136 142 * | \-/ |
137 143 * | |
138 144 * v v
139 145 * +-------------------------+ +-------------------------+
140 146 * | fcp_send_els | | fcp_send_scsi |
141 147 * | | | |
142 148 * | PRLI | | REPORT_LUN |
143 149 * +-------------------------+ +-------------------------+
144 150 *
145 151 * ............................................................................
146 152 *
147 153 * STEP 3: The callback functions of the SCSI commands issued by FCP are called
148 154 * (It is actually the same function).
149 155 *
150 156 *
151 157 * +-------------------------+
152 158 * fp/fctl module ------->| fcp_scsi_callback |
153 159 * +-------------------------+
154 160 * |
155 161 * |
156 162 * |
157 163 * Receive REPORT_LUN reply /-\ Receive INQUIRY PAGE83 reply
158 164 * _ _ _ _ _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ _ _ _ _
159 165 * | \ / |
160 166 * | \-/ |
161 167 * | | |
162 168 * | Receive INQUIRY reply| |
163 169 * | | |
164 170 * v v v
165 171 * +------------------------+ +----------------------+ +----------------------+
166 172 * | fcp_handle_reportlun | | fcp_handle_inquiry | | fcp_handle_page83 |
167 173 * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
168 174 * +------------------------+ +----------------------+ +----------------------+
169 175 * | | |
170 176 * | | |
171 177 * | | |
172 178 * v v |
173 179 * +-----------------+ +-----------------+ |
174 180 * | fcp_send_scsi | | fcp_send_scsi | |
175 181 * | | | | |
176 182 * | INQUIRY | | INQUIRY PAGE83 | |
177 183 * | (To each LUN) | +-----------------+ |
178 184 * +-----------------+ |
179 185 * |
180 186 * v
181 187 * +------------------------+
182 188 * | fcp_call_finish_init |
183 189 * +------------------------+
184 190 * |
185 191 * v
186 192 * +-----------------------------+
187 193 * | fcp_call_finish_init_held |
188 194 * +-----------------------------+
189 195 * |
190 196 * |
191 197 * All LUNs scanned /-\
192 198 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ / \
193 199 * | \ /
194 200 * | \-/
195 201 * v |
196 202 * +------------------+ |
197 203 * | fcp_finish_tgt | |
198 204 * +------------------+ |
199 205 * | Target Not Offline and |
200 206 * Target Not Offline and | not marked and tgt_node_state |
201 207 * marked /-\ not FCP_TGT_NODE_ON_DEMAND |
202 208 * _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ |
203 209 * | \ / | |
204 210 * | \-/ | |
205 211 * v v |
206 212 * +----------------------------+ +-------------------+ |
207 213 * | fcp_offline_target | | fcp_create_luns | |
208 214 * | | +-------------------+ |
209 215 * | A structure fcp_tgt_elem | | |
210 216 * | is created and queued in | v |
211 217 * | the FCP port list | +-------------------+ |
212 218 * | port_offline_tgts. It | | fcp_pass_to_hp | |
213 219 * | will be unqueued by the | | | |
214 220 * | watchdog timer. | | Called for each | |
215 221 * +----------------------------+ | LUN. Dispatches | |
216 222 * | | fcp_hp_task | |
217 223 * | +-------------------+ |
218 224 * | | |
219 225 * | | |
220 226 * | | |
221 227 * | +---------------->|
222 228 * | |
223 229 * +---------------------------------------------->|
224 230 * |
225 231 * |
226 232 * All the targets (devices) have been scanned /-\
227 233 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ / \
228 234 * | \ /
229 235 * | \-/
230 236 * +-------------------------------------+ |
231 237 * | fcp_finish_init | |
232 238 * | | |
233 239 * | Signal broadcasts the condition | |
234 240 * | variable port_config_cv of the FCP | |
235 241 * | port. One potential code sequence | |
236 242 * | waiting on the condition variable | |
237 243 * | the code sequence handling | |
238 244 * | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER| |
239 245 * | The other is in the function | |
240 246 * | fcp_reconfig_wait which is called | |
241 247 * | in the transmit path preventing IOs | |
242 248 * | from going through till the disco- | |
243 249 * | very process is over. | |
244 250 * +-------------------------------------+ |
245 251 * | |
246 252 * | |
247 253 * +--------------------------------->|
248 254 * |
249 255 * v
250 256 * Return
251 257 *
252 258 * ............................................................................
253 259 *
254 260 * STEP 4: The hot plug task is called (for each fcp_hp_elem).
255 261 *
256 262 *
257 263 * +-------------------------+
258 264 * | fcp_hp_task |
259 265 * +-------------------------+
260 266 * |
261 267 * |
262 268 * v
263 269 * +-------------------------+
264 270 * | fcp_trigger_lun |
265 271 * +-------------------------+
266 272 * |
267 273 * |
268 274 * v
269 275 * Bring offline /-\ Bring online
270 276 * _ _ _ _ _ _ _ _ _/ \_ _ _ _ _ _ _ _ _ _
271 277 * | \ / |
272 278 * | \-/ |
273 279 * v v
274 280 * +---------------------+ +-----------------------+
275 281 * | fcp_offline_child | | fcp_get_cip |
276 282 * +---------------------+ | |
277 283 * | Creates a dev_info_t |
278 284 * | or a mdi_pathinfo_t |
279 285 * | depending on whether |
280 286 * | mpxio is on or off. |
281 287 * +-----------------------+
282 288 * |
283 289 * |
284 290 * v
285 291 * +-----------------------+
286 292 * | fcp_online_child |
287 293 * | |
288 294 * | Set device online |
289 295 * | using NDI or MDI. |
290 296 * +-----------------------+
291 297 *
292 298 * ............................................................................
293 299 *
294 300 * STEP 5: The watchdog timer expires. The watchdog does much more than
295 301 * what is described here. We only show the target offline path.
296 302 *
297 303 *
298 304 * +--------------------------+
299 305 * | fcp_watch |
300 306 * +--------------------------+
301 307 * |
302 308 * |
303 309 * v
304 310 * +--------------------------+
305 311 * | fcp_scan_offline_tgts |
306 312 * +--------------------------+
307 313 * |
308 314 * |
309 315 * v
310 316 * +--------------------------+
311 317 * | fcp_offline_target_now |
312 318 * +--------------------------+
313 319 * |
314 320 * |
315 321 * v
316 322 * +--------------------------+
317 323 * | fcp_offline_tgt_luns |
318 324 * +--------------------------+
319 325 * |
320 326 * |
321 327 * v
322 328 * +--------------------------+
323 329 * | fcp_offline_lun |
324 330 * +--------------------------+
325 331 * |
326 332 * |
327 333 * v
328 334 * +----------------------------------+
329 335 * | fcp_offline_lun_now |
330 336 * | |
331 337 * | A request (or two if mpxio) is |
332 338 * | sent to the hot plug task using |
333 339 * | a fcp_hp_elem structure. |
334 340 * +----------------------------------+
335 341 */
336 342
337 343 /*
338 344 * Functions registered with DDI framework
339 345 */
340 346 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
341 347 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
342 348 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
343 349 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
344 350 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
345 351 cred_t *credp, int *rval);
346 352
347 353 /*
348 354 * Functions registered with FC Transport framework
349 355 */
350 356 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
351 357 fc_attach_cmd_t cmd, uint32_t s_id);
352 358 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
353 359 fc_detach_cmd_t cmd);
354 360 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
355 361 int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
356 362 uint32_t claimed);
357 363 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
358 364 fc_unsol_buf_t *buf, uint32_t claimed);
359 365 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
360 366 fc_unsol_buf_t *buf, uint32_t claimed);
361 367 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
362 368 uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
363 369 uint32_t dev_cnt, uint32_t port_sid);
364 370
365 371 /*
366 372 * Functions registered with SCSA framework
367 373 */
368 374 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
369 375 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
370 376 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
371 377 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
372 378 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
373 379 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
374 380 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
375 381 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
376 382 static int fcp_scsi_reset(struct scsi_address *ap, int level);
377 383 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
378 384 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
379 385 int whom);
380 386 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
381 387 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
382 388 void (*callback)(caddr_t), caddr_t arg);
383 389 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
384 390 char *name, ddi_eventcookie_t *event_cookiep);
385 391 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
386 392 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
387 393 ddi_callback_id_t *cb_id);
388 394 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
389 395 ddi_callback_id_t cb_id);
390 396 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
391 397 ddi_eventcookie_t eventid, void *impldata);
392 398 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
393 399 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
394 400 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
395 401 ddi_bus_config_op_t op, void *arg);
396 402
397 403 /*
398 404 * Internal functions
399 405 */
400 406 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
401 407 int mode, int *rval);
402 408
403 409 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
404 410 int mode, int *rval);
405 411 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
406 412 struct fcp_scsi_cmd *fscsi, int mode);
407 413 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
408 414 caddr_t base_addr, int mode);
409 415 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
410 416
411 417 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
412 418 la_wwn_t *pwwn, int *ret_val, int *fc_status, int *fc_pkt_state,
413 419 int *fc_pkt_reason, int *fc_pkt_action);
414 420 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
415 421 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
416 422 static int fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status,
417 423 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
418 424 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
419 425 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
420 426 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
421 427 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
422 428
423 429 static void fcp_handle_devices(struct fcp_port *pptr,
424 430 fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
425 431 fcp_map_tag_t *map_tag, int cause);
426 432 static int fcp_handle_mapflags(struct fcp_port *pptr,
427 433 struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
428 434 int tgt_cnt, int cause);
429 435 static int fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause);
430 436 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
431 437 struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
432 438 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
433 439 int cause);
434 440 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
435 441 uint32_t state);
436 442 static struct fcp_port *fcp_get_port(opaque_t port_handle);
437 443 static void fcp_unsol_callback(fc_packet_t *fpkt);
438 444 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
439 445 uchar_t r_ctl, uchar_t type);
440 446 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
441 447 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
442 448 struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
443 449 int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
444 450 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
445 451 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
446 452 int nodma, int flags);
447 453 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
448 454 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
449 455 uchar_t *wwn);
450 456 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
451 457 uint32_t d_id);
452 458 static void fcp_icmd_callback(fc_packet_t *fpkt);
453 459 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
454 460 int len, int lcount, int tcount, int cause, uint32_t rscn_count);
455 461 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
456 462 static void fcp_scsi_callback(fc_packet_t *fpkt);
457 463 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
458 464 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
459 465 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
460 466 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
461 467 uint16_t lun_num);
462 468 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
463 469 int link_cnt, int tgt_cnt, int cause);
464 470 static void fcp_finish_init(struct fcp_port *pptr);
465 471 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
466 472 int tgt_cnt, int cause);
467 473 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
468 474 int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags);
469 475 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
470 476 int link_cnt, int tgt_cnt, int nowait, int flags);
471 477 static void fcp_offline_target_now(struct fcp_port *pptr,
472 478 struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
473 479 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
474 480 int tgt_cnt, int flags);
475 481 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
476 482 int nowait, int flags);
477 483 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
478 484 int tgt_cnt);
479 485 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
480 486 int tgt_cnt, int flags);
481 487 static void fcp_scan_offline_luns(struct fcp_port *pptr);
482 488 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
483 489 static void fcp_update_offline_flags(struct fcp_lun *plun);
484 490 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
485 491 static void fcp_abort_commands(struct fcp_pkt *head, struct
486 492 fcp_port *pptr);
487 493 static void fcp_cmd_callback(fc_packet_t *fpkt);
488 494 static void fcp_complete_pkt(fc_packet_t *fpkt);
489 495 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
490 496 struct fcp_port *pptr);
491 497 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
492 498 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
493 499 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
494 500 static void fcp_dealloc_lun(struct fcp_lun *plun);
495 501 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
496 502 fc_portmap_t *map_entry, int link_cnt);
497 503 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
498 504 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
499 505 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
500 506 int internal);
501 507 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
502 508 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
503 509 uint32_t s_id, int instance);
504 510 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
505 511 int instance);
506 512 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
507 513 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
508 514 int);
509 515 static void fcp_kmem_cache_destructor(struct scsi_pkt *, scsi_hba_tran_t *);
510 516 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
511 517 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
512 518 int flags);
513 519 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
514 520 static int fcp_reset_target(struct scsi_address *ap, int level);
515 521 static int fcp_commoncap(struct scsi_address *ap, char *cap,
516 522 int val, int tgtonly, int doset);
517 523 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
518 524 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
519 525 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
520 526 int sleep);
521 527 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
522 528 uint32_t s_id, fc_attach_cmd_t cmd, int instance);
523 529 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
524 530 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
525 531 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
526 532 int lcount, int tcount);
527 533 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
528 534 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
529 535 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
530 536 int tgt_cnt);
531 537 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
532 538 dev_info_t *pdip, caddr_t name);
533 539 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
534 540 int lcount, int tcount, int flags, int *circ);
535 541 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
536 542 int lcount, int tcount, int flags, int *circ);
537 543 static void fcp_remove_child(struct fcp_lun *plun);
538 544 static void fcp_watch(void *arg);
539 545 static void fcp_check_reset_delay(struct fcp_port *pptr);
540 546 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
541 547 struct fcp_lun *rlun, int tgt_cnt);
542 548 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
543 549 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
544 550 uchar_t *wwn, uint16_t lun);
545 551 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
546 552 struct fcp_lun *plun);
547 553 static void fcp_post_callback(struct fcp_pkt *cmd);
548 554 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
549 555 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
550 556 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
551 557 child_info_t *cip);
552 558 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
553 559 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
554 560 int tgt_cnt, int flags);
555 561 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
556 562 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
557 563 int tgt_cnt, int flags, int wait);
558 564 static void fcp_retransport_cmd(struct fcp_port *pptr,
559 565 struct fcp_pkt *cmd);
560 566 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
561 567 uint_t statistics);
562 568 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
563 569 static void fcp_update_targets(struct fcp_port *pptr,
564 570 fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
565 571 static int fcp_call_finish_init(struct fcp_port *pptr,
566 572 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
567 573 static int fcp_call_finish_init_held(struct fcp_port *pptr,
568 574 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
569 575 static void fcp_reconfigure_luns(void * tgt_handle);
570 576 static void fcp_free_targets(struct fcp_port *pptr);
571 577 static void fcp_free_target(struct fcp_tgt *ptgt);
572 578 static int fcp_is_retryable(struct fcp_ipkt *icmd);
573 579 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
574 580 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
575 581 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
576 582 static void fcp_print_error(fc_packet_t *fpkt);
577 583 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
578 584 struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
579 585 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
580 586 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
581 587 uint32_t *dev_cnt);
582 588 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
583 589 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
584 590 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
585 591 struct fcp_ioctl *, struct fcp_port **);
586 592 static char *fcp_get_lun_path(struct fcp_lun *plun);
587 593 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
588 594 int *rval);
589 595 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
590 596 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
591 597 static char *fcp_get_lun_path(struct fcp_lun *plun);
592 598 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
593 599 int *rval);
594 600 static void fcp_reconfig_wait(struct fcp_port *pptr);
595 601
596 602 /*
597 603 * New functions added for mpxio support
598 604 */
599 605 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
600 606 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
601 607 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
602 608 int tcount);
603 609 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
604 610 dev_info_t *pdip);
605 611 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
606 612 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
607 613 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
608 614 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
609 615 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
610 616 int what);
611 617 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
612 618 fc_packet_t *fpkt);
613 619 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
614 620
615 621 /*
616 622 * New functions added for lun masking support
617 623 */
618 624 static void fcp_read_blacklist(dev_info_t *dip,
619 625 struct fcp_black_list_entry **pplun_blacklist);
620 626 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
621 627 struct fcp_black_list_entry **pplun_blacklist);
622 628 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
623 629 struct fcp_black_list_entry **pplun_blacklist);
624 630 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
625 631 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
626 632
627 633 /*
628 634 * New functions to support software FCA (like fcoei)
629 635 */
630 636 static struct scsi_pkt *fcp_pseudo_init_pkt(
631 637 struct scsi_address *ap, struct scsi_pkt *pkt,
632 638 struct buf *bp, int cmdlen, int statuslen,
633 639 int tgtlen, int flags, int (*callback)(), caddr_t arg);
634 640 static void fcp_pseudo_destroy_pkt(
635 641 struct scsi_address *ap, struct scsi_pkt *pkt);
636 642 static void fcp_pseudo_sync_pkt(
637 643 struct scsi_address *ap, struct scsi_pkt *pkt);
638 644 static int fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt);
639 645 static void fcp_pseudo_dmafree(
640 646 struct scsi_address *ap, struct scsi_pkt *pkt);
641 647
642 648 extern struct mod_ops mod_driverops;
643 649 /*
644 650 * This variable is defined in modctl.c and set to '1' after the root driver
645 651 * and fs are loaded. It serves as an indication that the root filesystem can
646 652 * be used.
647 653 */
648 654 extern int modrootloaded;
649 655 /*
650 656 * This table contains strings associated with the SCSI sense key codes. It
651 657 * is used by FCP to print a clear explanation of the code returned in the
652 658 * sense information by a device.
653 659 */
654 660 extern char *sense_keys[];
655 661 /*
656 662 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
657 663 * under this device that the paths to a physical device are created when
658 664 * MPxIO is used.
659 665 */
660 666 extern dev_info_t *scsi_vhci_dip;
661 667
662 668 /*
663 669 * Report lun processing
664 670 */
665 671 #define FCP_LUN_ADDRESSING 0x80
666 672 #define FCP_PD_ADDRESSING 0x00
667 673 #define FCP_VOLUME_ADDRESSING 0x40
668 674
669 675 #define FCP_SVE_THROTTLE 0x28 /* Vicom */
670 676 #define MAX_INT_DMA 0x7fffffff
671 677 /*
672 678 * Property definitions
673 679 */
674 680 #define NODE_WWN_PROP (char *)fcp_node_wwn_prop
675 681 #define PORT_WWN_PROP (char *)fcp_port_wwn_prop
676 682 #define TARGET_PROP (char *)fcp_target_prop
677 683 #define LUN_PROP (char *)fcp_lun_prop
678 684 #define SAM_LUN_PROP (char *)fcp_sam_lun_prop
679 685 #define CONF_WWN_PROP (char *)fcp_conf_wwn_prop
680 686 #define OBP_BOOT_WWN (char *)fcp_obp_boot_wwn
681 687 #define MANUAL_CFG_ONLY (char *)fcp_manual_config_only
682 688 #define INIT_PORT_PROP (char *)fcp_init_port_prop
683 689 #define TGT_PORT_PROP (char *)fcp_tgt_port_prop
684 690 #define LUN_BLACKLIST_PROP (char *)fcp_lun_blacklist_prop
685 691 /*
686 692 * Short hand macros.
687 693 */
688 694 #define LUN_PORT (plun->lun_tgt->tgt_port)
689 695 #define LUN_TGT (plun->lun_tgt)
690 696
691 697 /*
692 698 * Driver private macros
693 699 */
694 700 #define FCP_ATOB(x) (((x) >= '0' && (x) <= '9') ? ((x) - '0') : \
695 701 ((x) >= 'a' && (x) <= 'f') ? \
696 702 ((x) - 'a' + 10) : ((x) - 'A' + 10))
697 703
698 704 #define FCP_MAX(a, b) ((a) > (b) ? (a) : (b))
699 705
700 706 #define FCP_N_NDI_EVENTS \
701 707 (sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))
702 708
703 709 #define FCP_LINK_STATE_CHANGED(p, c) \
704 710 ((p)->port_link_cnt != (c)->ipkt_link_cnt)
705 711
706 712 #define FCP_TGT_STATE_CHANGED(t, c) \
707 713 ((t)->tgt_change_cnt != (c)->ipkt_change_cnt)
708 714
709 715 #define FCP_STATE_CHANGED(p, t, c) \
710 716 (FCP_TGT_STATE_CHANGED(t, c))
711 717
712 718 #define FCP_MUST_RETRY(fpkt) \
713 719 ((fpkt)->pkt_state == FC_PKT_LOCAL_BSY || \
714 720 (fpkt)->pkt_state == FC_PKT_LOCAL_RJT || \
715 721 (fpkt)->pkt_state == FC_PKT_TRAN_BSY || \
716 722 (fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS || \
717 723 (fpkt)->pkt_state == FC_PKT_NPORT_BSY || \
718 724 (fpkt)->pkt_state == FC_PKT_FABRIC_BSY || \
719 725 (fpkt)->pkt_state == FC_PKT_PORT_OFFLINE || \
720 726 (fpkt)->pkt_reason == FC_REASON_OFFLINE)
721 727
722 728 #define FCP_SENSE_REPORTLUN_CHANGED(es) \
723 729 ((es)->es_key == KEY_UNIT_ATTENTION && \
724 730 (es)->es_add_code == 0x3f && \
725 731 (es)->es_qual_code == 0x0e)
726 732
727 733 #define FCP_SENSE_NO_LUN(es) \
728 734 ((es)->es_key == KEY_ILLEGAL_REQUEST && \
729 735 (es)->es_add_code == 0x25 && \
730 736 (es)->es_qual_code == 0x0)
731 737
732 738 #define FCP_VERSION "20091208-1.192"
733 739 #define FCP_NAME_VERSION "SunFC FCP v" FCP_VERSION
734 740
735 741 #define FCP_NUM_ELEMENTS(array) \
736 742 (sizeof (array) / sizeof ((array)[0]))
737 743
738 744 /*
739 745 * Debugging, Error reporting, and tracing
740 746 */
741 747 #define FCP_LOG_SIZE 1024 * 1024
742 748
743 749 #define FCP_LEVEL_1 0x00001 /* attach/detach PM CPR */
744 750 #define FCP_LEVEL_2 0x00002 /* failures/Invalid data */
745 751 #define FCP_LEVEL_3 0x00004 /* state change, discovery */
746 752 #define FCP_LEVEL_4 0x00008 /* ULP messages */
747 753 #define FCP_LEVEL_5 0x00010 /* ELS/SCSI cmds */
748 754 #define FCP_LEVEL_6 0x00020 /* Transport failures */
749 755 #define FCP_LEVEL_7 0x00040
750 756 #define FCP_LEVEL_8 0x00080 /* I/O tracing */
751 757 #define FCP_LEVEL_9 0x00100 /* I/O tracing */
752 758
753 759
754 760
755 761 /*
756 762 * Log contents to system messages file
757 763 */
758 764 #define FCP_MSG_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
759 765 #define FCP_MSG_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
760 766 #define FCP_MSG_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
761 767 #define FCP_MSG_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
762 768 #define FCP_MSG_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
763 769 #define FCP_MSG_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
764 770 #define FCP_MSG_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
765 771 #define FCP_MSG_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
766 772 #define FCP_MSG_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_MSG)
767 773
768 774
769 775 /*
770 776 * Log contents to trace buffer
771 777 */
772 778 #define FCP_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
773 779 #define FCP_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
774 780 #define FCP_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
775 781 #define FCP_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
776 782 #define FCP_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
777 783 #define FCP_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
778 784 #define FCP_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
779 785 #define FCP_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
780 786 #define FCP_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF)
781 787
782 788
783 789 /*
784 790 * Log contents to both system messages file and trace buffer
785 791 */
786 792 #define FCP_MSG_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF | \
787 793 FC_TRACE_LOG_MSG)
788 794 #define FCP_MSG_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF | \
789 795 FC_TRACE_LOG_MSG)
790 796 #define FCP_MSG_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF | \
791 797 FC_TRACE_LOG_MSG)
792 798 #define FCP_MSG_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF | \
793 799 FC_TRACE_LOG_MSG)
794 800 #define FCP_MSG_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF | \
795 801 FC_TRACE_LOG_MSG)
796 802 #define FCP_MSG_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF | \
797 803 FC_TRACE_LOG_MSG)
798 804 #define FCP_MSG_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF | \
799 805 FC_TRACE_LOG_MSG)
800 806 #define FCP_MSG_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF | \
801 807 FC_TRACE_LOG_MSG)
802 808 #define FCP_MSG_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF | \
803 809 FC_TRACE_LOG_MSG)
804 810 #ifdef DEBUG
805 811 #define FCP_DTRACE fc_trace_debug
806 812 #else
807 813 #define FCP_DTRACE
808 814 #endif
809 815
810 816 #define FCP_TRACE fc_trace_debug
811 817
812 818 static struct cb_ops fcp_cb_ops = {
813 819 fcp_open, /* open */
814 820 fcp_close, /* close */
815 821 nodev, /* strategy */
816 822 nodev, /* print */
817 823 nodev, /* dump */
818 824 nodev, /* read */
819 825 nodev, /* write */
820 826 fcp_ioctl, /* ioctl */
821 827 nodev, /* devmap */
822 828 nodev, /* mmap */
823 829 nodev, /* segmap */
824 830 nochpoll, /* chpoll */
825 831 ddi_prop_op, /* cb_prop_op */
826 832 0, /* streamtab */
827 833 D_NEW | D_MP | D_HOTPLUG, /* cb_flag */
828 834 CB_REV, /* rev */
829 835 nodev, /* aread */
830 836 nodev /* awrite */
831 837 };
832 838
833 839
834 840 static struct dev_ops fcp_ops = {
835 841 DEVO_REV,
836 842 0,
837 843 ddi_getinfo_1to1,
838 844 nulldev, /* identify */
839 845 nulldev, /* probe */
840 846 fcp_attach, /* attach and detach are mandatory */
841 847 fcp_detach,
842 848 nodev, /* reset */
843 849 &fcp_cb_ops, /* cb_ops */
844 850 NULL, /* bus_ops */
845 851 NULL, /* power */
846 852 };
847 853
848 854
849 855 char *fcp_version = FCP_NAME_VERSION;
850 856
851 857 static struct modldrv modldrv = {
852 858 &mod_driverops,
853 859 FCP_NAME_VERSION,
854 860 &fcp_ops
855 861 };
856 862
857 863
858 864 static struct modlinkage modlinkage = {
859 865 MODREV_1,
860 866 &modldrv,
861 867 NULL
862 868 };
863 869
864 870
865 871 static fc_ulp_modinfo_t fcp_modinfo = {
866 872 &fcp_modinfo, /* ulp_handle */
867 873 FCTL_ULP_MODREV_4, /* ulp_rev */
868 874 FC4_SCSI_FCP, /* ulp_type */
869 875 "fcp", /* ulp_name */
870 876 FCP_STATEC_MASK, /* ulp_statec_mask */
871 877 fcp_port_attach, /* ulp_port_attach */
872 878 fcp_port_detach, /* ulp_port_detach */
873 879 fcp_port_ioctl, /* ulp_port_ioctl */
874 880 fcp_els_callback, /* ulp_els_callback */
875 881 fcp_data_callback, /* ulp_data_callback */
876 882 fcp_statec_callback /* ulp_statec_callback */
877 883 };
878 884
879 885 #ifdef DEBUG
880 886 #define FCP_TRACE_DEFAULT (FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
881 887 FCP_LEVEL_2 | FCP_LEVEL_3 | \
882 888 FCP_LEVEL_4 | FCP_LEVEL_5 | \
883 889 FCP_LEVEL_6 | FCP_LEVEL_7)
884 890 #else
885 891 #define FCP_TRACE_DEFAULT (FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
886 892 FCP_LEVEL_2 | FCP_LEVEL_3 | \
887 893 FCP_LEVEL_4 | FCP_LEVEL_5 | \
888 894 FCP_LEVEL_6 | FCP_LEVEL_7)
889 895 #endif
890 896
891 897 /* FCP global variables */
892 898 int fcp_bus_config_debug = 0;
893 899 static int fcp_log_size = FCP_LOG_SIZE;
894 900 static int fcp_trace = FCP_TRACE_DEFAULT;
895 901 static fc_trace_logq_t *fcp_logq = NULL;
896 902 static struct fcp_black_list_entry *fcp_lun_blacklist = NULL;
897 903 /*
898 904 * The auto-configuration is set by default. The only way of disabling it is
899 905 * through the property MANUAL_CFG_ONLY in the fcp.conf file.
900 906 */
901 907 static int fcp_enable_auto_configuration = 1;
902 908 static int fcp_max_bus_config_retries = 4;
903 909 static int fcp_lun_ready_retry = 300;
904 910 /*
905 911 * The value assigned to the following variable has changed several times due
906 912 * to a problem with the data underruns reporting of some firmware(s). The
907 913 * current value of 50 gives a timeout value of 25 seconds for a max number
908 914 * of 256 LUNs.
909 915 */
910 916 static int fcp_max_target_retries = 50;
911 917 /*
912 918 * Watchdog variables
913 919 * ------------------
914 920 *
915 921 * fcp_watchdog_init
916 922 *
917 923 * Indicates if the watchdog timer is running or not. This is actually
918 924 * a counter of the number of Fibre Channel ports that attached. When
919 925 * the first port attaches the watchdog is started. When the last port
920 926 * detaches the watchdog timer is stopped.
921 927 *
922 928 * fcp_watchdog_time
923 929 *
924 930 * This is the watchdog clock counter. It is incremented by
925 931 * fcp_watchdog_timeout each time the watchdog timer expires.
926 932 *
927 933 * fcp_watchdog_timeout
928 934 *
929 935 * Increment value of the variable fcp_watchdog_time as well as the
930 936 * timeout value of the watchdog timer. The unit is 1 second. It
931 937 * is strange that this is not a #define but a variable since the code
932 938 * never changes this value. The reason why it can be said that the
933 939 * unit is 1 second is because the number of ticks for the watchdog
934 940 * timer is determined like this:
935 941 *
936 942 * fcp_watchdog_tick = fcp_watchdog_timeout *
937 943 * drv_usectohz(1000000);
938 944 *
939 945 * The value 1000000 is hard coded in the code.
940 946 *
941 947 * fcp_watchdog_tick
942 948 *
943 949 * Watchdog timer value in ticks.
944 950 */
945 951 static int fcp_watchdog_init = 0;
946 952 static int fcp_watchdog_time = 0;
947 953 static int fcp_watchdog_timeout = 1;
948 954 static int fcp_watchdog_tick;
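
Illustration only, not part of this change: a minimal sketch of how the watchdog variables described above relate, assuming the standard DDI timeout(9F) and drv_usectohz(9F) interfaces; the NULL callback argument is a placeholder.

	/* Tick count derived from the timeout expressed in seconds. */
	fcp_watchdog_tick = fcp_watchdog_timeout * drv_usectohz(1000000);
	/* Arm the timer; fcp_watch() is the expiry routine. */
	fcp_watchdog_id = timeout(fcp_watch, NULL, fcp_watchdog_tick);
	/* On each expiry the watchdog clock advances by the same amount. */
	fcp_watchdog_time += fcp_watchdog_timeout;
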
949 955
950 956 /*
951 957 * fcp_offline_delay is a global variable to enable customisation of
952 958 * the timeout on link offlines or RSCNs. The default value is set
953 959 * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
954 960 * specified in FCP4 Chapter 11 (see www.t10.org).
955 961 *
956 962 * The variable fcp_offline_delay is specified in SECONDS.
957 963 *
958 964 * If we made this a static var then the user would not be able to
959 965 * change it. This variable is set in fcp_attach().
960 966 */
961 967 unsigned int fcp_offline_delay = FCP_OFFLINE_DELAY;
962 968
963 969 static void *fcp_softstate = NULL; /* for soft state */
964 970 static uchar_t fcp_oflag = FCP_IDLE; /* open flag */
965 971 static kmutex_t fcp_global_mutex;
966 972 static kmutex_t fcp_ioctl_mutex;
967 973 static dev_info_t *fcp_global_dip = NULL;
968 974 static timeout_id_t fcp_watchdog_id;
969 975 const char *fcp_lun_prop = "lun";
970 976 const char *fcp_sam_lun_prop = "sam-lun";
971 977 const char *fcp_target_prop = "target";
972 978 /*
973 979 * NOTE: consumers of "node-wwn" property include stmsboot in ON
974 980 * consolidation.
975 981 */
976 982 const char *fcp_node_wwn_prop = "node-wwn";
977 983 const char *fcp_port_wwn_prop = "port-wwn";
978 984 const char *fcp_conf_wwn_prop = "fc-port-wwn";
979 985 const char *fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
980 986 const char *fcp_manual_config_only = "manual_configuration_only";
981 987 const char *fcp_init_port_prop = "initiator-port";
982 988 const char *fcp_tgt_port_prop = "target-port";
983 989 const char *fcp_lun_blacklist_prop = "pwwn-lun-blacklist";
984 990
985 991 static struct fcp_port *fcp_port_head = NULL;
986 992 static ddi_eventcookie_t fcp_insert_eid;
987 993 static ddi_eventcookie_t fcp_remove_eid;
988 994
989 995 static ndi_event_definition_t fcp_ndi_event_defs[] = {
990 996 { FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
991 997 { FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
992 998 };
993 999
994 1000 /*
995 1001 * List of valid commands for the scsi_ioctl call
996 1002 */
997 1003 static uint8_t scsi_ioctl_list[] = {
998 1004 SCMD_INQUIRY,
999 1005 SCMD_REPORT_LUN,
1000 1006 SCMD_READ_CAPACITY
1001 1007 };
1002 1008
1003 1009 /*
1004 1010 * this is used to dummy up a report lun response for cases
1005 1011 * where the target doesn't support it
1006 1012 */
1007 1013 static uchar_t fcp_dummy_lun[] = {
1008 1014 0x00, /* MSB length (length = no of luns * 8) */
1009 1015 0x00,
1010 1016 0x00,
1011 1017 0x08, /* LSB length */
1012 1018 0x00, /* MSB reserved */
1013 1019 0x00,
1014 1020 0x00,
1015 1021 0x00, /* LSB reserved */
1016 1022 FCP_PD_ADDRESSING,
1017 1023 0x00, /* LUN is ZERO at the first level */
1018 1024 0x00,
1019 1025 0x00, /* second level is zero */
1020 1026 0x00,
1021 1027 0x00, /* third level is zero */
1022 1028 0x00,
1023 1029 0x00 /* fourth level is zero */
1024 1030 };
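
As a point of reference (not part of this change), the dummy response above follows the REPORT LUNS data format noted in its comments: the leading 4-byte length field counts 8 bytes per LUN entry, so for a hypothetical LUN count n:

	/* Sketch: REPORT LUNS list length, stored big-endian in bytes 0-3. */
	uint32_t lun_list_length = n * 8;	/* n == 1 here, hence the 0x08 above */
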
1025 1031
1026 1032 static uchar_t fcp_alpa_to_switch[] = {
1027 1033 0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
1028 1034 0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
1029 1035 0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
1030 1036 0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
1031 1037 0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
1032 1038 0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
1033 1039 0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
1034 1040 0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
1035 1041 0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
1036 1042 0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
1037 1043 0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
1038 1044 0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
1039 1045 0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
1040 1046 0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
1041 1047 0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
1042 1048 0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
1043 1049 0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
1044 1050 0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
1045 1051 0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
1046 1052 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
1047 1053 0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
1048 1054 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
1049 1055 0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
1050 1056 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1051 1057 };
1052 1058
1053 1059 static caddr_t pid = "SESS01 ";
1054 1060
1055 1061 #if !defined(lint)
1056 1062
1057 1063 _NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
1058 1064 fcp_port::fcp_next fcp_watchdog_id))
1059 1065
1060 1066 _NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))
1061 1067
1062 1068 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1063 1069 fcp_insert_eid
1064 1070 fcp_remove_eid
1065 1071 fcp_watchdog_time))
1066 1072
1067 1073 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1068 1074 fcp_cb_ops
1069 1075 fcp_ops
1070 1076 callb_cpr))
1071 1077
1072 1078 #endif /* lint */
1073 1079
1074 1080 /*
1075 1081 * This table is used to determine whether or not it's safe to copy in
1076 1082 * the target node name for a lun. Since all luns behind the same target
1077 1083 * have the same wwnn, only targets that do not support multiple luns are
1078 1084 * eligible to be enumerated under mpxio if they aren't page83 compliant.
1079 1085 */
1080 1086
1081 1087 char *fcp_symmetric_disk_table[] = {
1082 1088 "SEAGATE ST",
1083 1089 "IBM DDYFT",
1084 1090 "SUNW SUNWGS", /* Daktari enclosure */
1085 1091 "SUN SENA", /* SES device */
1086 1092 "SUN SESS01" /* VICOM SVE box */
1087 1093 };
1088 1094
1089 1095 int fcp_symmetric_disk_table_size =
1090 1096 sizeof (fcp_symmetric_disk_table)/sizeof (char *);
1091 1097
1092 1098 /*
1093 1099 * This structure is bogus. scsi_hba_attach_setup() requires this information
1094 1100 * (the kernel will panic if you don't pass it in to the routine).
1095 1101 * Need to determine what the actual impact to the system is by providing
1096 1102 * this information if any. Since dma allocation is done in pkt_init it may
1097 1103 * not have any impact. These values are straight from the Writing Device
1098 1104 * Driver manual.
1099 1105 */
1100 1106 static ddi_dma_attr_t pseudo_fca_dma_attr = {
1101 1107 DMA_ATTR_V0, /* ddi_dma_attr version */
1102 1108 0, /* low address */
1103 1109 0xffffffff, /* high address */
1104 1110 0x00ffffff, /* counter upper bound */
1105 1111 1, /* alignment requirements */
1106 1112 0x3f, /* burst sizes */
1107 1113 1, /* minimum DMA access */
1108 1114 0xffffffff, /* maximum DMA access */
1109 1115 (1 << 24) - 1, /* segment boundary restrictions */
1110 1116 1, /* scatter/gather list length */
1111 1117 512, /* device granularity */
1112 1118 0 /* DMA flags */
1113 1119 };
1114 1120
1115 1121 /*
1116 1122 * The _init(9e) return value should be that of mod_install(9f). Under
1117 1123 * some circumstances, a failure may not be related to mod_install(9f) and
1118 1124 * one would then require a return value to indicate the failure. Looking
1119 1125 * at mod_install(9f), it is expected to return 0 for success and non-zero
1120 1126 * for failure. mod_install(9f), for device drivers, further goes down the
1121 1127 * calling chain and ends up in ddi_installdrv(), whose return values are
1122 1128 * DDI_SUCCESS and DDI_FAILURE - There are also other functions in the
1123 1129 * calling chain of mod_install(9f) which return values like EINVAL and
1124 1130 * in some cases even return -1.
1125 1131 *
1126 1132 * To work around the vagaries of the mod_install() calling chain, return
1127 1133 * either 0 or ENODEV depending on the success or failure of mod_install()
1128 1134 */
1129 1135 int
1130 1136 _init(void)
1131 1137 {
1132 1138 int rval;
1133 1139
1134 1140 /*
1135 1141 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
1136 1142 * before registering with the transport.
1137 1143 */
1138 1144 if (ddi_soft_state_init(&fcp_softstate,
1139 1145 sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1140 1146 return (EINVAL);
1141 1147 }
1142 1148
1143 1149 mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1144 1150 mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1145 1151
1146 1152 if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1147 1153 cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1148 1154 mutex_destroy(&fcp_global_mutex);
1149 1155 mutex_destroy(&fcp_ioctl_mutex);
1150 1156 ddi_soft_state_fini(&fcp_softstate);
1151 1157 return (ENODEV);
1152 1158 }
1153 1159
1154 1160 fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1155 1161
1156 1162 if ((rval = mod_install(&modlinkage)) != 0) {
1157 1163 fc_trace_free_logq(fcp_logq);
1158 1164 (void) fc_ulp_remove(&fcp_modinfo);
1159 1165 mutex_destroy(&fcp_global_mutex);
1160 1166 mutex_destroy(&fcp_ioctl_mutex);
1161 1167 ddi_soft_state_fini(&fcp_softstate);
1162 1168 rval = ENODEV;
1163 1169 }
1164 1170
1165 1171 return (rval);
1166 1172 }
1167 1173
1168 1174
1169 1175 /*
1170 1176 * the system is done with us as a driver, so clean up
1171 1177 */
1172 1178 int
1173 1179 _fini(void)
1174 1180 {
1175 1181 int rval;
1176 1182
1177 1183 /*
1178 1184 * don't start cleaning up until we know that the module remove
1179 1185 * has worked -- if this works, then we know that each instance
1180 1186 * has successfully been DDI_DETACHed
1181 1187 */
1182 1188 if ((rval = mod_remove(&modlinkage)) != 0) {
1183 1189 return (rval);
1184 1190 }
1185 1191
1186 1192 (void) fc_ulp_remove(&fcp_modinfo);
1187 1193
1188 1194 ddi_soft_state_fini(&fcp_softstate);
1189 1195 mutex_destroy(&fcp_global_mutex);
1190 1196 mutex_destroy(&fcp_ioctl_mutex);
1191 1197 fc_trace_free_logq(fcp_logq);
1192 1198
1193 1199 return (rval);
1194 1200 }
1195 1201
1196 1202
1197 1203 int
1198 1204 _info(struct modinfo *modinfop)
1199 1205 {
1200 1206 return (mod_info(&modlinkage, modinfop));
1201 1207 }
1202 1208
1203 1209
1204 1210 /*
1205 1211 * attach the module
1206 1212 */
1207 1213 static int
1208 1214 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1209 1215 {
1210 1216 int rval = DDI_SUCCESS;
1211 1217
1212 1218 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1213 1219 FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1214 1220
1215 1221 if (cmd == DDI_ATTACH) {
1216 1222 /* The FCP pseudo device is created here. */
1217 1223 mutex_enter(&fcp_global_mutex);
1218 1224 fcp_global_dip = devi;
1219 1225 mutex_exit(&fcp_global_mutex);
1220 1226
1221 1227 if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1222 1228 0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1223 1229 ddi_report_dev(fcp_global_dip);
1224 1230 } else {
1225 1231 cmn_err(CE_WARN, "FCP: Cannot create minor node");
1226 1232 mutex_enter(&fcp_global_mutex);
1227 1233 fcp_global_dip = NULL;
1228 1234 mutex_exit(&fcp_global_mutex);
1229 1235
1230 1236 rval = DDI_FAILURE;
1231 1237 }
1232 1238 /*
1233 1239 * We check the fcp_offline_delay property at this
1234 1240 * point. This variable is global for the driver,
1235 1241 * not specific to an instance.
1236 1242 *
1237 1243 * We do not recommend setting the value to less
1238 1244 * than 10 seconds (RA_TOV_els), or greater than
1239 1245 * 60 seconds.
1240 1246 */
1241 1247 fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1242 1248 devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1243 1249 "fcp_offline_delay", FCP_OFFLINE_DELAY);
1244 1250 if ((fcp_offline_delay < 10) ||
1245 1251 (fcp_offline_delay > 60)) {
1246 1252 cmn_err(CE_WARN, "Setting fcp_offline_delay "
1247 1253 "to %d second(s). This is outside the "
1248 1254 "recommended range of 10..60 seconds.",
1249 1255 fcp_offline_delay);
1250 1256 }
1251 1257 }
1252 1258
1253 1259 return (rval);
1254 1260 }
1255 1261
1256 1262
1257 1263 /*ARGSUSED*/
1258 1264 static int
1259 1265 fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1260 1266 {
1261 1267 int res = DDI_SUCCESS;
1262 1268
1263 1269 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1264 1270 FCP_BUF_LEVEL_8, 0, "module detach: cmd=0x%x", cmd);
1265 1271
1266 1272 if (cmd == DDI_DETACH) {
1267 1273 /*
1268 1274 * Check if there are active ports/threads. If there
1269 1275 * are any, we will fail, else we will succeed (there
1270 1276 * should not be much to clean up)
1271 1277 */
1272 1278 mutex_enter(&fcp_global_mutex);
1273 1279 FCP_DTRACE(fcp_logq, "fcp",
1274 1280 fcp_trace, FCP_BUF_LEVEL_8, 0, "port_head=%p",
1275 1281 (void *) fcp_port_head);
1276 1282
1277 1283 if (fcp_port_head == NULL) {
1278 1284 ddi_remove_minor_node(fcp_global_dip, NULL);
1279 1285 fcp_global_dip = NULL;
1280 1286 mutex_exit(&fcp_global_mutex);
1281 1287 } else {
1282 1288 mutex_exit(&fcp_global_mutex);
1283 1289 res = DDI_FAILURE;
1284 1290 }
1285 1291 }
1286 1292 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1287 1293 FCP_BUF_LEVEL_8, 0, "module detach returning %d", res);
1288 1294
1289 1295 return (res);
1290 1296 }
1291 1297
1292 1298
1293 1299 /* ARGSUSED */
1294 1300 static int
1295 1301 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1296 1302 {
1297 1303 if (otype != OTYP_CHR) {
1298 1304 return (EINVAL);
1299 1305 }
1300 1306
1301 1307 /*
1302 1308 * Allow only root to talk;
1303 1309 */
1304 1310 if (drv_priv(credp)) {
1305 1311 return (EPERM);
1306 1312 }
1307 1313
1308 1314 mutex_enter(&fcp_global_mutex);
1309 1315 if (fcp_oflag & FCP_EXCL) {
1310 1316 mutex_exit(&fcp_global_mutex);
1311 1317 return (EBUSY);
1312 1318 }
1313 1319
1314 1320 if (flag & FEXCL) {
1315 1321 if (fcp_oflag & FCP_OPEN) {
1316 1322 mutex_exit(&fcp_global_mutex);
1317 1323 return (EBUSY);
1318 1324 }
1319 1325 fcp_oflag |= FCP_EXCL;
1320 1326 }
1321 1327 fcp_oflag |= FCP_OPEN;
1322 1328 mutex_exit(&fcp_global_mutex);
1323 1329
1324 1330 return (0);
1325 1331 }
1326 1332
1327 1333
1328 1334 /* ARGSUSED */
1329 1335 static int
1330 1336 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1331 1337 {
1332 1338 if (otype != OTYP_CHR) {
1333 1339 return (EINVAL);
1334 1340 }
1335 1341
1336 1342 mutex_enter(&fcp_global_mutex);
1337 1343 if (!(fcp_oflag & FCP_OPEN)) {
1338 1344 mutex_exit(&fcp_global_mutex);
1339 1345 return (ENODEV);
1340 1346 }
1341 1347 fcp_oflag = FCP_IDLE;
1342 1348 mutex_exit(&fcp_global_mutex);
1343 1349
1344 1350 return (0);
1345 1351 }
1346 1352
1347 1353
1348 1354 /*
1349 1355 * fcp_ioctl
1350 1356 * Entry point for the FCP ioctls
1351 1357 *
1352 1358 * Input:
1353 1359 * See ioctl(9E)
1354 1360 *
1355 1361 * Output:
1356 1362 * See ioctl(9E)
1357 1363 *
1358 1364 * Returns:
1359 1365 * See ioctl(9E)
1360 1366 *
1361 1367 * Context:
1362 1368 * Kernel context.
1363 1369 */
1364 1370 /* ARGSUSED */
1365 1371 static int
1366 1372 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1367 1373 int *rval)
1368 1374 {
1369 1375 int ret = 0;
1370 1376
1371 1377 mutex_enter(&fcp_global_mutex);
1372 1378 if (!(fcp_oflag & FCP_OPEN)) {
1373 1379 mutex_exit(&fcp_global_mutex);
1374 1380 return (ENXIO);
1375 1381 }
1376 1382 mutex_exit(&fcp_global_mutex);
1377 1383
1378 1384 switch (cmd) {
1379 1385 case FCP_TGT_INQUIRY:
1380 1386 case FCP_TGT_CREATE:
1381 1387 case FCP_TGT_DELETE:
1382 1388 ret = fcp_setup_device_data_ioctl(cmd,
1383 1389 (struct fcp_ioctl *)data, mode, rval);
1384 1390 break;
1385 1391
1386 1392 case FCP_TGT_SEND_SCSI:
1387 1393 mutex_enter(&fcp_ioctl_mutex);
1388 1394 ret = fcp_setup_scsi_ioctl(
1389 1395 (struct fcp_scsi_cmd *)data, mode, rval);
1390 1396 mutex_exit(&fcp_ioctl_mutex);
1391 1397 break;
1392 1398
1393 1399 case FCP_STATE_COUNT:
1394 1400 ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1395 1401 mode, rval);
1396 1402 break;
1397 1403 case FCP_GET_TARGET_MAPPINGS:
1398 1404 ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1399 1405 mode, rval);
1400 1406 break;
1401 1407 default:
1402 1408 fcp_log(CE_WARN, NULL,
1403 1409 "!Invalid ioctl opcode = 0x%x", cmd);
1404 1410 ret = EINVAL;
1405 1411 }
1406 1412
1407 1413 return (ret);
1408 1414 }
1409 1415
1410 1416
1411 1417 /*
1412 1418 * fcp_setup_device_data_ioctl
1413 1419 * Setup handler for the "device data" style of
1414 1420 * ioctl for FCP. See "fcp_util.h" for data structure
1415 1421 * definition.
1416 1422 *
1417 1423 * Input:
1418 1424 * cmd = FCP ioctl command
1419 1425 * data = ioctl data
1420 1426 * mode = See ioctl(9E)
1421 1427 *
1422 1428 * Output:
1423 1429 * data = ioctl data
1424 1430 * rval = return value - see ioctl(9E)
1425 1431 *
1426 1432 * Returns:
1427 1433 * See ioctl(9E)
1428 1434 *
1429 1435 * Context:
1430 1436 * Kernel context.
1431 1437 */
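/*
 * Usage sketch (hypothetical userland caller; "fd", "port_instance"
 * and "pwwn" are illustrative): for FCP_TGT_INQUIRY, fcp_ioctl.list
 * points at an array of struct device_data whose dev_pwwn fields carry
 * the port WWNs to query; on success the driver fills in dev0_type,
 * dev_status and dev_lun_cnt for each element.
 *
 *	struct device_data dev_data[1];
 *	struct fcp_ioctl fioctl;
 *
 *	(void) memset(dev_data, 0, sizeof (dev_data));
 *	(void) memcpy(&dev_data[0].dev_pwwn, pwwn, FC_WWN_SIZE);
 *	fioctl.fp_minor = port_instance;
 *	fioctl.listlen = 1;
 *	fioctl.list = (caddr_t)dev_data;
 *	if (ioctl(fd, FCP_TGT_INQUIRY, &fioctl) == 0 &&
 *	    dev_data[0].dev_status == 0)
 *		(void) printf("lun count %u, lun 0 type 0x%x\n",
 *		    (uint_t)dev_data[0].dev_lun_cnt,
 *		    (uint_t)dev_data[0].dev0_type);
 */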
1432 1438 /* ARGSUSED */
1433 1439 static int
1434 1440 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1435 1441 int *rval)
1436 1442 {
1437 1443 struct fcp_port *pptr;
1438 1444 struct device_data *dev_data;
1439 1445 uint32_t link_cnt;
1440 1446 la_wwn_t *wwn_ptr = NULL;
1441 1447 struct fcp_tgt *ptgt = NULL;
1442 1448 struct fcp_lun *plun = NULL;
1443 1449 int i, error;
1444 1450 struct fcp_ioctl fioctl;
1445 1451
1446 1452 #ifdef _MULTI_DATAMODEL
1447 1453 switch (ddi_model_convert_from(mode & FMODELS)) {
1448 1454 case DDI_MODEL_ILP32: {
1449 1455 struct fcp32_ioctl f32_ioctl;
1450 1456
1451 1457 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1452 1458 sizeof (struct fcp32_ioctl), mode)) {
1453 1459 return (EFAULT);
1454 1460 }
1455 1461 fioctl.fp_minor = f32_ioctl.fp_minor;
1456 1462 fioctl.listlen = f32_ioctl.listlen;
1457 1463 fioctl.list = (caddr_t)(long)f32_ioctl.list;
1458 1464 break;
1459 1465 }
1460 1466 case DDI_MODEL_NONE:
1461 1467 if (ddi_copyin((void *)data, (void *)&fioctl,
1462 1468 sizeof (struct fcp_ioctl), mode)) {
1463 1469 return (EFAULT);
1464 1470 }
1465 1471 break;
1466 1472 }
1467 1473
1468 1474 #else /* _MULTI_DATAMODEL */
1469 1475 if (ddi_copyin((void *)data, (void *)&fioctl,
1470 1476 sizeof (struct fcp_ioctl), mode)) {
1471 1477 return (EFAULT);
1472 1478 }
1473 1479 #endif /* _MULTI_DATAMODEL */
1474 1480
1475 1481 /*
1476 1482 * Right now we can assume that the minor number matches with
1477 1483 * this instance of fp. If this changes we will need to
1478 1484 * revisit this logic.
1479 1485 */
1480 1486 mutex_enter(&fcp_global_mutex);
1481 1487 pptr = fcp_port_head;
1482 1488 while (pptr) {
1483 1489 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1484 1490 break;
1485 1491 } else {
1486 1492 pptr = pptr->port_next;
1487 1493 }
1488 1494 }
1489 1495 mutex_exit(&fcp_global_mutex);
1490 1496 if (pptr == NULL) {
1491 1497 return (ENXIO);
1492 1498 }
1493 1499 mutex_enter(&pptr->port_mutex);
1494 1500
1495 1501
1496 1502 if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1497 1503 fioctl.listlen, KM_NOSLEEP)) == NULL) {
1498 1504 mutex_exit(&pptr->port_mutex);
1499 1505 return (ENOMEM);
1500 1506 }
1501 1507
1502 1508 if (ddi_copyin(fioctl.list, dev_data,
1503 1509 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1504 1510 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1505 1511 mutex_exit(&pptr->port_mutex);
1506 1512 return (EFAULT);
1507 1513 }
1508 1514 link_cnt = pptr->port_link_cnt;
1509 1515
1510 1516 if (cmd == FCP_TGT_INQUIRY) {
1511 1517 wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1512 1518 if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1513 1519 sizeof (wwn_ptr->raw_wwn)) == 0) {
1514 1520 /* This ioctl is requesting INQ info of local HBA */
1515 1521 mutex_exit(&pptr->port_mutex);
1516 1522 dev_data[0].dev0_type = DTYPE_UNKNOWN;
1517 1523 dev_data[0].dev_status = 0;
1518 1524 if (ddi_copyout(dev_data, fioctl.list,
1519 1525 (sizeof (struct device_data)) * fioctl.listlen,
1520 1526 mode)) {
1521 1527 kmem_free(dev_data,
1522 1528 sizeof (*dev_data) * fioctl.listlen);
1523 1529 return (EFAULT);
1524 1530 }
1525 1531 kmem_free(dev_data,
1526 1532 sizeof (*dev_data) * fioctl.listlen);
1527 1533 #ifdef _MULTI_DATAMODEL
1528 1534 switch (ddi_model_convert_from(mode & FMODELS)) {
1529 1535 case DDI_MODEL_ILP32: {
1530 1536 struct fcp32_ioctl f32_ioctl;
1531 1537 f32_ioctl.fp_minor = fioctl.fp_minor;
1532 1538 f32_ioctl.listlen = fioctl.listlen;
1533 1539 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1534 1540 if (ddi_copyout((void *)&f32_ioctl,
1535 1541 (void *)data,
1536 1542 sizeof (struct fcp32_ioctl), mode)) {
1537 1543 return (EFAULT);
1538 1544 }
1539 1545 break;
1540 1546 }
1541 1547 case DDI_MODEL_NONE:
1542 1548 if (ddi_copyout((void *)&fioctl, (void *)data,
1543 1549 sizeof (struct fcp_ioctl), mode)) {
1544 1550 return (EFAULT);
1545 1551 }
1546 1552 break;
1547 1553 }
1548 1554 #else /* _MULTI_DATAMODEL */
1549 1555 if (ddi_copyout((void *)&fioctl, (void *)data,
1550 1556 sizeof (struct fcp_ioctl), mode)) {
1551 1557 return (EFAULT);
1552 1558 }
1553 1559 #endif /* _MULTI_DATAMODEL */
1554 1560 return (0);
1555 1561 }
1556 1562 }
1557 1563
1558 1564 if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1559 1565 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1560 1566 mutex_exit(&pptr->port_mutex);
1561 1567 return (ENXIO);
1562 1568 }
1563 1569
1564 1570 for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1565 1571 i++) {
1566 1572 wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1567 1573
1568 1574 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1569 1575
1570 1576
1571 1577 dev_data[i].dev_status = ENXIO;
1572 1578
1573 1579 if ((ptgt = fcp_lookup_target(pptr,
1574 1580 (uchar_t *)wwn_ptr)) == NULL) {
1575 1581 mutex_exit(&pptr->port_mutex);
1576 1582 if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1577 1583 wwn_ptr, &error, 0) == NULL) {
1578 1584 dev_data[i].dev_status = ENODEV;
1579 1585 mutex_enter(&pptr->port_mutex);
1580 1586 continue;
1581 1587 } else {
1582 1588
1583 1589 dev_data[i].dev_status = EAGAIN;
1584 1590
1585 1591 mutex_enter(&pptr->port_mutex);
1586 1592 continue;
1587 1593 }
1588 1594 } else {
1589 1595 mutex_enter(&ptgt->tgt_mutex);
1590 1596 if (ptgt->tgt_state & (FCP_TGT_MARK |
1591 1597 FCP_TGT_BUSY)) {
1592 1598 dev_data[i].dev_status = EAGAIN;
1593 1599 mutex_exit(&ptgt->tgt_mutex);
1594 1600 continue;
1595 1601 }
1596 1602
1597 1603 if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1598 1604 if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1599 1605 dev_data[i].dev_status = ENOTSUP;
1600 1606 } else {
1601 1607 dev_data[i].dev_status = ENXIO;
1602 1608 }
1603 1609 mutex_exit(&ptgt->tgt_mutex);
1604 1610 continue;
1605 1611 }
1606 1612
1607 1613 switch (cmd) {
1608 1614 case FCP_TGT_INQUIRY:
1609 1615 /*
1610 1616 				 * We report only the device type of
1611 1617 				 * lun 0, even though in some cases
1612 1618 				 * (like maxstrat) the lun 0 device
1613 1619 				 * type may be 0x3f (invalid): for
1614 1620 				 * bridge boxes the target appears as
1615 1621 				 * luns, and the first lun could be a
1616 1622 				 * device that the utility may not
1617 1623 				 * care about (like a tape device).
1618 1624 */
1619 1625 dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1620 1626 dev_data[i].dev_status = 0;
1621 1627 mutex_exit(&ptgt->tgt_mutex);
1622 1628
1623 1629 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1624 1630 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1625 1631 } else {
1626 1632 dev_data[i].dev0_type = plun->lun_type;
1627 1633 }
1628 1634 mutex_enter(&ptgt->tgt_mutex);
1629 1635 break;
1630 1636
1631 1637 case FCP_TGT_CREATE:
1632 1638 mutex_exit(&ptgt->tgt_mutex);
1633 1639 mutex_exit(&pptr->port_mutex);
1634 1640
1635 1641 /*
1636 1642 				 * Serialize state change callbacks;
1637 1643 				 * only one callback will be handled
1638 1644 				 * at a time.
1639 1645 */
1640 1646 mutex_enter(&fcp_global_mutex);
1641 1647 if (fcp_oflag & FCP_BUSY) {
1642 1648 mutex_exit(&fcp_global_mutex);
1643 1649 if (dev_data) {
1644 1650 kmem_free(dev_data,
1645 1651 sizeof (*dev_data) *
1646 1652 fioctl.listlen);
1647 1653 }
1648 1654 return (EBUSY);
1649 1655 }
1650 1656 fcp_oflag |= FCP_BUSY;
1651 1657 mutex_exit(&fcp_global_mutex);
1652 1658
1653 1659 dev_data[i].dev_status =
1654 1660 fcp_create_on_demand(pptr,
1655 1661 wwn_ptr->raw_wwn);
1656 1662
1657 1663 if (dev_data[i].dev_status != 0) {
1658 1664 char buf[25];
1659 1665
1660 1666 for (i = 0; i < FC_WWN_SIZE; i++) {
1661 1667 (void) sprintf(&buf[i << 1],
1662 1668 "%02x",
1663 1669 wwn_ptr->raw_wwn[i]);
1664 1670 }
1665 1671
1666 1672 fcp_log(CE_WARN, pptr->port_dip,
1667 1673 "!Failed to create nodes for"
1668 1674 " pwwn=%s; error=%x", buf,
1669 1675 dev_data[i].dev_status);
1670 1676 }
1671 1677
1672 1678 				/* allow state change callbacks again */
1673 1679 mutex_enter(&fcp_global_mutex);
1674 1680 fcp_oflag &= ~FCP_BUSY;
1675 1681 mutex_exit(&fcp_global_mutex);
1676 1682
1677 1683 mutex_enter(&pptr->port_mutex);
1678 1684 mutex_enter(&ptgt->tgt_mutex);
1679 1685
1680 1686 break;
1681 1687
1682 1688 case FCP_TGT_DELETE:
1683 1689 break;
1684 1690
1685 1691 default:
1686 1692 fcp_log(CE_WARN, pptr->port_dip,
1687 1693 "!Invalid device data ioctl "
1688 1694 "opcode = 0x%x", cmd);
1689 1695 }
1690 1696 mutex_exit(&ptgt->tgt_mutex);
1691 1697 }
1692 1698 }
1693 1699 mutex_exit(&pptr->port_mutex);
1694 1700
1695 1701 if (ddi_copyout(dev_data, fioctl.list,
1696 1702 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1697 1703 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1698 1704 return (EFAULT);
1699 1705 }
1700 1706 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1701 1707
1702 1708 #ifdef _MULTI_DATAMODEL
1703 1709 switch (ddi_model_convert_from(mode & FMODELS)) {
1704 1710 case DDI_MODEL_ILP32: {
1705 1711 struct fcp32_ioctl f32_ioctl;
1706 1712
1707 1713 f32_ioctl.fp_minor = fioctl.fp_minor;
1708 1714 f32_ioctl.listlen = fioctl.listlen;
1709 1715 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1710 1716 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1711 1717 sizeof (struct fcp32_ioctl), mode)) {
1712 1718 return (EFAULT);
1713 1719 }
1714 1720 break;
1715 1721 }
1716 1722 case DDI_MODEL_NONE:
1717 1723 if (ddi_copyout((void *)&fioctl, (void *)data,
1718 1724 sizeof (struct fcp_ioctl), mode)) {
1719 1725 return (EFAULT);
1720 1726 }
1721 1727 break;
1722 1728 }
1723 1729 #else /* _MULTI_DATAMODEL */
1724 1730
1725 1731 if (ddi_copyout((void *)&fioctl, (void *)data,
1726 1732 sizeof (struct fcp_ioctl), mode)) {
1727 1733 return (EFAULT);
1728 1734 }
1729 1735 #endif /* _MULTI_DATAMODEL */
1730 1736
1731 1737 return (0);
1732 1738 }
1733 1739
1734 1740 /*
1735 1741 * Fetch the target mappings (path, etc.) for all LUNs
1736 1742 * on this port.
1737 1743 */
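/*
 * Usage sketch (hypothetical userland caller; it assumes
 * fc_hba_target_mappings_t ends in a flexible entries[] array, and
 * "fd"/"port_instance" are illustrative): listlen carries the total
 * byte size of the buffer, and on return numLuns reports how many
 * entries the driver wanted to write; if that exceeds what fit, retry
 * with a larger buffer.
 *
 *	size_t sz = sizeof (fc_hba_target_mappings_t) +
 *	    127 * sizeof (fc_hba_mapping_entry_t);
 *	fc_hba_target_mappings_t *maps = malloc(sz);
 *	struct fcp_ioctl fioctl;
 *
 *	fioctl.fp_minor = port_instance;
 *	fioctl.listlen = sz;
 *	fioctl.list = (caddr_t)maps;
 *	if (maps != NULL &&
 *	    ioctl(fd, FCP_GET_TARGET_MAPPINGS, &fioctl) == 0)
 *		(void) printf("%u luns mapped\n", (uint_t)maps->numLuns);
 */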
1738 1744 /* ARGSUSED */
1739 1745 static int
1740 1746 fcp_get_target_mappings(struct fcp_ioctl *data,
1741 1747 int mode, int *rval)
1742 1748 {
1743 1749 struct fcp_port *pptr;
1744 1750 fc_hba_target_mappings_t *mappings;
1745 1751 fc_hba_mapping_entry_t *map;
1746 1752 struct fcp_tgt *ptgt = NULL;
1747 1753 struct fcp_lun *plun = NULL;
1748 1754 int i, mapIndex, mappingSize;
1749 1755 int listlen;
1750 1756 struct fcp_ioctl fioctl;
1751 1757 char *path;
1752 1758 fcp_ent_addr_t sam_lun_addr;
1753 1759
1754 1760 #ifdef _MULTI_DATAMODEL
1755 1761 switch (ddi_model_convert_from(mode & FMODELS)) {
1756 1762 case DDI_MODEL_ILP32: {
1757 1763 struct fcp32_ioctl f32_ioctl;
1758 1764
1759 1765 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1760 1766 sizeof (struct fcp32_ioctl), mode)) {
1761 1767 return (EFAULT);
1762 1768 }
1763 1769 fioctl.fp_minor = f32_ioctl.fp_minor;
1764 1770 fioctl.listlen = f32_ioctl.listlen;
1765 1771 fioctl.list = (caddr_t)(long)f32_ioctl.list;
1766 1772 break;
1767 1773 }
1768 1774 case DDI_MODEL_NONE:
1769 1775 if (ddi_copyin((void *)data, (void *)&fioctl,
1770 1776 sizeof (struct fcp_ioctl), mode)) {
1771 1777 return (EFAULT);
1772 1778 }
1773 1779 break;
1774 1780 }
1775 1781
1776 1782 #else /* _MULTI_DATAMODEL */
1777 1783 if (ddi_copyin((void *)data, (void *)&fioctl,
1778 1784 sizeof (struct fcp_ioctl), mode)) {
1779 1785 return (EFAULT);
1780 1786 }
1781 1787 #endif /* _MULTI_DATAMODEL */
1782 1788
1783 1789 /*
1784 1790 * Right now we can assume that the minor number matches with
1785 1791 * this instance of fp. If this changes we will need to
1786 1792 * revisit this logic.
1787 1793 */
1788 1794 mutex_enter(&fcp_global_mutex);
1789 1795 pptr = fcp_port_head;
1790 1796 while (pptr) {
1791 1797 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1792 1798 break;
1793 1799 } else {
1794 1800 pptr = pptr->port_next;
1795 1801 }
1796 1802 }
1797 1803 mutex_exit(&fcp_global_mutex);
1798 1804 if (pptr == NULL) {
1799 1805 cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
1800 1806 fioctl.fp_minor);
1801 1807 return (ENXIO);
1802 1808 }
1803 1809
1804 1810
1805 1811 /* We use listlen to show the total buffer size */
1806 1812 mappingSize = fioctl.listlen;
1807 1813
1808 1814 /* Now calculate how many mapping entries will fit */
1809 1815 listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
1810 1816 - sizeof (fc_hba_target_mappings_t);
1811 1817 if (listlen <= 0) {
1812 1818 cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
1813 1819 return (ENXIO);
1814 1820 }
1815 1821 listlen = listlen / sizeof (fc_hba_mapping_entry_t);
1816 1822
1817 1823 if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
1818 1824 return (ENOMEM);
1819 1825 }
1820 1826 mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;
1821 1827
1822 1828 /* Now get to work */
1823 1829 mapIndex = 0;
1824 1830
1825 1831 mutex_enter(&pptr->port_mutex);
1826 1832 /* Loop through all targets on this port */
1827 1833 for (i = 0; i < FCP_NUM_HASH; i++) {
1828 1834 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
1829 1835 ptgt = ptgt->tgt_next) {
1830 1836
1831 1837 mutex_enter(&ptgt->tgt_mutex);
1832 1838
1833 1839 /* Loop through all LUNs on this target */
1834 1840 for (plun = ptgt->tgt_lun; plun != NULL;
1835 1841 plun = plun->lun_next) {
1836 1842 if (plun->lun_state & FCP_LUN_OFFLINE) {
1837 1843 continue;
1838 1844 }
1839 1845
1840 1846 path = fcp_get_lun_path(plun);
1841 1847 if (path == NULL) {
1842 1848 continue;
1843 1849 }
1844 1850
1845 1851 if (mapIndex >= listlen) {
1846 1852 mapIndex ++;
1847 1853 kmem_free(path, MAXPATHLEN);
1848 1854 continue;
1849 1855 }
1850 1856 map = &mappings->entries[mapIndex++];
1851 1857 bcopy(path, map->targetDriver,
1852 1858 sizeof (map->targetDriver));
1853 1859 map->d_id = ptgt->tgt_d_id;
1854 1860 map->busNumber = 0;
1855 1861 map->targetNumber = ptgt->tgt_d_id;
1856 1862 map->osLUN = plun->lun_num;
1857 1863
1858 1864 /*
1859 1865 				 * We byte-swapped the lun when we stored
1860 1866 				 * it in lun_addr.  We need to swap it back
1861 1867 				 * before returning it to userland.
1862 1868 */
1863 1869
1864 1870 sam_lun_addr.ent_addr_0 =
1865 1871 BE_16(plun->lun_addr.ent_addr_0);
1866 1872 sam_lun_addr.ent_addr_1 =
1867 1873 BE_16(plun->lun_addr.ent_addr_1);
1868 1874 sam_lun_addr.ent_addr_2 =
1869 1875 BE_16(plun->lun_addr.ent_addr_2);
1870 1876 sam_lun_addr.ent_addr_3 =
1871 1877 BE_16(plun->lun_addr.ent_addr_3);
1872 1878
1873 1879 bcopy(&sam_lun_addr, &map->samLUN,
1874 1880 FCP_LUN_SIZE);
1875 1881 bcopy(ptgt->tgt_node_wwn.raw_wwn,
1876 1882 map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
1877 1883 bcopy(ptgt->tgt_port_wwn.raw_wwn,
1878 1884 map->PortWWN.raw_wwn, sizeof (la_wwn_t));
1879 1885
1880 1886 if (plun->lun_guid) {
1881 1887
1882 1888 /* convert ascii wwn to bytes */
1883 1889 fcp_ascii_to_wwn(plun->lun_guid,
1884 1890 map->guid, sizeof (map->guid));
1885 1891
1886 1892 if ((sizeof (map->guid)) <
1887 1893 plun->lun_guid_size / 2) {
1888 1894 cmn_err(CE_WARN,
1889 1895 "fcp_get_target_mappings:"
1890 1896 "guid copy space "
1891 1897 "insufficient."
1892 1898 "Copy Truncation - "
1893 1899 "available %d; need %d",
1894 1900 (int)sizeof (map->guid),
1895 1901 (int)
1896 1902 plun->lun_guid_size / 2);
1897 1903 }
1898 1904 }
1899 1905 kmem_free(path, MAXPATHLEN);
1900 1906 }
1901 1907 mutex_exit(&ptgt->tgt_mutex);
1902 1908 }
1903 1909 }
1904 1910 mutex_exit(&pptr->port_mutex);
1905 1911 mappings->numLuns = mapIndex;
1906 1912
1907 1913 if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
1908 1914 kmem_free(mappings, mappingSize);
1909 1915 return (EFAULT);
1910 1916 }
1911 1917 kmem_free(mappings, mappingSize);
1912 1918
1913 1919 #ifdef _MULTI_DATAMODEL
1914 1920 switch (ddi_model_convert_from(mode & FMODELS)) {
1915 1921 case DDI_MODEL_ILP32: {
1916 1922 struct fcp32_ioctl f32_ioctl;
1917 1923
1918 1924 f32_ioctl.fp_minor = fioctl.fp_minor;
1919 1925 f32_ioctl.listlen = fioctl.listlen;
1920 1926 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1921 1927 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1922 1928 sizeof (struct fcp32_ioctl), mode)) {
1923 1929 return (EFAULT);
1924 1930 }
1925 1931 break;
1926 1932 }
1927 1933 case DDI_MODEL_NONE:
1928 1934 if (ddi_copyout((void *)&fioctl, (void *)data,
1929 1935 sizeof (struct fcp_ioctl), mode)) {
1930 1936 return (EFAULT);
1931 1937 }
1932 1938 break;
1933 1939 }
1934 1940 #else /* _MULTI_DATAMODEL */
1935 1941
1936 1942 if (ddi_copyout((void *)&fioctl, (void *)data,
1937 1943 sizeof (struct fcp_ioctl), mode)) {
1938 1944 return (EFAULT);
1939 1945 }
1940 1946 #endif /* _MULTI_DATAMODEL */
1941 1947
1942 1948 return (0);
1943 1949 }
1944 1950
1945 1951 /*
1946 1952 * fcp_setup_scsi_ioctl
1947 1953 * Setup handler for the "scsi passthru" style of
1948 1954 * ioctl for FCP. See "fcp_util.h" for data structure
1949 1955 * definition.
1950 1956 *
1951 1957 * Input:
1952 1958 * u_fscsi = ioctl data (user address space)
1953 1959 * mode = See ioctl(9E)
1954 1960 *
1955 1961 * Output:
1956 1962 * u_fscsi = ioctl data (user address space)
1957 1963 * rval = return value - see ioctl(9E)
1958 1964 *
1959 1965 * Returns:
1960 1966 * 0 = OK
1961 1967 * EAGAIN = See errno.h
1962 1968 * EBUSY = See errno.h
1963 1969 * EFAULT = See errno.h
1964 1970 * EINTR = See errno.h
1965 1971 * EINVAL = See errno.h
1966 1972 * EIO = See errno.h
1967 1973 * ENOMEM = See errno.h
1968 1974 * ENXIO = See errno.h
1969 1975 *
1970 1976 * Context:
1971 1977 * Kernel context.
1972 1978 */
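/*
 * Usage sketch (hypothetical userland caller; "fd", "port_instance"
 * and "pwwn" are illustrative, and the CDB opcode must be one the
 * driver whitelists in scsi_ioctl_list): only FCP_SCSI_READ style
 * transfers are accepted, all three buffer lengths must be non-zero,
 * scsi_fc_pwwn names the target port, and scsi_lun holds the 8-byte
 * FCP LUN in big-endian (wire) order.
 *
 *	uchar_t cdb[6] = { 0x12, 0, 0, 0, 0xff, 0 };	(SCSI INQUIRY)
 *	uchar_t inq[0xff], sense[252];
 *	struct fcp_scsi_cmd fscsi;
 *
 *	(void) memset(&fscsi, 0, sizeof (fscsi));
 *	fscsi.scsi_fc_port_num = port_instance;
 *	(void) memcpy(&fscsi.scsi_fc_pwwn, pwwn, FC_WWN_SIZE);
 *	fscsi.scsi_lun = 0;				(LUN 0)
 *	fscsi.scsi_flags = FCP_SCSI_READ;
 *	fscsi.scsi_timeout = 30;
 *	fscsi.scsi_cdbbufaddr = (caddr_t)cdb;
 *	fscsi.scsi_cdblen = sizeof (cdb);
 *	fscsi.scsi_bufaddr = (caddr_t)inq;
 *	fscsi.scsi_buflen = sizeof (inq);
 *	fscsi.scsi_rqbufaddr = (caddr_t)sense;
 *	fscsi.scsi_rqlen = sizeof (sense);
 *	if (ioctl(fd, FCP_TGT_SEND_SCSI, &fscsi) == 0 &&
 *	    fscsi.scsi_bufstatus == STATUS_GOOD)
 *		the inquiry data is in inq[], any sense data in sense[]
 */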
1973 1979 /* ARGSUSED */
1974 1980 static int
1975 1981 fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
1976 1982 int mode, int *rval)
1977 1983 {
1978 1984 int ret = 0;
1979 1985 int temp_ret;
1980 1986 caddr_t k_cdbbufaddr = NULL;
1981 1987 caddr_t k_bufaddr = NULL;
1982 1988 caddr_t k_rqbufaddr = NULL;
1983 1989 caddr_t u_cdbbufaddr;
1984 1990 caddr_t u_bufaddr;
1985 1991 caddr_t u_rqbufaddr;
1986 1992 struct fcp_scsi_cmd k_fscsi;
1987 1993
1988 1994 /*
1989 1995 * Get fcp_scsi_cmd array element from user address space
1990 1996 */
1991 1997 if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
1992 1998 != 0) {
1993 1999 return (ret);
1994 2000 }
1995 2001
1996 2002
1997 2003 /*
1998 2004 	 * Even though kmem_alloc() checks the validity of the
1999 2005 	 * buffer length, this check is still needed for the case
2000 2006 	 * where kmem_flags are set and a zero buffer length is passed.
2001 2007 */
2002 2008 if ((k_fscsi.scsi_cdblen <= 0) ||
2003 2009 (k_fscsi.scsi_buflen <= 0) ||
2004 2010 (k_fscsi.scsi_rqlen <= 0)) {
2005 2011 return (EINVAL);
2006 2012 }
2007 2013
2008 2014 /*
2009 2015 * Allocate data for fcp_scsi_cmd pointer fields
2010 2016 */
2011 2017 if (ret == 0) {
2012 2018 k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
2013 2019 k_bufaddr = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
2014 2020 k_rqbufaddr = kmem_alloc(k_fscsi.scsi_rqlen, KM_NOSLEEP);
2015 2021
2016 2022 if (k_cdbbufaddr == NULL ||
2017 2023 k_bufaddr == NULL ||
2018 2024 k_rqbufaddr == NULL) {
2019 2025 ret = ENOMEM;
2020 2026 }
2021 2027 }
2022 2028
2023 2029 /*
2024 2030 * Get fcp_scsi_cmd pointer fields from user
2025 2031 * address space
2026 2032 */
2027 2033 if (ret == 0) {
2028 2034 u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
2029 2035 u_bufaddr = k_fscsi.scsi_bufaddr;
2030 2036 u_rqbufaddr = k_fscsi.scsi_rqbufaddr;
2031 2037
2032 2038 if (ddi_copyin(u_cdbbufaddr,
2033 2039 k_cdbbufaddr,
2034 2040 k_fscsi.scsi_cdblen,
2035 2041 mode)) {
2036 2042 ret = EFAULT;
2037 2043 } else if (ddi_copyin(u_bufaddr,
2038 2044 k_bufaddr,
2039 2045 k_fscsi.scsi_buflen,
2040 2046 mode)) {
2041 2047 ret = EFAULT;
2042 2048 } else if (ddi_copyin(u_rqbufaddr,
2043 2049 k_rqbufaddr,
2044 2050 k_fscsi.scsi_rqlen,
2045 2051 mode)) {
2046 2052 ret = EFAULT;
2047 2053 }
2048 2054 }
2049 2055
2050 2056 /*
2051 2057 * Send scsi command (blocking)
2052 2058 */
2053 2059 if (ret == 0) {
2054 2060 /*
2055 2061 * Prior to sending the scsi command, the
2056 2062 * fcp_scsi_cmd data structure must contain kernel,
2057 2063 * not user, addresses.
2058 2064 */
2059 2065 k_fscsi.scsi_cdbbufaddr = k_cdbbufaddr;
2060 2066 k_fscsi.scsi_bufaddr = k_bufaddr;
2061 2067 k_fscsi.scsi_rqbufaddr = k_rqbufaddr;
2062 2068
2063 2069 ret = fcp_send_scsi_ioctl(&k_fscsi);
2064 2070
2065 2071 /*
2066 2072 * After sending the scsi command, the
2067 2073 * fcp_scsi_cmd data structure must contain user,
2068 2074 * not kernel, addresses.
2069 2075 */
2070 2076 k_fscsi.scsi_cdbbufaddr = u_cdbbufaddr;
2071 2077 k_fscsi.scsi_bufaddr = u_bufaddr;
2072 2078 k_fscsi.scsi_rqbufaddr = u_rqbufaddr;
2073 2079 }
2074 2080
2075 2081 /*
2076 2082 * Put fcp_scsi_cmd pointer fields to user address space
2077 2083 */
2078 2084 if (ret == 0) {
2079 2085 if (ddi_copyout(k_cdbbufaddr,
2080 2086 u_cdbbufaddr,
2081 2087 k_fscsi.scsi_cdblen,
2082 2088 mode)) {
2083 2089 ret = EFAULT;
2084 2090 } else if (ddi_copyout(k_bufaddr,
2085 2091 u_bufaddr,
2086 2092 k_fscsi.scsi_buflen,
2087 2093 mode)) {
2088 2094 ret = EFAULT;
2089 2095 } else if (ddi_copyout(k_rqbufaddr,
2090 2096 u_rqbufaddr,
2091 2097 k_fscsi.scsi_rqlen,
2092 2098 mode)) {
2093 2099 ret = EFAULT;
2094 2100 }
2095 2101 }
2096 2102
2097 2103 /*
2098 2104 * Free data for fcp_scsi_cmd pointer fields
2099 2105 */
2100 2106 if (k_cdbbufaddr != NULL) {
2101 2107 kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
2102 2108 }
2103 2109 if (k_bufaddr != NULL) {
2104 2110 kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
2105 2111 }
2106 2112 if (k_rqbufaddr != NULL) {
2107 2113 kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
2108 2114 }
2109 2115
2110 2116 /*
2111 2117 * Put fcp_scsi_cmd array element to user address space
2112 2118 */
2113 2119 temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
2114 2120 if (temp_ret != 0) {
2115 2121 ret = temp_ret;
2116 2122 }
2117 2123
2118 2124 /*
2119 2125 * Return status
2120 2126 */
2121 2127 return (ret);
2122 2128 }
2123 2129
2124 2130
2125 2131 /*
2126 2132 * fcp_copyin_scsi_cmd
2127 2133 * Copy in fcp_scsi_cmd data structure from user address space.
2128 2134 * The data may be in 32 bit or 64 bit modes.
2129 2135 *
2130 2136 * Input:
2131 2137 * base_addr = from address (user address space)
2132 2138 * mode = See ioctl(9E) and ddi_copyin(9F)
2133 2139 *
2134 2140 * Output:
2135 2141 * fscsi = to address (kernel address space)
2136 2142 *
2137 2143 * Returns:
2138 2144 * 0 = OK
2139 2145 * EFAULT = Error
2140 2146 *
2141 2147 * Context:
2142 2148 * Kernel context.
2143 2149 */
2144 2150 static int
2145 2151 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2146 2152 {
2147 2153 #ifdef _MULTI_DATAMODEL
2148 2154 struct fcp32_scsi_cmd f32scsi;
2149 2155
2150 2156 switch (ddi_model_convert_from(mode & FMODELS)) {
2151 2157 case DDI_MODEL_ILP32:
2152 2158 /*
2153 2159 * Copy data from user address space
2154 2160 */
2155 2161 if (ddi_copyin((void *)base_addr,
2156 2162 &f32scsi,
2157 2163 sizeof (struct fcp32_scsi_cmd),
2158 2164 mode)) {
2159 2165 return (EFAULT);
2160 2166 }
2161 2167 /*
2162 2168 * Convert from 32 bit to 64 bit
2163 2169 */
2164 2170 FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2165 2171 break;
2166 2172 case DDI_MODEL_NONE:
2167 2173 /*
2168 2174 * Copy data from user address space
2169 2175 */
2170 2176 if (ddi_copyin((void *)base_addr,
2171 2177 fscsi,
2172 2178 sizeof (struct fcp_scsi_cmd),
2173 2179 mode)) {
2174 2180 return (EFAULT);
2175 2181 }
2176 2182 break;
2177 2183 }
2178 2184 #else /* _MULTI_DATAMODEL */
2179 2185 /*
2180 2186 * Copy data from user address space
2181 2187 */
2182 2188 if (ddi_copyin((void *)base_addr,
2183 2189 fscsi,
2184 2190 sizeof (struct fcp_scsi_cmd),
2185 2191 mode)) {
2186 2192 return (EFAULT);
2187 2193 }
2188 2194 #endif /* _MULTI_DATAMODEL */
2189 2195
2190 2196 return (0);
2191 2197 }
2192 2198
2193 2199
2194 2200 /*
2195 2201 * fcp_copyout_scsi_cmd
2196 2202 * Copy out fcp_scsi_cmd data structure to user address space.
2197 2203 * The data may be in 32 bit or 64 bit modes.
2198 2204 *
2199 2205 * Input:
2200 2206 * fscsi = to address (kernel address space)
2201 2207 * mode = See ioctl(9E) and ddi_copyin(9F)
2202 2208 *
2203 2209 * Output:
2204 2210 * base_addr = from address (user address space)
2205 2211 *
2206 2212 * Returns:
2207 2213 * 0 = OK
2208 2214 * EFAULT = Error
2209 2215 *
2210 2216 * Context:
2211 2217 * Kernel context.
2212 2218 */
2213 2219 static int
2214 2220 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2215 2221 {
2216 2222 #ifdef _MULTI_DATAMODEL
2217 2223 struct fcp32_scsi_cmd f32scsi;
2218 2224
2219 2225 switch (ddi_model_convert_from(mode & FMODELS)) {
2220 2226 case DDI_MODEL_ILP32:
2221 2227 /*
2222 2228 * Convert from 64 bit to 32 bit
2223 2229 */
2224 2230 FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2225 2231 /*
2226 2232 * Copy data to user address space
2227 2233 */
2228 2234 if (ddi_copyout(&f32scsi,
2229 2235 (void *)base_addr,
2230 2236 sizeof (struct fcp32_scsi_cmd),
2231 2237 mode)) {
2232 2238 return (EFAULT);
2233 2239 }
2234 2240 break;
2235 2241 case DDI_MODEL_NONE:
2236 2242 /*
2237 2243 * Copy data to user address space
2238 2244 */
2239 2245 if (ddi_copyout(fscsi,
2240 2246 (void *)base_addr,
2241 2247 sizeof (struct fcp_scsi_cmd),
2242 2248 mode)) {
2243 2249 return (EFAULT);
2244 2250 }
2245 2251 break;
2246 2252 }
2247 2253 #else /* _MULTI_DATAMODEL */
2248 2254 /*
2249 2255 * Copy data to user address space
2250 2256 */
2251 2257 if (ddi_copyout(fscsi,
2252 2258 (void *)base_addr,
2253 2259 sizeof (struct fcp_scsi_cmd),
2254 2260 mode)) {
2255 2261 return (EFAULT);
2256 2262 }
2257 2263 #endif /* _MULTI_DATAMODEL */
2258 2264
2259 2265 return (0);
2260 2266 }
2261 2267
2262 2268
2263 2269 /*
2264 2270 * fcp_send_scsi_ioctl
2265 2271 * Sends the SCSI command in blocking mode.
2266 2272 *
2267 2273 * Input:
2268 2274 * fscsi = SCSI command data structure
2269 2275 *
2270 2276 * Output:
2271 2277 * fscsi = SCSI command data structure
2272 2278 *
2273 2279 * Returns:
2274 2280 * 0 = OK
2275 2281 * EAGAIN = See errno.h
2276 2282 * EBUSY = See errno.h
2277 2283 * EINTR = See errno.h
2278 2284 * EINVAL = See errno.h
2279 2285 * EIO = See errno.h
2280 2286 * ENOMEM = See errno.h
2281 2287 * ENXIO = See errno.h
2282 2288 *
2283 2289 * Context:
2284 2290 * Kernel context.
2285 2291 */
2286 2292 static int
2287 2293 fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
2288 2294 {
2289 2295 struct fcp_lun *plun = NULL;
2290 2296 struct fcp_port *pptr = NULL;
2291 2297 struct fcp_tgt *ptgt = NULL;
2292 2298 fc_packet_t *fpkt = NULL;
2293 2299 struct fcp_ipkt *icmd = NULL;
2294 2300 int target_created = FALSE;
2295 2301 fc_frame_hdr_t *hp;
2296 2302 struct fcp_cmd fcp_cmd;
2297 2303 struct fcp_cmd *fcmd;
2298 2304 union scsi_cdb *scsi_cdb;
2299 2305 la_wwn_t *wwn_ptr;
2300 2306 int nodma;
2301 2307 struct fcp_rsp *rsp;
2302 2308 struct fcp_rsp_info *rsp_info;
2303 2309 caddr_t rsp_sense;
2304 2310 int buf_len;
2305 2311 int info_len;
2306 2312 int sense_len;
2307 2313 struct scsi_extended_sense *sense_to = NULL;
2308 2314 timeout_id_t tid;
2309 2315 uint8_t reconfig_lun = FALSE;
2310 2316 uint8_t reconfig_pending = FALSE;
2311 2317 uint8_t scsi_cmd;
2312 2318 int rsp_len;
2313 2319 int cmd_index;
2314 2320 int fc_status;
2315 2321 int pkt_state;
2316 2322 int pkt_action;
2317 2323 int pkt_reason;
2318 2324 int ret, xport_retval = ~FC_SUCCESS;
2319 2325 int lcount;
2320 2326 int tcount;
2321 2327 int reconfig_status;
2322 2328 int port_busy = FALSE;
2323 2329 uchar_t *lun_string;
2324 2330
2325 2331 /*
2326 2332 * Check valid SCSI command
2327 2333 */
2328 2334 scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
2329 2335 ret = EINVAL;
2330 2336 for (cmd_index = 0;
2331 2337 cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
2332 2338 ret != 0;
2333 2339 cmd_index++) {
2334 2340 /*
2335 2341 * First byte of CDB is the SCSI command
2336 2342 */
2337 2343 if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
2338 2344 ret = 0;
2339 2345 }
2340 2346 }
2341 2347
2342 2348 /*
2343 2349 * Check inputs
2344 2350 */
2345 2351 if (fscsi->scsi_flags != FCP_SCSI_READ) {
2346 2352 ret = EINVAL;
2347 2353 } else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
2348 2354 /* no larger than */
2349 2355 ret = EINVAL;
2350 2356 }
2351 2357
2352 2358
2353 2359 /*
2354 2360 * Find FC port
2355 2361 */
2356 2362 if (ret == 0) {
2357 2363 /*
2358 2364 * Acquire global mutex
2359 2365 */
2360 2366 mutex_enter(&fcp_global_mutex);
2361 2367
2362 2368 pptr = fcp_port_head;
2363 2369 while (pptr) {
2364 2370 if (pptr->port_instance ==
2365 2371 (uint32_t)fscsi->scsi_fc_port_num) {
2366 2372 break;
2367 2373 } else {
2368 2374 pptr = pptr->port_next;
2369 2375 }
2370 2376 }
2371 2377
2372 2378 if (pptr == NULL) {
2373 2379 ret = ENXIO;
2374 2380 } else {
2375 2381 /*
2376 2382 			 * fc_ulp_busy_port() can raise power,
2377 2383 			 * so we must not hold any mutexes involved in PM.
2378 2384 */
2379 2385 mutex_exit(&fcp_global_mutex);
2380 2386 ret = fc_ulp_busy_port(pptr->port_fp_handle);
2381 2387 }
2382 2388
2383 2389 if (ret == 0) {
2384 2390
2385 2391 /* remember port is busy, so we will release later */
2386 2392 port_busy = TRUE;
2387 2393
2388 2394 /*
2389 2395 * If there is a reconfiguration in progress, wait
2390 2396 * for it to complete.
2391 2397 */
2392 2398
2393 2399 fcp_reconfig_wait(pptr);
2394 2400
2395 2401 /* reacquire mutexes in order */
2396 2402 mutex_enter(&fcp_global_mutex);
2397 2403 mutex_enter(&pptr->port_mutex);
2398 2404
2399 2405 /*
2400 2406 * Will port accept DMA?
2401 2407 */
2402 2408 nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
2403 2409 ? 1 : 0;
2404 2410
2405 2411 /*
2406 2412 			 * If init or offline, devices are not known.
2407 2413 			 *
2408 2414 			 * If we are discovering (onlining), we obviously
2409 2415 			 * cannot provide reliable data about devices
2410 2416 			 * until discovery is complete.
2411 2417 */
2412 2418 if (pptr->port_state & (FCP_STATE_INIT |
2413 2419 FCP_STATE_OFFLINE)) {
2414 2420 ret = ENXIO;
2415 2421 } else if (pptr->port_state & FCP_STATE_ONLINING) {
2416 2422 ret = EBUSY;
2417 2423 } else {
2418 2424 /*
2419 2425 * Find target from pwwn
2420 2426 *
2421 2427 * The wwn must be put into a local
2422 2428 * variable to ensure alignment.
2423 2429 */
2424 2430 wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2425 2431 ptgt = fcp_lookup_target(pptr,
2426 2432 (uchar_t *)wwn_ptr);
2427 2433
2428 2434 /*
2429 2435 				 * If the target was not found, create it.
2430 2436 */
2431 2437 if (ptgt == NULL) {
2432 2438 /*
2433 2439 * Note: Still have global &
2434 2440 * port mutexes
2435 2441 */
2436 2442 mutex_exit(&pptr->port_mutex);
2437 2443 ptgt = fcp_port_create_tgt(pptr,
2438 2444 wwn_ptr, &ret, &fc_status,
2439 2445 &pkt_state, &pkt_action,
2440 2446 &pkt_reason);
2441 2447 mutex_enter(&pptr->port_mutex);
2442 2448
2443 2449 fscsi->scsi_fc_status = fc_status;
2444 2450 fscsi->scsi_pkt_state =
2445 2451 (uchar_t)pkt_state;
2446 2452 fscsi->scsi_pkt_reason = pkt_reason;
2447 2453 fscsi->scsi_pkt_action =
2448 2454 (uchar_t)pkt_action;
2449 2455
2450 2456 if (ptgt != NULL) {
2451 2457 target_created = TRUE;
2452 2458 } else if (ret == 0) {
2453 2459 ret = ENOMEM;
2454 2460 }
2455 2461 }
2456 2462
2457 2463 if (ret == 0) {
2458 2464 /*
2459 2465 * Acquire target
2460 2466 */
2461 2467 mutex_enter(&ptgt->tgt_mutex);
2462 2468
2463 2469 /*
2464 2470 				 * If the target is marked or busy,
2465 2471 				 * then it cannot be used.
2466 2472 */
2467 2473 if (ptgt->tgt_state &
2468 2474 (FCP_TGT_MARK |
2469 2475 FCP_TGT_BUSY)) {
2470 2476 ret = EBUSY;
2471 2477 } else {
2472 2478 /*
2473 2479 * Mark target as busy
2474 2480 */
2475 2481 ptgt->tgt_state |=
2476 2482 FCP_TGT_BUSY;
2477 2483 }
2478 2484
2479 2485 /*
2480 2486 * Release target
2481 2487 */
2482 2488 lcount = pptr->port_link_cnt;
2483 2489 tcount = ptgt->tgt_change_cnt;
2484 2490 mutex_exit(&ptgt->tgt_mutex);
2485 2491 }
2486 2492 }
2487 2493
2488 2494 /*
2489 2495 * Release port
2490 2496 */
2491 2497 mutex_exit(&pptr->port_mutex);
2492 2498 }
2493 2499
2494 2500 /*
2495 2501 * Release global mutex
2496 2502 */
2497 2503 mutex_exit(&fcp_global_mutex);
2498 2504 }
2499 2505
2500 2506 if (ret == 0) {
2501 2507 uint64_t belun = BE_64(fscsi->scsi_lun);
2502 2508
2503 2509 /*
2504 2510 * If it's a target device, find lun from pwwn
2505 2511 * The wwn must be put into a local
2506 2512 * variable to ensure alignment.
2507 2513 */
2508 2514 mutex_enter(&pptr->port_mutex);
2509 2515 wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2510 2516 if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
2511 2517 /* this is not a target */
2512 2518 fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
2513 2519 ret = ENXIO;
2514 2520 } else if ((belun << 16) != 0) {
2515 2521 /*
2516 2522 			 * Since fcp only supports the PD and LU addressing
2517 2523 			 * methods so far, the last 6 bytes of a valid LUN
2518 2524 			 * are expected to be filled with 00h.
2519 2525 */
2520 2526 fscsi->scsi_fc_status = FC_INVALID_LUN;
2521 2527 cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
2522 2528 " method 0x%02x with LUN number 0x%016" PRIx64,
2523 2529 (uint8_t)(belun >> 62), belun);
2524 2530 ret = ENXIO;
2525 2531 } else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
2526 2532 (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
2527 2533 /*
2528 2534 * This is a SCSI target, but no LUN at this
2529 2535 * address.
2530 2536 *
2531 2537 * In the future, we may want to send this to
2532 2538 * the target, and let it respond
2533 2539 * appropriately
2534 2540 */
2535 2541 ret = ENXIO;
2536 2542 }
2537 2543 mutex_exit(&pptr->port_mutex);
2538 2544 }
2539 2545
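	/*
	 * For reference: scsi_lun is the raw 8-byte FCP entity address in
	 * big-endian (wire) order.  With peripheral-device addressing,
	 * LUN 5 is the byte string 00 05 00 00 00 00 00 00; after the
	 * BE_64() above, (belun >> 48) & 0x3fff yields 5 and
	 * (belun << 16) == 0, so the address passes the checks and is
	 * resolved through fcp_lookup_lun().
	 */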
2540 2546 /*
2541 2547 * Finished grabbing external resources
2542 2548 * Allocate internal packet (icmd)
2543 2549 */
2544 2550 if (ret == 0) {
2545 2551 /*
2546 2552 * Calc rsp len assuming rsp info included
2547 2553 */
2548 2554 rsp_len = sizeof (struct fcp_rsp) +
2549 2555 sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;
2550 2556
2551 2557 icmd = fcp_icmd_alloc(pptr, ptgt,
2552 2558 sizeof (struct fcp_cmd),
2553 2559 rsp_len,
2554 2560 fscsi->scsi_buflen,
2555 2561 nodma,
2556 2562 lcount, /* ipkt_link_cnt */
2557 2563 tcount, /* ipkt_change_cnt */
2558 2564 0, /* cause */
2559 2565 FC_INVALID_RSCN_COUNT); /* invalidate the count */
2560 2566
2561 2567 if (icmd == NULL) {
2562 2568 ret = ENOMEM;
2563 2569 } else {
2564 2570 /*
2565 2571 * Setup internal packet as sema sync
2566 2572 */
2567 2573 fcp_ipkt_sema_init(icmd);
2568 2574 }
2569 2575 }
2570 2576
2571 2577 if (ret == 0) {
2572 2578 /*
2573 2579 * Init fpkt pointer for use.
2574 2580 */
2575 2581
2576 2582 fpkt = icmd->ipkt_fpkt;
2577 2583
2578 2584 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
2579 2585 fpkt->pkt_tran_type = FC_PKT_FCP_READ; /* only rd for now */
2580 2586 fpkt->pkt_timeout = fscsi->scsi_timeout;
2581 2587
2582 2588 /*
2583 2589 * Init fcmd pointer for use by SCSI command
2584 2590 */
2585 2591
2586 2592 if (nodma) {
2587 2593 fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
2588 2594 } else {
2589 2595 fcmd = &fcp_cmd;
2590 2596 }
2591 2597 bzero(fcmd, sizeof (struct fcp_cmd));
2592 2598 ptgt = plun->lun_tgt;
2593 2599
2594 2600 lun_string = (uchar_t *)&fscsi->scsi_lun;
2595 2601
2596 2602 fcmd->fcp_ent_addr.ent_addr_0 =
2597 2603 BE_16(*(uint16_t *)&(lun_string[0]));
2598 2604 fcmd->fcp_ent_addr.ent_addr_1 =
2599 2605 BE_16(*(uint16_t *)&(lun_string[2]));
2600 2606 fcmd->fcp_ent_addr.ent_addr_2 =
2601 2607 BE_16(*(uint16_t *)&(lun_string[4]));
2602 2608 fcmd->fcp_ent_addr.ent_addr_3 =
2603 2609 BE_16(*(uint16_t *)&(lun_string[6]));
2604 2610
2605 2611 /*
2606 2612 * Setup internal packet(icmd)
2607 2613 */
2608 2614 icmd->ipkt_lun = plun;
2609 2615 icmd->ipkt_restart = 0;
2610 2616 icmd->ipkt_retries = 0;
2611 2617 icmd->ipkt_opcode = 0;
2612 2618
2613 2619 /*
2614 2620 * Init the frame HEADER Pointer for use
2615 2621 */
2616 2622 hp = &fpkt->pkt_cmd_fhdr;
2617 2623
2618 2624 hp->s_id = pptr->port_id;
2619 2625 hp->d_id = ptgt->tgt_d_id;
2620 2626 hp->r_ctl = R_CTL_COMMAND;
2621 2627 hp->type = FC_TYPE_SCSI_FCP;
2622 2628 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
2623 2629 hp->rsvd = 0;
2624 2630 hp->seq_id = 0;
2625 2631 hp->seq_cnt = 0;
2626 2632 hp->ox_id = 0xffff;
2627 2633 hp->rx_id = 0xffff;
2628 2634 hp->ro = 0;
2629 2635
2630 2636 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
2631 2637 fcmd->fcp_cntl.cntl_read_data = 1; /* only rd for now */
2632 2638 fcmd->fcp_cntl.cntl_write_data = 0;
2633 2639 fcmd->fcp_data_len = fscsi->scsi_buflen;
2634 2640
2635 2641 scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
2636 2642 bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
2637 2643 fscsi->scsi_cdblen);
2638 2644
2639 2645 if (!nodma) {
2640 2646 FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
2641 2647 fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
2642 2648 }
2643 2649
2644 2650 /*
2645 2651 * Send SCSI command to FC transport
2646 2652 */
2647 2653
2648 2654 if (ret == 0) {
2649 2655 mutex_enter(&ptgt->tgt_mutex);
2650 2656
2651 2657 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
2652 2658 mutex_exit(&ptgt->tgt_mutex);
2653 2659 fscsi->scsi_fc_status = xport_retval =
2654 2660 fc_ulp_transport(pptr->port_fp_handle,
2655 2661 fpkt);
2656 2662 if (fscsi->scsi_fc_status != FC_SUCCESS) {
2657 2663 ret = EIO;
2658 2664 }
2659 2665 } else {
2660 2666 mutex_exit(&ptgt->tgt_mutex);
2661 2667 ret = EBUSY;
2662 2668 }
2663 2669 }
2664 2670 }
2665 2671
2666 2672 /*
2667 2673 * Wait for completion only if fc_ulp_transport was called and it
2668 2674 * returned a success. This is the only time callback will happen.
2669 2675 * Otherwise, there is no point in waiting
2670 2676 */
2671 2677 if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2672 2678 ret = fcp_ipkt_sema_wait(icmd);
2673 2679 }
2674 2680
2675 2681 /*
2676 2682 * Copy data to IOCTL data structures
2677 2683 */
2678 2684 rsp = NULL;
2679 2685 if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2680 2686 rsp = (struct fcp_rsp *)fpkt->pkt_resp;
2681 2687
2682 2688 if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
2683 2689 fcp_log(CE_WARN, pptr->port_dip,
2684 2690 "!SCSI command to d_id=0x%x lun=0x%x"
2685 2691 " failed, Bad FCP response values:"
2686 2692 " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
2687 2693 " sts-rsvd2=%x, rsplen=%x, senselen=%x",
2688 2694 ptgt->tgt_d_id, plun->lun_num,
2689 2695 rsp->reserved_0, rsp->reserved_1,
2690 2696 rsp->fcp_u.fcp_status.reserved_0,
2691 2697 rsp->fcp_u.fcp_status.reserved_1,
2692 2698 rsp->fcp_response_len, rsp->fcp_sense_len);
2693 2699
2694 2700 ret = EIO;
2695 2701 }
2696 2702 }
2697 2703
2698 2704 if ((ret == 0) && (rsp != NULL)) {
2699 2705 /*
2700 2706 * Calc response lengths
2701 2707 */
2702 2708 sense_len = 0;
2703 2709 info_len = 0;
2704 2710
2705 2711 if (rsp->fcp_u.fcp_status.rsp_len_set) {
2706 2712 info_len = rsp->fcp_response_len;
2707 2713 }
2708 2714
2709 2715 rsp_info = (struct fcp_rsp_info *)
2710 2716 ((uint8_t *)rsp + sizeof (struct fcp_rsp));
2711 2717
2712 2718 /*
2713 2719 * Get SCSI status
2714 2720 */
2715 2721 fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
2716 2722 /*
2717 2723 * If a lun was just added or removed and the next command
2718 2724 * comes through this interface, we need to capture the check
2719 2725 * condition so we can discover the new topology.
2720 2726 */
2721 2727 if (fscsi->scsi_bufstatus != STATUS_GOOD &&
2722 2728 rsp->fcp_u.fcp_status.sense_len_set) {
2723 2729 sense_len = rsp->fcp_sense_len;
2724 2730 rsp_sense = (caddr_t)((uint8_t *)rsp_info + info_len);
2725 2731 sense_to = (struct scsi_extended_sense *)rsp_sense;
2726 2732 if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
2727 2733 (FCP_SENSE_NO_LUN(sense_to))) {
2728 2734 reconfig_lun = TRUE;
2729 2735 }
2730 2736 }
2731 2737
2732 2738 if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
2733 2739 (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
2734 2740 if (reconfig_lun == FALSE) {
2735 2741 reconfig_status =
2736 2742 fcp_is_reconfig_needed(ptgt, fpkt);
2737 2743 }
2738 2744
2739 2745 if ((reconfig_lun == TRUE) ||
2740 2746 (reconfig_status == TRUE)) {
2741 2747 mutex_enter(&ptgt->tgt_mutex);
2742 2748 if (ptgt->tgt_tid == NULL) {
2743 2749 /*
2744 2750 * Either we've been notified the
2745 2751 * REPORT_LUN data has changed, or
2746 2752 * we've determined on our own that
2747 2753 * we're out of date. Kick off
2748 2754 * rediscovery.
2749 2755 */
2750 2756 tid = timeout(fcp_reconfigure_luns,
2751 2757 (caddr_t)ptgt, drv_usectohz(1));
2752 2758
2753 2759 ptgt->tgt_tid = tid;
2754 2760 ptgt->tgt_state |= FCP_TGT_BUSY;
2755 2761 ret = EBUSY;
2756 2762 reconfig_pending = TRUE;
2757 2763 }
2758 2764 mutex_exit(&ptgt->tgt_mutex);
2759 2765 }
2760 2766 }
2761 2767
2762 2768 /*
2763 2769 * Calc residuals and buffer lengths
2764 2770 */
2765 2771
2766 2772 if (ret == 0) {
2767 2773 buf_len = fscsi->scsi_buflen;
2768 2774 fscsi->scsi_bufresid = 0;
2769 2775 if (rsp->fcp_u.fcp_status.resid_under) {
2770 2776 if (rsp->fcp_resid <= fscsi->scsi_buflen) {
2771 2777 fscsi->scsi_bufresid = rsp->fcp_resid;
2772 2778 } else {
2773 2779 cmn_err(CE_WARN, "fcp: bad residue %x "
2774 2780 "for txfer len %x", rsp->fcp_resid,
2775 2781 fscsi->scsi_buflen);
2776 2782 fscsi->scsi_bufresid =
2777 2783 fscsi->scsi_buflen;
2778 2784 }
2779 2785 buf_len -= fscsi->scsi_bufresid;
2780 2786 }
2781 2787 if (rsp->fcp_u.fcp_status.resid_over) {
2782 2788 fscsi->scsi_bufresid = -rsp->fcp_resid;
2783 2789 }
2784 2790
2785 2791 fscsi->scsi_rqresid = fscsi->scsi_rqlen - sense_len;
2786 2792 if (fscsi->scsi_rqlen < sense_len) {
2787 2793 sense_len = fscsi->scsi_rqlen;
2788 2794 }
2789 2795
2790 2796 fscsi->scsi_fc_rspcode = 0;
2791 2797 if (rsp->fcp_u.fcp_status.rsp_len_set) {
2792 2798 fscsi->scsi_fc_rspcode = rsp_info->rsp_code;
2793 2799 }
2794 2800 fscsi->scsi_pkt_state = fpkt->pkt_state;
2795 2801 fscsi->scsi_pkt_action = fpkt->pkt_action;
2796 2802 fscsi->scsi_pkt_reason = fpkt->pkt_reason;
2797 2803
2798 2804 /*
2799 2805 * Copy data and request sense
2800 2806 *
2801 2807 * Data must be copied by using the FCP_CP_IN macro.
2802 2808 * This will ensure the proper byte order since the data
2803 2809 * is being copied directly from the memory mapped
2804 2810 * device register.
2805 2811 *
2806 2812 * The response (and request sense) will be in the
2807 2813 * correct byte order. No special copy is necessary.
2808 2814 */
2809 2815
2810 2816 if (buf_len) {
2811 2817 FCP_CP_IN(fpkt->pkt_data,
2812 2818 fscsi->scsi_bufaddr,
2813 2819 fpkt->pkt_data_acc,
2814 2820 buf_len);
2815 2821 }
2816 2822 bcopy((void *)rsp_sense,
2817 2823 (void *)fscsi->scsi_rqbufaddr,
2818 2824 sense_len);
2819 2825 }
2820 2826 }
2821 2827
2822 2828 /*
2823 2829 	 * Clean up transport data structures if icmd was allocated,
2824 2830 	 * so that cleanup happens in the same thread that allocated icmd.
2825 2831 */
2826 2832 if (icmd != NULL) {
2827 2833 fcp_ipkt_sema_cleanup(icmd);
2828 2834 }
2829 2835
2830 2836 /* restore pm busy/idle status */
2831 2837 if (port_busy) {
2832 2838 fc_ulp_idle_port(pptr->port_fp_handle);
2833 2839 }
2834 2840
2835 2841 /*
2836 2842 	 * Clean up the target.  If a reconfig is pending, don't clear the
2837 2843 	 * BUSY flag; it will be cleared when the reconfig is complete.
2838 2844 */
2839 2845 if ((ptgt != NULL) && !reconfig_pending) {
2840 2846 /*
2841 2847 		 * If the target was created on demand, clear the BUSY mark.
2842 2848 */
2843 2849 if (target_created) {
2844 2850 mutex_enter(&ptgt->tgt_mutex);
2845 2851 ptgt->tgt_state &= ~FCP_TGT_BUSY;
2846 2852 mutex_exit(&ptgt->tgt_mutex);
2847 2853 } else {
2848 2854 /*
2849 2855 * De-mark target as busy
2850 2856 */
2851 2857 mutex_enter(&ptgt->tgt_mutex);
2852 2858 ptgt->tgt_state &= ~FCP_TGT_BUSY;
2853 2859 mutex_exit(&ptgt->tgt_mutex);
2854 2860 }
2855 2861 }
2856 2862 return (ret);
2857 2863 }
2858 2864
2859 2865
2860 2866 static int
2861 2867 fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
2862 2868 fc_packet_t *fpkt)
2863 2869 {
2864 2870 uchar_t *lun_string;
2865 2871 uint16_t lun_num, i;
2866 2872 int num_luns;
2867 2873 int actual_luns;
2868 2874 int num_masked_luns;
2869 2875 int lun_buflen;
2870 2876 struct fcp_lun *plun = NULL;
2871 2877 struct fcp_reportlun_resp *report_lun;
2872 2878 uint8_t reconfig_needed = FALSE;
2873 2879 uint8_t lun_exists = FALSE;
2874 2880 fcp_port_t *pptr = ptgt->tgt_port;
2875 2881
2876 2882 report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);
2877 2883
2878 2884 FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
2879 2885 fpkt->pkt_datalen);
2880 2886
2881 2887 /* get number of luns (which is supplied as LUNS * 8) */
2882 2888 num_luns = BE_32(report_lun->num_lun) >> 3;
2883 2889
2884 2890 /*
2885 2891 * Figure out exactly how many lun strings our response buffer
2886 2892 * can hold.
2887 2893 */
2888 2894 lun_buflen = (fpkt->pkt_datalen -
2889 2895 2 * sizeof (uint32_t)) / sizeof (longlong_t);
2890 2896
2891 2897 /*
2892 2898 * Is our response buffer full or not? We don't want to
2893 2899 * potentially walk beyond the number of luns we have.
2894 2900 */
2895 2901 if (num_luns <= lun_buflen) {
2896 2902 actual_luns = num_luns;
2897 2903 } else {
2898 2904 actual_luns = lun_buflen;
2899 2905 }
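	/*
	 * Worked example: the REPORT_LUNS payload is an 8-byte header
	 * (4-byte list length plus 4 reserved bytes) followed by 8-byte
	 * LUN entries, so a 2048-byte pkt_datalen gives lun_buflen =
	 * (2048 - 8) / 8 = 255 entries; a target reporting more LUNs
	 * than that is only scanned up to the buffer capacity.
	 */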
2900 2906
2901 2907 mutex_enter(&ptgt->tgt_mutex);
2902 2908
2903 2909 /* Scan each lun to see if we have masked it. */
2904 2910 num_masked_luns = 0;
2905 2911 if (fcp_lun_blacklist != NULL) {
2906 2912 for (i = 0; i < actual_luns; i++) {
2907 2913 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2908 2914 switch (lun_string[0] & 0xC0) {
2909 2915 case FCP_LUN_ADDRESSING:
2910 2916 case FCP_PD_ADDRESSING:
2911 2917 case FCP_VOLUME_ADDRESSING:
2912 2918 lun_num = ((lun_string[0] & 0x3F) << 8)
2913 2919 | lun_string[1];
2914 2920 if (fcp_should_mask(&ptgt->tgt_port_wwn,
2915 2921 lun_num) == TRUE) {
2916 2922 num_masked_luns++;
2917 2923 }
2918 2924 break;
2919 2925 default:
2920 2926 break;
2921 2927 }
2922 2928 }
2923 2929 }
2924 2930
2925 2931 /*
2926 2932 * The quick and easy check. If the number of LUNs reported
2927 2933 * doesn't match the number we currently know about, we need
2928 2934 * to reconfigure.
2929 2935 */
2930 2936 if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
2931 2937 mutex_exit(&ptgt->tgt_mutex);
2932 2938 kmem_free(report_lun, fpkt->pkt_datalen);
2933 2939 return (TRUE);
2934 2940 }
2935 2941
2936 2942 /*
2937 2943 * If the quick and easy check doesn't turn up anything, we walk
2938 2944 * the list of luns from the REPORT_LUN response and look for
2939 2945 * any luns we don't know about. If we find one, we know we need
2940 2946 * to reconfigure. We will skip LUNs that are masked because of the
2941 2947 * blacklist.
2942 2948 */
2943 2949 for (i = 0; i < actual_luns; i++) {
2944 2950 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2945 2951 lun_exists = FALSE;
2946 2952 switch (lun_string[0] & 0xC0) {
2947 2953 case FCP_LUN_ADDRESSING:
2948 2954 case FCP_PD_ADDRESSING:
2949 2955 case FCP_VOLUME_ADDRESSING:
2950 2956 lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
2951 2957
2952 2958 if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
2953 2959 &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
2954 2960 lun_exists = TRUE;
2955 2961 break;
2956 2962 }
2957 2963
2958 2964 for (plun = ptgt->tgt_lun; plun;
2959 2965 plun = plun->lun_next) {
2960 2966 if (plun->lun_num == lun_num) {
2961 2967 lun_exists = TRUE;
2962 2968 break;
2963 2969 }
2964 2970 }
2965 2971 break;
2966 2972 default:
2967 2973 break;
2968 2974 }
2969 2975
2970 2976 if (lun_exists == FALSE) {
2971 2977 reconfig_needed = TRUE;
2972 2978 break;
2973 2979 }
2974 2980 }
2975 2981
2976 2982 mutex_exit(&ptgt->tgt_mutex);
2977 2983 kmem_free(report_lun, fpkt->pkt_datalen);
2978 2984
2979 2985 return (reconfig_needed);
2980 2986 }
2981 2987
2982 2988 /*
2983 2989 * This function is called by fcp_handle_page83 and uses inquiry response data
2984 2990 * stored in plun->lun_inq to determine whether or not a device is a member of
2985 2991  * the fcp_symmetric_disk_table table. We return 0 if it is in the table,
2986 2992 * otherwise 1.
2987 2993 */
2988 2994 static int
2989 2995 fcp_symmetric_device_probe(struct fcp_lun *plun)
2990 2996 {
2991 2997 struct scsi_inquiry *stdinq = &plun->lun_inq;
2992 2998 char *devidptr;
2993 2999 int i, len;
2994 3000
2995 3001 for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
2996 3002 devidptr = fcp_symmetric_disk_table[i];
2997 3003 len = (int)strlen(devidptr);
2998 3004
2999 3005 if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
3000 3006 return (0);
3001 3007 }
3002 3008 }
3003 3009 return (1);
3004 3010 }
3005 3011
3006 3012
3007 3013 /*
3008 3014  * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl.
3009 3015  * It returns the current count of state change callbacks,
3010 3016  * i.e. the value of port_link_cnt.
3011 3017 *
3012 3018 * INPUT:
3013 3019 * fcp_ioctl.fp_minor -> The minor # of the fp port
3014 3020 * fcp_ioctl.listlen -> 1
3015 3021 * fcp_ioctl.list -> Pointer to a 32 bit integer
3016 3022 */
3017 3023 /*ARGSUSED2*/
3018 3024 static int
3019 3025 fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
3020 3026 {
3021 3027 int ret;
3022 3028 uint32_t link_cnt;
3023 3029 struct fcp_ioctl fioctl;
3024 3030 struct fcp_port *pptr = NULL;
3025 3031
3026 3032 if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
3027 3033 &pptr)) != 0) {
3028 3034 return (ret);
3029 3035 }
3030 3036
3031 3037 ASSERT(pptr != NULL);
3032 3038
3033 3039 if (fioctl.listlen != 1) {
3034 3040 return (EINVAL);
3035 3041 }
3036 3042
3037 3043 mutex_enter(&pptr->port_mutex);
3038 3044 if (pptr->port_state & FCP_STATE_OFFLINE) {
3039 3045 mutex_exit(&pptr->port_mutex);
3040 3046 return (ENXIO);
3041 3047 }
3042 3048
3043 3049 /*
3044 3050 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
3045 3051 	 * when fcp initially attaches to the port and there is nothing
3046 3052 	 * hanging off the port, or if there was a repeat offline state change
3047 3053 * callback (refer fcp_statec_callback() FC_STATE_OFFLINE case).
3048 3054 * In the latter case, port_tmp_cnt will be non-zero and that is how we
3049 3055 * will differentiate the 2 cases.
3050 3056 */
3051 3057 if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
3052 3058 mutex_exit(&pptr->port_mutex);
3053 3059 return (ENXIO);
3054 3060 }
3055 3061
3056 3062 link_cnt = pptr->port_link_cnt;
3057 3063 mutex_exit(&pptr->port_mutex);
3058 3064
3059 3065 if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
3060 3066 return (EFAULT);
3061 3067 }
3062 3068
3063 3069 #ifdef _MULTI_DATAMODEL
3064 3070 switch (ddi_model_convert_from(mode & FMODELS)) {
3065 3071 case DDI_MODEL_ILP32: {
3066 3072 struct fcp32_ioctl f32_ioctl;
3067 3073
3068 3074 f32_ioctl.fp_minor = fioctl.fp_minor;
3069 3075 f32_ioctl.listlen = fioctl.listlen;
3070 3076 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
3071 3077 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
3072 3078 sizeof (struct fcp32_ioctl), mode)) {
3073 3079 return (EFAULT);
3074 3080 }
3075 3081 break;
3076 3082 }
3077 3083 case DDI_MODEL_NONE:
3078 3084 if (ddi_copyout((void *)&fioctl, (void *)data,
3079 3085 sizeof (struct fcp_ioctl), mode)) {
3080 3086 return (EFAULT);
3081 3087 }
3082 3088 break;
3083 3089 }
3084 3090 #else /* _MULTI_DATAMODEL */
3085 3091
3086 3092 if (ddi_copyout((void *)&fioctl, (void *)data,
3087 3093 sizeof (struct fcp_ioctl), mode)) {
3088 3094 return (EFAULT);
3089 3095 }
3090 3096 #endif /* _MULTI_DATAMODEL */
3091 3097
3092 3098 return (0);
3093 3099 }
3094 3100
3095 3101 /*
3096 3102  * This function copies the fcp_ioctl structure passed in from userland
3097 3103  * into the kernel. It handles 32-bit applications.
3098 3104 */
3099 3105 /*ARGSUSED*/
3100 3106 static int
3101 3107 fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
3102 3108 struct fcp_ioctl *fioctl, struct fcp_port **pptr)
3103 3109 {
3104 3110 struct fcp_port *t_pptr;
3105 3111
3106 3112 #ifdef _MULTI_DATAMODEL
3107 3113 switch (ddi_model_convert_from(mode & FMODELS)) {
3108 3114 case DDI_MODEL_ILP32: {
3109 3115 struct fcp32_ioctl f32_ioctl;
3110 3116
3111 3117 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
3112 3118 sizeof (struct fcp32_ioctl), mode)) {
3113 3119 return (EFAULT);
3114 3120 }
3115 3121 fioctl->fp_minor = f32_ioctl.fp_minor;
3116 3122 fioctl->listlen = f32_ioctl.listlen;
3117 3123 fioctl->list = (caddr_t)(long)f32_ioctl.list;
3118 3124 break;
3119 3125 }
3120 3126 case DDI_MODEL_NONE:
3121 3127 if (ddi_copyin((void *)data, (void *)fioctl,
3122 3128 sizeof (struct fcp_ioctl), mode)) {
3123 3129 return (EFAULT);
3124 3130 }
3125 3131 break;
3126 3132 }
3127 3133
3128 3134 #else /* _MULTI_DATAMODEL */
3129 3135 if (ddi_copyin((void *)data, (void *)fioctl,
3130 3136 sizeof (struct fcp_ioctl), mode)) {
3131 3137 return (EFAULT);
3132 3138 }
3133 3139 #endif /* _MULTI_DATAMODEL */
3134 3140
3135 3141 /*
3136 3142 * Right now we can assume that the minor number matches with
3137 3143 * this instance of fp. If this changes we will need to
3138 3144 * revisit this logic.
3139 3145 */
3140 3146 mutex_enter(&fcp_global_mutex);
3141 3147 t_pptr = fcp_port_head;
3142 3148 while (t_pptr) {
3143 3149 if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
3144 3150 break;
3145 3151 } else {
3146 3152 t_pptr = t_pptr->port_next;
3147 3153 }
3148 3154 }
3149 3155 *pptr = t_pptr;
3150 3156 mutex_exit(&fcp_global_mutex);
3151 3157 if (t_pptr == NULL) {
3152 3158 return (ENXIO);
3153 3159 }
3154 3160
3155 3161 return (0);
3156 3162 }
3157 3163
3158 3164 /*
3159 3165 * Function: fcp_port_create_tgt
3160 3166 *
3161 3167  * Description: As the name suggests, this function creates the target context
3162 3168  *		specified by the WWN provided by the caller. If the
3163 3169  *		creation goes well and the target is known by fp/fctl, a PLOGI
3164 3170  *		followed by a PRLI is issued.
3165 3171 *
3166 3172 * Argument: pptr fcp port structure
3167 3173 * pwwn WWN of the target
3168 3174 * ret_val Address of the return code. It could be:
3169 3175 * EIO, ENOMEM or 0.
3170 3176 * fc_status PLOGI or PRLI status completion
3171 3177 * fc_pkt_state PLOGI or PRLI state completion
3172 3178 * fc_pkt_reason PLOGI or PRLI reason completion
3173 3179 * fc_pkt_action PLOGI or PRLI action completion
3174 3180 *
3175 3181 * Return Value: NULL if it failed
3176 3182 * Target structure address if it succeeds
3177 3183 */
3178 3184 static struct fcp_tgt *
3179 3185 fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
3180 3186 int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
3181 3187 {
3182 3188 struct fcp_tgt *ptgt = NULL;
3183 3189 fc_portmap_t devlist;
3184 3190 int lcount;
3185 3191 int error;
3186 3192
3187 3193 *ret_val = 0;
3188 3194
3189 3195 /*
3190 3196 * Check FC port device & get port map
3191 3197 */
3192 3198 if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
3193 3199 &error, 1) == NULL) {
3194 3200 *ret_val = EIO;
3195 3201 } else {
3196 3202 if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
3197 3203 &devlist) != FC_SUCCESS) {
3198 3204 *ret_val = EIO;
3199 3205 }
3200 3206 }
3201 3207
3202 3208 /* Set port map flags */
3203 3209 devlist.map_type = PORT_DEVICE_USER_CREATE;
3204 3210
3205 3211 /* Allocate target */
3206 3212 if (*ret_val == 0) {
3207 3213 lcount = pptr->port_link_cnt;
3208 3214 ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
3209 3215 if (ptgt == NULL) {
3210 3216 fcp_log(CE_WARN, pptr->port_dip,
3211 3217 "!FC target allocation failed");
3212 3218 *ret_val = ENOMEM;
3213 3219 } else {
3214 3220 /* Setup target */
3215 3221 mutex_enter(&ptgt->tgt_mutex);
3216 3222
3217 3223 ptgt->tgt_statec_cause = FCP_CAUSE_TGT_CHANGE;
3218 3224 ptgt->tgt_tmp_cnt = 1;
3219 3225 ptgt->tgt_d_id = devlist.map_did.port_id;
3220 3226 ptgt->tgt_hard_addr =
3221 3227 devlist.map_hard_addr.hard_addr;
3222 3228 ptgt->tgt_pd_handle = devlist.map_pd;
3223 3229 ptgt->tgt_fca_dev = NULL;
3224 3230
3225 3231 bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
3226 3232 FC_WWN_SIZE);
3227 3233 bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
3228 3234 FC_WWN_SIZE);
3229 3235
3230 3236 mutex_exit(&ptgt->tgt_mutex);
3231 3237 }
3232 3238 }
3233 3239
3234 3240 /* Release global mutex for PLOGI and PRLI */
3235 3241 mutex_exit(&fcp_global_mutex);
3236 3242
3237 3243 /* Send PLOGI (If necessary) */
3238 3244 if (*ret_val == 0) {
3239 3245 *ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
3240 3246 fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3241 3247 }
3242 3248
3243 3249 /* Send PRLI (If necessary) */
3244 3250 if (*ret_val == 0) {
3245 3251 *ret_val = fcp_tgt_send_prli(ptgt, fc_status,
3246 3252 fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3247 3253 }
3248 3254
3249 3255 mutex_enter(&fcp_global_mutex);
3250 3256
3251 3257 return (ptgt);
3252 3258 }
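
A minimal caller-side sketch of the convention fcp_port_create_tgt() appears to expect: the caller holds fcp_global_mutex (the routine drops and re-acquires it around the PLOGI/PRLI) and then checks *ret_val together with the returned target pointer. The wrapper below is invented for illustration only and assumes the driver's own headers and globals; it is not part of the driver.

/* Hypothetical wrapper; assumes fcpvar.h types and fcp_global_mutex. */
static struct fcp_tgt *
fcp_example_create_by_wwn(struct fcp_port *pptr, la_wwn_t *pwwn)
{
	struct fcp_tgt	*ptgt;
	int		ret, fc_status;
	int		fc_pkt_state, fc_pkt_reason, fc_pkt_action;

	mutex_enter(&fcp_global_mutex);		/* held across the call */
	ptgt = fcp_port_create_tgt(pptr, pwwn, &ret, &fc_status,
	    &fc_pkt_state, &fc_pkt_reason, &fc_pkt_action);
	mutex_exit(&fcp_global_mutex);

	if (ptgt == NULL || ret != 0) {
		/* ENOMEM: no target structure; EIO: lookup or login failed */
		return (NULL);
	}
	return (ptgt);
}
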
3253 3259
3254 3260 /*
3255 3261 * Function: fcp_tgt_send_plogi
3256 3262 *
3257 3263 * Description: This function sends a PLOGI to the target specified by the
3258 3264 * caller and waits till it completes.
3259 3265 *
3260 3266 * Argument: ptgt Target to send the plogi to.
3261 3267 * fc_status Status returned by fp/fctl in the PLOGI request.
3262 3268 * fc_pkt_state State returned by fp/fctl in the PLOGI request.
3263 3269 * fc_pkt_reason Reason returned by fp/fctl in the PLOGI request.
3264 3270 * fc_pkt_action Action returned by fp/fctl in the PLOGI request.
3265 3271 *
3266 3272 * Return Value: 0
3267 3273 * ENOMEM
3268 3274 * EIO
3269 3275 *
3270 3276 * Context: User context.
3271 3277 */
3272 3278 static int
3273 3279 fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3274 3280 int *fc_pkt_reason, int *fc_pkt_action)
3275 3281 {
3276 3282 struct fcp_port *pptr;
3277 3283 struct fcp_ipkt *icmd;
3278 3284 struct fc_packet *fpkt;
3279 3285 fc_frame_hdr_t *hp;
3280 3286 struct la_els_logi logi;
3281 3287 int tcount;
3282 3288 int lcount;
3283 3289 int ret, login_retval = ~FC_SUCCESS;
3284 3290
3285 3291 ret = 0;
3286 3292
3287 3293 pptr = ptgt->tgt_port;
3288 3294
3289 3295 lcount = pptr->port_link_cnt;
3290 3296 tcount = ptgt->tgt_change_cnt;
3291 3297
3292 3298 /* Alloc internal packet */
3293 3299 icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
3294 3300 sizeof (la_els_logi_t), 0,
3295 3301 pptr->port_state & FCP_STATE_FCA_IS_NODMA,
3296 3302 lcount, tcount, 0, FC_INVALID_RSCN_COUNT);
3297 3303
3298 3304 if (icmd == NULL) {
3299 3305 ret = ENOMEM;
3300 3306 } else {
3301 3307 /*
3302 3308 * Setup internal packet as sema sync
3303 3309 */
3304 3310 fcp_ipkt_sema_init(icmd);
3305 3311
3306 3312 /*
3307 3313 * Setup internal packet (icmd)
3308 3314 */
3309 3315 icmd->ipkt_lun = NULL;
3310 3316 icmd->ipkt_restart = 0;
3311 3317 icmd->ipkt_retries = 0;
3312 3318 icmd->ipkt_opcode = LA_ELS_PLOGI;
3313 3319
3314 3320 /*
3315 3321 * Setup fc_packet
3316 3322 */
3317 3323 fpkt = icmd->ipkt_fpkt;
3318 3324
3319 3325 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
3320 3326 fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
3321 3327 fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
3322 3328
3323 3329 /*
3324 3330 * Setup FC frame header
3325 3331 */
3326 3332 hp = &fpkt->pkt_cmd_fhdr;
3327 3333
3328 3334 hp->s_id = pptr->port_id; /* source ID */
3329 3335 hp->d_id = ptgt->tgt_d_id; /* dest ID */
3330 3336 hp->r_ctl = R_CTL_ELS_REQ;
3331 3337 hp->type = FC_TYPE_EXTENDED_LS;
3332 3338 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3333 3339 hp->seq_id = 0;
3334 3340 hp->rsvd = 0;
3335 3341 hp->df_ctl = 0;
3336 3342 hp->seq_cnt = 0;
3337 3343 hp->ox_id = 0xffff; /* i.e. none */
3338 3344 hp->rx_id = 0xffff; /* i.e. none */
3339 3345 hp->ro = 0;
3340 3346
3341 3347 /*
3342 3348 * Setup PLOGI
3343 3349 */
3344 3350 bzero(&logi, sizeof (struct la_els_logi));
3345 3351 logi.ls_code.ls_code = LA_ELS_PLOGI;
3346 3352
3347 3353 FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
3348 3354 fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
3349 3355
3350 3356 /*
3351 3357 * Send PLOGI
3352 3358 */
3353 3359 *fc_status = login_retval =
3354 3360 fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
3355 3361 if (*fc_status != FC_SUCCESS) {
3356 3362 ret = EIO;
3357 3363 }
3358 3364 }
3359 3365
3360 3366 /*
3361 3367 * Wait for completion
3362 3368 */
3363 3369 if ((ret == 0) && (login_retval == FC_SUCCESS)) {
3364 3370 ret = fcp_ipkt_sema_wait(icmd);
3365 3371
3366 3372 *fc_pkt_state = fpkt->pkt_state;
3367 3373 *fc_pkt_reason = fpkt->pkt_reason;
3368 3374 *fc_pkt_action = fpkt->pkt_action;
3369 3375 }
3370 3376
3371 3377 /*
3372 3378 	 * Clean up the transport data structures if icmd was allocated. In
3373 3379 	 * the sema-sync case the completion callback only posts the semaphore
3374 3380 	 * (see fcp_ipkt_sema_callback()), so the cleanup is done here.
3375 3381 */
3376 3382 if (icmd != NULL) {
3377 3383 fcp_ipkt_sema_cleanup(icmd);
3378 3384 }
3379 3385
3380 3386 return (ret);
3381 3387 }
3382 3388
3383 3389 /*
3384 3390 * Function: fcp_tgt_send_prli
3385 3391 *
3386 3392 * Description: Does nothing as of today.
3387 3393 *
3388 3394 * Argument: ptgt Target to send the prli to.
3389 3395 * fc_status Status returned by fp/fctl in the PRLI request.
3390 3396 * fc_pkt_state State returned by fp/fctl in the PRLI request.
3391 3397 * fc_pkt_reason Reason returned by fp/fctl in the PRLI request.
3392 3398 * fc_pkt_action Action returned by fp/fctl in the PRLI request.
3393 3399 *
3394 3400 * Return Value: 0
3395 3401 */
3396 3402 /*ARGSUSED*/
3397 3403 static int
3398 3404 fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3399 3405 int *fc_pkt_reason, int *fc_pkt_action)
3400 3406 {
3401 3407 return (0);
3402 3408 }
3403 3409
3404 3410 /*
3405 3411 * Function: fcp_ipkt_sema_init
3406 3412 *
3407 3413 * Description: Initializes the semaphore contained in the internal packet.
3408 3414 *
3409 3415 * Argument: icmd Internal packet the semaphore of which must be
3410 3416 * initialized.
3411 3417 *
3412 3418 * Return Value: None
3413 3419 *
3414 3420 * Context: User context only.
3415 3421 */
3416 3422 static void
3417 3423 fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
3418 3424 {
3419 3425 struct fc_packet *fpkt;
3420 3426
3421 3427 fpkt = icmd->ipkt_fpkt;
3422 3428
3423 3429 /* Create semaphore for sync */
3424 3430 sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);
3425 3431
3426 3432 /* Setup the completion callback */
3427 3433 fpkt->pkt_comp = fcp_ipkt_sema_callback;
3428 3434 }
3429 3435
3430 3436 /*
3431 3437 * Function: fcp_ipkt_sema_wait
3432 3438 *
3433 3439 * Description: Wait on the semaphore embedded in the internal packet. The
3434 3440 * semaphore is released in the callback.
3435 3441 *
3436 3442 * Argument: icmd Internal packet to wait on for completion.
3437 3443 *
3438 3444 * Return Value: 0
3439 3445 * EIO
3440 3446 * EBUSY
3441 3447 * EAGAIN
3442 3448 *
3443 3449 * Context: User context only.
3444 3450 *
3445 3451 * This function does a conversion between the field pkt_state of the fc_packet
3446 3452 * embedded in the internal packet (icmd) and the code it returns.
3447 3453 */
3448 3454 static int
3449 3455 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3450 3456 {
3451 3457 struct fc_packet *fpkt;
3452 3458 int ret;
3453 3459
3454 3460 ret = EIO;
3455 3461 fpkt = icmd->ipkt_fpkt;
3456 3462
3457 3463 /*
3458 3464 * Wait on semaphore
3459 3465 */
3460 3466 sema_p(&(icmd->ipkt_sema));
3461 3467
3462 3468 /*
3463 3469 * Check the status of the FC packet
3464 3470 */
3465 3471 switch (fpkt->pkt_state) {
3466 3472 case FC_PKT_SUCCESS:
3467 3473 ret = 0;
3468 3474 break;
3469 3475 case FC_PKT_LOCAL_RJT:
3470 3476 switch (fpkt->pkt_reason) {
3471 3477 case FC_REASON_SEQ_TIMEOUT:
3472 3478 case FC_REASON_RX_BUF_TIMEOUT:
3473 3479 ret = EAGAIN;
3474 3480 break;
3475 3481 case FC_REASON_PKT_BUSY:
3476 3482 ret = EBUSY;
3477 3483 break;
3478 3484 }
3479 3485 break;
3480 3486 case FC_PKT_TIMEOUT:
3481 3487 ret = EAGAIN;
3482 3488 break;
3483 3489 case FC_PKT_LOCAL_BSY:
3484 3490 case FC_PKT_TRAN_BSY:
3485 3491 case FC_PKT_NPORT_BSY:
3486 3492 case FC_PKT_FABRIC_BSY:
3487 3493 ret = EBUSY;
3488 3494 break;
3489 3495 case FC_PKT_LS_RJT:
3490 3496 case FC_PKT_BA_RJT:
3491 3497 switch (fpkt->pkt_reason) {
3492 3498 case FC_REASON_LOGICAL_BSY:
3493 3499 ret = EBUSY;
3494 3500 break;
3495 3501 }
3496 3502 break;
3497 3503 case FC_PKT_FS_RJT:
3498 3504 switch (fpkt->pkt_reason) {
3499 3505 case FC_REASON_FS_LOGICAL_BUSY:
3500 3506 ret = EBUSY;
3501 3507 break;
3502 3508 }
3503 3509 break;
3504 3510 }
3505 3511
3506 3512 return (ret);
3507 3513 }
3508 3514
3509 3515 /*
3510 3516 * Function: fcp_ipkt_sema_callback
3511 3517 *
3512 3518 * Description: Registered as the completion callback function for the FC
3513 3519 * transport when the ipkt semaphore is used for sync. This will
3514 3520  *		clean up the used data structures, if necessary, and wake up
3515 3521 * the user thread to complete the transaction.
3516 3522 *
3517 3523 * Argument: fpkt FC packet (points to the icmd)
3518 3524 *
3519 3525 * Return Value: None
3520 3526 *
3521 3527 * Context: User context only
3522 3528 */
3523 3529 static void
3524 3530 fcp_ipkt_sema_callback(struct fc_packet *fpkt)
3525 3531 {
3526 3532 struct fcp_ipkt *icmd;
3527 3533
3528 3534 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
3529 3535
3530 3536 /*
3531 3537 * Wake up user thread
3532 3538 */
3533 3539 sema_v(&(icmd->ipkt_sema));
3534 3540 }
3535 3541
3536 3542 /*
3537 3543 * Function: fcp_ipkt_sema_cleanup
3538 3544 *
3539 3545 * Description: Called to cleanup (if necessary) the data structures used
3540 3546 * when ipkt sema is used for sync. This function will detect
3541 3547 * whether the caller is the last thread (via counter) and
3542 3548 * cleanup only if necessary.
3543 3549 *
3544 3550 * Argument: icmd Internal command packet
3545 3551 *
3546 3552 * Return Value: None
3547 3553 *
3548 3554 * Context: User context only
3549 3555 */
3550 3556 static void
3551 3557 fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
3552 3558 {
3553 3559 struct fcp_tgt *ptgt;
3554 3560 struct fcp_port *pptr;
3555 3561
3556 3562 ptgt = icmd->ipkt_tgt;
3557 3563 pptr = icmd->ipkt_port;
3558 3564
3559 3565 /*
3560 3566 * Acquire data structure
3561 3567 */
3562 3568 mutex_enter(&ptgt->tgt_mutex);
3563 3569
3564 3570 /*
3565 3571 * Destroy semaphore
3566 3572 */
3567 3573 sema_destroy(&(icmd->ipkt_sema));
3568 3574
3569 3575 /*
3570 3576 * Cleanup internal packet
3571 3577 */
3572 3578 mutex_exit(&ptgt->tgt_mutex);
3573 3579 fcp_icmd_free(pptr, icmd);
3574 3580 }
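
Taken together, fcp_ipkt_sema_init(), fcp_ipkt_sema_wait(), fcp_ipkt_sema_callback() and fcp_ipkt_sema_cleanup() above implement a synchronous wrapper over an asynchronous FC request: the semaphore starts at zero, the initiator submits the packet and blocks in sema_p(), and the completion callback's only job is sema_v(). The condensed sketch below restates the same pattern with generic, invented names (struct sync_req, sync_req_issue(), sync_req_done()); it is an illustration of the technique, not additional driver code.

#include <sys/ksynch.h>

/*
 * Generic sema-sync sketch: the initiator blocks until the completion
 * side posts the semaphore, then reads the result and tears down.
 */
struct sync_req {
	ksema_t	sr_sema;
	int	sr_result;
};

static void
sync_req_done(struct sync_req *sr, int result)	/* completion side */
{
	sr->sr_result = result;
	sema_v(&sr->sr_sema);
}

static int
sync_req_issue(struct sync_req *sr, int (*submit)(struct sync_req *))
{
	int	ret;

	sema_init(&sr->sr_sema, 0, NULL, SEMA_DRIVER, NULL);
	ret = submit(sr);		/* must arrange for sync_req_done() */
	if (ret == 0) {
		sema_p(&sr->sr_sema);	/* block until the callback posts */
		ret = sr->sr_result;
	}
	sema_destroy(&sr->sr_sema);
	return (ret);
}

The driver's version differs mainly in that the result is taken from the fc_packet state after the wait and the cleanup path also releases the internal packet.
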
3575 3581
3576 3582 /*
3577 3583 * Function: fcp_port_attach
3578 3584 *
3579 3585 * Description: Called by the transport framework to resume, suspend or
3580 3586 * attach a new port.
3581 3587 *
3582 3588 * Argument: ulph Port handle
3583 3589 * *pinfo Port information
3584 3590 * cmd Command
3585 3591 * s_id Port ID
3586 3592 *
3587 3593 * Return Value: FC_FAILURE or FC_SUCCESS
3588 3594 */
3589 3595 /*ARGSUSED*/
3590 3596 static int
3591 3597 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3592 3598 fc_attach_cmd_t cmd, uint32_t s_id)
3593 3599 {
3594 3600 int instance;
3595 3601 int res = FC_FAILURE; /* default result */
3596 3602
3597 3603 ASSERT(pinfo != NULL);
3598 3604
3599 3605 instance = ddi_get_instance(pinfo->port_dip);
3600 3606
3601 3607 switch (cmd) {
3602 3608 case FC_CMD_ATTACH:
3603 3609 /*
3604 3610 		 * this port instance is attaching for the first time (or after
3605 3611 		 * having been detached earlier)
3606 3612 */
3607 3613 if (fcp_handle_port_attach(ulph, pinfo, s_id,
3608 3614 instance) == DDI_SUCCESS) {
3609 3615 res = FC_SUCCESS;
3610 3616 } else {
3611 3617 ASSERT(ddi_get_soft_state(fcp_softstate,
3612 3618 instance) == NULL);
3613 3619 }
3614 3620 break;
3615 3621
3616 3622 case FC_CMD_RESUME:
3617 3623 case FC_CMD_POWER_UP:
3618 3624 /*
3619 3625 		 * this port instance was attached and then suspended and
3620 3626 		 * will now be resumed
3621 3627 */
3622 3628 if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3623 3629 instance) == DDI_SUCCESS) {
3624 3630 res = FC_SUCCESS;
3625 3631 }
3626 3632 break;
3627 3633
3628 3634 default:
3629 3635 /* shouldn't happen */
3630 3636 FCP_TRACE(fcp_logq, "fcp",
3631 3637 fcp_trace, FCP_BUF_LEVEL_2, 0,
3632 3638 		    "port_attach: unknown command: %d", cmd);
3633 3639 break;
3634 3640 }
3635 3641
3636 3642 /* return result */
3637 3643 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3638 3644 FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3639 3645
3640 3646 return (res);
3641 3647 }
3642 3648
3643 3649
3644 3650 /*
3645 3651 * detach or suspend this port instance
3646 3652 *
3647 3653 * acquires and releases the global mutex
3648 3654 *
3649 3655 * acquires and releases the mutex for this port
3650 3656 *
3651 3657 * acquires and releases the hotplug mutex for this port
3652 3658 */
3653 3659 /*ARGSUSED*/
3654 3660 static int
3655 3661 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
3656 3662 fc_detach_cmd_t cmd)
3657 3663 {
3658 3664 int flag;
3659 3665 int instance;
3660 3666 struct fcp_port *pptr;
3661 3667
3662 3668 instance = ddi_get_instance(info->port_dip);
3663 3669 pptr = ddi_get_soft_state(fcp_softstate, instance);
3664 3670
3665 3671 switch (cmd) {
3666 3672 case FC_CMD_SUSPEND:
3667 3673 FCP_DTRACE(fcp_logq, "fcp",
3668 3674 fcp_trace, FCP_BUF_LEVEL_8, 0,
3669 3675 "port suspend called for port %d", instance);
3670 3676 flag = FCP_STATE_SUSPENDED;
3671 3677 break;
3672 3678
3673 3679 case FC_CMD_POWER_DOWN:
3674 3680 FCP_DTRACE(fcp_logq, "fcp",
3675 3681 fcp_trace, FCP_BUF_LEVEL_8, 0,
3676 3682 "port power down called for port %d", instance);
3677 3683 flag = FCP_STATE_POWER_DOWN;
3678 3684 break;
3679 3685
3680 3686 case FC_CMD_DETACH:
3681 3687 FCP_DTRACE(fcp_logq, "fcp",
3682 3688 fcp_trace, FCP_BUF_LEVEL_8, 0,
3683 3689 "port detach called for port %d", instance);
3684 3690 flag = FCP_STATE_DETACHING;
3685 3691 break;
3686 3692
3687 3693 default:
3688 3694 /* shouldn't happen */
3689 3695 return (FC_FAILURE);
3690 3696 }
3691 3697 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3692 3698 FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");
3693 3699
3694 3700 return (fcp_handle_port_detach(pptr, flag, instance));
3695 3701 }
3696 3702
3697 3703
3698 3704 /*
3699 3705 * called for ioctls on the transport's devctl interface, and the transport
3700 3706 * has passed it to us
3701 3707 *
3702 3708 * this will only be called for device control ioctls (i.e. hotplugging stuff)
3703 3709 *
3704 3710 * return FC_SUCCESS if we decide to claim the ioctl,
3705 3711 * else return FC_UNCLAIMED
3706 3712 *
3707 3713 * *rval is set iff we decide to claim the ioctl
3708 3714 */
3709 3715 /*ARGSUSED*/
3710 3716 static int
3711 3717 fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
3712 3718 intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
3713 3719 {
3714 3720 int retval = FC_UNCLAIMED; /* return value */
3715 3721 struct fcp_port *pptr = NULL; /* our soft state */
3716 3722 struct devctl_iocdata *dcp = NULL; /* for devctl */
3717 3723 dev_info_t *cdip;
3718 3724 mdi_pathinfo_t *pip = NULL;
3719 3725 char *ndi_nm; /* NDI name */
3720 3726 char *ndi_addr; /* NDI addr */
3721 3727 int is_mpxio, circ;
3722 3728 int devi_entered = 0;
3723 3729 clock_t end_time;
3724 3730
3725 3731 ASSERT(rval != NULL);
3726 3732
3727 3733 FCP_DTRACE(fcp_logq, "fcp",
3728 3734 fcp_trace, FCP_BUF_LEVEL_8, 0,
3729 3735 "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);
3730 3736
3731 3737 /* if already claimed then forget it */
3732 3738 if (claimed) {
3733 3739 /*
3734 3740 * for now, if this ioctl has already been claimed, then
3735 3741 * we just ignore it
3736 3742 */
3737 3743 return (retval);
3738 3744 }
3739 3745
3740 3746 /* get our port info */
3741 3747 if ((pptr = fcp_get_port(port_handle)) == NULL) {
3742 3748 fcp_log(CE_WARN, NULL,
3743 3749 		    "!fcp: Invalid port handle in ioctl");
3744 3750 *rval = ENXIO;
3745 3751 return (retval);
3746 3752 }
3747 3753 is_mpxio = pptr->port_mpxio;
3748 3754
3749 3755 switch (cmd) {
3750 3756 case DEVCTL_BUS_GETSTATE:
3751 3757 case DEVCTL_BUS_QUIESCE:
3752 3758 case DEVCTL_BUS_UNQUIESCE:
3753 3759 case DEVCTL_BUS_RESET:
3754 3760 case DEVCTL_BUS_RESETALL:
3755 3761
3756 3762 case DEVCTL_BUS_DEV_CREATE:
3757 3763 if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3758 3764 return (retval);
3759 3765 }
3760 3766 break;
3761 3767
3762 3768 case DEVCTL_DEVICE_GETSTATE:
3763 3769 case DEVCTL_DEVICE_OFFLINE:
3764 3770 case DEVCTL_DEVICE_ONLINE:
3765 3771 case DEVCTL_DEVICE_REMOVE:
3766 3772 case DEVCTL_DEVICE_RESET:
3767 3773 if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3768 3774 return (retval);
3769 3775 }
3770 3776
3771 3777 ASSERT(dcp != NULL);
3772 3778
3773 3779 /* ensure we have a name and address */
3774 3780 if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
3775 3781 ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
3776 3782 FCP_TRACE(fcp_logq, pptr->port_instbuf,
3777 3783 fcp_trace, FCP_BUF_LEVEL_2, 0,
3778 3784 "ioctl: can't get name (%s) or addr (%s)",
3779 3785 ndi_nm ? ndi_nm : "<null ptr>",
3780 3786 ndi_addr ? ndi_addr : "<null ptr>");
3781 3787 ndi_dc_freehdl(dcp);
3782 3788 return (retval);
3783 3789 }
3784 3790
3785 3791
3786 3792 /* get our child's DIP */
3787 3793 ASSERT(pptr != NULL);
3788 3794 if (is_mpxio) {
3789 3795 mdi_devi_enter(pptr->port_dip, &circ);
3790 3796 } else {
3791 3797 ndi_devi_enter(pptr->port_dip, &circ);
3792 3798 }
3793 3799 devi_entered = 1;
3794 3800
3795 3801 if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
3796 3802 ndi_addr)) == NULL) {
3797 3803 /* Look for virtually enumerated devices. */
3798 3804 pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
3799 3805 if (pip == NULL ||
3800 3806 ((cdip = mdi_pi_get_client(pip)) == NULL)) {
3801 3807 *rval = ENXIO;
3802 3808 goto out;
3803 3809 }
3804 3810 }
3805 3811 break;
3806 3812
3807 3813 default:
3808 3814 *rval = ENOTTY;
3809 3815 return (retval);
3810 3816 }
3811 3817
3812 3818 /* this ioctl is ours -- process it */
3813 3819
3814 3820 retval = FC_SUCCESS; /* just means we claim the ioctl */
3815 3821
3816 3822 /* we assume it will be a success; else we'll set error value */
3817 3823 *rval = 0;
3818 3824
3819 3825
3820 3826 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
3821 3827 fcp_trace, FCP_BUF_LEVEL_8, 0,
3822 3828 "ioctl: claiming this one");
3823 3829
3824 3830 /* handle ioctls now */
3825 3831 switch (cmd) {
3826 3832 case DEVCTL_DEVICE_GETSTATE:
3827 3833 ASSERT(cdip != NULL);
3828 3834 ASSERT(dcp != NULL);
3829 3835 if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
3830 3836 *rval = EFAULT;
3831 3837 }
3832 3838 break;
3833 3839
3834 3840 case DEVCTL_DEVICE_REMOVE:
3835 3841 case DEVCTL_DEVICE_OFFLINE: {
3836 3842 int flag = 0;
3837 3843 int lcount;
3838 3844 int tcount;
3839 3845 struct fcp_pkt *head = NULL;
3840 3846 struct fcp_lun *plun;
3841 3847 child_info_t *cip = CIP(cdip);
3842 3848 int all = 1;
3843 3849 struct fcp_lun *tplun;
3844 3850 struct fcp_tgt *ptgt;
3845 3851
3846 3852 ASSERT(pptr != NULL);
3847 3853 ASSERT(cdip != NULL);
3848 3854
3849 3855 mutex_enter(&pptr->port_mutex);
3850 3856 if (pip != NULL) {
3851 3857 cip = CIP(pip);
3852 3858 }
3853 3859 if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3854 3860 mutex_exit(&pptr->port_mutex);
3855 3861 *rval = ENXIO;
3856 3862 break;
3857 3863 }
3858 3864
3859 3865 head = fcp_scan_commands(plun);
3860 3866 if (head != NULL) {
3861 3867 fcp_abort_commands(head, LUN_PORT);
3862 3868 }
3863 3869 lcount = pptr->port_link_cnt;
3864 3870 tcount = plun->lun_tgt->tgt_change_cnt;
3865 3871 mutex_exit(&pptr->port_mutex);
3866 3872
3867 3873 if (cmd == DEVCTL_DEVICE_REMOVE) {
3868 3874 flag = NDI_DEVI_REMOVE;
3869 3875 }
3870 3876
3871 3877 if (is_mpxio) {
3872 3878 mdi_devi_exit(pptr->port_dip, circ);
3873 3879 } else {
3874 3880 ndi_devi_exit(pptr->port_dip, circ);
3875 3881 }
3876 3882 devi_entered = 0;
3877 3883
3878 3884 *rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3879 3885 FCP_OFFLINE, lcount, tcount, flag);
3880 3886
3881 3887 if (*rval != NDI_SUCCESS) {
3882 3888 *rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
3883 3889 break;
3884 3890 }
3885 3891
3886 3892 fcp_update_offline_flags(plun);
3887 3893
3888 3894 ptgt = plun->lun_tgt;
3889 3895 mutex_enter(&ptgt->tgt_mutex);
3890 3896 for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
3891 3897 tplun->lun_next) {
3892 3898 mutex_enter(&tplun->lun_mutex);
3893 3899 if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
3894 3900 all = 0;
3895 3901 }
3896 3902 mutex_exit(&tplun->lun_mutex);
3897 3903 }
3898 3904
3899 3905 if (all) {
3900 3906 ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
3901 3907 /*
3902 3908 * The user is unconfiguring/offlining the device.
3903 3909 			 * If this is a fabric device and auto configuration
3904 3910 			 * is enabled, then make sure the user is the only one
3905 3911 			 * who can reconfigure the device.
3906 3912 */
3907 3913 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
3908 3914 fcp_enable_auto_configuration) {
3909 3915 ptgt->tgt_manual_config_only = 1;
3910 3916 }
3911 3917 }
3912 3918 mutex_exit(&ptgt->tgt_mutex);
3913 3919 break;
3914 3920 }
3915 3921
3916 3922 case DEVCTL_DEVICE_ONLINE: {
3917 3923 int lcount;
3918 3924 int tcount;
3919 3925 struct fcp_lun *plun;
3920 3926 child_info_t *cip = CIP(cdip);
3921 3927
3922 3928 ASSERT(cdip != NULL);
3923 3929 ASSERT(pptr != NULL);
3924 3930
3925 3931 mutex_enter(&pptr->port_mutex);
3926 3932 if (pip != NULL) {
3927 3933 cip = CIP(pip);
3928 3934 }
3929 3935 if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3930 3936 mutex_exit(&pptr->port_mutex);
3931 3937 *rval = ENXIO;
3932 3938 break;
3933 3939 }
3934 3940 lcount = pptr->port_link_cnt;
3935 3941 tcount = plun->lun_tgt->tgt_change_cnt;
3936 3942 mutex_exit(&pptr->port_mutex);
3937 3943
3938 3944 /*
3939 3945 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
3940 3946 * to allow the device attach to occur when the device is
3941 3947 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
3942 3948 * from the scsi_probe()).
3943 3949 */
3944 3950 mutex_enter(&LUN_TGT->tgt_mutex);
3945 3951 plun->lun_state |= FCP_LUN_ONLINING;
3946 3952 mutex_exit(&LUN_TGT->tgt_mutex);
3947 3953
3948 3954 if (is_mpxio) {
3949 3955 mdi_devi_exit(pptr->port_dip, circ);
3950 3956 } else {
3951 3957 ndi_devi_exit(pptr->port_dip, circ);
3952 3958 }
3953 3959 devi_entered = 0;
3954 3960
3955 3961 *rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3956 3962 FCP_ONLINE, lcount, tcount, 0);
3957 3963
3958 3964 if (*rval != NDI_SUCCESS) {
3959 3965 /* Reset the FCP_LUN_ONLINING bit */
3960 3966 mutex_enter(&LUN_TGT->tgt_mutex);
3961 3967 plun->lun_state &= ~FCP_LUN_ONLINING;
3962 3968 mutex_exit(&LUN_TGT->tgt_mutex);
3963 3969 *rval = EIO;
3964 3970 break;
3965 3971 }
3966 3972 mutex_enter(&LUN_TGT->tgt_mutex);
3967 3973 plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
3968 3974 FCP_LUN_ONLINING);
3969 3975 mutex_exit(&LUN_TGT->tgt_mutex);
3970 3976 break;
3971 3977 }
3972 3978
3973 3979 case DEVCTL_BUS_DEV_CREATE: {
3974 3980 uchar_t *bytes = NULL;
3975 3981 uint_t nbytes;
3976 3982 struct fcp_tgt *ptgt = NULL;
3977 3983 struct fcp_lun *plun = NULL;
3978 3984 dev_info_t *useless_dip = NULL;
3979 3985
3980 3986 *rval = ndi_dc_devi_create(dcp, pptr->port_dip,
3981 3987 DEVCTL_CONSTRUCT, &useless_dip);
3982 3988 if (*rval != 0 || useless_dip == NULL) {
3983 3989 break;
3984 3990 }
3985 3991
3986 3992 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
3987 3993 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
3988 3994 &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
3989 3995 *rval = EINVAL;
3990 3996 (void) ndi_devi_free(useless_dip);
3991 3997 if (bytes != NULL) {
3992 3998 ddi_prop_free(bytes);
3993 3999 }
3994 4000 break;
3995 4001 }
3996 4002
3997 4003 *rval = fcp_create_on_demand(pptr, bytes);
3998 4004 if (*rval == 0) {
3999 4005 mutex_enter(&pptr->port_mutex);
4000 4006 ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
4001 4007 if (ptgt) {
4002 4008 /*
4003 4009 * We now have a pointer to the target that
4004 4010 			 * was created. Let's point to the first LUN on
4005 4011 * this new target.
4006 4012 */
4007 4013 mutex_enter(&ptgt->tgt_mutex);
4008 4014
4009 4015 plun = ptgt->tgt_lun;
4010 4016 /*
4011 4017 * There may be stale/offline LUN entries on
4012 4018 * this list (this is by design) and so we have
4013 4019 * to make sure we point to the first online
4014 4020 * LUN
4015 4021 */
4016 4022 while (plun &&
4017 4023 plun->lun_state & FCP_LUN_OFFLINE) {
4018 4024 plun = plun->lun_next;
4019 4025 }
4020 4026
4021 4027 mutex_exit(&ptgt->tgt_mutex);
4022 4028 }
4023 4029 mutex_exit(&pptr->port_mutex);
4024 4030 }
4025 4031
4026 4032 if (*rval == 0 && ptgt && plun) {
4027 4033 mutex_enter(&plun->lun_mutex);
4028 4034 /*
4029 4035 * Allow up to fcp_lun_ready_retry seconds to
4030 4036 * configure all the luns behind the target.
4031 4037 *
4032 4038 * The intent here is to allow targets with long
4033 4039 * reboot/reset-recovery times to become available
4034 4040 * while limiting the maximum wait time for an
4035 4041 * unresponsive target.
4036 4042 */
4037 4043 end_time = ddi_get_lbolt() +
4038 4044 SEC_TO_TICK(fcp_lun_ready_retry);
4039 4045
4040 4046 while (ddi_get_lbolt() < end_time) {
4041 4047 retval = FC_SUCCESS;
4042 4048
4043 4049 /*
4044 4050 				 * The new ndi interfaces for on-demand creation
4045 4051 				 * are inflexible. Do some more work to pass on
4046 4052 				 * a path name of some LUN (the design is broken!)
4047 4053 */
4048 4054 if (plun->lun_cip) {
4049 4055 if (plun->lun_mpxio == 0) {
4050 4056 cdip = DIP(plun->lun_cip);
4051 4057 } else {
4052 4058 cdip = mdi_pi_get_client(
4053 4059 PIP(plun->lun_cip));
4054 4060 }
4055 4061 if (cdip == NULL) {
4056 4062 *rval = ENXIO;
4057 4063 break;
4058 4064 }
4059 4065
4060 4066 if (!i_ddi_devi_attached(cdip)) {
4061 4067 mutex_exit(&plun->lun_mutex);
4062 4068 delay(drv_usectohz(1000000));
4063 4069 mutex_enter(&plun->lun_mutex);
4064 4070 } else {
4065 4071 /*
4066 4072 					 * This LUN is ready; let's
4067 4073 					 * check the next one.
4068 4074 */
4069 4075 mutex_exit(&plun->lun_mutex);
4070 4076 plun = plun->lun_next;
4071 4077 while (plun && (plun->lun_state
4072 4078 & FCP_LUN_OFFLINE)) {
4073 4079 plun = plun->lun_next;
4074 4080 }
4075 4081 if (!plun) {
4076 4082 break;
4077 4083 }
4078 4084 mutex_enter(&plun->lun_mutex);
4079 4085 }
4080 4086 } else {
4081 4087 /*
4082 4088 * lun_cip field for a valid lun
4083 4089 * should never be NULL. Fail the
4084 4090 * command.
4085 4091 */
4086 4092 *rval = ENXIO;
4087 4093 break;
4088 4094 }
4089 4095 }
4090 4096 if (plun) {
4091 4097 mutex_exit(&plun->lun_mutex);
4092 4098 } else {
4093 4099 char devnm[MAXNAMELEN];
4094 4100 int nmlen;
4095 4101
4096 4102 nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
4097 4103 ddi_node_name(cdip),
4098 4104 ddi_get_name_addr(cdip));
4099 4105
4100 4106 if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
4101 4107 0) {
4102 4108 *rval = EFAULT;
4103 4109 }
4104 4110 }
4105 4111 } else {
4106 4112 int i;
4107 4113 char buf[25];
4108 4114
4109 4115 for (i = 0; i < FC_WWN_SIZE; i++) {
4110 4116 (void) sprintf(&buf[i << 1], "%02x", bytes[i]);
4111 4117 }
4112 4118
4113 4119 fcp_log(CE_WARN, pptr->port_dip,
4114 4120 "!Failed to create nodes for pwwn=%s; error=%x",
4115 4121 buf, *rval);
4116 4122 }
4117 4123
4118 4124 (void) ndi_devi_free(useless_dip);
4119 4125 ddi_prop_free(bytes);
4120 4126 break;
4121 4127 }
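
The wait loop in the DEVCTL_BUS_DEV_CREATE case above uses the common lbolt deadline idiom: compute an end time with ddi_get_lbolt() + SEC_TO_TICK(), then poll once a second until the condition holds or the deadline passes. A stand-alone sketch of that idiom follows; condition_ready() is a hypothetical callback standing in for the i_ddi_devi_attached() checks above, and the function name is invented.

#include <sys/types.h>
#include <sys/time.h>
#include <sys/errno.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

/* Poll once a second until the condition holds or the deadline expires. */
static int
wait_with_deadline(int wait_seconds, boolean_t (*condition_ready)(void *),
    void *arg)
{
	clock_t	end_time = ddi_get_lbolt() + SEC_TO_TICK(wait_seconds);

	while (ddi_get_lbolt() < end_time) {
		if (condition_ready(arg)) {
			return (0);			/* ready in time */
		}
		delay(drv_usectohz(1000000));		/* sleep one second */
	}
	return (EAGAIN);				/* deadline expired */
}

The same trade-off described in the comment above applies: slow-to-recover targets get time to appear, while an unresponsive target bounds the wait to the configured number of seconds.
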
4122 4128
4123 4129 case DEVCTL_DEVICE_RESET: {
4124 4130 struct fcp_lun *plun;
4125 4131 child_info_t *cip = CIP(cdip);
4126 4132
4127 4133 ASSERT(cdip != NULL);
4128 4134 ASSERT(pptr != NULL);
4129 4135 mutex_enter(&pptr->port_mutex);
4130 4136 if (pip != NULL) {
4131 4137 cip = CIP(pip);
4132 4138 }
4133 4139 if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
4134 4140 mutex_exit(&pptr->port_mutex);
4135 4141 *rval = ENXIO;
4136 4142 break;
4137 4143 }
4138 4144 mutex_exit(&pptr->port_mutex);
4139 4145
4140 4146 mutex_enter(&plun->lun_tgt->tgt_mutex);
4141 4147 if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
4142 4148 mutex_exit(&plun->lun_tgt->tgt_mutex);
4143 4149
4144 4150 *rval = ENXIO;
4145 4151 break;
4146 4152 }
4147 4153
4148 4154 if (plun->lun_sd == NULL) {
4149 4155 mutex_exit(&plun->lun_tgt->tgt_mutex);
4150 4156
4151 4157 *rval = ENXIO;
4152 4158 break;
4153 4159 }
4154 4160 mutex_exit(&plun->lun_tgt->tgt_mutex);
4155 4161
4156 4162 /*
4157 4163 * set up ap so that fcp_reset can figure out
4158 4164 * which target to reset
4159 4165 */
4160 4166 if (fcp_scsi_reset(&plun->lun_sd->sd_address,
4161 4167 RESET_TARGET) == FALSE) {
4162 4168 *rval = EIO;
4163 4169 }
4164 4170 break;
4165 4171 }
4166 4172
4167 4173 case DEVCTL_BUS_GETSTATE:
4168 4174 ASSERT(dcp != NULL);
4169 4175 ASSERT(pptr != NULL);
4170 4176 ASSERT(pptr->port_dip != NULL);
4171 4177 if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
4172 4178 NDI_SUCCESS) {
4173 4179 *rval = EFAULT;
4174 4180 }
4175 4181 break;
4176 4182
4177 4183 case DEVCTL_BUS_QUIESCE:
4178 4184 case DEVCTL_BUS_UNQUIESCE:
4179 4185 *rval = ENOTSUP;
4180 4186 break;
4181 4187
4182 4188 case DEVCTL_BUS_RESET:
4183 4189 case DEVCTL_BUS_RESETALL:
4184 4190 ASSERT(pptr != NULL);
4185 4191 (void) fcp_linkreset(pptr, NULL, KM_SLEEP);
4186 4192 break;
4187 4193
4188 4194 default:
4189 4195 ASSERT(dcp != NULL);
4190 4196 *rval = ENOTTY;
4191 4197 break;
4192 4198 }
4193 4199
4194 4200 /* all done -- clean up and return */
4195 4201 out: if (devi_entered) {
4196 4202 if (is_mpxio) {
4197 4203 mdi_devi_exit(pptr->port_dip, circ);
4198 4204 } else {
4199 4205 ndi_devi_exit(pptr->port_dip, circ);
4200 4206 }
4201 4207 }
4202 4208
4203 4209 if (dcp != NULL) {
4204 4210 ndi_dc_freehdl(dcp);
4205 4211 }
4206 4212
4207 4213 return (retval);
4208 4214 }
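
fcp_port_ioctl() follows the devctl claim convention spelled out in its header comment: return FC_UNCLAIMED to pass on the ioctl, or claim it (FC_SUCCESS here just means "claimed") and report the outcome through *rval. The skeleton below is a hedged sketch of a ULP-side handler honoring that convention; EXAMPLE_CMD, do_example_work() and example_ulp_port_ioctl() are invented names, not part of fcp or fctl.

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/cred.h>
#include <sys/fibre-channel/fc.h>

#define	EXAMPLE_CMD	0x4649			/* invented command code */

static int
do_example_work(opaque_t port_handle, intptr_t data, int mode)
{
	return (0);				/* stand-in for real work */
}

/* Skeleton ULP devctl handler following the claim/unclaim convention. */
static int
example_ulp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
    int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
    uint32_t claimed)
{
	if (claimed || cmd != EXAMPLE_CMD) {
		/* already claimed by someone else, or not ours: pass */
		return (FC_UNCLAIMED);
	}

	*rval = 0;				/* claiming: result via *rval */
	if (do_example_work(port_handle, data, mode) != 0) {
		*rval = EIO;
	}
	return (FC_SUCCESS);			/* just means we claim it */
}
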
4209 4215
4210 4216
4211 4217 /*ARGSUSED*/
4212 4218 static int
4213 4219 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4214 4220 uint32_t claimed)
4215 4221 {
4216 4222 uchar_t r_ctl;
4217 4223 uchar_t ls_code;
4218 4224 struct fcp_port *pptr;
4219 4225
4220 4226 if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4221 4227 return (FC_UNCLAIMED);
4222 4228 }
4223 4229
4224 4230 mutex_enter(&pptr->port_mutex);
4225 4231 if (pptr->port_state & (FCP_STATE_DETACHING |
4226 4232 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4227 4233 mutex_exit(&pptr->port_mutex);
4228 4234 return (FC_UNCLAIMED);
4229 4235 }
4230 4236 mutex_exit(&pptr->port_mutex);
4231 4237
4232 4238 r_ctl = buf->ub_frame.r_ctl;
4233 4239
4234 4240 switch (r_ctl & R_CTL_ROUTING) {
4235 4241 case R_CTL_EXTENDED_SVC:
4236 4242 if (r_ctl == R_CTL_ELS_REQ) {
4237 4243 ls_code = buf->ub_buffer[0];
4238 4244
4239 4245 switch (ls_code) {
4240 4246 case LA_ELS_PRLI:
4241 4247 /*
4242 4248 * We really don't care if something fails.
4243 4249 * If the PRLI was not sent out, then the
4244 4250 * other end will time it out.
4245 4251 */
4246 4252 if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4247 4253 return (FC_SUCCESS);
4248 4254 }
4249 4255 return (FC_UNCLAIMED);
4250 4256 /* NOTREACHED */
4251 4257
4252 4258 default:
4253 4259 break;
4254 4260 }
4255 4261 }
4256 4262 /* FALLTHROUGH */
4257 4263
4258 4264 default:
4259 4265 return (FC_UNCLAIMED);
4260 4266 }
4261 4267 }
4262 4268
4263 4269
4264 4270 /*ARGSUSED*/
4265 4271 static int
4266 4272 fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4267 4273 uint32_t claimed)
4268 4274 {
4269 4275 return (FC_UNCLAIMED);
4270 4276 }
4271 4277
4272 4278 /*
4273 4279 * Function: fcp_statec_callback
4274 4280 *
4275 4281 * Description: The purpose of this function is to handle a port state change.
4276 4282 * It is called from fp/fctl and, in a few instances, internally.
4277 4283 *
4278 4284 * Argument: ulph fp/fctl port handle
4279 4285 * port_handle fcp_port structure
4280 4286 * port_state Physical state of the port
4281 4287 * port_top Topology
4282 4288 * *devlist Pointer to the first entry of a table
4283 4289 * containing the remote ports that can be
4284 4290 * reached.
4285 4291 * dev_cnt Number of entries pointed by devlist.
4286 4292 * port_sid Port ID of the local port.
4287 4293 *
4288 4294 * Return Value: None
4289 4295 */
4290 4296 /*ARGSUSED*/
4291 4297 static void
4292 4298 fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
4293 4299 uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
4294 4300 uint32_t dev_cnt, uint32_t port_sid)
4295 4301 {
4296 4302 uint32_t link_count;
4297 4303 int map_len = 0;
4298 4304 struct fcp_port *pptr;
4299 4305 fcp_map_tag_t *map_tag = NULL;
4300 4306
4301 4307 if ((pptr = fcp_get_port(port_handle)) == NULL) {
4302 4308 fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
4303 4309 return; /* nothing to work with! */
4304 4310 }
4305 4311
4306 4312 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4307 4313 fcp_trace, FCP_BUF_LEVEL_2, 0,
4308 4314 "fcp_statec_callback: port state/dev_cnt/top ="
4309 4315 "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
4310 4316 dev_cnt, port_top);
4311 4317
4312 4318 mutex_enter(&pptr->port_mutex);
4313 4319
4314 4320 /*
4315 4321 * If a thread is in detach, don't do anything.
4316 4322 */
4317 4323 if (pptr->port_state & (FCP_STATE_DETACHING |
4318 4324 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4319 4325 mutex_exit(&pptr->port_mutex);
4320 4326 return;
4321 4327 }
4322 4328
4323 4329 /*
4324 4330 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
4325 4331 * init_pkt is called, it knows whether or not the target's status
4326 4332 * (or pd) might be changing.
4327 4333 */
4328 4334
4329 4335 if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
4330 4336 pptr->port_state |= FCP_STATE_IN_CB_DEVC;
4331 4337 }
4332 4338
4333 4339 /*
4334 4340 * the transport doesn't allocate or probe unless being
4335 4341 * asked to by either the applications or ULPs
4336 4342 *
4337 4343 * in cases where the port is OFFLINE at the time of port
4338 4344 * attach callback and the link comes ONLINE later, for
4339 4345 * easier automatic node creation (i.e. without you having to
4340 4346 * go out and run the utility to perform LOGINs) the
4341 4347 * following conditional is helpful
4342 4348 */
4343 4349 pptr->port_phys_state = port_state;
4344 4350
4345 4351 if (dev_cnt) {
4346 4352 mutex_exit(&pptr->port_mutex);
4347 4353
4348 4354 map_len = sizeof (*map_tag) * dev_cnt;
4349 4355 map_tag = kmem_alloc(map_len, KM_NOSLEEP);
4350 4356 if (map_tag == NULL) {
4351 4357 fcp_log(CE_WARN, pptr->port_dip,
4352 4358 "!fcp%d: failed to allocate for map tags; "
4353 4359 " state change will not be processed",
4354 4360 pptr->port_instance);
4355 4361
4356 4362 mutex_enter(&pptr->port_mutex);
4357 4363 pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4358 4364 mutex_exit(&pptr->port_mutex);
4359 4365
4360 4366 return;
4361 4367 }
4362 4368
4363 4369 mutex_enter(&pptr->port_mutex);
4364 4370 }
4365 4371
4366 4372 if (pptr->port_id != port_sid) {
4367 4373 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4368 4374 fcp_trace, FCP_BUF_LEVEL_3, 0,
4369 4375 "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
4370 4376 port_sid);
4371 4377 /*
4372 4378 * The local port changed ID. It is the first time a port ID
4373 4379 * is assigned or something drastic happened. We might have
4374 4380 * been unplugged and replugged on another loop or fabric port
4375 4381 * or somebody grabbed the AL_PA we had or somebody rezoned
4376 4382 * the fabric we were plugged into.
4377 4383 */
4378 4384 pptr->port_id = port_sid;
4379 4385 }
4380 4386
4381 4387 switch (FC_PORT_STATE_MASK(port_state)) {
4382 4388 case FC_STATE_OFFLINE:
4383 4389 case FC_STATE_RESET_REQUESTED:
4384 4390 /*
4385 4391 * link has gone from online to offline -- just update the
4386 4392 * state of this port to BUSY and MARKed to go offline
4387 4393 */
4388 4394 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4389 4395 fcp_trace, FCP_BUF_LEVEL_3, 0,
4390 4396 "link went offline");
4391 4397 if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
4392 4398 /*
4393 4399 * We were offline a while ago and this one
4394 4400 * seems to indicate that the loop has gone
4395 4401 * dead forever.
4396 4402 */
4397 4403 pptr->port_tmp_cnt += dev_cnt;
4398 4404 pptr->port_state &= ~FCP_STATE_OFFLINE;
4399 4405 pptr->port_state |= FCP_STATE_INIT;
4400 4406 link_count = pptr->port_link_cnt;
4401 4407 fcp_handle_devices(pptr, devlist, dev_cnt,
4402 4408 link_count, map_tag, FCP_CAUSE_LINK_DOWN);
4403 4409 } else {
4404 4410 pptr->port_link_cnt++;
4405 4411 ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
4406 4412 fcp_update_state(pptr, (FCP_LUN_BUSY |
4407 4413 FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
4408 4414 if (pptr->port_mpxio) {
4409 4415 fcp_update_mpxio_path_verifybusy(pptr);
4410 4416 }
4411 4417 pptr->port_state |= FCP_STATE_OFFLINE;
4412 4418 pptr->port_state &=
4413 4419 ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
4414 4420 pptr->port_tmp_cnt = 0;
4415 4421 }
4416 4422 mutex_exit(&pptr->port_mutex);
4417 4423 break;
4418 4424
4419 4425 case FC_STATE_ONLINE:
4420 4426 case FC_STATE_LIP:
4421 4427 case FC_STATE_LIP_LBIT_SET:
4422 4428 /*
4423 4429 * link has gone from offline to online
4424 4430 */
4425 4431 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4426 4432 fcp_trace, FCP_BUF_LEVEL_3, 0,
4427 4433 "link went online");
4428 4434
4429 4435 pptr->port_link_cnt++;
4430 4436
4431 4437 while (pptr->port_ipkt_cnt) {
4432 4438 mutex_exit(&pptr->port_mutex);
4433 4439 delay(drv_usectohz(1000000));
4434 4440 mutex_enter(&pptr->port_mutex);
4435 4441 }
4436 4442
4437 4443 pptr->port_topology = port_top;
4438 4444
4439 4445 /*
4440 4446 * The state of the targets and luns accessible through this
4441 4447 * port is updated.
4442 4448 */
4443 4449 fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
4444 4450 FCP_CAUSE_LINK_CHANGE);
4445 4451
4446 4452 pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
4447 4453 pptr->port_state |= FCP_STATE_ONLINING;
4448 4454 pptr->port_tmp_cnt = dev_cnt;
4449 4455 link_count = pptr->port_link_cnt;
4450 4456
4451 4457 pptr->port_deadline = fcp_watchdog_time +
4452 4458 FCP_ICMD_DEADLINE;
4453 4459
4454 4460 if (!dev_cnt) {
4455 4461 /*
4456 4462 * We go directly to the online state if no remote
4457 4463 * ports were discovered.
4458 4464 */
4459 4465 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4460 4466 fcp_trace, FCP_BUF_LEVEL_3, 0,
4461 4467 "No remote ports discovered");
4462 4468
4463 4469 pptr->port_state &= ~FCP_STATE_ONLINING;
4464 4470 pptr->port_state |= FCP_STATE_ONLINE;
4465 4471 }
4466 4472
4467 4473 switch (port_top) {
4468 4474 case FC_TOP_FABRIC:
4469 4475 case FC_TOP_PUBLIC_LOOP:
4470 4476 case FC_TOP_PRIVATE_LOOP:
4471 4477 case FC_TOP_PT_PT:
4472 4478
4473 4479 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4474 4480 fcp_retry_ns_registry(pptr, port_sid);
4475 4481 }
4476 4482
4477 4483 fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
4478 4484 map_tag, FCP_CAUSE_LINK_CHANGE);
4479 4485 break;
4480 4486
4481 4487 default:
4482 4488 /*
4483 4489 * We got here because we were provided with an unknown
4484 4490 * topology.
4485 4491 */
4486 4492 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4487 4493 pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
4488 4494 }
4489 4495
4490 4496 pptr->port_tmp_cnt -= dev_cnt;
4491 4497 fcp_log(CE_WARN, pptr->port_dip,
4492 4498 "!unknown/unsupported topology (0x%x)", port_top);
4493 4499 break;
4494 4500 }
4495 4501 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4496 4502 fcp_trace, FCP_BUF_LEVEL_3, 0,
4497 4503 "Notify ssd of the reset to reinstate the reservations");
4498 4504
4499 4505 scsi_hba_reset_notify_callback(&pptr->port_mutex,
4500 4506 &pptr->port_reset_notify_listf);
4501 4507
4502 4508 mutex_exit(&pptr->port_mutex);
4503 4509
4504 4510 break;
4505 4511
4506 4512 case FC_STATE_RESET:
4507 4513 ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
4508 4514 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4509 4515 fcp_trace, FCP_BUF_LEVEL_3, 0,
4510 4516 "RESET state, waiting for Offline/Online state_cb");
4511 4517 mutex_exit(&pptr->port_mutex);
4512 4518 break;
4513 4519
4514 4520 case FC_STATE_DEVICE_CHANGE:
4515 4521 /*
4516 4522 * We come here when an application has requested
4517 4523 * Dynamic node creation/deletion in Fabric connectivity.
4518 4524 */
4519 4525 if (pptr->port_state & (FCP_STATE_OFFLINE |
4520 4526 FCP_STATE_INIT)) {
4521 4527 /*
4522 4528 * This case can happen when the FCTL is in the
4523 4529 			 * process of giving us an online and the host on
4524 4530 * the other side issues a PLOGI/PLOGO. Ideally
4525 4531 * the state changes should be serialized unless
4526 4532 * they are opposite (online-offline).
4527 4533 * The transport will give us a final state change
4528 4534 * so we can ignore this for the time being.
4529 4535 */
4530 4536 pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4531 4537 mutex_exit(&pptr->port_mutex);
4532 4538 break;
4533 4539 }
4534 4540
4535 4541 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4536 4542 fcp_retry_ns_registry(pptr, port_sid);
4537 4543 }
4538 4544
4539 4545 /*
4540 4546 * Extend the deadline under steady state conditions
4541 4547 * to provide more time for the device-change-commands
4542 4548 */
4543 4549 if (!pptr->port_ipkt_cnt) {
4544 4550 pptr->port_deadline = fcp_watchdog_time +
4545 4551 FCP_ICMD_DEADLINE;
4546 4552 }
4547 4553
4548 4554 /*
4549 4555 * There is another race condition here, where if we were
4550 4556 	 * in ONLINING state and a device in the map logs out,
4551 4557 * fp will give another state change as DEVICE_CHANGE
4552 4558 * and OLD. This will result in that target being offlined.
4553 4559 * The pd_handle is freed. If from the first statec callback
4554 4560 * we were going to fire a PLOGI/PRLI, the system will
4555 4561 * panic in fc_ulp_transport with invalid pd_handle.
4556 4562 * The fix is to check for the link_cnt before issuing
4557 4563 * any command down.
4558 4564 */
4559 4565 fcp_update_targets(pptr, devlist, dev_cnt,
4560 4566 FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);
4561 4567
4562 4568 link_count = pptr->port_link_cnt;
4563 4569
4564 4570 fcp_handle_devices(pptr, devlist, dev_cnt,
4565 4571 link_count, map_tag, FCP_CAUSE_TGT_CHANGE);
4566 4572
4567 4573 pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4568 4574
4569 4575 mutex_exit(&pptr->port_mutex);
4570 4576 break;
4571 4577
4572 4578 case FC_STATE_TARGET_PORT_RESET:
4573 4579 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4574 4580 fcp_retry_ns_registry(pptr, port_sid);
4575 4581 }
4576 4582
4577 4583 /* Do nothing else */
4578 4584 mutex_exit(&pptr->port_mutex);
4579 4585 break;
4580 4586
4581 4587 default:
4582 4588 fcp_log(CE_WARN, pptr->port_dip,
4583 4589 "!Invalid state change=0x%x", port_state);
4584 4590 mutex_exit(&pptr->port_mutex);
4585 4591 break;
4586 4592 }
4587 4593
4588 4594 if (map_tag) {
4589 4595 kmem_free(map_tag, map_len);
4590 4596 }
4591 4597 }
4592 4598
4593 4599 /*
4594 4600 * Function: fcp_handle_devices
4595 4601 *
4596 4602 * Description: This function updates the devices currently known by
4597 4603 * walking the list provided by the caller. The list passed
4598 4604 * by the caller is supposed to be the list of reachable
4599 4605 * devices.
4600 4606 *
4601 4607 * Argument: *pptr Fcp port structure.
4602 4608 * *devlist Pointer to the first entry of a table
4603 4609 * containing the remote ports that can be
4604 4610 * reached.
4605 4611 * dev_cnt Number of entries pointed by devlist.
4606 4612 * link_cnt Link state count.
4607 4613 * *map_tag Array of fcp_map_tag_t structures.
4608 4614 * cause What caused this function to be called.
4609 4615 *
4610 4616 * Return Value: None
4611 4617 *
4612 4618 * Notes: The pptr->port_mutex must be held.
4613 4619 */
4614 4620 static void
4615 4621 fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
4616 4622 uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
4617 4623 {
4618 4624 int i;
4619 4625 int check_finish_init = 0;
4620 4626 fc_portmap_t *map_entry;
4621 4627 struct fcp_tgt *ptgt = NULL;
4622 4628
4623 4629 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4624 4630 fcp_trace, FCP_BUF_LEVEL_3, 0,
4625 4631 "fcp_handle_devices: called for %d dev(s)", dev_cnt);
4626 4632
4627 4633 if (dev_cnt) {
4628 4634 ASSERT(map_tag != NULL);
4629 4635 }
4630 4636
4631 4637 /*
4632 4638 * The following code goes through the list of remote ports that are
4633 4639 * accessible through this (pptr) local port (The list walked is the
4634 4640 * one provided by the caller which is the list of the remote ports
4635 4641 * currently reachable). It checks if any of them was already
4636 4642 * known by looking for the corresponding target structure based on
4637 4643 * the world wide name. If a target is part of the list it is tagged
4638 4644 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
4639 4645 *
4640 4646 * Old comment
4641 4647 * -----------
4642 4648 	 * Before we drop the port mutex we MUST get the tags updated. This
4643 4649 	 * two-step process is somewhat slow, but more reliable.
4644 4650 */
4645 4651 for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4646 4652 map_entry = &(devlist[i]);
4647 4653
4648 4654 /*
4649 4655 * get ptr to this map entry in our port's
4650 4656 * list (if any)
4651 4657 */
4652 4658 ptgt = fcp_lookup_target(pptr,
4653 4659 (uchar_t *)&(map_entry->map_pwwn));
4654 4660
4655 4661 if (ptgt) {
4656 4662 map_tag[i] = ptgt->tgt_change_cnt;
4657 4663 if (cause == FCP_CAUSE_LINK_CHANGE) {
4658 4664 ptgt->tgt_aux_state = FCP_TGT_TAGGED;
4659 4665 }
4660 4666 }
4661 4667 }
4662 4668
4663 4669 /*
4664 4670 * At this point we know which devices of the new list were already
4665 4671 * known (The field tgt_aux_state of the target structure has been
4666 4672 * set to FCP_TGT_TAGGED).
4667 4673 *
4668 4674 * The following code goes through the list of targets currently known
4669 4675 * by the local port (the list is actually a hashing table). If a
4670 4676 * target is found and is not tagged, it means the target cannot
4671 4677 * be reached anymore through the local port (pptr). It is offlined.
4672 4678 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
4673 4679 */
4674 4680 for (i = 0; i < FCP_NUM_HASH; i++) {
4675 4681 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
4676 4682 ptgt = ptgt->tgt_next) {
4677 4683 mutex_enter(&ptgt->tgt_mutex);
4678 4684 if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
4679 4685 (cause == FCP_CAUSE_LINK_CHANGE) &&
4680 4686 !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4681 4687 fcp_offline_target_now(pptr, ptgt,
4682 4688 link_cnt, ptgt->tgt_change_cnt, 0);
4683 4689 }
4684 4690 mutex_exit(&ptgt->tgt_mutex);
4685 4691 }
4686 4692 }
4687 4693
4688 4694 /*
4689 4695 * At this point, the devices that were known but cannot be reached
4690 4696 * anymore, have most likely been offlined.
4691 4697 *
4692 4698 * The following section of code seems to go through the list of
4693 4699 * remote ports that can now be reached. For every single one it
4694 4700 * checks if it is already known or if it is a new port.
4695 4701 */
4696 4702 for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4697 4703
4698 4704 if (check_finish_init) {
4699 4705 ASSERT(i > 0);
4700 4706 (void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4701 4707 map_tag[i - 1], cause);
4702 4708 check_finish_init = 0;
4703 4709 }
4704 4710
4705 4711 /* get a pointer to this map entry */
4706 4712 map_entry = &(devlist[i]);
4707 4713
4708 4714 /*
4709 4715 * Check for the duplicate map entry flag. If we have marked
4710 4716 * this entry as a duplicate we skip it since the correct
4711 4717 * (perhaps even same) state change will be encountered
4712 4718 * later in the list.
4713 4719 */
4714 4720 if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
4715 4721 continue;
4716 4722 }
4717 4723
4718 4724 /* get ptr to this map entry in our port's list (if any) */
4719 4725 ptgt = fcp_lookup_target(pptr,
4720 4726 (uchar_t *)&(map_entry->map_pwwn));
4721 4727
4722 4728 if (ptgt) {
4723 4729 /*
4724 4730 * This device was already known. The field
4725 4731 * tgt_aux_state is reset (was probably set to
4726 4732 * FCP_TGT_TAGGED previously in this routine).
4727 4733 */
4728 4734 ptgt->tgt_aux_state = 0;
4729 4735 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4730 4736 fcp_trace, FCP_BUF_LEVEL_3, 0,
4731 4737 "handle_devices: map did/state/type/flags = "
4732 4738 "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
4733 4739 "tgt_state=%d",
4734 4740 map_entry->map_did.port_id, map_entry->map_state,
4735 4741 map_entry->map_type, map_entry->map_flags,
4736 4742 ptgt->tgt_d_id, ptgt->tgt_state);
4737 4743 }
4738 4744
4739 4745 if (map_entry->map_type == PORT_DEVICE_OLD ||
4740 4746 map_entry->map_type == PORT_DEVICE_NEW ||
4741 4747 map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED ||
4742 4748 map_entry->map_type == PORT_DEVICE_CHANGED) {
4743 4749 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4744 4750 fcp_trace, FCP_BUF_LEVEL_2, 0,
4745 4751 "map_type=%x, did = %x",
4746 4752 map_entry->map_type,
4747 4753 map_entry->map_did.port_id);
4748 4754 }
4749 4755
4750 4756 switch (map_entry->map_type) {
4751 4757 case PORT_DEVICE_NOCHANGE:
4752 4758 case PORT_DEVICE_USER_CREATE:
4753 4759 case PORT_DEVICE_USER_LOGIN:
4754 4760 case PORT_DEVICE_NEW:
4755 4761 case PORT_DEVICE_REPORTLUN_CHANGED:
4756 4762 FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);
4757 4763
4758 4764 if (fcp_handle_mapflags(pptr, ptgt, map_entry,
4759 4765 link_cnt, (ptgt) ? map_tag[i] : 0,
4760 4766 cause) == TRUE) {
4761 4767
4762 4768 FCP_TGT_TRACE(ptgt, map_tag[i],
4763 4769 FCP_TGT_TRACE_2);
4764 4770 check_finish_init++;
4765 4771 }
4766 4772 break;
4767 4773
4768 4774 case PORT_DEVICE_OLD:
4769 4775 if (ptgt != NULL) {
4770 4776 FCP_TGT_TRACE(ptgt, map_tag[i],
4771 4777 FCP_TGT_TRACE_3);
4772 4778
4773 4779 mutex_enter(&ptgt->tgt_mutex);
4774 4780 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4775 4781 /*
4776 4782 * Must do an in-line wait for I/Os
4777 4783 * to get drained
4778 4784 */
4779 4785 mutex_exit(&ptgt->tgt_mutex);
4780 4786 mutex_exit(&pptr->port_mutex);
4781 4787
4782 4788 mutex_enter(&ptgt->tgt_mutex);
4783 4789 while (ptgt->tgt_ipkt_cnt ||
4784 4790 fcp_outstanding_lun_cmds(ptgt)
4785 4791 == FC_SUCCESS) {
4786 4792 mutex_exit(&ptgt->tgt_mutex);
4787 4793 delay(drv_usectohz(1000000));
4788 4794 mutex_enter(&ptgt->tgt_mutex);
4789 4795 }
4790 4796 mutex_exit(&ptgt->tgt_mutex);
4791 4797
4792 4798 mutex_enter(&pptr->port_mutex);
4793 4799 mutex_enter(&ptgt->tgt_mutex);
4794 4800
4795 4801 (void) fcp_offline_target(pptr, ptgt,
4796 4802 link_cnt, map_tag[i], 0, 0);
4797 4803 }
4798 4804 mutex_exit(&ptgt->tgt_mutex);
4799 4805 }
4800 4806 check_finish_init++;
4801 4807 break;
4802 4808
4803 4809 case PORT_DEVICE_USER_DELETE:
4804 4810 case PORT_DEVICE_USER_LOGOUT:
4805 4811 if (ptgt != NULL) {
4806 4812 FCP_TGT_TRACE(ptgt, map_tag[i],
4807 4813 FCP_TGT_TRACE_4);
4808 4814
4809 4815 mutex_enter(&ptgt->tgt_mutex);
4810 4816 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4811 4817 (void) fcp_offline_target(pptr, ptgt,
4812 4818 link_cnt, map_tag[i], 1, 0);
4813 4819 }
4814 4820 mutex_exit(&ptgt->tgt_mutex);
4815 4821 }
4816 4822 check_finish_init++;
4817 4823 break;
4818 4824
4819 4825 case PORT_DEVICE_CHANGED:
4820 4826 if (ptgt != NULL) {
4821 4827 FCP_TGT_TRACE(ptgt, map_tag[i],
4822 4828 FCP_TGT_TRACE_5);
4823 4829
4824 4830 if (fcp_device_changed(pptr, ptgt,
4825 4831 map_entry, link_cnt, map_tag[i],
4826 4832 cause) == TRUE) {
4827 4833 check_finish_init++;
4828 4834 }
4829 4835 } else {
4830 4836 if (fcp_handle_mapflags(pptr, ptgt,
4831 4837 map_entry, link_cnt, 0, cause) == TRUE) {
4832 4838 check_finish_init++;
4833 4839 }
4834 4840 }
4835 4841 break;
4836 4842
4837 4843 default:
4838 4844 fcp_log(CE_WARN, pptr->port_dip,
4839 4845 "!Invalid map_type=0x%x", map_entry->map_type);
4840 4846 check_finish_init++;
4841 4847 break;
4842 4848 }
4843 4849 }
4844 4850
4845 4851 if (check_finish_init && pptr->port_link_cnt == link_cnt) {
4846 4852 ASSERT(i > 0);
4847 4853 (void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4848 4854 map_tag[i-1], cause);
4849 4855 } else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
4850 4856 fcp_offline_all(pptr, link_cnt, cause);
4851 4857 }
4852 4858 }
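
fcp_handle_devices() reconciles the known target list against the reported reachable-port list with a tag-and-sweep approach: tag every known target that appears in the new list, offline known-but-untagged targets, then process each reported entry (which may create a new target). The sketch below restates that pattern with invented, generic structures and callbacks; it is illustrative only and uses none of the driver's real types.

#include <sys/types.h>

#define	EX_TAGGED	0x01

struct ex_node {
	struct ex_node	*n_next;
	uint64_t	n_wwn;
	int		n_flags;
};

/*
 * known:     head of the list of nodes we already track
 * reachable: WWNs reported as currently reachable
 * lookup():  find a tracked node by WWN (may return NULL)
 * offline(): retire a node that is no longer reachable
 * update():  create or refresh a node (must accept NULL for new devices)
 */
static void
reconcile(struct ex_node *known, const uint64_t *reachable, int nreachable,
    struct ex_node *(*lookup)(struct ex_node *, uint64_t),
    void (*offline)(struct ex_node *),
    void (*update)(struct ex_node *, uint64_t))
{
	struct ex_node	*n;
	int		i;

	/* Pass 1: tag every tracked node that is still reachable. */
	for (i = 0; i < nreachable; i++) {
		if ((n = lookup(known, reachable[i])) != NULL) {
			n->n_flags |= EX_TAGGED;
		}
	}

	/* Pass 2: anything tracked but untagged can no longer be reached. */
	for (n = known; n != NULL; n = n->n_next) {
		if (!(n->n_flags & EX_TAGGED)) {
			offline(n);
		}
	}

	/* Pass 3: create or refresh an entry for each reachable device. */
	for (i = 0; i < nreachable; i++) {
		n = lookup(known, reachable[i]);
		if (n != NULL) {
			n->n_flags &= ~EX_TAGGED;
		}
		update(n, reachable[i]);
	}
}
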
4853 4859
4854 4860 static int
4855 4861 fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause)
4856 4862 {
4857 4863 struct fcp_lun *plun;
4858 4864 struct fcp_port *pptr;
4859 4865 int rscn_count;
4860 4866 int lun0_newalloc;
4861 4867 int ret = TRUE;
4862 4868
4863 4869 ASSERT(ptgt);
4864 4870 pptr = ptgt->tgt_port;
4865 4871 lun0_newalloc = 0;
4866 4872 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
4867 4873 /*
4868 4874 * no LUN struct for LUN 0 yet exists,
4869 4875 * so create one
4870 4876 */
4871 4877 plun = fcp_alloc_lun(ptgt);
4872 4878 if (plun == NULL) {
4873 4879 fcp_log(CE_WARN, pptr->port_dip,
4874 4880 "!Failed to allocate lun 0 for"
4875 4881 " D_ID=%x", ptgt->tgt_d_id);
4876 4882 return (ret);
4877 4883 }
4878 4884 lun0_newalloc = 1;
4879 4885 }
4880 4886
4881 4887 mutex_enter(&ptgt->tgt_mutex);
4882 4888 /*
4883 4889 * consider lun 0 as device not connected if it is
4884 4890 * offlined or newly allocated
4885 4891 */
4886 4892 if ((plun->lun_state & FCP_LUN_OFFLINE) || lun0_newalloc) {
4887 4893 plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
4888 4894 }
4889 4895 plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
4890 4896 plun->lun_state &= ~FCP_LUN_OFFLINE;
4891 4897 ptgt->tgt_lun_cnt = 1;
4892 4898 ptgt->tgt_report_lun_cnt = 0;
4893 4899 mutex_exit(&ptgt->tgt_mutex);
4894 4900
4895 4901 rscn_count = fc_ulp_get_rscn_count(pptr->port_fp_handle);
4896 4902 if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
4897 4903 sizeof (struct fcp_reportlun_resp), pptr->port_link_cnt,
4898 4904 ptgt->tgt_change_cnt, cause, rscn_count) != DDI_SUCCESS) {
4899 4905 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4900 4906 fcp_trace, FCP_BUF_LEVEL_3, 0, "!Failed to send REPORTLUN "
4901 4907 "to D_ID=%x", ptgt->tgt_d_id);
4902 4908 } else {
4903 4909 ret = FALSE;
4904 4910 }
4905 4911
4906 4912 return (ret);
4907 4913 }
4908 4914
4909 4915 /*
4910 4916 * Function: fcp_handle_mapflags
4911 4917 *
4912 4918 * Description: This function creates a target structure if the ptgt passed
4913 4919 * is NULL. It also kicks off the PLOGI if we are not logged
4914 4920 * into the target yet or the PRLI if we are logged into the
4915 4921 * target already. The rest of the treatment is done in the
4916 4922 * callbacks of the PLOGI or PRLI.
4917 4923 *
4918 4924 * Argument: *pptr FCP Port structure.
4919 4925 * *ptgt Target structure.
4920 4926 * *map_entry Array of fc_portmap_t structures.
4921 4927 * link_cnt Link state count.
4922 4928 * tgt_cnt Target state count.
4923 4929 * cause What caused this function to be called.
4924 4930 *
4925 4931 * Return Value: TRUE Failed
4926 4932 * FALSE Succeeded
4927 4933 *
4928 4934 * Notes: pptr->port_mutex must be owned.
4929 4935 */
4930 4936 static int
4931 4937 fcp_handle_mapflags(struct fcp_port *pptr, struct fcp_tgt *ptgt,
4932 4938 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
4933 4939 {
4934 4940 int lcount;
4935 4941 int tcount;
4936 4942 int ret = TRUE;
4937 4943 int alloc;
4938 4944 struct fcp_ipkt *icmd;
4939 4945 struct fcp_lun *pseq_lun = NULL;
4940 4946 uchar_t opcode;
4941 4947 int valid_ptgt_was_passed = FALSE;
4942 4948
4943 4949 ASSERT(mutex_owned(&pptr->port_mutex));
4944 4950
4945 4951 /*
4946 4952 * This case is possible where the FCTL has come up and done discovery
4947 4953 * before FCP was loaded and attached. FCTL would have discovered the
4948 4954 	 * devices and later the ULP came online. In this case ULPs would get
4949 4955 	 * PORT_DEVICE_NOCHANGE but the target would be NULL.
4950 4956 */
4951 4957 if (ptgt == NULL) {
4952 4958 /* don't already have a target */
4953 4959 mutex_exit(&pptr->port_mutex);
4954 4960 ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
4955 4961 mutex_enter(&pptr->port_mutex);
4956 4962
4957 4963 if (ptgt == NULL) {
4958 4964 fcp_log(CE_WARN, pptr->port_dip,
4959 4965 "!FC target allocation failed");
4960 4966 return (ret);
4961 4967 }
4962 4968 mutex_enter(&ptgt->tgt_mutex);
4963 4969 ptgt->tgt_statec_cause = cause;
4964 4970 ptgt->tgt_tmp_cnt = 1;
4965 4971 mutex_exit(&ptgt->tgt_mutex);
4966 4972 } else {
4967 4973 valid_ptgt_was_passed = TRUE;
4968 4974 }
4969 4975
4970 4976 /*
4971 4977 * Copy in the target parameters
4972 4978 */
4973 4979 mutex_enter(&ptgt->tgt_mutex);
4974 4980 ptgt->tgt_d_id = map_entry->map_did.port_id;
4975 4981 ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
4976 4982 ptgt->tgt_pd_handle = map_entry->map_pd;
4977 4983 ptgt->tgt_fca_dev = NULL;
4978 4984
4979 4985 /* Copy port and node WWNs */
4980 4986 bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
4981 4987 FC_WWN_SIZE);
4982 4988 bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
4983 4989 FC_WWN_SIZE);
4984 4990
4985 4991 if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
4986 4992 (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
4987 4993 (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
4988 4994 valid_ptgt_was_passed) {
4989 4995 /*
4990 4996 * determine if there are any tape LUNs on this target
4991 4997 */
4992 4998 for (pseq_lun = ptgt->tgt_lun;
4993 4999 pseq_lun != NULL;
4994 5000 pseq_lun = pseq_lun->lun_next) {
4995 5001 if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
4996 5002 !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
4997 5003 fcp_update_tgt_state(ptgt, FCP_RESET,
4998 5004 FCP_LUN_MARK);
4999 5005 mutex_exit(&ptgt->tgt_mutex);
5000 5006 return (ret);
5001 5007 }
5002 5008 }
5003 5009 }
5004 5010
5005 5011 /*
5006 5012 	 * If a REPORT_LUN_CHANGED unit attention (UA) is received,
5007 5013 	 * send out REPORT LUN promptly and skip the PLOGI/PRLI process.
5008 5014 */
5009 5015 if (map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED) {
5010 5016 ptgt->tgt_state &= ~(FCP_TGT_OFFLINE | FCP_TGT_MARK);
5011 5017 mutex_exit(&ptgt->tgt_mutex);
5012 5018 mutex_exit(&pptr->port_mutex);
5013 5019
5014 5020 ret = fcp_handle_reportlun_changed(ptgt, cause);
5015 5021
5016 5022 mutex_enter(&pptr->port_mutex);
5017 5023 return (ret);
5018 5024 }
5019 5025
5020 5026 /*
5021 5027 * If ptgt was NULL when this function was entered, then tgt_node_state
5022 5028 	 * was never specifically initialized but was zeroed out, which means
5023 5029 * FCP_TGT_NODE_NONE.
5024 5030 */
5025 5031 switch (ptgt->tgt_node_state) {
5026 5032 case FCP_TGT_NODE_NONE:
5027 5033 case FCP_TGT_NODE_ON_DEMAND:
5028 5034 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
5029 5035 !fcp_enable_auto_configuration &&
5030 5036 map_entry->map_type != PORT_DEVICE_USER_CREATE) {
5031 5037 ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
5032 5038 } else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
5033 5039 fcp_enable_auto_configuration &&
5034 5040 (ptgt->tgt_manual_config_only == 1) &&
5035 5041 map_entry->map_type != PORT_DEVICE_USER_CREATE) {
5036 5042 /*
5037 5043 * If auto configuration is set and
5038 5044 * the tgt_manual_config_only flag is set then
5039 5045 * we only want the user to be able to change
5040 5046 * the state through create_on_demand.
5041 5047 */
5042 5048 ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
5043 5049 } else {
5044 5050 ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
5045 5051 }
5046 5052 break;
5047 5053
5048 5054 case FCP_TGT_NODE_PRESENT:
5049 5055 break;
5050 5056 }
5051 5057 /*
5052 5058 * If we are booting from a fabric device, make sure we
5053 5059 * mark the node state appropriately for this target to be
5054 5060 * enumerated
5055 5061 */
5056 5062 if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
5057 5063 if (bcmp((caddr_t)pptr->port_boot_wwn,
5058 5064 (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
5059 5065 sizeof (ptgt->tgt_port_wwn)) == 0) {
5060 5066 ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
5061 5067 }
5062 5068 }
5063 5069 mutex_exit(&ptgt->tgt_mutex);
5064 5070
5065 5071 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5066 5072 fcp_trace, FCP_BUF_LEVEL_3, 0,
5067 5073 "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
5068 5074 map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
5069 5075 map_entry->map_rscn_info.ulp_rscn_count);
5070 5076
5071 5077 mutex_enter(&ptgt->tgt_mutex);
5072 5078
5073 5079 /*
5074 5080 * Reset target OFFLINE state and mark the target BUSY
5075 5081 */
5076 5082 ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
5077 5083 ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);
5078 5084
5079 5085 tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
5080 5086 lcount = link_cnt;
5081 5087
5082 5088 mutex_exit(&ptgt->tgt_mutex);
5083 5089 mutex_exit(&pptr->port_mutex);
5084 5090
5085 5091 /*
5086 5092 * if we are already logged in, then we do a PRLI, else
5087 5093 * we do a PLOGI first (to get logged in)
5088 5094 *
5089 5095 * We will not check if we are the PLOGI initiator
5090 5096 */
5091 5097 opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
5092 5098 map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;
5093 5099
5094 5100 alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));
5095 5101
5096 5102 icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
5097 5103 pptr->port_state & FCP_STATE_FCA_IS_NODMA, lcount, tcount,
5098 5104 cause, map_entry->map_rscn_info.ulp_rscn_count);
5099 5105
5100 5106 if (icmd == NULL) {
5101 5107 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
5102 5108 /*
5103 5109 		 * so we need to make sure we reacquire it before returning.
5104 5110 * we need to make sure we reacquire it before returning.
5105 5111 */
5106 5112 mutex_enter(&pptr->port_mutex);
5107 5113 return (FALSE);
5108 5114 }
5109 5115
5110 5116 	/* TRUE is only returned when the target is intentionally skipped */
5111 5117 ret = FALSE;
5112 5118 /* discover info about this target */
5113 5119 if ((fcp_send_els(pptr, ptgt, icmd, opcode,
5114 5120 lcount, tcount, cause)) == DDI_SUCCESS) {
5115 5121 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
5116 5122 } else {
5117 5123 fcp_icmd_free(pptr, icmd);
5118 5124 ret = TRUE;
5119 5125 }
5120 5126 mutex_enter(&pptr->port_mutex);
5121 5127
5122 5128 return (ret);
5123 5129 }
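A minimal caller sketch for the inverted return convention above (TRUE means failure), assuming the usual fcp.c context for pptr, ptgt, map_entry and the state counters; the fcp_log() reaction is illustrative only, not taken from this change:

	/* Sketch only: react to a TRUE (failed/skipped) return. */
	mutex_enter(&pptr->port_mutex);
	if (fcp_handle_mapflags(pptr, ptgt, map_entry, link_cnt,
	    tgt_cnt, cause) == TRUE) {
		/* discovery for this target was skipped or could not start */
		fcp_log(CE_NOTE, pptr->port_dip,
		    "!discovery not started for D_ID=%x", ptgt->tgt_d_id);
	}
	mutex_exit(&pptr->port_mutex);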
5124 5130
5125 5131 /*
5126 5132 * Function: fcp_send_els
5127 5133 *
5128 5134 * Description: Sends an ELS to the target specified by the caller. Supports
5129 5135 * PLOGI and PRLI.
5130 5136 *
5131 5137 * Argument: *pptr Fcp port.
5132 5138 * *ptgt Target to send the ELS to.
5133 5139 * *icmd Internal packet
5134 5140 * opcode ELS opcode
5135 5141 * lcount Link state change counter
5136 5142 * tcount Target state change counter
5137 5143 * cause What caused the call
5138 5144 *
5139 5145 * Return Value: DDI_SUCCESS
5140 5146 * Others
5141 5147 */
5142 5148 static int
5143 5149 fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
5144 5150 struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
5145 5151 {
5146 5152 fc_packet_t *fpkt;
5147 5153 fc_frame_hdr_t *hp;
5148 5154 int internal = 0;
5149 5155 int alloc;
5150 5156 int cmd_len;
5151 5157 int resp_len;
5152 5158 int res = DDI_FAILURE; /* default result */
5153 5159 int rval = DDI_FAILURE;
5154 5160
5155 5161 ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
5156 5162 ASSERT(ptgt->tgt_port == pptr);
5157 5163
5158 5164 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5159 5165 fcp_trace, FCP_BUF_LEVEL_5, 0,
5160 5166 "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
5161 5167 (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");
5162 5168
5163 5169 if (opcode == LA_ELS_PLOGI) {
5164 5170 cmd_len = sizeof (la_els_logi_t);
5165 5171 resp_len = sizeof (la_els_logi_t);
5166 5172 } else {
5167 5173 ASSERT(opcode == LA_ELS_PRLI);
5168 5174 cmd_len = sizeof (la_els_prli_t);
5169 5175 resp_len = sizeof (la_els_prli_t);
5170 5176 }
5171 5177
5172 5178 if (icmd == NULL) {
5173 5179 alloc = FCP_MAX(sizeof (la_els_logi_t),
5174 5180 sizeof (la_els_prli_t));
5175 5181 icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
5176 5182 pptr->port_state & FCP_STATE_FCA_IS_NODMA,
5177 5183 lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
5178 5184 if (icmd == NULL) {
5179 5185 FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
5180 5186 return (res);
5181 5187 }
5182 5188 internal++;
5183 5189 }
5184 5190 fpkt = icmd->ipkt_fpkt;
5185 5191
5186 5192 fpkt->pkt_cmdlen = cmd_len;
5187 5193 fpkt->pkt_rsplen = resp_len;
5188 5194 fpkt->pkt_datalen = 0;
5189 5195 icmd->ipkt_retries = 0;
5190 5196
5191 5197 /* fill in fpkt info */
5192 5198 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5193 5199 fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
5194 5200 fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5195 5201
5196 5202 /* get ptr to frame hdr in fpkt */
5197 5203 hp = &fpkt->pkt_cmd_fhdr;
5198 5204
5199 5205 /*
5200 5206 * fill in frame hdr
5201 5207 */
5202 5208 hp->r_ctl = R_CTL_ELS_REQ;
5203 5209 hp->s_id = pptr->port_id; /* source ID */
5204 5210 hp->d_id = ptgt->tgt_d_id; /* dest ID */
5205 5211 hp->type = FC_TYPE_EXTENDED_LS;
5206 5212 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
5207 5213 hp->seq_id = 0;
5208 5214 hp->rsvd = 0;
5209 5215 hp->df_ctl = 0;
5210 5216 hp->seq_cnt = 0;
5211 5217 hp->ox_id = 0xffff; /* i.e. none */
5212 5218 hp->rx_id = 0xffff; /* i.e. none */
5213 5219 hp->ro = 0;
5214 5220
5215 5221 /*
5216 5222 * at this point we have a filled in cmd pkt
5217 5223 *
5218 5224 * fill in the respective info, then use the transport to send
5219 5225 * the packet
5220 5226 *
5221 5227 * for a PLOGI call fc_ulp_login(), and
5222 5228 * for a PRLI call fc_ulp_issue_els()
5223 5229 */
5224 5230 switch (opcode) {
5225 5231 case LA_ELS_PLOGI: {
5226 5232 struct la_els_logi logi;
5227 5233
5228 5234 bzero(&logi, sizeof (struct la_els_logi));
5229 5235
5230 5236 hp = &fpkt->pkt_cmd_fhdr;
5231 5237 hp->r_ctl = R_CTL_ELS_REQ;
5232 5238 logi.ls_code.ls_code = LA_ELS_PLOGI;
5233 5239 logi.ls_code.mbz = 0;
5234 5240
5235 5241 FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
5236 5242 fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
5237 5243
5238 5244 icmd->ipkt_opcode = LA_ELS_PLOGI;
5239 5245
5240 5246 mutex_enter(&pptr->port_mutex);
5241 5247 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5242 5248
5243 5249 mutex_exit(&pptr->port_mutex);
5244 5250
5245 5251 rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
5246 5252 if (rval == FC_SUCCESS) {
5247 5253 res = DDI_SUCCESS;
5248 5254 break;
5249 5255 }
5250 5256
5251 5257 FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);
5252 5258
5253 5259 res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5254 5260 rval, "PLOGI");
5255 5261 } else {
5256 5262 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5257 5263 fcp_trace, FCP_BUF_LEVEL_5, 0,
5258 5264 "fcp_send_els1: state change occured"
5259 5265 " for D_ID=0x%x", ptgt->tgt_d_id);
5260 5266 mutex_exit(&pptr->port_mutex);
5261 5267 FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
5262 5268 }
5263 5269 break;
5264 5270 }
5265 5271
5266 5272 case LA_ELS_PRLI: {
5267 5273 struct la_els_prli prli;
5268 5274 struct fcp_prli *fprli;
5269 5275
5270 5276 bzero(&prli, sizeof (struct la_els_prli));
5271 5277
5272 5278 hp = &fpkt->pkt_cmd_fhdr;
5273 5279 hp->r_ctl = R_CTL_ELS_REQ;
5274 5280
5275 5281 /* fill in PRLI cmd ELS fields */
5276 5282 prli.ls_code = LA_ELS_PRLI;
5277 5283 prli.page_length = 0x10; /* huh? */
5278 5284 prli.payload_length = sizeof (struct la_els_prli);
5279 5285
5280 5286 icmd->ipkt_opcode = LA_ELS_PRLI;
5281 5287
5282 5288 /* get ptr to PRLI service params */
5283 5289 fprli = (struct fcp_prli *)prli.service_params;
5284 5290
5285 5291 /* fill in service params */
5286 5292 fprli->type = 0x08;
5287 5293 fprli->resvd1 = 0;
5288 5294 fprli->orig_process_assoc_valid = 0;
5289 5295 fprli->resp_process_assoc_valid = 0;
5290 5296 fprli->establish_image_pair = 1;
5291 5297 fprli->resvd2 = 0;
5292 5298 fprli->resvd3 = 0;
5293 5299 fprli->obsolete_1 = 0;
5294 5300 fprli->obsolete_2 = 0;
5295 5301 fprli->data_overlay_allowed = 0;
5296 5302 fprli->initiator_fn = 1;
5297 5303 fprli->confirmed_compl_allowed = 1;
5298 5304
5299 5305 if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5300 5306 fprli->target_fn = 1;
5301 5307 } else {
5302 5308 fprli->target_fn = 0;
5303 5309 }
5304 5310
5305 5311 fprli->retry = 1;
5306 5312 fprli->read_xfer_rdy_disabled = 1;
5307 5313 fprli->write_xfer_rdy_disabled = 0;
5308 5314
5309 5315 FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5310 5316 fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5311 5317
5312 5318 /* issue the PRLI request */
5313 5319
5314 5320 mutex_enter(&pptr->port_mutex);
5315 5321 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5316 5322
5317 5323 mutex_exit(&pptr->port_mutex);
5318 5324
5319 5325 rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
5320 5326 if (rval == FC_SUCCESS) {
5321 5327 res = DDI_SUCCESS;
5322 5328 break;
5323 5329 }
5324 5330
5325 5331 FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);
5326 5332
5327 5333 res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5328 5334 rval, "PRLI");
5329 5335 } else {
5330 5336 mutex_exit(&pptr->port_mutex);
5331 5337 FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
5332 5338 }
5333 5339 break;
5334 5340 }
5335 5341
5336 5342 default:
5337 5343 fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
5338 5344 break;
5339 5345 }
5340 5346
5341 5347 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5342 5348 fcp_trace, FCP_BUF_LEVEL_5, 0,
5343 5349 "fcp_send_els: returning %d", res);
5344 5350
5345 5351 if (res != DDI_SUCCESS) {
5346 5352 if (internal) {
5347 5353 fcp_icmd_free(pptr, icmd);
5348 5354 }
5349 5355 }
5350 5356
5351 5357 return (res);
5352 5358 }
5353 5359
5354 5360
5355 5361 /*
5356 5362  * called internally to update the state of all of the tgts and each LUN
5357 5363 * for this port (i.e. each target known to be attached to this port)
5358 5364 * if they are not already offline
5359 5365 *
5360 5366 * must be called with the port mutex owned
5361 5367 *
5362 5368 * acquires and releases the target mutexes for each target attached
5363 5369 * to this port
5364 5370 */
5365 5371 void
5366 5372 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5367 5373 {
5368 5374 int i;
5369 5375 struct fcp_tgt *ptgt;
5370 5376
5371 5377 ASSERT(mutex_owned(&pptr->port_mutex));
5372 5378
5373 5379 for (i = 0; i < FCP_NUM_HASH; i++) {
5374 5380 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5375 5381 ptgt = ptgt->tgt_next) {
5376 5382 mutex_enter(&ptgt->tgt_mutex);
5377 5383 fcp_update_tgt_state(ptgt, FCP_SET, state);
5378 5384 ptgt->tgt_change_cnt++;
5379 5385 ptgt->tgt_statec_cause = cause;
5380 5386 ptgt->tgt_tmp_cnt = 1;
5381 5387 ptgt->tgt_done = 0;
5382 5388 mutex_exit(&ptgt->tgt_mutex);
5383 5389 }
5384 5390 }
5385 5391 }
5386 5392
5387 5393
5388 5394 static void
5389 5395 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5390 5396 {
5391 5397 int i;
5392 5398 int ndevs;
5393 5399 struct fcp_tgt *ptgt;
5394 5400
5395 5401 ASSERT(mutex_owned(&pptr->port_mutex));
5396 5402
5397 5403 for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5398 5404 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5399 5405 ptgt = ptgt->tgt_next) {
5400 5406 ndevs++;
5401 5407 }
5402 5408 }
5403 5409
5404 5410 if (ndevs == 0) {
5405 5411 return;
5406 5412 }
5407 5413 pptr->port_tmp_cnt = ndevs;
5408 5414
5409 5415 for (i = 0; i < FCP_NUM_HASH; i++) {
5410 5416 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5411 5417 ptgt = ptgt->tgt_next) {
5412 5418 (void) fcp_call_finish_init_held(pptr, ptgt,
5413 5419 lcount, ptgt->tgt_change_cnt, cause);
5414 5420 }
5415 5421 }
5416 5422 }
5417 5423
5418 5424 /*
5419 5425 * Function: fcp_update_tgt_state
5420 5426 *
5421 5427  * Description: This function updates the tgt_state field of a target. That
5422 5428  *		field is a bitmap whose bits can be set or reset
5423 5429  *		individually. The action applied to the target state is also
5424 5430  *		applied to all the LUNs belonging to the target (provided the
5425 5431  *		LUN is not offline). As a side effect, when state bits are set,
5426 5432  *		the tgt_trace field of the target and the lun_trace field of
5427 5433  *		the LUNs are reset to zero.
5428 5434 *
5429 5435 *
5430 5436 * Argument: *ptgt Target structure.
5431 5437  *		flag		Flag indicating what action to apply (set/reset).
5432 5438 * state State bits to update.
5433 5439 *
5434 5440 * Return Value: None
5435 5441 *
5436 5442 * Context: Interrupt, Kernel or User context.
5437 5443 * The mutex of the target (ptgt->tgt_mutex) must be owned when
5438 5444 * calling this function.
5439 5445 */
5440 5446 void
5441 5447 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5442 5448 {
5443 5449 struct fcp_lun *plun;
5444 5450
5445 5451 ASSERT(mutex_owned(&ptgt->tgt_mutex));
5446 5452
5447 5453 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5448 5454 /* The target is not offline. */
5449 5455 if (flag == FCP_SET) {
5450 5456 ptgt->tgt_state |= state;
5451 5457 ptgt->tgt_trace = 0;
5452 5458 } else {
5453 5459 ptgt->tgt_state &= ~state;
5454 5460 }
5455 5461
5456 5462 for (plun = ptgt->tgt_lun; plun != NULL;
5457 5463 plun = plun->lun_next) {
5458 5464 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5459 5465 /* The LUN is not offline. */
5460 5466 if (flag == FCP_SET) {
5461 5467 plun->lun_state |= state;
5462 5468 plun->lun_trace = 0;
5463 5469 } else {
5464 5470 plun->lun_state &= ~state;
5465 5471 }
5466 5472 }
5467 5473 }
5468 5474 }
5469 5475 }
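A minimal usage sketch for fcp_update_tgt_state(), mirroring calls already present elsewhere in this file (FCP_SET/FCP_RESET and the state bits are existing fcp definitions); it assumes ptgt is a valid target:

	/* Sketch only: set bits during rediscovery, clear them afterwards. */
	mutex_enter(&ptgt->tgt_mutex);
	fcp_update_tgt_state(ptgt, FCP_SET, FCP_TGT_BUSY | FCP_TGT_MARK);
	mutex_exit(&ptgt->tgt_mutex);

	/* ... discovery of the target's LUNs completes ... */

	mutex_enter(&ptgt->tgt_mutex);
	fcp_update_tgt_state(ptgt, FCP_RESET, FCP_TGT_MARK);
	mutex_exit(&ptgt->tgt_mutex);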
5470 5476
5471 5477 /*
5472 5478  * Function:	fcp_update_lun_state
5473 5479  *
5474 5480  * Description: This function updates the lun_state field of a LUN. That
5475 5481  *		field is a bitmap whose bits can be set or reset
5476 5482  *		individually.
5477 5483 *
5478 5484 * Argument: *plun LUN structure.
5479 5485  *		flag		Flag indicating what action to apply (set/reset).
5480 5486 * state State bits to update.
5481 5487 *
5482 5488 * Return Value: None
5483 5489 *
5484 5490 * Context: Interrupt, Kernel or User context.
5485 5491 * The mutex of the target (ptgt->tgt_mutex) must be owned when
5486 5492 * calling this function.
5487 5493 */
5488 5494 void
5489 5495 fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
5490 5496 {
5491 5497 struct fcp_tgt *ptgt = plun->lun_tgt;
5492 5498
5493 5499 ASSERT(mutex_owned(&ptgt->tgt_mutex));
5494 5500
5495 5501 if (!(plun->lun_state & FCP_TGT_OFFLINE)) {
5496 5502 if (flag == FCP_SET) {
5497 5503 plun->lun_state |= state;
5498 5504 } else {
5499 5505 plun->lun_state &= ~state;
5500 5506 }
5501 5507 }
5502 5508 }
5503 5509
5504 5510 /*
5505 5511 * Function: fcp_get_port
5506 5512 *
5507 5513 * Description: This function returns the fcp_port structure from the opaque
5508 5514 * handle passed by the caller. That opaque handle is the handle
5509 5515 * used by fp/fctl to identify a particular local port. That
5510 5516 * handle has been stored in the corresponding fcp_port
5511 5517  *		structure. This function walks the global list of fcp_port
5512 5518  *		structures until it finds one whose port_fp_handle matches
5513 5519  *		the handle passed by the caller. It enters the mutex
5514 5520  *		fcp_global_mutex while walking the global list and then
5515 5521  *		releases it.
5516 5522 *
5517 5523 * Argument: port_handle Opaque handle that fp/fctl uses to identify a
5518 5524 * particular port.
5519 5525 *
5520 5526 * Return Value: NULL Not found.
5521 5527 * Not NULL Pointer to the fcp_port structure.
5522 5528 *
5523 5529 * Context: Interrupt, Kernel or User context.
5524 5530 */
5525 5531 static struct fcp_port *
5526 5532 fcp_get_port(opaque_t port_handle)
5527 5533 {
5528 5534 struct fcp_port *pptr;
5529 5535
5530 5536 ASSERT(port_handle != NULL);
5531 5537
5532 5538 mutex_enter(&fcp_global_mutex);
5533 5539 for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5534 5540 if (pptr->port_fp_handle == port_handle) {
5535 5541 break;
5536 5542 }
5537 5543 }
5538 5544 mutex_exit(&fcp_global_mutex);
5539 5545
5540 5546 return (pptr);
5541 5547 }
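A minimal sketch of the intended use, assuming port_handle is the opaque handle fp/fctl passed into a ULP entry point; the early return is illustrative only:

	/* Sketch only: map the fp/fctl handle back to our per-port soft state. */
	struct fcp_port	*pptr;

	if ((pptr = fcp_get_port(port_handle)) == NULL) {
		/* not a port this driver is attached to; ignore the call */
		return;
	}
	/* pptr now points at our per-port soft state */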
5542 5548
5543 5549
5544 5550 static void
5545 5551 fcp_unsol_callback(fc_packet_t *fpkt)
5546 5552 {
5547 5553 struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5548 5554 struct fcp_port *pptr = icmd->ipkt_port;
5549 5555
5550 5556 if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5551 5557 caddr_t state, reason, action, expln;
5552 5558
5553 5559 (void) fc_ulp_pkt_error(fpkt, &state, &reason,
5554 5560 &action, &expln);
5555 5561
5556 5562 fcp_log(CE_WARN, pptr->port_dip,
5557 5563 "!couldn't post response to unsolicited request: "
5558 5564 " state=%s reason=%s rx_id=%x ox_id=%x",
5559 5565 state, reason, fpkt->pkt_cmd_fhdr.ox_id,
5560 5566 fpkt->pkt_cmd_fhdr.rx_id);
5561 5567 }
5562 5568 fcp_icmd_free(pptr, icmd);
5563 5569 }
5564 5570
5565 5571
5566 5572 /*
5567 5573 * Perform general purpose preparation of a response to an unsolicited request
5568 5574 */
5569 5575 static void
5570 5576 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
5571 5577 uchar_t r_ctl, uchar_t type)
5572 5578 {
5573 5579 pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
5574 5580 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
5575 5581 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
5576 5582 pkt->pkt_cmd_fhdr.type = type;
5577 5583 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
5578 5584 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
5579 5585 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl;
5580 5586 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
5581 5587 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
5582 5588 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
5583 5589 pkt->pkt_cmd_fhdr.ro = 0;
5584 5590 pkt->pkt_cmd_fhdr.rsvd = 0;
5585 5591 pkt->pkt_comp = fcp_unsol_callback;
5586 5592 pkt->pkt_pd = NULL;
5587 5593 pkt->pkt_ub_resp_token = (opaque_t)buf;
5588 5594 }
5589 5595
5590 5596
5591 5597 /*ARGSUSED*/
5592 5598 static int
5593 5599 fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
5594 5600 {
5595 5601 fc_packet_t *fpkt;
5596 5602 struct la_els_prli prli;
5597 5603 struct fcp_prli *fprli;
5598 5604 struct fcp_ipkt *icmd;
5599 5605 struct la_els_prli *from;
5600 5606 struct fcp_prli *orig;
5601 5607 struct fcp_tgt *ptgt;
5602 5608 int tcount = 0;
5603 5609 int lcount;
5604 5610
5605 5611 from = (struct la_els_prli *)buf->ub_buffer;
5606 5612 orig = (struct fcp_prli *)from->service_params;
5607 5613 if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
5608 5614 NULL) {
5609 5615 mutex_enter(&ptgt->tgt_mutex);
5610 5616 tcount = ptgt->tgt_change_cnt;
5611 5617 mutex_exit(&ptgt->tgt_mutex);
5612 5618 }
5613 5619
5614 5620 mutex_enter(&pptr->port_mutex);
5615 5621 lcount = pptr->port_link_cnt;
5616 5622 mutex_exit(&pptr->port_mutex);
5617 5623
5618 5624 if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
5619 5625 sizeof (la_els_prli_t), 0,
5620 5626 pptr->port_state & FCP_STATE_FCA_IS_NODMA,
5621 5627 lcount, tcount, 0, FC_INVALID_RSCN_COUNT)) == NULL) {
5622 5628 return (FC_FAILURE);
5623 5629 }
5624 5630
5625 5631 fpkt = icmd->ipkt_fpkt;
5626 5632 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5627 5633 fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
5628 5634 fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5629 5635 fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
5630 5636 fpkt->pkt_rsplen = 0;
5631 5637 fpkt->pkt_datalen = 0;
5632 5638
5633 5639 icmd->ipkt_opcode = LA_ELS_PRLI;
5634 5640
5635 5641 bzero(&prli, sizeof (struct la_els_prli));
5636 5642 fprli = (struct fcp_prli *)prli.service_params;
5637 5643 prli.ls_code = LA_ELS_ACC;
5638 5644 prli.page_length = 0x10;
5639 5645 prli.payload_length = sizeof (struct la_els_prli);
5640 5646
5641 5647 /* fill in service params */
5642 5648 fprli->type = 0x08;
5643 5649 fprli->resvd1 = 0;
5644 5650 fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
5645 5651 fprli->orig_process_associator = orig->orig_process_associator;
5646 5652 fprli->resp_process_assoc_valid = 0;
5647 5653 fprli->establish_image_pair = 1;
5648 5654 fprli->resvd2 = 0;
5649 5655 fprli->resvd3 = 0;
5650 5656 fprli->obsolete_1 = 0;
5651 5657 fprli->obsolete_2 = 0;
5652 5658 fprli->data_overlay_allowed = 0;
5653 5659 fprli->initiator_fn = 1;
5654 5660 fprli->confirmed_compl_allowed = 1;
5655 5661
5656 5662 if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5657 5663 fprli->target_fn = 1;
5658 5664 } else {
5659 5665 fprli->target_fn = 0;
5660 5666 }
5661 5667
5662 5668 fprli->retry = 1;
5663 5669 fprli->read_xfer_rdy_disabled = 1;
5664 5670 fprli->write_xfer_rdy_disabled = 0;
5665 5671
5666 5672 /* save the unsol prli payload first */
5667 5673 FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
5668 5674 fpkt->pkt_resp_acc, sizeof (struct la_els_prli));
5669 5675
5670 5676 FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5671 5677 fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5672 5678
5673 5679 fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
5674 5680
5675 5681 mutex_enter(&pptr->port_mutex);
5676 5682 if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
5677 5683 int rval;
5678 5684 mutex_exit(&pptr->port_mutex);
5679 5685
5680 5686 if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
5681 5687 FC_SUCCESS) {
5682 5688 if ((rval == FC_STATEC_BUSY || rval == FC_OFFLINE) &&
5683 5689 ptgt != NULL) {
5684 5690 fcp_queue_ipkt(pptr, fpkt);
5685 5691 return (FC_SUCCESS);
5686 5692 }
5687 5693 /* Let it timeout */
5688 5694 fcp_icmd_free(pptr, icmd);
5689 5695 return (FC_FAILURE);
5690 5696 }
5691 5697 } else {
5692 5698 mutex_exit(&pptr->port_mutex);
5693 5699 fcp_icmd_free(pptr, icmd);
5694 5700 return (FC_FAILURE);
5695 5701 }
5696 5702
5697 5703 (void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);
5698 5704
5699 5705 return (FC_SUCCESS);
5700 5706 }
5701 5707
5702 5708 /*
5703 5709 * Function: fcp_icmd_alloc
5704 5710 *
5705 5711  * Description: This function allocates a fcp_ipkt structure. The pkt_comp
5706 5712 * field is initialized to fcp_icmd_callback. Sometimes it is
5707 5713 * modified by the caller (such as fcp_send_scsi). The
5708 5714 * structure is also tied to the state of the line and of the
5709 5715 * target at a particular time. That link is established by
5710 5716 * setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
5711 5717 * and tcount which came respectively from pptr->link_cnt and
5712 5718 * ptgt->tgt_change_cnt.
5713 5719 *
5714 5720 * Argument: *pptr Fcp port.
5715 5721 * *ptgt Target (destination of the command).
5716 5722 * cmd_len Length of the command.
5717 5723 * resp_len Length of the expected response.
5718 5724 * data_len Length of the data.
5719 5725  *		nodma		Indicates whether the command and response
5720 5726  *				will be transferred through DMA or not.
5721 5727 * lcount Link state change counter.
5722 5728 * tcount Target state change counter.
5723 5729  *		cause		Reason that led to this call.
5724 5730 *
5725 5731 * Return Value: NULL Failed.
5726 5732 * Not NULL Internal packet address.
5727 5733 */
5728 5734 static struct fcp_ipkt *
5729 5735 fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
5730 5736 int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
5731 5737 uint32_t rscn_count)
5732 5738 {
5733 5739 int dma_setup = 0;
5734 5740 fc_packet_t *fpkt;
5735 5741 struct fcp_ipkt *icmd = NULL;
5736 5742
5737 5743 icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
5738 5744 pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
5739 5745 KM_NOSLEEP);
5740 5746 if (icmd == NULL) {
5741 5747 fcp_log(CE_WARN, pptr->port_dip,
5742 5748 "!internal packet allocation failed");
5743 5749 return (NULL);
5744 5750 }
5745 5751
5746 5752 /*
5747 5753 * initialize the allocated packet
5748 5754 */
5749 5755 icmd->ipkt_nodma = nodma;
5750 5756 icmd->ipkt_next = icmd->ipkt_prev = NULL;
5751 5757 icmd->ipkt_lun = NULL;
5752 5758
5753 5759 icmd->ipkt_link_cnt = lcount;
5754 5760 icmd->ipkt_change_cnt = tcount;
5755 5761 icmd->ipkt_cause = cause;
5756 5762
5757 5763 mutex_enter(&pptr->port_mutex);
5758 5764 icmd->ipkt_port = pptr;
5759 5765 mutex_exit(&pptr->port_mutex);
5760 5766
5761 5767 /* keep track of amt of data to be sent in pkt */
5762 5768 icmd->ipkt_cmdlen = cmd_len;
5763 5769 icmd->ipkt_resplen = resp_len;
5764 5770 icmd->ipkt_datalen = data_len;
5765 5771
5766 5772 /* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
5767 5773 icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);
5768 5774
5769 5775 /* set pkt's private ptr to point to cmd pkt */
5770 5776 icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;
5771 5777
5772 5778 /* set FCA private ptr to memory just beyond */
5773 5779 icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
5774 5780 ((char *)icmd + sizeof (struct fcp_ipkt) +
5775 5781 pptr->port_dmacookie_sz);
5776 5782
5777 5783 /* get ptr to fpkt substruct and fill it in */
5778 5784 fpkt = icmd->ipkt_fpkt;
5779 5785 fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
5780 5786 sizeof (struct fcp_ipkt));
5781 5787
5782 5788 if (ptgt != NULL) {
5783 5789 icmd->ipkt_tgt = ptgt;
5784 5790 fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
5785 5791 }
5786 5792
5787 5793 fpkt->pkt_comp = fcp_icmd_callback;
5788 5794 fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
5789 5795 fpkt->pkt_cmdlen = cmd_len;
5790 5796 fpkt->pkt_rsplen = resp_len;
5791 5797 fpkt->pkt_datalen = data_len;
5792 5798
5793 5799 /*
5794 5800 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
5795 5801 	 * rscn_count, as fcp knows it, down to the transport. If a valid count was
5796 5802 * passed into this function, we allocate memory to actually pass down
5797 5803 * this info.
5798 5804 *
5799 5805 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
5800 5806 	 * basically mean that fcp will not be able to help the transport
5801 5807 * distinguish if a new RSCN has come after fcp was last informed about
5802 5808 * it. In such cases, it might lead to the problem mentioned in CR/bug #
5803 5809 * 5068068 where the device might end up going offline in case of RSCN
5804 5810 * storms.
5805 5811 */
5806 5812 fpkt->pkt_ulp_rscn_infop = NULL;
5807 5813 if (rscn_count != FC_INVALID_RSCN_COUNT) {
5808 5814 fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
5809 5815 sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
5810 5816 if (fpkt->pkt_ulp_rscn_infop == NULL) {
5811 5817 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5812 5818 fcp_trace, FCP_BUF_LEVEL_6, 0,
5813 5819 "Failed to alloc memory to pass rscn info");
5814 5820 }
5815 5821 }
5816 5822
5817 5823 if (fpkt->pkt_ulp_rscn_infop != NULL) {
5818 5824 fc_ulp_rscn_info_t *rscnp;
5819 5825
5820 5826 rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
5821 5827 rscnp->ulp_rscn_count = rscn_count;
5822 5828 }
5823 5829
5824 5830 if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
5825 5831 goto fail;
5826 5832 }
5827 5833 dma_setup++;
5828 5834
5829 5835 /*
5830 5836 * Must hold target mutex across setting of pkt_pd and call to
5831 5837 * fc_ulp_init_packet to ensure the handle to the target doesn't go
5832 5838 * away while we're not looking.
5833 5839 */
5834 5840 if (ptgt != NULL) {
5835 5841 mutex_enter(&ptgt->tgt_mutex);
5836 5842 fpkt->pkt_pd = ptgt->tgt_pd_handle;
5837 5843
5838 5844 /* ask transport to do its initialization on this pkt */
5839 5845 if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5840 5846 != FC_SUCCESS) {
5841 5847 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5842 5848 fcp_trace, FCP_BUF_LEVEL_6, 0,
5843 5849 "fc_ulp_init_packet failed");
5844 5850 mutex_exit(&ptgt->tgt_mutex);
5845 5851 goto fail;
5846 5852 }
5847 5853 mutex_exit(&ptgt->tgt_mutex);
5848 5854 } else {
5849 5855 if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5850 5856 != FC_SUCCESS) {
5851 5857 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5852 5858 fcp_trace, FCP_BUF_LEVEL_6, 0,
5853 5859 "fc_ulp_init_packet failed");
5854 5860 goto fail;
5855 5861 }
5856 5862 }
5857 5863
5858 5864 mutex_enter(&pptr->port_mutex);
5859 5865 if (pptr->port_state & (FCP_STATE_DETACHING |
5860 5866 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
5861 5867 int rval;
5862 5868
5863 5869 mutex_exit(&pptr->port_mutex);
5864 5870
5865 5871 rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
5866 5872 ASSERT(rval == FC_SUCCESS);
5867 5873
5868 5874 goto fail;
5869 5875 }
5870 5876
5871 5877 if (ptgt != NULL) {
5872 5878 mutex_enter(&ptgt->tgt_mutex);
5873 5879 ptgt->tgt_ipkt_cnt++;
5874 5880 mutex_exit(&ptgt->tgt_mutex);
5875 5881 }
5876 5882
5877 5883 pptr->port_ipkt_cnt++;
5878 5884
5879 5885 mutex_exit(&pptr->port_mutex);
5880 5886
5881 5887 return (icmd);
5882 5888
5883 5889 fail:
5884 5890 if (fpkt->pkt_ulp_rscn_infop != NULL) {
5885 5891 kmem_free(fpkt->pkt_ulp_rscn_infop,
5886 5892 sizeof (fc_ulp_rscn_info_t));
5887 5893 fpkt->pkt_ulp_rscn_infop = NULL;
5888 5894 }
5889 5895
5890 5896 if (dma_setup) {
5891 5897 fcp_free_dma(pptr, icmd);
5892 5898 }
5893 5899 kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5894 5900 (size_t)pptr->port_dmacookie_sz);
5895 5901
5896 5902 return (NULL);
5897 5903 }
5898 5904
5899 5905 /*
5900 5906 * Function: fcp_icmd_free
5901 5907 *
5902 5908 * Description: Frees the internal command passed by the caller.
5903 5909 *
5904 5910 * Argument: *pptr Fcp port.
5905 5911 * *icmd Internal packet to free.
5906 5912 *
5907 5913 * Return Value: None
5908 5914 */
5909 5915 static void
5910 5916 fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5911 5917 {
5912 5918 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
5913 5919
5914 5920 /* Let the underlying layers do their cleanup. */
5915 5921 (void) fc_ulp_uninit_packet(pptr->port_fp_handle,
5916 5922 icmd->ipkt_fpkt);
5917 5923
5918 5924 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
5919 5925 kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
5920 5926 sizeof (fc_ulp_rscn_info_t));
5921 5927 }
5922 5928
5923 5929 fcp_free_dma(pptr, icmd);
5924 5930
5925 5931 kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5926 5932 (size_t)pptr->port_dmacookie_sz);
5927 5933
5928 5934 mutex_enter(&pptr->port_mutex);
5929 5935
5930 5936 if (ptgt) {
5931 5937 mutex_enter(&ptgt->tgt_mutex);
5932 5938 ptgt->tgt_ipkt_cnt--;
5933 5939 mutex_exit(&ptgt->tgt_mutex);
5934 5940 }
5935 5941
5936 5942 pptr->port_ipkt_cnt--;
5937 5943 mutex_exit(&pptr->port_mutex);
5938 5944 }
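Taken together with fcp_icmd_alloc(), the internal-packet lifecycle used throughout this file follows the sketch below (illustrative only; opcode, lengths and counters are assumed to come from the surrounding code):

	/* Sketch only: allocate, send, and free on send failure. */
	icmd = fcp_icmd_alloc(pptr, ptgt, cmd_len, resp_len, 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
	    lcount, tcount, cause, rscn_count);
	if (icmd == NULL) {
		return (DDI_FAILURE);
	}
	if (fcp_send_els(pptr, ptgt, icmd, opcode, lcount, tcount,
	    cause) != DDI_SUCCESS) {
		fcp_icmd_free(pptr, icmd);	/* sender frees on failure */
		return (DDI_FAILURE);
	}
	/* on success the completion callback eventually frees the packet */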
5939 5945
5940 5946 /*
5941 5947 * Function: fcp_alloc_dma
5942 5948 *
5943 5949  * Description: Allocates the DMA resources required for the internal
5944 5950 * packet.
5945 5951 *
5946 5952 * Argument: *pptr FCP port.
5947 5953 * *icmd Internal FCP packet.
5948 5954 * nodma Indicates if the Cmd and Resp will be DMAed.
5949 5955 * flags Allocation flags (Sleep or NoSleep).
5950 5956 *
5951 5957 * Return Value: FC_SUCCESS
5952 5958 * FC_NOMEM
5953 5959 */
5954 5960 static int
5955 5961 fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
5956 5962 int nodma, int flags)
5957 5963 {
5958 5964 int rval;
5959 5965 size_t real_size;
5960 5966 uint_t ccount;
5961 5967 int bound = 0;
5962 5968 int cmd_resp = 0;
5963 5969 fc_packet_t *fpkt;
5964 5970 ddi_dma_cookie_t pkt_data_cookie;
5965 5971 ddi_dma_cookie_t *cp;
5966 5972 uint32_t cnt;
5967 5973
5968 5974 fpkt = &icmd->ipkt_fc_packet;
5969 5975
5970 5976 ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
5971 5977 fpkt->pkt_resp_dma == NULL);
5972 5978
5973 5979 icmd->ipkt_nodma = nodma;
5974 5980
5975 5981 if (nodma) {
5976 5982 fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
5977 5983 if (fpkt->pkt_cmd == NULL) {
5978 5984 goto fail;
5979 5985 }
5980 5986
5981 5987 fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
5982 5988 if (fpkt->pkt_resp == NULL) {
5983 5989 goto fail;
5984 5990 }
5985 5991 } else {
5986 5992 ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);
5987 5993
5988 5994 rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
5989 5995 if (rval == FC_FAILURE) {
5990 5996 ASSERT(fpkt->pkt_cmd_dma == NULL &&
5991 5997 fpkt->pkt_resp_dma == NULL);
5992 5998 goto fail;
5993 5999 }
5994 6000 cmd_resp++;
5995 6001 }
5996 6002
5997 6003 if ((fpkt->pkt_datalen != 0) &&
5998 6004 !(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
5999 6005 /*
6000 6006 * set up DMA handle and memory for the data in this packet
6001 6007 */
6002 6008 if (ddi_dma_alloc_handle(pptr->port_dip,
6003 6009 &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
6004 6010 NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
6005 6011 goto fail;
6006 6012 }
6007 6013
6008 6014 if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
6009 6015 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
6010 6016 DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
6011 6017 &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
6012 6018 goto fail;
6013 6019 }
6014 6020
6015 6021 		/* did we get less DMA memory than asked for/needed? */
6016 6022 if (real_size < fpkt->pkt_datalen) {
6017 6023 goto fail;
6018 6024 }
6019 6025
6020 6026 /* bind DMA address and handle together */
6021 6027 if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
6022 6028 NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
6023 6029 DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
6024 6030 &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
6025 6031 goto fail;
6026 6032 }
6027 6033 bound++;
6028 6034
6029 6035 if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
6030 6036 goto fail;
6031 6037 }
6032 6038
6033 6039 fpkt->pkt_data_cookie_cnt = ccount;
6034 6040
6035 6041 cp = fpkt->pkt_data_cookie;
6036 6042 *cp = pkt_data_cookie;
6037 6043 cp++;
6038 6044
6039 6045 for (cnt = 1; cnt < ccount; cnt++, cp++) {
6040 6046 ddi_dma_nextcookie(fpkt->pkt_data_dma,
6041 6047 &pkt_data_cookie);
6042 6048 *cp = pkt_data_cookie;
6043 6049 }
6044 6050
6045 6051 } else if (fpkt->pkt_datalen != 0) {
6046 6052 /*
6047 6053 * If it's a pseudo FCA, then it can't support DMA even in
6048 6054 * SCSI data phase.
6049 6055 */
6050 6056 fpkt->pkt_data = kmem_alloc(fpkt->pkt_datalen, flags);
6051 6057 if (fpkt->pkt_data == NULL) {
6052 6058 goto fail;
6053 6059 }
6054 6060
6055 6061 }
6056 6062
6057 6063 return (FC_SUCCESS);
6058 6064
6059 6065 fail:
6060 6066 if (bound) {
6061 6067 (void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6062 6068 }
6063 6069
6064 6070 if (fpkt->pkt_data_dma) {
6065 6071 if (fpkt->pkt_data) {
6066 6072 ddi_dma_mem_free(&fpkt->pkt_data_acc);
6067 6073 }
6068 6074 ddi_dma_free_handle(&fpkt->pkt_data_dma);
6069 6075 } else {
6070 6076 if (fpkt->pkt_data) {
6071 6077 kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6072 6078 }
6073 6079 }
6074 6080
6075 6081 if (nodma) {
6076 6082 if (fpkt->pkt_cmd) {
6077 6083 kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
6078 6084 }
6079 6085 if (fpkt->pkt_resp) {
6080 6086 kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
6081 6087 }
6082 6088 } else {
6083 6089 if (cmd_resp) {
6084 6090 fcp_free_cmd_resp(pptr, fpkt);
6085 6091 }
6086 6092 }
6087 6093
6088 6094 return (FC_NOMEM);
6089 6095 }
6090 6096
6091 6097
6092 6098 static void
6093 6099 fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
6094 6100 {
6095 6101 fc_packet_t *fpkt = icmd->ipkt_fpkt;
6096 6102
6097 6103 if (fpkt->pkt_data_dma) {
6098 6104 (void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6099 6105 if (fpkt->pkt_data) {
6100 6106 ddi_dma_mem_free(&fpkt->pkt_data_acc);
6101 6107 }
6102 6108 ddi_dma_free_handle(&fpkt->pkt_data_dma);
6103 6109 } else {
6104 6110 if (fpkt->pkt_data) {
6105 6111 kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6106 6112 }
6107 6113 /*
6108 6114 * Need we reset pkt_* to zero???
6109 6115 */
6110 6116 }
6111 6117
6112 6118 if (icmd->ipkt_nodma) {
6113 6119 if (fpkt->pkt_cmd) {
6114 6120 kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
6115 6121 }
6116 6122 if (fpkt->pkt_resp) {
6117 6123 kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
6118 6124 }
6119 6125 } else {
6120 6126 ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
6121 6127
6122 6128 fcp_free_cmd_resp(pptr, fpkt);
6123 6129 }
6124 6130 }
6125 6131
6126 6132 /*
6127 6133 * Function: fcp_lookup_target
6128 6134 *
6129 6135 * Description: Finds a target given a WWN.
6130 6136 *
6131 6137 * Argument: *pptr FCP port.
6132 6138 * *wwn World Wide Name of the device to look for.
6133 6139 *
6134 6140 * Return Value: NULL No target found
6135 6141 * Not NULL Target structure
6136 6142 *
6137 6143 * Context: Interrupt context.
6138 6144 * The mutex pptr->port_mutex must be owned.
6139 6145 */
6140 6146 /* ARGSUSED */
6141 6147 static struct fcp_tgt *
6142 6148 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
6143 6149 {
6144 6150 int hash;
6145 6151 struct fcp_tgt *ptgt;
6146 6152
6147 6153 ASSERT(mutex_owned(&pptr->port_mutex));
6148 6154
6149 6155 hash = FCP_HASH(wwn);
6150 6156
6151 6157 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
6152 6158 ptgt = ptgt->tgt_next) {
6153 6159 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
6154 6160 bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
6155 6161 sizeof (ptgt->tgt_port_wwn)) == 0) {
6156 6162 break;
6157 6163 }
6158 6164 }
6159 6165
6160 6166 return (ptgt);
6161 6167 }
6162 6168
6163 6169
6164 6170 /*
6165 6171 * Find target structure given a port identifier
6166 6172 */
6167 6173 static struct fcp_tgt *
6168 6174 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6169 6175 {
6170 6176 fc_portid_t port_id;
6171 6177 la_wwn_t pwwn;
6172 6178 struct fcp_tgt *ptgt = NULL;
6173 6179
6174 6180 port_id.priv_lilp_posit = 0;
6175 6181 port_id.port_id = d_id;
6176 6182 if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6177 6183 &pwwn) == FC_SUCCESS) {
6178 6184 mutex_enter(&pptr->port_mutex);
6179 6185 ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6180 6186 mutex_exit(&pptr->port_mutex);
6181 6187 }
6182 6188
6183 6189 return (ptgt);
6184 6190 }
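As used by fcp_unsol_prli() above, a minimal sketch of resolving the sender of an unsolicited frame and sampling its change counter (buf is the fc_unsol_buf_t handed to the ULP):

	/* Sketch only: map the frame's S_ID back to a known target. */
	struct fcp_tgt	*ptgt;
	int		tcount = 0;

	if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) != NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		tcount = ptgt->tgt_change_cnt;
		mutex_exit(&ptgt->tgt_mutex);
	}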
6185 6191
6186 6192
6187 6193 /*
6188 6194 * the packet completion callback routine for info cmd pkts
6189 6195 *
6190 6196  * this means fpkt points to a response to either a PLOGI or a PRLI
6191 6197 *
6192 6198 * if there is an error an attempt is made to call a routine to resend
6193 6199 * the command that failed
6194 6200 */
6195 6201 static void
6196 6202 fcp_icmd_callback(fc_packet_t *fpkt)
6197 6203 {
6198 6204 struct fcp_ipkt *icmd;
6199 6205 struct fcp_port *pptr;
6200 6206 struct fcp_tgt *ptgt;
6201 6207 struct la_els_prli *prli;
6202 6208 struct la_els_prli prli_s;
6203 6209 struct fcp_prli *fprli;
6204 6210 struct fcp_lun *plun;
6205 6211 int free_pkt = 1;
6206 6212 int rval;
6207 6213 ls_code_t resp;
6208 6214 uchar_t prli_acc = 0;
6209 6215 uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
6210 6216 int lun0_newalloc;
6211 6217
6212 6218 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
6213 6219
6214 6220 /* get ptrs to the port and target structs for the cmd */
6215 6221 pptr = icmd->ipkt_port;
6216 6222 ptgt = icmd->ipkt_tgt;
6217 6223
6218 6224 FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));
6219 6225
6220 6226 if (icmd->ipkt_opcode == LA_ELS_PRLI) {
6221 6227 FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
6222 6228 sizeof (prli_s));
6223 6229 prli_acc = (prli_s.ls_code == LA_ELS_ACC);
6224 6230 }
6225 6231
6226 6232 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6227 6233 fcp_trace, FCP_BUF_LEVEL_2, 0,
6228 6234 "ELS (%x) callback state=0x%x reason=0x%x for %x",
6229 6235 icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
6230 6236 ptgt->tgt_d_id);
6231 6237
6232 6238 if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
6233 6239 ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {
6234 6240
6235 6241 mutex_enter(&ptgt->tgt_mutex);
6236 6242 if (ptgt->tgt_pd_handle == NULL) {
6237 6243 /*
6238 6244 * in a fabric environment the port device handles
6239 6245 * get created only after successful LOGIN into the
6240 6246 * transport, so the transport makes this port
6241 6247 			 * device (pd) handle available in this packet;
6242 6248 * save it now
6243 6249 */
6244 6250 ASSERT(fpkt->pkt_pd != NULL);
6245 6251 ptgt->tgt_pd_handle = fpkt->pkt_pd;
6246 6252 }
6247 6253 mutex_exit(&ptgt->tgt_mutex);
6248 6254
6249 6255 /* which ELS cmd is this response for ?? */
6250 6256 switch (icmd->ipkt_opcode) {
6251 6257 case LA_ELS_PLOGI:
6252 6258 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6253 6259 fcp_trace, FCP_BUF_LEVEL_5, 0,
6254 6260 "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
6255 6261 ptgt->tgt_d_id,
6256 6262 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
6257 6263 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));
6258 6264
6259 6265 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6260 6266 FCP_TGT_TRACE_15);
6261 6267
6262 6268 /* Note that we are not allocating a new icmd */
6263 6269 if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
6264 6270 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6265 6271 icmd->ipkt_cause) != DDI_SUCCESS) {
6266 6272 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6267 6273 FCP_TGT_TRACE_16);
6268 6274 goto fail;
6269 6275 }
6270 6276 break;
6271 6277
6272 6278 case LA_ELS_PRLI:
6273 6279 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6274 6280 fcp_trace, FCP_BUF_LEVEL_5, 0,
6275 6281 "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);
6276 6282
6277 6283 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6278 6284 FCP_TGT_TRACE_17);
6279 6285
6280 6286 prli = &prli_s;
6281 6287
6282 6288 FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
6283 6289 sizeof (prli_s));
6284 6290
6285 6291 fprli = (struct fcp_prli *)prli->service_params;
6286 6292
6287 6293 mutex_enter(&ptgt->tgt_mutex);
6288 6294 ptgt->tgt_icap = fprli->initiator_fn;
6289 6295 ptgt->tgt_tcap = fprli->target_fn;
6290 6296 mutex_exit(&ptgt->tgt_mutex);
6291 6297
6292 6298 if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
6293 6299 /*
6294 6300 * this FCP device does not support target mode
6295 6301 */
6296 6302 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6297 6303 FCP_TGT_TRACE_18);
6298 6304 goto fail;
6299 6305 }
6300 6306 if (fprli->retry == 1) {
6301 6307 fc_ulp_disable_relogin(pptr->port_fp_handle,
6302 6308 &ptgt->tgt_port_wwn);
6303 6309 }
6304 6310
6305 6311 /* target is no longer offline */
6306 6312 mutex_enter(&pptr->port_mutex);
6307 6313 mutex_enter(&ptgt->tgt_mutex);
6308 6314 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6309 6315 ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
6310 6316 FCP_TGT_MARK);
6311 6317 } else {
6312 6318 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6313 6319 fcp_trace, FCP_BUF_LEVEL_2, 0,
6314 6320 "fcp_icmd_callback,1: state change "
6315 6321 " occured for D_ID=0x%x", ptgt->tgt_d_id);
6316 6322 mutex_exit(&ptgt->tgt_mutex);
6317 6323 mutex_exit(&pptr->port_mutex);
6318 6324 goto fail;
6319 6325 }
6320 6326 mutex_exit(&ptgt->tgt_mutex);
6321 6327 mutex_exit(&pptr->port_mutex);
6322 6328
6323 6329 /*
6324 6330 * lun 0 should always respond to inquiry, so
6325 6331 * get the LUN struct for LUN 0
6326 6332 *
6327 6333 * Currently we deal with first level of addressing.
6328 6334 * If / when we start supporting 0x device types
6329 6335 * (DTYPE_ARRAY_CTRL, i.e. array controllers)
6330 6336 * this logic will need revisiting.
6331 6337 */
6332 6338 lun0_newalloc = 0;
6333 6339 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
6334 6340 /*
6335 6341 * no LUN struct for LUN 0 yet exists,
6336 6342 * so create one
6337 6343 */
6338 6344 plun = fcp_alloc_lun(ptgt);
6339 6345 if (plun == NULL) {
6340 6346 fcp_log(CE_WARN, pptr->port_dip,
6341 6347 "!Failed to allocate lun 0 for"
6342 6348 " D_ID=%x", ptgt->tgt_d_id);
6343 6349 goto fail;
6344 6350 }
6345 6351 lun0_newalloc = 1;
6346 6352 }
6347 6353
6348 6354 /* fill in LUN info */
6349 6355 mutex_enter(&ptgt->tgt_mutex);
6350 6356 /*
6351 6357 * consider lun 0 as device not connected if it is
6352 6358 * offlined or newly allocated
6353 6359 */
6354 6360 if ((plun->lun_state & FCP_LUN_OFFLINE) ||
6355 6361 lun0_newalloc) {
6356 6362 plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
6357 6363 }
6358 6364 plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
6359 6365 plun->lun_state &= ~FCP_LUN_OFFLINE;
6360 6366 ptgt->tgt_lun_cnt = 1;
6361 6367 ptgt->tgt_report_lun_cnt = 0;
6362 6368 mutex_exit(&ptgt->tgt_mutex);
6363 6369
6364 6370 /* Retrieve the rscn count (if a valid one exists) */
6365 6371 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
6366 6372 rscn_count = ((fc_ulp_rscn_info_t *)
6367 6373 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
6368 6374 ->ulp_rscn_count;
6369 6375 } else {
6370 6376 rscn_count = FC_INVALID_RSCN_COUNT;
6371 6377 }
6372 6378
6373 6379 /* send Report Lun request to target */
6374 6380 if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
6375 6381 sizeof (struct fcp_reportlun_resp),
6376 6382 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6377 6383 icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
6378 6384 mutex_enter(&pptr->port_mutex);
6379 6385 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6380 6386 fcp_log(CE_WARN, pptr->port_dip,
6381 6387 "!Failed to send REPORT LUN to"
6382 6388 " D_ID=%x", ptgt->tgt_d_id);
6383 6389 } else {
6384 6390 FCP_TRACE(fcp_logq,
6385 6391 pptr->port_instbuf, fcp_trace,
6386 6392 FCP_BUF_LEVEL_5, 0,
6387 6393 "fcp_icmd_callback,2:state change"
6388 6394 " occured for D_ID=0x%x",
6389 6395 ptgt->tgt_d_id);
6390 6396 }
6391 6397 mutex_exit(&pptr->port_mutex);
6392 6398
6393 6399 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6394 6400 FCP_TGT_TRACE_19);
6395 6401
6396 6402 goto fail;
6397 6403 } else {
6398 6404 free_pkt = 0;
6399 6405 fcp_icmd_free(pptr, icmd);
6400 6406 }
6401 6407 break;
6402 6408
6403 6409 default:
6404 6410 fcp_log(CE_WARN, pptr->port_dip,
6405 6411 "!fcp_icmd_callback Invalid opcode");
6406 6412 goto fail;
6407 6413 }
6408 6414
6409 6415 return;
6410 6416 }
6411 6417
6412 6418
6413 6419 /*
6414 6420 * Other PLOGI failures are not retried as the
6415 6421 * transport does it already
6416 6422 */
6417 6423 if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
6418 6424 if (fcp_is_retryable(icmd) &&
6419 6425 icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6420 6426
6421 6427 if (FCP_MUST_RETRY(fpkt)) {
6422 6428 fcp_queue_ipkt(pptr, fpkt);
6423 6429 return;
6424 6430 }
6425 6431
6426 6432 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6427 6433 fcp_trace, FCP_BUF_LEVEL_2, 0,
6428 6434 "ELS PRLI is retried for d_id=0x%x, state=%x,"
6429 6435 " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
6430 6436 fpkt->pkt_reason);
6431 6437
6432 6438 /*
6433 6439 * Retry by recalling the routine that
6434 6440 * originally queued this packet
6435 6441 */
6436 6442 mutex_enter(&pptr->port_mutex);
6437 6443 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6438 6444 caddr_t msg;
6439 6445
6440 6446 mutex_exit(&pptr->port_mutex);
6441 6447
6442 6448 ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);
6443 6449
6444 6450 if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
6445 6451 fpkt->pkt_timeout +=
6446 6452 FCP_TIMEOUT_DELTA;
6447 6453 }
6448 6454
6449 6455 rval = fc_ulp_issue_els(pptr->port_fp_handle,
6450 6456 fpkt);
6451 6457 if (rval == FC_SUCCESS) {
6452 6458 return;
6453 6459 }
6454 6460
6455 6461 if (rval == FC_STATEC_BUSY ||
6456 6462 rval == FC_OFFLINE) {
6457 6463 fcp_queue_ipkt(pptr, fpkt);
6458 6464 return;
6459 6465 }
6460 6466 (void) fc_ulp_error(rval, &msg);
6461 6467
6462 6468 fcp_log(CE_NOTE, pptr->port_dip,
6463 6469 "!ELS 0x%x failed to d_id=0x%x;"
6464 6470 " %s", icmd->ipkt_opcode,
6465 6471 ptgt->tgt_d_id, msg);
6466 6472 } else {
6467 6473 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6468 6474 fcp_trace, FCP_BUF_LEVEL_2, 0,
6469 6475 "fcp_icmd_callback,3: state change "
6470 6476 " occured for D_ID=0x%x", ptgt->tgt_d_id);
6471 6477 mutex_exit(&pptr->port_mutex);
6472 6478 }
6473 6479 }
6474 6480 } else {
6475 6481 if (fcp_is_retryable(icmd) &&
6476 6482 icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6477 6483 if (FCP_MUST_RETRY(fpkt)) {
6478 6484 fcp_queue_ipkt(pptr, fpkt);
6479 6485 return;
6480 6486 }
6481 6487 }
6482 6488 mutex_enter(&pptr->port_mutex);
6483 6489 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
6484 6490 fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
6485 6491 mutex_exit(&pptr->port_mutex);
6486 6492 fcp_print_error(fpkt);
6487 6493 } else {
6488 6494 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6489 6495 fcp_trace, FCP_BUF_LEVEL_2, 0,
6490 6496 "fcp_icmd_callback,4: state change occured"
6491 6497 " for D_ID=0x%x", ptgt->tgt_d_id);
6492 6498 mutex_exit(&pptr->port_mutex);
6493 6499 }
6494 6500 }
6495 6501
6496 6502 fail:
6497 6503 if (free_pkt) {
6498 6504 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6499 6505 icmd->ipkt_change_cnt, icmd->ipkt_cause);
6500 6506 fcp_icmd_free(pptr, icmd);
6501 6507 }
6502 6508 }
6503 6509
6504 6510
6505 6511 /*
6506 6512 * called internally to send an info cmd using the transport
6507 6513 *
6508 6514 * sends either an INQ or a REPORT_LUN
6509 6515 *
6510 6516 * when the packet is completed fcp_scsi_callback is called
6511 6517 */
6512 6518 static int
6513 6519 fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
6514 6520 int lcount, int tcount, int cause, uint32_t rscn_count)
6515 6521 {
6516 6522 int nodma;
6517 6523 struct fcp_ipkt *icmd;
6518 6524 struct fcp_tgt *ptgt;
6519 6525 struct fcp_port *pptr;
6520 6526 fc_frame_hdr_t *hp;
6521 6527 fc_packet_t *fpkt;
6522 6528 struct fcp_cmd fcp_cmd;
6523 6529 struct fcp_cmd *fcmd;
6524 6530 union scsi_cdb *scsi_cdb;
6525 6531
6526 6532 ASSERT(plun != NULL);
6527 6533
6528 6534 ptgt = plun->lun_tgt;
6529 6535 ASSERT(ptgt != NULL);
6530 6536
6531 6537 pptr = ptgt->tgt_port;
6532 6538 ASSERT(pptr != NULL);
6533 6539
6534 6540 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6535 6541 fcp_trace, FCP_BUF_LEVEL_5, 0,
6536 6542 "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);
6537 6543
6538 6544 nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
6539 6545 icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
6540 6546 FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
6541 6547 rscn_count);
6542 6548
6543 6549 if (icmd == NULL) {
6544 6550 return (DDI_FAILURE);
6545 6551 }
6546 6552
6547 6553 fpkt = icmd->ipkt_fpkt;
6548 6554 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
6549 6555 icmd->ipkt_retries = 0;
6550 6556 icmd->ipkt_opcode = opcode;
6551 6557 icmd->ipkt_lun = plun;
6552 6558
6553 6559 if (nodma) {
6554 6560 fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
6555 6561 } else {
6556 6562 fcmd = &fcp_cmd;
6557 6563 }
6558 6564 bzero(fcmd, sizeof (struct fcp_cmd));
6559 6565
6560 6566 fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;
6561 6567
6562 6568 hp = &fpkt->pkt_cmd_fhdr;
6563 6569
6564 6570 hp->s_id = pptr->port_id;
6565 6571 hp->d_id = ptgt->tgt_d_id;
6566 6572 hp->r_ctl = R_CTL_COMMAND;
6567 6573 hp->type = FC_TYPE_SCSI_FCP;
6568 6574 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
6569 6575 hp->rsvd = 0;
6570 6576 hp->seq_id = 0;
6571 6577 hp->seq_cnt = 0;
6572 6578 hp->ox_id = 0xffff;
6573 6579 hp->rx_id = 0xffff;
6574 6580 hp->ro = 0;
6575 6581
6576 6582 bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);
6577 6583
6578 6584 /*
6579 6585 * Request SCSI target for expedited processing
6580 6586 */
6581 6587
6582 6588 /*
6583 6589 * Set up for untagged queuing because we do not
6584 6590 * know if the fibre device supports queuing.
6585 6591 */
6586 6592 fcmd->fcp_cntl.cntl_reserved_0 = 0;
6587 6593 fcmd->fcp_cntl.cntl_reserved_1 = 0;
6588 6594 fcmd->fcp_cntl.cntl_reserved_2 = 0;
6589 6595 fcmd->fcp_cntl.cntl_reserved_3 = 0;
6590 6596 fcmd->fcp_cntl.cntl_reserved_4 = 0;
6591 6597 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
6592 6598 scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
6593 6599
6594 6600 switch (opcode) {
6595 6601 case SCMD_INQUIRY_PAGE83:
6596 6602 /*
6597 6603 * Prepare to get the Inquiry VPD page 83 information
6598 6604 */
6599 6605 fcmd->fcp_cntl.cntl_read_data = 1;
6600 6606 fcmd->fcp_cntl.cntl_write_data = 0;
6601 6607 fcmd->fcp_data_len = alloc_len;
6602 6608
6603 6609 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6604 6610 fpkt->pkt_comp = fcp_scsi_callback;
6605 6611
6606 6612 scsi_cdb->scc_cmd = SCMD_INQUIRY;
6607 6613 scsi_cdb->g0_addr2 = 0x01;
6608 6614 scsi_cdb->g0_addr1 = 0x83;
6609 6615 scsi_cdb->g0_count0 = (uchar_t)alloc_len;
6610 6616 break;
6611 6617
6612 6618 case SCMD_INQUIRY:
6613 6619 fcmd->fcp_cntl.cntl_read_data = 1;
6614 6620 fcmd->fcp_cntl.cntl_write_data = 0;
6615 6621 fcmd->fcp_data_len = alloc_len;
6616 6622
6617 6623 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6618 6624 fpkt->pkt_comp = fcp_scsi_callback;
6619 6625
6620 6626 scsi_cdb->scc_cmd = SCMD_INQUIRY;
6621 6627 scsi_cdb->g0_count0 = SUN_INQSIZE;
6622 6628 break;
6623 6629
6624 6630 case SCMD_REPORT_LUN: {
6625 6631 fc_portid_t d_id;
6626 6632 opaque_t fca_dev;
6627 6633
6628 6634 ASSERT(alloc_len >= 16);
6629 6635
6630 6636 d_id.priv_lilp_posit = 0;
6631 6637 d_id.port_id = ptgt->tgt_d_id;
6632 6638
6633 6639 fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);
6634 6640
6635 6641 mutex_enter(&ptgt->tgt_mutex);
6636 6642 ptgt->tgt_fca_dev = fca_dev;
6637 6643 mutex_exit(&ptgt->tgt_mutex);
6638 6644
6639 6645 fcmd->fcp_cntl.cntl_read_data = 1;
6640 6646 fcmd->fcp_cntl.cntl_write_data = 0;
6641 6647 fcmd->fcp_data_len = alloc_len;
6642 6648
6643 6649 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6644 6650 fpkt->pkt_comp = fcp_scsi_callback;
6645 6651
6646 6652 scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
6647 6653 scsi_cdb->scc5_count0 = alloc_len & 0xff;
6648 6654 scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
6649 6655 scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
6650 6656 scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
6651 6657 break;
6652 6658 }
6653 6659
6654 6660 default:
6655 6661 fcp_log(CE_WARN, pptr->port_dip,
6656 6662 "!fcp_send_scsi Invalid opcode");
6657 6663 break;
6658 6664 }
6659 6665
6660 6666 if (!nodma) {
6661 6667 FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
6662 6668 fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
6663 6669 }
6664 6670
6665 6671 mutex_enter(&pptr->port_mutex);
6666 6672 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6667 6673
6668 6674 mutex_exit(&pptr->port_mutex);
6669 6675 if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
6670 6676 FC_SUCCESS) {
6671 6677 fcp_icmd_free(pptr, icmd);
6672 6678 return (DDI_FAILURE);
6673 6679 }
6674 6680 return (DDI_SUCCESS);
6675 6681 } else {
6676 6682 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6677 6683 fcp_trace, FCP_BUF_LEVEL_2, 0,
6678 6684 "fcp_send_scsi,1: state change occured"
6679 6685 " for D_ID=0x%x", ptgt->tgt_d_id);
6680 6686 mutex_exit(&pptr->port_mutex);
6681 6687 fcp_icmd_free(pptr, icmd);
6682 6688 return (DDI_FAILURE);
6683 6689 }
6684 6690 }
6685 6691
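/*
 * A minimal sketch of the CDB length encoding used in fcp_send_scsi()
 * above: the 32-bit REPORT LUNS allocation length is split big-endian
 * across scc5_count3..scc5_count0.  The helper name is hypothetical and
 * only illustrates the byte layout.
 */
static void
sketch_set_report_lun_alloc_len(union scsi_cdb *scsi_cdb, uint32_t alloc_len)
{
	scsi_cdb->scc5_count0 = alloc_len & 0xff;		/* LSB */
	scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
	scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
	scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;	/* MSB */
}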
6686 6692
6687 6693 /*
6688 6694  * called by fcp_scsi_callback to handle the case where
6689 6695 * REPORT_LUN returns ILLEGAL REQUEST or a UNIT ATTENTION
6690 6696 */
6691 6697 static int
6692 6698 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
6693 6699 {
6694 6700 uchar_t rqlen;
6695 6701 int rval = DDI_FAILURE;
6696 6702 struct scsi_extended_sense sense_info, *sense;
6697 6703 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
6698 6704 fpkt->pkt_ulp_private;
6699 6705 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
6700 6706 struct fcp_port *pptr = ptgt->tgt_port;
6701 6707
6702 6708 ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6703 6709
6704 6710 if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6705 6711 /*
6706 6712 * SCSI-II Reserve Release support. Some older FC drives return
6707 6713 * Reservation conflict for Report Luns command.
6708 6714 */
6709 6715 if (icmd->ipkt_nodma) {
6710 6716 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6711 6717 rsp->fcp_u.fcp_status.sense_len_set = 0;
6712 6718 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6713 6719 } else {
6714 6720 fcp_rsp_t new_resp;
6715 6721
6716 6722 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6717 6723 fpkt->pkt_resp_acc, sizeof (new_resp));
6718 6724
6719 6725 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6720 6726 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6721 6727 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6722 6728
6723 6729 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6724 6730 fpkt->pkt_resp_acc, sizeof (new_resp));
6725 6731 }
6726 6732
6727 6733 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6728 6734 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6729 6735
6730 6736 return (DDI_SUCCESS);
6731 6737 }
6732 6738
6733 6739 sense = &sense_info;
6734 6740 if (!rsp->fcp_u.fcp_status.sense_len_set) {
6735 6741 /* no need to continue if sense length is not set */
6736 6742 return (rval);
6737 6743 }
6738 6744
6739 6745 /* casting 64-bit integer to 8-bit */
6740 6746 rqlen = (uchar_t)min(rsp->fcp_sense_len,
6741 6747 sizeof (struct scsi_extended_sense));
6742 6748
6743 6749 if (rqlen < 14) {
6744 6750 /* no need to continue if request length isn't long enough */
6745 6751 return (rval);
6746 6752 }
6747 6753
6748 6754 if (icmd->ipkt_nodma) {
6749 6755 /*
6750 6756 * We can safely use fcp_response_len here since the
6751 6757 * only path that calls fcp_check_reportlun,
6752 6758 * fcp_scsi_callback, has already called
6753 6759 * fcp_validate_fcp_response.
6754 6760 */
6755 6761 sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6756 6762 sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6757 6763 } else {
6758 6764 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6759 6765 rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6760 6766 sizeof (struct scsi_extended_sense));
6761 6767 }
6762 6768
6763 6769 if (!FCP_SENSE_NO_LUN(sense)) {
6764 6770 mutex_enter(&ptgt->tgt_mutex);
6765 6771 /* clear the flag if any */
6766 6772 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6767 6773 mutex_exit(&ptgt->tgt_mutex);
6768 6774 }
6769 6775
6770 6776 if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6771 6777 (sense->es_add_code == 0x20)) {
6772 6778 if (icmd->ipkt_nodma) {
6773 6779 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6774 6780 rsp->fcp_u.fcp_status.sense_len_set = 0;
6775 6781 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6776 6782 } else {
6777 6783 fcp_rsp_t new_resp;
6778 6784
6779 6785 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6780 6786 fpkt->pkt_resp_acc, sizeof (new_resp));
6781 6787
6782 6788 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6783 6789 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6784 6790 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6785 6791
6786 6792 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6787 6793 fpkt->pkt_resp_acc, sizeof (new_resp));
6788 6794 }
6789 6795
6790 6796 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6791 6797 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6792 6798
6793 6799 return (DDI_SUCCESS);
6794 6800 }
6795 6801
6796 6802 /*
6797 6803 	 * This is for the STK library, which returns a check condition
6798 6804 	 * indicating the device is not ready and manual assistance is needed.
6799 6805 	 * This happens on a report lun command when the door is open.
6800 6806 */
6801 6807 if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6802 6808 if (icmd->ipkt_nodma) {
6803 6809 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6804 6810 rsp->fcp_u.fcp_status.sense_len_set = 0;
6805 6811 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6806 6812 } else {
6807 6813 fcp_rsp_t new_resp;
6808 6814
6809 6815 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6810 6816 fpkt->pkt_resp_acc, sizeof (new_resp));
6811 6817
6812 6818 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6813 6819 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6814 6820 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6815 6821
6816 6822 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6817 6823 fpkt->pkt_resp_acc, sizeof (new_resp));
6818 6824 }
6819 6825
6820 6826 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6821 6827 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6822 6828
6823 6829 return (DDI_SUCCESS);
6824 6830 }
6825 6831
6826 6832 if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6827 6833 (FCP_SENSE_NO_LUN(sense))) {
6828 6834 mutex_enter(&ptgt->tgt_mutex);
6829 6835 if ((FCP_SENSE_NO_LUN(sense)) &&
6830 6836 (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6831 6837 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6832 6838 mutex_exit(&ptgt->tgt_mutex);
6833 6839 /*
6834 6840 			 * reconfig was triggered by ILLEGAL REQUEST but
6835 6841 * got ILLEGAL REQUEST again
6836 6842 */
6837 6843 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6838 6844 fcp_trace, FCP_BUF_LEVEL_3, 0,
6839 6845 "!FCP: Unable to obtain Report Lun data"
6840 6846 " target=%x", ptgt->tgt_d_id);
6841 6847 } else {
6842 6848 if (ptgt->tgt_tid == NULL) {
6843 6849 timeout_id_t tid;
6844 6850 /*
6845 6851 * REPORT LUN data has changed. Kick off
6846 6852 * rediscovery
6847 6853 */
6848 6854 tid = timeout(fcp_reconfigure_luns,
6849 6855 (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6850 6856
6851 6857 ptgt->tgt_tid = tid;
6852 6858 ptgt->tgt_state |= FCP_TGT_BUSY;
6853 6859 }
6854 6860 if (FCP_SENSE_NO_LUN(sense)) {
6855 6861 ptgt->tgt_state |= FCP_TGT_ILLREQ;
6856 6862 }
6857 6863 mutex_exit(&ptgt->tgt_mutex);
6858 6864 if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6859 6865 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6860 6866 fcp_trace, FCP_BUF_LEVEL_3, 0,
6861 6867 "!FCP:Report Lun Has Changed"
6862 6868 " target=%x", ptgt->tgt_d_id);
6863 6869 } else if (FCP_SENSE_NO_LUN(sense)) {
6864 6870 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6865 6871 fcp_trace, FCP_BUF_LEVEL_3, 0,
6866 6872 "!FCP:LU Not Supported"
6867 6873 " target=%x", ptgt->tgt_d_id);
6868 6874 }
6869 6875 }
6870 6876 rval = DDI_SUCCESS;
6871 6877 }
6872 6878
6873 6879 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6874 6880 fcp_trace, FCP_BUF_LEVEL_5, 0,
6875 6881 "D_ID=%x, sense=%x, status=%x",
6876 6882 fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6877 6883 rsp->fcp_u.fcp_status.scsi_status);
6878 6884
6879 6885 return (rval);
6880 6886 }
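/*
 * A minimal sketch of the sense classification that drives the faked
 * REPORT LUNS responses in fcp_check_reportlun() above: ILLEGAL REQUEST
 * with ASC 0x20 and NOT READY with ASC 0x04 are both answered with a
 * dummy LUN list.  The helper name is hypothetical and simply restates
 * the checks coded above for illustration.
 */
static int
sketch_reportlun_needs_fake_response(struct scsi_extended_sense *sense)
{
	if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
	    (sense->es_add_code == 0x20)) {
		return (1);
	}
	if ((sense->es_key == KEY_NOT_READY) &&
	    (sense->es_add_code == 0x04)) {
		return (1);
	}
	return (0);
}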
6881 6887
6882 6888 /*
6883 6889 * Function: fcp_scsi_callback
6884 6890 *
6885 6891 * Description: This is the callback routine set by fcp_send_scsi() after
6886 6892  *		 it calls fcp_icmd_alloc(). The SCSI commands completed
6887 6893  *		 here, all autogenerated by FCP, are: REPORT_LUN, INQUIRY
6888 6894  *		 and INQUIRY_PAGE83.
6889 6895 *
6890 6896 * Argument: *fpkt FC packet used to convey the command
6891 6897 *
6892 6898 * Return Value: None
6893 6899 */
6894 6900 static void
6895 6901 fcp_scsi_callback(fc_packet_t *fpkt)
6896 6902 {
6897 6903 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
6898 6904 fpkt->pkt_ulp_private;
6899 6905 struct fcp_rsp_info fcp_rsp_err, *bep;
6900 6906 struct fcp_port *pptr;
6901 6907 struct fcp_tgt *ptgt;
6902 6908 struct fcp_lun *plun;
6903 6909 struct fcp_rsp response, *rsp;
6904 6910
6905 6911 ptgt = icmd->ipkt_tgt;
6906 6912 pptr = ptgt->tgt_port;
6907 6913 plun = icmd->ipkt_lun;
6908 6914
6909 6915 if (icmd->ipkt_nodma) {
6910 6916 rsp = (struct fcp_rsp *)fpkt->pkt_resp;
6911 6917 } else {
6912 6918 rsp = &response;
6913 6919 FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
6914 6920 sizeof (struct fcp_rsp));
6915 6921 }
6916 6922
6917 6923 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6918 6924 fcp_trace, FCP_BUF_LEVEL_2, 0,
6919 6925 "SCSI callback state=0x%x for %x, op_code=0x%x, "
6920 6926 "status=%x, lun num=%x",
6921 6927 fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
6922 6928 rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);
6923 6929
6924 6930 /*
6925 6931 * Pre-init LUN GUID with NWWN if it is not a device that
6926 6932 * supports multiple luns and we know it's not page83
6927 6933 * compliant. Although using a NWWN is not lun unique,
6928 6934 	 * we will be fine since there is only one lun behind the target
6929 6935 * in this case.
6930 6936 */
6931 6937 if ((plun->lun_guid_size == 0) &&
6932 6938 (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6933 6939 (fcp_symmetric_device_probe(plun) == 0)) {
6934 6940
6935 6941 char ascii_wwn[FC_WWN_SIZE*2+1];
6936 6942 fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
6937 6943 (void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
6938 6944 }
6939 6945
6940 6946 /*
6941 6947 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
6942 6948 	 * when they have more data than what is asked for in the CDB. An
6943 6949 	 * overrun really means that FCP_DL is smaller than the data length
6944 6950 	 * in the CDB. In this case we know that the REPORT LUN command we
6945 6951 	 * formed within this binary has a correct FCP_DL, so this OVERRUN
6946 6952 	 * is due to bad device behavior. In reality this is FC_SUCCESS.
6947 6953 */
6948 6954 if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
6949 6955 (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
6950 6956 (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
6951 6957 fpkt->pkt_state = FC_PKT_SUCCESS;
6952 6958 }
6953 6959
6954 6960 if (fpkt->pkt_state != FC_PKT_SUCCESS) {
6955 6961 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6956 6962 fcp_trace, FCP_BUF_LEVEL_2, 0,
6957 6963 "icmd failed with state=0x%x for %x", fpkt->pkt_state,
6958 6964 ptgt->tgt_d_id);
6959 6965
6960 6966 if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
6961 6967 /*
6962 6968 * Inquiry VPD page command on A5K SES devices would
6963 6969 * result in data CRC errors.
6964 6970 */
6965 6971 if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
6966 6972 (void) fcp_handle_page83(fpkt, icmd, 1);
6967 6973 return;
6968 6974 }
6969 6975 }
6970 6976 if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
6971 6977 FCP_MUST_RETRY(fpkt)) {
6972 6978 fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
6973 6979 fcp_retry_scsi_cmd(fpkt);
6974 6980 return;
6975 6981 }
6976 6982
6977 6983 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6978 6984 FCP_TGT_TRACE_20);
6979 6985
6980 6986 mutex_enter(&pptr->port_mutex);
6981 6987 mutex_enter(&ptgt->tgt_mutex);
6982 6988 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6983 6989 mutex_exit(&ptgt->tgt_mutex);
6984 6990 mutex_exit(&pptr->port_mutex);
6985 6991 fcp_print_error(fpkt);
6986 6992 } else {
6987 6993 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6988 6994 fcp_trace, FCP_BUF_LEVEL_2, 0,
6989 6995 			    "fcp_scsi_callback,1: state change occurred"
6990 6996 " for D_ID=0x%x", ptgt->tgt_d_id);
6991 6997 mutex_exit(&ptgt->tgt_mutex);
6992 6998 mutex_exit(&pptr->port_mutex);
6993 6999 }
6994 7000 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6995 7001 icmd->ipkt_change_cnt, icmd->ipkt_cause);
6996 7002 fcp_icmd_free(pptr, icmd);
6997 7003 return;
6998 7004 }
6999 7005
7000 7006 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);
7001 7007
7002 7008 mutex_enter(&pptr->port_mutex);
7003 7009 mutex_enter(&ptgt->tgt_mutex);
7004 7010 if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7005 7011 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7006 7012 fcp_trace, FCP_BUF_LEVEL_2, 0,
7007 7013 		    "fcp_scsi_callback,2: state change occurred"
7008 7014 " for D_ID=0x%x", ptgt->tgt_d_id);
7009 7015 mutex_exit(&ptgt->tgt_mutex);
7010 7016 mutex_exit(&pptr->port_mutex);
7011 7017 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7012 7018 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7013 7019 fcp_icmd_free(pptr, icmd);
7014 7020 return;
7015 7021 }
7016 7022 ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7017 7023
7018 7024 mutex_exit(&ptgt->tgt_mutex);
7019 7025 mutex_exit(&pptr->port_mutex);
7020 7026
7021 7027 if (icmd->ipkt_nodma) {
7022 7028 bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
7023 7029 sizeof (struct fcp_rsp));
7024 7030 } else {
7025 7031 bep = &fcp_rsp_err;
7026 7032 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
7027 7033 fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
7028 7034 }
7029 7035
7030 7036 if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
7031 7037 fcp_retry_scsi_cmd(fpkt);
7032 7038 return;
7033 7039 }
7034 7040
7035 7041 if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
7036 7042 FCP_NO_FAILURE) {
7037 7043 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7038 7044 fcp_trace, FCP_BUF_LEVEL_2, 0,
7039 7045 "rsp_code=0x%x, rsp_len_set=0x%x",
7040 7046 bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
7041 7047 fcp_retry_scsi_cmd(fpkt);
7042 7048 return;
7043 7049 }
7044 7050
7045 7051 if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
7046 7052 rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
7047 7053 fcp_queue_ipkt(pptr, fpkt);
7048 7054 return;
7049 7055 }
7050 7056
7051 7057 /*
7052 7058 	 * Devices that do not support INQUIRY_PAGE83 return a check condition
7053 7059 * with illegal request as per SCSI spec.
7054 7060 * Crossbridge is one such device and Daktari's SES node is another.
7055 7061 	 * Ideally we want to enumerate these devices as non-mpxio devices.
7056 7062 * SES nodes (Daktari only currently) are an exception to this.
7057 7063 */
7058 7064 if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
7059 7065 (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {
7060 7066
7061 7067 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7062 7068 fcp_trace, FCP_BUF_LEVEL_3, 0,
7063 7069 "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
7064 7070 "check condition. May enumerate as non-mpxio device",
7065 7071 ptgt->tgt_d_id, plun->lun_type);
7066 7072
7067 7073 /*
7068 7074 * If we let Daktari's SES be enumerated as a non-mpxio
7069 7075 		 * device, there will be a discrepancy in that the other
7070 7076 * internal FC disks will get enumerated as mpxio devices.
7071 7077 * Applications like luxadm expect this to be consistent.
7072 7078 *
7073 7079 * So, we put in a hack here to check if this is an SES device
7074 7080 * and handle it here.
7075 7081 */
7076 7082 if (plun->lun_type == DTYPE_ESI) {
7077 7083 /*
7078 7084 			 * Since pkt_state is actually FC_PKT_SUCCESS
7079 7085 * at this stage, we fake a failure here so that
7080 7086 * fcp_handle_page83 will create a device path using
7081 7087 * the WWN instead of the GUID which is not there anyway
7082 7088 */
7083 7089 fpkt->pkt_state = FC_PKT_LOCAL_RJT;
7084 7090 (void) fcp_handle_page83(fpkt, icmd, 1);
7085 7091 return;
7086 7092 }
7087 7093
7088 7094 mutex_enter(&ptgt->tgt_mutex);
7089 7095 plun->lun_state &= ~(FCP_LUN_OFFLINE |
7090 7096 FCP_LUN_MARK | FCP_LUN_BUSY);
7091 7097 mutex_exit(&ptgt->tgt_mutex);
7092 7098
7093 7099 (void) fcp_call_finish_init(pptr, ptgt,
7094 7100 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7095 7101 icmd->ipkt_cause);
7096 7102 fcp_icmd_free(pptr, icmd);
7097 7103 return;
7098 7104 }
7099 7105
7100 7106 if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7101 7107 int rval = DDI_FAILURE;
7102 7108
7103 7109 /*
7104 7110 * handle cases where report lun isn't supported
7105 7111 * by faking up our own REPORT_LUN response or
7106 7112 * UNIT ATTENTION
7107 7113 */
7108 7114 if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7109 7115 rval = fcp_check_reportlun(rsp, fpkt);
7110 7116
7111 7117 /*
7112 7118 * fcp_check_reportlun might have modified the
7113 7119 * FCP response. Copy it in again to get an updated
7114 7120 * FCP response
7115 7121 */
7116 7122 if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
7117 7123 rsp = &response;
7118 7124
7119 7125 FCP_CP_IN(fpkt->pkt_resp, rsp,
7120 7126 fpkt->pkt_resp_acc,
7121 7127 sizeof (struct fcp_rsp));
7122 7128 }
7123 7129 }
7124 7130
7125 7131 if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7126 7132 if (rval == DDI_SUCCESS) {
7127 7133 (void) fcp_call_finish_init(pptr, ptgt,
7128 7134 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7129 7135 icmd->ipkt_cause);
7130 7136 fcp_icmd_free(pptr, icmd);
7131 7137 } else {
7132 7138 fcp_retry_scsi_cmd(fpkt);
7133 7139 }
7134 7140
7135 7141 return;
7136 7142 }
7137 7143 } else {
7138 7144 if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7139 7145 mutex_enter(&ptgt->tgt_mutex);
7140 7146 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
7141 7147 mutex_exit(&ptgt->tgt_mutex);
7142 7148 }
7143 7149 }
7144 7150
7145 7151 ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
7146 7152 if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
7147 7153 (void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0,
7148 7154 DDI_DMA_SYNC_FORCPU);
7149 7155 }
7150 7156
7151 7157 switch (icmd->ipkt_opcode) {
7152 7158 case SCMD_INQUIRY:
7153 7159 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
7154 7160 fcp_handle_inquiry(fpkt, icmd);
7155 7161 break;
7156 7162
7157 7163 case SCMD_REPORT_LUN:
7158 7164 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
7159 7165 FCP_TGT_TRACE_22);
7160 7166 fcp_handle_reportlun(fpkt, icmd);
7161 7167 break;
7162 7168
7163 7169 case SCMD_INQUIRY_PAGE83:
7164 7170 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
7165 7171 (void) fcp_handle_page83(fpkt, icmd, 0);
7166 7172 break;
7167 7173
7168 7174 default:
7169 7175 fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
7170 7176 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7171 7177 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7172 7178 fcp_icmd_free(pptr, icmd);
7173 7179 break;
7174 7180 }
7175 7181 }
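/*
 * A minimal sketch of the access-handle based copy the FCP_CP_IN() calls
 * above are assumed to perform (the macro lives in fcpvar.h and is
 * assumed here to wrap ddi_rep_get8()).  The helper name is hypothetical;
 * when ipkt_nodma is set the driver aliases pkt_resp directly instead of
 * copying.
 */
static void
sketch_copy_rsp_in(fc_packet_t *fpkt, struct fcp_rsp *rsp)
{
	/* copy the FCP response out of the DMA buffer via its handle */
	ddi_rep_get8(fpkt->pkt_resp_acc, (uint8_t *)rsp,
	    (uint8_t *)fpkt->pkt_resp, sizeof (struct fcp_rsp),
	    DDI_DEV_AUTOINCR);
}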
7176 7182
7177 7183
7178 7184 static void
7179 7185 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7180 7186 {
7181 7187 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
7182 7188 fpkt->pkt_ulp_private;
7183 7189 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
7184 7190 struct fcp_port *pptr = ptgt->tgt_port;
7185 7191
7186 7192 if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7187 7193 fcp_is_retryable(icmd)) {
7188 7194 mutex_enter(&pptr->port_mutex);
7189 7195 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7190 7196 mutex_exit(&pptr->port_mutex);
7191 7197 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7192 7198 fcp_trace, FCP_BUF_LEVEL_3, 0,
7193 7199 "Retrying %s to %x; state=%x, reason=%x",
7194 7200 (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7195 7201 "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7196 7202 fpkt->pkt_state, fpkt->pkt_reason);
7197 7203
7198 7204 fcp_queue_ipkt(pptr, fpkt);
7199 7205 } else {
7200 7206 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7201 7207 fcp_trace, FCP_BUF_LEVEL_3, 0,
7202 7208 			    "fcp_retry_scsi_cmd,1: state change occurred"
7203 7209 " for D_ID=0x%x", ptgt->tgt_d_id);
7204 7210 mutex_exit(&pptr->port_mutex);
7205 7211 (void) fcp_call_finish_init(pptr, ptgt,
7206 7212 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7207 7213 icmd->ipkt_cause);
7208 7214 fcp_icmd_free(pptr, icmd);
7209 7215 }
7210 7216 } else {
7211 7217 fcp_print_error(fpkt);
7212 7218 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7213 7219 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7214 7220 fcp_icmd_free(pptr, icmd);
7215 7221 }
7216 7222 }
7217 7223
7218 7224 /*
7219 7225 * Function: fcp_handle_page83
7220 7226 *
7221 7227 * Description: Treats the response to INQUIRY_PAGE83.
7222 7228 *
7223 7229 * Argument: *fpkt FC packet used to convey the command.
7224 7230 * *icmd Original fcp_ipkt structure.
7225 7231 * ignore_page83_data
7226 7232  *				if it's 1, this is a special device's
7227 7233  *				page83 response and it should be enumerated under mpxio
7228 7234 *
7229 7235 * Return Value: None
7230 7236 */
7231 7237 static void
7232 7238 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7233 7239 int ignore_page83_data)
7234 7240 {
7235 7241 struct fcp_port *pptr;
7236 7242 struct fcp_lun *plun;
7237 7243 struct fcp_tgt *ptgt;
7238 7244 uchar_t dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7239 7245 int fail = 0;
7240 7246 ddi_devid_t devid;
7241 7247 char *guid = NULL;
7242 7248 int ret;
7243 7249
7244 7250 ASSERT(icmd != NULL && fpkt != NULL);
7245 7251
7246 7252 pptr = icmd->ipkt_port;
7247 7253 ptgt = icmd->ipkt_tgt;
7248 7254 plun = icmd->ipkt_lun;
7249 7255
7250 7256 if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7251 7257 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7252 7258
7253 7259 FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7254 7260 SCMD_MAX_INQUIRY_PAGE83_SIZE);
7255 7261
7256 7262 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7257 7263 fcp_trace, FCP_BUF_LEVEL_5, 0,
7258 7264 "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7259 7265 "dtype=0x%x, lun num=%x",
7260 7266 pptr->port_instance, ptgt->tgt_d_id,
7261 7267 dev_id_page[0], plun->lun_num);
7262 7268
7263 7269 ret = ddi_devid_scsi_encode(
7264 7270 DEVID_SCSI_ENCODE_VERSION_LATEST,
7265 7271 NULL, /* driver name */
7266 7272 (unsigned char *) &plun->lun_inq, /* standard inquiry */
7267 7273 sizeof (plun->lun_inq), /* size of standard inquiry */
7268 7274 NULL, /* page 80 data */
7269 7275 0, /* page 80 len */
7270 7276 dev_id_page, /* page 83 data */
7271 7277 SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7272 7278 &devid);
7273 7279
7274 7280 if (ret == DDI_SUCCESS) {
7275 7281
7276 7282 guid = ddi_devid_to_guid(devid);
7277 7283
7278 7284 if (guid) {
7279 7285 /*
7280 7286 * Check our current guid. If it's non null
7281 7287 * and it has changed, we need to copy it into
7282 7288 * lun_old_guid since we might still need it.
7283 7289 */
7284 7290 if (plun->lun_guid &&
7285 7291 strcmp(guid, plun->lun_guid)) {
7286 7292 unsigned int len;
7287 7293
7288 7294 /*
7289 7295 * If the guid of the LUN changes,
7290 7296 * reconfiguration should be triggered
7291 7297 * to reflect the changes.
7292 7298 * i.e. we should offline the LUN with
7293 7299 * the old guid, and online the LUN with
7294 7300 * the new guid.
7295 7301 */
7296 7302 plun->lun_state |= FCP_LUN_CHANGED;
7297 7303
7298 7304 if (plun->lun_old_guid) {
7299 7305 kmem_free(plun->lun_old_guid,
7300 7306 plun->lun_old_guid_size);
7301 7307 }
7302 7308
7303 7309 len = plun->lun_guid_size;
7304 7310 plun->lun_old_guid_size = len;
7305 7311
7306 7312 plun->lun_old_guid = kmem_zalloc(len,
7307 7313 KM_NOSLEEP);
7308 7314
7309 7315 if (plun->lun_old_guid) {
7310 7316 /*
7311 7317 					 * The alloc was successful, so
7312 7318 					 * let's do the copy.
7313 7319 */
7314 7320 bcopy(plun->lun_guid,
7315 7321 plun->lun_old_guid, len);
7316 7322 } else {
7317 7323 fail = 1;
7318 7324 plun->lun_old_guid_size = 0;
7319 7325 }
7320 7326 }
7321 7327 if (!fail) {
7322 7328 if (fcp_copy_guid_2_lun_block(
7323 7329 plun, guid)) {
7324 7330 fail = 1;
7325 7331 }
7326 7332 }
7327 7333 ddi_devid_free_guid(guid);
7328 7334
7329 7335 } else {
7330 7336 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7331 7337 fcp_trace, FCP_BUF_LEVEL_2, 0,
7332 7338 "fcp_handle_page83: unable to create "
7333 7339 "GUID");
7334 7340
7335 7341 /* couldn't create good guid from devid */
7336 7342 fail = 1;
7337 7343 }
7338 7344 ddi_devid_free(devid);
7339 7345
7340 7346 } else if (ret == DDI_NOT_WELL_FORMED) {
7341 7347 /* NULL filled data for page 83 */
7342 7348 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7343 7349 fcp_trace, FCP_BUF_LEVEL_2, 0,
7344 7350 "fcp_handle_page83: retry GUID");
7345 7351
7346 7352 icmd->ipkt_retries = 0;
7347 7353 fcp_retry_scsi_cmd(fpkt);
7348 7354 return;
7349 7355 } else {
7350 7356 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7351 7357 fcp_trace, FCP_BUF_LEVEL_2, 0,
7352 7358 "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7353 7359 ret);
7354 7360 /*
7355 7361 			 * Since the page83 validation was
7356 7362 			 * introduced late, we are tolerant of
7357 7363 			 * existing devices that were already
7358 7364 			 * found to be working under mpxio, like
7359 7365 			 * the A5200's SES device: its page83
7360 7366 			 * response is not standard-compliant,
7361 7367 			 * but we still want it enumerated under mpxio.
7362 7368 */
7363 7369 if (fcp_symmetric_device_probe(plun) != 0) {
7364 7370 fail = 1;
7365 7371 }
7366 7372 }
7367 7373
7368 7374 } else {
7369 7375 /* bad packet state */
7370 7376 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7371 7377
7372 7378 /*
7373 7379 		 * Some special devices (A5K SES and Daktari's SES devices)
7374 7380 		 * should be enumerated under mpxio,
7375 7381 		 * or "luxadm dis" will fail
7376 7382 */
7377 7383 if (ignore_page83_data) {
7378 7384 fail = 0;
7379 7385 } else {
7380 7386 fail = 1;
7381 7387 }
7382 7388 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7383 7389 fcp_trace, FCP_BUF_LEVEL_2, 0,
7384 7390 "!Devid page cmd failed. "
7385 7391 		    "fpkt_state: %x fpkt_reason: %x "
7386 7392 "ignore_page83: %d",
7387 7393 fpkt->pkt_state, fpkt->pkt_reason,
7388 7394 ignore_page83_data);
7389 7395 }
7390 7396
7391 7397 mutex_enter(&pptr->port_mutex);
7392 7398 mutex_enter(&plun->lun_mutex);
7393 7399 /*
7394 7400 	 * If lun_cip is not NULL, skip updating lun_mpxio to avoid a
7395 7401 	 * mismatch between lun_cip and lun_mpxio.
7396 7402 */
7397 7403 if (plun->lun_cip == NULL) {
7398 7404 /*
7399 7405 * If we don't have a guid for this lun it's because we were
7400 7406 * unable to glean one from the page 83 response. Set the
7401 7407 * control flag to 0 here to make sure that we don't attempt to
7402 7408 * enumerate it under mpxio.
7403 7409 */
7404 7410 if (fail || pptr->port_mpxio == 0) {
7405 7411 plun->lun_mpxio = 0;
7406 7412 } else {
7407 7413 plun->lun_mpxio = 1;
7408 7414 }
7409 7415 }
7410 7416 mutex_exit(&plun->lun_mutex);
7411 7417 mutex_exit(&pptr->port_mutex);
7412 7418
7413 7419 mutex_enter(&ptgt->tgt_mutex);
7414 7420 plun->lun_state &=
7415 7421 ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7416 7422 mutex_exit(&ptgt->tgt_mutex);
7417 7423
7418 7424 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7419 7425 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7420 7426
7421 7427 fcp_icmd_free(pptr, icmd);
7422 7428 }
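/*
 * A minimal sketch of the devid/GUID derivation performed in
 * fcp_handle_page83() above, using the same DDI calls.  Error handling
 * is reduced to the bare minimum and the helper name is hypothetical;
 * the caller owns the returned string and frees it with
 * ddi_devid_free_guid().
 */
static char *
sketch_guid_from_page83(struct scsi_inquiry *std_inq, uchar_t *page83,
    size_t page83_len)
{
	ddi_devid_t	devid;
	char		*guid = NULL;

	if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
	    NULL, (unsigned char *)std_inq, sizeof (*std_inq),
	    NULL, 0, page83, page83_len, &devid) == DDI_SUCCESS) {
		guid = ddi_devid_to_guid(devid);
		ddi_devid_free(devid);
	}
	return (guid);
}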
7423 7429
7424 7430 /*
7425 7431 * Function: fcp_handle_inquiry
7426 7432 *
7427 7433 * Description: Called by fcp_scsi_callback to handle the response to an
7428 7434 * INQUIRY request.
7429 7435 *
7430 7436 * Argument: *fpkt FC packet used to convey the command.
7431 7437 * *icmd Original fcp_ipkt structure.
7432 7438 *
7433 7439 * Return Value: None
7434 7440 */
7435 7441 static void
7436 7442 fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7437 7443 {
7438 7444 struct fcp_port *pptr;
7439 7445 struct fcp_lun *plun;
7440 7446 struct fcp_tgt *ptgt;
7441 7447 uchar_t dtype;
7442 7448 uchar_t pqual;
7443 7449 uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
7444 7450
7445 7451 ASSERT(icmd != NULL && fpkt != NULL);
7446 7452
7447 7453 pptr = icmd->ipkt_port;
7448 7454 ptgt = icmd->ipkt_tgt;
7449 7455 plun = icmd->ipkt_lun;
7450 7456
7451 7457 FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
7452 7458 sizeof (struct scsi_inquiry));
7453 7459
7454 7460 dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
7455 7461 pqual = plun->lun_inq.inq_dtype >> 5;
7456 7462
7457 7463 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7458 7464 fcp_trace, FCP_BUF_LEVEL_5, 0,
7459 7465 "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
7460 7466 "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
7461 7467 plun->lun_num, dtype, pqual);
7462 7468
7463 7469 if (pqual != 0) {
7464 7470 /*
7465 7471 * Non-zero peripheral qualifier
7466 7472 */
7467 7473 fcp_log(CE_CONT, pptr->port_dip,
7468 7474 "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7469 7475 "Device type=0x%x Peripheral qual=0x%x\n",
7470 7476 ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7471 7477
7472 7478 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7473 7479 fcp_trace, FCP_BUF_LEVEL_5, 0,
7474 7480 "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7475 7481 "Device type=0x%x Peripheral qual=0x%x\n",
7476 7482 ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7477 7483
7478 7484 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);
7479 7485
7480 7486 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7481 7487 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7482 7488 fcp_icmd_free(pptr, icmd);
7483 7489 return;
7484 7490 }
7485 7491
7486 7492 /*
7487 7493 * If the device is already initialized, check the dtype
7488 7494 * for a change. If it has changed then update the flags
7489 7495 * so the create_luns will offline the old device and
7490 7496 * create the new device. Refer to bug: 4764752
7491 7497 */
7492 7498 if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
7493 7499 plun->lun_state |= FCP_LUN_CHANGED;
7494 7500 }
7495 7501 plun->lun_type = plun->lun_inq.inq_dtype;
7496 7502
7497 7503 /*
7498 7504 * This code is setting/initializing the throttling in the FCA
7499 7505 * driver.
7500 7506 */
7501 7507 mutex_enter(&pptr->port_mutex);
7502 7508 if (!pptr->port_notify) {
7503 7509 if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
7504 7510 uint32_t cmd = 0;
7505 7511 cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
7506 7512 ((cmd & 0xFFFFFF00 >> 8) |
7507 7513 FCP_SVE_THROTTLE << 8));
7508 7514 pptr->port_notify = 1;
7509 7515 mutex_exit(&pptr->port_mutex);
7510 7516 (void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
7511 7517 mutex_enter(&pptr->port_mutex);
7512 7518 }
7513 7519 }
7514 7520
7515 7521 if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7516 7522 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7517 7523 fcp_trace, FCP_BUF_LEVEL_2, 0,
7518 7524 		    "fcp_handle_inquiry,1: state change occurred"
7519 7525 " for D_ID=0x%x", ptgt->tgt_d_id);
7520 7526 mutex_exit(&pptr->port_mutex);
7521 7527
7522 7528 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
7523 7529 (void) fcp_call_finish_init(pptr, ptgt,
7524 7530 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7525 7531 icmd->ipkt_cause);
7526 7532 fcp_icmd_free(pptr, icmd);
7527 7533 return;
7528 7534 }
7529 7535 ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7530 7536 mutex_exit(&pptr->port_mutex);
7531 7537
7532 7538 /* Retrieve the rscn count (if a valid one exists) */
7533 7539 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7534 7540 rscn_count = ((fc_ulp_rscn_info_t *)
7535 7541 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
7536 7542 } else {
7537 7543 rscn_count = FC_INVALID_RSCN_COUNT;
7538 7544 }
7539 7545
7540 7546 if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
7541 7547 SCMD_MAX_INQUIRY_PAGE83_SIZE,
7542 7548 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7543 7549 icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7544 7550 fcp_log(CE_WARN, NULL, "!failed to send page 83");
7545 7551 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
7546 7552 (void) fcp_call_finish_init(pptr, ptgt,
7547 7553 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7548 7554 icmd->ipkt_cause);
7549 7555 }
7550 7556
7551 7557 /*
7552 7558 * Read Inquiry VPD Page 0x83 to uniquely
7553 7559 * identify this logical unit.
7554 7560 */
7555 7561 fcp_icmd_free(pptr, icmd);
7556 7562 }
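/*
 * A minimal sketch of the inq_dtype decoding used in fcp_handle_inquiry()
 * above: the low five bits (DTYPE_MASK) carry the peripheral device type
 * and the top three bits carry the peripheral qualifier (zero means the
 * device is connected).  The helper name is hypothetical.
 */
static void
sketch_decode_inq_dtype(uchar_t inq_dtype, uchar_t *dtype, uchar_t *pqual)
{
	*dtype = inq_dtype & DTYPE_MASK;	/* e.g. DTYPE_DIRECT, DTYPE_ESI */
	*pqual = inq_dtype >> 5;		/* nonzero => LUN not usable here */
}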
7557 7563
7558 7564 /*
7559 7565 * Function: fcp_handle_reportlun
7560 7566 *
7561 7567 * Description: Called by fcp_scsi_callback to handle the response to a
7562 7568 * REPORT_LUN request.
7563 7569 *
7564 7570 * Argument: *fpkt FC packet used to convey the command.
7565 7571 * *icmd Original fcp_ipkt structure.
7566 7572 *
7567 7573 * Return Value: None
7568 7574 */
7569 7575 static void
7570 7576 fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7571 7577 {
7572 7578 int i;
7573 7579 int nluns_claimed;
7574 7580 int nluns_bufmax;
7575 7581 int len;
7576 7582 uint16_t lun_num;
7577 7583 uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
7578 7584 struct fcp_port *pptr;
7579 7585 struct fcp_tgt *ptgt;
7580 7586 struct fcp_lun *plun;
7581 7587 struct fcp_reportlun_resp *report_lun;
7582 7588
7583 7589 pptr = icmd->ipkt_port;
7584 7590 ptgt = icmd->ipkt_tgt;
7585 7591 len = fpkt->pkt_datalen;
7586 7592
7587 7593 if ((len < FCP_LUN_HEADER) ||
7588 7594 ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
7589 7595 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7590 7596 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7591 7597 fcp_icmd_free(pptr, icmd);
7592 7598 return;
7593 7599 }
7594 7600
7595 7601 FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
7596 7602 fpkt->pkt_datalen);
7597 7603
7598 7604 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7599 7605 fcp_trace, FCP_BUF_LEVEL_5, 0,
7600 7606 "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
7601 7607 pptr->port_instance, ptgt->tgt_d_id);
7602 7608
7603 7609 /*
7604 7610 * Get the number of luns (which is supplied as LUNS * 8) the
7605 7611 * device claims it has.
7606 7612 */
7607 7613 nluns_claimed = BE_32(report_lun->num_lun) >> 3;
7608 7614
7609 7615 /*
7610 7616 * Get the maximum number of luns the buffer submitted can hold.
7611 7617 */
7612 7618 nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;
7613 7619
7614 7620 /*
7615 7621 * Due to limitations of certain hardware, we support only 16 bit LUNs
7616 7622 */
7617 7623 if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
7618 7624 kmem_free(report_lun, len);
7619 7625
7620 7626 fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
7621 7627 " 0x%x number of LUNs for target=%x", nluns_claimed,
7622 7628 ptgt->tgt_d_id);
7623 7629
7624 7630 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7625 7631 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7626 7632 fcp_icmd_free(pptr, icmd);
7627 7633 return;
7628 7634 }
7629 7635
7630 7636 /*
7631 7637 * If there are more LUNs than we have allocated memory for,
7632 7638 * allocate more space and send down yet another report lun if
7633 7639 * the maximum number of attempts hasn't been reached.
7634 7640 */
7635 7641 mutex_enter(&ptgt->tgt_mutex);
7636 7642
7637 7643 if ((nluns_claimed > nluns_bufmax) &&
7638 7644 (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {
7639 7645
7640 7646 struct fcp_lun *plun;
7641 7647
7642 7648 ptgt->tgt_report_lun_cnt++;
7643 7649 plun = ptgt->tgt_lun;
7644 7650 ASSERT(plun != NULL);
7645 7651 mutex_exit(&ptgt->tgt_mutex);
7646 7652
7647 7653 kmem_free(report_lun, len);
7648 7654
7649 7655 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7650 7656 fcp_trace, FCP_BUF_LEVEL_5, 0,
7651 7657 "!Dynamically discovered %d LUNs for D_ID=%x",
7652 7658 nluns_claimed, ptgt->tgt_d_id);
7653 7659
7654 7660 /* Retrieve the rscn count (if a valid one exists) */
7655 7661 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7656 7662 rscn_count = ((fc_ulp_rscn_info_t *)
7657 7663 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7658 7664 ulp_rscn_count;
7659 7665 } else {
7660 7666 rscn_count = FC_INVALID_RSCN_COUNT;
7661 7667 }
7662 7668
7663 7669 if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
7664 7670 FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
7665 7671 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7666 7672 icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7667 7673 (void) fcp_call_finish_init(pptr, ptgt,
7668 7674 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7669 7675 icmd->ipkt_cause);
7670 7676 }
7671 7677
7672 7678 fcp_icmd_free(pptr, icmd);
7673 7679 return;
7674 7680 }
7675 7681
7676 7682 if (nluns_claimed > nluns_bufmax) {
7677 7683 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7678 7684 fcp_trace, FCP_BUF_LEVEL_5, 0,
7679 7685 "Target=%x:%x:%x:%x:%x:%x:%x:%x"
7680 7686 " Number of LUNs lost=%x",
7681 7687 ptgt->tgt_port_wwn.raw_wwn[0],
7682 7688 ptgt->tgt_port_wwn.raw_wwn[1],
7683 7689 ptgt->tgt_port_wwn.raw_wwn[2],
7684 7690 ptgt->tgt_port_wwn.raw_wwn[3],
7685 7691 ptgt->tgt_port_wwn.raw_wwn[4],
7686 7692 ptgt->tgt_port_wwn.raw_wwn[5],
7687 7693 ptgt->tgt_port_wwn.raw_wwn[6],
7688 7694 ptgt->tgt_port_wwn.raw_wwn[7],
7689 7695 nluns_claimed - nluns_bufmax);
7690 7696
7691 7697 nluns_claimed = nluns_bufmax;
7692 7698 }
7693 7699 ptgt->tgt_lun_cnt = nluns_claimed;
7694 7700
7695 7701 /*
7696 7702 * Identify missing LUNs and print warning messages
7697 7703 */
7698 7704 for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
7699 7705 int offline;
7700 7706 int exists = 0;
7701 7707
7702 7708 offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;
7703 7709
7704 7710 for (i = 0; i < nluns_claimed && exists == 0; i++) {
7705 7711 uchar_t *lun_string;
7706 7712
7707 7713 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7708 7714
7709 7715 switch (lun_string[0] & 0xC0) {
7710 7716 case FCP_LUN_ADDRESSING:
7711 7717 case FCP_PD_ADDRESSING:
7712 7718 case FCP_VOLUME_ADDRESSING:
7713 7719 lun_num = ((lun_string[0] & 0x3F) << 8) |
7714 7720 lun_string[1];
7715 7721 if (plun->lun_num == lun_num) {
7716 7722 exists++;
7717 7723 break;
7718 7724 }
7719 7725 break;
7720 7726
7721 7727 default:
7722 7728 break;
7723 7729 }
7724 7730 }
7725 7731
7726 7732 if (!exists && !offline) {
7727 7733 mutex_exit(&ptgt->tgt_mutex);
7728 7734
7729 7735 mutex_enter(&pptr->port_mutex);
7730 7736 mutex_enter(&ptgt->tgt_mutex);
7731 7737 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7732 7738 /*
7733 7739 * set disappear flag when device was connected
7734 7740 */
7735 7741 if (!(plun->lun_state &
7736 7742 FCP_LUN_DEVICE_NOT_CONNECTED)) {
7737 7743 plun->lun_state |= FCP_LUN_DISAPPEARED;
7738 7744 }
7739 7745 mutex_exit(&ptgt->tgt_mutex);
7740 7746 mutex_exit(&pptr->port_mutex);
7741 7747 if (!(plun->lun_state &
7742 7748 FCP_LUN_DEVICE_NOT_CONNECTED)) {
7743 7749 fcp_log(CE_NOTE, pptr->port_dip,
7744 7750 "!Lun=%x for target=%x disappeared",
7745 7751 plun->lun_num, ptgt->tgt_d_id);
7746 7752 }
7747 7753 mutex_enter(&ptgt->tgt_mutex);
7748 7754 } else {
7749 7755 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7750 7756 fcp_trace, FCP_BUF_LEVEL_5, 0,
7751 7757 "fcp_handle_reportlun,1: state change"
7752 7758 				    " occurred for D_ID=0x%x", ptgt->tgt_d_id);
7753 7759 mutex_exit(&ptgt->tgt_mutex);
7754 7760 mutex_exit(&pptr->port_mutex);
7755 7761 kmem_free(report_lun, len);
7756 7762 (void) fcp_call_finish_init(pptr, ptgt,
7757 7763 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7758 7764 icmd->ipkt_cause);
7759 7765 fcp_icmd_free(pptr, icmd);
7760 7766 return;
7761 7767 }
7762 7768 } else if (exists) {
7763 7769 /*
7764 7770 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
7765 7771 * actually exists in REPORT_LUN response
7766 7772 */
7767 7773 if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
7768 7774 plun->lun_state &=
7769 7775 ~FCP_LUN_DEVICE_NOT_CONNECTED;
7770 7776 }
7771 7777 if (offline || plun->lun_num == 0) {
7772 7778 if (plun->lun_state & FCP_LUN_DISAPPEARED) {
7773 7779 plun->lun_state &= ~FCP_LUN_DISAPPEARED;
7774 7780 mutex_exit(&ptgt->tgt_mutex);
7775 7781 fcp_log(CE_NOTE, pptr->port_dip,
7776 7782 "!Lun=%x for target=%x reappeared",
7777 7783 plun->lun_num, ptgt->tgt_d_id);
7778 7784 mutex_enter(&ptgt->tgt_mutex);
7779 7785 }
7780 7786 }
7781 7787 }
7782 7788 }
7783 7789
7784 7790 ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
7785 7791 mutex_exit(&ptgt->tgt_mutex);
7786 7792
7787 7793 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7788 7794 fcp_trace, FCP_BUF_LEVEL_5, 0,
7789 7795 "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
7790 7796 pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);
7791 7797
7792 7798 /* scan each lun */
7793 7799 for (i = 0; i < nluns_claimed; i++) {
7794 7800 uchar_t *lun_string;
7795 7801
7796 7802 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7797 7803
7798 7804 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7799 7805 fcp_trace, FCP_BUF_LEVEL_5, 0,
7800 7806 "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
7801 7807 " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
7802 7808 lun_string[0]);
7803 7809
7804 7810 switch (lun_string[0] & 0xC0) {
7805 7811 case FCP_LUN_ADDRESSING:
7806 7812 case FCP_PD_ADDRESSING:
7807 7813 case FCP_VOLUME_ADDRESSING:
7808 7814 lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
7809 7815
7810 7816 /* We will skip masked LUNs because of the blacklist. */
7811 7817 if (fcp_lun_blacklist != NULL) {
7812 7818 mutex_enter(&ptgt->tgt_mutex);
7813 7819 if (fcp_should_mask(&ptgt->tgt_port_wwn,
7814 7820 lun_num) == TRUE) {
7815 7821 ptgt->tgt_lun_cnt--;
7816 7822 mutex_exit(&ptgt->tgt_mutex);
7817 7823 break;
7818 7824 }
7819 7825 mutex_exit(&ptgt->tgt_mutex);
7820 7826 }
7821 7827
7822 7828 /* see if this LUN is already allocated */
7823 7829 if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
7824 7830 plun = fcp_alloc_lun(ptgt);
7825 7831 if (plun == NULL) {
7826 7832 fcp_log(CE_NOTE, pptr->port_dip,
7827 7833 "!Lun allocation failed"
7828 7834 " target=%x lun=%x",
7829 7835 ptgt->tgt_d_id, lun_num);
7830 7836 break;
7831 7837 }
7832 7838 }
7833 7839
7834 7840 mutex_enter(&plun->lun_tgt->tgt_mutex);
7835 7841 /* convert to LUN */
7836 7842 plun->lun_addr.ent_addr_0 =
7837 7843 BE_16(*(uint16_t *)&(lun_string[0]));
7838 7844 plun->lun_addr.ent_addr_1 =
7839 7845 BE_16(*(uint16_t *)&(lun_string[2]));
7840 7846 plun->lun_addr.ent_addr_2 =
7841 7847 BE_16(*(uint16_t *)&(lun_string[4]));
7842 7848 plun->lun_addr.ent_addr_3 =
7843 7849 BE_16(*(uint16_t *)&(lun_string[6]));
7844 7850
7845 7851 plun->lun_num = lun_num;
7846 7852 plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
7847 7853 plun->lun_state &= ~FCP_LUN_OFFLINE;
7848 7854 mutex_exit(&plun->lun_tgt->tgt_mutex);
7849 7855
7850 7856 /* Retrieve the rscn count (if a valid one exists) */
7851 7857 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7852 7858 rscn_count = ((fc_ulp_rscn_info_t *)
7853 7859 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7854 7860 ulp_rscn_count;
7855 7861 } else {
7856 7862 rscn_count = FC_INVALID_RSCN_COUNT;
7857 7863 }
7858 7864
7859 7865 if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
7860 7866 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7861 7867 icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7862 7868 mutex_enter(&pptr->port_mutex);
7863 7869 mutex_enter(&plun->lun_tgt->tgt_mutex);
7864 7870 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7865 7871 fcp_log(CE_NOTE, pptr->port_dip,
7866 7872 "!failed to send INQUIRY"
7867 7873 " target=%x lun=%x",
7868 7874 ptgt->tgt_d_id, plun->lun_num);
7869 7875 } else {
7870 7876 FCP_TRACE(fcp_logq,
7871 7877 pptr->port_instbuf, fcp_trace,
7872 7878 FCP_BUF_LEVEL_5, 0,
7873 7879 "fcp_handle_reportlun,2: state"
7874 7880 					    " change occurred for D_ID=0x%x",
7875 7881 ptgt->tgt_d_id);
7876 7882 }
7877 7883 mutex_exit(&plun->lun_tgt->tgt_mutex);
7878 7884 mutex_exit(&pptr->port_mutex);
7879 7885 } else {
7880 7886 continue;
7881 7887 }
7882 7888 break;
7883 7889
7884 7890 default:
7885 7891 fcp_log(CE_WARN, NULL,
7886 7892 "!Unsupported LUN Addressing method %x "
7887 7893 "in response to REPORT_LUN", lun_string[0]);
7888 7894 break;
7889 7895 }
7890 7896
7891 7897 /*
7892 7898 * each time through this loop we should decrement
7893 7899 * the tmp_cnt by one -- since we go through this loop
7894 7900 * one time for each LUN, the tmp_cnt should never be <=0
7895 7901 */
7896 7902 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7897 7903 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7898 7904 }
7899 7905
7900 7906 if (i == 0) {
7901 7907 fcp_log(CE_WARN, pptr->port_dip,
7902 7908 "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
7903 7909 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7904 7910 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7905 7911 }
7906 7912
7907 7913 kmem_free(report_lun, len);
7908 7914 fcp_icmd_free(pptr, icmd);
7909 7915 }
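/*
 * A minimal sketch of the REPORT LUNS entry decoding used in
 * fcp_handle_reportlun() above: the response header gives the LUN list
 * length in bytes (number of LUNs * 8), each entry is eight bytes, the
 * top two bits of byte 0 select the addressing method, and the 16-bit
 * LUN number this driver supports comes from the low six bits of byte 0
 * plus all of byte 1.  The helper name is hypothetical.
 */
static uint16_t
sketch_lun_num_from_entry(uchar_t *lun_string)
{
	/* valid only for the addressing methods handled above */
	return ((uint16_t)(((lun_string[0] & 0x3F) << 8) | lun_string[1]));
}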
7910 7916
7911 7917
7912 7918 /*
7913 7919 * called internally to return a LUN given a target and a LUN number
7914 7920 */
7915 7921 static struct fcp_lun *
7916 7922 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7917 7923 {
7918 7924 struct fcp_lun *plun;
7919 7925
7920 7926 mutex_enter(&ptgt->tgt_mutex);
7921 7927 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7922 7928 if (plun->lun_num == lun_num) {
7923 7929 mutex_exit(&ptgt->tgt_mutex);
7924 7930 return (plun);
7925 7931 }
7926 7932 }
7927 7933 mutex_exit(&ptgt->tgt_mutex);
7928 7934
7929 7935 return (NULL);
7930 7936 }
7931 7937
7932 7938
7933 7939 /*
7934 7940 * handle finishing one target for fcp_finish_init
7935 7941 *
7936 7942 * return true (non-zero) if we want finish_init to continue with the
7937 7943 * next target
7938 7944 *
7939 7945 * called with the port mutex held
7940 7946 */
7941 7947 /*ARGSUSED*/
7942 7948 static int
7943 7949 fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
7944 7950 int link_cnt, int tgt_cnt, int cause)
7945 7951 {
7946 7952 int rval = 1;
7947 7953 ASSERT(pptr != NULL);
7948 7954 ASSERT(ptgt != NULL);
7949 7955
7950 7956 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7951 7957 fcp_trace, FCP_BUF_LEVEL_5, 0,
7952 7958 "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
7953 7959 ptgt->tgt_state);
7954 7960
7955 7961 ASSERT(mutex_owned(&pptr->port_mutex));
7956 7962
7957 7963 if ((pptr->port_link_cnt != link_cnt) ||
7958 7964 (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
7959 7965 /*
7960 7966 * oh oh -- another link reset or target change
7961 7967 * must have occurred while we are in here
7962 7968 */
7963 7969 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);
7964 7970
7965 7971 return (0);
7966 7972 } else {
7967 7973 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
7968 7974 }
7969 7975
7970 7976 mutex_enter(&ptgt->tgt_mutex);
7971 7977
7972 7978 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
7973 7979 /*
7974 7980 * tgt is not offline -- is it marked (i.e. needs
7975 7981 * to be offlined) ??
7976 7982 */
7977 7983 if (ptgt->tgt_state & FCP_TGT_MARK) {
7978 7984 /*
7979 7985 * this target not offline *and*
7980 7986 * marked
7981 7987 */
7982 7988 ptgt->tgt_state &= ~FCP_TGT_MARK;
7983 7989 rval = fcp_offline_target(pptr, ptgt, link_cnt,
7984 7990 tgt_cnt, 0, 0);
7985 7991 } else {
7986 7992 ptgt->tgt_state &= ~FCP_TGT_BUSY;
7987 7993
7988 7994 /* create the LUNs */
7989 7995 if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
7990 7996 ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
7991 7997 fcp_create_luns(ptgt, link_cnt, tgt_cnt,
7992 7998 cause);
7993 7999 ptgt->tgt_device_created = 1;
7994 8000 } else {
7995 8001 fcp_update_tgt_state(ptgt, FCP_RESET,
7996 8002 FCP_LUN_BUSY);
7997 8003 }
7998 8004 }
7999 8005 }
8000 8006
8001 8007 mutex_exit(&ptgt->tgt_mutex);
8002 8008
8003 8009 return (rval);
8004 8010 }
8005 8011
8006 8012
8007 8013 /*
8008 8014 * this routine is called to finish port initialization
8009 8015 *
8010 8016 * Each port has a "temp" counter -- when a state change happens (e.g.
8011 8017 * port online), the temp count is set to the number of devices in the map.
8012 8018 * Then, as each device gets "discovered", the temp counter is decremented
8013 8019 * by one. When this count reaches zero we know that all of the devices
8014 8020 * in the map have been discovered (or an error has occurred), so we can
8015 8021 * then finish initialization -- which is done by this routine (well, this
8016 8022  * and fcp_finish_tgt())
8017 8023 *
8018 8024 * acquires and releases the global mutex
8019 8025 *
8020 8026 * called with the port mutex owned
8021 8027 */
8022 8028 static void
8023 8029 fcp_finish_init(struct fcp_port *pptr)
8024 8030 {
8025 8031 #ifdef DEBUG
8026 8032 bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
8027 8033 pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
8028 8034 FCP_STACK_DEPTH);
8029 8035 #endif /* DEBUG */
8030 8036
8031 8037 ASSERT(mutex_owned(&pptr->port_mutex));
8032 8038
8033 8039 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8034 8040 fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
8035 8041 " entering; ipkt count=%d", pptr->port_ipkt_cnt);
8036 8042
8037 8043 if ((pptr->port_state & FCP_STATE_ONLINING) &&
8038 8044 !(pptr->port_state & (FCP_STATE_SUSPENDED |
8039 8045 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
8040 8046 pptr->port_state &= ~FCP_STATE_ONLINING;
8041 8047 pptr->port_state |= FCP_STATE_ONLINE;
8042 8048 }
8043 8049
8044 8050 /* Wake up threads waiting on config done */
8045 8051 cv_broadcast(&pptr->port_config_cv);
8046 8052 }
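/*
 * A minimal, generic sketch of the "temp counter" pattern described in
 * the comment above: the counter starts at the number of devices in the
 * map, each completed device decrements it, and waiters are woken once
 * it reaches zero.  The structure and helper names here are hypothetical;
 * the real bookkeeping is spread across fcp_call_finish_init() and
 * fcp_finish_init().
 */
struct sketch_discovery {
	kmutex_t	sd_mutex;
	kcondvar_t	sd_cv;
	int		sd_remaining;	/* devices still being discovered */
};

static void
sketch_device_done(struct sketch_discovery *sd)
{
	mutex_enter(&sd->sd_mutex);
	if (--sd->sd_remaining == 0) {
		cv_broadcast(&sd->sd_cv);	/* discovery complete */
	}
	mutex_exit(&sd->sd_mutex);
}

static void
sketch_wait_config_done(struct sketch_discovery *sd)
{
	mutex_enter(&sd->sd_mutex);
	while (sd->sd_remaining > 0) {
		cv_wait(&sd->sd_cv, &sd->sd_mutex);
	}
	mutex_exit(&sd->sd_mutex);
}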
8047 8053
8048 8054
8049 8055 /*
8050 8056 * called from fcp_finish_init to create the LUNs for a target
8051 8057 *
8052 8058 * called with the port mutex owned
8053 8059 */
8054 8060 static void
8055 8061 fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
8056 8062 {
8057 8063 struct fcp_lun *plun;
8058 8064 struct fcp_port *pptr;
8059 8065 child_info_t *cip = NULL;
8060 8066
8061 8067 ASSERT(ptgt != NULL);
8062 8068 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8063 8069
8064 8070 pptr = ptgt->tgt_port;
8065 8071
8066 8072 ASSERT(pptr != NULL);
8067 8073
8068 8074 /* scan all LUNs for this target */
8069 8075 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8070 8076 if (plun->lun_state & FCP_LUN_OFFLINE) {
8071 8077 continue;
8072 8078 }
8073 8079
8074 8080 if (plun->lun_state & FCP_LUN_MARK) {
8075 8081 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8076 8082 fcp_trace, FCP_BUF_LEVEL_2, 0,
8077 8083 "fcp_create_luns: offlining marked LUN!");
8078 8084 fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
8079 8085 continue;
8080 8086 }
8081 8087
8082 8088 plun->lun_state &= ~FCP_LUN_BUSY;
8083 8089
8084 8090 /*
8085 8091 		 * There are conditions in which the FCP_LUN_INIT flag is cleared
8086 8092 		 * but we have a valid plun->lun_cip. To cover this case, also
8087 8093 * CLEAR_BUSY whenever we have a valid lun_cip.
8088 8094 */
8089 8095 if (plun->lun_mpxio && plun->lun_cip &&
8090 8096 (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
8091 8097 FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8092 8098 0, 0))) {
8093 8099 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8094 8100 fcp_trace, FCP_BUF_LEVEL_2, 0,
8095 8101 "fcp_create_luns: enable lun %p failed!",
8096 8102 plun);
8097 8103 }
8098 8104
8099 8105 if (plun->lun_state & FCP_LUN_INIT &&
8100 8106 !(plun->lun_state & FCP_LUN_CHANGED)) {
8101 8107 continue;
8102 8108 }
8103 8109
8104 8110 if (cause == FCP_CAUSE_USER_CREATE) {
8105 8111 continue;
8106 8112 }
8107 8113
8108 8114 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8109 8115 fcp_trace, FCP_BUF_LEVEL_6, 0,
8110 8116 "create_luns: passing ONLINE elem to HP thread");
8111 8117
8112 8118 /*
8113 8119 		 * If the lun has changed, prepare to offline the old path.
8114 8120 		 * Do not offline the old path right now, since it may
8115 8121 		 * still be open.
8116 8122 */
8117 8123 if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
8118 8124 fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8119 8125 }
8120 8126
8121 8127 /* pass an ONLINE element to the hotplug thread */
8122 8128 if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
8123 8129 link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {
8124 8130
8125 8131 /*
8126 8132 			 * We cannot attach synchronously (i.e. pass
8127 8133 			 * NDI_ONLINE_ATTACH) here as we might be
8128 8134 * coming from an interrupt or callback
8129 8135 * thread.
8130 8136 */
8131 8137 if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
8132 8138 link_cnt, tgt_cnt, 0, 0)) {
8133 8139 fcp_log(CE_CONT, pptr->port_dip,
8134 8140 "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
8135 8141 plun->lun_tgt->tgt_d_id, plun->lun_num);
8136 8142 }
8137 8143 }
8138 8144 }
8139 8145 }
8140 8146
8141 8147
8142 8148 /*
8143 8149 * function to online/offline devices
8144 8150 */
8145 8151 static int
8146 8152 fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
8147 8153 int online, int lcount, int tcount, int flags)
8148 8154 {
8149 8155 int rval = NDI_FAILURE;
8150 8156 int circ;
8151 8157 child_info_t *ccip;
8152 8158 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
8153 8159 int is_mpxio = pptr->port_mpxio;
8154 - dev_info_t *cdip, *pdip;
8155 - char *devname;
8156 8160
8157 8161 if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
8158 8162 /*
8159 8163 * When this event gets serviced, lun_cip and lun_mpxio
8160 8164 * has changed, so it should be invalidated now.
8161 8165 */
8162 8166 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
8163 8167 FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
8164 8168 "plun: %p, cip: %p, what:%d", plun, cip, online);
8165 8169 return (rval);
8166 8170 }
8167 8171
8168 8172 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8169 8173 fcp_trace, FCP_BUF_LEVEL_2, 0,
8170 8174 "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
8171 8175 "flags=%x mpxio=%x\n",
8172 8176 plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
8173 8177 plun->lun_mpxio);
8174 8178
8175 8179 /*
8176 8180 * lun_mpxio needs checking here because we can end up in a race
8177 8181 * condition where this task has been dispatched while lun_mpxio is
8178 8182 * set, but an earlier FCP_ONLINE task for the same LUN tried to
8179 8183 * enable MPXIO for the LUN, but was unable to, and hence cleared
8180 8184 * the flag. We rely on the serialization of the tasks here. We return
8181 8185 * NDI_SUCCESS so any callers continue without reporting spurious
8182 8186 * errors, and the still think we're an MPXIO LUN.
8183 8187 */
8184 8188
8185 8189 if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
8186 8190 online == FCP_MPXIO_PATH_SET_BUSY) {
8187 8191 if (plun->lun_mpxio) {
8188 8192 rval = fcp_update_mpxio_path(plun, cip, online);
8189 8193 } else {
8190 8194 rval = NDI_SUCCESS;
8191 8195 }
8192 8196 return (rval);
8193 8197 }
8194 8198
8195 - /*
8196 - * Explicit devfs_clean() due to ndi_devi_offline() not
8197 - * executing devfs_clean() if parent lock is held.
8198 - */
8199 - ASSERT(!servicing_interrupt());
8200 - if (online == FCP_OFFLINE) {
8201 - if (plun->lun_mpxio == 0) {
8202 - if (plun->lun_cip == cip) {
8203 - cdip = DIP(plun->lun_cip);
8204 - } else {
8205 - cdip = DIP(cip);
8206 - }
8207 - } else if ((plun->lun_cip == cip) && plun->lun_cip) {
8208 - cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8209 - } else if ((plun->lun_cip != cip) && cip) {
8210 - /*
8211 - * This means a DTYPE/GUID change, we shall get the
8212 - * dip of the old cip instead of the current lun_cip.
8213 - */
8214 - cdip = mdi_pi_get_client(PIP(cip));
8215 - }
8216 - if (cdip) {
8217 - if (i_ddi_devi_attached(cdip)) {
8218 - pdip = ddi_get_parent(cdip);
8219 - devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
8220 - ndi_devi_enter(pdip, &circ);
8221 - (void) ddi_deviname(cdip, devname);
8222 - /*
8223 - * Release parent lock before calling
8224 - * devfs_clean().
8225 - */
8226 - ndi_devi_exit(pdip, circ);
8227 - (void) devfs_clean(pdip, devname + 1,
8228 - DV_CLEAN_FORCE);
8229 - kmem_free(devname, MAXNAMELEN + 1);
8230 - }
8231 - }
8232 - }
8233 -
8234 8199 if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
8235 8200 return (NDI_FAILURE);
8236 8201 }
8237 8202
8238 8203 if (is_mpxio) {
8239 8204 mdi_devi_enter(pptr->port_dip, &circ);
8240 8205 } else {
8241 8206 ndi_devi_enter(pptr->port_dip, &circ);
8242 8207 }
8243 8208
8244 8209 mutex_enter(&pptr->port_mutex);
8245 8210 mutex_enter(&plun->lun_mutex);
8246 8211
8247 8212 if (online == FCP_ONLINE) {
8248 8213 ccip = fcp_get_cip(plun, cip, lcount, tcount);
8249 8214 if (ccip == NULL) {
8250 8215 goto fail;
8251 8216 }
8252 8217 } else {
8253 8218 if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
8254 8219 goto fail;
8255 8220 }
8256 8221 ccip = cip;
8257 8222 }
8258 8223
8259 8224 if (online == FCP_ONLINE) {
8260 8225 rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
8261 8226 &circ);
8262 8227 fc_ulp_log_device_event(pptr->port_fp_handle,
8263 8228 FC_ULP_DEVICE_ONLINE);
8264 8229 } else {
8265 8230 rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
8266 8231 &circ);
8267 8232 fc_ulp_log_device_event(pptr->port_fp_handle,
8268 8233 FC_ULP_DEVICE_OFFLINE);
8269 8234 }
8270 8235
8271 8236 fail: mutex_exit(&plun->lun_mutex);
8272 8237 mutex_exit(&pptr->port_mutex);
8273 8238
8274 8239 if (is_mpxio) {
8275 8240 mdi_devi_exit(pptr->port_dip, circ);
8276 8241 } else {
8277 8242 ndi_devi_exit(pptr->port_dip, circ);
8278 8243 }
8279 8244
8280 8245 fc_ulp_idle_port(pptr->port_fp_handle);
8281 8246
8282 8247 return (rval);
8283 8248 }
8284 8249
8285 8250
8286 8251 /*
8287 8252 * take a target offline by taking all of its LUNs offline
8288 8253 */
8289 8254 /*ARGSUSED*/
8290 8255 static int
8291 8256 fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8292 8257 int link_cnt, int tgt_cnt, int nowait, int flags)
8293 8258 {
8294 8259 struct fcp_tgt_elem *elem;
8295 8260
8296 8261 ASSERT(mutex_owned(&pptr->port_mutex));
8297 8262 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8298 8263
8299 8264 ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));
8300 8265
8301 8266 if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
8302 8267 ptgt->tgt_change_cnt)) {
8303 8268 mutex_exit(&ptgt->tgt_mutex);
8304 8269 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
8305 8270 mutex_enter(&ptgt->tgt_mutex);
8306 8271
8307 8272 return (0);
8308 8273 }
8309 8274
8310 8275 ptgt->tgt_pd_handle = NULL;
8311 8276 mutex_exit(&ptgt->tgt_mutex);
8312 8277 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
8313 8278 mutex_enter(&ptgt->tgt_mutex);
8314 8279
8315 8280 tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
8316 8281
8317 8282 if (ptgt->tgt_tcap &&
8318 8283 (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8319 8284 elem->flags = flags;
8320 8285 elem->time = fcp_watchdog_time;
8321 8286 if (nowait == 0) {
8322 8287 elem->time += fcp_offline_delay;
8323 8288 }
8324 8289 elem->ptgt = ptgt;
8325 8290 elem->link_cnt = link_cnt;
8326 8291 elem->tgt_cnt = tgt_cnt;
8327 8292 elem->next = pptr->port_offline_tgts;
8328 8293 pptr->port_offline_tgts = elem;
8329 8294 } else {
8330 8295 fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
8331 8296 }
8332 8297
8333 8298 return (1);
8334 8299 }
8335 8300
8336 8301
8337 8302 static void
8338 8303 fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8339 8304 int link_cnt, int tgt_cnt, int flags)
8340 8305 {
8341 8306 ASSERT(mutex_owned(&pptr->port_mutex));
8342 8307 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8343 8308
8344 8309 fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
8345 8310 ptgt->tgt_state = FCP_TGT_OFFLINE;
8346 8311 ptgt->tgt_pd_handle = NULL;
8347 8312 fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
8348 8313 }
8349 8314
8350 8315
8351 8316 static void
8352 8317 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
8353 8318 int flags)
8354 8319 {
8355 8320 struct fcp_lun *plun;
8356 8321
8357 8322 ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
8358 8323 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8359 8324
8360 8325 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8361 8326 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
8362 8327 fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
8363 8328 }
8364 8329 }
8365 8330 }
8366 8331
8367 8332
8368 8333 /*
8369 8334 * take a LUN offline
8370 8335 *
8371 8336 * enters and leaves with the target mutex held, releasing it in the process
8372 8337 *
8373 8338 * allocates memory in non-sleep mode
8374 8339 */
8375 8340 static void
8376 8341 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8377 8342 int nowait, int flags)
8378 8343 {
8379 8344 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
8380 8345 struct fcp_lun_elem *elem;
8381 8346
8382 8347 ASSERT(plun != NULL);
8383 8348 ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8384 8349
8385 8350 if (nowait) {
8386 8351 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8387 8352 return;
8388 8353 }
8389 8354
8390 8355 if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8391 8356 elem->flags = flags;
8392 8357 elem->time = fcp_watchdog_time;
8393 8358 if (nowait == 0) {
8394 8359 elem->time += fcp_offline_delay;
8395 8360 }
8396 8361 elem->plun = plun;
8397 8362 elem->link_cnt = link_cnt;
8398 8363 elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
8399 8364 elem->next = pptr->port_offline_luns;
8400 8365 pptr->port_offline_luns = elem;
8401 8366 } else {
8402 8367 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8403 8368 }
8404 8369 }
8405 8370
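Reviewer aside: fcp_offline_lun() above either offlines the LUN immediately or queues an fcp_lun_elem stamped with fcp_watchdog_time plus fcp_offline_delay, which fcp_scan_offline_luns() drains later. The standalone sketch below (hypothetical names, plain user-land C, not part of this patch) illustrates that deadline-stamped deferral pattern.

/*
 * Illustrative sketch only: a deadline-stamped work element drained by
 * a periodic scan, mirroring fcp_offline_lun()/fcp_scan_offline_luns().
 * All names below are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

struct work_elem {
	struct work_elem *next;
	long deadline;			/* absolute watchdog time to act at */
	int id;
};

static struct work_elem *work_list;
static long watchdog_time;		/* advanced by a periodic tick */
static const long offline_delay = 20;	/* stand-in for fcp_offline_delay */

/* Queue work to run now (nowait) or after offline_delay ticks. */
static void
queue_work(int id, int nowait)
{
	struct work_elem *e = calloc(1, sizeof (*e));

	if (e == NULL)
		return;			/* real code falls back to "offline now" */
	e->id = id;
	e->deadline = watchdog_time + (nowait ? 0 : offline_delay);
	e->next = work_list;
	work_list = e;
}

/* Periodic scan: run anything whose deadline has passed, keep the rest. */
static void
scan_work(void)
{
	struct work_elem **pp = &work_list;

	while (*pp != NULL) {
		struct work_elem *e = *pp;

		if (e->deadline <= watchdog_time) {
			printf("offlining %d at tick %ld\n", e->id, watchdog_time);
			*pp = e->next;
			free(e);
		} else {
			pp = &e->next;
		}
	}
}

int
main(void)
{
	queue_work(1, 1);		/* immediate */
	queue_work(2, 0);		/* delayed by offline_delay */
	for (watchdog_time = 0; watchdog_time <= 25; watchdog_time++)
		scan_work();
	return (0);
}

The point of the pattern is that a nowait caller pays no delay, while a delayed offline survives on the list until the watchdog tick catches up with its deadline.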
8406 8371
8407 8372 static void
8408 8373 fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
8409 8374 {
8410 8375 struct fcp_pkt *head = NULL;
8411 8376
8412 8377 ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8413 8378
8414 8379 mutex_exit(&LUN_TGT->tgt_mutex);
8415 8380
8416 8381 head = fcp_scan_commands(plun);
8417 8382 if (head != NULL) {
8418 8383 fcp_abort_commands(head, LUN_PORT);
8419 8384 }
8420 8385
8421 8386 mutex_enter(&LUN_TGT->tgt_mutex);
8422 8387
8423 8388 if (plun->lun_cip && plun->lun_mpxio) {
8424 8389 /*
8425 8390			 * Intimate MPxIO that the lun busy condition is cleared
8426 8391 */
8427 8392 if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
8428 8393 FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8429 8394 0, 0)) {
8430 8395 fcp_log(CE_NOTE, LUN_PORT->port_dip,
8431 8396 "Can not ENABLE LUN; D_ID=%x, LUN=%x",
8432 8397 LUN_TGT->tgt_d_id, plun->lun_num);
8433 8398 }
8434 8399 /*
8435 8400 * Intimate MPxIO that the lun is now marked for offline
8436 8401 */
8437 8402 mutex_exit(&LUN_TGT->tgt_mutex);
8438 8403 (void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
8439 8404 mutex_enter(&LUN_TGT->tgt_mutex);
8440 8405 }
8441 8406 }
8442 8407
8443 8408 static void
8444 8409 fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8445 8410 int flags)
8446 8411 {
8447 8412 ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8448 8413
8449 8414 mutex_exit(&LUN_TGT->tgt_mutex);
8450 8415 fcp_update_offline_flags(plun);
8451 8416 mutex_enter(&LUN_TGT->tgt_mutex);
8452 8417
8453 8418 fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8454 8419
8455 8420 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
8456 8421 fcp_trace, FCP_BUF_LEVEL_4, 0,
8457 8422 "offline_lun: passing OFFLINE elem to HP thread");
8458 8423
8459 8424 if (plun->lun_cip) {
8460 8425 fcp_log(CE_NOTE, LUN_PORT->port_dip,
8461 8426 "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
8462 8427 plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
8463 8428 LUN_TGT->tgt_trace);
8464 8429
8465 8430 if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
8466 8431 link_cnt, tgt_cnt, flags, 0)) {
8467 8432 fcp_log(CE_CONT, LUN_PORT->port_dip,
8468 8433 "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
8469 8434 LUN_TGT->tgt_d_id, plun->lun_num);
8470 8435 }
8471 8436 }
8472 8437 }
8473 8438
8474 8439 static void
8475 8440 fcp_scan_offline_luns(struct fcp_port *pptr)
8476 8441 {
8477 8442 struct fcp_lun_elem *elem;
8478 8443 struct fcp_lun_elem *prev;
8479 8444 struct fcp_lun_elem *next;
8480 8445
8481 8446 ASSERT(MUTEX_HELD(&pptr->port_mutex));
8482 8447
8483 8448 prev = NULL;
8484 8449 elem = pptr->port_offline_luns;
8485 8450 while (elem) {
8486 8451 next = elem->next;
8487 8452 if (elem->time <= fcp_watchdog_time) {
8488 8453 int changed = 1;
8489 8454 struct fcp_tgt *ptgt = elem->plun->lun_tgt;
8490 8455
8491 8456 mutex_enter(&ptgt->tgt_mutex);
8492 8457 if (pptr->port_link_cnt == elem->link_cnt &&
8493 8458 ptgt->tgt_change_cnt == elem->tgt_cnt) {
8494 8459 changed = 0;
8495 8460 }
8496 8461
8497 8462 if (!changed &&
8498 8463 !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
8499 8464 fcp_offline_lun_now(elem->plun,
8500 8465 elem->link_cnt, elem->tgt_cnt, elem->flags);
8501 8466 }
8502 8467 mutex_exit(&ptgt->tgt_mutex);
8503 8468
8504 8469 kmem_free(elem, sizeof (*elem));
8505 8470
8506 8471 if (prev) {
8507 8472 prev->next = next;
8508 8473 } else {
8509 8474 pptr->port_offline_luns = next;
8510 8475 }
8511 8476 } else {
8512 8477 prev = elem;
8513 8478 }
8514 8479 elem = next;
8515 8480 }
8516 8481 }
8517 8482
8518 8483
8519 8484 static void
8520 8485 fcp_scan_offline_tgts(struct fcp_port *pptr)
8521 8486 {
8522 8487 struct fcp_tgt_elem *elem;
8523 8488 struct fcp_tgt_elem *prev;
8524 8489 struct fcp_tgt_elem *next;
8525 8490
8526 8491 ASSERT(MUTEX_HELD(&pptr->port_mutex));
8527 8492
8528 8493 prev = NULL;
8529 8494 elem = pptr->port_offline_tgts;
8530 8495 while (elem) {
8531 8496 next = elem->next;
8532 8497 if (elem->time <= fcp_watchdog_time) {
8533 8498 int outdated = 1;
8534 8499 struct fcp_tgt *ptgt = elem->ptgt;
8535 8500
8536 8501 mutex_enter(&ptgt->tgt_mutex);
8537 8502
8538 8503 if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
8539 8504 /* No change on tgt since elem was created. */
8540 8505 outdated = 0;
8541 8506 } else if (ptgt->tgt_change_cnt == elem->tgt_cnt + 1 &&
8542 8507 pptr->port_link_cnt == elem->link_cnt + 1 &&
8543 8508 ptgt->tgt_statec_cause == FCP_CAUSE_LINK_DOWN) {
8544 8509 /*
8545 8510 * Exactly one thing happened to the target
8546 8511				 * in between: the local port went offline.
8547 8512 * For fp the remote port is already gone so
8548 8513 * it will not tell us again to offline the
8549 8514 * target. We must offline it now.
8550 8515 */
8551 8516 outdated = 0;
8552 8517 }
8553 8518
8554 8519 if (!outdated && !(ptgt->tgt_state &
8555 8520 FCP_TGT_OFFLINE)) {
8556 8521 fcp_offline_target_now(pptr,
8557 8522 ptgt, elem->link_cnt, elem->tgt_cnt,
8558 8523 elem->flags);
8559 8524 }
8560 8525
8561 8526 mutex_exit(&ptgt->tgt_mutex);
8562 8527
8563 8528 kmem_free(elem, sizeof (*elem));
8564 8529
8565 8530 if (prev) {
8566 8531 prev->next = next;
8567 8532 } else {
8568 8533 pptr->port_offline_tgts = next;
8569 8534 }
8570 8535 } else {
8571 8536 prev = elem;
8572 8537 }
8573 8538 elem = next;
8574 8539 }
8575 8540 }
8576 8541
8577 8542
8578 8543 static void
8579 8544 fcp_update_offline_flags(struct fcp_lun *plun)
8580 8545 {
8581 8546 struct fcp_port *pptr = LUN_PORT;
8582 8547 ASSERT(plun != NULL);
8583 8548
8584 8549 mutex_enter(&LUN_TGT->tgt_mutex);
8585 8550 plun->lun_state |= FCP_LUN_OFFLINE;
8586 8551 plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);
8587 8552
8588 8553 mutex_enter(&plun->lun_mutex);
8589 8554 if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
8590 8555 dev_info_t *cdip = NULL;
8591 8556
8592 8557 mutex_exit(&LUN_TGT->tgt_mutex);
8593 8558
8594 8559 if (plun->lun_mpxio == 0) {
8595 8560 cdip = DIP(plun->lun_cip);
8596 8561 } else if (plun->lun_cip) {
8597 8562 cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8598 8563 }
8599 8564
8600 8565 mutex_exit(&plun->lun_mutex);
8601 8566 if (cdip) {
8602 8567 (void) ndi_event_retrieve_cookie(
8603 8568 pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
8604 8569 &fcp_remove_eid, NDI_EVENT_NOPASS);
8605 8570 (void) ndi_event_run_callbacks(
8606 8571 pptr->port_ndi_event_hdl, cdip,
8607 8572 fcp_remove_eid, NULL);
8608 8573 }
8609 8574 } else {
8610 8575 mutex_exit(&plun->lun_mutex);
8611 8576 mutex_exit(&LUN_TGT->tgt_mutex);
8612 8577 }
8613 8578 }
8614 8579
8615 8580
8616 8581 /*
8617 8582 * Scan all of the command pkts for this port, moving pkts that
8618 8583 * match our LUN onto our own list (headed by "head")
8619 8584 */
8620 8585 static struct fcp_pkt *
8621 8586 fcp_scan_commands(struct fcp_lun *plun)
8622 8587 {
8623 8588 struct fcp_port *pptr = LUN_PORT;
8624 8589
8625 8590 struct fcp_pkt *cmd = NULL; /* pkt cmd ptr */
8626 8591 struct fcp_pkt *ncmd = NULL; /* next pkt ptr */
8627 8592 struct fcp_pkt *pcmd = NULL; /* the previous command */
8628 8593
8629 8594 struct fcp_pkt *head = NULL; /* head of our list */
8630 8595 struct fcp_pkt *tail = NULL; /* tail of our list */
8631 8596
8632 8597 int cmds_found = 0;
8633 8598
8634 8599 mutex_enter(&pptr->port_pkt_mutex);
8635 8600 for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
8636 8601 struct fcp_lun *tlun =
8637 8602 ADDR2LUN(&cmd->cmd_pkt->pkt_address);
8638 8603
8639 8604 ncmd = cmd->cmd_next; /* set next command */
8640 8605
8641 8606 /*
8642 8607 * if this pkt is for a different LUN or the
8643 8608 * command is sent down, skip it.
8644 8609 */
8645 8610 if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
8646 8611 (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
8647 8612 pcmd = cmd;
8648 8613 continue;
8649 8614 }
8650 8615 cmds_found++;
8651 8616 if (pcmd != NULL) {
8652 8617 ASSERT(pptr->port_pkt_head != cmd);
8653 8618 pcmd->cmd_next = cmd->cmd_next;
8654 8619 } else {
8655 8620 ASSERT(cmd == pptr->port_pkt_head);
8656 8621 pptr->port_pkt_head = cmd->cmd_next;
8657 8622 }
8658 8623
8659 8624 if (cmd == pptr->port_pkt_tail) {
8660 8625 pptr->port_pkt_tail = pcmd;
8661 8626 if (pcmd) {
8662 8627 pcmd->cmd_next = NULL;
8663 8628 }
8664 8629 }
8665 8630
8666 8631 if (head == NULL) {
8667 8632 head = tail = cmd;
8668 8633 } else {
8669 8634 ASSERT(tail != NULL);
8670 8635
8671 8636 tail->cmd_next = cmd;
8672 8637 tail = cmd;
8673 8638 }
8674 8639 cmd->cmd_next = NULL;
8675 8640 }
8676 8641 mutex_exit(&pptr->port_pkt_mutex);
8677 8642
8678 8643 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8679 8644 fcp_trace, FCP_BUF_LEVEL_8, 0,
8680 8645 "scan commands: %d cmd(s) found", cmds_found);
8681 8646
8682 8647 return (head);
8683 8648 }
8684 8649
8685 8650
8686 8651 /*
8687 8652 * Abort all the commands in the command queue
8688 8653 */
8689 8654 static void
8690 8655 fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
8691 8656 {
8692 8657 struct fcp_pkt *cmd = NULL; /* pkt cmd ptr */
8693 8658 struct fcp_pkt *ncmd = NULL; /* next pkt ptr */
8694 8659
8695 8660 ASSERT(mutex_owned(&pptr->port_mutex));
8696 8661
8697 8662	/* scan through the pkts and invalidate them */
8698 8663 for (cmd = head; cmd != NULL; cmd = ncmd) {
8699 8664 struct scsi_pkt *pkt = cmd->cmd_pkt;
8700 8665
8701 8666 ncmd = cmd->cmd_next;
8702 8667 ASSERT(pkt != NULL);
8703 8668
8704 8669 /*
8705 8670		 * The lun is going to be marked offline. Tell the
8706 8671		 * target driver not to requeue or retry this command,
8707 8672		 * as the device is going to be offlined soon.
8708 8673 */
8709 8674 pkt->pkt_reason = CMD_DEV_GONE;
8710 8675 pkt->pkt_statistics = 0;
8711 8676 pkt->pkt_state = 0;
8712 8677
8713 8678 /* reset cmd flags/state */
8714 8679 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
8715 8680 cmd->cmd_state = FCP_PKT_IDLE;
8716 8681
8717 8682 /*
8718 8683 * ensure we have a packet completion routine,
8719 8684 * then call it.
8720 8685 */
8721 8686 ASSERT(pkt->pkt_comp != NULL);
8722 8687
8723 8688 mutex_exit(&pptr->port_mutex);
8724 8689 fcp_post_callback(cmd);
8725 8690 mutex_enter(&pptr->port_mutex);
8726 8691 }
8727 8692 }
8728 8693
8729 8694
8730 8695 /*
8731 8696 * the pkt_comp callback for command packets
8732 8697 */
8733 8698 static void
8734 8699 fcp_cmd_callback(fc_packet_t *fpkt)
8735 8700 {
8736 8701 struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
8737 8702 struct scsi_pkt *pkt = cmd->cmd_pkt;
8738 8703 struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
8739 8704
8740 8705 ASSERT(cmd->cmd_state != FCP_PKT_IDLE);
8741 8706
8742 8707 if (cmd->cmd_state == FCP_PKT_IDLE) {
8743 8708 cmn_err(CE_PANIC, "Packet already completed %p",
8744 8709 (void *)cmd);
8745 8710 }
8746 8711
8747 8712 /*
8748 8713 * Watch thread should be freeing the packet, ignore the pkt.
8749 8714 */
8750 8715 if (cmd->cmd_state == FCP_PKT_ABORTING) {
8751 8716 fcp_log(CE_CONT, pptr->port_dip,
8752 8717 "!FCP: Pkt completed while aborting\n");
8753 8718 return;
8754 8719 }
8755 8720 cmd->cmd_state = FCP_PKT_IDLE;
8756 8721
8757 8722 fcp_complete_pkt(fpkt);
8758 8723
8759 8724 #ifdef DEBUG
8760 8725 mutex_enter(&pptr->port_pkt_mutex);
8761 8726 pptr->port_npkts--;
8762 8727 mutex_exit(&pptr->port_pkt_mutex);
8763 8728 #endif /* DEBUG */
8764 8729
8765 8730 fcp_post_callback(cmd);
8766 8731 }
8767 8732
8768 8733
8769 8734 static void
8770 8735 fcp_complete_pkt(fc_packet_t *fpkt)
8771 8736 {
8772 8737 int error = 0;
8773 8738 struct fcp_pkt *cmd = (struct fcp_pkt *)
8774 8739 fpkt->pkt_ulp_private;
8775 8740 struct scsi_pkt *pkt = cmd->cmd_pkt;
8776 8741 struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
8777 8742 struct fcp_lun *plun;
8778 8743 struct fcp_tgt *ptgt;
8779 8744 struct fcp_rsp *rsp;
8780 8745 struct scsi_address save;
8781 8746
8782 8747 #ifdef DEBUG
8783 8748 save = pkt->pkt_address;
8784 8749 #endif /* DEBUG */
8785 8750
8786 8751 rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
8787 8752
8788 8753 if (fpkt->pkt_state == FC_PKT_SUCCESS) {
8789 8754 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8790 8755 FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
8791 8756 sizeof (struct fcp_rsp));
8792 8757 }
8793 8758
8794 8759 pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
8795 8760 STATE_SENT_CMD | STATE_GOT_STATUS;
8796 8761
8797 8762 pkt->pkt_resid = 0;
8798 8763
8799 8764 if (fpkt->pkt_datalen) {
8800 8765 pkt->pkt_state |= STATE_XFERRED_DATA;
8801 8766 if (fpkt->pkt_data_resid) {
8802 8767 error++;
8803 8768 }
8804 8769 }
8805 8770
8806 8771 if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
8807 8772 rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
8808 8773 /*
8809 8774			 * The next two checks make sure that the command
8810 8775			 * is retried if it came back with a check condition
8811 8776			 * but carries neither a valid response nor
8812 8777			 * sense data.
8813 8778 */
8814 8779 if (!rsp->fcp_u.fcp_status.rsp_len_set &&
8815 8780 !rsp->fcp_u.fcp_status.sense_len_set) {
8816 8781 pkt->pkt_state &= ~STATE_XFERRED_DATA;
8817 8782 pkt->pkt_resid = cmd->cmd_dmacount;
8818 8783 }
8819 8784 }
8820 8785
8821 8786 if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
8822 8787 return;
8823 8788 }
8824 8789
8825 8790 plun = ADDR2LUN(&pkt->pkt_address);
8826 8791 ptgt = plun->lun_tgt;
8827 8792 ASSERT(ptgt != NULL);
8828 8793
8829 8794 /*
8830 8795 * Update the transfer resid, if appropriate
8831 8796 */
8832 8797 if (rsp->fcp_u.fcp_status.resid_over ||
8833 8798 rsp->fcp_u.fcp_status.resid_under) {
8834 8799 pkt->pkt_resid = rsp->fcp_resid;
8835 8800 }
8836 8801
8837 8802 /*
8838 8803 * First see if we got a FCP protocol error.
8839 8804		 * First see if we got an FCP protocol error.
8840 8805 if (rsp->fcp_u.fcp_status.rsp_len_set) {
8841 8806 struct fcp_rsp_info *bep;
8842 8807 bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
8843 8808 sizeof (struct fcp_rsp));
8844 8809
8845 8810 if (fcp_validate_fcp_response(rsp, pptr) !=
8846 8811 FC_SUCCESS) {
8847 8812 pkt->pkt_reason = CMD_CMPLT;
8848 8813 *(pkt->pkt_scbp) = STATUS_CHECK;
8849 8814
8850 8815 fcp_log(CE_WARN, pptr->port_dip,
8851 8816 "!SCSI command to d_id=0x%x lun=0x%x"
8852 8817 " failed, Bad FCP response values:"
8853 8818 " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8854 8819 " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8855 8820 ptgt->tgt_d_id, plun->lun_num,
8856 8821 rsp->reserved_0, rsp->reserved_1,
8857 8822 rsp->fcp_u.fcp_status.reserved_0,
8858 8823 rsp->fcp_u.fcp_status.reserved_1,
8859 8824 rsp->fcp_response_len, rsp->fcp_sense_len);
8860 8825
8861 8826 return;
8862 8827 }
8863 8828
8864 8829 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8865 8830 FCP_CP_IN(fpkt->pkt_resp +
8866 8831 sizeof (struct fcp_rsp), bep,
8867 8832 fpkt->pkt_resp_acc,
8868 8833 sizeof (struct fcp_rsp_info));
8869 8834 }
8870 8835
8871 8836 if (bep->rsp_code != FCP_NO_FAILURE) {
8872 8837 child_info_t *cip;
8873 8838
8874 8839 pkt->pkt_reason = CMD_TRAN_ERR;
8875 8840
8876 8841 mutex_enter(&plun->lun_mutex);
8877 8842 cip = plun->lun_cip;
8878 8843 mutex_exit(&plun->lun_mutex);
8879 8844
8880 8845 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8881 8846 fcp_trace, FCP_BUF_LEVEL_2, 0,
8882 8847 "FCP response error on cmd=%p"
8883 8848 " target=0x%x, cip=%p", cmd,
8884 8849 ptgt->tgt_d_id, cip);
8885 8850 }
8886 8851 }
8887 8852
8888 8853 /*
8889 8854 * See if we got a SCSI error with sense data
8890 8855 */
8891 8856 if (rsp->fcp_u.fcp_status.sense_len_set) {
8892 8857 uchar_t rqlen;
8893 8858 caddr_t sense_from;
8894 8859 child_info_t *cip;
8895 8860 timeout_id_t tid;
8896 8861 struct scsi_arq_status *arq;
8897 8862 struct scsi_extended_sense *sense_to;
8898 8863
8899 8864 arq = (struct scsi_arq_status *)pkt->pkt_scbp;
8900 8865 sense_to = &arq->sts_sensedata;
8901 8866
8902 8867 rqlen = (uchar_t)min(rsp->fcp_sense_len,
8903 8868 sizeof (struct scsi_extended_sense));
8904 8869
8905 8870 sense_from = (caddr_t)fpkt->pkt_resp +
8906 8871 sizeof (struct fcp_rsp) + rsp->fcp_response_len;
8907 8872
8908 8873 if (fcp_validate_fcp_response(rsp, pptr) !=
8909 8874 FC_SUCCESS) {
8910 8875 pkt->pkt_reason = CMD_CMPLT;
8911 8876 *(pkt->pkt_scbp) = STATUS_CHECK;
8912 8877
8913 8878 fcp_log(CE_WARN, pptr->port_dip,
8914 8879 "!SCSI command to d_id=0x%x lun=0x%x"
8915 8880 " failed, Bad FCP response values:"
8916 8881 " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8917 8882 " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8918 8883 ptgt->tgt_d_id, plun->lun_num,
8919 8884 rsp->reserved_0, rsp->reserved_1,
8920 8885 rsp->fcp_u.fcp_status.reserved_0,
8921 8886 rsp->fcp_u.fcp_status.reserved_1,
8922 8887 rsp->fcp_response_len, rsp->fcp_sense_len);
8923 8888
8924 8889 return;
8925 8890 }
8926 8891
8927 8892 /*
8928 8893 * copy in sense information
8929 8894 */
8930 8895 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8931 8896 FCP_CP_IN(sense_from, sense_to,
8932 8897 fpkt->pkt_resp_acc, rqlen);
8933 8898 } else {
8934 8899 bcopy(sense_from, sense_to, rqlen);
8935 8900 }
8936 8901
8937 8902 if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
8938 8903 (FCP_SENSE_NO_LUN(sense_to))) {
8939 8904 mutex_enter(&ptgt->tgt_mutex);
8940 8905 if (ptgt->tgt_tid == NULL) {
8941 8906 /*
8942 8907 * Kick off rediscovery
8943 8908 */
8944 8909 tid = timeout(fcp_reconfigure_luns,
8945 8910 (caddr_t)ptgt, drv_usectohz(1));
8946 8911
8947 8912 ptgt->tgt_tid = tid;
8948 8913 ptgt->tgt_state |= FCP_TGT_BUSY;
8949 8914 }
8950 8915 mutex_exit(&ptgt->tgt_mutex);
8951 8916 if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
8952 8917 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8953 8918 fcp_trace, FCP_BUF_LEVEL_3, 0,
8954 8919 "!FCP: Report Lun Has Changed"
8955 8920 " target=%x", ptgt->tgt_d_id);
8956 8921 } else if (FCP_SENSE_NO_LUN(sense_to)) {
8957 8922 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8958 8923 fcp_trace, FCP_BUF_LEVEL_3, 0,
8959 8924 "!FCP: LU Not Supported"
8960 8925 " target=%x", ptgt->tgt_d_id);
8961 8926 }
8962 8927 }
8963 8928 ASSERT(pkt->pkt_scbp != NULL);
8964 8929
8965 8930 pkt->pkt_state |= STATE_ARQ_DONE;
8966 8931
8967 8932 arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;
8968 8933
8969 8934 *((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
8970 8935 arq->sts_rqpkt_reason = 0;
8971 8936 arq->sts_rqpkt_statistics = 0;
8972 8937
8973 8938 arq->sts_rqpkt_state = STATE_GOT_BUS |
8974 8939 STATE_GOT_TARGET | STATE_SENT_CMD |
8975 8940 STATE_GOT_STATUS | STATE_ARQ_DONE |
8976 8941 STATE_XFERRED_DATA;
8977 8942
8978 8943 mutex_enter(&plun->lun_mutex);
8979 8944 cip = plun->lun_cip;
8980 8945 mutex_exit(&plun->lun_mutex);
8981 8946
8982 8947 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8983 8948 fcp_trace, FCP_BUF_LEVEL_8, 0,
8984 8949 "SCSI Check condition on cmd=%p target=0x%x"
8985 8950 " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
8986 8951 " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
8987 8952 cmd->cmd_fcp_cmd.fcp_cdb[0],
8988 8953 rsp->fcp_u.fcp_status.scsi_status,
8989 8954 sense_to->es_key, sense_to->es_add_code,
8990 8955 sense_to->es_qual_code);
8991 8956 }
8992 8957 } else {
8993 8958 plun = ADDR2LUN(&pkt->pkt_address);
8994 8959 ptgt = plun->lun_tgt;
8995 8960 ASSERT(ptgt != NULL);
8996 8961
8997 8962 /*
8998 8963 * Work harder to translate errors into target driver
8999 8964 * understandable ones. Note with despair that the target
9000 8965		 * drivers don't decode pkt_state and pkt_reason exhaustively.
9001 8966		 * They resort to using the big hammer most often, which
9002 8967		 * may not get fixed in the lifetime of this driver.
9003 8968 */
9004 8969 pkt->pkt_state = 0;
9005 8970 pkt->pkt_statistics = 0;
9006 8971
9007 8972 switch (fpkt->pkt_state) {
9008 8973 case FC_PKT_TRAN_ERROR:
9009 8974 switch (fpkt->pkt_reason) {
9010 8975 case FC_REASON_OVERRUN:
9011 8976 pkt->pkt_reason = CMD_CMD_OVR;
9012 8977 pkt->pkt_statistics |= STAT_ABORTED;
9013 8978 break;
9014 8979
9015 8980 case FC_REASON_XCHG_BSY: {
9016 8981 caddr_t ptr;
9017 8982
9018 8983 pkt->pkt_reason = CMD_CMPLT; /* Lie */
9019 8984
9020 8985 ptr = (caddr_t)pkt->pkt_scbp;
9021 8986 if (ptr) {
9022 8987 *ptr = STATUS_BUSY;
9023 8988 }
9024 8989 break;
9025 8990 }
9026 8991
9027 8992 case FC_REASON_ABORTED:
9028 8993 pkt->pkt_reason = CMD_TRAN_ERR;
9029 8994 pkt->pkt_statistics |= STAT_ABORTED;
9030 8995 break;
9031 8996
9032 8997 case FC_REASON_ABORT_FAILED:
9033 8998 pkt->pkt_reason = CMD_ABORT_FAIL;
9034 8999 break;
9035 9000
9036 9001 case FC_REASON_NO_SEQ_INIT:
9037 9002 case FC_REASON_CRC_ERROR:
9038 9003 pkt->pkt_reason = CMD_TRAN_ERR;
9039 9004 pkt->pkt_statistics |= STAT_ABORTED;
9040 9005 break;
9041 9006 default:
9042 9007 pkt->pkt_reason = CMD_TRAN_ERR;
9043 9008 break;
9044 9009 }
9045 9010 break;
9046 9011
9047 9012 case FC_PKT_PORT_OFFLINE: {
9048 9013 dev_info_t *cdip = NULL;
9049 9014 caddr_t ptr;
9050 9015
9051 9016 if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
9052 9017 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9053 9018 fcp_trace, FCP_BUF_LEVEL_8, 0,
9054 9019 "SCSI cmd; LOGIN REQUIRED from FCA for %x",
9055 9020 ptgt->tgt_d_id);
9056 9021 }
9057 9022
9058 9023 mutex_enter(&plun->lun_mutex);
9059 9024 if (plun->lun_mpxio == 0) {
9060 9025 cdip = DIP(plun->lun_cip);
9061 9026 } else if (plun->lun_cip) {
9062 9027 cdip = mdi_pi_get_client(PIP(plun->lun_cip));
9063 9028 }
9064 9029
9065 9030 mutex_exit(&plun->lun_mutex);
9066 9031
9067 9032 if (cdip) {
9068 9033 (void) ndi_event_retrieve_cookie(
9069 9034 pptr->port_ndi_event_hdl, cdip,
9070 9035 FCAL_REMOVE_EVENT, &fcp_remove_eid,
9071 9036 NDI_EVENT_NOPASS);
9072 9037 (void) ndi_event_run_callbacks(
9073 9038 pptr->port_ndi_event_hdl, cdip,
9074 9039 fcp_remove_eid, NULL);
9075 9040 }
9076 9041
9077 9042 /*
9078 9043			 * If the link goes offline for a LIP,
9079 9044			 * this will cause an error to the ST, SG, and
9080 9045			 * SGEN drivers. By setting BUSY we will
9081 9046			 * give the drivers the chance to retry
9082 9047			 * before they blow off the job. ST will
9083 9048			 * remember how many times it has retried.
9084 9049 */
9085 9050
9086 9051 if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
9087 9052 (plun->lun_type == DTYPE_CHANGER)) {
9088 9053 pkt->pkt_reason = CMD_CMPLT; /* Lie */
9089 9054 ptr = (caddr_t)pkt->pkt_scbp;
9090 9055 if (ptr) {
9091 9056 *ptr = STATUS_BUSY;
9092 9057 }
9093 9058 } else {
9094 9059 pkt->pkt_reason = CMD_TRAN_ERR;
9095 9060 pkt->pkt_statistics |= STAT_BUS_RESET;
9096 9061 }
9097 9062 break;
9098 9063 }
9099 9064
9100 9065 case FC_PKT_TRAN_BSY:
9101 9066 /*
9102 9067 * Use the ssd Qfull handling here.
9103 9068 */
9104 9069 *pkt->pkt_scbp = STATUS_INTERMEDIATE;
9105 9070 pkt->pkt_state = STATE_GOT_BUS;
9106 9071 break;
9107 9072
9108 9073 case FC_PKT_TIMEOUT:
9109 9074 pkt->pkt_reason = CMD_TIMEOUT;
9110 9075 if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
9111 9076 pkt->pkt_statistics |= STAT_TIMEOUT;
9112 9077 } else {
9113 9078 pkt->pkt_statistics |= STAT_ABORTED;
9114 9079 }
9115 9080 break;
9116 9081
9117 9082 case FC_PKT_LOCAL_RJT:
9118 9083 switch (fpkt->pkt_reason) {
9119 9084 case FC_REASON_OFFLINE: {
9120 9085 dev_info_t *cdip = NULL;
9121 9086
9122 9087 mutex_enter(&plun->lun_mutex);
9123 9088 if (plun->lun_mpxio == 0) {
9124 9089 cdip = DIP(plun->lun_cip);
9125 9090 } else if (plun->lun_cip) {
9126 9091 cdip = mdi_pi_get_client(
9127 9092 PIP(plun->lun_cip));
9128 9093 }
9129 9094 mutex_exit(&plun->lun_mutex);
9130 9095
9131 9096 if (cdip) {
9132 9097 (void) ndi_event_retrieve_cookie(
9133 9098 pptr->port_ndi_event_hdl, cdip,
9134 9099 FCAL_REMOVE_EVENT,
9135 9100 &fcp_remove_eid,
9136 9101 NDI_EVENT_NOPASS);
9137 9102 (void) ndi_event_run_callbacks(
9138 9103 pptr->port_ndi_event_hdl,
9139 9104 cdip, fcp_remove_eid, NULL);
9140 9105 }
9141 9106
9142 9107 pkt->pkt_reason = CMD_TRAN_ERR;
9143 9108 pkt->pkt_statistics |= STAT_BUS_RESET;
9144 9109
9145 9110 break;
9146 9111 }
9147 9112
9148 9113 case FC_REASON_NOMEM:
9149 9114 case FC_REASON_QFULL: {
9150 9115 caddr_t ptr;
9151 9116
9152 9117 pkt->pkt_reason = CMD_CMPLT; /* Lie */
9153 9118 ptr = (caddr_t)pkt->pkt_scbp;
9154 9119 if (ptr) {
9155 9120 *ptr = STATUS_BUSY;
9156 9121 }
9157 9122 break;
9158 9123 }
9159 9124
9160 9125 case FC_REASON_DMA_ERROR:
9161 9126 pkt->pkt_reason = CMD_DMA_DERR;
9162 9127 pkt->pkt_statistics |= STAT_ABORTED;
9163 9128 break;
9164 9129
9165 9130 case FC_REASON_CRC_ERROR:
9166 9131 case FC_REASON_UNDERRUN: {
9167 9132 uchar_t status;
9168 9133 /*
9169 9134 * Work around for Bugid: 4240945.
9170 9135				 * Workaround for bug ID 4240945:
9171 9136				 * the IB on the A5k doesn't set the underrun bit
9172 9137				 * in the FCP status when it transfers
9173 9138				 * less than the requested amount of data. Work
9174 9139				 * around the ses problem to keep luxadm
9175 9140				 * happy till the IB firmware is fixed.
9176 9141 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
9177 9142 FCP_CP_IN(fpkt->pkt_resp, rsp,
9178 9143 fpkt->pkt_resp_acc,
9179 9144 sizeof (struct fcp_rsp));
9180 9145 }
9181 9146 status = rsp->fcp_u.fcp_status.scsi_status;
9182 9147 if (((plun->lun_type & DTYPE_MASK) ==
9183 9148 DTYPE_ESI) && (status == STATUS_GOOD)) {
9184 9149 pkt->pkt_reason = CMD_CMPLT;
9185 9150 *pkt->pkt_scbp = status;
9186 9151 pkt->pkt_resid = 0;
9187 9152 } else {
9188 9153 pkt->pkt_reason = CMD_TRAN_ERR;
9189 9154 pkt->pkt_statistics |= STAT_ABORTED;
9190 9155 }
9191 9156 break;
9192 9157 }
9193 9158
9194 9159 case FC_REASON_NO_CONNECTION:
9195 9160 case FC_REASON_UNSUPPORTED:
9196 9161 case FC_REASON_ILLEGAL_REQ:
9197 9162 case FC_REASON_BAD_SID:
9198 9163 case FC_REASON_DIAG_BUSY:
9199 9164 case FC_REASON_FCAL_OPN_FAIL:
9200 9165 case FC_REASON_BAD_XID:
9201 9166 default:
9202 9167 pkt->pkt_reason = CMD_TRAN_ERR;
9203 9168 pkt->pkt_statistics |= STAT_ABORTED;
9204 9169 break;
9205 9170
9206 9171 }
9207 9172 break;
9208 9173
9209 9174 case FC_PKT_NPORT_RJT:
9210 9175 case FC_PKT_FABRIC_RJT:
9211 9176 case FC_PKT_NPORT_BSY:
9212 9177 case FC_PKT_FABRIC_BSY:
9213 9178 default:
9214 9179 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9215 9180 fcp_trace, FCP_BUF_LEVEL_8, 0,
9216 9181 "FC Status 0x%x, reason 0x%x",
9217 9182 fpkt->pkt_state, fpkt->pkt_reason);
9218 9183 pkt->pkt_reason = CMD_TRAN_ERR;
9219 9184 pkt->pkt_statistics |= STAT_ABORTED;
9220 9185 break;
9221 9186 }
9222 9187
9223 9188 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9224 9189 fcp_trace, FCP_BUF_LEVEL_9, 0,
9225 9190 "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
9226 9191 " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
9227 9192 fpkt->pkt_reason);
9228 9193 }
9229 9194
9230 9195 ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
9231 9196 }
9232 9197
9233 9198
9234 9199 static int
9235 9200 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9236 9201 {
9237 9202 if (rsp->reserved_0 || rsp->reserved_1 ||
9238 9203 rsp->fcp_u.fcp_status.reserved_0 ||
9239 9204 rsp->fcp_u.fcp_status.reserved_1) {
9240 9205 /*
9241 9206 * These reserved fields should ideally be zero. FCP-2 does say
9242 9207 * that the recipient need not check for reserved fields to be
9243 9208 * zero. If they are not zero, we will not make a fuss about it
9244 9209 * - just log it (in debug to both trace buffer and messages
9245 9210 * file and to trace buffer only in non-debug) and move on.
9246 9211 *
9247 9212 * Non-zero reserved fields were seen with minnows.
9248 9213 *
9249 9214 * qlc takes care of some of this but we cannot assume that all
9250 9215 * FCAs will do so.
9251 9216 */
9252 9217 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9253 9218 FCP_BUF_LEVEL_5, 0,
9254 9219 "Got fcp response packet with non-zero reserved fields "
9255 9220 "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9256 9221 "status.reserved_0:0x%x, status.reserved_1:0x%x",
9257 9222 rsp->reserved_0, rsp->reserved_1,
9258 9223 rsp->fcp_u.fcp_status.reserved_0,
9259 9224 rsp->fcp_u.fcp_status.reserved_1);
9260 9225 }
9261 9226
9262 9227 if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9263 9228 (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9264 9229 return (FC_FAILURE);
9265 9230 }
9266 9231
9267 9232 if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9268 9233 (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9269 9234 sizeof (struct fcp_rsp))) {
9270 9235 return (FC_FAILURE);
9271 9236 }
9272 9237
9273 9238 return (FC_SUCCESS);
9274 9239 }
9275 9240
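For readers following the length checks in fcp_validate_fcp_response() above: a response is rejected when its response data or sense data cannot fit inside the maximum response IU after the fixed header. A small standalone sketch of that arithmetic, using placeholder sizes rather than the driver's real FCP_MAX_RSP_IU_SIZE, might read:

/*
 * Illustrative sketch of the length checks in fcp_validate_fcp_response().
 * The constants below are placeholders, not the driver's real values.
 */
#include <stdio.h>

#define	MAX_RSP_IU_SIZE	256	/* stand-in for FCP_MAX_RSP_IU_SIZE */
#define	RSP_HDR_SIZE	24	/* stand-in for sizeof (struct fcp_rsp) */

static int
validate(int rsp_len_set, int rsp_len, int sense_len_set, int sense_len)
{
	/* Response data must fit after the fixed response header. */
	if (rsp_len_set && rsp_len > MAX_RSP_IU_SIZE - RSP_HDR_SIZE)
		return (-1);
	/* Sense data must fit after the header and the response data. */
	if (sense_len_set &&
	    sense_len > MAX_RSP_IU_SIZE - rsp_len - RSP_HDR_SIZE)
		return (-1);
	return (0);
}

int
main(void)
{
	printf("%d\n", validate(1, 8, 1, 200));	/* 0: both fit */
	printf("%d\n", validate(1, 8, 1, 240));	/* -1: sense overruns the IU */
	return (0);
}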
9276 9241
9277 9242 /*
9278 9243 * This is called when there is a change in the device state. The case we're
9279 9244 * handling here is: if the d_id does not match, offline this tgt and online
9280 9245 * a new tgt with the new d_id. Called from fcp_handle_devices with
9281 9246 * port_mutex held.
9282 9247 */
9283 9248 static int
9284 9249 fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
9285 9250 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
9286 9251 {
9287 9252 ASSERT(mutex_owned(&pptr->port_mutex));
9288 9253
9289 9254 FCP_TRACE(fcp_logq, pptr->port_instbuf,
9290 9255 fcp_trace, FCP_BUF_LEVEL_3, 0,
9291 9256 "Starting fcp_device_changed...");
9292 9257
9293 9258 /*
9294 9259	 * The two cases where port_device_changed is called are when
9295 9260	 * the target changes its d_id or its hard address.
9296 9261 */
9297 9262 if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
9298 9263 (FC_TOP_EXTERNAL(pptr->port_topology) &&
9299 9264 (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {
9300 9265
9301 9266 /* offline this target */
9302 9267 mutex_enter(&ptgt->tgt_mutex);
9303 9268 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
9304 9269 (void) fcp_offline_target(pptr, ptgt, link_cnt,
9305 9270 0, 1, NDI_DEVI_REMOVE);
9306 9271 }
9307 9272 mutex_exit(&ptgt->tgt_mutex);
9308 9273
9309 9274 fcp_log(CE_NOTE, pptr->port_dip,
9310 9275 "Change in target properties: Old D_ID=%x New D_ID=%x"
9311 9276 " Old HA=%x New HA=%x", ptgt->tgt_d_id,
9312 9277 map_entry->map_did.port_id, ptgt->tgt_hard_addr,
9313 9278 map_entry->map_hard_addr.hard_addr);
9314 9279 }
9315 9280
9316 9281 return (fcp_handle_mapflags(pptr, ptgt, map_entry,
9317 9282 link_cnt, tgt_cnt, cause));
9318 9283 }
9319 9284
9320 9285 /*
9321 9286 * Function: fcp_alloc_lun
9322 9287 *
9323 9288 * Description: Creates a new lun structure and adds it to the list
9324 9289 * of luns of the target.
9325 9290 *
9326 9291 * Argument: ptgt Target the lun will belong to.
9327 9292 *
9328 9293 * Return Value: NULL Failed
9329 9294 * Not NULL Succeeded
9330 9295 *
9331 9296 * Context: Kernel context
9332 9297 */
9333 9298 static struct fcp_lun *
9334 9299 fcp_alloc_lun(struct fcp_tgt *ptgt)
9335 9300 {
9336 9301 struct fcp_lun *plun;
9337 9302
9338 9303 plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9339 9304 if (plun != NULL) {
9340 9305 /*
9341 9306		 * Initialize the mutex before putting it in the target list,
9342 9307		 * especially before releasing the target mutex.
9343 9308 */
9344 9309 mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9345 9310 plun->lun_tgt = ptgt;
9346 9311
9347 9312 mutex_enter(&ptgt->tgt_mutex);
9348 9313 plun->lun_next = ptgt->tgt_lun;
9349 9314 ptgt->tgt_lun = plun;
9350 9315 plun->lun_old_guid = NULL;
9351 9316 plun->lun_old_guid_size = 0;
9352 9317 mutex_exit(&ptgt->tgt_mutex);
9353 9318 }
9354 9319
9355 9320 return (plun);
9356 9321 }
9357 9322
9358 9323 /*
9359 9324 * Function: fcp_dealloc_lun
9360 9325 *
9361 9326 * Description: Frees the LUN structure passed by the caller.
9362 9327 *
9363 9328 * Argument: plun LUN structure to free.
9364 9329 *
9365 9330 * Return Value: None
9366 9331 *
9367 9332 * Context: Kernel context.
9368 9333 */
9369 9334 static void
9370 9335 fcp_dealloc_lun(struct fcp_lun *plun)
9371 9336 {
9372 9337 mutex_enter(&plun->lun_mutex);
9373 9338 if (plun->lun_cip) {
9374 9339 fcp_remove_child(plun);
9375 9340 }
9376 9341 mutex_exit(&plun->lun_mutex);
9377 9342
9378 9343 mutex_destroy(&plun->lun_mutex);
9379 9344 if (plun->lun_guid) {
9380 9345 kmem_free(plun->lun_guid, plun->lun_guid_size);
9381 9346 }
9382 9347 if (plun->lun_old_guid) {
9383 9348 kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
9384 9349 }
9385 9350 kmem_free(plun, sizeof (*plun));
9386 9351 }
9387 9352
9388 9353 /*
9389 9354 * Function: fcp_alloc_tgt
9390 9355 *
9391 9356 * Description: Creates a new target structure and adds it to the port
9392 9357 * hash list.
9393 9358 *
9394 9359 * Argument: pptr fcp port structure
9395 9360 * *map_entry entry describing the target to create
9396 9361 * link_cnt Link state change counter
9397 9362 *
9398 9363 * Return Value: NULL Failed
9399 9364 * Not NULL Succeeded
9400 9365 *
9401 9366 * Context: Kernel context.
9402 9367 */
9403 9368 static struct fcp_tgt *
9404 9369 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
9405 9370 {
9406 9371 int hash;
9407 9372 uchar_t *wwn;
9408 9373 struct fcp_tgt *ptgt;
9409 9374
9410 9375 ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
9411 9376 if (ptgt != NULL) {
9412 9377 mutex_enter(&pptr->port_mutex);
9413 9378 if (link_cnt != pptr->port_link_cnt) {
9414 9379 /*
9415 9380 * oh oh -- another link reset
9416 9381 * in progress -- give up
9417 9382 */
9418 9383 mutex_exit(&pptr->port_mutex);
9419 9384 kmem_free(ptgt, sizeof (*ptgt));
9420 9385 ptgt = NULL;
9421 9386 } else {
9422 9387 /*
9423 9388			 * initialize the mutex before putting it in the port
9424 9389			 * wwn list, especially before releasing the port
9425 9390 * mutex.
9426 9391 */
9427 9392 mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);
9428 9393
9429 9394 /* add new target entry to the port's hash list */
9430 9395 wwn = (uchar_t *)&map_entry->map_pwwn;
9431 9396 hash = FCP_HASH(wwn);
9432 9397
9433 9398 ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
9434 9399 pptr->port_tgt_hash_table[hash] = ptgt;
9435 9400
9436 9401 /* save cross-ptr */
9437 9402 ptgt->tgt_port = pptr;
9438 9403
9439 9404 ptgt->tgt_change_cnt = 1;
9440 9405
9441 9406 /* initialize the target manual_config_only flag */
9442 9407 if (fcp_enable_auto_configuration) {
9443 9408 ptgt->tgt_manual_config_only = 0;
9444 9409 } else {
9445 9410 ptgt->tgt_manual_config_only = 1;
9446 9411 }
9447 9412
9448 9413 mutex_exit(&pptr->port_mutex);
9449 9414 }
9450 9415 }
9451 9416
9452 9417 return (ptgt);
9453 9418 }
9454 9419
9455 9420 /*
9456 9421 * Function: fcp_dealloc_tgt
9457 9422 *
9458 9423 * Description: Frees the target structure passed by the caller.
9459 9424 *
9460 9425 * Argument: ptgt Target structure to free.
9461 9426 *
9462 9427 * Return Value: None
9463 9428 *
9464 9429 * Context: Kernel context.
9465 9430 */
9466 9431 static void
9467 9432 fcp_dealloc_tgt(struct fcp_tgt *ptgt)
9468 9433 {
9469 9434 mutex_destroy(&ptgt->tgt_mutex);
9470 9435 kmem_free(ptgt, sizeof (*ptgt));
9471 9436 }
9472 9437
9473 9438
9474 9439 /*
9475 9440 * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry
9476 9441 *
9477 9442 * Device discovery commands will not be retried forever, as
9478 9443 * this will have repercussions on other devices that need to
9479 9444 * be submitted to the hotplug thread. After a quick glance
9480 9445 * at the SCSI-3 spec, it was found that the spec doesn't
9481 9446 * mandate a forever retry, rather recommends a delayed retry.
9482 9447 *
9483 9448 * Since Photon IB is single threaded, STATUS_BUSY is common
9484 9449 * in a 4+initiator environment. Make sure the total time
9485 9450 * spent on retries (including command timeout) does not
9486 9451 * exceed 60 seconds.
9487 9452 */
9488 9453 static void
9489 9454 fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
9490 9455 {
9491 9456 struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9492 9457 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
9493 9458
9494 9459 mutex_enter(&pptr->port_mutex);
9495 9460 mutex_enter(&ptgt->tgt_mutex);
9496 9461 if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
9497 9462 FCP_TRACE(fcp_logq, pptr->port_instbuf,
9498 9463 fcp_trace, FCP_BUF_LEVEL_2, 0,
9499 9464		    "fcp_queue_ipkt,1:state change occurred"
9500 9465 " for D_ID=0x%x", ptgt->tgt_d_id);
9501 9466 mutex_exit(&ptgt->tgt_mutex);
9502 9467 mutex_exit(&pptr->port_mutex);
9503 9468 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
9504 9469 icmd->ipkt_change_cnt, icmd->ipkt_cause);
9505 9470 fcp_icmd_free(pptr, icmd);
9506 9471 return;
9507 9472 }
9508 9473 mutex_exit(&ptgt->tgt_mutex);
9509 9474
9510 9475 icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;
9511 9476
9512 9477 if (pptr->port_ipkt_list != NULL) {
9513 9478 /* add pkt to front of doubly-linked list */
9514 9479 pptr->port_ipkt_list->ipkt_prev = icmd;
9515 9480 icmd->ipkt_next = pptr->port_ipkt_list;
9516 9481 pptr->port_ipkt_list = icmd;
9517 9482 icmd->ipkt_prev = NULL;
9518 9483 } else {
9519 9484 /* this is the first/only pkt on the list */
9520 9485 pptr->port_ipkt_list = icmd;
9521 9486 icmd->ipkt_next = NULL;
9522 9487 icmd->ipkt_prev = NULL;
9523 9488 }
9524 9489 mutex_exit(&pptr->port_mutex);
9525 9490 }
9526 9491
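The requeue above restarts the internal packet at fcp_watchdog_time + ipkt_retries++, so each retry waits one watchdog tick longer than the previous one instead of hammering the target at a fixed rate. A tiny illustration of that growing back-off (hypothetical tick values, not driver code):

/*
 * Illustrative sketch: a requeued internal packet restarts at
 * watchdog_time + retries++, so every retry waits one tick longer than
 * the previous one.  Values are hypothetical.
 */
#include <stdio.h>

int
main(void)
{
	long watchdog_time = 100;	/* pretend current watchdog tick */
	int retries = 0;
	int i;

	for (i = 0; i < 5; i++) {
		long restart = watchdog_time + retries++;

		printf("requeue %d: restart at tick %ld\n", i + 1, restart);
		watchdog_time = restart + 1;	/* pretend the retry ran then */
	}
	return (0);
}
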
9527 9492 /*
9528 9493 * Function: fcp_transport
9529 9494 *
9530 9495 * Description: This function submits the Fibre Channel packet to the transport
9531 9496 * layer by calling fc_ulp_transport(). If fc_ulp_transport()
9532 9497 * fails the submission, the treatment depends on the value of
9533 9498 * the variable internal.
9534 9499 *
9535 9500 * Argument: port_handle fp/fctl port handle.
9536 9501 * *fpkt Packet to submit to the transport layer.
9537 9502 * internal Not zero when it's an internal packet.
9538 9503 *
9539 9504 * Return Value: FC_TRAN_BUSY
9540 9505 * FC_STATEC_BUSY
9541 9506 * FC_OFFLINE
9542 9507 * FC_LOGINREQ
9543 9508 * FC_DEVICE_BUSY
9544 9509 * FC_SUCCESS
9545 9510 */
9546 9511 static int
9547 9512 fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
9548 9513 {
9549 9514 int rval;
9550 9515
9551 9516 rval = fc_ulp_transport(port_handle, fpkt);
9552 9517 if (rval == FC_SUCCESS) {
9553 9518 return (rval);
9554 9519 }
9555 9520
9556 9521 /*
9557 9522	 * The LUN isn't marked BUSY or OFFLINE, so we got here to transport
9558 9523	 * a command. If the underlying modules see a state
9559 9524	 * change, or if a port is OFFLINE, that state change
9560 9525	 * hasn't reached FCP yet, so re-queue the command for deferred
9561 9526	 * submission.
9562 9527 */
9563 9528 if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
9564 9529 (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
9565 9530 (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
9566 9531 /*
9567 9532		 * Defer packet re-submission. A permanent hang is possible on
9568 9533		 * internal commands if the port driver sends FC_STATEC_BUSY
9569 9534		 * forever, but that shouldn't happen in a good environment.
9570 9535		 * Limiting re-transport for internal commands is probably a
9571 9536		 * good idea.
9572 9537		 * A race condition can happen when a port sees a barrage of
9573 9538 * link transitions offline to online. If the FCTL has
9574 9539 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
9575 9540 * internal commands should be queued to do the discovery.
9576 9541		 * The race condition is when an online event comes and FCP starts
9577 9542 * its internal discovery and the link goes offline. It is
9578 9543 * possible that the statec_callback has not reached FCP
9579 9544 * and FCP is carrying on with its internal discovery.
9580 9545 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
9581 9546 * that the link has gone offline. At this point FCP should
9582 9547 * drop all the internal commands and wait for the
9583 9548 * statec_callback. It will be facilitated by incrementing
9584 9549 * port_link_cnt.
9585 9550 *
9586 9551		 * For external commands, the (FC)pkt_timeout is decremented
9587 9552		 * by the queue delay added by our driver. Care is taken to
9588 9553		 * ensure that it doesn't become zero (zero means no timeout).
9589 9554		 * If the time expires right inside the driver queue itself,
9590 9555		 * the watch thread will return it to the original caller,
9591 9556		 * indicating that the command has timed out.
9592 9557 */
9593 9558 if (internal) {
9594 9559 char *op;
9595 9560 struct fcp_ipkt *icmd;
9596 9561
9597 9562 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9598 9563 switch (icmd->ipkt_opcode) {
9599 9564 case SCMD_REPORT_LUN:
9600 9565 op = "REPORT LUN";
9601 9566 break;
9602 9567
9603 9568 case SCMD_INQUIRY:
9604 9569 op = "INQUIRY";
9605 9570 break;
9606 9571
9607 9572 case SCMD_INQUIRY_PAGE83:
9608 9573 op = "INQUIRY-83";
9609 9574 break;
9610 9575
9611 9576 default:
9612 9577 op = "Internal SCSI COMMAND";
9613 9578 break;
9614 9579 }
9615 9580
9616 9581 if (fcp_handle_ipkt_errors(icmd->ipkt_port,
9617 9582 icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
9618 9583 rval = FC_SUCCESS;
9619 9584 }
9620 9585 } else {
9621 9586 struct fcp_pkt *cmd;
9622 9587 struct fcp_port *pptr;
9623 9588
9624 9589 cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
9625 9590 cmd->cmd_state = FCP_PKT_IDLE;
9626 9591 pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);
9627 9592
9628 9593 if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
9629 9594 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9630 9595 fcp_trace, FCP_BUF_LEVEL_9, 0,
9631 9596 "fcp_transport: xport busy for pkt %p",
9632 9597 cmd->cmd_pkt);
9633 9598 rval = FC_TRAN_BUSY;
9634 9599 } else {
9635 9600 fcp_queue_pkt(pptr, cmd);
9636 9601 rval = FC_SUCCESS;
9637 9602 }
9638 9603 }
9639 9604 }
9640 9605
9641 9606 return (rval);
9642 9607 }
9643 9608
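The external-command path described in the comment above subtracts the queue delay from the packet timeout while keeping it non-zero, since a zero pkt_timeout would mean "no timeout". A minimal sketch of that clamp, with made-up names and values:

/*
 * Illustrative sketch of the timeout adjustment described above:
 * subtract the queue delay from the packet timeout but never let the
 * result reach zero, because zero means "no timeout".  Hypothetical
 * names and values.
 */
#include <stdio.h>

static int
adjust_timeout(int pkt_timeout, int queue_delay)
{
	int remaining = pkt_timeout - queue_delay;

	/* Keep at least one second so the watch thread can still expire it. */
	return (remaining > 0 ? remaining : 1);
}

int
main(void)
{
	printf("%d\n", adjust_timeout(30, 5));	/* 25 */
	printf("%d\n", adjust_timeout(3, 10));	/* clamped to 1 */
	return (0);
}
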
9644 9609 /*VARARGS3*/
9645 9610 static void
9646 9611 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9647 9612 {
9648 9613 char buf[256];
9649 9614 va_list ap;
9650 9615
9651 9616 if (dip == NULL) {
9652 9617 dip = fcp_global_dip;
9653 9618 }
9654 9619
9655 9620 va_start(ap, fmt);
9656 9621 (void) vsprintf(buf, fmt, ap);
9657 9622 va_end(ap);
9658 9623
9659 9624 scsi_log(dip, "fcp", level, buf);
9660 9625 }
9661 9626
9662 9627 /*
9663 9628 * This function retries the NS registration of the FC4 type.
9664 9629 * It assumes that fcp_mutex is held.
9665 9630 * The function does nothing if the topology is not fabric.
9666 9631 * So, the topology has to be set before this function can be called.
9667 9632 */
9668 9633 static void
9669 9634 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9670 9635 {
9671 9636 int rval;
9672 9637
9673 9638 ASSERT(MUTEX_HELD(&pptr->port_mutex));
9674 9639
9675 9640 if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9676 9641 ((pptr->port_topology != FC_TOP_FABRIC) &&
9677 9642 (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9678 9643 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9679 9644 pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9680 9645 }
9681 9646 return;
9682 9647 }
9683 9648 mutex_exit(&pptr->port_mutex);
9684 9649 rval = fcp_do_ns_registry(pptr, s_id);
9685 9650 mutex_enter(&pptr->port_mutex);
9686 9651
9687 9652 if (rval == 0) {
9688 9653 /* Registry successful. Reset flag */
9689 9654 pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9690 9655 }
9691 9656 }
9692 9657
9693 9658 /*
9694 9659 * This function registers the ULP with the switch by calling transport i/f
9695 9660 */
9696 9661 static int
9697 9662 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9698 9663 {
9699 9664 fc_ns_cmd_t ns_cmd;
9700 9665 ns_rfc_type_t rfc;
9701 9666 uint32_t types[8];
9702 9667
9703 9668 /*
9704 9669 * Prepare the Name server structure to
9705 9670 * register with the transport in case of
9706 9671 * Fabric configuration.
9707 9672 */
9708 9673 bzero(&rfc, sizeof (rfc));
9709 9674 bzero(types, sizeof (types));
9710 9675
9711 9676 types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9712 9677 (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
9713 9678
9714 9679 rfc.rfc_port_id.port_id = s_id;
9715 9680 bcopy(types, rfc.rfc_types, sizeof (types));
9716 9681
9717 9682 ns_cmd.ns_flags = 0;
9718 9683 ns_cmd.ns_cmd = NS_RFT_ID;
9719 9684 ns_cmd.ns_req_len = sizeof (rfc);
9720 9685 ns_cmd.ns_req_payload = (caddr_t)&rfc;
9721 9686 ns_cmd.ns_resp_len = 0;
9722 9687 ns_cmd.ns_resp_payload = NULL;
9723 9688
9724 9689 /*
9725 9690 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9726 9691 */
9727 9692 if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9728 9693 fcp_log(CE_WARN, pptr->port_dip,
9729 9694 "!ns_registry: failed name server registration");
9730 9695 return (1);
9731 9696 }
9732 9697
9733 9698 return (0);
9734 9699 }
9735 9700
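fcp_do_ns_registry() advertises the SCSI-FCP FC-4 type through an RFT_ID request whose payload is a bitmap of FC-4 type codes. Assuming the conventional FC-GS layout of 8 x 32-bit words where type T sets bit (T % 32) of word (T / 32), and assuming FC_TYPE_SCSI_FCP is 0x08, a standalone sketch of building that bitmap looks like this (the driver's FC4_TYPE_WORD_POS/BIT_POS macros may differ in detail):

/*
 * Illustrative sketch of the FC-4 type bitmap built for RFT_ID,
 * assuming the usual FC-GS layout: 8 x 32-bit words, where type T sets
 * bit (T % 32) in word (T / 32).  FC_TYPE_SCSI_FCP is assumed to be
 * 0x08; the driver's FC4_TYPE_WORD_POS/BIT_POS macros may differ.
 */
#include <stdio.h>
#include <stdint.h>

#define	TYPE_WORD(t)	((t) / 32)
#define	TYPE_BIT(t)	((t) % 32)

int
main(void)
{
	uint32_t types[8] = { 0 };
	unsigned scsi_fcp = 0x08;	/* assumed FC_TYPE_SCSI_FCP */

	types[TYPE_WORD(scsi_fcp)] |= 1u << TYPE_BIT(scsi_fcp);

	printf("word %u = 0x%08x\n", TYPE_WORD(scsi_fcp),
	    (unsigned)types[TYPE_WORD(scsi_fcp)]);	/* word 0 = 0x00000100 */
	return (0);
}
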
9736 9701 /*
9737 9702 * Function: fcp_handle_port_attach
9738 9703 *
9739 9704 * Description: This function is called from fcp_port_attach() to attach a
9740 9705 * new port. This routine does the following:
9741 9706 *
9742 9707 * 1) Allocates an fcp_port structure and initializes it.
9743 9708 *		2) Tries to register the new FC-4 (FCP) capability with the name
9744 9709 * server.
9745 9710 * 3) Kicks off the enumeration of the targets/luns visible
9746 9711 * through this new port. That is done by calling
9747 9712 * fcp_statec_callback() if the port is online.
9748 9713 *
9749 9714 * Argument: ulph fp/fctl port handle.
9750 9715 * *pinfo Port information.
9751 9716 * s_id Port ID.
9752 9717 * instance Device instance number for the local port
9753 9718 * (returned by ddi_get_instance()).
9754 9719 *
9755 9720 * Return Value: DDI_SUCCESS
9756 9721 * DDI_FAILURE
9757 9722 *
9758 9723 * Context: User and Kernel context.
9759 9724 */
9760 9725 /*ARGSUSED*/
9761 9726 int
9762 9727 fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
9763 9728 uint32_t s_id, int instance)
9764 9729 {
9765 9730 int res = DDI_FAILURE;
9766 9731 scsi_hba_tran_t *tran;
9767 9732 int mutex_initted = FALSE;
9768 9733 int hba_attached = FALSE;
9769 9734 int soft_state_linked = FALSE;
9770 9735 int event_bind = FALSE;
9771 9736 struct fcp_port *pptr;
9772 9737 fc_portmap_t *tmp_list = NULL;
9773 9738 uint32_t max_cnt, alloc_cnt;
9774 9739 uchar_t *boot_wwn = NULL;
9775 9740 uint_t nbytes;
9776 9741 int manual_cfg;
9777 9742
9778 9743 /*
9779 9744 * this port instance attaching for the first time (or after
9780 9745 * being detached before)
9781 9746 */
9782 9747 FCP_TRACE(fcp_logq, "fcp", fcp_trace,
9783 9748 FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);
9784 9749
9785 9750 if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
9786 9751 cmn_err(CE_WARN, "fcp: Softstate struct alloc failed"
9787 9752 "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
9788 9753 instance);
9789 9754 return (res);
9790 9755 }
9791 9756
9792 9757 if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
9793 9758 /* this shouldn't happen */
9794 9759 ddi_soft_state_free(fcp_softstate, instance);
9795 9760 cmn_err(CE_WARN, "fcp: bad soft state");
9796 9761 return (res);
9797 9762 }
9798 9763
9799 9764 (void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);
9800 9765
9801 9766 /*
9802 9767 * Make a copy of ulp_port_info as fctl allocates
9803 9768 * a temp struct.
9804 9769 */
9805 9770 (void) fcp_cp_pinfo(pptr, pinfo);
9806 9771
9807 9772 /*
9808 9773 * Check for manual_configuration_only property.
9809 9774	 * Enable manual configuration if the property is
9810 9775 * set to 1, otherwise disable manual configuration.
9811 9776 */
9812 9777 if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
9813 9778 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9814 9779 MANUAL_CFG_ONLY,
9815 9780 -1)) != -1) {
9816 9781 if (manual_cfg == 1) {
9817 9782 char *pathname;
9818 9783 pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
9819 9784 (void) ddi_pathname(pptr->port_dip, pathname);
9820 9785 cmn_err(CE_NOTE,
9821 9786 "%s (%s%d) %s is enabled via %s.conf.",
9822 9787 pathname,
9823 9788 ddi_driver_name(pptr->port_dip),
9824 9789 ddi_get_instance(pptr->port_dip),
9825 9790 MANUAL_CFG_ONLY,
9826 9791 ddi_driver_name(pptr->port_dip));
9827 9792 fcp_enable_auto_configuration = 0;
9828 9793 kmem_free(pathname, MAXPATHLEN);
9829 9794 }
9830 9795 }
9831 9796 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9832 9797 pptr->port_link_cnt = 1;
9833 9798 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9834 9799 pptr->port_id = s_id;
9835 9800 pptr->port_instance = instance;
9836 9801 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state));
9837 9802 pptr->port_state = FCP_STATE_INIT;
9838 9803 if (pinfo->port_acc_attr == NULL) {
9839 9804 /*
9840 9805 * The corresponding FCA doesn't support DMA at all
9841 9806 */
9842 9807 pptr->port_state |= FCP_STATE_FCA_IS_NODMA;
9843 9808 }
9844 9809
9845 9810 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state));
9846 9811
9847 9812 if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
9848 9813 /*
9849 9814		 * If the FCA supports DMA in the SCSI data phase, we need to
9850 9815		 * preallocate DMA cookies, so stash the cookie size.
9851 9816 */
9852 9817 pptr->port_dmacookie_sz = sizeof (ddi_dma_cookie_t) *
9853 9818 pptr->port_data_dma_attr.dma_attr_sgllen;
9854 9819 }
9855 9820
9856 9821 /*
9857 9822 * The two mutexes of fcp_port are initialized. The variable
9858 9823 * mutex_initted is incremented to remember that fact. That variable
9859 9824 * is checked when the routine fails and the mutexes have to be
9860 9825 * destroyed.
9861 9826 */
9862 9827 mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
9863 9828 mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
9864 9829 mutex_initted++;
9865 9830
9866 9831 /*
9867 9832	 * The SCSI tran structure is allocated and initialized now.
9868 9833 */
9869 9834 if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
9870 9835 fcp_log(CE_WARN, pptr->port_dip,
9871 9836 "!fcp%d: scsi_hba_tran_alloc failed", instance);
9872 9837 goto fail;
9873 9838 }
9874 9839
9875 9840 /* link in the transport structure then fill it in */
9876 9841 pptr->port_tran = tran;
9877 9842 tran->tran_hba_private = pptr;
9878 9843 tran->tran_tgt_init = fcp_scsi_tgt_init;
9879 9844 tran->tran_tgt_probe = NULL;
9880 9845 tran->tran_tgt_free = fcp_scsi_tgt_free;
9881 9846 tran->tran_start = fcp_scsi_start;
9882 9847 tran->tran_reset = fcp_scsi_reset;
9883 9848 tran->tran_abort = fcp_scsi_abort;
9884 9849 tran->tran_getcap = fcp_scsi_getcap;
9885 9850 tran->tran_setcap = fcp_scsi_setcap;
9886 9851 tran->tran_init_pkt = NULL;
9887 9852 tran->tran_destroy_pkt = NULL;
9888 9853 tran->tran_dmafree = NULL;
9889 9854 tran->tran_sync_pkt = NULL;
9890 9855 tran->tran_reset_notify = fcp_scsi_reset_notify;
9891 9856 tran->tran_get_bus_addr = fcp_scsi_get_bus_addr;
9892 9857 tran->tran_get_name = fcp_scsi_get_name;
9893 9858 tran->tran_clear_aca = NULL;
9894 9859 tran->tran_clear_task_set = NULL;
9895 9860 tran->tran_terminate_task = NULL;
9896 9861 tran->tran_get_eventcookie = fcp_scsi_bus_get_eventcookie;
9897 9862 tran->tran_add_eventcall = fcp_scsi_bus_add_eventcall;
9898 9863 tran->tran_remove_eventcall = fcp_scsi_bus_remove_eventcall;
9899 9864 tran->tran_post_event = fcp_scsi_bus_post_event;
9900 9865 tran->tran_quiesce = NULL;
9901 9866 tran->tran_unquiesce = NULL;
9902 9867 tran->tran_bus_reset = NULL;
9903 9868 tran->tran_bus_config = fcp_scsi_bus_config;
9904 9869 tran->tran_bus_unconfig = fcp_scsi_bus_unconfig;
9905 9870 tran->tran_bus_power = NULL;
9906 9871 tran->tran_interconnect_type = INTERCONNECT_FABRIC;
9907 9872
9908 9873 tran->tran_pkt_constructor = fcp_kmem_cache_constructor;
9909 9874 tran->tran_pkt_destructor = fcp_kmem_cache_destructor;
9910 9875 tran->tran_setup_pkt = fcp_pkt_setup;
9911 9876 tran->tran_teardown_pkt = fcp_pkt_teardown;
9912 9877 tran->tran_hba_len = pptr->port_priv_pkt_len +
9913 9878 sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
9914 9879 if (pptr->port_state & FCP_STATE_FCA_IS_NODMA) {
9915 9880 /*
9916 9881		 * If the FCA doesn't support DMA, then we use different vectors to
9917 9882		 * minimize the effects on the DMA code flow path.
9918 9883 */
9919 9884 tran->tran_start = fcp_pseudo_start;
9920 9885 tran->tran_init_pkt = fcp_pseudo_init_pkt;
9921 9886 tran->tran_destroy_pkt = fcp_pseudo_destroy_pkt;
9922 9887 tran->tran_sync_pkt = fcp_pseudo_sync_pkt;
9923 9888 tran->tran_dmafree = fcp_pseudo_dmafree;
9924 9889 tran->tran_setup_pkt = NULL;
9925 9890 tran->tran_teardown_pkt = NULL;
9926 9891 tran->tran_pkt_constructor = NULL;
9927 9892 tran->tran_pkt_destructor = NULL;
9928 9893 pptr->port_data_dma_attr = pseudo_fca_dma_attr;
9929 9894 }
9930 9895
9931 9896 /*
9932 9897 * Allocate an ndi event handle
9933 9898 */
9934 9899 pptr->port_ndi_event_defs = (ndi_event_definition_t *)
9935 9900 kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);
9936 9901
9937 9902 bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
9938 9903 sizeof (fcp_ndi_event_defs));
9939 9904
9940 9905 (void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
9941 9906 &pptr->port_ndi_event_hdl, NDI_SLEEP);
9942 9907
9943 9908 pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
9944 9909 pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
9945 9910 pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;
9946 9911
9947 9912 if (DEVI_IS_ATTACHING(pptr->port_dip) &&
9948 9913 (ndi_event_bind_set(pptr->port_ndi_event_hdl,
9949 9914 &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
9950 9915 goto fail;
9951 9916 }
9952 9917 event_bind++; /* Checked in fail case */
9953 9918
9954 9919 if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
9955 9920 tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
9956 9921 != DDI_SUCCESS) {
9957 9922 fcp_log(CE_WARN, pptr->port_dip,
9958 9923 "!fcp%d: scsi_hba_attach_setup failed", instance);
9959 9924 goto fail;
9960 9925 }
9961 9926 hba_attached++; /* Checked in fail case */
9962 9927
9963 9928 pptr->port_mpxio = 0;
9964 9929 if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
9965 9930 MDI_SUCCESS) {
9966 9931 pptr->port_mpxio++;
9967 9932 }
9968 9933
9969 9934 /*
9970 9935 * The following code is putting the new port structure in the global
9971 9936 	 * list of ports and, if it is the first port to attach, it starts the
9972 9937 * fcp_watchdog_tick.
9973 9938 *
9974 9939 	 * Why put this new port on the global list before we are done attaching it?
9975 9940 * We are actually making the structure globally known before we are
9976 9941 	 * done attaching it. The reason for that is the code that
9977 9942 * follows. At this point the resources to handle the port are
9978 9943 * allocated. This function is now going to do the following:
9979 9944 *
9980 9945 	 * 1) It is going to try to register with the name server, advertising
9981 9946 * the new FCP capability of the port.
9982 9947 * 2) It is going to play the role of the fp/fctl layer by building
9983 9948 	 * a list of worldwide names reachable through this port and call
9984 9949 	 * itself through fcp_statec_callback(). That requires the port to
9985 9950 * be part of the global list.
9986 9951 */
9987 9952 mutex_enter(&fcp_global_mutex);
9988 9953 if (fcp_port_head == NULL) {
9989 9954 fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
9990 9955 }
9991 9956 pptr->port_next = fcp_port_head;
9992 9957 fcp_port_head = pptr;
9993 9958 soft_state_linked++;
9994 9959
9995 9960 if (fcp_watchdog_init++ == 0) {
9996 9961 fcp_watchdog_tick = fcp_watchdog_timeout *
9997 9962 drv_usectohz(1000000);
9998 9963 fcp_watchdog_id = timeout(fcp_watch, NULL,
9999 9964 fcp_watchdog_tick);
10000 9965 }
10001 9966 mutex_exit(&fcp_global_mutex);
10002 9967
10003 9968 /*
10004 9969 	 * Here an attempt is made to register the new FCP capability
10005 9970 	 * with the name server. That is done using an RFT_ID request.
10006 9971 	 * It is done synchronously. The function fcp_do_ns_registry()
10007 9972 	 * doesn't return until the name server has responded.
10008 9973 	 * On failure, just ignore it for now; it will get retried during
10009 9974 	 * state change callbacks. We set a flag to record this failure.
10010 9975 */
10011 9976 if (fcp_do_ns_registry(pptr, s_id)) {
10012 9977 mutex_enter(&pptr->port_mutex);
10013 9978 pptr->port_state |= FCP_STATE_NS_REG_FAILED;
10014 9979 mutex_exit(&pptr->port_mutex);
10015 9980 } else {
10016 9981 mutex_enter(&pptr->port_mutex);
10017 9982 pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
10018 9983 mutex_exit(&pptr->port_mutex);
10019 9984 }
10020 9985
10021 9986 /*
10022 9987 	 * Look up the boot WWN property
10023 9988 */
10024 9989 if (modrootloaded != 1) {
10025 9990 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
10026 9991 ddi_get_parent(pinfo->port_dip),
10027 9992 DDI_PROP_DONTPASS, OBP_BOOT_WWN,
10028 9993 &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
10029 9994 (nbytes == FC_WWN_SIZE)) {
10030 9995 bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
10031 9996 }
10032 9997 if (boot_wwn) {
10033 9998 ddi_prop_free(boot_wwn);
10034 9999 }
10035 10000 }
10036 10001
10037 10002 /*
10038 10003 * Handle various topologies and link states.
10039 10004 */
10040 10005 switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
10041 10006 case FC_STATE_OFFLINE:
10042 10007
10043 10008 /*
10044 10009 * we're attaching a port where the link is offline
10045 10010 *
10046 10011 * Wait for ONLINE, at which time a state
10047 10012 * change will cause a statec_callback
10048 10013 *
10049 10014 		 * in the meantime, do not do anything
10050 10015 */
10051 10016 res = DDI_SUCCESS;
10052 10017 pptr->port_state |= FCP_STATE_OFFLINE;
10053 10018 break;
10054 10019
10055 10020 case FC_STATE_ONLINE: {
10056 10021 if (pptr->port_topology == FC_TOP_UNKNOWN) {
10057 10022 (void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
10058 10023 res = DDI_SUCCESS;
10059 10024 break;
10060 10025 }
10061 10026 /*
10062 10027 * discover devices and create nodes (a private
10063 10028 * loop or point-to-point)
10064 10029 */
10065 10030 ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
10066 10031
10067 10032 /*
10068 10033 * At this point we are going to build a list of all the ports
10069 10034 * that can be reached through this local port. It looks like
10070 10035 * we cannot handle more than FCP_MAX_DEVICES per local port
10071 10036 * (128).
10072 10037 */
10073 10038 if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
10074 10039 sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
10075 10040 KM_NOSLEEP)) == NULL) {
10076 10041 fcp_log(CE_WARN, pptr->port_dip,
10077 10042 "!fcp%d: failed to allocate portmap",
10078 10043 instance);
10079 10044 goto fail;
10080 10045 }
10081 10046
10082 10047 /*
10083 10048 * fc_ulp_getportmap() is going to provide us with the list of
10084 10049 * remote ports in the buffer we just allocated. The way the
10085 10050 * list is going to be retrieved depends on the topology.
10086 10051 * However, if we are connected to a Fabric, a name server
10087 10052 * request may be sent to get the list of FCP capable ports.
10088 10053 		 * It should be noted that in that case the request is
10089 10054 		 * synchronous. This means we are stuck here until the name
10090 10055 		 * server replies. A lot of things can change during that time,
10091 10056 		 * including, possibly, being called on
10092 10057 		 * fcp_statec_callback() for different reasons. I'm not sure
10093 10058 * the code can handle that.
10094 10059 */
10095 10060 max_cnt = FCP_MAX_DEVICES;
10096 10061 alloc_cnt = FCP_MAX_DEVICES;
10097 10062 if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
10098 10063 &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
10099 10064 FC_SUCCESS) {
10100 10065 caddr_t msg;
10101 10066
10102 10067 (void) fc_ulp_error(res, &msg);
10103 10068
10104 10069 /*
10105 10070 			 * this just means the transport is
10106 10071 			 * busy, perhaps building a portmap, so,
10107 10072 			 * for now, succeed this port attach;
10108 10073 			 * when the transport has a new map,
10109 10074 			 * it'll send us a state change then
10110 10075 */
10111 10076 fcp_log(CE_WARN, pptr->port_dip,
10112 10077 "!failed to get port map : %s", msg);
10113 10078
10114 10079 res = DDI_SUCCESS;
10115 10080 break; /* go return result */
10116 10081 }
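		/*
		 * fc_ulp_getportmap() may have replaced tmp_list with a
		 * larger map; track the size actually allocated so the
		 * buffer is freed with the correct length below.
		 */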
10117 10082 if (max_cnt > alloc_cnt) {
10118 10083 alloc_cnt = max_cnt;
10119 10084 }
10120 10085
10121 10086 /*
10122 10087 * We are now going to call fcp_statec_callback() ourselves.
10123 10088 * By issuing this call we are trying to kick off the enumera-
10124 10089 * tion process.
10125 10090 */
10126 10091 /*
10127 10092 * let the state change callback do the SCSI device
10128 10093 * discovery and create the devinfos
10129 10094 */
10130 10095 fcp_statec_callback(ulph, pptr->port_fp_handle,
10131 10096 pptr->port_phys_state, pptr->port_topology, tmp_list,
10132 10097 max_cnt, pptr->port_id);
10133 10098
10134 10099 res = DDI_SUCCESS;
10135 10100 break;
10136 10101 }
10137 10102
10138 10103 default:
10139 10104 /* unknown port state */
10140 10105 fcp_log(CE_WARN, pptr->port_dip,
10141 10106 "!fcp%d: invalid port state at attach=0x%x",
10142 10107 instance, pptr->port_phys_state);
10143 10108
10144 10109 mutex_enter(&pptr->port_mutex);
10145 10110 pptr->port_phys_state = FCP_STATE_OFFLINE;
10146 10111 mutex_exit(&pptr->port_mutex);
10147 10112
10148 10113 res = DDI_SUCCESS;
10149 10114 break;
10150 10115 }
10151 10116
10152 10117 /* free temp list if used */
10153 10118 if (tmp_list != NULL) {
10154 10119 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10155 10120 }
10156 10121
10157 10122 /* note the attach time */
10158 10123 pptr->port_attach_time = ddi_get_lbolt64();
10159 10124
10160 10125 /* all done */
10161 10126 return (res);
10162 10127
10163 10128 /* a failure we have to clean up after */
10164 10129 fail:
10165 10130 fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");
10166 10131
10167 10132 if (soft_state_linked) {
10168 10133 /* remove this fcp_port from the linked list */
10169 10134 (void) fcp_soft_state_unlink(pptr);
10170 10135 }
10171 10136
10172 10137 /* unbind and free event set */
10173 10138 if (pptr->port_ndi_event_hdl) {
10174 10139 if (event_bind) {
10175 10140 (void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10176 10141 &pptr->port_ndi_events, NDI_SLEEP);
10177 10142 }
10178 10143 (void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10179 10144 }
10180 10145
10181 10146 if (pptr->port_ndi_event_defs) {
10182 10147 (void) kmem_free(pptr->port_ndi_event_defs,
10183 10148 sizeof (fcp_ndi_event_defs));
10184 10149 }
10185 10150
10186 10151 /*
10187 10152 * Clean up mpxio stuff
10188 10153 */
10189 10154 if (pptr->port_mpxio) {
10190 10155 (void) mdi_phci_unregister(pptr->port_dip, 0);
10191 10156 pptr->port_mpxio--;
10192 10157 }
10193 10158
10194 10159 /* undo SCSI HBA setup */
10195 10160 if (hba_attached) {
10196 10161 (void) scsi_hba_detach(pptr->port_dip);
10197 10162 }
10198 10163 if (pptr->port_tran != NULL) {
10199 10164 scsi_hba_tran_free(pptr->port_tran);
10200 10165 }
10201 10166
10202 10167 mutex_enter(&fcp_global_mutex);
10203 10168
10204 10169 /*
10205 10170 * We check soft_state_linked, because it is incremented right before
10206 10171 	 * we increment fcp_watchdog_init. Therefore, we know that if
10207 10172 	 * soft_state_linked is still FALSE, we do not want to decrement
10208 10173 * fcp_watchdog_init or possibly call untimeout.
10209 10174 */
10210 10175
10211 10176 if (soft_state_linked) {
10212 10177 if (--fcp_watchdog_init == 0) {
10213 10178 timeout_id_t tid = fcp_watchdog_id;
10214 10179
10215 10180 mutex_exit(&fcp_global_mutex);
10216 10181 (void) untimeout(tid);
10217 10182 } else {
10218 10183 mutex_exit(&fcp_global_mutex);
10219 10184 }
10220 10185 } else {
10221 10186 mutex_exit(&fcp_global_mutex);
10222 10187 }
10223 10188
10224 10189 if (mutex_initted) {
10225 10190 mutex_destroy(&pptr->port_mutex);
10226 10191 mutex_destroy(&pptr->port_pkt_mutex);
10227 10192 }
10228 10193
10229 10194 if (tmp_list != NULL) {
10230 10195 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10231 10196 }
10232 10197
10233 10198 /* this makes pptr invalid */
10234 10199 ddi_soft_state_free(fcp_softstate, instance);
10235 10200
10236 10201 return (DDI_FAILURE);
10237 10202 }
10238 10203
10239 10204
10240 10205 static int
10241 10206 fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
10242 10207 {
10243 10208 int count = 0;
10244 10209
10245 10210 mutex_enter(&pptr->port_mutex);
10246 10211
10247 10212 /*
10248 10213 * if the port is powered down or suspended, nothing else
10249 10214 * to do; just return.
10250 10215 */
10251 10216 if (flag != FCP_STATE_DETACHING) {
10252 10217 if (pptr->port_state & (FCP_STATE_POWER_DOWN |
10253 10218 FCP_STATE_SUSPENDED)) {
10254 10219 pptr->port_state |= flag;
10255 10220 mutex_exit(&pptr->port_mutex);
10256 10221 return (FC_SUCCESS);
10257 10222 }
10258 10223 }
10259 10224
10260 10225 if (pptr->port_state & FCP_STATE_IN_MDI) {
10261 10226 mutex_exit(&pptr->port_mutex);
10262 10227 return (FC_FAILURE);
10263 10228 }
10264 10229
10265 10230 FCP_TRACE(fcp_logq, pptr->port_instbuf,
10266 10231 fcp_trace, FCP_BUF_LEVEL_2, 0,
10267 10232 "fcp_handle_port_detach: port is detaching");
10268 10233
10269 10234 pptr->port_state |= flag;
10270 10235
10271 10236 /*
10272 10237 	 * Wait for any ongoing reconfig/ipkt to complete; that
10273 10238 	 * ensures that freeing the targets/luns is safe.
10274 10239 * No more ref to this port should happen from statec/ioctl
10275 10240 * after that as it was removed from the global port list.
10276 10241 */
10277 10242 while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10278 10243 (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10279 10244 /*
10280 10245 * Let's give sufficient time for reconfig/ipkt
10281 10246 * to complete.
10282 10247 */
10283 10248 if (count++ >= FCP_ICMD_DEADLINE) {
10284 10249 break;
10285 10250 }
10286 10251 mutex_exit(&pptr->port_mutex);
10287 10252 delay(drv_usectohz(1000000));
10288 10253 mutex_enter(&pptr->port_mutex);
10289 10254 }
10290 10255
10291 10256 /*
10292 10257 * if the driver is still busy then fail to
10293 10258 * suspend/power down.
10294 10259 */
10295 10260 if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10296 10261 (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10297 10262 pptr->port_state &= ~flag;
10298 10263 mutex_exit(&pptr->port_mutex);
10299 10264 return (FC_FAILURE);
10300 10265 }
10301 10266
10302 10267 if (flag == FCP_STATE_DETACHING) {
10303 10268 pptr = fcp_soft_state_unlink(pptr);
10304 10269 ASSERT(pptr != NULL);
10305 10270 }
10306 10271
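	/*
	 * Bump the link generation count, force the port offline and mark
	 * all LUNs busy, as if the link had gone down.
	 */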
10307 10272 pptr->port_link_cnt++;
10308 10273 pptr->port_state |= FCP_STATE_OFFLINE;
10309 10274 pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
10310 10275
10311 10276 fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
10312 10277 FCP_CAUSE_LINK_DOWN);
10313 10278 mutex_exit(&pptr->port_mutex);
10314 10279
10315 10280 /* kill watch dog timer if we're the last */
10316 10281 mutex_enter(&fcp_global_mutex);
10317 10282 if (--fcp_watchdog_init == 0) {
10318 10283 timeout_id_t tid = fcp_watchdog_id;
10319 10284 mutex_exit(&fcp_global_mutex);
10320 10285 (void) untimeout(tid);
10321 10286 } else {
10322 10287 mutex_exit(&fcp_global_mutex);
10323 10288 }
10324 10289
10325 10290 /* clean up the port structures */
10326 10291 if (flag == FCP_STATE_DETACHING) {
10327 10292 fcp_cleanup_port(pptr, instance);
10328 10293 }
10329 10294
10330 10295 return (FC_SUCCESS);
10331 10296 }
10332 10297
10333 10298
10334 10299 static void
10335 10300 fcp_cleanup_port(struct fcp_port *pptr, int instance)
10336 10301 {
10337 10302 ASSERT(pptr != NULL);
10338 10303
10339 10304 /* unbind and free event set */
10340 10305 if (pptr->port_ndi_event_hdl) {
10341 10306 (void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10342 10307 &pptr->port_ndi_events, NDI_SLEEP);
10343 10308 (void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10344 10309 }
10345 10310
10346 10311 if (pptr->port_ndi_event_defs) {
10347 10312 (void) kmem_free(pptr->port_ndi_event_defs,
10348 10313 sizeof (fcp_ndi_event_defs));
10349 10314 }
10350 10315
10351 10316 /* free the lun/target structures and devinfos */
10352 10317 fcp_free_targets(pptr);
10353 10318
10354 10319 /*
10355 10320 * Clean up mpxio stuff
10356 10321 */
10357 10322 if (pptr->port_mpxio) {
10358 10323 (void) mdi_phci_unregister(pptr->port_dip, 0);
10359 10324 pptr->port_mpxio--;
10360 10325 }
10361 10326
10362 10327 /* clean up SCSA stuff */
10363 10328 (void) scsi_hba_detach(pptr->port_dip);
10364 10329 if (pptr->port_tran != NULL) {
10365 10330 scsi_hba_tran_free(pptr->port_tran);
10366 10331 }
10367 10332
10368 10333 #ifdef KSTATS_CODE
10369 10334 /* clean up kstats */
10370 10335 if (pptr->fcp_ksp != NULL) {
10371 10336 kstat_delete(pptr->fcp_ksp);
10372 10337 }
10373 10338 #endif
10374 10339
10375 10340 /* clean up soft state mutexes/condition variables */
10376 10341 mutex_destroy(&pptr->port_mutex);
10377 10342 mutex_destroy(&pptr->port_pkt_mutex);
10378 10343
10379 10344 /* all done with soft state */
10380 10345 ddi_soft_state_free(fcp_softstate, instance);
10381 10346 }
10382 10347
10383 10348 /*
10384 10349 * Function: fcp_kmem_cache_constructor
10385 10350 *
10386 10351 * Description: This function allocates and initializes the resources required
10387 10352 	 *		to build a scsi_pkt structure for the target driver.  The result
10388 10353 * of the allocation and initialization will be cached in the
10389 10354 * memory cache. As DMA resources may be allocated here, that
10390 10355 * means DMA resources will be tied up in the cache manager.
10391 10356 * This is a tradeoff that has been made for performance reasons.
10392 10357 *
10393 10358 	 * Argument:	*pkt		scsi_pkt to preinitialize.
10394 10359 	 *		*tran		SCSA transport; tran_hba_private is the fcp_port.
10395 10360 * kmflags Value passed to kmem_cache_alloc() and
10396 10361 * propagated to the constructor.
10397 10362 *
10398 10363 * Return Value: 0 Allocation/Initialization was successful.
10399 10364 * -1 Allocation or Initialization failed.
10400 10365 *
10401 10366 *
10402 10367 * If the returned value is 0, the buffer is initialized like this:
10403 10368 *
10404 10369 * +================================+
10405 10370 * +----> | struct scsi_pkt |
10406 10371 * | | |
10407 10372 * | +--- | pkt_ha_private |
10408 10373 * | | | |
10409 10374 * | | +================================+
10410 10375 * | |
10411 10376 * | | +================================+
10412 10377 * | +--> | struct fcp_pkt | <---------+
10413 10378 * | | | |
10414 10379 * +----- | cmd_pkt | |
10415 10380 * | cmd_fp_pkt | ---+ |
10416 10381 * +-------->| cmd_fcp_rsp[] | | |
10417 10382 * | +--->| cmd_fcp_cmd[] | | |
10418 10383 * | | |--------------------------------| | |
10419 10384 * | | | struct fc_packet | <--+ |
10420 10385 * | | | | |
10421 10386 * | | | pkt_ulp_private | ----------+
10422 10387 * | | | pkt_fca_private | -----+
10423 10388 * | | | pkt_data_cookie | ---+ |
10424 10389 * | | | pkt_cmdlen | | |
10425 10390 * | |(a) | pkt_rsplen | | |
10426 10391 * | +----| .......... pkt_cmd ........... | ---|-|---------------+
10427 10392 * | (b) | pkt_cmd_cookie | ---|-|----------+ |
10428 10393 * +---------| .......... pkt_resp .......... | ---|-|------+ | |
10429 10394 * | pkt_resp_cookie | ---|-|--+ | | |
10430 10395 * | pkt_cmd_dma | | | | | | |
10431 10396 * | pkt_cmd_acc | | | | | | |
10432 10397 * +================================+ | | | | | |
10433 10398 * | dma_cookies | <--+ | | | | |
10434 10399 * | | | | | | |
10435 10400 * +================================+ | | | | |
10436 10401 * | fca_private | <----+ | | | |
10437 10402 * | | | | | |
10438 10403 * +================================+ | | | |
10439 10404 * | | | |
10440 10405 * | | | |
10441 10406 * +================================+ (d) | | | |
10442 10407 * | fcp_resp cookies | <-------+ | | |
10443 10408 * | | | | |
10444 10409 * +================================+ | | |
10445 10410 * | | |
10446 10411 * +================================+ (d) | | |
10447 10412 * | fcp_resp | <-----------+ | |
10448 10413 * | (DMA resources associated) | | |
10449 10414 * +================================+ | |
10450 10415 * | |
10451 10416 * | |
10452 10417 * | |
10453 10418 * +================================+ (c) | |
10454 10419 * | fcp_cmd cookies | <---------------+ |
10455 10420 * | | |
10456 10421 * +================================+ |
10457 10422 * |
10458 10423 * +================================+ (c) |
10459 10424 * | fcp_cmd | <--------------------+
10460 10425 * | (DMA resources associated) |
10461 10426 * +================================+
10462 10427 *
10463 10428 * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10464 10429  * (b) Only if DMA is NOT used for the FCP_RESP buffer.
10465 10430  * (c) Only if DMA is used for the FCP_CMD buffer.
10466 10431  * (d) Only if DMA is used for the FCP_RESP buffer.
10467 10432 */
10468 10433 static int
10469 10434 fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
10470 10435 int kmflags)
10471 10436 {
10472 10437 struct fcp_pkt *cmd;
10473 10438 struct fcp_port *pptr;
10474 10439 fc_packet_t *fpkt;
10475 10440
10476 10441 pptr = (struct fcp_port *)tran->tran_hba_private;
10477 10442 cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
10478 10443 bzero(cmd, tran->tran_hba_len);
10479 10444
10480 10445 cmd->cmd_pkt = pkt;
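	/* Point the SCSA CDB pointer at the CDB area of the FCP_CMD payload. */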
10481 10446 pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
10482 10447 fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
10483 10448 cmd->cmd_fp_pkt = fpkt;
10484 10449
10485 10450 cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
10486 10451 cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
10487 10452 cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
10488 10453 sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);
10489 10454
10490 10455 fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
10491 10456 sizeof (struct fcp_pkt));
10492 10457
10493 10458 fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
10494 10459 fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
10495 10460
10496 10461 if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
10497 10462 /*
10498 10463 * The underlying HBA doesn't want to DMA the fcp_cmd or
10499 10464 * fcp_resp. The transfer of information will be done by
10500 10465 * bcopy.
10501 10466 		 * The naming of the flag (which is actually a value) is
10502 10467 * unfortunate. FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
10503 10468 * DMA" but instead "NO DMA".
10504 10469 */
10505 10470 fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
10506 10471 fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
10507 10472 fpkt->pkt_resp = cmd->cmd_fcp_rsp;
10508 10473 } else {
10509 10474 /*
10510 10475 		 * The underlying HBA will DMA the fcp_cmd and fcp_resp
10511 10476 		 * buffers.  A buffer is allocated for each one using the
10512 10477 		 * ddi_dma_* interfaces.
10513 10478 */
10514 10479 if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
10515 10480 return (-1);
10516 10481 }
10517 10482 }
10518 10483
10519 10484 return (0);
10520 10485 }
10521 10486
10522 10487 /*
10523 10488 * Function: fcp_kmem_cache_destructor
10524 10489 *
10525 10490 * Description: Called by the destructor of the cache managed by SCSA.
10526 10491 * All the resources pre-allocated in fcp_pkt_constructor
10527 10492 	 *		All the resources pre-allocated in fcp_kmem_cache_constructor,
10528 10493 	 *		and the data pre-initialized there,
10529 10494 *
10530 10495 	 * Argument:	*pkt		scsi_pkt to uninitialize.
10531 10496 	 *		*tran		SCSA transport; tran_hba_private is the fcp_port.
10532 10497 *
10533 10498 * Return Value: None
10534 10499 *
10535 10500 * Context: kernel
10536 10501 */
10537 10502 static void
10538 10503 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10539 10504 {
10540 10505 struct fcp_pkt *cmd;
10541 10506 struct fcp_port *pptr;
10542 10507
10543 10508 pptr = (struct fcp_port *)(tran->tran_hba_private);
10544 10509 cmd = pkt->pkt_ha_private;
10545 10510
10546 10511 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10547 10512 /*
10548 10513 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10549 10514 * buffer and DMA resources allocated to do so are released.
10550 10515 */
10551 10516 fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10552 10517 }
10553 10518 }
10554 10519
10555 10520 /*
10556 10521 * Function: fcp_alloc_cmd_resp
10557 10522 *
10558 10523 	 * Description: This function allocates an FCP_CMD and FCP_RESP buffer that
10559 10524 	 *		will be DMAed by the HBA.  The buffers are allocated according
10560 10525 	 *		to the DMA requirements of the HBA.  The buffers allocated will
10561 10526 * also be bound. DMA resources are allocated in the process.
10562 10527 * They will be released by fcp_free_cmd_resp().
10563 10528 *
10564 10529 * Argument: *pptr FCP port.
10565 10530 * *fpkt fc packet for which the cmd and resp packet should be
10566 10531 * allocated.
10567 10532 * flags Allocation flags.
10568 10533 *
10569 10534 * Return Value: FC_FAILURE
10570 10535 * FC_SUCCESS
10571 10536 *
10572 10537 * Context: User or Kernel context only if flags == KM_SLEEP.
10573 10538 * Interrupt context if the KM_SLEEP is not specified.
10574 10539 */
10575 10540 static int
10576 10541 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10577 10542 {
10578 10543 int rval;
10579 10544 int cmd_len;
10580 10545 int resp_len;
10581 10546 ulong_t real_len;
10582 10547 int (*cb) (caddr_t);
10583 10548 ddi_dma_cookie_t pkt_cookie;
10584 10549 ddi_dma_cookie_t *cp;
10585 10550 uint32_t cnt;
10586 10551
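	/* Map the caller's kmem flag onto the DDI DMA callback semantics. */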
10587 10552 cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10588 10553
10589 10554 cmd_len = fpkt->pkt_cmdlen;
10590 10555 resp_len = fpkt->pkt_rsplen;
10591 10556
10592 10557 ASSERT(fpkt->pkt_cmd_dma == NULL);
10593 10558
10594 10559 /* Allocation of a DMA handle used in subsequent calls. */
10595 10560 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10596 10561 cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10597 10562 return (FC_FAILURE);
10598 10563 }
10599 10564
10600 10565 /* A buffer is allocated that satisfies the DMA requirements. */
10601 10566 rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10602 10567 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10603 10568 (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10604 10569
10605 10570 if (rval != DDI_SUCCESS) {
10606 10571 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10607 10572 return (FC_FAILURE);
10608 10573 }
10609 10574
10610 10575 if (real_len < cmd_len) {
10611 10576 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10612 10577 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10613 10578 return (FC_FAILURE);
10614 10579 }
10615 10580
10616 10581 /* The buffer allocated is DMA bound. */
10617 10582 rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10618 10583 fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10619 10584 cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10620 10585
10621 10586 if (rval != DDI_DMA_MAPPED) {
10622 10587 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10623 10588 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10624 10589 return (FC_FAILURE);
10625 10590 }
10626 10591
10627 10592 if (fpkt->pkt_cmd_cookie_cnt >
10628 10593 pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10629 10594 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10630 10595 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10631 10596 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10632 10597 return (FC_FAILURE);
10633 10598 }
10634 10599
10635 10600 ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10636 10601
10637 10602 /*
10638 10603 * The buffer where the scatter/gather list is going to be built is
10639 10604 * allocated.
10640 10605 */
10641 10606 cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10642 10607 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10643 10608 KM_NOSLEEP);
10644 10609
10645 10610 if (cp == NULL) {
10646 10611 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10647 10612 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10648 10613 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10649 10614 return (FC_FAILURE);
10650 10615 }
10651 10616
10652 10617 /*
10653 10618 * The scatter/gather list for the buffer we just allocated is built
10654 10619 * here.
10655 10620 */
10656 10621 *cp = pkt_cookie;
10657 10622 cp++;
10658 10623
10659 10624 for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10660 10625 ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10661 10626 &pkt_cookie);
10662 10627 *cp = pkt_cookie;
10663 10628 }
10664 10629
10665 10630 ASSERT(fpkt->pkt_resp_dma == NULL);
10666 10631 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10667 10632 cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10668 10633 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10669 10634 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10670 10635 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10671 10636 return (FC_FAILURE);
10672 10637 }
10673 10638
10674 10639 rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10675 10640 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10676 10641 (caddr_t *)&fpkt->pkt_resp, &real_len,
10677 10642 &fpkt->pkt_resp_acc);
10678 10643
10679 10644 if (rval != DDI_SUCCESS) {
10680 10645 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10681 10646 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10682 10647 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10683 10648 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10684 10649 kmem_free(fpkt->pkt_cmd_cookie,
10685 10650 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10686 10651 return (FC_FAILURE);
10687 10652 }
10688 10653
10689 10654 if (real_len < resp_len) {
10690 10655 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10691 10656 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10692 10657 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10693 10658 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10694 10659 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10695 10660 kmem_free(fpkt->pkt_cmd_cookie,
10696 10661 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10697 10662 return (FC_FAILURE);
10698 10663 }
10699 10664
10700 10665 rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10701 10666 fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10702 10667 cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10703 10668
10704 10669 if (rval != DDI_DMA_MAPPED) {
10705 10670 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10706 10671 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10707 10672 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10708 10673 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10709 10674 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10710 10675 kmem_free(fpkt->pkt_cmd_cookie,
10711 10676 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10712 10677 return (FC_FAILURE);
10713 10678 }
10714 10679
10715 10680 if (fpkt->pkt_resp_cookie_cnt >
10716 10681 pptr->port_resp_dma_attr.dma_attr_sgllen) {
10717 10682 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10718 10683 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10719 10684 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10720 10685 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10721 10686 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10722 10687 kmem_free(fpkt->pkt_cmd_cookie,
10723 10688 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10724 10689 return (FC_FAILURE);
10725 10690 }
10726 10691
10727 10692 ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10728 10693
10729 10694 cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10730 10695 fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10731 10696 KM_NOSLEEP);
10732 10697
10733 10698 if (cp == NULL) {
10734 10699 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10735 10700 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10736 10701 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10737 10702 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10738 10703 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10739 10704 kmem_free(fpkt->pkt_cmd_cookie,
10740 10705 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10741 10706 return (FC_FAILURE);
10742 10707 }
10743 10708
10744 10709 *cp = pkt_cookie;
10745 10710 cp++;
10746 10711
10747 10712 for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10748 10713 ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10749 10714 &pkt_cookie);
10750 10715 *cp = pkt_cookie;
10751 10716 }
10752 10717
10753 10718 return (FC_SUCCESS);
10754 10719 }
10755 10720
10756 10721 /*
10757 10722 * Function: fcp_free_cmd_resp
10758 10723 *
10759 10724 * Description: This function releases the FCP_CMD and FCP_RESP buffer
10760 10725 * allocated by fcp_alloc_cmd_resp() and all the resources
10761 10726 * associated with them. That includes the DMA resources and the
10762 10727 * buffer allocated for the cookies of each one of them.
10763 10728 *
10764 10729 * Argument: *pptr FCP port context.
10765 10730 * *fpkt fc packet containing the cmd and resp packet
10766 10731 * to be released.
10767 10732 *
10768 10733 * Return Value: None
10769 10734 *
10770 10735 * Context: Interrupt, User and Kernel context.
10771 10736 */
10772 10737 /* ARGSUSED */
10773 10738 static void
10774 10739 fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
10775 10740 {
10776 10741 ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
10777 10742
10778 10743 if (fpkt->pkt_resp_dma) {
10779 10744 (void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
10780 10745 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10781 10746 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10782 10747 }
10783 10748
10784 10749 if (fpkt->pkt_resp_cookie) {
10785 10750 kmem_free(fpkt->pkt_resp_cookie,
10786 10751 fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
10787 10752 fpkt->pkt_resp_cookie = NULL;
10788 10753 }
10789 10754
10790 10755 if (fpkt->pkt_cmd_dma) {
10791 10756 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10792 10757 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10793 10758 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10794 10759 }
10795 10760
10796 10761 if (fpkt->pkt_cmd_cookie) {
10797 10762 kmem_free(fpkt->pkt_cmd_cookie,
10798 10763 fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
10799 10764 fpkt->pkt_cmd_cookie = NULL;
10800 10765 }
10801 10766 }
10802 10767
10803 10768
10804 10769 /*
10805 10770 * called by the transport to do our own target initialization
10806 10771 *
10807 10772 * can acquire and release the global mutex
10808 10773 */
10809 10774 /* ARGSUSED */
10810 10775 static int
10811 10776 fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10812 10777 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10813 10778 {
10814 10779 uchar_t *bytes;
10815 10780 uint_t nbytes;
10816 10781 uint16_t lun_num;
10817 10782 struct fcp_tgt *ptgt;
10818 10783 struct fcp_lun *plun;
10819 10784 struct fcp_port *pptr = (struct fcp_port *)
10820 10785 hba_tran->tran_hba_private;
10821 10786
10822 10787 ASSERT(pptr != NULL);
10823 10788
10824 10789 FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10825 10790 FCP_BUF_LEVEL_8, 0,
10826 10791 "fcp_phys_tgt_init: called for %s (instance %d)",
10827 10792 ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10828 10793
10829 10794 /* get our port WWN property */
10830 10795 bytes = NULL;
10831 10796 if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10832 10797 PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10833 10798 (nbytes != FC_WWN_SIZE)) {
10834 10799 /* no port WWN property */
10835 10800 FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10836 10801 FCP_BUF_LEVEL_8, 0,
10837 10802 "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
10838 10803 " for %s (instance %d): bytes=%p nbytes=%x",
10839 10804 ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
10840 10805 nbytes);
10841 10806
10842 10807 if (bytes != NULL) {
10843 10808 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10844 10809 }
10845 10810
10846 10811 return (DDI_NOT_WELL_FORMED);
10847 10812 }
10848 10813 ASSERT(bytes != NULL);
10849 10814
10850 10815 lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10851 10816 LUN_PROP, 0xFFFF);
10852 10817 if (lun_num == 0xFFFF) {
10853 10818 FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10854 10819 FCP_BUF_LEVEL_8, 0,
10855 10820 "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
10856 10821 " for %s (instance %d)", ddi_get_name(tgt_dip),
10857 10822 ddi_get_instance(tgt_dip));
10858 10823
10859 10824 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10860 10825 return (DDI_NOT_WELL_FORMED);
10861 10826 }
10862 10827
10863 10828 mutex_enter(&pptr->port_mutex);
10864 10829 if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10865 10830 mutex_exit(&pptr->port_mutex);
10866 10831 FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10867 10832 FCP_BUF_LEVEL_8, 0,
10868 10833 "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
10869 10834 " for %s (instance %d)", ddi_get_name(tgt_dip),
10870 10835 ddi_get_instance(tgt_dip));
10871 10836
10872 10837 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10873 10838 return (DDI_FAILURE);
10874 10839 }
10875 10840
10876 10841 ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10877 10842 FC_WWN_SIZE) == 0);
10878 10843 ASSERT(plun->lun_num == lun_num);
10879 10844
10880 10845 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10881 10846
10882 10847 ptgt = plun->lun_tgt;
10883 10848
10884 10849 mutex_enter(&ptgt->tgt_mutex);
10885 10850 plun->lun_tgt_count++;
10886 10851 scsi_device_hba_private_set(sd, plun);
10887 10852 plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10888 10853 plun->lun_sd = sd;
10889 10854 mutex_exit(&ptgt->tgt_mutex);
10890 10855 mutex_exit(&pptr->port_mutex);
10891 10856
10892 10857 return (DDI_SUCCESS);
10893 10858 }
10894 10859
10895 10860 /*ARGSUSED*/
10896 10861 static int
10897 10862 fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10898 10863 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10899 10864 {
10900 10865 uchar_t *bytes;
10901 10866 uint_t nbytes;
10902 10867 uint16_t lun_num;
10903 10868 struct fcp_tgt *ptgt;
10904 10869 struct fcp_lun *plun;
10905 10870 struct fcp_port *pptr = (struct fcp_port *)
10906 10871 hba_tran->tran_hba_private;
10907 10872 child_info_t *cip;
10908 10873
10909 10874 ASSERT(pptr != NULL);
10910 10875
10911 10876 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10912 10877 fcp_trace, FCP_BUF_LEVEL_8, 0,
10913 10878 "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
10914 10879 " (tgt_dip %p)", ddi_get_name(tgt_dip),
10915 10880 ddi_get_instance(tgt_dip), hba_dip, tgt_dip);
10916 10881
10917 10882 cip = (child_info_t *)sd->sd_pathinfo;
10918 10883 if (cip == NULL) {
10919 10884 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10920 10885 fcp_trace, FCP_BUF_LEVEL_8, 0,
10921 10886 "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
10922 10887 " for %s (instance %d)", ddi_get_name(tgt_dip),
10923 10888 ddi_get_instance(tgt_dip));
10924 10889
10925 10890 return (DDI_NOT_WELL_FORMED);
10926 10891 }
10927 10892
10928 10893 /* get our port WWN property */
10929 10894 bytes = NULL;
10930 10895 if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10931 10896 PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10932 10897 (nbytes != FC_WWN_SIZE)) {
10933 10898 if (bytes) {
10934 10899 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10935 10900 }
10936 10901 return (DDI_NOT_WELL_FORMED);
10937 10902 }
10938 10903
10939 10904 ASSERT(bytes != NULL);
10940 10905
10941 10906 lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10942 10907 LUN_PROP, 0xFFFF);
10943 10908 if (lun_num == 0xFFFF) {
10944 10909 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10945 10910 fcp_trace, FCP_BUF_LEVEL_8, 0,
10946 10911 "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
10947 10912 " for %s (instance %d)", ddi_get_name(tgt_dip),
10948 10913 ddi_get_instance(tgt_dip));
10949 10914
10950 10915 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10951 10916 return (DDI_NOT_WELL_FORMED);
10952 10917 }
10953 10918
10954 10919 mutex_enter(&pptr->port_mutex);
10955 10920 if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10956 10921 mutex_exit(&pptr->port_mutex);
10957 10922 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10958 10923 fcp_trace, FCP_BUF_LEVEL_8, 0,
10959 10924 "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
10960 10925 " for %s (instance %d)", ddi_get_name(tgt_dip),
10961 10926 ddi_get_instance(tgt_dip));
10962 10927
10963 10928 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10964 10929 return (DDI_FAILURE);
10965 10930 }
10966 10931
10967 10932 ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10968 10933 FC_WWN_SIZE) == 0);
10969 10934 ASSERT(plun->lun_num == lun_num);
10970 10935
10971 10936 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10972 10937
10973 10938 ptgt = plun->lun_tgt;
10974 10939
10975 10940 mutex_enter(&ptgt->tgt_mutex);
10976 10941 plun->lun_tgt_count++;
10977 10942 scsi_device_hba_private_set(sd, plun);
10978 10943 plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10979 10944 plun->lun_sd = sd;
10980 10945 mutex_exit(&ptgt->tgt_mutex);
10981 10946 mutex_exit(&pptr->port_mutex);
10982 10947
10983 10948 return (DDI_SUCCESS);
10984 10949 }
10985 10950
10986 10951
10987 10952 /*
10988 10953 * called by the transport to do our own target initialization
10989 10954 *
10990 10955 * can acquire and release the global mutex
10991 10956 */
10992 10957 /* ARGSUSED */
10993 10958 static int
10994 10959 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10995 10960 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10996 10961 {
10997 10962 struct fcp_port *pptr = (struct fcp_port *)
10998 10963 hba_tran->tran_hba_private;
10999 10964 int rval;
11000 10965
11001 10966 ASSERT(pptr != NULL);
11002 10967
11003 10968 /*
11004 10969 * Child node is getting initialized. Look at the mpxio component
11005 10970 * type on the child device to see if this device is mpxio managed
11006 10971 * or not.
11007 10972 */
11008 10973 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
11009 10974 rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11010 10975 } else {
11011 10976 rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11012 10977 }
11013 10978
11014 10979 return (rval);
11015 10980 }
11016 10981
11017 10982
11018 10983 /* ARGSUSED */
11019 10984 static void
11020 10985 fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
11021 10986 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
11022 10987 {
11023 10988 struct fcp_lun *plun = scsi_device_hba_private_get(sd);
11024 10989 struct fcp_tgt *ptgt;
11025 10990
11026 10991 FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
11027 10992 fcp_trace, FCP_BUF_LEVEL_8, 0,
11028 10993 "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
11029 10994 ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
11030 10995 ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
11031 10996
11032 10997 if (plun == NULL) {
11033 10998 return;
11034 10999 }
11035 11000 ptgt = plun->lun_tgt;
11036 11001
11037 11002 ASSERT(ptgt != NULL);
11038 11003
11039 11004 mutex_enter(&ptgt->tgt_mutex);
11040 11005 ASSERT(plun->lun_tgt_count > 0);
11041 11006
11042 11007 if (--plun->lun_tgt_count == 0) {
11043 11008 plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
11044 11009 }
11045 11010 plun->lun_sd = NULL;
11046 11011 mutex_exit(&ptgt->tgt_mutex);
11047 11012 }
11048 11013
11049 11014 /*
11050 11015 * Function: fcp_scsi_start
11051 11016 *
11052 11017 * Description: This function is called by the target driver to request a
11053 11018 * command to be sent.
11054 11019 *
11055 11020 * Argument: *ap SCSI address of the device.
11056 11021 * *pkt SCSI packet containing the cmd to send.
11057 11022 *
11058 11023 * Return Value: TRAN_ACCEPT
11059 11024 * TRAN_BUSY
11060 11025 * TRAN_BADPKT
11061 11026 * TRAN_FATAL_ERROR
11062 11027 */
11063 11028 static int
11064 11029 fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
11065 11030 {
11066 11031 struct fcp_port *pptr = ADDR2FCP(ap);
11067 11032 struct fcp_lun *plun = ADDR2LUN(ap);
11068 11033 struct fcp_pkt *cmd = PKT2CMD(pkt);
11069 11034 struct fcp_tgt *ptgt = plun->lun_tgt;
11070 11035 int rval;
11071 11036
11072 11037 /* ensure command isn't already issued */
11073 11038 ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
11074 11039
11075 11040 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11076 11041 fcp_trace, FCP_BUF_LEVEL_9, 0,
11077 11042 "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);
11078 11043
11079 11044 /*
11080 11045 * It is strange that we enter the fcp_port mutex and the target
11081 11046 * mutex to check the lun state (which has a mutex of its own).
11082 11047 */
11083 11048 mutex_enter(&pptr->port_mutex);
11084 11049 mutex_enter(&ptgt->tgt_mutex);
11085 11050
11086 11051 /*
11087 11052 * If the device is offline and is not in the process of coming
11088 11053 * online, fail the request.
11089 11054 */
11090 11055
11091 11056 if ((plun->lun_state & FCP_LUN_OFFLINE) &&
11092 11057 !(plun->lun_state & FCP_LUN_ONLINING)) {
11093 11058 mutex_exit(&ptgt->tgt_mutex);
11094 11059 mutex_exit(&pptr->port_mutex);
11095 11060
11096 11061 if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
11097 11062 pkt->pkt_reason = CMD_DEV_GONE;
11098 11063 }
11099 11064
11100 11065 return (TRAN_FATAL_ERROR);
11101 11066 }
11102 11067 cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;
11103 11068
11104 11069 /*
11105 11070 * If we are suspended, kernel is trying to dump, so don't
11106 11071 * block, fail or defer requests - send them down right away.
11107 11072 * NOTE: If we are in panic (i.e. trying to dump), we can't
11108 11073 * assume we have been suspended. There is hardware such as
11109 11074 * the v880 that doesn't do PM. Thus, the check for
11110 11075 * ddi_in_panic.
11111 11076 *
11112 11077 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
11113 11078 * of changing. So, if we can queue the packet, do it. Eventually,
11114 11079 * either the device will have gone away or changed and we can fail
11115 11080 * the request, or we can proceed if the device didn't change.
11116 11081 *
11117 11082 * If the pd in the target or the packet is NULL it's probably
11118 11083 	 * because the device has gone away, so we allow the request to be
11119 11084 * put on the internal queue here in case the device comes back within
11120 11085 * the offline timeout. fctl will fix up the pd's if the tgt_pd_handle
11121 11086 	 * has gone NULL, while fcp deals with cases where pkt_pd is NULL. pkt_pd
11122 11087 * could be NULL because the device was disappearing during or since
11123 11088 * packet initialization.
11124 11089 */
11125 11090
11126 11091 if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
11127 11092 FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
11128 11093 (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
11129 11094 (ptgt->tgt_pd_handle == NULL) ||
11130 11095 (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
11131 11096 /*
11132 11097 * If ((LUN is busy AND
11133 11098 * LUN not suspended AND
11134 11099 * The system is not in panic state) OR
11135 11100 * (The port is coming up))
11136 11101 *
11137 11102 		 * We check to see if any of the flags FLAG_NOINTR or
11138 11103 * FLAG_NOQUEUE is set. If one of them is set the value
11139 11104 * returned will be TRAN_BUSY. If not, the request is queued.
11140 11105 */
11141 11106 mutex_exit(&ptgt->tgt_mutex);
11142 11107 mutex_exit(&pptr->port_mutex);
11143 11108
11144 11109 /* see if using interrupts is allowed (so queueing'll work) */
11145 11110 if (pkt->pkt_flags & FLAG_NOINTR) {
11146 11111 pkt->pkt_resid = 0;
11147 11112 return (TRAN_BUSY);
11148 11113 }
11149 11114 if (pkt->pkt_flags & FLAG_NOQUEUE) {
11150 11115 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11151 11116 fcp_trace, FCP_BUF_LEVEL_9, 0,
11152 11117 "fcp_scsi_start: lun busy for pkt %p", pkt);
11153 11118 return (TRAN_BUSY);
11154 11119 }
11155 11120 #ifdef DEBUG
11156 11121 mutex_enter(&pptr->port_pkt_mutex);
11157 11122 pptr->port_npkts++;
11158 11123 mutex_exit(&pptr->port_pkt_mutex);
11159 11124 #endif /* DEBUG */
11160 11125
11161 11126 		/* go queue up the pkt for later */
11162 11127 fcp_queue_pkt(pptr, cmd);
11163 11128 return (TRAN_ACCEPT);
11164 11129 }
11165 11130 cmd->cmd_state = FCP_PKT_ISSUED;
11166 11131
11167 11132 mutex_exit(&ptgt->tgt_mutex);
11168 11133 mutex_exit(&pptr->port_mutex);
11169 11134
11170 11135 /*
11171 11136 * Now that we released the mutexes, what was protected by them can
11172 11137 * change.
11173 11138 */
11174 11139
11175 11140 /*
11176 11141 * If there is a reconfiguration in progress, wait for it to complete.
11177 11142 */
11178 11143 fcp_reconfig_wait(pptr);
11179 11144
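	/*
	 * Record the command deadline in fcp_watchdog_time units; zero means
	 * the command is not timed.
	 */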
11180 11145 cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
11181 11146 pkt->pkt_time : 0;
11182 11147
11183 11148 /* prepare the packet */
11184 11149
11185 11150 fcp_prepare_pkt(pptr, cmd, plun);
11186 11151
11187 11152 if (cmd->cmd_pkt->pkt_time) {
11188 11153 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11189 11154 } else {
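		/* No timeout supplied by the target driver; default to 5 hours. */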
11190 11155 cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11191 11156 }
11192 11157
11193 11158 /*
11194 11159 * if interrupts aren't allowed (e.g. at dump time) then we'll
11195 11160 * have to do polled I/O
11196 11161 */
11197 11162 if (pkt->pkt_flags & FLAG_NOINTR) {
11198 11163 cmd->cmd_state &= ~FCP_PKT_ISSUED;
11199 11164 return (fcp_dopoll(pptr, cmd));
11200 11165 }
11201 11166
11202 11167 #ifdef DEBUG
11203 11168 mutex_enter(&pptr->port_pkt_mutex);
11204 11169 pptr->port_npkts++;
11205 11170 mutex_exit(&pptr->port_pkt_mutex);
11206 11171 #endif /* DEBUG */
11207 11172
11208 11173 rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
11209 11174 if (rval == FC_SUCCESS) {
11210 11175 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11211 11176 fcp_trace, FCP_BUF_LEVEL_9, 0,
11212 11177 "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
11213 11178 return (TRAN_ACCEPT);
11214 11179 }
11215 11180
11216 11181 cmd->cmd_state = FCP_PKT_IDLE;
11217 11182
11218 11183 #ifdef DEBUG
11219 11184 mutex_enter(&pptr->port_pkt_mutex);
11220 11185 pptr->port_npkts--;
11221 11186 mutex_exit(&pptr->port_pkt_mutex);
11222 11187 #endif /* DEBUG */
11223 11188
11224 11189 /*
11225 11190 * For lack of clearer definitions, choose
11226 11191 * between TRAN_BUSY and TRAN_FATAL_ERROR.
11227 11192 */
11228 11193
11229 11194 if (rval == FC_TRAN_BUSY) {
11230 11195 pkt->pkt_resid = 0;
11231 11196 rval = TRAN_BUSY;
11232 11197 } else {
11233 11198 mutex_enter(&ptgt->tgt_mutex);
11234 11199 if (plun->lun_state & FCP_LUN_OFFLINE) {
11235 11200 child_info_t *cip;
11236 11201
11237 11202 mutex_enter(&plun->lun_mutex);
11238 11203 cip = plun->lun_cip;
11239 11204 mutex_exit(&plun->lun_mutex);
11240 11205
11241 11206 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11242 11207 fcp_trace, FCP_BUF_LEVEL_6, 0,
11243 11208 "fcp_transport failed 2 for %x: %x; dip=%p",
11244 11209 plun->lun_tgt->tgt_d_id, rval, cip);
11245 11210
11246 11211 rval = TRAN_FATAL_ERROR;
11247 11212 } else {
11248 11213 if (pkt->pkt_flags & FLAG_NOQUEUE) {
11249 11214 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11250 11215 fcp_trace, FCP_BUF_LEVEL_9, 0,
11251 11216 "fcp_scsi_start: FC_BUSY for pkt %p",
11252 11217 pkt);
11253 11218 rval = TRAN_BUSY;
11254 11219 } else {
11255 11220 rval = TRAN_ACCEPT;
11256 11221 fcp_queue_pkt(pptr, cmd);
11257 11222 }
11258 11223 }
11259 11224 mutex_exit(&ptgt->tgt_mutex);
11260 11225 }
11261 11226
11262 11227 return (rval);
11263 11228 }
11264 11229
11265 11230 /*
11266 11231 * called by the transport to abort a packet
11267 11232 */
11268 11233 /*ARGSUSED*/
11269 11234 static int
11270 11235 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11271 11236 {
11272 11237 int tgt_cnt;
11273 11238 struct fcp_port *pptr = ADDR2FCP(ap);
11274 11239 struct fcp_lun *plun = ADDR2LUN(ap);
11275 11240 struct fcp_tgt *ptgt = plun->lun_tgt;
11276 11241
11277 11242 if (pkt == NULL) {
11278 11243 if (ptgt) {
11279 11244 mutex_enter(&ptgt->tgt_mutex);
11280 11245 tgt_cnt = ptgt->tgt_change_cnt;
11281 11246 mutex_exit(&ptgt->tgt_mutex);
11282 11247 fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11283 11248 return (TRUE);
11284 11249 }
11285 11250 }
11286 11251 return (FALSE);
11287 11252 }
11288 11253
11289 11254
11290 11255 /*
11291 11256 * Perform reset
11292 11257 */
11293 11258 int
11294 11259 fcp_scsi_reset(struct scsi_address *ap, int level)
11295 11260 {
11296 11261 int rval = 0;
11297 11262 struct fcp_port *pptr = ADDR2FCP(ap);
11298 11263 struct fcp_lun *plun = ADDR2LUN(ap);
11299 11264 struct fcp_tgt *ptgt = plun->lun_tgt;
11300 11265
11301 11266 if (level == RESET_ALL) {
11302 11267 if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
11303 11268 rval = 1;
11304 11269 }
11305 11270 } else if (level == RESET_TARGET || level == RESET_LUN) {
11306 11271 /*
11307 11272 * If we are in the middle of discovery, return
11308 11273 * SUCCESS as this target will be rediscovered
11309 11274 * anyway
11310 11275 */
11311 11276 mutex_enter(&ptgt->tgt_mutex);
11312 11277 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11313 11278 mutex_exit(&ptgt->tgt_mutex);
11314 11279 return (1);
11315 11280 }
11316 11281 mutex_exit(&ptgt->tgt_mutex);
11317 11282
11318 11283 if (fcp_reset_target(ap, level) == FC_SUCCESS) {
11319 11284 rval = 1;
11320 11285 }
11321 11286 }
11322 11287 return (rval);
11323 11288 }
11324 11289
11325 11290
11326 11291 /*
11327 11292 * called by the framework to get a SCSI capability
11328 11293 */
11329 11294 static int
11330 11295 fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
11331 11296 {
11332 11297 return (fcp_commoncap(ap, cap, 0, whom, 0));
11333 11298 }
11334 11299
11335 11300
11336 11301 /*
11337 11302 * called by the framework to set a SCSI capability
11338 11303 */
11339 11304 static int
11340 11305 fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
11341 11306 {
11342 11307 return (fcp_commoncap(ap, cap, value, whom, 1));
11343 11308 }
11344 11309
11345 11310 /*
11346 11311 * Function: fcp_pkt_setup
11347 11312 *
11348 11313 * Description: This function sets up the scsi_pkt structure passed by the
11349 11314 * caller. This function assumes fcp_pkt_constructor has been
11350 11315 	 *		caller. This function assumes fcp_kmem_cache_constructor has been
11351 11316 * successful this call will have the following results:
11352 11317 *
11353 11318 * - The resources needed that will be constant through out
11354 11319 * the whole transaction are allocated.
11355 11320 * - The fields that will be constant through out the whole
11356 11321 * transaction are initialized.
11357 11322 * - The scsi packet will be linked to the LUN structure
11358 11323 * addressed by the transaction.
11359 11324 *
11360 11325 * Argument:
11361 11326 * *pkt Pointer to a scsi_pkt structure.
11362 11327 * callback
11363 11328 * arg
11364 11329 *
11365 11330 * Return Value: 0 Success
11366 11331 * !0 Failure
11367 11332 *
11368 11333 * Context: Kernel context or interrupt context
11369 11334 */
11370 11335 /* ARGSUSED */
11371 11336 static int
11372 11337 fcp_pkt_setup(struct scsi_pkt *pkt,
11373 11338 int (*callback)(caddr_t arg),
11374 11339 caddr_t arg)
11375 11340 {
11376 11341 struct fcp_pkt *cmd;
11377 11342 struct fcp_port *pptr;
11378 11343 struct fcp_lun *plun;
11379 11344 struct fcp_tgt *ptgt;
11380 11345 int kf;
11381 11346 fc_packet_t *fpkt;
11382 11347 fc_frame_hdr_t *hp;
11383 11348
11384 11349 pptr = ADDR2FCP(&pkt->pkt_address);
11385 11350 plun = ADDR2LUN(&pkt->pkt_address);
11386 11351 ptgt = plun->lun_tgt;
11387 11352
11388 11353 cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11389 11354 fpkt = cmd->cmd_fp_pkt;
11390 11355
11391 11356 /*
11392 11357 * this request is for dma allocation only
11393 11358 */
11394 11359 /*
11395 11360 * First step of fcp_scsi_init_pkt: pkt allocation
11396 11361 * We determine if the caller is willing to wait for the
11397 11362 * resources.
11398 11363 */
11399 11364 kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
11400 11365
11401 11366 /*
11402 11367 * Selective zeroing of the pkt.
11403 11368 */
11404 11369 cmd->cmd_back = NULL;
11405 11370 cmd->cmd_next = NULL;
11406 11371
11407 11372 /*
11408 11373 * Zero out fcp command
11409 11374 */
11410 11375 bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));
11411 11376
11412 11377 cmd->cmd_state = FCP_PKT_IDLE;
11413 11378
11414 11379 fpkt = cmd->cmd_fp_pkt;
11415 11380 fpkt->pkt_data_acc = NULL;
11416 11381
11417 11382 /*
11418 11383 * When port_state is FCP_STATE_OFFLINE, remote_port (tgt_pd_handle)
11419 11384 	 * could be destroyed. We need to fail pkt_setup.
11420 11385 */
11421 11386 if (pptr->port_state & FCP_STATE_OFFLINE) {
11422 11387 return (-1);
11423 11388 }
11424 11389
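	/*
	 * Bind the fc packet to the target's remote port handle and let the
	 * transport initialize it; tgt_mutex keeps tgt_pd_handle stable here.
	 */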
11425 11390 mutex_enter(&ptgt->tgt_mutex);
11426 11391 fpkt->pkt_pd = ptgt->tgt_pd_handle;
11427 11392
11428 11393 if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
11429 11394 != FC_SUCCESS) {
11430 11395 mutex_exit(&ptgt->tgt_mutex);
11431 11396 return (-1);
11432 11397 }
11433 11398
11434 11399 mutex_exit(&ptgt->tgt_mutex);
11435 11400
11436 11401 	/* Fill in the Fibre Channel frame header */
11437 11402 hp = &fpkt->pkt_cmd_fhdr;
11438 11403 hp->r_ctl = R_CTL_COMMAND;
11439 11404 hp->rsvd = 0;
11440 11405 hp->type = FC_TYPE_SCSI_FCP;
11441 11406 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
11442 11407 hp->seq_id = 0;
11443 11408 hp->df_ctl = 0;
11444 11409 hp->seq_cnt = 0;
11445 11410 hp->ox_id = 0xffff;
11446 11411 hp->rx_id = 0xffff;
11447 11412 hp->ro = 0;
11448 11413
11449 11414 /*
11450 11415 * A doubly linked list (cmd_forw, cmd_back) is built
11451 11416 * out of every allocated packet on a per-lun basis
11452 11417 *
11453 11418 * The packets are maintained in the list so as to satisfy
11454 11419 * scsi_abort() requests. At present (which is unlikely to
11455 11420 * change in the future) nobody performs a real scsi_abort
11456 11421 * in the SCSI target drivers (as they don't keep the packets
11457 11422 * after doing scsi_transport - so they don't know how to
11458 11423 * abort a packet other than sending a NULL to abort all
11459 11424 * outstanding packets)
11460 11425 */
11461 11426 mutex_enter(&plun->lun_mutex);
11462 11427 if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
11463 11428 plun->lun_pkt_head->cmd_back = cmd;
11464 11429 } else {
11465 11430 plun->lun_pkt_tail = cmd;
11466 11431 }
11467 11432 plun->lun_pkt_head = cmd;
11468 11433 mutex_exit(&plun->lun_mutex);
11469 11434 return (0);
11470 11435 }
11471 11436
11472 11437 /*
11473 11438 * Function: fcp_pkt_teardown
11474 11439 *
11475 11440 * Description: This function releases a scsi_pkt structure and all the
11476 11441 * resources attached to it.
11477 11442 *
11478 11443 * Argument: *pkt Pointer to a scsi_pkt structure.
11479 11444 *
11480 11445 * Return Value: None
11481 11446 *
11482 11447 * Context: User, Kernel or Interrupt context.
11483 11448 */
11484 11449 static void
11485 11450 fcp_pkt_teardown(struct scsi_pkt *pkt)
11486 11451 {
11487 11452 struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
11488 11453 struct fcp_lun *plun = ADDR2LUN(&pkt->pkt_address);
11489 11454 struct fcp_pkt *cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11490 11455
11491 11456 /*
11492 11457 * Remove the packet from the per-lun list
11493 11458 */
11494 11459 mutex_enter(&plun->lun_mutex);
11495 11460 if (cmd->cmd_back) {
11496 11461 ASSERT(cmd != plun->lun_pkt_head);
11497 11462 cmd->cmd_back->cmd_forw = cmd->cmd_forw;
11498 11463 } else {
11499 11464 ASSERT(cmd == plun->lun_pkt_head);
11500 11465 plun->lun_pkt_head = cmd->cmd_forw;
11501 11466 }
11502 11467
11503 11468 if (cmd->cmd_forw) {
11504 11469 cmd->cmd_forw->cmd_back = cmd->cmd_back;
11505 11470 } else {
11506 11471 ASSERT(cmd == plun->lun_pkt_tail);
11507 11472 plun->lun_pkt_tail = cmd->cmd_back;
11508 11473 }
11509 11474
11510 11475 mutex_exit(&plun->lun_mutex);
11511 11476
11512 11477 (void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
11513 11478 }
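
The per-LUN packet list handled by fcp_pkt_setup() and fcp_pkt_teardown() above is a plain doubly linked list with head insertion and O(1) unlink. The sketch below restates that pattern in isolation; the "node" and "list" types are illustrative stand-ins, not fcp structures.

/*
 * Illustrative sketch of the list handling above; "node" and "list"
 * are hypothetical types standing in for fcp_pkt and fcp_lun.
 */
struct node {
	struct node	*forw;
	struct node	*back;
};

struct list {
	struct node	*head;
	struct node	*tail;
};

/* Insert at the head, as fcp_pkt_setup() does under lun_mutex. */
static void
list_insert_head(struct list *l, struct node *n)
{
	n->back = NULL;
	if ((n->forw = l->head) != NULL) {
		l->head->back = n;
	} else {
		l->tail = n;		/* list was empty */
	}
	l->head = n;
}

/* Unlink in O(1), as fcp_pkt_teardown() does under lun_mutex. */
static void
list_unlink(struct list *l, struct node *n)
{
	if (n->back != NULL) {
		n->back->forw = n->forw;
	} else {
		l->head = n->forw;	/* n was the head */
	}
	if (n->forw != NULL) {
		n->forw->back = n->back;
	} else {
		l->tail = n->back;	/* n was the tail */
	}
}
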
11514 11479
11515 11480 /*
11516 11481 * Routine for reset notification setup, to register or cancel.
11517 11482 * This function is called by SCSA
11518 11483 */
11519 11484 /*ARGSUSED*/
11520 11485 static int
11521 11486 fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
11522 11487 void (*callback)(caddr_t), caddr_t arg)
11523 11488 {
11524 11489 struct fcp_port *pptr = ADDR2FCP(ap);
11525 11490
11526 11491 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
11527 11492 &pptr->port_mutex, &pptr->port_reset_notify_listf));
11528 11493 }
11529 11494
11530 11495
11531 11496 static int
11532 11497 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11533 11498 ddi_eventcookie_t *event_cookiep)
11534 11499 {
11535 11500 struct fcp_port *pptr = fcp_dip2port(dip);
11536 11501
11537 11502 if (pptr == NULL) {
11538 11503 return (DDI_FAILURE);
11539 11504 }
11540 11505
11541 11506 return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11542 11507 event_cookiep, NDI_EVENT_NOPASS));
11543 11508 }
11544 11509
11545 11510
11546 11511 static int
11547 11512 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11548 11513 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11549 11514 ddi_callback_id_t *cb_id)
11550 11515 {
11551 11516 struct fcp_port *pptr = fcp_dip2port(dip);
11552 11517
11553 11518 if (pptr == NULL) {
11554 11519 return (DDI_FAILURE);
11555 11520 }
11556 11521
11557 11522 return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11558 11523 eventid, callback, arg, NDI_SLEEP, cb_id));
11559 11524 }
11560 11525
11561 11526
11562 11527 static int
11563 11528 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11564 11529 {
11565 11530
11566 11531 struct fcp_port *pptr = fcp_dip2port(dip);
11567 11532
11568 11533 if (pptr == NULL) {
11569 11534 return (DDI_FAILURE);
11570 11535 }
11571 11536 return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11572 11537 }
11573 11538
11574 11539
11575 11540 /*
11576 11541 * called by the transport to post an event
11577 11542 */
11578 11543 static int
11579 11544 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11580 11545 ddi_eventcookie_t eventid, void *impldata)
11581 11546 {
11582 11547 struct fcp_port *pptr = fcp_dip2port(dip);
11583 11548
11584 11549 if (pptr == NULL) {
11585 11550 return (DDI_FAILURE);
11586 11551 }
11587 11552
11588 11553 return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11589 11554 eventid, impldata));
11590 11555 }
11591 11556
11592 11557
11593 11558 /*
11594 11559  * In Fibre Channel a target in many cases has a one-to-one relation
11595 11560  * with a port identifier (also known as the D_ID, or as the AL_PA on
11596 11561  * a private loop). On Fibre Channel-to-SCSI bridge boxes a target
11597 11562  * reset will most likely reset all LUNs, which means a reset occurs
11598 11563  * on every SCSI device connected at the other end of the bridge.
11599 11564  * How best to handle that is a perennial topic of debate, and one
11600 11565  * can argue for any number of solutions depending on what one wants
11601 11566  * to optimize for.
11602 11567 *
11603 11568 * To stay on track and not digress much, here are the problems stated
11604 11569 * briefly:
11605 11570 *
11606 11571  *	SCSA doesn't define RESET_LUN; it defines RESET_TARGET, and
11607 11572  *	target drivers use RESET_TARGET even when their instance is
11608 11573  *	bound to a single LUN, which is arguably broken.
11609 11574 *
11610 11575 * FCP SCSI (the current spec) only defines RESET TARGET in the
11611 11576 * control fields of an FCP_CMND structure. It should have been
11612 11577 * fixed right there, giving flexibility to the initiators to
11613 11578 * minimize havoc that could be caused by resetting a target.
11614 11579 */
11615 11580 static int
11616 11581 fcp_reset_target(struct scsi_address *ap, int level)
11617 11582 {
11618 11583 int rval = FC_FAILURE;
11619 11584 char lun_id[25];
11620 11585 struct fcp_port *pptr = ADDR2FCP(ap);
11621 11586 struct fcp_lun *plun = ADDR2LUN(ap);
11622 11587 struct fcp_tgt *ptgt = plun->lun_tgt;
11623 11588 struct scsi_pkt *pkt;
11624 11589 struct fcp_pkt *cmd;
11625 11590 struct fcp_rsp *rsp;
11626 11591 uint32_t tgt_cnt;
11627 11592 struct fcp_rsp_info *rsp_info;
11628 11593 struct fcp_reset_elem *p;
11629 11594 int bval;
11630 11595
11631 11596 if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
11632 11597 KM_NOSLEEP)) == NULL) {
11633 11598 return (rval);
11634 11599 }
11635 11600
11636 11601 mutex_enter(&ptgt->tgt_mutex);
11637 11602 if (level == RESET_TARGET) {
11638 11603 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11639 11604 mutex_exit(&ptgt->tgt_mutex);
11640 11605 kmem_free(p, sizeof (struct fcp_reset_elem));
11641 11606 return (rval);
11642 11607 }
11643 11608 fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
11644 11609 (void) strcpy(lun_id, " ");
11645 11610 } else {
11646 11611 if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
11647 11612 mutex_exit(&ptgt->tgt_mutex);
11648 11613 kmem_free(p, sizeof (struct fcp_reset_elem));
11649 11614 return (rval);
11650 11615 }
11651 11616 fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
11652 11617
11653 11618 (void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
11654 11619 }
11655 11620 tgt_cnt = ptgt->tgt_change_cnt;
11656 11621
11657 11622 mutex_exit(&ptgt->tgt_mutex);
11658 11623
11659 11624 if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
11660 11625 0, 0, NULL, 0)) == NULL) {
11661 11626 kmem_free(p, sizeof (struct fcp_reset_elem));
11662 11627 mutex_enter(&ptgt->tgt_mutex);
11663 11628 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11664 11629 mutex_exit(&ptgt->tgt_mutex);
11665 11630 return (rval);
11666 11631 }
11667 11632 pkt->pkt_time = FCP_POLL_TIMEOUT;
11668 11633
11669 11634 /* fill in cmd part of packet */
11670 11635 cmd = PKT2CMD(pkt);
11671 11636 if (level == RESET_TARGET) {
11672 11637 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
11673 11638 } else {
11674 11639 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
11675 11640 }
11676 11641 cmd->cmd_fp_pkt->pkt_comp = NULL;
11677 11642 cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
11678 11643
11679 11644 /* prepare a packet for transport */
11680 11645 fcp_prepare_pkt(pptr, cmd, plun);
11681 11646
11682 11647 if (cmd->cmd_pkt->pkt_time) {
11683 11648 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11684 11649 } else {
11685 11650 cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11686 11651 }
11687 11652
11688 11653 (void) fc_ulp_busy_port(pptr->port_fp_handle);
11689 11654 bval = fcp_dopoll(pptr, cmd);
11690 11655 fc_ulp_idle_port(pptr->port_fp_handle);
11691 11656
11692 11657 	/* check the result of the polled command */
11693 11658 if (bval == TRAN_ACCEPT) {
11694 11659 int error = 3;
11695 11660
11696 11661 rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
11697 11662 rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
11698 11663 sizeof (struct fcp_rsp));
11699 11664
11700 11665 if (rsp->fcp_u.fcp_status.rsp_len_set) {
11701 11666 if (fcp_validate_fcp_response(rsp, pptr) ==
11702 11667 FC_SUCCESS) {
11703 11668 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
11704 11669 FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
11705 11670 sizeof (struct fcp_rsp), rsp_info,
11706 11671 cmd->cmd_fp_pkt->pkt_resp_acc,
11707 11672 sizeof (struct fcp_rsp_info));
11708 11673 }
11709 11674 if (rsp_info->rsp_code == FCP_NO_FAILURE) {
11710 11675 rval = FC_SUCCESS;
11711 11676 error = 0;
11712 11677 } else {
11713 11678 error = 1;
11714 11679 }
11715 11680 } else {
11716 11681 error = 2;
11717 11682 }
11718 11683 }
11719 11684
11720 11685 switch (error) {
11721 11686 case 0:
11722 11687 fcp_log(CE_WARN, pptr->port_dip,
11723 11688 "!FCP: WWN 0x%08x%08x %s reset successfully",
11724 11689 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11725 11690 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11726 11691 break;
11727 11692
11728 11693 case 1:
11729 11694 fcp_log(CE_WARN, pptr->port_dip,
11730 11695 "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11731 11696 " response code=%x",
11732 11697 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11733 11698 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11734 11699 rsp_info->rsp_code);
11735 11700 break;
11736 11701
11737 11702 case 2:
11738 11703 fcp_log(CE_WARN, pptr->port_dip,
11739 11704 "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11740 11705 " Bad FCP response values: rsvd1=%x,"
11741 11706 " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
11742 11707 " rsplen=%x, senselen=%x",
11743 11708 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11744 11709 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11745 11710 rsp->reserved_0, rsp->reserved_1,
11746 11711 rsp->fcp_u.fcp_status.reserved_0,
11747 11712 rsp->fcp_u.fcp_status.reserved_1,
11748 11713 rsp->fcp_response_len, rsp->fcp_sense_len);
11749 11714 break;
11750 11715
11751 11716 default:
11752 11717 fcp_log(CE_WARN, pptr->port_dip,
11753 11718 "!FCP: Reset to WWN 0x%08x%08x %s failed",
11754 11719 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11755 11720 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11756 11721 break;
11757 11722 }
11758 11723 }
11759 11724 scsi_destroy_pkt(pkt);
11760 11725
11761 11726 if (rval == FC_FAILURE) {
11762 11727 mutex_enter(&ptgt->tgt_mutex);
11763 11728 if (level == RESET_TARGET) {
11764 11729 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11765 11730 } else {
11766 11731 fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
11767 11732 }
11768 11733 mutex_exit(&ptgt->tgt_mutex);
11769 11734 kmem_free(p, sizeof (struct fcp_reset_elem));
11770 11735 return (rval);
11771 11736 }
11772 11737
11773 11738 mutex_enter(&pptr->port_mutex);
11774 11739 if (level == RESET_TARGET) {
11775 11740 p->tgt = ptgt;
11776 11741 p->lun = NULL;
11777 11742 } else {
11778 11743 p->tgt = NULL;
11779 11744 p->lun = plun;
11780 11745 }
11781 11746 p->tgt = ptgt;
11782 11747 p->tgt_cnt = tgt_cnt;
11783 11748 p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
11784 11749 p->next = pptr->port_reset_list;
11785 11750 pptr->port_reset_list = p;
11786 11751
11787 11752 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11788 11753 fcp_trace, FCP_BUF_LEVEL_3, 0,
11789 11754 "Notify ssd of the reset to reinstate the reservations");
11790 11755
11791 11756 scsi_hba_reset_notify_callback(&pptr->port_mutex,
11792 11757 &pptr->port_reset_notify_listf);
11793 11758
11794 11759 mutex_exit(&pptr->port_mutex);
11795 11760
11796 11761 return (rval);
11797 11762 }
11798 11763
11799 11764
11800 11765 /*
11801 11766 * called by fcp_getcap and fcp_setcap to get and set (respectively)
11802 11767 * SCSI capabilities
11803 11768 */
11804 11769 /* ARGSUSED */
11805 11770 static int
11806 11771 fcp_commoncap(struct scsi_address *ap, char *cap,
11807 11772 int val, int tgtonly, int doset)
11808 11773 {
11809 11774 struct fcp_port *pptr = ADDR2FCP(ap);
11810 11775 struct fcp_lun *plun = ADDR2LUN(ap);
11811 11776 struct fcp_tgt *ptgt = plun->lun_tgt;
11812 11777 int cidx;
11813 11778 int rval = FALSE;
11814 11779
11815 11780 if (cap == (char *)0) {
11816 11781 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11817 11782 fcp_trace, FCP_BUF_LEVEL_3, 0,
11818 11783 "fcp_commoncap: invalid arg");
11819 11784 return (rval);
11820 11785 }
11821 11786
11822 11787 if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
11823 11788 return (UNDEFINED);
11824 11789 }
11825 11790
11826 11791 /*
11827 11792 * Process setcap request.
11828 11793 */
11829 11794 if (doset) {
11830 11795 /*
11831 11796 * At present, we can only set binary (0/1) values
11832 11797 */
11833 11798 switch (cidx) {
11834 11799 case SCSI_CAP_ARQ:
11835 11800 if (val == 0) {
11836 11801 rval = FALSE;
11837 11802 } else {
11838 11803 rval = TRUE;
11839 11804 }
11840 11805 break;
11841 11806
11842 11807 case SCSI_CAP_LUN_RESET:
11843 11808 if (val) {
11844 11809 plun->lun_cap |= FCP_LUN_CAP_RESET;
11845 11810 } else {
11846 11811 plun->lun_cap &= ~FCP_LUN_CAP_RESET;
11847 11812 }
11848 11813 rval = TRUE;
11849 11814 break;
11850 11815
11851 11816 case SCSI_CAP_SECTOR_SIZE:
11852 11817 rval = TRUE;
11853 11818 break;
11854 11819 default:
11855 11820 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11856 11821 fcp_trace, FCP_BUF_LEVEL_4, 0,
11857 11822 "fcp_setcap: unsupported %d", cidx);
11858 11823 rval = UNDEFINED;
11859 11824 break;
11860 11825 }
11861 11826
11862 11827 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11863 11828 fcp_trace, FCP_BUF_LEVEL_5, 0,
11864 11829 "set cap: cap=%s, val/tgtonly/doset/rval = "
11865 11830 "0x%x/0x%x/0x%x/%d",
11866 11831 cap, val, tgtonly, doset, rval);
11867 11832
11868 11833 } else {
11869 11834 /*
11870 11835 * Process getcap request.
11871 11836 */
11872 11837 switch (cidx) {
11873 11838 case SCSI_CAP_DMA_MAX:
11874 11839 rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;
11875 11840
11876 11841 /*
11877 11842 			 * Need to make an adjustment: qlc reports a 64-bit
11878 11843 			 * value but st expects an int, so clamp it here
11879 11844 			 * since nobody wants to touch this. That still
11880 11845 			 * leaves a maximum single transfer length of
11881 11846 			 * 2 GB, which should last.
11882 11847 */
11883 11848
11884 11849 if (rval == -1) {
11885 11850 rval = MAX_INT_DMA;
11886 11851 }
11887 11852
11888 11853 break;
11889 11854
11890 11855 case SCSI_CAP_INITIATOR_ID:
11891 11856 rval = pptr->port_id;
11892 11857 break;
11893 11858
11894 11859 case SCSI_CAP_ARQ:
11895 11860 case SCSI_CAP_RESET_NOTIFICATION:
11896 11861 case SCSI_CAP_TAGGED_QING:
11897 11862 rval = TRUE;
11898 11863 break;
11899 11864
11900 11865 case SCSI_CAP_SCSI_VERSION:
11901 11866 rval = 3;
11902 11867 break;
11903 11868
11904 11869 case SCSI_CAP_INTERCONNECT_TYPE:
11905 11870 if (FC_TOP_EXTERNAL(pptr->port_topology) ||
11906 11871 (ptgt->tgt_hard_addr == 0)) {
11907 11872 rval = INTERCONNECT_FABRIC;
11908 11873 } else {
11909 11874 rval = INTERCONNECT_FIBRE;
11910 11875 }
11911 11876 break;
11912 11877
11913 11878 case SCSI_CAP_LUN_RESET:
11914 11879 rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
11915 11880 TRUE : FALSE;
11916 11881 break;
11917 11882
11918 11883 default:
11919 11884 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11920 11885 fcp_trace, FCP_BUF_LEVEL_4, 0,
11921 11886 "fcp_getcap: unsupported %d", cidx);
11922 11887 rval = UNDEFINED;
11923 11888 break;
11924 11889 }
11925 11890
11926 11891 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11927 11892 fcp_trace, FCP_BUF_LEVEL_8, 0,
11928 11893 "get cap: cap=%s, val/tgtonly/doset/rval = "
11929 11894 "0x%x/0x%x/0x%x/%d",
11930 11895 cap, val, tgtonly, doset, rval);
11931 11896 }
11932 11897
11933 11898 return (rval);
11934 11899 }
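
For reference, the getcap/setcap paths above are reached when a target or layered driver calls scsi_ifgetcap(9F)/scsi_ifsetcap(9F) on one of our children. A hedged sketch follows; the capability string is one recognized by scsi_hba_lookup_capstr(), and "devp" is an assumed, already attached scsi_device.

#include <sys/scsi/scsi.h>

/*
 * Sketch only: exercising fcp's getcap/setcap handling from above
 * SCSA.  "devp" is assumed to be a valid, attached scsi_device.
 */
static void
query_fcp_caps(struct scsi_device *devp)
{
	struct scsi_address	*ap = &devp->sd_address;
	int			lun_reset;

	/* getcap path: fcp_commoncap() is called with doset == 0 */
	lun_reset = scsi_ifgetcap(ap, "lun-reset", 1);

	/* setcap path: doset == 1; only binary values are accepted */
	if (lun_reset == 0) {
		(void) scsi_ifsetcap(ap, "lun-reset", 1, 1);
	}
}
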
11935 11900
11936 11901 /*
11937 11902 * called by the transport to get the port-wwn and lun
11938 11903 * properties of this device, and to create a "name" based on them
11939 11904 *
11940 11905 * these properties don't exist on sun4m
11941 11906 *
11942 11907 * return 1 for success else return 0
11943 11908 */
11944 11909 /* ARGSUSED */
11945 11910 static int
11946 11911 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11947 11912 {
11948 11913 int i;
11949 11914 int *lun;
11950 11915 int numChars;
11951 11916 uint_t nlun;
11952 11917 uint_t count;
11953 11918 uint_t nbytes;
11954 11919 uchar_t *bytes;
11955 11920 uint16_t lun_num;
11956 11921 uint32_t tgt_id;
11957 11922 char **conf_wwn;
11958 11923 char tbuf[(FC_WWN_SIZE << 1) + 1];
11959 11924 uchar_t barray[FC_WWN_SIZE];
11960 11925 dev_info_t *tgt_dip;
11961 11926 struct fcp_tgt *ptgt;
11962 11927 struct fcp_port *pptr;
11963 11928 struct fcp_lun *plun;
11964 11929
11965 11930 ASSERT(sd != NULL);
11966 11931 ASSERT(name != NULL);
11967 11932
11968 11933 tgt_dip = sd->sd_dev;
11969 11934 pptr = ddi_get_soft_state(fcp_softstate,
11970 11935 ddi_get_instance(ddi_get_parent(tgt_dip)));
11971 11936 if (pptr == NULL) {
11972 11937 return (0);
11973 11938 }
11974 11939
11975 11940 ASSERT(tgt_dip != NULL);
11976 11941
11977 11942 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11978 11943 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11979 11944 LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11980 11945 name[0] = '\0';
11981 11946 return (0);
11982 11947 }
11983 11948
11984 11949 if (nlun == 0) {
11985 11950 ddi_prop_free(lun);
11986 11951 return (0);
11987 11952 }
11988 11953
11989 11954 lun_num = lun[0];
11990 11955 ddi_prop_free(lun);
11991 11956
11992 11957 /*
11993 11958 * Lookup for .conf WWN property
11994 11959 */
11995 11960 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
11996 11961 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
11997 11962 &conf_wwn, &count) == DDI_PROP_SUCCESS) {
11998 11963 ASSERT(count >= 1);
11999 11964
12000 11965 fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
12001 11966 ddi_prop_free(conf_wwn);
12002 11967 mutex_enter(&pptr->port_mutex);
12003 11968 if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
12004 11969 mutex_exit(&pptr->port_mutex);
12005 11970 return (0);
12006 11971 }
12007 11972 ptgt = plun->lun_tgt;
12008 11973 mutex_exit(&pptr->port_mutex);
12009 11974
12010 11975 (void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
12011 11976 tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
12012 11977
12013 11978 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12014 11979 ptgt->tgt_hard_addr != 0) {
12015 11980 tgt_id = (uint32_t)fcp_alpa_to_switch[
12016 11981 ptgt->tgt_hard_addr];
12017 11982 } else {
12018 11983 tgt_id = ptgt->tgt_d_id;
12019 11984 }
12020 11985
12021 11986 (void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
12022 11987 TARGET_PROP, tgt_id);
12023 11988 }
12024 11989
12025 11990 	/* get our port-wwn property */
12026 11991 bytes = NULL;
12027 11992 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
12028 11993 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12029 11994 &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
12030 11995 if (bytes != NULL) {
12031 11996 ddi_prop_free(bytes);
12032 11997 }
12033 11998 return (0);
12034 11999 }
12035 12000
12036 12001 for (i = 0; i < FC_WWN_SIZE; i++) {
12037 12002 (void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
12038 12003 }
12039 12004
12040 12005 /* Stick in the address of the form "wWWN,LUN" */
12041 12006 numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
12042 12007
12043 12008 ASSERT(numChars < len);
12044 12009 if (numChars >= len) {
12045 12010 fcp_log(CE_WARN, pptr->port_dip,
12046 12011 "!fcp_scsi_get_name: "
12047 12012 "name parameter length too small, it needs to be %d",
12048 12013 numChars+1);
12049 12014 }
12050 12015
12051 12016 ddi_prop_free(bytes);
12052 12017
12053 12018 return (1);
12054 12019 }
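
The "wWWN,LUN" string built above is the unit address fcp uses for its children (the same form appears again in fcp_create_pip() and fcp_is_pip_present()). Below is a minimal sketch of the encoding, assuming the kernel sprintf/snprintf from <sys/systm.h>; the helper name and sample values are made up for illustration.

#include <sys/types.h>
#include <sys/systm.h>

/*
 * Sketch: encode an 8-byte port WWN and a LUN number as "w<wwn>,<lun>",
 * e.g. "w21000020371b1f27,0".  Returns the snprintf() result.
 */
static int
format_unit_address(const uchar_t wwn[8], uint16_t lun, char *buf, int len)
{
	char	t[17];
	int	i;

	for (i = 0; i < 8; i++) {
		(void) sprintf(&t[i << 1], "%02x", wwn[i]);
	}
	t[16] = '\0';

	return (snprintf(buf, len, "w%s,%x", t, lun));
}
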
12055 12020
12056 12021
12057 12022 /*
12058 12023 * called by the transport to get the SCSI target id value, returning
12059 12024 * it in "name"
12060 12025 *
12061 12026 * this isn't needed/used on sun4m
12062 12027 *
12063 12028 * return 1 for success else return 0
12064 12029 */
12065 12030 /* ARGSUSED */
12066 12031 static int
12067 12032 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
12068 12033 {
12069 12034 struct fcp_lun *plun = ADDR2LUN(&sd->sd_address);
12070 12035 struct fcp_tgt *ptgt;
12071 12036 int numChars;
12072 12037
12073 12038 if (plun == NULL) {
12074 12039 return (0);
12075 12040 }
12076 12041
12077 12042 if ((ptgt = plun->lun_tgt) == NULL) {
12078 12043 return (0);
12079 12044 }
12080 12045
12081 12046 numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
12082 12047
12083 12048 ASSERT(numChars < len);
12084 12049 if (numChars >= len) {
12085 12050 fcp_log(CE_WARN, NULL,
12086 12051 "!fcp_scsi_get_bus_addr: "
12087 12052 "name parameter length too small, it needs to be %d",
12088 12053 numChars+1);
12089 12054 }
12090 12055
12091 12056 return (1);
12092 12057 }
12093 12058
12094 12059
12095 12060 /*
12096 12061 * called internally to reset the link where the specified port lives
12097 12062 */
12098 12063 static int
12099 12064 fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
12100 12065 {
12101 12066 la_wwn_t wwn;
12102 12067 struct fcp_lun *plun;
12103 12068 struct fcp_tgt *ptgt;
12104 12069
12105 12070 /* disable restart of lip if we're suspended */
12106 12071 mutex_enter(&pptr->port_mutex);
12107 12072
12108 12073 if (pptr->port_state & (FCP_STATE_SUSPENDED |
12109 12074 FCP_STATE_POWER_DOWN)) {
12110 12075 mutex_exit(&pptr->port_mutex);
12111 12076 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12112 12077 fcp_trace, FCP_BUF_LEVEL_2, 0,
12113 12078 "fcp_linkreset, fcp%d: link reset "
12114 12079 "disabled due to DDI_SUSPEND",
12115 12080 ddi_get_instance(pptr->port_dip));
12116 12081 return (FC_FAILURE);
12117 12082 }
12118 12083
12119 12084 if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
12120 12085 mutex_exit(&pptr->port_mutex);
12121 12086 return (FC_SUCCESS);
12122 12087 }
12123 12088
12124 12089 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
12125 12090 fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");
12126 12091
12127 12092 /*
12128 12093 * If ap == NULL assume local link reset.
12129 12094 */
12130 12095 if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
12131 12096 plun = ADDR2LUN(ap);
12132 12097 ptgt = plun->lun_tgt;
12133 12098 bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
12134 12099 } else {
12135 12100 bzero((caddr_t)&wwn, sizeof (wwn));
12136 12101 }
12137 12102 mutex_exit(&pptr->port_mutex);
12138 12103
12139 12104 return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
12140 12105 }
12141 12106
12142 12107
12143 12108 /*
12144 12109 * called from fcp_port_attach() to resume a port
12145 12110 * return DDI_* success/failure status
12146 12111 * acquires and releases the global mutex
12147 12112 * acquires and releases the port mutex
12148 12113 */
12149 12114 /*ARGSUSED*/
12150 12115
12151 12116 static int
12152 12117 fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
12153 12118 uint32_t s_id, fc_attach_cmd_t cmd, int instance)
12154 12119 {
12155 12120 int res = DDI_FAILURE; /* default result */
12156 12121 struct fcp_port *pptr; /* port state ptr */
12157 12122 uint32_t alloc_cnt;
12158 12123 uint32_t max_cnt;
12159 12124 fc_portmap_t *tmp_list = NULL;
12160 12125
12161 12126 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
12162 12127 FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
12163 12128 instance);
12164 12129
12165 12130 if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
12166 12131 cmn_err(CE_WARN, "fcp: bad soft state");
12167 12132 return (res);
12168 12133 }
12169 12134
12170 12135 mutex_enter(&pptr->port_mutex);
12171 12136 switch (cmd) {
12172 12137 case FC_CMD_RESUME:
12173 12138 ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
12174 12139 pptr->port_state &= ~FCP_STATE_SUSPENDED;
12175 12140 break;
12176 12141
12177 12142 case FC_CMD_POWER_UP:
12178 12143 /*
12179 12144 * If the port is DDI_SUSPENded, defer rediscovery
12180 12145 		 * If the port is DDI_SUSPENDed, defer rediscovery
12181 12146 */
12182 12147 if (pptr->port_state & FCP_STATE_SUSPENDED) {
12183 12148 pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12184 12149 mutex_exit(&pptr->port_mutex);
12185 12150 return (DDI_SUCCESS);
12186 12151 }
12187 12152 pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12188 12153 }
12189 12154 pptr->port_id = s_id;
12190 12155 pptr->port_state = FCP_STATE_INIT;
12191 12156 mutex_exit(&pptr->port_mutex);
12192 12157
12193 12158 /*
12194 12159 * Make a copy of ulp_port_info as fctl allocates
12195 12160 * a temp struct.
12196 12161 */
12197 12162 (void) fcp_cp_pinfo(pptr, pinfo);
12198 12163
12199 12164 mutex_enter(&fcp_global_mutex);
12200 12165 if (fcp_watchdog_init++ == 0) {
12201 12166 fcp_watchdog_tick = fcp_watchdog_timeout *
12202 12167 drv_usectohz(1000000);
12203 12168 fcp_watchdog_id = timeout(fcp_watch,
12204 12169 NULL, fcp_watchdog_tick);
12205 12170 }
12206 12171 mutex_exit(&fcp_global_mutex);
12207 12172
12208 12173 /*
12209 12174 * Handle various topologies and link states.
12210 12175 */
12211 12176 switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
12212 12177 case FC_STATE_OFFLINE:
12213 12178 /*
12214 12179 * Wait for ONLINE, at which time a state
12215 12180 * change will cause a statec_callback
12216 12181 */
12217 12182 res = DDI_SUCCESS;
12218 12183 break;
12219 12184
12220 12185 case FC_STATE_ONLINE:
12221 12186
12222 12187 if (pptr->port_topology == FC_TOP_UNKNOWN) {
12223 12188 (void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
12224 12189 res = DDI_SUCCESS;
12225 12190 break;
12226 12191 }
12227 12192
12228 12193 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
12229 12194 !fcp_enable_auto_configuration) {
12230 12195 tmp_list = fcp_construct_map(pptr, &alloc_cnt);
12231 12196 if (tmp_list == NULL) {
12232 12197 if (!alloc_cnt) {
12233 12198 res = DDI_SUCCESS;
12234 12199 }
12235 12200 break;
12236 12201 }
12237 12202 max_cnt = alloc_cnt;
12238 12203 } else {
12239 12204 ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
12240 12205
12241 12206 alloc_cnt = FCP_MAX_DEVICES;
12242 12207
12243 12208 if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
12244 12209 (sizeof (fc_portmap_t)) * alloc_cnt,
12245 12210 KM_NOSLEEP)) == NULL) {
12246 12211 fcp_log(CE_WARN, pptr->port_dip,
12247 12212 "!fcp%d: failed to allocate portmap",
12248 12213 instance);
12249 12214 break;
12250 12215 }
12251 12216
12252 12217 max_cnt = alloc_cnt;
12253 12218 if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
12254 12219 &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
12255 12220 FC_SUCCESS) {
12256 12221 caddr_t msg;
12257 12222
12258 12223 (void) fc_ulp_error(res, &msg);
12259 12224
12260 12225 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12261 12226 fcp_trace, FCP_BUF_LEVEL_2, 0,
12262 12227 "resume failed getportmap: reason=0x%x",
12263 12228 res);
12264 12229
12265 12230 fcp_log(CE_WARN, pptr->port_dip,
12266 12231 "!failed to get port map : %s", msg);
12267 12232 break;
12268 12233 }
12269 12234 if (max_cnt > alloc_cnt) {
12270 12235 alloc_cnt = max_cnt;
12271 12236 }
12272 12237 }
12273 12238
12274 12239 /*
12275 12240 * do the SCSI device discovery and create
12276 12241 * the devinfos
12277 12242 */
12278 12243 fcp_statec_callback(ulph, pptr->port_fp_handle,
12279 12244 pptr->port_phys_state, pptr->port_topology, tmp_list,
12280 12245 max_cnt, pptr->port_id);
12281 12246
12282 12247 res = DDI_SUCCESS;
12283 12248 break;
12284 12249
12285 12250 default:
12286 12251 fcp_log(CE_WARN, pptr->port_dip,
12287 12252 "!fcp%d: invalid port state at attach=0x%x",
12288 12253 instance, pptr->port_phys_state);
12289 12254
12290 12255 mutex_enter(&pptr->port_mutex);
12291 12256 pptr->port_phys_state = FCP_STATE_OFFLINE;
12292 12257 mutex_exit(&pptr->port_mutex);
12293 12258 res = DDI_SUCCESS;
12294 12259
12295 12260 break;
12296 12261 }
12297 12262
12298 12263 if (tmp_list != NULL) {
12299 12264 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
12300 12265 }
12301 12266
12302 12267 return (res);
12303 12268 }
12304 12269
12305 12270
12306 12271 static void
12307 12272 fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
12308 12273 {
12309 12274 pptr->port_fp_modlinkage = *pinfo->port_linkage;
12310 12275 pptr->port_dip = pinfo->port_dip;
12311 12276 pptr->port_fp_handle = pinfo->port_handle;
12312 12277 if (pinfo->port_acc_attr != NULL) {
12313 12278 /*
12314 12279 * FCA supports DMA
12315 12280 */
12316 12281 pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
12317 12282 pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
12318 12283 pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
12319 12284 pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
12320 12285 }
12321 12286 pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
12322 12287 pptr->port_max_exch = pinfo->port_fca_max_exch;
12323 12288 pptr->port_phys_state = pinfo->port_state;
12324 12289 pptr->port_topology = pinfo->port_flags;
12325 12290 pptr->port_reset_action = pinfo->port_reset_action;
12326 12291 pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
12327 12292 pptr->port_fcp_dma = pinfo->port_fcp_dma;
12328 12293 bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
12329 12294 bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
12330 12295
12331 12296 /* Clear FMA caps to avoid fm-capability ereport */
12332 12297 if (pptr->port_cmd_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12333 12298 pptr->port_cmd_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12334 12299 if (pptr->port_data_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12335 12300 pptr->port_data_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12336 12301 if (pptr->port_resp_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12337 12302 pptr->port_resp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12338 12303 }
12339 12304
12340 12305 /*
12341 12306  * If the element's wait field is set to 1, another thread is
12342 12307  * waiting for the operation to complete. Once it is complete,
12343 12308  * the waiting thread is signaled and the element is freed by
12344 12309  * that waiting thread. If the element's wait field is set to 0,
12345 12310  * the element is freed here.
12346 12311 */
12347 12312 static void
12348 12313 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12349 12314 {
12350 12315 ASSERT(elem != NULL);
12351 12316 mutex_enter(&elem->mutex);
12352 12317 elem->result = result;
12353 12318 if (elem->wait) {
12354 12319 elem->wait = 0;
12355 12320 cv_signal(&elem->cv);
12356 12321 mutex_exit(&elem->mutex);
12357 12322 } else {
12358 12323 mutex_exit(&elem->mutex);
12359 12324 cv_destroy(&elem->cv);
12360 12325 mutex_destroy(&elem->mutex);
12361 12326 kmem_free(elem, sizeof (struct fcp_hp_elem));
12362 12327 }
12363 12328 }
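
The waiting side that pairs with fcp_process_elem() presumably looks like the sketch below: the submitter sets wait to 1 before dispatching, blocks on the condition variable, and then owns the cleanup. This illustrates the protocol described in the comment above, not fcp's actual submit path; it assumes fcp.c's existing includes for the fcp_hp_elem definition and the kernel mutex/cv/kmem routines.

/*
 * Sketch of the waiting counterpart to fcp_process_elem().  The
 * struct fcp_hp_elem fields (mutex, cv, wait, result) are the ones
 * used above; the dispatch step itself is elided.
 */
static int
fcp_wait_for_elem(struct fcp_hp_elem *elem)
{
	int	result;

	mutex_enter(&elem->mutex);
	while (elem->wait) {
		cv_wait(&elem->cv, &elem->mutex);
	}
	result = elem->result;
	mutex_exit(&elem->mutex);

	/* with wait set, the waiter owns the element's cleanup */
	cv_destroy(&elem->cv);
	mutex_destroy(&elem->mutex);
	kmem_free(elem, sizeof (struct fcp_hp_elem));

	return (result);
}
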
12364 12329
12365 12330 /*
12366 12331 * This function is invoked from the taskq thread to allocate
12367 12332 * devinfo nodes and to online/offline them.
12368 12333 */
12369 12334 static void
12370 12335 fcp_hp_task(void *arg)
12371 12336 {
12372 12337 struct fcp_hp_elem *elem = (struct fcp_hp_elem *)arg;
12373 12338 struct fcp_lun *plun = elem->lun;
12374 12339 struct fcp_port *pptr = elem->port;
12375 12340 int result;
12376 12341
12377 12342 ASSERT(elem->what == FCP_ONLINE ||
12378 12343 elem->what == FCP_OFFLINE ||
12379 12344 elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
12380 12345 elem->what == FCP_MPXIO_PATH_SET_BUSY);
12381 12346
12382 12347 mutex_enter(&pptr->port_mutex);
12383 12348 mutex_enter(&plun->lun_mutex);
12384 12349 if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
12385 12350 plun->lun_event_count != elem->event_cnt) ||
12386 12351 pptr->port_state & (FCP_STATE_SUSPENDED |
12387 12352 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
12388 12353 mutex_exit(&plun->lun_mutex);
12389 12354 mutex_exit(&pptr->port_mutex);
12390 12355 fcp_process_elem(elem, NDI_FAILURE);
12391 12356 return;
12392 12357 }
12393 12358 mutex_exit(&plun->lun_mutex);
12394 12359 mutex_exit(&pptr->port_mutex);
12395 12360
12396 12361 result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
12397 12362 elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
12398 12363 fcp_process_elem(elem, result);
12399 12364 }
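
fcp_hp_task() runs in task queue context, so something has to hand it the element. fcp has its own dispatch path; purely for illustration, the documented ddi_taskq_dispatch(9F) interface could be used roughly as below, where "tq" is an assumed handle created earlier with ddi_taskq_create(9F).

#include <sys/sunddi.h>

/*
 * Sketch only: queueing a hotplug element so that fcp_hp_task() runs
 * in taskq context.  fcp's real dispatch path may differ.
 */
static int
dispatch_hp_elem(ddi_taskq_t *tq, struct fcp_hp_elem *elem)
{
	if (ddi_taskq_dispatch(tq, fcp_hp_task, elem,
	    DDI_NOSLEEP) != DDI_SUCCESS) {
		return (NDI_FAILURE);
	}
	return (NDI_SUCCESS);
}
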
12400 12365
12401 12366
12402 12367 static child_info_t *
12403 12368 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12404 12369 int tcount)
12405 12370 {
12406 12371 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12407 12372
12408 12373 if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12409 12374 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12410 12375
12411 12376 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12412 12377 /*
12413 12378 * Child has not been created yet. Create the child device
12414 12379 * based on the per-Lun flags.
12415 12380 */
12416 12381 if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12417 12382 plun->lun_cip =
12418 12383 CIP(fcp_create_dip(plun, lcount, tcount));
12419 12384 plun->lun_mpxio = 0;
12420 12385 } else {
12421 12386 plun->lun_cip =
12422 12387 CIP(fcp_create_pip(plun, lcount, tcount));
12423 12388 plun->lun_mpxio = 1;
12424 12389 }
12425 12390 } else {
12426 12391 plun->lun_cip = cip;
12427 12392 }
12428 12393
12429 12394 return (plun->lun_cip);
12430 12395 }
12431 12396
12432 12397
12433 12398 static int
12434 12399 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12435 12400 {
12436 12401 int rval = FC_FAILURE;
12437 12402 dev_info_t *pdip;
12438 12403 struct dev_info *dip;
12439 12404 int circular;
12440 12405
12441 12406 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12442 12407
12443 12408 pdip = plun->lun_tgt->tgt_port->port_dip;
12444 12409
12445 12410 if (plun->lun_cip == NULL) {
12446 12411 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12447 12412 fcp_trace, FCP_BUF_LEVEL_3, 0,
12448 12413 "fcp_is_dip_present: plun->lun_cip is NULL: "
12449 12414 "plun: %p lun state: %x num: %d target state: %x",
12450 12415 plun, plun->lun_state, plun->lun_num,
12451 12416 plun->lun_tgt->tgt_port->port_state);
12452 12417 return (rval);
12453 12418 }
12454 12419 ndi_devi_enter(pdip, &circular);
12455 12420 dip = DEVI(pdip)->devi_child;
12456 12421 while (dip) {
12457 12422 if (dip == DEVI(cdip)) {
12458 12423 rval = FC_SUCCESS;
12459 12424 break;
12460 12425 }
12461 12426 dip = dip->devi_sibling;
12462 12427 }
12463 12428 ndi_devi_exit(pdip, circular);
12464 12429 return (rval);
12465 12430 }
12466 12431
12467 12432 static int
12468 12433 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12469 12434 {
12470 12435 int rval = FC_FAILURE;
12471 12436
12472 12437 ASSERT(plun != NULL);
12473 12438 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12474 12439
12475 12440 if (plun->lun_mpxio == 0) {
12476 12441 rval = fcp_is_dip_present(plun, DIP(cip));
12477 12442 } else {
12478 12443 rval = fcp_is_pip_present(plun, PIP(cip));
12479 12444 }
12480 12445
12481 12446 return (rval);
12482 12447 }
12483 12448
12484 12449 /*
12485 12450 * Function: fcp_create_dip
12486 12451 *
12487 12452 * Description: Creates a dev_info_t structure for the LUN specified by the
12488 12453 * caller.
12489 12454 *
12490 12455 * Argument: plun Lun structure
12491 12456 * link_cnt Link state count.
12492 12457 * tgt_cnt Target state change count.
12493 12458 *
12494 12459 * Return Value: NULL if it failed
12495 12460 * dev_info_t structure address if it succeeded
12496 12461 *
12497 12462 * Context: Kernel context
12498 12463 */
12499 12464 static dev_info_t *
12500 12465 fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
12501 12466 {
12502 12467 int failure = 0;
12503 12468 uint32_t tgt_id;
12504 12469 uint64_t sam_lun;
12505 12470 struct fcp_tgt *ptgt = plun->lun_tgt;
12506 12471 struct fcp_port *pptr = ptgt->tgt_port;
12507 12472 dev_info_t *pdip = pptr->port_dip;
12508 12473 dev_info_t *cdip = NULL;
12509 12474 dev_info_t *old_dip = DIP(plun->lun_cip);
12510 12475 char *nname = NULL;
12511 12476 char **compatible = NULL;
12512 12477 int ncompatible;
12513 12478 char *scsi_binding_set;
12514 12479 char t_pwwn[17];
12515 12480
12516 12481 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12517 12482 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12518 12483
12519 12484 /* get the 'scsi-binding-set' property */
12520 12485 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
12521 12486 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
12522 12487 &scsi_binding_set) != DDI_PROP_SUCCESS) {
12523 12488 scsi_binding_set = NULL;
12524 12489 }
12525 12490
12526 12491 /* determine the node name and compatible */
12527 12492 scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12528 12493 plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12529 12494 if (scsi_binding_set) {
12530 12495 ddi_prop_free(scsi_binding_set);
12531 12496 }
12532 12497
12533 12498 if (nname == NULL) {
12534 12499 #ifdef DEBUG
12535 12500 cmn_err(CE_WARN, "%s%d: no driver for "
12536 12501 "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12537 12502 " compatible: %s",
12538 12503 ddi_driver_name(pdip), ddi_get_instance(pdip),
12539 12504 ptgt->tgt_port_wwn.raw_wwn[0],
12540 12505 ptgt->tgt_port_wwn.raw_wwn[1],
12541 12506 ptgt->tgt_port_wwn.raw_wwn[2],
12542 12507 ptgt->tgt_port_wwn.raw_wwn[3],
12543 12508 ptgt->tgt_port_wwn.raw_wwn[4],
12544 12509 ptgt->tgt_port_wwn.raw_wwn[5],
12545 12510 ptgt->tgt_port_wwn.raw_wwn[6],
12546 12511 ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12547 12512 *compatible);
12548 12513 #endif /* DEBUG */
12549 12514 failure++;
12550 12515 goto end_of_fcp_create_dip;
12551 12516 }
12552 12517
12553 12518 cdip = fcp_find_existing_dip(plun, pdip, nname);
12554 12519
12555 12520 /*
12556 12521 	 * If the old_dip does not match the cdip, some property has
12557 12522 	 * changed. Since we'll be using the cdip, we need to offline
12558 12523 	 * the old_dip. If the state contains FCP_LUN_CHANGED, the
12559 12524 	 * dtype for the device has been updated; offline the old
12560 12525 	 * device and create a new device with the new device type.
12561 12526 	 * Refer to bug: 4764752
12562 12527 */
12563 12528 if (old_dip && (cdip != old_dip ||
12564 12529 plun->lun_state & FCP_LUN_CHANGED)) {
12565 12530 plun->lun_state &= ~(FCP_LUN_INIT);
12566 12531 mutex_exit(&plun->lun_mutex);
12567 12532 mutex_exit(&pptr->port_mutex);
12568 12533
12569 12534 mutex_enter(&ptgt->tgt_mutex);
12570 12535 (void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
12571 12536 link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
12572 12537 mutex_exit(&ptgt->tgt_mutex);
12573 12538
12574 12539 #ifdef DEBUG
12575 12540 if (cdip != NULL) {
12576 12541 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12577 12542 fcp_trace, FCP_BUF_LEVEL_2, 0,
12578 12543 "Old dip=%p; New dip=%p don't match", old_dip,
12579 12544 cdip);
12580 12545 } else {
12581 12546 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12582 12547 fcp_trace, FCP_BUF_LEVEL_2, 0,
12583 12548 "Old dip=%p; New dip=NULL don't match", old_dip);
12584 12549 }
12585 12550 #endif
12586 12551
12587 12552 mutex_enter(&pptr->port_mutex);
12588 12553 mutex_enter(&plun->lun_mutex);
12589 12554 }
12590 12555
12591 12556 if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12592 12557 plun->lun_state &= ~(FCP_LUN_CHANGED);
12593 12558 if (ndi_devi_alloc(pptr->port_dip, nname,
12594 12559 DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
12595 12560 failure++;
12596 12561 goto end_of_fcp_create_dip;
12597 12562 }
12598 12563 }
12599 12564
12600 12565 /*
12601 12566 * Previously all the properties for the devinfo were destroyed here
12602 12567 * with a call to ndi_prop_remove_all(). Since this may cause loss of
12603 12568 * the devid property (and other properties established by the target
12604 12569 * driver or framework) which the code does not always recreate, this
12605 12570 * call was removed.
12606 12571 * This opens a theoretical possibility that we may return with a
12607 12572 * stale devid on the node if the scsi entity behind the fibre channel
12608 12573 * lun has changed.
12609 12574 */
12610 12575
12611 12576 /* decorate the node with compatible */
12612 12577 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
12613 12578 "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
12614 12579 failure++;
12615 12580 goto end_of_fcp_create_dip;
12616 12581 }
12617 12582
12618 12583 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
12619 12584 ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12620 12585 failure++;
12621 12586 goto end_of_fcp_create_dip;
12622 12587 }
12623 12588
12624 12589 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
12625 12590 ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12626 12591 failure++;
12627 12592 goto end_of_fcp_create_dip;
12628 12593 }
12629 12594
12630 12595 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12631 12596 t_pwwn[16] = '\0';
12632 12597 if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
12633 12598 != DDI_PROP_SUCCESS) {
12634 12599 failure++;
12635 12600 goto end_of_fcp_create_dip;
12636 12601 }
12637 12602
12638 12603 /*
12639 12604 	 * If there is no hard address we might have to deal with
12640 12605 	 * that by using the WWN. Either way it is important to
12641 12606 	 * recognize this problem early so ssd can be informed of
12642 12607 	 * the right interconnect type.
12643 12608 */
12644 12609 if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
12645 12610 tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12646 12611 } else {
12647 12612 tgt_id = ptgt->tgt_d_id;
12648 12613 }
12649 12614
12650 12615 if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
12651 12616 tgt_id) != DDI_PROP_SUCCESS) {
12652 12617 failure++;
12653 12618 goto end_of_fcp_create_dip;
12654 12619 }
12655 12620
12656 12621 if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
12657 12622 (int)plun->lun_num) != DDI_PROP_SUCCESS) {
12658 12623 failure++;
12659 12624 goto end_of_fcp_create_dip;
12660 12625 }
12661 12626 bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12662 12627 if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
12663 12628 sam_lun) != DDI_PROP_SUCCESS) {
12664 12629 failure++;
12665 12630 goto end_of_fcp_create_dip;
12666 12631 }
12667 12632
12668 12633 end_of_fcp_create_dip:
12669 12634 scsi_hba_nodename_compatible_free(nname, compatible);
12670 12635
12671 12636 if (cdip != NULL && failure) {
12672 12637 (void) ndi_prop_remove_all(cdip);
12673 12638 (void) ndi_devi_free(cdip);
12674 12639 cdip = NULL;
12675 12640 }
12676 12641
12677 12642 return (cdip);
12678 12643 }
12679 12644
12680 12645 /*
12681 12646 * Function: fcp_create_pip
12682 12647 *
12683 12648 * Description: Creates a Path Id for the LUN specified by the caller.
12684 12649 *
12685 12650 * Argument: plun Lun structure
12686 12651 * link_cnt Link state count.
12687 12652 * tgt_cnt Target state count.
12688 12653 *
12689 12654 * Return Value: NULL if it failed
12690 12655 * mdi_pathinfo_t structure address if it succeeded
12691 12656 *
12692 12657 * Context: Kernel context
12693 12658 */
12694 12659 static mdi_pathinfo_t *
12695 12660 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12696 12661 {
12697 12662 int i;
12698 12663 char buf[MAXNAMELEN];
12699 12664 char uaddr[MAXNAMELEN];
12700 12665 int failure = 0;
12701 12666 uint32_t tgt_id;
12702 12667 uint64_t sam_lun;
12703 12668 struct fcp_tgt *ptgt = plun->lun_tgt;
12704 12669 struct fcp_port *pptr = ptgt->tgt_port;
12705 12670 dev_info_t *pdip = pptr->port_dip;
12706 12671 mdi_pathinfo_t *pip = NULL;
12707 12672 mdi_pathinfo_t *old_pip = PIP(plun->lun_cip);
12708 12673 char *nname = NULL;
12709 12674 char **compatible = NULL;
12710 12675 int ncompatible;
12711 12676 char *scsi_binding_set;
12712 12677 char t_pwwn[17];
12713 12678
12714 12679 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12715 12680 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12716 12681
12717 12682 scsi_binding_set = "vhci";
12718 12683
12719 12684 /* determine the node name and compatible */
12720 12685 scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12721 12686 plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12722 12687
12723 12688 if (nname == NULL) {
12724 12689 #ifdef DEBUG
12725 12690 		cmn_err(CE_WARN, "fcp_create_pip: %s%d: no driver for "
12726 12691 "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12727 12692 " compatible: %s",
12728 12693 ddi_driver_name(pdip), ddi_get_instance(pdip),
12729 12694 ptgt->tgt_port_wwn.raw_wwn[0],
12730 12695 ptgt->tgt_port_wwn.raw_wwn[1],
12731 12696 ptgt->tgt_port_wwn.raw_wwn[2],
12732 12697 ptgt->tgt_port_wwn.raw_wwn[3],
12733 12698 ptgt->tgt_port_wwn.raw_wwn[4],
12734 12699 ptgt->tgt_port_wwn.raw_wwn[5],
12735 12700 ptgt->tgt_port_wwn.raw_wwn[6],
12736 12701 ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12737 12702 *compatible);
12738 12703 #endif /* DEBUG */
12739 12704 failure++;
12740 12705 goto end_of_fcp_create_pip;
12741 12706 }
12742 12707
12743 12708 pip = fcp_find_existing_pip(plun, pdip);
12744 12709
12745 12710 /*
12746 12711 	 * If the old_pip does not match the pip, some property has
12747 12712 	 * changed. Since we'll be using the pip, we need to offline
12748 12713 	 * the old_pip. If the state contains FCP_LUN_CHANGED, the
12749 12714 	 * dtype for the device has been updated; offline the old
12750 12715 	 * device and create a new device with the new device type.
12751 12716 	 * Refer to bug: 4764752
12752 12717 */
12753 12718 if (old_pip && (pip != old_pip ||
12754 12719 plun->lun_state & FCP_LUN_CHANGED)) {
12755 12720 plun->lun_state &= ~(FCP_LUN_INIT);
12756 12721 mutex_exit(&plun->lun_mutex);
12757 12722 mutex_exit(&pptr->port_mutex);
12758 12723
12759 12724 mutex_enter(&ptgt->tgt_mutex);
12760 12725 (void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12761 12726 FCP_OFFLINE, lcount, tcount,
12762 12727 NDI_DEVI_REMOVE, 0);
12763 12728 mutex_exit(&ptgt->tgt_mutex);
12764 12729
12765 12730 if (pip != NULL) {
12766 12731 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12767 12732 fcp_trace, FCP_BUF_LEVEL_2, 0,
12768 12733 "Old pip=%p; New pip=%p don't match",
12769 12734 old_pip, pip);
12770 12735 } else {
12771 12736 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12772 12737 fcp_trace, FCP_BUF_LEVEL_2, 0,
12773 12738 "Old pip=%p; New pip=NULL don't match",
12774 12739 old_pip);
12775 12740 }
12776 12741
12777 12742 mutex_enter(&pptr->port_mutex);
12778 12743 mutex_enter(&plun->lun_mutex);
12779 12744 }
12780 12745
12781 12746 /*
12782 12747 	 * Since FC_WWN_SIZE is 8 bytes and, unlike lun_guid_size,
12783 12748 	 * is not dependent on the target, I don't believe the same
12784 12749 	 * truncation happens here UNLESS the standards change the
12785 12750 	 * FC_WWN_SIZE value to something larger than MAXNAMELEN
12786 12751 	 * (currently 255 bytes).
12787 12752 */
12788 12753
12789 12754 for (i = 0; i < FC_WWN_SIZE; i++) {
12790 12755 (void) sprintf(&buf[i << 1], "%02x",
12791 12756 ptgt->tgt_port_wwn.raw_wwn[i]);
12792 12757 }
12793 12758
12794 12759 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12795 12760 buf, plun->lun_num);
12796 12761
12797 12762 if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12798 12763 /*
12799 12764 * Release the locks before calling into
12800 12765 * mdi_pi_alloc_compatible() since this can result in a
12801 12766 * callback into fcp which can result in a deadlock
12802 12767 * (see bug # 4870272).
12803 12768 *
12804 12769 * Basically, what we are trying to avoid is the scenario where
12805 12770 * one thread does ndi_devi_enter() and tries to grab
12806 12771 * fcp_mutex and another does it the other way round.
12807 12772 *
12808 12773 * But before we do that, make sure that nobody releases the
12809 12774 * port in the meantime. We can do this by setting a flag.
12810 12775 */
12811 12776 plun->lun_state &= ~(FCP_LUN_CHANGED);
12812 12777 pptr->port_state |= FCP_STATE_IN_MDI;
12813 12778 mutex_exit(&plun->lun_mutex);
12814 12779 mutex_exit(&pptr->port_mutex);
12815 12780 if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12816 12781 uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12817 12782 fcp_log(CE_WARN, pptr->port_dip,
12818 12783 "!path alloc failed:0x%x", plun);
12819 12784 mutex_enter(&pptr->port_mutex);
12820 12785 mutex_enter(&plun->lun_mutex);
12821 12786 pptr->port_state &= ~FCP_STATE_IN_MDI;
12822 12787 failure++;
12823 12788 goto end_of_fcp_create_pip;
12824 12789 }
12825 12790 mutex_enter(&pptr->port_mutex);
12826 12791 mutex_enter(&plun->lun_mutex);
12827 12792 pptr->port_state &= ~FCP_STATE_IN_MDI;
12828 12793 } else {
12829 12794 (void) mdi_prop_remove(pip, NULL);
12830 12795 }
12831 12796
12832 12797 mdi_pi_set_phci_private(pip, (caddr_t)plun);
12833 12798
12834 12799 if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12835 12800 ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12836 12801 != DDI_PROP_SUCCESS) {
12837 12802 failure++;
12838 12803 goto end_of_fcp_create_pip;
12839 12804 }
12840 12805
12841 12806 if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12842 12807 ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12843 12808 != DDI_PROP_SUCCESS) {
12844 12809 failure++;
12845 12810 goto end_of_fcp_create_pip;
12846 12811 }
12847 12812
12848 12813 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12849 12814 t_pwwn[16] = '\0';
12850 12815 if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12851 12816 != DDI_PROP_SUCCESS) {
12852 12817 failure++;
12853 12818 goto end_of_fcp_create_pip;
12854 12819 }
12855 12820
12856 12821 /*
12857 12822 	 * If there is no hard address we might have to deal with
12858 12823 	 * that by using the WWN. Either way it is important to
12859 12824 	 * recognize this problem early so ssd can be informed of
12860 12825 	 * the right interconnect type.
12861 12826 */
12862 12827 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12863 12828 ptgt->tgt_hard_addr != 0) {
12864 12829 tgt_id = (uint32_t)
12865 12830 fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12866 12831 } else {
12867 12832 tgt_id = ptgt->tgt_d_id;
12868 12833 }
12869 12834
12870 12835 if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12871 12836 != DDI_PROP_SUCCESS) {
12872 12837 failure++;
12873 12838 goto end_of_fcp_create_pip;
12874 12839 }
12875 12840
12876 12841 if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12877 12842 != DDI_PROP_SUCCESS) {
12878 12843 failure++;
12879 12844 goto end_of_fcp_create_pip;
12880 12845 }
12881 12846 bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12882 12847 if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12883 12848 != DDI_PROP_SUCCESS) {
12884 12849 failure++;
12885 12850 goto end_of_fcp_create_pip;
12886 12851 }
12887 12852
12888 12853 end_of_fcp_create_pip:
12889 12854 scsi_hba_nodename_compatible_free(nname, compatible);
12890 12855
12891 12856 if (pip != NULL && failure) {
12892 12857 (void) mdi_prop_remove(pip, NULL);
12893 12858 mutex_exit(&plun->lun_mutex);
12894 12859 mutex_exit(&pptr->port_mutex);
12895 12860 (void) mdi_pi_free(pip, 0);
12896 12861 mutex_enter(&pptr->port_mutex);
12897 12862 mutex_enter(&plun->lun_mutex);
12898 12863 pip = NULL;
12899 12864 }
12900 12865
12901 12866 return (pip);
12902 12867 }
12903 12868
12904 12869 static dev_info_t *
12905 12870 fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
12906 12871 {
12907 12872 uint_t nbytes;
12908 12873 uchar_t *bytes;
12909 12874 uint_t nwords;
12910 12875 uint32_t tgt_id;
12911 12876 int *words;
12912 12877 dev_info_t *cdip;
12913 12878 dev_info_t *ndip;
12914 12879 struct fcp_tgt *ptgt = plun->lun_tgt;
12915 12880 struct fcp_port *pptr = ptgt->tgt_port;
12916 12881 int circular;
12917 12882
12918 12883 ndi_devi_enter(pdip, &circular);
12919 12884
12920 12885 ndip = (dev_info_t *)DEVI(pdip)->devi_child;
12921 12886 while ((cdip = ndip) != NULL) {
12922 12887 ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;
12923 12888
12924 12889 if (strcmp(DEVI(cdip)->devi_node_name, name)) {
12925 12890 continue;
12926 12891 }
12927 12892
12928 12893 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12929 12894 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
12930 12895 &nbytes) != DDI_PROP_SUCCESS) {
12931 12896 continue;
12932 12897 }
12933 12898
12934 12899 if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12935 12900 if (bytes != NULL) {
12936 12901 ddi_prop_free(bytes);
12937 12902 }
12938 12903 continue;
12939 12904 }
12940 12905 ASSERT(bytes != NULL);
12941 12906
12942 12907 if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
12943 12908 ddi_prop_free(bytes);
12944 12909 continue;
12945 12910 }
12946 12911
12947 12912 ddi_prop_free(bytes);
12948 12913
12949 12914 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12950 12915 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12951 12916 &nbytes) != DDI_PROP_SUCCESS) {
12952 12917 continue;
12953 12918 }
12954 12919
12955 12920 if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12956 12921 if (bytes != NULL) {
12957 12922 ddi_prop_free(bytes);
12958 12923 }
12959 12924 continue;
12960 12925 }
12961 12926 ASSERT(bytes != NULL);
12962 12927
12963 12928 if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
12964 12929 ddi_prop_free(bytes);
12965 12930 continue;
12966 12931 }
12967 12932
12968 12933 ddi_prop_free(bytes);
12969 12934
12970 12935 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12971 12936 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
12972 12937 &nwords) != DDI_PROP_SUCCESS) {
12973 12938 continue;
12974 12939 }
12975 12940
12976 12941 if (nwords != 1 || words == NULL) {
12977 12942 if (words != NULL) {
12978 12943 ddi_prop_free(words);
12979 12944 }
12980 12945 continue;
12981 12946 }
12982 12947 ASSERT(words != NULL);
12983 12948
12984 12949 /*
12985 12950 	 * If there is no hard address we might have to deal with
12986 12951 	 * that by using the WWN. Either way it is important to
12987 12952 	 * recognize this problem early so ssd can be informed of
12988 12953 	 * the right interconnect type.
12989 12954 */
12990 12955 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12991 12956 ptgt->tgt_hard_addr != 0) {
12992 12957 tgt_id =
12993 12958 (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12994 12959 } else {
12995 12960 tgt_id = ptgt->tgt_d_id;
12996 12961 }
12997 12962
12998 12963 if (tgt_id != (uint32_t)*words) {
12999 12964 ddi_prop_free(words);
13000 12965 continue;
13001 12966 }
13002 12967 ddi_prop_free(words);
13003 12968
13004 12969 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
13005 12970 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
13006 12971 &nwords) != DDI_PROP_SUCCESS) {
13007 12972 continue;
13008 12973 }
13009 12974
13010 12975 if (nwords != 1 || words == NULL) {
13011 12976 if (words != NULL) {
13012 12977 ddi_prop_free(words);
13013 12978 }
13014 12979 continue;
13015 12980 }
13016 12981 ASSERT(words != NULL);
13017 12982
13018 12983 if (plun->lun_num == (uint16_t)*words) {
13019 12984 ddi_prop_free(words);
13020 12985 break;
13021 12986 }
13022 12987 ddi_prop_free(words);
13023 12988 }
13024 12989 ndi_devi_exit(pdip, circular);
13025 12990
13026 12991 return (cdip);
13027 12992 }
13028 12993
13029 12994
13030 12995 static int
13031 12996 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
13032 12997 {
13033 12998 dev_info_t *pdip;
13034 12999 char buf[MAXNAMELEN];
13035 13000 char uaddr[MAXNAMELEN];
13036 13001 int rval = FC_FAILURE;
13037 13002
13038 13003 ASSERT(MUTEX_HELD(&plun->lun_mutex));
13039 13004
13040 13005 pdip = plun->lun_tgt->tgt_port->port_dip;
13041 13006
13042 13007 /*
13043 13008 * Check if pip (and not plun->lun_cip) is NULL. plun->lun_cip can be
13044 13009 * non-NULL even when the LUN is not there as in the case when a LUN is
13045 13010 * configured and then deleted on the device end (for T3/T4 case). In
13046 13011 * such cases, pip will be NULL.
13047 13012 *
13048 13013 	 * If the device generates an RSCN, the LUN gets offlined when it
13049 13014 	 * disappears and a new LUN is created when it is rediscovered on
13050 13015 	 * the device. If we checked lun_cip here instead, the LUN would
13051 13016 	 * never get onlined because this function would return
13052 13017 	 * FC_SUCCESS.
13053 13018 	 *
13054 13019 	 * The behavior is different on other devices. For instance, an HDS
13055 13020 	 * array generated no RSCN, but the next I/O produced a check
13056 13021 	 * condition and rediscovery was triggered that way. In such cases
13057 13022 	 * this path is not exercised.
13058 13023 */
13059 13024 if (pip == NULL) {
13060 13025 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
13061 13026 fcp_trace, FCP_BUF_LEVEL_4, 0,
13062 13027 "fcp_is_pip_present: plun->lun_cip is NULL: "
13063 13028 "plun: %p lun state: %x num: %d target state: %x",
13064 13029 plun, plun->lun_state, plun->lun_num,
13065 13030 plun->lun_tgt->tgt_port->port_state);
13066 13031 return (rval);
13067 13032 }
13068 13033
13069 13034 fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
13070 13035
13071 13036 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13072 13037
13073 13038 if (plun->lun_old_guid) {
13074 13039 if (mdi_pi_find(pdip, plun->lun_old_guid, uaddr) == pip) {
13075 13040 rval = FC_SUCCESS;
13076 13041 }
13077 13042 } else {
13078 13043 if (mdi_pi_find(pdip, plun->lun_guid, uaddr) == pip) {
13079 13044 rval = FC_SUCCESS;
13080 13045 }
13081 13046 }
13082 13047 return (rval);
13083 13048 }
13084 13049
13085 13050 static mdi_pathinfo_t *
13086 13051 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
13087 13052 {
13088 13053 char buf[MAXNAMELEN];
13089 13054 char uaddr[MAXNAMELEN];
13090 13055 mdi_pathinfo_t *pip;
13091 13056 struct fcp_tgt *ptgt = plun->lun_tgt;
13092 13057 struct fcp_port *pptr = ptgt->tgt_port;
13093 13058
13094 13059 ASSERT(MUTEX_HELD(&pptr->port_mutex));
13095 13060
13096 13061 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
13097 13062 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13098 13063
13099 13064 pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
13100 13065
13101 13066 return (pip);
13102 13067 }
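/*
 * A small illustrative sketch, not part of the original source: the unit
 * address built by fcp_find_existing_pip() has the form "w<port-wwn>,<lun>".
 * The WWN string and LUN number below are made-up example values.
 *
 *	char uaddr[MAXNAMELEN];
 *	mdi_pathinfo_t *pip;
 *
 *	(void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", "21000024ff3dcd2f", 4);
 *	pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
 *	(this looks up the pathinfo for unit address "w21000024ff3dcd2f,4")
 */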
13103 13068
13104 13069
13105 13070 static int
13106 13071 fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
13107 13072 int tcount, int flags, int *circ)
13108 13073 {
13109 13074 int rval;
13110 13075 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
13111 13076 struct fcp_tgt *ptgt = plun->lun_tgt;
13112 13077 dev_info_t *cdip = NULL;
13113 13078
13114 13079 ASSERT(MUTEX_HELD(&pptr->port_mutex));
13115 13080 ASSERT(MUTEX_HELD(&plun->lun_mutex));
13116 13081
13117 13082 if (plun->lun_cip == NULL) {
13118 13083 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13119 13084 fcp_trace, FCP_BUF_LEVEL_3, 0,
13120 13085 "fcp_online_child: plun->lun_cip is NULL: "
13121 13086 "plun: %p state: %x num: %d target state: %x",
13122 13087 plun, plun->lun_state, plun->lun_num,
13123 13088 plun->lun_tgt->tgt_port->port_state);
13124 13089 return (NDI_FAILURE);
13125 13090 }
13126 13091 again:
13127 13092 if (plun->lun_mpxio == 0) {
13128 13093 cdip = DIP(cip);
13129 13094 mutex_exit(&plun->lun_mutex);
13130 13095 mutex_exit(&pptr->port_mutex);
13131 13096
13132 13097 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13133 13098 fcp_trace, FCP_BUF_LEVEL_3, 0,
13134 13099 "!Invoking ndi_devi_online for %s: target=%x lun=%x",
13135 13100 ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13136 13101
13137 13102 /*
13138 13103 * We could check for FCP_LUN_INIT here but the chances
13139 13104 * of getting here when it's already in FCP_LUN_INIT
13140 13105 * are rare, and a duplicate ndi_devi_online wouldn't
13141 13106 * hurt either (as the node would already have been
13142 13107 * in CF2).
13143 13108 */
13144 13109 if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
13145 13110 rval = ndi_devi_bind_driver(cdip, flags);
13146 13111 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13147 13112 fcp_trace, FCP_BUF_LEVEL_3, 0,
13148 13113 "!Invoking ndi_devi_bind_driver: rval=%d", rval);
13149 13114 } else {
13150 13115 rval = ndi_devi_online(cdip, flags);
13151 13116 }
13152 13117
13153 13118 /*
13154 13119 * We log the message into the trace buffer if the device
13155 13120 * is "ses" and into syslog for any other device
13156 13121 * type. This is to prevent the ndi_devi_online failure
13157 13122 * message that appears for V880/A5K ses devices.
13158 13123 */
13159 13124 if (rval == NDI_SUCCESS) {
13160 13125 mutex_enter(&ptgt->tgt_mutex);
13161 13126 plun->lun_state |= FCP_LUN_INIT;
13162 13127 mutex_exit(&ptgt->tgt_mutex);
13163 13128 } else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
13164 13129 fcp_log(CE_NOTE, pptr->port_dip,
13165 13130 "!ndi_devi_online:"
13166 13131 " failed for %s: target=%x lun=%x %x",
13167 13132 ddi_get_name(cdip), ptgt->tgt_d_id,
13168 13133 plun->lun_num, rval);
13169 13134 } else {
13170 13135 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13171 13136 fcp_trace, FCP_BUF_LEVEL_3, 0,
13172 13137 " !ndi_devi_online:"
13173 13138 " failed for %s: target=%x lun=%x %x",
13174 13139 ddi_get_name(cdip), ptgt->tgt_d_id,
13175 13140 plun->lun_num, rval);
13176 13141 }
13177 13142 } else {
13178 13143 cdip = mdi_pi_get_client(PIP(cip));
13179 13144 mutex_exit(&plun->lun_mutex);
13180 13145 mutex_exit(&pptr->port_mutex);
13181 13146
13182 13147 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13183 13148 fcp_trace, FCP_BUF_LEVEL_3, 0,
13184 13149 "!Invoking mdi_pi_online for %s: target=%x lun=%x",
13185 13150 ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13186 13151
13187 13152 /*
13188 13153 * Hold path and exit phci to avoid deadlock with power
13189 13154 * management code during mdi_pi_online.
13190 13155 */
13191 13156 mdi_hold_path(PIP(cip));
13192 13157 mdi_devi_exit_phci(pptr->port_dip, *circ);
13193 13158
13194 13159 rval = mdi_pi_online(PIP(cip), flags);
13195 13160
13196 13161 mdi_devi_enter_phci(pptr->port_dip, circ);
13197 13162 mdi_rele_path(PIP(cip));
13198 13163
13199 13164 if (rval == MDI_SUCCESS) {
13200 13165 mutex_enter(&ptgt->tgt_mutex);
13201 13166 plun->lun_state |= FCP_LUN_INIT;
13202 13167 mutex_exit(&ptgt->tgt_mutex);
13203 13168
13204 13169 /*
13205 13170 * Clear MPxIO path permanent disable in case
13206 13171 * fcp hotplug dropped the offline event.
13207 13172 */
13208 13173 (void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13209 13174
13210 13175 } else if (rval == MDI_NOT_SUPPORTED) {
13211 13176 child_info_t *old_cip = cip;
13212 13177
13213 13178 /*
13214 13179 * MPxIO does not support this device yet.
13215 13180 * Enumerate in legacy mode.
13216 13181 */
13217 13182 mutex_enter(&pptr->port_mutex);
13218 13183 mutex_enter(&plun->lun_mutex);
13219 13184 plun->lun_mpxio = 0;
13220 13185 plun->lun_cip = NULL;
13221 13186 cdip = fcp_create_dip(plun, lcount, tcount);
13222 13187 plun->lun_cip = cip = CIP(cdip);
13223 13188 if (cip == NULL) {
13224 13189 fcp_log(CE_WARN, pptr->port_dip,
13225 13190 "!fcp_online_child: "
13226 13191 "Create devinfo failed for LU=%p", plun);
13227 13192 mutex_exit(&plun->lun_mutex);
13228 13193
13229 13194 mutex_enter(&ptgt->tgt_mutex);
13230 13195 plun->lun_state |= FCP_LUN_OFFLINE;
13231 13196 mutex_exit(&ptgt->tgt_mutex);
13232 13197
13233 13198 mutex_exit(&pptr->port_mutex);
13234 13199
13235 13200 /*
13236 13201 * free the mdi_pathinfo node
13237 13202 */
13238 13203 (void) mdi_pi_free(PIP(old_cip), 0);
13239 13204 } else {
13240 13205 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13241 13206 fcp_trace, FCP_BUF_LEVEL_3, 0,
13242 13207 "fcp_online_child: creating devinfo "
13243 13208 "node 0x%p for plun 0x%p",
13244 13209 cip, plun);
13245 13210 mutex_exit(&plun->lun_mutex);
13246 13211 mutex_exit(&pptr->port_mutex);
13247 13212 /*
13248 13213 * free the mdi_pathinfo node
13249 13214 */
13250 13215 (void) mdi_pi_free(PIP(old_cip), 0);
13251 13216 mutex_enter(&pptr->port_mutex);
13252 13217 mutex_enter(&plun->lun_mutex);
13253 13218 goto again;
13254 13219 }
13255 13220 } else {
13256 13221 if (cdip) {
13257 13222 fcp_log(CE_NOTE, pptr->port_dip,
13258 13223 "!fcp_online_child: mdi_pi_online:"
13259 13224 " failed for %s: target=%x lun=%x %x",
13260 13225 ddi_get_name(cdip), ptgt->tgt_d_id,
13261 13226 plun->lun_num, rval);
13262 13227 }
13263 13228 }
13264 13229 rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13265 13230 }
13266 13231
13267 13232 if (rval == NDI_SUCCESS) {
13268 13233 if (cdip) {
13269 13234 (void) ndi_event_retrieve_cookie(
13270 13235 pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
13271 13236 &fcp_insert_eid, NDI_EVENT_NOPASS);
13272 13237 (void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
13273 13238 cdip, fcp_insert_eid, NULL);
13274 13239 }
13275 13240 }
13276 13241 mutex_enter(&pptr->port_mutex);
13277 13242 mutex_enter(&plun->lun_mutex);
13278 13243 return (rval);
13279 13244 }
13280 13245
13281 13246 /* ARGSUSED */
13282 13247 static int
13283 13248 fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
13284 13249 int tcount, int flags, int *circ)
13285 13250 {
13286 13251 int rval;
13287 13252 int lun_mpxio;
13288 13253 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
13289 13254 struct fcp_tgt *ptgt = plun->lun_tgt;
13290 13255 dev_info_t *cdip;
13291 13256
13292 13257 ASSERT(MUTEX_HELD(&plun->lun_mutex));
13293 13258 ASSERT(MUTEX_HELD(&pptr->port_mutex));
13294 13259
13295 13260 if (plun->lun_cip == NULL) {
13296 13261 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13297 13262 fcp_trace, FCP_BUF_LEVEL_3, 0,
13298 13263 "fcp_offline_child: plun->lun_cip is NULL: "
13299 13264 "plun: %p lun state: %x num: %d target state: %x",
13300 13265 plun, plun->lun_state, plun->lun_num,
13301 13266 plun->lun_tgt->tgt_port->port_state);
13302 13267 return (NDI_FAILURE);
13303 13268 }
13304 13269
13305 13270 /*
13306 13271 * We will use this value twice. Make a copy to be sure we use
13307 13272 * the same value in both places.
13308 13273 */
13309 13274 lun_mpxio = plun->lun_mpxio;
13310 13275
13311 13276 if (lun_mpxio == 0) {
13312 13277 cdip = DIP(cip);
13313 13278 mutex_exit(&plun->lun_mutex);
13314 13279 mutex_exit(&pptr->port_mutex);
13315 - rval = ndi_devi_offline(DIP(cip), flags);
13280 + rval = ndi_devi_offline(DIP(cip), NDI_DEVFS_CLEAN | flags);
13316 13281 if (rval != NDI_SUCCESS) {
13317 13282 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13318 13283 fcp_trace, FCP_BUF_LEVEL_3, 0,
13319 13284 "fcp_offline_child: ndi_devi_offline failed "
13320 13285 "rval=%x cip=%p", rval, cip);
13321 13286 }
13322 13287 } else {
13323 13288 cdip = mdi_pi_get_client(PIP(cip));
13324 13289 mutex_exit(&plun->lun_mutex);
13325 13290 mutex_exit(&pptr->port_mutex);
13326 13291
13327 13292 /*
13328 13293 * Exit phci to avoid deadlock with power management code
13329 13294 * during mdi_pi_offline
13330 13295 */
13331 13296 mdi_hold_path(PIP(cip));
13332 13297 mdi_devi_exit_phci(pptr->port_dip, *circ);
13333 13298
13334 13299 rval = mdi_pi_offline(PIP(cip), flags);
13335 13300
13336 13301 mdi_devi_enter_phci(pptr->port_dip, circ);
13337 13302 mdi_rele_path(PIP(cip));
13338 13303
13339 13304 rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13340 13305 }
13341 13306
13342 13307 mutex_enter(&ptgt->tgt_mutex);
13343 13308 plun->lun_state &= ~FCP_LUN_INIT;
13344 13309 mutex_exit(&ptgt->tgt_mutex);
13345 13310
13346 13311 if (rval == NDI_SUCCESS) {
13347 13312 cdip = NULL;
13348 13313 if (flags & NDI_DEVI_REMOVE) {
13349 13314 mutex_enter(&plun->lun_mutex);
13350 13315 /*
13351 13316 * If the guid of the LUN changes, lun_cip will not
13352 13317 * be equal to cip, and after offlining the LUN with the
13353 13318 * old guid, we should keep lun_cip since it's the cip
13354 13319 * of the LUN with the new guid.
13355 13320 * Otherwise remove our reference to the child node.
13356 13321 *
13357 13322 * This must be done before the child node is freed,
13358 13323 * otherwise other threads could see a stale lun_cip
13359 13324 * pointer.
13360 13325 */
13361 13326 if (plun->lun_cip == cip) {
13362 13327 plun->lun_cip = NULL;
13363 13328 }
13364 13329 if (plun->lun_old_guid) {
13365 13330 kmem_free(plun->lun_old_guid,
13366 13331 plun->lun_old_guid_size);
13367 13332 plun->lun_old_guid = NULL;
13368 13333 plun->lun_old_guid_size = 0;
13369 13334 }
13370 13335 mutex_exit(&plun->lun_mutex);
13371 13336 }
13372 13337 }
13373 13338
13374 13339 if (lun_mpxio != 0) {
13375 13340 if (rval == NDI_SUCCESS) {
13376 13341 /*
13377 13342 * Clear MPxIO path permanent disable as the path is
13378 13343 * already offlined.
13379 13344 */
13380 13345 (void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13381 13346
13382 13347 if (flags & NDI_DEVI_REMOVE) {
13383 13348 (void) mdi_pi_free(PIP(cip), 0);
13384 13349 }
13385 13350 } else {
13386 13351 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13387 13352 fcp_trace, FCP_BUF_LEVEL_3, 0,
13388 13353 "fcp_offline_child: mdi_pi_offline failed "
13389 13354 "rval=%x cip=%p", rval, cip);
13390 13355 }
13391 13356 }
13392 13357
13393 13358 mutex_enter(&pptr->port_mutex);
13394 13359 mutex_enter(&plun->lun_mutex);
13395 13360
13396 13361 if (cdip) {
13397 13362 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13398 13363 fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
13399 13364 " target=%x lun=%x", "ndi_offline",
13400 13365 ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13401 13366 }
13402 13367
13403 13368 return (rval);
13404 13369 }
13405 13370
13406 13371 static void
13407 13372 fcp_remove_child(struct fcp_lun *plun)
13408 13373 {
13409 13374 child_info_t *cip;
13410 13375 int circ;
13411 13376
13412 13377 ASSERT(MUTEX_HELD(&plun->lun_mutex));
13413 13378
13414 13379 if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
13415 13380 if (plun->lun_mpxio == 0) {
13416 13381 (void) ndi_prop_remove_all(DIP(plun->lun_cip));
13417 13382 (void) ndi_devi_free(DIP(plun->lun_cip));
13418 13383 plun->lun_cip = NULL;
13419 13384 } else {
13420 13385 /*
13421 13386 * Clear reference to the child node in the lun.
13422 13387 * This must be done before freeing it with mdi_pi_free
13423 13388 * and with lun_mutex held so that other threads always
13424 13389 * see either valid lun_cip or NULL when holding
13425 13390 * lun_mutex. We keep a copy in cip.
13426 13391 */
13427 13392 cip = plun->lun_cip;
13428 13393 plun->lun_cip = NULL;
13429 13394
13430 13395 mutex_exit(&plun->lun_mutex);
13431 13396 mutex_exit(&plun->lun_tgt->tgt_mutex);
13432 13397 mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);
13433 13398
13434 13399 mdi_devi_enter(
13435 13400 plun->lun_tgt->tgt_port->port_dip, &circ);
13436 13401
13437 13402 /*
13438 13403 * Exit phci to avoid deadlock with power management
13439 13404 * code during mdi_pi_offline
13440 13405 */
13441 13406 mdi_hold_path(PIP(cip));
13442 13407 mdi_devi_exit_phci(
13443 13408 plun->lun_tgt->tgt_port->port_dip, circ);
13444 13409 (void) mdi_pi_offline(PIP(cip),
13445 13410 NDI_DEVI_REMOVE);
13446 13411 mdi_devi_enter_phci(
13447 13412 plun->lun_tgt->tgt_port->port_dip, &circ);
13448 13413 mdi_rele_path(PIP(cip));
13449 13414
13450 13415 mdi_devi_exit(
13451 13416 plun->lun_tgt->tgt_port->port_dip, circ);
13452 13417
13453 13418 FCP_TRACE(fcp_logq,
13454 13419 plun->lun_tgt->tgt_port->port_instbuf,
13455 13420 fcp_trace, FCP_BUF_LEVEL_3, 0,
13456 13421 "lun=%p pip freed %p", plun, cip);
13457 13422
13458 13423 (void) mdi_prop_remove(PIP(cip), NULL);
13459 13424 (void) mdi_pi_free(PIP(cip), 0);
13460 13425
13461 13426 mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
13462 13427 mutex_enter(&plun->lun_tgt->tgt_mutex);
13463 13428 mutex_enter(&plun->lun_mutex);
13464 13429 }
13465 13430 } else {
13466 13431 plun->lun_cip = NULL;
13467 13432 }
13468 13433 }
13469 13434
13470 13435 /*
13471 13436 * called when a timeout occurs
13472 13437 *
13473 13438 * can be scheduled during an attach or resume (if not already running)
13474 13439 *
13475 13440 * one timeout is set up for all ports
13476 13441 *
13477 13442 * acquires and releases the global mutex
13478 13443 */
13479 13444 /*ARGSUSED*/
13480 13445 static void
13481 13446 fcp_watch(void *arg)
13482 13447 {
13483 13448 struct fcp_port *pptr;
13484 13449 struct fcp_ipkt *icmd;
13485 13450 struct fcp_ipkt *nicmd;
13486 13451 struct fcp_pkt *cmd;
13487 13452 struct fcp_pkt *ncmd;
13488 13453 struct fcp_pkt *tail;
13489 13454 struct fcp_pkt *pcmd;
13490 13455 struct fcp_pkt *save_head;
13491 13456 struct fcp_port *save_port;
13492 13457
13493 13458 /* increment global watchdog time */
13494 13459 fcp_watchdog_time += fcp_watchdog_timeout;
13495 13460
13496 13461 mutex_enter(&fcp_global_mutex);
13497 13462
13498 13463 /* scan each port in our list */
13499 13464 for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
13500 13465 save_port = fcp_port_head;
13501 13466 pptr->port_state |= FCP_STATE_IN_WATCHDOG;
13502 13467 mutex_exit(&fcp_global_mutex);
13503 13468
13504 13469 mutex_enter(&pptr->port_mutex);
13505 13470 if (pptr->port_ipkt_list == NULL &&
13506 13471 (pptr->port_state & (FCP_STATE_SUSPENDED |
13507 13472 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
13508 13473 pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13509 13474 mutex_exit(&pptr->port_mutex);
13510 13475 mutex_enter(&fcp_global_mutex);
13511 13476 goto end_of_watchdog;
13512 13477 }
13513 13478
13514 13479 /*
13515 13480 * We check if a list of targets needs to be offlined.
13516 13481 */
13517 13482 if (pptr->port_offline_tgts) {
13518 13483 fcp_scan_offline_tgts(pptr);
13519 13484 }
13520 13485
13521 13486 /*
13522 13487 * We check if a list of luns need to be offlined.
13523 13488 */
13524 13489 if (pptr->port_offline_luns) {
13525 13490 fcp_scan_offline_luns(pptr);
13526 13491 }
13527 13492
13528 13493 /*
13529 13494 * We check if a list of targets or luns need to be reset.
13530 13495 */
13531 13496 if (pptr->port_reset_list) {
13532 13497 fcp_check_reset_delay(pptr);
13533 13498 }
13534 13499
13535 13500 mutex_exit(&pptr->port_mutex);
13536 13501
13537 13502 /*
13538 13503 * This is where the pending commands (pkt) are checked for
13539 13504 * timeout.
13540 13505 */
13541 13506 mutex_enter(&pptr->port_pkt_mutex);
13542 13507 tail = pptr->port_pkt_tail;
13543 13508
13544 13509 for (pcmd = NULL, cmd = pptr->port_pkt_head;
13545 13510 cmd != NULL; cmd = ncmd) {
13546 13511 ncmd = cmd->cmd_next;
13547 13512 /*
13548 13513 * If a command is in this queue the bit CFLAG_IN_QUEUE
13549 13514 * must be set.
13550 13515 */
13551 13516 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
13552 13517 /*
13553 13518 * FCP_INVALID_TIMEOUT will be set for those
13554 13519 * commands that need to be failed, mostly those
13555 13520 * cmds that could not be queued down for the
13556 13521 * "timeout" value. cmd->cmd_timeout is used
13557 13522 * to try and requeue the command regularly.
13558 13523 */
13559 13524 if (cmd->cmd_timeout >= fcp_watchdog_time) {
13560 13525 /*
13561 13526 * This command hasn't timed out yet. Let's
13562 13527 * go to the next one.
13563 13528 */
13564 13529 pcmd = cmd;
13565 13530 goto end_of_loop;
13566 13531 }
13567 13532
13568 13533 if (cmd == pptr->port_pkt_head) {
13569 13534 ASSERT(pcmd == NULL);
13570 13535 pptr->port_pkt_head = cmd->cmd_next;
13571 13536 } else {
13572 13537 ASSERT(pcmd != NULL);
13573 13538 pcmd->cmd_next = cmd->cmd_next;
13574 13539 }
13575 13540
13576 13541 if (cmd == pptr->port_pkt_tail) {
13577 13542 ASSERT(cmd->cmd_next == NULL);
13578 13543 pptr->port_pkt_tail = pcmd;
13579 13544 if (pcmd) {
13580 13545 pcmd->cmd_next = NULL;
13581 13546 }
13582 13547 }
13583 13548 cmd->cmd_next = NULL;
13584 13549
13585 13550 /*
13586 13551 * Save the current head before dropping the
13587 13552 * mutex - if the head doesn't remain the
13588 13553 * same after reacquiring the mutex, just
13589 13554 * bail out and revisit on the next tick.
13590 13555 *
13591 13556 * PS: The tail pointer can change as the commands
13592 13557 * get requeued after failure to retransport.
13593 13558 */
13594 13559 save_head = pptr->port_pkt_head;
13595 13560 mutex_exit(&pptr->port_pkt_mutex);
13596 13561
13597 13562 if (cmd->cmd_fp_pkt->pkt_timeout ==
13598 13563 FCP_INVALID_TIMEOUT) {
13599 13564 struct scsi_pkt *pkt = cmd->cmd_pkt;
13600 13565 struct fcp_lun *plun;
13601 13566 struct fcp_tgt *ptgt;
13602 13567
13603 13568 plun = ADDR2LUN(&pkt->pkt_address);
13604 13569 ptgt = plun->lun_tgt;
13605 13570
13606 13571 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13607 13572 fcp_trace, FCP_BUF_LEVEL_2, 0,
13608 13573 "SCSI cmd 0x%x to D_ID=%x timed out",
13609 13574 pkt->pkt_cdbp[0], ptgt->tgt_d_id);
13610 13575
13611 13576 cmd->cmd_state == FCP_PKT_ABORTING ?
13612 13577 fcp_fail_cmd(cmd, CMD_RESET,
13613 13578 STAT_DEV_RESET) : fcp_fail_cmd(cmd,
13614 13579 CMD_TIMEOUT, STAT_ABORTED);
13615 13580 } else {
13616 13581 fcp_retransport_cmd(pptr, cmd);
13617 13582 }
13618 13583 mutex_enter(&pptr->port_pkt_mutex);
13619 13584 if (save_head && save_head != pptr->port_pkt_head) {
13620 13585 /*
13621 13586 * Looks like the linked list got changed (this
13622 13587 * mostly happens when the OFFLINE LUN code starts
13623 13588 * returning overflow queue commands in
13624 13589 * parallel). So bail out and revisit during
13625 13590 * the next tick.
13626 13591 */
13627 13592 break;
13628 13593 }
13629 13594 end_of_loop:
13630 13595 /*
13631 13596 * Scan only up to the previously known tail pointer
13632 13597 * to avoid excessive processing - lots of new packets
13633 13598 * could have been added to the tail or the old ones
13634 13599 * re-queued.
13635 13600 */
13636 13601 if (cmd == tail) {
13637 13602 break;
13638 13603 }
13639 13604 }
13640 13605 mutex_exit(&pptr->port_pkt_mutex);
13641 13606
13642 13607 mutex_enter(&pptr->port_mutex);
13643 13608 for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
13644 13609 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
13645 13610
13646 13611 nicmd = icmd->ipkt_next;
13647 13612 if ((icmd->ipkt_restart != 0) &&
13648 13613 (icmd->ipkt_restart >= fcp_watchdog_time)) {
13649 13614 /* packet has not timed out */
13650 13615 continue;
13651 13616 }
13652 13617
13653 13618 /* time for packet re-transport */
13654 13619 if (icmd == pptr->port_ipkt_list) {
13655 13620 pptr->port_ipkt_list = icmd->ipkt_next;
13656 13621 if (pptr->port_ipkt_list) {
13657 13622 pptr->port_ipkt_list->ipkt_prev =
13658 13623 NULL;
13659 13624 }
13660 13625 } else {
13661 13626 icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
13662 13627 if (icmd->ipkt_next) {
13663 13628 icmd->ipkt_next->ipkt_prev =
13664 13629 icmd->ipkt_prev;
13665 13630 }
13666 13631 }
13667 13632 icmd->ipkt_next = NULL;
13668 13633 icmd->ipkt_prev = NULL;
13669 13634 mutex_exit(&pptr->port_mutex);
13670 13635
13671 13636 if (fcp_is_retryable(icmd)) {
13672 13637 fc_ulp_rscn_info_t *rscnp =
13673 13638 (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
13674 13639 pkt_ulp_rscn_infop;
13675 13640
13676 13641 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13677 13642 fcp_trace, FCP_BUF_LEVEL_2, 0,
13678 13643 "%x to D_ID=%x Retrying..",
13679 13644 icmd->ipkt_opcode,
13680 13645 icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);
13681 13646
13682 13647 /*
13683 13648 * Update the RSCN count in the packet
13684 13649 * before resending.
13685 13650 */
13686 13651
13687 13652 if (rscnp != NULL) {
13688 13653 rscnp->ulp_rscn_count =
13689 13654 fc_ulp_get_rscn_count(pptr->
13690 13655 port_fp_handle);
13691 13656 }
13692 13657
13693 13658 mutex_enter(&pptr->port_mutex);
13694 13659 mutex_enter(&ptgt->tgt_mutex);
13695 13660 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
13696 13661 mutex_exit(&ptgt->tgt_mutex);
13697 13662 mutex_exit(&pptr->port_mutex);
13698 13663 switch (icmd->ipkt_opcode) {
13699 13664 int rval;
13700 13665 case LA_ELS_PLOGI:
13701 13666 if ((rval = fc_ulp_login(
13702 13667 pptr->port_fp_handle,
13703 13668 &icmd->ipkt_fpkt, 1)) ==
13704 13669 FC_SUCCESS) {
13705 13670 mutex_enter(
13706 13671 &pptr->port_mutex);
13707 13672 continue;
13708 13673 }
13709 13674 if (fcp_handle_ipkt_errors(
13710 13675 pptr, ptgt, icmd, rval,
13711 13676 "PLOGI") == DDI_SUCCESS) {
13712 13677 mutex_enter(
13713 13678 &pptr->port_mutex);
13714 13679 continue;
13715 13680 }
13716 13681 break;
13717 13682
13718 13683 case LA_ELS_PRLI:
13719 13684 if ((rval = fc_ulp_issue_els(
13720 13685 pptr->port_fp_handle,
13721 13686 icmd->ipkt_fpkt)) ==
13722 13687 FC_SUCCESS) {
13723 13688 mutex_enter(
13724 13689 &pptr->port_mutex);
13725 13690 continue;
13726 13691 }
13727 13692 if (fcp_handle_ipkt_errors(
13728 13693 pptr, ptgt, icmd, rval,
13729 13694 "PRLI") == DDI_SUCCESS) {
13730 13695 mutex_enter(
13731 13696 &pptr->port_mutex);
13732 13697 continue;
13733 13698 }
13734 13699 break;
13735 13700
13736 13701 default:
13737 13702 if ((rval = fcp_transport(
13738 13703 pptr->port_fp_handle,
13739 13704 icmd->ipkt_fpkt, 1)) ==
13740 13705 FC_SUCCESS) {
13741 13706 mutex_enter(
13742 13707 &pptr->port_mutex);
13743 13708 continue;
13744 13709 }
13745 13710 if (fcp_handle_ipkt_errors(
13746 13711 pptr, ptgt, icmd, rval,
13747 13712 "PRLI") == DDI_SUCCESS) {
13748 13713 mutex_enter(
13749 13714 &pptr->port_mutex);
13750 13715 continue;
13751 13716 }
13752 13717 break;
13753 13718 }
13754 13719 } else {
13755 13720 mutex_exit(&ptgt->tgt_mutex);
13756 13721 mutex_exit(&pptr->port_mutex);
13757 13722 }
13758 13723 } else {
13759 13724 fcp_print_error(icmd->ipkt_fpkt);
13760 13725 }
13761 13726
13762 13727 (void) fcp_call_finish_init(pptr, ptgt,
13763 13728 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
13764 13729 icmd->ipkt_cause);
13765 13730 fcp_icmd_free(pptr, icmd);
13766 13731 mutex_enter(&pptr->port_mutex);
13767 13732 }
13768 13733
13769 13734 pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13770 13735 mutex_exit(&pptr->port_mutex);
13771 13736 mutex_enter(&fcp_global_mutex);
13772 13737
13773 13738 end_of_watchdog:
13774 13739 /*
13775 13740 * Bail out early before getting into trouble
13776 13741 */
13777 13742 if (save_port != fcp_port_head) {
13778 13743 break;
13779 13744 }
13780 13745 }
13781 13746
13782 13747 if (fcp_watchdog_init > 0) {
13783 13748 /* reschedule timeout to go again */
13784 13749 fcp_watchdog_id =
13785 13750 timeout(fcp_watch, NULL, fcp_watchdog_tick);
13786 13751 }
13787 13752 mutex_exit(&fcp_global_mutex);
13788 13753 }
13789 13754
13790 13755
13791 13756 static void
13792 13757 fcp_check_reset_delay(struct fcp_port *pptr)
13793 13758 {
13794 13759 uint32_t tgt_cnt;
13795 13760 int level;
13796 13761 struct fcp_tgt *ptgt;
13797 13762 struct fcp_lun *plun;
13798 13763 struct fcp_reset_elem *cur = NULL;
13799 13764 struct fcp_reset_elem *next = NULL;
13800 13765 struct fcp_reset_elem *prev = NULL;
13801 13766
13802 13767 ASSERT(mutex_owned(&pptr->port_mutex));
13803 13768
13804 13769 next = pptr->port_reset_list;
13805 13770 while ((cur = next) != NULL) {
13806 13771 next = cur->next;
13807 13772
13808 13773 if (cur->timeout < fcp_watchdog_time) {
13809 13774 prev = cur;
13810 13775 continue;
13811 13776 }
13812 13777
13813 13778 ptgt = cur->tgt;
13814 13779 plun = cur->lun;
13815 13780 tgt_cnt = cur->tgt_cnt;
13816 13781
13817 13782 if (ptgt) {
13818 13783 level = RESET_TARGET;
13819 13784 } else {
13820 13785 ASSERT(plun != NULL);
13821 13786 level = RESET_LUN;
13822 13787 ptgt = plun->lun_tgt;
13823 13788 }
13824 13789 if (prev) {
13825 13790 prev->next = next;
13826 13791 } else {
13827 13792 /*
13828 13793 * Because we drop the port mutex while doing aborts for
13829 13794 * packets, we can't rely on reset_list pointing to
13830 13795 * our head.
13831 13796 */
13832 13797 if (cur == pptr->port_reset_list) {
13833 13798 pptr->port_reset_list = next;
13834 13799 } else {
13835 13800 struct fcp_reset_elem *which;
13836 13801
13837 13802 which = pptr->port_reset_list;
13838 13803 while (which && which->next != cur) {
13839 13804 which = which->next;
13840 13805 }
13841 13806 ASSERT(which != NULL);
13842 13807
13843 13808 which->next = next;
13844 13809 prev = which;
13845 13810 }
13846 13811 }
13847 13812
13848 13813 kmem_free(cur, sizeof (*cur));
13849 13814
13850 13815 if (tgt_cnt == ptgt->tgt_change_cnt) {
13851 13816 mutex_enter(&ptgt->tgt_mutex);
13852 13817 if (level == RESET_TARGET) {
13853 13818 fcp_update_tgt_state(ptgt,
13854 13819 FCP_RESET, FCP_LUN_BUSY);
13855 13820 } else {
13856 13821 fcp_update_lun_state(plun,
13857 13822 FCP_RESET, FCP_LUN_BUSY);
13858 13823 }
13859 13824 mutex_exit(&ptgt->tgt_mutex);
13860 13825
13861 13826 mutex_exit(&pptr->port_mutex);
13862 13827 fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
13863 13828 mutex_enter(&pptr->port_mutex);
13864 13829 }
13865 13830 }
13866 13831 }
13867 13832
13868 13833
13869 13834 static void
13870 13835 fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
13871 13836 struct fcp_lun *rlun, int tgt_cnt)
13872 13837 {
13873 13838 int rval;
13874 13839 struct fcp_lun *tlun, *nlun;
13875 13840 struct fcp_pkt *pcmd = NULL, *ncmd = NULL,
13876 13841 *cmd = NULL, *head = NULL,
13877 13842 *tail = NULL;
13878 13843
13879 13844 mutex_enter(&pptr->port_pkt_mutex);
13880 13845 for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
13881 13846 struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
13882 13847 struct fcp_tgt *ptgt = plun->lun_tgt;
13883 13848
13884 13849 ncmd = cmd->cmd_next;
13885 13850
13886 13851 if (ptgt != ttgt && plun != rlun) {
13887 13852 pcmd = cmd;
13888 13853 continue;
13889 13854 }
13890 13855
13891 13856 if (pcmd != NULL) {
13892 13857 ASSERT(pptr->port_pkt_head != cmd);
13893 13858 pcmd->cmd_next = ncmd;
13894 13859 } else {
13895 13860 ASSERT(cmd == pptr->port_pkt_head);
13896 13861 pptr->port_pkt_head = ncmd;
13897 13862 }
13898 13863 if (pptr->port_pkt_tail == cmd) {
13899 13864 ASSERT(cmd->cmd_next == NULL);
13900 13865 pptr->port_pkt_tail = pcmd;
13901 13866 if (pcmd != NULL) {
13902 13867 pcmd->cmd_next = NULL;
13903 13868 }
13904 13869 }
13905 13870
13906 13871 if (head == NULL) {
13907 13872 head = tail = cmd;
13908 13873 } else {
13909 13874 ASSERT(tail != NULL);
13910 13875 tail->cmd_next = cmd;
13911 13876 tail = cmd;
13912 13877 }
13913 13878 cmd->cmd_next = NULL;
13914 13879 }
13915 13880 mutex_exit(&pptr->port_pkt_mutex);
13916 13881
13917 13882 for (cmd = head; cmd != NULL; cmd = ncmd) {
13918 13883 struct scsi_pkt *pkt = cmd->cmd_pkt;
13919 13884
13920 13885 ncmd = cmd->cmd_next;
13921 13886 ASSERT(pkt != NULL);
13922 13887
13923 13888 mutex_enter(&pptr->port_mutex);
13924 13889 if (ttgt->tgt_change_cnt == tgt_cnt) {
13925 13890 mutex_exit(&pptr->port_mutex);
13926 13891 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
13927 13892 pkt->pkt_reason = CMD_RESET;
13928 13893 pkt->pkt_statistics |= STAT_DEV_RESET;
13929 13894 cmd->cmd_state = FCP_PKT_IDLE;
13930 13895 fcp_post_callback(cmd);
13931 13896 } else {
13932 13897 mutex_exit(&pptr->port_mutex);
13933 13898 }
13934 13899 }
13935 13900
13936 13901 /*
13937 13902 * If the FCA will return all the commands in its queue then our
13938 13903 * work is easy, just return.
13939 13904 */
13940 13905
13941 13906 if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
13942 13907 return;
13943 13908 }
13944 13909
13945 13910 /*
13946 13911 * For RESET_LUN get hold of target pointer
13947 13912 */
13948 13913 if (ttgt == NULL) {
13949 13914 ASSERT(rlun != NULL);
13950 13915
13951 13916 ttgt = rlun->lun_tgt;
13952 13917
13953 13918 ASSERT(ttgt != NULL);
13954 13919 }
13955 13920
13956 13921 /*
13957 13922 * There are some severe race conditions here.
13958 13923 * While we are trying to abort the pkt, it might be completing
13959 13924 * so mark it aborted and if the abort does not succeed then
13960 13925 * handle it in the watch thread.
13961 13926 */
13962 13927 mutex_enter(&ttgt->tgt_mutex);
13963 13928 nlun = ttgt->tgt_lun;
13964 13929 mutex_exit(&ttgt->tgt_mutex);
13965 13930 while ((tlun = nlun) != NULL) {
13966 13931 int restart = 0;
13967 13932 if (rlun && rlun != tlun) {
13968 13933 mutex_enter(&ttgt->tgt_mutex);
13969 13934 nlun = tlun->lun_next;
13970 13935 mutex_exit(&ttgt->tgt_mutex);
13971 13936 continue;
13972 13937 }
13973 13938 mutex_enter(&tlun->lun_mutex);
13974 13939 cmd = tlun->lun_pkt_head;
13975 13940 while (cmd != NULL) {
13976 13941 if (cmd->cmd_state == FCP_PKT_ISSUED) {
13977 13942 struct scsi_pkt *pkt;
13978 13943
13979 13944 restart = 1;
13980 13945 cmd->cmd_state = FCP_PKT_ABORTING;
13981 13946 mutex_exit(&tlun->lun_mutex);
13982 13947 rval = fc_ulp_abort(pptr->port_fp_handle,
13983 13948 cmd->cmd_fp_pkt, KM_SLEEP);
13984 13949 if (rval == FC_SUCCESS) {
13985 13950 pkt = cmd->cmd_pkt;
13986 13951 pkt->pkt_reason = CMD_RESET;
13987 13952 pkt->pkt_statistics |= STAT_DEV_RESET;
13988 13953 cmd->cmd_state = FCP_PKT_IDLE;
13989 13954 fcp_post_callback(cmd);
13990 13955 } else {
13991 13956 caddr_t msg;
13992 13957
13993 13958 (void) fc_ulp_error(rval, &msg);
13994 13959
13995 13960 /*
13996 13961 * This part is tricky. The abort
13997 13962 * failed and now the command could
13998 13963 * be completing. The cmd_state ==
13999 13964 * FCP_PKT_ABORTING should save
14000 13965 * us in fcp_cmd_callback. If we
14001 13966 * are already aborting ignore the
14002 13967 * command in fcp_cmd_callback.
14003 13968 * Here we leave this packet for 20
14004 13969 * sec to be aborted in the
14005 13970 * fcp_watch thread.
14006 13971 */
14007 13972 fcp_log(CE_WARN, pptr->port_dip,
14008 13973 "!Abort failed after reset %s",
14009 13974 msg);
14010 13975
14011 13976 cmd->cmd_timeout =
14012 13977 fcp_watchdog_time +
14013 13978 cmd->cmd_pkt->pkt_time +
14014 13979 FCP_FAILED_DELAY;
14015 13980
14016 13981 cmd->cmd_fp_pkt->pkt_timeout =
14017 13982 FCP_INVALID_TIMEOUT;
14018 13983 /*
14019 13984 * This is a hack, cmd is put in the
14020 13985 * overflow queue so that it can be
14021 13986 * timed out finally
14022 13987 */
14023 13988 cmd->cmd_flags |= CFLAG_IN_QUEUE;
14024 13989
14025 13990 mutex_enter(&pptr->port_pkt_mutex);
14026 13991 if (pptr->port_pkt_head) {
14027 13992 ASSERT(pptr->port_pkt_tail
14028 13993 != NULL);
14029 13994 pptr->port_pkt_tail->cmd_next
14030 13995 = cmd;
14031 13996 pptr->port_pkt_tail = cmd;
14032 13997 } else {
14033 13998 ASSERT(pptr->port_pkt_tail
14034 13999 == NULL);
14035 14000 pptr->port_pkt_head =
14036 14001 pptr->port_pkt_tail
14037 14002 = cmd;
14038 14003 }
14039 14004 cmd->cmd_next = NULL;
14040 14005 mutex_exit(&pptr->port_pkt_mutex);
14041 14006 }
14042 14007 mutex_enter(&tlun->lun_mutex);
14043 14008 cmd = tlun->lun_pkt_head;
14044 14009 } else {
14045 14010 cmd = cmd->cmd_forw;
14046 14011 }
14047 14012 }
14048 14013 mutex_exit(&tlun->lun_mutex);
14049 14014
14050 14015 mutex_enter(&ttgt->tgt_mutex);
14051 14016 restart == 1 ? (nlun = ttgt->tgt_lun) : (nlun = tlun->lun_next);
14052 14017 mutex_exit(&ttgt->tgt_mutex);
14053 14018
14054 14019 mutex_enter(&pptr->port_mutex);
14055 14020 if (tgt_cnt != ttgt->tgt_change_cnt) {
14056 14021 mutex_exit(&pptr->port_mutex);
14057 14022 return;
14058 14023 } else {
14059 14024 mutex_exit(&pptr->port_mutex);
14060 14025 }
14061 14026 }
14062 14027 }
14063 14028
14064 14029
14065 14030 /*
14066 14031 * unlink the soft state, returning the soft state found (if any)
14067 14032 *
14068 14033 * acquires and releases the global mutex
14069 14034 */
14070 14035 struct fcp_port *
14071 14036 fcp_soft_state_unlink(struct fcp_port *pptr)
14072 14037 {
14073 14038 struct fcp_port *hptr; /* ptr index */
14074 14039 struct fcp_port *tptr; /* prev hptr */
14075 14040
14076 14041 mutex_enter(&fcp_global_mutex);
14077 14042 for (hptr = fcp_port_head, tptr = NULL;
14078 14043 hptr != NULL;
14079 14044 tptr = hptr, hptr = hptr->port_next) {
14080 14045 if (hptr == pptr) {
14081 14046 /* we found a match -- remove this item */
14082 14047 if (tptr == NULL) {
14083 14048 /* we're at the head of the list */
14084 14049 fcp_port_head = hptr->port_next;
14085 14050 } else {
14086 14051 tptr->port_next = hptr->port_next;
14087 14052 }
14088 14053 break; /* success */
14089 14054 }
14090 14055 }
14091 14056 if (fcp_port_head == NULL) {
14092 14057 fcp_cleanup_blacklist(&fcp_lun_blacklist);
14093 14058 }
14094 14059 mutex_exit(&fcp_global_mutex);
14095 14060 return (hptr);
14096 14061 }
14097 14062
14098 14063
14099 14064 /*
14100 14065 * called by fcp_scsi_hba_tgt_init to find a LUN given a
14101 14066 * WWN and a LUN number
14102 14067 */
14103 14068 /* ARGSUSED */
14104 14069 static struct fcp_lun *
14105 14070 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
14106 14071 {
14107 14072 int hash;
14108 14073 struct fcp_tgt *ptgt;
14109 14074 struct fcp_lun *plun;
14110 14075
14111 14076 ASSERT(mutex_owned(&pptr->port_mutex));
14112 14077
14113 14078 hash = FCP_HASH(wwn);
14114 14079 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
14115 14080 ptgt = ptgt->tgt_next) {
14116 14081 if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
14117 14082 sizeof (ptgt->tgt_port_wwn)) == 0) {
14118 14083 mutex_enter(&ptgt->tgt_mutex);
14119 14084 for (plun = ptgt->tgt_lun;
14120 14085 plun != NULL;
14121 14086 plun = plun->lun_next) {
14122 14087 if (plun->lun_num == lun) {
14123 14088 mutex_exit(&ptgt->tgt_mutex);
14124 14089 return (plun);
14125 14090 }
14126 14091 }
14127 14092 mutex_exit(&ptgt->tgt_mutex);
14128 14093 return (NULL);
14129 14094 }
14130 14095 }
14131 14096 return (NULL);
14132 14097 }
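/*
 * A minimal lookup sketch, not part of the original source; wwn and
 * lun_num stand for caller-supplied values. Per the ASSERT above,
 * port_mutex must be held across the call.
 *
 *	struct fcp_lun *plun;
 *
 *	mutex_enter(&pptr->port_mutex);
 *	plun = fcp_lookup_lun(pptr, wwn, lun_num);
 *	mutex_exit(&pptr->port_mutex);
 *	if (plun == NULL) {
 *		fcp_log(CE_NOTE, pptr->port_dip,
 *		    "!no LUN %x behind that port WWN", lun_num);
 *	}
 */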
14133 14098
14134 14099 /*
14135 14100 * Function: fcp_prepare_pkt
14136 14101 *
14137 14102 * Description: This function prepares the SCSI cmd pkt, passed by the caller,
14138 14103 * for fcp_start(). It binds the data or partially maps it.
14139 14104 * Builds the FCP header and starts the initialization of the
14140 14105 * Fibre Channel header.
14141 14106 *
14142 14107 * Argument: *pptr FCP port.
14143 14108 * *cmd FCP packet.
14144 14109 * *plun LUN the command will be sent to.
14145 14110 *
14146 14111 * Context: User, Kernel and Interrupt context.
14147 14112 */
14148 14113 static void
14149 14114 fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
14150 14115 struct fcp_lun *plun)
14151 14116 {
14152 14117 fc_packet_t *fpkt = cmd->cmd_fp_pkt;
14153 14118 struct fcp_tgt *ptgt = plun->lun_tgt;
14154 14119 struct fcp_cmd *fcmd = &cmd->cmd_fcp_cmd;
14155 14120
14156 14121 ASSERT(cmd->cmd_pkt->pkt_comp ||
14157 14122 (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));
14158 14123
14159 14124 if (cmd->cmd_pkt->pkt_numcookies) {
14160 14125 if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
14161 14126 fcmd->fcp_cntl.cntl_read_data = 1;
14162 14127 fcmd->fcp_cntl.cntl_write_data = 0;
14163 14128 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
14164 14129 } else {
14165 14130 fcmd->fcp_cntl.cntl_read_data = 0;
14166 14131 fcmd->fcp_cntl.cntl_write_data = 1;
14167 14132 fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
14168 14133 }
14169 14134
14170 14135 fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;
14171 14136
14172 14137 fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
14173 14138 ASSERT(fpkt->pkt_data_cookie_cnt <=
14174 14139 pptr->port_data_dma_attr.dma_attr_sgllen);
14175 14140
14176 14141 cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;
14177 14142
14178 14143 /* FCA needs pkt_datalen to be set */
14179 14144 fpkt->pkt_datalen = cmd->cmd_dmacount;
14180 14145 fcmd->fcp_data_len = cmd->cmd_dmacount;
14181 14146 } else {
14182 14147 fcmd->fcp_cntl.cntl_read_data = 0;
14183 14148 fcmd->fcp_cntl.cntl_write_data = 0;
14184 14149 fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
14185 14150 fpkt->pkt_datalen = 0;
14186 14151 fcmd->fcp_data_len = 0;
14187 14152 }
14188 14153
14189 14154 /* set up the Tagged Queuing type */
14190 14155 if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
14191 14156 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
14192 14157 } else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
14193 14158 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
14194 14159 } else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
14195 14160 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
14196 14161 } else {
14197 14162 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
14198 14163 }
14199 14164
14200 14165 fcmd->fcp_ent_addr = plun->lun_addr;
14201 14166
14202 14167 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
14203 14168 FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
14204 14169 fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
14205 14170 } else {
14206 14171 ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
14207 14172 }
14208 14173
14209 14174 cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
14210 14175 cmd->cmd_pkt->pkt_state = 0;
14211 14176 cmd->cmd_pkt->pkt_statistics = 0;
14212 14177 cmd->cmd_pkt->pkt_resid = 0;
14213 14178
14214 14179 cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;
14215 14180
14216 14181 if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
14217 14182 fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
14218 14183 fpkt->pkt_comp = NULL;
14219 14184 } else {
14220 14185 fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
14221 14186 if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
14222 14187 fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
14223 14188 }
14224 14189 fpkt->pkt_comp = fcp_cmd_callback;
14225 14190 }
14226 14191
14227 14192 mutex_enter(&pptr->port_mutex);
14228 14193 if (pptr->port_state & FCP_STATE_SUSPENDED) {
14229 14194 fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
14230 14195 }
14231 14196 mutex_exit(&pptr->port_mutex);
14232 14197
14233 14198 fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
14234 14199 fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
14235 14200
14236 14201 /*
14237 14202 * Save a few kernel cycles here
14238 14203 */
14239 14204 #ifndef __lock_lint
14240 14205 fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
14241 14206 #endif /* __lock_lint */
14242 14207 }
14243 14208
14244 14209 static void
14245 14210 fcp_post_callback(struct fcp_pkt *cmd)
14246 14211 {
14247 14212 scsi_hba_pkt_comp(cmd->cmd_pkt);
14248 14213 }
14249 14214
14250 14215
14251 14216 /*
14252 14217 * called to do polled I/O by fcp_start()
14253 14218 *
14254 14219 * return a transport status value, i.e. TRAN_ACCEPT for success
14255 14220 */
14256 14221 static int
14257 14222 fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
14258 14223 {
14259 14224 int rval;
14260 14225
14261 14226 #ifdef DEBUG
14262 14227 mutex_enter(&pptr->port_pkt_mutex);
14263 14228 pptr->port_npkts++;
14264 14229 mutex_exit(&pptr->port_pkt_mutex);
14265 14230 #endif /* DEBUG */
14266 14231
14267 14232 if (cmd->cmd_fp_pkt->pkt_timeout) {
14268 14233 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
14269 14234 } else {
14270 14235 cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
14271 14236 }
14272 14237
14273 14238 ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);
14274 14239
14275 14240 cmd->cmd_state = FCP_PKT_ISSUED;
14276 14241
14277 14242 rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);
14278 14243
14279 14244 #ifdef DEBUG
14280 14245 mutex_enter(&pptr->port_pkt_mutex);
14281 14246 pptr->port_npkts--;
14282 14247 mutex_exit(&pptr->port_pkt_mutex);
14283 14248 #endif /* DEBUG */
14284 14249
14285 14250 cmd->cmd_state = FCP_PKT_IDLE;
14286 14251
14287 14252 switch (rval) {
14288 14253 case FC_SUCCESS:
14289 14254 if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
14290 14255 fcp_complete_pkt(cmd->cmd_fp_pkt);
14291 14256 rval = TRAN_ACCEPT;
14292 14257 } else {
14293 14258 rval = TRAN_FATAL_ERROR;
14294 14259 }
14295 14260 break;
14296 14261
14297 14262 case FC_TRAN_BUSY:
14298 14263 rval = TRAN_BUSY;
14299 14264 cmd->cmd_pkt->pkt_resid = 0;
14300 14265 break;
14301 14266
14302 14267 case FC_BADPACKET:
14303 14268 rval = TRAN_BADPKT;
14304 14269 break;
14305 14270
14306 14271 default:
14307 14272 rval = TRAN_FATAL_ERROR;
14308 14273 break;
14309 14274 }
14310 14275
14311 14276 return (rval);
14312 14277 }
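/*
 * A minimal caller sketch, not part of the original source, showing how a
 * dispatch routine such as fcp_start() might take the polled path; cmd is
 * assumed to have already gone through fcp_prepare_pkt(), which leaves
 * pkt_comp NULL for FLAG_NOINTR packets.
 *
 *	if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
 *		return (fcp_dopoll(pptr, cmd));
 *	}
 */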
14313 14278
14314 14279
14315 14280 /*
14316 14281 * called by some of the following transport-called routines to convert
14317 14282 * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14318 14283 */
14319 14284 static struct fcp_port *
14320 14285 fcp_dip2port(dev_info_t *dip)
14321 14286 {
14322 14287 int instance;
14323 14288
14324 14289 instance = ddi_get_instance(dip);
14325 14290 return (ddi_get_soft_state(fcp_softstate, instance));
14326 14291 }
14327 14292
14328 14293
14329 14294 /*
14330 14295 * called internally to return a LUN given a cip (child_info_t pointer)
14331 14296 */
14332 14297 struct fcp_lun *
14333 14298 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14334 14299 {
14335 14300 struct fcp_tgt *ptgt;
14336 14301 struct fcp_lun *plun;
14337 14302 int i;
14338 14303
14339 14304
14340 14305 ASSERT(mutex_owned(&pptr->port_mutex));
14341 14306
14342 14307 for (i = 0; i < FCP_NUM_HASH; i++) {
14343 14308 for (ptgt = pptr->port_tgt_hash_table[i];
14344 14309 ptgt != NULL;
14345 14310 ptgt = ptgt->tgt_next) {
14346 14311 mutex_enter(&ptgt->tgt_mutex);
14347 14312 for (plun = ptgt->tgt_lun; plun != NULL;
14348 14313 plun = plun->lun_next) {
14349 14314 mutex_enter(&plun->lun_mutex);
14350 14315 if (plun->lun_cip == cip) {
14351 14316 mutex_exit(&plun->lun_mutex);
14352 14317 mutex_exit(&ptgt->tgt_mutex);
14353 14318 return (plun); /* match found */
14354 14319 }
14355 14320 mutex_exit(&plun->lun_mutex);
14356 14321 }
14357 14322 mutex_exit(&ptgt->tgt_mutex);
14358 14323 }
14359 14324 }
14360 14325 return (NULL); /* no LUN found */
14361 14326 }
14362 14327
14363 14328 /*
14364 14329 * pass an element to the hotplug list, kick the hotplug thread
14365 14330 * and wait for the element to get processed by the hotplug thread.
14366 14331 * on return the element is freed.
14367 14332 *
14368 14333 * return zero on success and non-zero on failure
14369 14334 *
14370 14335 * acquires/releases the target mutex
14371 14336 *
14372 14337 */
14373 14338 static int
14374 14339 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14375 14340 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14376 14341 {
14377 14342 struct fcp_hp_elem *elem;
14378 14343 int rval;
14379 14344
14380 14345 mutex_enter(&plun->lun_tgt->tgt_mutex);
14381 14346 if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14382 14347 what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14383 14348 mutex_exit(&plun->lun_tgt->tgt_mutex);
14384 14349 fcp_log(CE_CONT, pptr->port_dip,
14385 14350 "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14386 14351 what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14387 14352 return (NDI_FAILURE);
14388 14353 }
14389 14354 mutex_exit(&plun->lun_tgt->tgt_mutex);
14390 14355 mutex_enter(&elem->mutex);
14391 14356 if (elem->wait) {
14392 14357 while (elem->wait) {
14393 14358 cv_wait(&elem->cv, &elem->mutex);
14394 14359 }
14395 14360 }
14396 14361 rval = (elem->result);
14397 14362 mutex_exit(&elem->mutex);
14398 14363 mutex_destroy(&elem->mutex);
14399 14364 cv_destroy(&elem->cv);
14400 14365 kmem_free(elem, sizeof (struct fcp_hp_elem));
14401 14366 return (rval);
14402 14367 }
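/*
 * A minimal usage sketch, not part of the original source: offline a LUN
 * synchronously through the hotplug thread. The link and target counts are
 * assumed to be the caller's current snapshots, and the caller must not
 * already hold the target mutex (this routine acquires it itself).
 *
 *	if (fcp_pass_to_hp_and_wait(pptr, plun, plun->lun_cip,
 *	    FCP_OFFLINE, pptr->port_link_cnt, ptgt->tgt_change_cnt,
 *	    NDI_DEVI_REMOVE) != NDI_SUCCESS) {
 *		fcp_log(CE_NOTE, pptr->port_dip,
 *		    "!hotplug offline failed for LUN=%x", plun->lun_num);
 *	}
 */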
14403 14368
14404 14369 /*
14405 14370 * pass an element to the hotplug list, and then
14406 14371 * kick the hotplug thread
14407 14372 *
14408 14373 * return Boolean success, i.e. non-zero if all goes well, else zero on error
14409 14374 *
14410 14375 * acquires/releases the hotplug mutex
14411 14376 *
14412 14377 * called with the target mutex owned
14413 14378 *
14414 14379 * memory acquired in NOSLEEP mode
14415 14380 * NOTE: if wait is set to 1 then the caller is responsible for waiting
14416 14381 * for the hp daemon to process the request and is responsible for
14417 14382 * freeing the element
14418 14383 */
14419 14384 static struct fcp_hp_elem *
14420 14385 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14421 14386 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14422 14387 {
14423 14388 struct fcp_hp_elem *elem;
14424 14389 dev_info_t *pdip;
14425 14390
14426 14391 ASSERT(pptr != NULL);
14427 14392 ASSERT(plun != NULL);
14428 14393 ASSERT(plun->lun_tgt != NULL);
14429 14394 ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14430 14395
14431 14396 /* create space for a hotplug element */
14432 14397 if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14433 14398 == NULL) {
14434 14399 fcp_log(CE_WARN, NULL,
14435 14400 "!can't allocate memory for hotplug element");
14436 14401 return (NULL);
14437 14402 }
14438 14403
14439 14404 /* fill in hotplug element */
14440 14405 elem->port = pptr;
14441 14406 elem->lun = plun;
14442 14407 elem->cip = cip;
14443 14408 elem->old_lun_mpxio = plun->lun_mpxio;
14444 14409 elem->what = what;
14445 14410 elem->flags = flags;
14446 14411 elem->link_cnt = link_cnt;
14447 14412 elem->tgt_cnt = tgt_cnt;
14448 14413 elem->wait = wait;
14449 14414 mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14450 14415 cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14451 14416
14452 14417 /* schedule the hotplug task */
14453 14418 pdip = pptr->port_dip;
14454 14419 mutex_enter(&plun->lun_mutex);
14455 14420 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14456 14421 plun->lun_event_count++;
14457 14422 elem->event_cnt = plun->lun_event_count;
14458 14423 }
14459 14424 mutex_exit(&plun->lun_mutex);
14460 14425 if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14461 14426 (void *)elem, KM_NOSLEEP) == NULL) {
14462 14427 mutex_enter(&plun->lun_mutex);
14463 14428 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14464 14429 plun->lun_event_count--;
14465 14430 }
14466 14431 mutex_exit(&plun->lun_mutex);
14467 14432 kmem_free(elem, sizeof (*elem));
14468 14433 return (0);
14469 14434 }
14470 14435
14471 14436 return (elem);
14472 14437 }
14473 14438
14474 14439
14475 14440 static void
14476 14441 fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
14477 14442 {
14478 14443 int rval;
14479 14444 struct scsi_address *ap;
14480 14445 struct fcp_lun *plun;
14481 14446 struct fcp_tgt *ptgt;
14482 14447 fc_packet_t *fpkt;
14483 14448
14484 14449 ap = &cmd->cmd_pkt->pkt_address;
14485 14450 plun = ADDR2LUN(ap);
14486 14451 ptgt = plun->lun_tgt;
14487 14452
14488 14453 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14489 14454
14490 14455 cmd->cmd_state = FCP_PKT_IDLE;
14491 14456
14492 14457 mutex_enter(&pptr->port_mutex);
14493 14458 mutex_enter(&ptgt->tgt_mutex);
14494 14459 if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
14495 14460 (!(pptr->port_state & FCP_STATE_ONLINING))) {
14496 14461 fc_ulp_rscn_info_t *rscnp;
14497 14462
14498 14463 cmd->cmd_state = FCP_PKT_ISSUED;
14499 14464
14500 14465 /*
14501 14466 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
14502 14467 * originally NULL, hence we try to set it to the pd pointed
14503 14468 * to by the SCSI device we're trying to get to.
14504 14469 */
14505 14470
14506 14471 fpkt = cmd->cmd_fp_pkt;
14507 14472 if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
14508 14473 fpkt->pkt_pd = ptgt->tgt_pd_handle;
14509 14474 /*
14510 14475 * We need to notify the transport that we now have a
14511 14476 * reference to the remote port handle.
14512 14477 */
14513 14478 fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
14514 14479 }
14515 14480
14516 14481 mutex_exit(&ptgt->tgt_mutex);
14517 14482 mutex_exit(&pptr->port_mutex);
14518 14483
14519 14484 ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);
14520 14485
14521 14486 /* prepare the packet */
14522 14487
14523 14488 fcp_prepare_pkt(pptr, cmd, plun);
14524 14489
14525 14490 rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
14526 14491 pkt_ulp_rscn_infop;
14527 14492
14528 14493 cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
14529 14494 fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;
14530 14495
14531 14496 if (rscnp != NULL) {
14532 14497 rscnp->ulp_rscn_count =
14533 14498 fc_ulp_get_rscn_count(pptr->
14534 14499 port_fp_handle);
14535 14500 }
14536 14501
14537 14502 rval = fcp_transport(pptr->port_fp_handle,
14538 14503 cmd->cmd_fp_pkt, 0);
14539 14504
14540 14505 if (rval == FC_SUCCESS) {
14541 14506 return;
14542 14507 }
14543 14508 cmd->cmd_state &= ~FCP_PKT_ISSUED;
14544 14509 } else {
14545 14510 mutex_exit(&ptgt->tgt_mutex);
14546 14511 mutex_exit(&pptr->port_mutex);
14547 14512 }
14548 14513
14549 14514 fcp_queue_pkt(pptr, cmd);
14550 14515 }
14551 14516
14552 14517
14553 14518 static void
14554 14519 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14555 14520 {
14556 14521 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14557 14522
14558 14523 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14559 14524 cmd->cmd_state = FCP_PKT_IDLE;
14560 14525
14561 14526 cmd->cmd_pkt->pkt_reason = reason;
14562 14527 cmd->cmd_pkt->pkt_state = 0;
14563 14528 cmd->cmd_pkt->pkt_statistics = statistics;
14564 14529
14565 14530 fcp_post_callback(cmd);
14566 14531 }
14567 14532
14568 14533 /*
14569 14534 * Function: fcp_queue_pkt
14570 14535 *
14571 14536 * Description: This function queues the packet passed by the caller into
14572 14537 * the list of packets of the FCP port.
14573 14538 *
14574 14539 * Argument: *pptr FCP port.
14575 14540 * *cmd FCP packet to queue.
14576 14541 *
14577 14542 * Return Value: None
14578 14543 *
14579 14544 * Context: User, Kernel and Interrupt context.
14580 14545 */
14581 14546 static void
14582 14547 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14583 14548 {
14584 14549 ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == NULL);
14585 14550
14586 14551 mutex_enter(&pptr->port_pkt_mutex);
14587 14552 cmd->cmd_flags |= CFLAG_IN_QUEUE;
14588 14553 ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14589 14554 cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14590 14555
14591 14556 /*
14592 14557 * zero pkt_time means hang around forever
14593 14558 */
14594 14559 if (cmd->cmd_pkt->pkt_time) {
14595 14560 if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14596 14561 cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14597 14562 } else {
14598 14563 /*
14599 14564 * Tell the watch thread to fail the
14600 14565 * command by setting it to the highest value
14601 14566 */
14602 14567 cmd->cmd_timeout = fcp_watchdog_time;
14603 14568 cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14604 14569 }
14605 14570 }
14606 14571
14607 14572 if (pptr->port_pkt_head) {
14608 14573 ASSERT(pptr->port_pkt_tail != NULL);
14609 14574
14610 14575 pptr->port_pkt_tail->cmd_next = cmd;
14611 14576 pptr->port_pkt_tail = cmd;
14612 14577 } else {
14613 14578 ASSERT(pptr->port_pkt_tail == NULL);
14614 14579
14615 14580 pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14616 14581 }
14617 14582 cmd->cmd_next = NULL;
14618 14583 mutex_exit(&pptr->port_pkt_mutex);
14619 14584 }
14620 14585
14621 14586 /*
14622 14587 * Function: fcp_update_targets
14623 14588 *
14624 14589 * Description: This function applies the specified change of state to all
14625 14590 * the targets listed. The operation applied is 'set'.
14626 14591 *
14627 14592 * Argument: *pptr FCP port.
14628 14593 * *dev_list Array of fc_portmap_t structures.
14629 14594 * count Length of dev_list.
14630 14595 * state State bits to update.
14631 14596 * cause Reason for the update.
14632 14597 *
14633 14598 * Return Value: None
14634 14599 *
14635 14600 * Context: User, Kernel and Interrupt context.
14636 14601 * The mutex pptr->port_mutex must be held.
14637 14602 */
14638 14603 static void
14639 14604 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14640 14605 uint32_t count, uint32_t state, int cause)
14641 14606 {
14642 14607 fc_portmap_t *map_entry;
14643 14608 struct fcp_tgt *ptgt;
14644 14609
14645 14610 ASSERT(MUTEX_HELD(&pptr->port_mutex));
14646 14611
14647 14612 while (count--) {
14648 14613 map_entry = &(dev_list[count]);
14649 14614 ptgt = fcp_lookup_target(pptr,
14650 14615 (uchar_t *)&(map_entry->map_pwwn));
14651 14616 if (ptgt == NULL) {
14652 14617 continue;
14653 14618 }
14654 14619
14655 14620 mutex_enter(&ptgt->tgt_mutex);
14656 14621 ptgt->tgt_trace = 0;
14657 14622 ptgt->tgt_change_cnt++;
14658 14623 ptgt->tgt_statec_cause = cause;
14659 14624 ptgt->tgt_tmp_cnt = 1;
14660 14625 fcp_update_tgt_state(ptgt, FCP_SET, state);
14661 14626 mutex_exit(&ptgt->tgt_mutex);
14662 14627 }
14663 14628 }
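/*
 * A minimal usage sketch, not part of the original source; devlist and
 * dev_cnt stand for a caller-built fc_portmap_t array and its length.
 * port_mutex must be held, per the ASSERT in the function above.
 *
 *	mutex_enter(&pptr->port_mutex);
 *	fcp_update_targets(pptr, devlist, dev_cnt,
 *	    FCP_LUN_BUSY, FCP_CAUSE_LINK_CHANGE);
 *	mutex_exit(&pptr->port_mutex);
 */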
14664 14629
14665 14630 static int
14666 14631 fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14667 14632 int lcount, int tcount, int cause)
14668 14633 {
14669 14634 int rval;
14670 14635
14671 14636 mutex_enter(&pptr->port_mutex);
14672 14637 rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
14673 14638 mutex_exit(&pptr->port_mutex);
14674 14639
14675 14640 return (rval);
14676 14641 }
14677 14642
14678 14643
14679 14644 static int
14680 14645 fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14681 14646 int lcount, int tcount, int cause)
14682 14647 {
14683 14648 int finish_init = 0;
14684 14649 int finish_tgt = 0;
14685 14650 int do_finish_init = 0;
14686 14651 int rval = FCP_NO_CHANGE;
14687 14652
14688 14653 if (cause == FCP_CAUSE_LINK_CHANGE ||
14689 14654 cause == FCP_CAUSE_LINK_DOWN) {
14690 14655 do_finish_init = 1;
14691 14656 }
14692 14657
14693 14658 if (ptgt != NULL) {
14694 14659 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14695 14660 FCP_BUF_LEVEL_2, 0,
14696 14661 "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
14697 14662 " cause = %d, d_id = 0x%x, tgt_done = %d",
14698 14663 pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
14699 14664 pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
14700 14665 ptgt->tgt_d_id, ptgt->tgt_done);
14701 14666
14702 14667 mutex_enter(&ptgt->tgt_mutex);
14703 14668
14704 14669 if (tcount && (ptgt->tgt_change_cnt != tcount)) {
14705 14670 rval = FCP_DEV_CHANGE;
14706 14671 if (do_finish_init && ptgt->tgt_done == 0) {
14707 14672 ptgt->tgt_done++;
14708 14673 finish_init = 1;
14709 14674 }
14710 14675 } else {
14711 14676 if (--ptgt->tgt_tmp_cnt <= 0) {
14712 14677 ptgt->tgt_tmp_cnt = 0;
14713 14678 finish_tgt = 1;
14714 14679
14715 14680 if (do_finish_init) {
14716 14681 finish_init = 1;
14717 14682 }
14718 14683 }
14719 14684 }
14720 14685 mutex_exit(&ptgt->tgt_mutex);
14721 14686 } else {
14722 14687 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14723 14688 FCP_BUF_LEVEL_2, 0,
14724 14689 "Call Finish Init for NO target");
14725 14690
14726 14691 if (do_finish_init) {
14727 14692 finish_init = 1;
14728 14693 }
14729 14694 }
14730 14695
14731 14696 if (finish_tgt) {
14732 14697 ASSERT(ptgt != NULL);
14733 14698
14734 14699 mutex_enter(&ptgt->tgt_mutex);
14735 14700 #ifdef DEBUG
14736 14701 bzero(ptgt->tgt_tmp_cnt_stack,
14737 14702 sizeof (ptgt->tgt_tmp_cnt_stack));
14738 14703
14739 14704 ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
14740 14705 FCP_STACK_DEPTH);
14741 14706 #endif /* DEBUG */
14742 14707 mutex_exit(&ptgt->tgt_mutex);
14743 14708
14744 14709 (void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
14745 14710 }
14746 14711
14747 14712 if (finish_init && lcount == pptr->port_link_cnt) {
14748 14713 ASSERT(pptr->port_tmp_cnt > 0);
14749 14714 if (--pptr->port_tmp_cnt == 0) {
14750 14715 fcp_finish_init(pptr);
14751 14716 }
14752 14717 } else if (lcount != pptr->port_link_cnt) {
14753 14718 FCP_TRACE(fcp_logq, pptr->port_instbuf,
14754 14719 fcp_trace, FCP_BUF_LEVEL_2, 0,
14755 14720 "fcp_call_finish_init_held,1: state change occured"
14756 14721 " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
14757 14722 }
14758 14723
14759 14724 return (rval);
14760 14725 }
14761 14726
14762 14727 static void
14763 14728 fcp_reconfigure_luns(void * tgt_handle)
14764 14729 {
14765 14730 uint32_t dev_cnt;
14766 14731 fc_portmap_t *devlist;
14767 14732 struct fcp_tgt *ptgt = (struct fcp_tgt *)tgt_handle;
14768 14733 struct fcp_port *pptr = ptgt->tgt_port;
14769 14734
14770 14735 /*
14771 14736 * If the timer that fires this off got canceled too late, the
14772 14737 * target could have been destroyed.
14773 14738 */
14774 14739
14775 14740 if (ptgt->tgt_tid == NULL) {
14776 14741 return;
14777 14742 }
14778 14743
14779 14744 devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
14780 14745 if (devlist == NULL) {
14781 14746 fcp_log(CE_WARN, pptr->port_dip,
14782 14747 "!fcp%d: failed to allocate for portmap",
14783 14748 pptr->port_instance);
14784 14749 return;
14785 14750 }
14786 14751
14787 14752 dev_cnt = 1;
14788 14753 devlist->map_pd = ptgt->tgt_pd_handle;
14789 14754 devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
14790 14755 devlist->map_did.port_id = ptgt->tgt_d_id;
14791 14756
14792 14757 bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
14793 14758 bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);
14794 14759
14795 14760 devlist->map_state = PORT_DEVICE_LOGGED_IN;
14796 14761 devlist->map_type = PORT_DEVICE_REPORTLUN_CHANGED;
14797 14762 devlist->map_flags = 0;
14798 14763
14799 14764 fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
14800 14765 pptr->port_topology, devlist, dev_cnt, pptr->port_id);
14801 14766
14802 14767 /*
14803 14768 * Clear the tgt_tid after no more references to
14804 14769 * the fcp_tgt
14805 14770 */
14806 14771 mutex_enter(&ptgt->tgt_mutex);
14807 14772 ptgt->tgt_tid = NULL;
14808 14773 mutex_exit(&ptgt->tgt_mutex);
14809 14774
14810 14775 kmem_free(devlist, sizeof (*devlist));
14811 14776 }
14812 14777
14813 14778
14814 14779 static void
14815 14780 fcp_free_targets(struct fcp_port *pptr)
14816 14781 {
14817 14782 int i;
14818 14783 struct fcp_tgt *ptgt;
14819 14784
14820 14785 mutex_enter(&pptr->port_mutex);
14821 14786 for (i = 0; i < FCP_NUM_HASH; i++) {
14822 14787 ptgt = pptr->port_tgt_hash_table[i];
14823 14788 while (ptgt != NULL) {
14824 14789 struct fcp_tgt *next_tgt = ptgt->tgt_next;
14825 14790
14826 14791 fcp_free_target(ptgt);
14827 14792 ptgt = next_tgt;
14828 14793 }
14829 14794 }
14830 14795 mutex_exit(&pptr->port_mutex);
14831 14796 }
14832 14797
14833 14798
14834 14799 static void
14835 14800 fcp_free_target(struct fcp_tgt *ptgt)
14836 14801 {
14837 14802 struct fcp_lun *plun;
14838 14803 timeout_id_t tid;
14839 14804
14840 14805 mutex_enter(&ptgt->tgt_mutex);
14841 14806 tid = ptgt->tgt_tid;
14842 14807
14843 14808 /*
14844 14809 * Cancel any pending timeouts for this target.
14845 14810 */
14846 14811
14847 14812 if (tid != NULL) {
14848 14813 /*
14849 14814 * Set tgt_tid to NULL first to avoid a race in the callback.
14850 14815 * If tgt_tid is NULL, the callback will simply return.
14851 14816 */
14852 14817 ptgt->tgt_tid = NULL;
14853 14818 mutex_exit(&ptgt->tgt_mutex);
14854 14819 (void) untimeout(tid);
14855 14820 mutex_enter(&ptgt->tgt_mutex);
14856 14821 }
14857 14822
14858 14823 plun = ptgt->tgt_lun;
14859 14824 while (plun != NULL) {
14860 14825 struct fcp_lun *next_lun = plun->lun_next;
14861 14826
14862 14827 fcp_dealloc_lun(plun);
14863 14828 plun = next_lun;
14864 14829 }
14865 14830
14866 14831 mutex_exit(&ptgt->tgt_mutex);
14867 14832 fcp_dealloc_tgt(ptgt);
14868 14833 }
14869 14834
14870 14835 /*
14871 14836 * Function: fcp_is_retryable
14872 14837 *
14873 14838 * Description: Indicates if the internal packet is retryable.
14874 14839 *
14875 14840 * Argument: *icmd FCP internal packet.
14876 14841 *
14877 14842 * Return Value: 0 Not retryable
14878 14843 * 1 Retryable
14879 14844 *
14880 14845 * Context: User, Kernel and Interrupt context
14881 14846 */
14882 14847 static int
14883 14848 fcp_is_retryable(struct fcp_ipkt *icmd)
14884 14849 {
14885 14850 if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14886 14851 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
14887 14852 return (0);
14888 14853 }
14889 14854
14890 14855 return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14891 14856 icmd->ipkt_port->port_deadline) ? 1 : 0);
14892 14857 }
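
Editorial note: the retry test above boils down to simple deadline arithmetic: a packet may be retried only while "now plus its timeout" still falls before the port's discovery deadline, and never once the port is suspended, detaching, or powering down. A standalone sketch of just the arithmetic (the tick units and names here are arbitrary):

    #include <stdio.h>

    /* Retry only if "now + this packet's timeout" still precedes the deadline. */
    static int
    is_retryable(long now, long pkt_timeout, long deadline)
    {
            return ((now + pkt_timeout) < deadline ? 1 : 0);
    }

    int
    main(void)
    {
            long now = 100, deadline = 160;

            printf("%d\n", is_retryable(now, 30, deadline));   /* 1: 130 < 160 */
            printf("%d\n", is_retryable(now, 90, deadline));   /* 0: 190 >= 160 */
            return (0);
    }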
14893 14858
14894 14859 /*
14895 14860 * Function: fcp_create_on_demand
14896 14861 *
14897 14862 * Argument: *pptr FCP port.
14898 14863 * *pwwn Port WWN.
14899 14864 *
14900 14865 * Return Value: 0 Success
14901 14866 * EIO
14902 14867 * ENOMEM
14903 14868 * EBUSY
14904 14869 * EINVAL
14905 14870 *
14906 14871 * Context: User and Kernel context
14907 14872 */
14908 14873 static int
14909 14874 fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
14910 14875 {
14911 14876 int wait_ms;
14912 14877 int tcount;
14913 14878 int lcount;
14914 14879 int ret;
14915 14880 int error;
14916 14881 int rval = EIO;
14917 14882 int ntries;
14918 14883 fc_portmap_t *devlist;
14919 14884 opaque_t pd;
14920 14885 struct fcp_lun *plun;
14921 14886 struct fcp_tgt *ptgt;
14922 14887 int old_manual = 0;
14923 14888
14924 14889 /* Allocates the fc_portmap_t structure. */
14925 14890 devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);
14926 14891
14927 14892 /*
14928 14893 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown
14929 14894 * in the commented statement below:
14930 14895 *
14931 14896 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
14932 14897 *
14933 14898 * Below, the deadline for the discovery process is set.
14934 14899 */
14935 14900 mutex_enter(&pptr->port_mutex);
14936 14901 pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
14937 14902 mutex_exit(&pptr->port_mutex);
14938 14903
14939 14904 /*
14940 14905 * We try to find the remote port based on the WWN provided by the
14941 14906 * caller. We actually ask fp/fctl if it has it.
14942 14907 */
14943 14908 pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
14944 14909 (la_wwn_t *)pwwn, &error, 1);
14945 14910
14946 14911 if (pd == NULL) {
14947 14912 kmem_free(devlist, sizeof (*devlist));
14948 14913 return (rval);
14949 14914 }
14950 14915
14951 14916 /*
14952 14917 * The remote port was found. We ask fp/fctl to update our
14953 14918 * fc_portmap_t structure.
14954 14919 */
14955 14920 ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
14956 14921 (la_wwn_t *)pwwn, devlist);
14957 14922 if (ret != FC_SUCCESS) {
14958 14923 kmem_free(devlist, sizeof (*devlist));
14959 14924 return (rval);
14960 14925 }
14961 14926
14962 14927 /*
14963 14928 	 * The map type field is set to indicate that the creation is being
14964 14929 	 * done at the user's request (ioctl, probably luxadm or cfgadm).
14965 14930 */
14966 14931 devlist->map_type = PORT_DEVICE_USER_CREATE;
14967 14932
14968 14933 mutex_enter(&pptr->port_mutex);
14969 14934
14970 14935 /*
14971 14936 * We check to see if fcp already has a target that describes the
14972 14937 * device being created. If not it is created.
14973 14938 */
14974 14939 ptgt = fcp_lookup_target(pptr, pwwn);
14975 14940 if (ptgt == NULL) {
14976 14941 lcount = pptr->port_link_cnt;
14977 14942 mutex_exit(&pptr->port_mutex);
14978 14943
14979 14944 ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
14980 14945 if (ptgt == NULL) {
14981 14946 fcp_log(CE_WARN, pptr->port_dip,
14982 14947 "!FC target allocation failed");
14983 14948 return (ENOMEM);
14984 14949 }
14985 14950
14986 14951 mutex_enter(&pptr->port_mutex);
14987 14952 }
14988 14953
14989 14954 mutex_enter(&ptgt->tgt_mutex);
14990 14955 ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
14991 14956 ptgt->tgt_tmp_cnt = 1;
14992 14957 ptgt->tgt_device_created = 0;
14993 14958 /*
14994 14959 	 * If fabric and auto configuration are set but the target was
14995 14960 	 * manually unconfigured, then reset manual_config_only to
14996 14961 	 * 0 so the device will get configured.
14997 14962 */
14998 14963 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14999 14964 fcp_enable_auto_configuration &&
15000 14965 ptgt->tgt_manual_config_only == 1) {
15001 14966 old_manual = 1;
15002 14967 ptgt->tgt_manual_config_only = 0;
15003 14968 }
15004 14969 mutex_exit(&ptgt->tgt_mutex);
15005 14970
15006 14971 fcp_update_targets(pptr, devlist, 1,
15007 14972 FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);
15008 14973
15009 14974 lcount = pptr->port_link_cnt;
15010 14975 tcount = ptgt->tgt_change_cnt;
15011 14976
15012 14977 if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
15013 14978 tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
15014 14979 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15015 14980 fcp_enable_auto_configuration && old_manual) {
15016 14981 mutex_enter(&ptgt->tgt_mutex);
15017 14982 ptgt->tgt_manual_config_only = 1;
15018 14983 mutex_exit(&ptgt->tgt_mutex);
15019 14984 }
15020 14985
15021 14986 if (pptr->port_link_cnt != lcount ||
15022 14987 ptgt->tgt_change_cnt != tcount) {
15023 14988 rval = EBUSY;
15024 14989 }
15025 14990 mutex_exit(&pptr->port_mutex);
15026 14991
15027 14992 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15028 14993 FCP_BUF_LEVEL_3, 0,
15029 14994 "fcp_create_on_demand: mapflags ptgt=%x, "
15030 14995 "lcount=%x::port_link_cnt=%x, "
15031 14996 "tcount=%x: tgt_change_cnt=%x, rval=%x",
15032 14997 ptgt, lcount, pptr->port_link_cnt,
15033 14998 tcount, ptgt->tgt_change_cnt, rval);
15034 14999 return (rval);
15035 15000 }
15036 15001
15037 15002 /*
15038 15003 	 * Due to the lack of synchronization mechanisms, we perform
15039 15004 	 * periodic monitoring of our request.  Because requests
15040 15005 	 * get dropped when another one supersedes them (either because
15041 15006 	 * of a link change or a target change), it is difficult to
15042 15007 	 * provide a clean synchronization mechanism (such as a
15043 15008 	 * semaphore or a condition variable) without exhaustively
15044 15009 	 * rewriting the mainline discovery code of this driver.
15045 15010 */
15046 15011 wait_ms = 500;
15047 15012
15048 15013 ntries = fcp_max_target_retries;
15049 15014
15050 15015 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15051 15016 FCP_BUF_LEVEL_3, 0,
15052 15017 "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
15053 15018 "lcount=%x::port_link_cnt=%x, "
15054 15019 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15055 15020 "tgt_tmp_cnt =%x",
15056 15021 ntries, ptgt, lcount, pptr->port_link_cnt,
15057 15022 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15058 15023 ptgt->tgt_tmp_cnt);
15059 15024
15060 15025 mutex_enter(&ptgt->tgt_mutex);
15061 15026 while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
15062 15027 ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
15063 15028 mutex_exit(&ptgt->tgt_mutex);
15064 15029 mutex_exit(&pptr->port_mutex);
15065 15030
15066 15031 delay(drv_usectohz(wait_ms * 1000));
15067 15032
15068 15033 mutex_enter(&pptr->port_mutex);
15069 15034 mutex_enter(&ptgt->tgt_mutex);
15070 15035 }
15071 15036
15072 15037
15073 15038 if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
15074 15039 rval = EBUSY;
15075 15040 } else {
15076 15041 if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
15077 15042 FCP_TGT_NODE_PRESENT) {
15078 15043 rval = 0;
15079 15044 }
15080 15045 }
15081 15046
15082 15047 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15083 15048 FCP_BUF_LEVEL_3, 0,
15084 15049 "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
15085 15050 "lcount=%x::port_link_cnt=%x, "
15086 15051 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15087 15052 "tgt_tmp_cnt =%x",
15088 15053 ntries, ptgt, lcount, pptr->port_link_cnt,
15089 15054 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15090 15055 ptgt->tgt_tmp_cnt);
15091 15056
15092 15057 if (rval) {
15093 15058 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15094 15059 fcp_enable_auto_configuration && old_manual) {
15095 15060 ptgt->tgt_manual_config_only = 1;
15096 15061 }
15097 15062 mutex_exit(&ptgt->tgt_mutex);
15098 15063 mutex_exit(&pptr->port_mutex);
15099 15064 kmem_free(devlist, sizeof (*devlist));
15100 15065
15101 15066 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15102 15067 FCP_BUF_LEVEL_3, 0,
15103 15068 "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
15104 15069 "lcount=%x::port_link_cnt=%x, "
15105 15070 "tcount=%x::tgt_change_cnt=%x, rval=%x, "
15106 15071 "tgt_device_created=%x, tgt D_ID=%x",
15107 15072 ntries, ptgt, lcount, pptr->port_link_cnt,
15108 15073 tcount, ptgt->tgt_change_cnt, rval,
15109 15074 ptgt->tgt_device_created, ptgt->tgt_d_id);
15110 15075 return (rval);
15111 15076 }
15112 15077
15113 15078 if ((plun = ptgt->tgt_lun) != NULL) {
15114 15079 tcount = plun->lun_tgt->tgt_change_cnt;
15115 15080 } else {
15116 15081 rval = EINVAL;
15117 15082 }
15118 15083 lcount = pptr->port_link_cnt;
15119 15084
15120 15085 /*
15121 15086 * Configuring the target with no LUNs will fail. We
15122 15087 * should reset the node state so that it is not
15123 15088 * automatically configured when the LUNs are added
15124 15089 * to this target.
15125 15090 */
15126 15091 if (ptgt->tgt_lun_cnt == 0) {
15127 15092 ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
15128 15093 }
15129 15094 mutex_exit(&ptgt->tgt_mutex);
15130 15095 mutex_exit(&pptr->port_mutex);
15131 15096
15132 15097 while (plun) {
15133 15098 child_info_t *cip;
15134 15099
15135 15100 mutex_enter(&plun->lun_mutex);
15136 15101 cip = plun->lun_cip;
15137 15102 mutex_exit(&plun->lun_mutex);
15138 15103
15139 15104 mutex_enter(&ptgt->tgt_mutex);
15140 15105 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
15141 15106 mutex_exit(&ptgt->tgt_mutex);
15142 15107
15143 15108 rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
15144 15109 FCP_ONLINE, lcount, tcount,
15145 15110 NDI_ONLINE_ATTACH);
15146 15111 if (rval != NDI_SUCCESS) {
15147 15112 FCP_TRACE(fcp_logq,
15148 15113 pptr->port_instbuf, fcp_trace,
15149 15114 FCP_BUF_LEVEL_3, 0,
15150 15115 "fcp_create_on_demand: "
15151 15116 "pass_to_hp_and_wait failed "
15152 15117 "rval=%x", rval);
15153 15118 rval = EIO;
15154 15119 } else {
15155 15120 mutex_enter(&LUN_TGT->tgt_mutex);
15156 15121 plun->lun_state &= ~(FCP_LUN_OFFLINE |
15157 15122 FCP_LUN_BUSY);
15158 15123 mutex_exit(&LUN_TGT->tgt_mutex);
15159 15124 }
15160 15125 mutex_enter(&ptgt->tgt_mutex);
15161 15126 }
15162 15127
15163 15128 plun = plun->lun_next;
15164 15129 mutex_exit(&ptgt->tgt_mutex);
15165 15130 }
15166 15131
15167 15132 kmem_free(devlist, sizeof (*devlist));
15168 15133
15169 15134 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15170 15135 fcp_enable_auto_configuration && old_manual) {
15171 15136 mutex_enter(&ptgt->tgt_mutex);
15172 15137 /* if successful then set manual to 0 */
15173 15138 if (rval == 0) {
15174 15139 ptgt->tgt_manual_config_only = 0;
15175 15140 } else {
15176 15141 /* reset to 1 so the user has to do the config */
15177 15142 ptgt->tgt_manual_config_only = 1;
15178 15143 }
15179 15144 mutex_exit(&ptgt->tgt_mutex);
15180 15145 }
15181 15146
15182 15147 return (rval);
15183 15148 }
15184 15149
15185 15150
15186 15151 static void
15187 15152 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
15188 15153 {
15189 15154 int count;
15190 15155 uchar_t byte;
15191 15156
15192 15157 count = 0;
15193 15158 while (*string) {
15194 15159 byte = FCP_ATOB(*string); string++;
15195 15160 byte = byte << 4 | FCP_ATOB(*string); string++;
15196 15161 bytes[count++] = byte;
15197 15162
15198 15163 if (count >= byte_len) {
15199 15164 break;
15200 15165 }
15201 15166 }
15202 15167 }
15203 15168
15204 15169 static void
15205 15170 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
15206 15171 {
15207 15172 int i;
15208 15173
15209 15174 for (i = 0; i < FC_WWN_SIZE; i++) {
15210 15175 (void) sprintf(string + (i * 2),
15211 15176 "%02x", wwn[i]);
15212 15177 }
15213 15178
15214 15179 }
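
Editorial note: fcp_ascii_to_wwn() and fcp_wwn_to_ascii() convert between the 16-character hex form of a WWN (e.g. "510000f010fd92a1") and its 8-byte binary form, two hex digits per byte. The self-contained sketch below performs the same round trip; hex_nibble() is only a rough stand-in for the driver's FCP_ATOB() macro.

    #include <stdio.h>
    #include <ctype.h>

    #define WWN_SIZE        8

    /* Rough stand-in for the driver's FCP_ATOB() macro. */
    static unsigned char
    hex_nibble(char c)
    {
            return (isdigit((unsigned char)c) ? c - '0' :
                (char)tolower((unsigned char)c) - 'a' + 10);
    }

    static void
    ascii_to_wwn(const char *s, unsigned char *bytes, int len)
    {
            int i;

            for (i = 0; i < len && s[0] != '\0' && s[1] != '\0'; i++, s += 2)
                    bytes[i] = (hex_nibble(s[0]) << 4) | hex_nibble(s[1]);
    }

    static void
    wwn_to_ascii(const unsigned char *bytes, char *s)
    {
            int i;

            for (i = 0; i < WWN_SIZE; i++)
                    (void) sprintf(s + (i * 2), "%02x", bytes[i]);
    }

    int
    main(void)
    {
            unsigned char wwn[WWN_SIZE];
            char buf[WWN_SIZE * 2 + 1];

            ascii_to_wwn("510000f010fd92a1", wwn, WWN_SIZE);
            wwn_to_ascii(wwn, buf);
            printf("%s\n", buf);            /* prints 510000f010fd92a1 */
            return (0);
    }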
15215 15180
15216 15181 static void
15217 15182 fcp_print_error(fc_packet_t *fpkt)
15218 15183 {
15219 15184 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
15220 15185 fpkt->pkt_ulp_private;
15221 15186 struct fcp_port *pptr;
15222 15187 struct fcp_tgt *ptgt;
15223 15188 struct fcp_lun *plun;
15224 15189 caddr_t buf;
15225 15190 int scsi_cmd = 0;
15226 15191
15227 15192 ptgt = icmd->ipkt_tgt;
15228 15193 plun = icmd->ipkt_lun;
15229 15194 pptr = ptgt->tgt_port;
15230 15195
15231 15196 buf = kmem_zalloc(256, KM_NOSLEEP);
15232 15197 if (buf == NULL) {
15233 15198 return;
15234 15199 }
15235 15200
15236 15201 switch (icmd->ipkt_opcode) {
15237 15202 case SCMD_REPORT_LUN:
15238 15203 (void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
15239 15204 " lun=0x%%x failed");
15240 15205 scsi_cmd++;
15241 15206 break;
15242 15207
15243 15208 case SCMD_INQUIRY_PAGE83:
15244 15209 (void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
15245 15210 " lun=0x%%x failed");
15246 15211 scsi_cmd++;
15247 15212 break;
15248 15213
15249 15214 case SCMD_INQUIRY:
15250 15215 (void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
15251 15216 " lun=0x%%x failed");
15252 15217 scsi_cmd++;
15253 15218 break;
15254 15219
15255 15220 case LA_ELS_PLOGI:
15256 15221 (void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
15257 15222 break;
15258 15223
15259 15224 case LA_ELS_PRLI:
15260 15225 (void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
15261 15226 break;
15262 15227 }
15263 15228
15264 15229 if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
15265 15230 struct fcp_rsp response, *rsp;
15266 15231 uchar_t asc, ascq;
15267 15232 caddr_t sense_key = NULL;
15268 15233 struct fcp_rsp_info fcp_rsp_err, *bep;
15269 15234
15270 15235 if (icmd->ipkt_nodma) {
15271 15236 rsp = (struct fcp_rsp *)fpkt->pkt_resp;
15272 15237 bep = (struct fcp_rsp_info *)((caddr_t)rsp +
15273 15238 sizeof (struct fcp_rsp));
15274 15239 } else {
15275 15240 rsp = &response;
15276 15241 bep = &fcp_rsp_err;
15277 15242
15278 15243 FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
15279 15244 sizeof (struct fcp_rsp));
15280 15245
15281 15246 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
15282 15247 bep, fpkt->pkt_resp_acc,
15283 15248 sizeof (struct fcp_rsp_info));
15284 15249 }
15285 15250
15286 15251
15287 15252 if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
15288 15253 (void) sprintf(buf + strlen(buf),
15289 15254 " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
15290 15255 " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
15291 15256 " senselen=%%x. Giving up");
15292 15257
15293 15258 fcp_log(CE_WARN, pptr->port_dip, buf,
15294 15259 ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
15295 15260 rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
15296 15261 rsp->fcp_u.fcp_status.reserved_1,
15297 15262 rsp->fcp_response_len, rsp->fcp_sense_len);
15298 15263
15299 15264 kmem_free(buf, 256);
15300 15265 return;
15301 15266 }
15302 15267
15303 15268 if (rsp->fcp_u.fcp_status.rsp_len_set &&
15304 15269 bep->rsp_code != FCP_NO_FAILURE) {
15305 15270 (void) sprintf(buf + strlen(buf),
15306 15271 " FCP Response code = 0x%x", bep->rsp_code);
15307 15272 }
15308 15273
15309 15274 if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
15310 15275 struct scsi_extended_sense sense_info, *sense_ptr;
15311 15276
15312 15277 if (icmd->ipkt_nodma) {
15313 15278 sense_ptr = (struct scsi_extended_sense *)
15314 15279 ((caddr_t)fpkt->pkt_resp +
15315 15280 sizeof (struct fcp_rsp) +
15316 15281 rsp->fcp_response_len);
15317 15282 } else {
15318 15283 sense_ptr = &sense_info;
15319 15284
15320 15285 FCP_CP_IN(fpkt->pkt_resp +
15321 15286 sizeof (struct fcp_rsp) +
15322 15287 rsp->fcp_response_len, &sense_info,
15323 15288 fpkt->pkt_resp_acc,
15324 15289 sizeof (struct scsi_extended_sense));
15325 15290 }
15326 15291
15327 15292 if (sense_ptr->es_key < NUM_SENSE_KEYS +
15328 15293 NUM_IMPL_SENSE_KEYS) {
15329 15294 sense_key = sense_keys[sense_ptr->es_key];
15330 15295 } else {
15331 15296 sense_key = "Undefined";
15332 15297 }
15333 15298
15334 15299 asc = sense_ptr->es_add_code;
15335 15300 ascq = sense_ptr->es_qual_code;
15336 15301
15337 15302 (void) sprintf(buf + strlen(buf),
15338 15303 ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
15339 15304 " Giving up");
15340 15305
15341 15306 fcp_log(CE_WARN, pptr->port_dip, buf,
15342 15307 ptgt->tgt_d_id, plun->lun_num, sense_key,
15343 15308 asc, ascq);
15344 15309 } else {
15345 15310 (void) sprintf(buf + strlen(buf),
15346 15311 " : SCSI status=%%x. Giving up");
15347 15312
15348 15313 fcp_log(CE_WARN, pptr->port_dip, buf,
15349 15314 ptgt->tgt_d_id, plun->lun_num,
15350 15315 rsp->fcp_u.fcp_status.scsi_status);
15351 15316 }
15352 15317 } else {
15353 15318 caddr_t state, reason, action, expln;
15354 15319
15355 15320 (void) fc_ulp_pkt_error(fpkt, &state, &reason,
15356 15321 &action, &expln);
15357 15322
15358 15323 (void) sprintf(buf + strlen(buf), ": State:%%s,"
15359 15324 " Reason:%%s. Giving up");
15360 15325
15361 15326 if (scsi_cmd) {
15362 15327 fcp_log(CE_WARN, pptr->port_dip, buf,
15363 15328 ptgt->tgt_d_id, plun->lun_num, state, reason);
15364 15329 } else {
15365 15330 fcp_log(CE_WARN, pptr->port_dip, buf,
15366 15331 ptgt->tgt_d_id, state, reason);
15367 15332 }
15368 15333 }
15369 15334
15370 15335 kmem_free(buf, 256);
15371 15336 }
15372 15337
15373 15338
15374 15339 static int
15375 15340 fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
15376 15341 struct fcp_ipkt *icmd, int rval, caddr_t op)
15377 15342 {
15378 15343 int ret = DDI_FAILURE;
15379 15344 char *error;
15380 15345
15381 15346 switch (rval) {
15382 15347 case FC_DEVICE_BUSY_NEW_RSCN:
15383 15348 /*
15384 15349 * This means that there was a new RSCN that the transport
15385 15350 * knows about (which the ULP *may* know about too) but the
15386 15351 * pkt that was sent down was related to an older RSCN. So, we
15387 15352 * are just going to reset the retry count and deadline and
15388 15353 * continue to retry. The idea is that transport is currently
15389 15354 * working on the new RSCN and will soon let the ULPs know
15390 15355 * about it and when it does the existing logic will kick in
15391 15356 * where it will change the tcount to indicate that something
15392 15357 * changed on the target. So, rediscovery will start and there
15393 15358 * will not be an infinite retry.
15394 15359 *
15395 15360 * For a full flow of how the RSCN info is transferred back and
15396 15361 * forth, see fp.c
15397 15362 */
15398 15363 icmd->ipkt_retries = 0;
15399 15364 icmd->ipkt_port->port_deadline = fcp_watchdog_time +
15400 15365 FCP_ICMD_DEADLINE;
15401 15366
15402 15367 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15403 15368 FCP_BUF_LEVEL_3, 0,
15404 15369 "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
15405 15370 rval, ptgt->tgt_d_id);
15406 15371 /* FALLTHROUGH */
15407 15372
15408 15373 case FC_STATEC_BUSY:
15409 15374 case FC_DEVICE_BUSY:
15410 15375 case FC_PBUSY:
15411 15376 case FC_FBUSY:
15412 15377 case FC_TRAN_BUSY:
15413 15378 case FC_OFFLINE:
15414 15379 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15415 15380 FCP_BUF_LEVEL_3, 0,
15416 15381 "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
15417 15382 rval, ptgt->tgt_d_id);
15418 15383 if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15419 15384 fcp_is_retryable(icmd)) {
15420 15385 fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15421 15386 ret = DDI_SUCCESS;
15422 15387 }
15423 15388 break;
15424 15389
15425 15390 case FC_LOGINREQ:
15426 15391 /*
15427 15392 * FC_LOGINREQ used to be handled just like all the cases
15428 15393 	 * above.  It has been changed to handle a PRLI that fails
15429 15394 	 * with FC_LOGINREQ differently from other ipkts that fail
15430 15395 	 * with FC_LOGINREQ.  If a PRLI fails with FC_LOGINREQ it is
15431 15396 * a simple matter to turn it into a PLOGI instead, so that's
15432 15397 * exactly what we do here.
15433 15398 */
15434 15399 if (icmd->ipkt_opcode == LA_ELS_PRLI) {
15435 15400 ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
15436 15401 icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
15437 15402 icmd->ipkt_change_cnt, icmd->ipkt_cause);
15438 15403 } else {
15439 15404 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15440 15405 FCP_BUF_LEVEL_3, 0,
15441 15406 "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
15442 15407 rval, ptgt->tgt_d_id);
15443 15408 if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15444 15409 fcp_is_retryable(icmd)) {
15445 15410 fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15446 15411 ret = DDI_SUCCESS;
15447 15412 }
15448 15413 }
15449 15414 break;
15450 15415
15451 15416 default:
15452 15417 mutex_enter(&pptr->port_mutex);
15453 15418 mutex_enter(&ptgt->tgt_mutex);
15454 15419 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
15455 15420 mutex_exit(&ptgt->tgt_mutex);
15456 15421 mutex_exit(&pptr->port_mutex);
15457 15422
15458 15423 (void) fc_ulp_error(rval, &error);
15459 15424 fcp_log(CE_WARN, pptr->port_dip,
15460 15425 "!Failed to send %s to D_ID=%x error=%s",
15461 15426 op, ptgt->tgt_d_id, error);
15462 15427 } else {
15463 15428 FCP_TRACE(fcp_logq, pptr->port_instbuf,
15464 15429 fcp_trace, FCP_BUF_LEVEL_2, 0,
15465 15430 "fcp_handle_ipkt_errors,1: state change occured"
15466 15431 " for D_ID=0x%x", ptgt->tgt_d_id);
15467 15432 mutex_exit(&ptgt->tgt_mutex);
15468 15433 mutex_exit(&pptr->port_mutex);
15469 15434 }
15470 15435 break;
15471 15436 }
15472 15437
15473 15438 return (ret);
15474 15439 }
15475 15440
15476 15441
15477 15442 /*
15478 15443  * Check for outstanding commands on any LUN of this target
15479 15444 */
15480 15445 static int
15481 15446 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15482 15447 {
15483 15448 struct fcp_lun *plun;
15484 15449 struct fcp_pkt *cmd;
15485 15450
15486 15451 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15487 15452 mutex_enter(&plun->lun_mutex);
15488 15453 for (cmd = plun->lun_pkt_head; cmd != NULL;
15489 15454 cmd = cmd->cmd_forw) {
15490 15455 if (cmd->cmd_state == FCP_PKT_ISSUED) {
15491 15456 mutex_exit(&plun->lun_mutex);
15492 15457 return (FC_SUCCESS);
15493 15458 }
15494 15459 }
15495 15460 mutex_exit(&plun->lun_mutex);
15496 15461 }
15497 15462
15498 15463 return (FC_FAILURE);
15499 15464 }
15500 15465
15501 15466 static fc_portmap_t *
15502 15467 fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
15503 15468 {
15504 15469 int i;
15505 15470 fc_portmap_t *devlist;
15506 15471 fc_portmap_t *devptr = NULL;
15507 15472 struct fcp_tgt *ptgt;
15508 15473
15509 15474 mutex_enter(&pptr->port_mutex);
15510 15475 for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
15511 15476 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15512 15477 ptgt = ptgt->tgt_next) {
15513 15478 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15514 15479 ++*dev_cnt;
15515 15480 }
15516 15481 }
15517 15482 }
15518 15483
15519 15484 devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
15520 15485 KM_NOSLEEP);
15521 15486 if (devlist == NULL) {
15522 15487 mutex_exit(&pptr->port_mutex);
15523 15488 fcp_log(CE_WARN, pptr->port_dip,
15524 15489 "!fcp%d: failed to allocate for portmap for construct map",
15525 15490 pptr->port_instance);
15526 15491 return (devptr);
15527 15492 }
15528 15493
15529 15494 for (i = 0; i < FCP_NUM_HASH; i++) {
15530 15495 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15531 15496 ptgt = ptgt->tgt_next) {
15532 15497 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15533 15498 int ret;
15534 15499
15535 15500 ret = fc_ulp_pwwn_to_portmap(
15536 15501 pptr->port_fp_handle,
15537 15502 (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
15538 15503 devlist);
15539 15504
15540 15505 if (ret == FC_SUCCESS) {
15541 15506 devlist++;
15542 15507 continue;
15543 15508 }
15544 15509
15545 15510 devlist->map_pd = NULL;
15546 15511 devlist->map_did.port_id = ptgt->tgt_d_id;
15547 15512 devlist->map_hard_addr.hard_addr =
15548 15513 ptgt->tgt_hard_addr;
15549 15514
15550 15515 devlist->map_state = PORT_DEVICE_INVALID;
15551 15516 devlist->map_type = PORT_DEVICE_OLD;
15552 15517
15553 15518 bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
15554 15519 &devlist->map_nwwn, FC_WWN_SIZE);
15555 15520
15556 15521 bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
15557 15522 &devlist->map_pwwn, FC_WWN_SIZE);
15558 15523
15559 15524 devlist++;
15560 15525 }
15561 15526 }
15562 15527 }
15563 15528
15564 15529 mutex_exit(&pptr->port_mutex);
15565 15530
15566 15531 return (devptr);
15567 15532 }
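
Editorial note: fcp_construct_map() is a count-allocate-fill routine: one sweep of the target hash table counts the non-orphan targets, a portmap array of that size is allocated, and a second sweep fills one entry per target. Below is a minimal user-space sketch of that pattern with the locking and the fc_portmap_t details stripped away; the entry type is invented purely for illustration.

    #include <stdio.h>
    #include <stdlib.h>

    #define NUM_HASH        4

    struct entry {
            int             value;
            int             orphan;
            struct entry    *next;
    };

    /* Count the entries that qualify, allocate once, then fill the array. */
    static int *
    construct_map(struct entry *hash[], unsigned int *cnt)
    {
            unsigned int i, n = 0;
            int *list, *p;
            struct entry *e;

            for (i = 0; i < NUM_HASH; i++)
                    for (e = hash[i]; e != NULL; e = e->next)
                            if (!e->orphan)
                                    n++;

            if ((list = calloc(n, sizeof (*list))) == NULL)
                    return (NULL);

            p = list;
            for (i = 0; i < NUM_HASH; i++)
                    for (e = hash[i]; e != NULL; e = e->next)
                            if (!e->orphan)
                                    *p++ = e->value;

            *cnt = n;
            return (list);
    }

    int
    main(void)
    {
            struct entry a = { 1, 0, NULL }, b = { 2, 1, &a }, c = { 3, 0, NULL };
            struct entry *hash[NUM_HASH] = { &b, NULL, &c, NULL };
            unsigned int cnt, i;
            int *map = construct_map(hash, &cnt);

            if (map == NULL)
                    return (1);
            for (i = 0; i < cnt; i++)
                    printf("%d ", map[i]);  /* prints the non-orphan entries 1 3 */
            printf("\n");
            free(map);
            return (0);
    }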
15568 15533 /*
15569 15534  * Inform MPxIO that the LUN is busy and cannot accept regular IO
15570 15535 */
15571 15536 static void
15572 15537 fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
15573 15538 {
15574 15539 int i;
15575 15540 struct fcp_tgt *ptgt;
15576 15541 struct fcp_lun *plun;
15577 15542
15578 15543 for (i = 0; i < FCP_NUM_HASH; i++) {
15579 15544 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15580 15545 ptgt = ptgt->tgt_next) {
15581 15546 mutex_enter(&ptgt->tgt_mutex);
15582 15547 for (plun = ptgt->tgt_lun; plun != NULL;
15583 15548 plun = plun->lun_next) {
15584 15549 if (plun->lun_mpxio &&
15585 15550 plun->lun_state & FCP_LUN_BUSY) {
15586 15551 if (!fcp_pass_to_hp(pptr, plun,
15587 15552 plun->lun_cip,
15588 15553 FCP_MPXIO_PATH_SET_BUSY,
15589 15554 pptr->port_link_cnt,
15590 15555 ptgt->tgt_change_cnt, 0, 0)) {
15591 15556 FCP_TRACE(fcp_logq,
15592 15557 pptr->port_instbuf,
15593 15558 fcp_trace,
15594 15559 FCP_BUF_LEVEL_2, 0,
15595 15560 "path_verifybusy: "
15596 15561 "disable lun %p failed!",
15597 15562 plun);
15598 15563 }
15599 15564 }
15600 15565 }
15601 15566 mutex_exit(&ptgt->tgt_mutex);
15602 15567 }
15603 15568 }
15604 15569 }
15605 15570
15606 15571 static int
15607 15572 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
15608 15573 {
15609 15574 dev_info_t *cdip = NULL;
15610 15575 dev_info_t *pdip = NULL;
15611 15576
15612 15577 ASSERT(plun);
15613 15578
15614 15579 mutex_enter(&plun->lun_mutex);
15615 15580 if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
15616 15581 mutex_exit(&plun->lun_mutex);
15617 15582 return (NDI_FAILURE);
15618 15583 }
15619 15584 mutex_exit(&plun->lun_mutex);
15620 15585 cdip = mdi_pi_get_client(PIP(cip));
15621 15586 pdip = mdi_pi_get_phci(PIP(cip));
15622 15587
15623 15588 ASSERT(cdip != NULL);
15624 15589 ASSERT(pdip != NULL);
15625 15590
15626 15591 if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
15627 15592 /* LUN ready for IO */
15628 15593 (void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15629 15594 } else {
15630 15595 /* LUN busy to accept IO */
15631 15596 (void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15632 15597 }
15633 15598 return (NDI_SUCCESS);
15634 15599 }
15635 15600
15636 15601 /*
15637 15602  * The caller must free the returned string (MAXPATHLEN bytes).
15638 15603  * If the device is offline (-1 instance number), NULL
15639 15604  * will be returned.
15640 15605 */
15641 15606 static char *
15642 15607 fcp_get_lun_path(struct fcp_lun *plun)
15643 15608 {
15644 15609 dev_info_t *dip = NULL;
15645 15610 char *path = NULL;
15646 15611 mdi_pathinfo_t *pip = NULL;
15647 15612
15648 15613 if (plun == NULL) {
15649 15614 return (NULL);
15650 15615 }
15651 15616
15652 15617 mutex_enter(&plun->lun_mutex);
15653 15618 if (plun->lun_mpxio == 0) {
15654 15619 dip = DIP(plun->lun_cip);
15655 15620 mutex_exit(&plun->lun_mutex);
15656 15621 } else {
15657 15622 /*
15658 15623 * lun_cip must be accessed with lun_mutex held. Here
15659 15624 * plun->lun_cip either points to a valid node or it is NULL.
15660 15625 * Make a copy so that we can release lun_mutex.
15661 15626 */
15662 15627 pip = PIP(plun->lun_cip);
15663 15628
15664 15629 /*
15665 15630 * Increase ref count on the path so that we can release
15666 15631 * lun_mutex and still be sure that the pathinfo node (and thus
15667 15632 * also the client) is not deallocated. If pip is NULL, this
15668 15633 * has no effect.
15669 15634 */
15670 15635 mdi_hold_path(pip);
15671 15636
15672 15637 mutex_exit(&plun->lun_mutex);
15673 15638
15674 15639 /* Get the client. If pip is NULL, we get NULL. */
15675 15640 dip = mdi_pi_get_client(pip);
15676 15641 }
15677 15642
15678 15643 if (dip == NULL)
15679 15644 goto out;
15680 15645 if (ddi_get_instance(dip) < 0)
15681 15646 goto out;
15682 15647
15683 15648 path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
15684 15649 if (path == NULL)
15685 15650 goto out;
15686 15651
15687 15652 (void) ddi_pathname(dip, path);
15688 15653
15689 15654 /* Clean up. */
15690 15655 out:
15691 15656 if (pip != NULL)
15692 15657 mdi_rele_path(pip);
15693 15658
15694 15659 /*
15695 15660 * In reality, the user wants a fully valid path (one they can open)
15696 15661 	 * but this string is lacking the mount point and the minor node.
15697 15662 * It would be nice if we could "figure these out" somehow
15698 15663 * and fill them in. Otherwise, the userland code has to understand
15699 15664 * driver specific details of which minor node is the "best" or
15700 15665 * "right" one to expose. (Ex: which slice is the whole disk, or
15701 15666 * which tape doesn't rewind)
15702 15667 */
15703 15668 return (path);
15704 15669 }
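
Editorial note: as the comment above warns, the string handed back by fcp_get_lun_path() is only a ddi_pathname: userland still has to prepend the /devices mount point and choose a minor node before it has something it can open. The hypothetical helper below sketches that composition; the ":c,raw" minor name is purely an assumption used for illustration, since which minor is "right" is driver specific.

    #include <stdio.h>

    /*
     * Hypothetical userland helper: given the string returned by
     * fcp_get_lun_path() (no /devices prefix, no minor name), compose
     * something that could be opened.  The minor suffix is an assumption.
     */
    static void
    compose_devices_path(const char *lun_path, char *buf, size_t len)
    {
            (void) snprintf(buf, len, "/devices%s:c,raw", lun_path);
    }

    int
    main(void)
    {
            char buf[1024];

            compose_devices_path(
                "/pci@0,0/fibre-channel@1/disk@w210000e08b000000,0",
                buf, sizeof (buf));
            printf("%s\n", buf);
            return (0);
    }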
15705 15670
15706 15671 static int
15707 15672 fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
15708 15673 ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
15709 15674 {
15710 15675 int64_t reset_delay;
15711 15676 int rval, retry = 0;
15712 15677 struct fcp_port *pptr = fcp_dip2port(parent);
15713 15678
15714 15679 reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15715 15680 (ddi_get_lbolt64() - pptr->port_attach_time);
15716 15681 if (reset_delay < 0) {
15717 15682 reset_delay = 0;
15718 15683 }
15719 15684
15720 15685 if (fcp_bus_config_debug) {
15721 15686 flag |= NDI_DEVI_DEBUG;
15722 15687 }
15723 15688
15724 15689 switch (op) {
15725 15690 case BUS_CONFIG_ONE:
15726 15691 /*
15727 15692 * Retry the command since we need to ensure
15728 15693 * the fabric devices are available for root
15729 15694 */
15730 15695 while (retry++ < fcp_max_bus_config_retries) {
15731 15696 rval = (ndi_busop_bus_config(parent,
15732 15697 flag | NDI_MDI_FALLBACK, op,
15733 15698 arg, childp, (clock_t)reset_delay));
15734 15699 if (rval == 0) {
15735 15700 return (rval);
15736 15701 }
15737 15702 }
15738 15703
15739 15704 /*
15740 15705 * drain taskq to make sure nodes are created and then
15741 15706 * try again.
15742 15707 */
15743 15708 taskq_wait(DEVI(parent)->devi_taskq);
15744 15709 return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
15745 15710 op, arg, childp, 0));
15746 15711
15747 15712 case BUS_CONFIG_DRIVER:
15748 15713 case BUS_CONFIG_ALL: {
15749 15714 /*
15750 15715 * delay till all devices report in (port_tmp_cnt == 0)
15751 15716 * or FCP_INIT_WAIT_TIMEOUT
15752 15717 */
15753 15718 mutex_enter(&pptr->port_mutex);
15754 15719 while ((reset_delay > 0) && pptr->port_tmp_cnt) {
15755 15720 (void) cv_timedwait(&pptr->port_config_cv,
15756 15721 &pptr->port_mutex,
15757 15722 ddi_get_lbolt() + (clock_t)reset_delay);
15758 15723 reset_delay =
15759 15724 (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15760 15725 (ddi_get_lbolt64() - pptr->port_attach_time);
15761 15726 }
15762 15727 mutex_exit(&pptr->port_mutex);
15763 15728 /* drain taskq to make sure nodes are created */
15764 15729 taskq_wait(DEVI(parent)->devi_taskq);
15765 15730 return (ndi_busop_bus_config(parent, flag, op,
15766 15731 arg, childp, 0));
15767 15732 }
15768 15733
15769 15734 default:
15770 15735 return (NDI_FAILURE);
15771 15736 }
15772 15737 /*NOTREACHED*/
15773 15738 }
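
Editorial note: both bus-config paths above compute the residual wait the same way: the full FCP_INIT_WAIT_TIMEOUT converted to ticks, minus the time already spent since port attach, clamped at zero so the wait never goes negative. A tiny standalone sketch of that clamp (tick units are arbitrary here):

    #include <stdio.h>

    /* Remaining wait = total timeout - time elapsed since attach, never < 0. */
    static long long
    remaining_delay(long long timeout_ticks, long long now, long long attach_time)
    {
            long long left = timeout_ticks - (now - attach_time);

            return (left < 0 ? 0 : left);
    }

    int
    main(void)
    {
            printf("%lld\n", remaining_delay(1000, 5300, 5000));  /* 700 left */
            printf("%lld\n", remaining_delay(1000, 9000, 5000));  /* clamped to 0 */
            return (0);
    }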
15774 15739
15775 15740 static int
15776 15741 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15777 15742 ddi_bus_config_op_t op, void *arg)
15778 15743 {
15779 15744 if (fcp_bus_config_debug) {
15780 15745 flag |= NDI_DEVI_DEBUG;
15781 15746 }
15782 15747
15783 15748 return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15784 15749 }
15785 15750
15786 15751
15787 15752 /*
15788 15753 * Routine to copy GUID into the lun structure.
15789 15754  * Returns 0 if the copy was successful and 1 if a failure was
15790 15755  * encountered and the GUID was not copied.
15791 15756 */
15792 15757 static int
15793 15758 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
15794 15759 {
15795 15760
15796 15761 	int retval = 0;
15797 15762 	unsigned int len;
15798 15763 
15799 15764 	if ((guidp == NULL) || (plun == NULL)) {
15800 15765 		return (1);
15801 15766 	}
15802 15767 	/* add one for the null terminator */
15803 15768 	len = strlen(guidp) + 1;
15804 15769
15805 15770 /*
15806 15771 	 * If plun->lun_guid has already been allocated,
15807 15772 	 * then check the size.  If the size is exact, reuse
15808 15773 	 * it; if not, free it and allocate the required size.
15809 15774 	 * The reallocation should NOT typically happen
15810 15775 	 * unless the GUID reported changes between passes.
15811 15776 	 * We free up and allocate again even if the
15812 15777 	 * size was more than required.  This is due to the
15813 15778 	 * fact that the field lun_guid_size serves the
15814 15779 	 * dual role of indicating the GUID string size
15815 15780 	 * and ALSO the allocation size.
15816 15781 */
15817 15782 if (plun->lun_guid) {
15818 15783 if (plun->lun_guid_size != len) {
15819 15784 /*
15820 15785 * free the allocated memory and
15821 15786 * initialize the field
15822 15787 * lun_guid_size to 0.
15823 15788 */
15824 15789 kmem_free(plun->lun_guid, plun->lun_guid_size);
15825 15790 plun->lun_guid = NULL;
15826 15791 plun->lun_guid_size = 0;
15827 15792 }
15828 15793 }
15829 15794 /*
15830 15795 * alloc only if not already done.
15831 15796 */
15832 15797 if (plun->lun_guid == NULL) {
15833 15798 plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15834 15799 if (plun->lun_guid == NULL) {
15835 15800 			cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block:"
15836 15801 			    " Unable to allocate"
15837 15802 			    " memory for GUID, size %d", len);
15838 15803 retval = 1;
15839 15804 } else {
15840 15805 plun->lun_guid_size = len;
15841 15806 }
15842 15807 }
15843 15808 if (plun->lun_guid) {
15844 15809 /*
15845 15810 * now copy the GUID
15846 15811 */
15847 15812 bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15848 15813 }
15849 15814 return (retval);
15850 15815 }
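
Editorial note: the GUID copy above sizes its buffer to strlen()+1, reuses the existing allocation only when the size matches exactly, and otherwise frees and reallocates, because lun_guid_size doubles as both the string size and the allocation size. A user-space sketch of that reuse-or-reallocate pattern (struct and function names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct lun {
            char            *guid;
            unsigned int    guid_size;      /* doubles as allocation size */
    };

    static int
    copy_guid(struct lun *l, const char *guidp)
    {
            const unsigned int len = (unsigned int)strlen(guidp) + 1;

            /* Reuse only an exact-size buffer; otherwise free and reallocate. */
            if (l->guid != NULL && l->guid_size != len) {
                    free(l->guid);
                    l->guid = NULL;
                    l->guid_size = 0;
            }
            if (l->guid == NULL) {
                    if ((l->guid = calloc(1, len)) == NULL)
                            return (1);
                    l->guid_size = len;
            }
            memcpy(l->guid, guidp, len);
            return (0);
    }

    int
    main(void)
    {
            struct lun l = { NULL, 0 };

            (void) copy_guid(&l, "600a0b800011feaa0000");
            printf("%s (%u bytes)\n", l.guid, l.guid_size);
            free(l.guid);
            return (0);
    }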
15851 15816
15852 15817 /*
15853 15818 * fcp_reconfig_wait
15854 15819 *
15855 15820 * Wait for a rediscovery/reconfiguration to complete before continuing.
15856 15821 */
15857 15822
15858 15823 static void
15859 15824 fcp_reconfig_wait(struct fcp_port *pptr)
15860 15825 {
15861 15826 clock_t reconfig_start, wait_timeout;
15862 15827
15863 15828 /*
15864 15829 * Quick check. If pptr->port_tmp_cnt is 0, there is no
15865 15830 * reconfiguration in progress.
15866 15831 */
15867 15832
15868 15833 mutex_enter(&pptr->port_mutex);
15869 15834 if (pptr->port_tmp_cnt == 0) {
15870 15835 mutex_exit(&pptr->port_mutex);
15871 15836 return;
15872 15837 }
15873 15838 mutex_exit(&pptr->port_mutex);
15874 15839
15875 15840 /*
15876 15841 * If we cause a reconfig by raising power, delay until all devices
15877 15842 * report in (port_tmp_cnt returns to 0)
15878 15843 */
15879 15844
15880 15845 reconfig_start = ddi_get_lbolt();
15881 15846 wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);
15882 15847
15883 15848 mutex_enter(&pptr->port_mutex);
15884 15849
15885 15850 while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
15886 15851 pptr->port_tmp_cnt) {
15887 15852
15888 15853 (void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
15889 15854 reconfig_start + wait_timeout);
15890 15855 }
15891 15856
15892 15857 mutex_exit(&pptr->port_mutex);
15893 15858
15894 15859 /*
15895 15860 	 * Even if port_tmp_cnt isn't 0, continue without error.  The port
15896 15861 	 * we want may still be OK.  If not, it will error out later.
15897 15862 */
15898 15863 }
15899 15864
15900 15865 /*
15901 15866 * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
15902 15867 * We rely on the fcp_global_mutex to provide protection against changes to
15903 15868 * the fcp_lun_blacklist.
15904 15869 *
15905 15870 * You can describe a list of target port WWNs and LUN numbers which will
15906 15871 * not be configured. LUN numbers will be interpreted as decimal. White
15907 15872 * spaces and ',' can be used in the list of LUN numbers.
15908 15873 *
15909 15874 * To prevent LUNs 1 and 2 from being configured for target
15910 15875 * port 510000f010fd92a1 and target port 510000e012079df1, set:
15911 15876 *
15912 15877 * pwwn-lun-blacklist=
15913 15878 * "510000f010fd92a1,1,2",
15914 15879 * "510000e012079df1,1,2";
15915 15880 */
15916 15881 static void
15917 15882 fcp_read_blacklist(dev_info_t *dip,
15918 15883 struct fcp_black_list_entry **pplun_blacklist)
15919 15884 {
15920 15885 char **prop_array = NULL;
15921 15886 char *curr_pwwn = NULL;
15922 15887 char *curr_lun = NULL;
15923 15888 uint32_t prop_item = 0;
15924 15889 int idx = 0;
15925 15890 int len = 0;
15926 15891
15927 15892 ASSERT(mutex_owned(&fcp_global_mutex));
15928 15893 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
15929 15894 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
15930 15895 LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
15931 15896 return;
15932 15897 }
15933 15898
15934 15899 for (idx = 0; idx < prop_item; idx++) {
15935 15900
15936 15901 curr_pwwn = prop_array[idx];
15937 15902 while (*curr_pwwn == ' ') {
15938 15903 curr_pwwn++;
15939 15904 }
15940 15905 if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
15941 15906 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15942 15907 ", please check.", curr_pwwn);
15943 15908 continue;
15944 15909 }
15945 15910 if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
15946 15911 (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
15947 15912 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15948 15913 ", please check.", curr_pwwn);
15949 15914 continue;
15950 15915 }
15951 15916 for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
15952 15917 if (isxdigit(curr_pwwn[len]) != TRUE) {
15953 15918 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
15954 15919 "blacklist, please check.", curr_pwwn);
15955 15920 break;
15956 15921 }
15957 15922 }
15958 15923 if (len != sizeof (la_wwn_t) * 2) {
15959 15924 continue;
15960 15925 }
15961 15926
15962 15927 curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
15963 15928 *(curr_lun - 1) = '\0';
15964 15929 fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
15965 15930 }
15966 15931
15967 15932 ddi_prop_free(prop_array);
15968 15933 }
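
Editorial note: fcp_read_blacklist() accepts a property entry only if it begins with exactly sizeof (la_wwn_t) * 2 = 16 hex digits followed by a ',' or a space, and then hands the remainder of the string to fcp_mask_pwwn_lun() as the LUN list. A standalone sketch of that front-end validation (the helper name is invented):

    #include <stdio.h>
    #include <string.h>
    #include <ctype.h>

    #define WWN_ASCII_LEN   16      /* sizeof (la_wwn_t) * 2 */

    /*
     * Return a pointer to the LUN list that follows a valid 16-hex-digit
     * WWN and its separator, or NULL if the entry is malformed.
     */
    static const char *
    split_blacklist_entry(const char *entry)
    {
            int i;

            while (*entry == ' ')
                    entry++;
            if (strlen(entry) <= WWN_ASCII_LEN + 1)
                    return (NULL);
            for (i = 0; i < WWN_ASCII_LEN; i++)
                    if (!isxdigit((unsigned char)entry[i]))
                            return (NULL);
            if (entry[WWN_ASCII_LEN] != ',' && entry[WWN_ASCII_LEN] != ' ')
                    return (NULL);
            return (entry + WWN_ASCII_LEN + 1);
    }

    int
    main(void)
    {
            const char *luns = split_blacklist_entry("510000f010fd92a1,1,2");

            printf("%s\n", luns ? luns : "invalid");        /* prints 1,2 */
            return (0);
    }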
15969 15934
15970 15935 /*
15971 15936 * Get the masking info about one remote target port designated by wwn.
15972 15937  * LUN ids can be separated by ',' or white space.
15973 15938 */
15974 15939 static void
15975 15940 fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
15976 15941 struct fcp_black_list_entry **pplun_blacklist)
15977 15942 {
15978 15943 int idx = 0;
15979 15944 uint32_t offset = 0;
15980 15945 unsigned long lun_id = 0;
15981 15946 char lunid_buf[16];
15982 15947 char *pend = NULL;
15983 15948 int illegal_digit = 0;
15984 15949
15985 15950 while (offset < strlen(curr_lun)) {
15986 15951 while ((curr_lun[offset + idx] != ',') &&
15987 15952 (curr_lun[offset + idx] != '\0') &&
15988 15953 (curr_lun[offset + idx] != ' ')) {
15989 15954 if (isdigit(curr_lun[offset + idx]) == 0) {
15990 15955 illegal_digit++;
15991 15956 }
15992 15957 idx++;
15993 15958 }
15994 15959 if (illegal_digit > 0) {
15995 15960 offset += (idx+1); /* To the start of next lun */
15996 15961 idx = 0;
15997 15962 illegal_digit = 0;
15998 15963 fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15999 15964 "the blacklist, please check digits.",
16000 15965 curr_lun, curr_pwwn);
16001 15966 continue;
16002 15967 }
16003 15968 if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
16004 15969 fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
16005 15970 "the blacklist, please check the length of LUN#.",
16006 15971 curr_lun, curr_pwwn);
16007 15972 break;
16008 15973 }
16009 15974 if (idx == 0) { /* ignore ' ' or ',' or '\0' */
16010 15975 offset++;
16011 15976 continue;
16012 15977 }
16013 15978
16014 15979 bcopy(curr_lun + offset, lunid_buf, idx);
16015 15980 lunid_buf[idx] = '\0';
16016 15981 if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
16017 15982 fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
16018 15983 } else {
16019 15984 fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
16020 15985 "the blacklist, please check %s.",
16021 15986 curr_lun, curr_pwwn, lunid_buf);
16022 15987 }
16023 15988 offset += (idx+1); /* To the start of next lun */
16024 15989 idx = 0;
16025 15990 }
16026 15991 }
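
Editorial note: fcp_mask_pwwn_lun() walks the LUN list token by token: runs of decimal digits separated by ',' or spaces are converted with ddi_strtoul(), while malformed or oversized tokens are logged and skipped. A simplified user-space sketch using strtoul over the same "1,2, 3" style input, with error handling reduced to a message:

    #include <stdio.h>
    #include <stdlib.h>
    #include <ctype.h>

    /* Parse a ','/space separated decimal LUN list, printing each LUN id. */
    static void
    parse_lun_list(const char *luns)
    {
            const char *p = luns;

            while (*p != '\0') {
                    char *end;
                    unsigned long lun;

                    if (*p == ',' || *p == ' ') {   /* skip separators */
                            p++;
                            continue;
                    }
                    if (!isdigit((unsigned char)*p)) {
                            fprintf(stderr, "invalid LUN token near '%s'\n", p);
                            while (*p != '\0' && *p != ',' && *p != ' ')
                                    p++;
                            continue;
                    }
                    lun = strtoul(p, &end, 10);
                    printf("masking LUN %lu\n", lun);
                    p = end;
            }
    }

    int
    main(void)
    {
            parse_lun_list("1,2, 3");
            return (0);
    }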
16027 15992
16028 15993 /*
16029 15994 * Add one masking record
16030 15995 */
16031 15996 static void
16032 15997 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
16033 15998 struct fcp_black_list_entry **pplun_blacklist)
16034 15999 {
16035 16000 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
16036 16001 struct fcp_black_list_entry *new_entry = NULL;
16037 16002 la_wwn_t wwn;
16038 16003
16039 16004 fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
16040 16005 while (tmp_entry) {
16041 16006 if ((bcmp(&tmp_entry->wwn, &wwn,
16042 16007 sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
16043 16008 return;
16044 16009 }
16045 16010
16046 16011 tmp_entry = tmp_entry->next;
16047 16012 }
16048 16013
16049 16014 /* add to black list */
16050 16015 new_entry = (struct fcp_black_list_entry *)kmem_zalloc
16051 16016 (sizeof (struct fcp_black_list_entry), KM_SLEEP);
16052 16017 bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
16053 16018 new_entry->lun = lun_id;
16054 16019 new_entry->masked = 0;
16055 16020 new_entry->next = *pplun_blacklist;
16056 16021 *pplun_blacklist = new_entry;
16057 16022 }
16058 16023
16059 16024 /*
16060 16025 * Check if we should mask the specified lun of this fcp_tgt
16061 16026 */
16062 16027 static int
16063 16028 fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id)
16064 16029 {
16065 16030 struct fcp_black_list_entry *remote_port;
16066 16031
16067 16032 remote_port = fcp_lun_blacklist;
16068 16033 while (remote_port != NULL) {
16069 16034 if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
16070 16035 if (remote_port->lun == lun_id) {
16071 16036 remote_port->masked++;
16072 16037 if (remote_port->masked == 1) {
16073 16038 fcp_log(CE_NOTE, NULL, "LUN %d of port "
16074 16039 "%02x%02x%02x%02x%02x%02x%02x%02x "
16075 16040 "is masked due to black listing.\n",
16076 16041 lun_id, wwn->raw_wwn[0],
16077 16042 wwn->raw_wwn[1], wwn->raw_wwn[2],
16078 16043 wwn->raw_wwn[3], wwn->raw_wwn[4],
16079 16044 wwn->raw_wwn[5], wwn->raw_wwn[6],
16080 16045 wwn->raw_wwn[7]);
16081 16046 }
16082 16047 return (TRUE);
16083 16048 }
16084 16049 }
16085 16050 remote_port = remote_port->next;
16086 16051 }
16087 16052 return (FALSE);
16088 16053 }
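
Editorial note: the blacklist itself is a singly linked list searched by (port WWN, LUN id); on the first hit for a given entry the driver logs a note and keeps counting subsequent hits in 'masked'. A minimal sketch of that lookup, with memcmp standing in for bcmp and a simplified node layout:

    #include <stdio.h>
    #include <string.h>

    #define WWN_SIZE        8

    struct black_list_entry {
            unsigned char           wwn[WWN_SIZE];
            unsigned int            lun;
            unsigned int            masked;
            struct black_list_entry *next;
    };

    static int
    should_mask(struct black_list_entry *list, const unsigned char *wwn,
        unsigned int lun)
    {
            struct black_list_entry *e;

            for (e = list; e != NULL; e = e->next) {
                    if (memcmp(wwn, e->wwn, WWN_SIZE) == 0 && e->lun == lun) {
                            if (++e->masked == 1)
                                    printf("LUN %u is masked (first hit)\n", lun);
                            return (1);
                    }
            }
            return (0);
    }

    int
    main(void)
    {
            struct black_list_entry entry = {
                    { 0x51, 0x00, 0x00, 0xf0, 0x10, 0xfd, 0x92, 0xa1 }, 2, 0, NULL
            };
            unsigned char wwn[WWN_SIZE] =
                { 0x51, 0x00, 0x00, 0xf0, 0x10, 0xfd, 0x92, 0xa1 };

            printf("%d\n", should_mask(&entry, wwn, 2));    /* 1: masked */
            printf("%d\n", should_mask(&entry, wwn, 5));    /* 0: not listed */
            return (0);
    }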
16089 16054
16090 16055 /*
16091 16056 * Release all allocated resources
16092 16057 */
16093 16058 static void
16094 16059 fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist)
16095 16060 {
16096 16061 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
16097 16062 struct fcp_black_list_entry *current_entry = NULL;
16098 16063
16099 16064 ASSERT(mutex_owned(&fcp_global_mutex));
16100 16065 /*
16101 16066 * Traverse all luns
16102 16067 */
16103 16068 while (tmp_entry) {
16104 16069 current_entry = tmp_entry;
16105 16070 tmp_entry = tmp_entry->next;
16106 16071 kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
16107 16072 }
16108 16073 *pplun_blacklist = NULL;
16109 16074 }
16110 16075
16111 16076 /*
16112 16077 * In fcp module,
16113 16078 * pkt@scsi_pkt, cmd@fcp_pkt, icmd@fcp_ipkt, fpkt@fc_packet, pptr@fcp_port
16114 16079 */
16115 16080 static struct scsi_pkt *
16116 16081 fcp_pseudo_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
16117 16082 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
16118 16083 int flags, int (*callback)(), caddr_t arg)
16119 16084 {
16120 16085 fcp_port_t *pptr = ADDR2FCP(ap);
16121 16086 fcp_pkt_t *cmd = NULL;
16122 16087 fc_frame_hdr_t *hp;
16123 16088
16124 16089 /*
16125 16090 * First step: get the packet
16126 16091 */
16127 16092 if (pkt == NULL) {
16128 16093 pkt = scsi_hba_pkt_alloc(pptr->port_dip, ap, cmdlen, statuslen,
16129 16094 tgtlen, sizeof (fcp_pkt_t) + pptr->port_priv_pkt_len,
16130 16095 callback, arg);
16131 16096 if (pkt == NULL) {
16132 16097 return (NULL);
16133 16098 }
16134 16099
16135 16100 /*
16136 16101 * All fields in scsi_pkt will be initialized properly or
16137 16102 		 * set to zero.  We need to do nothing for scsi_pkt.
16138 16103 */
16139 16104 /*
16140 16105 * But it's our responsibility to link other related data
16141 16106 		 * structures.  Their initialization will be done just
16142 16107 		 * before the scsi_pkt is sent to the FCA.
16143 16108 */
16144 16109 cmd = PKT2CMD(pkt);
16145 16110 cmd->cmd_pkt = pkt;
16146 16111 cmd->cmd_fp_pkt = &cmd->cmd_fc_packet;
16147 16112 /*
16148 16113 * fc_packet_t
16149 16114 */
16150 16115 cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
16151 16116 cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
16152 16117 sizeof (struct fcp_pkt));
16153 16118 cmd->cmd_fp_pkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
16154 16119 cmd->cmd_fp_pkt->pkt_cmdlen = sizeof (struct fcp_cmd);
16155 16120 cmd->cmd_fp_pkt->pkt_resp = cmd->cmd_fcp_rsp;
16156 16121 cmd->cmd_fp_pkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
16157 16122 /*
16158 16123 		 * Fill in the Fibre Channel Header
16159 16124 */
16160 16125 hp = &cmd->cmd_fp_pkt->pkt_cmd_fhdr;
16161 16126 hp->r_ctl = R_CTL_COMMAND;
16162 16127 hp->rsvd = 0;
16163 16128 hp->type = FC_TYPE_SCSI_FCP;
16164 16129 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
16165 16130 hp->seq_id = 0;
16166 16131 hp->df_ctl = 0;
16167 16132 hp->seq_cnt = 0;
16168 16133 hp->ox_id = 0xffff;
16169 16134 hp->rx_id = 0xffff;
16170 16135 hp->ro = 0;
16171 16136 } else {
16172 16137 /*
16173 16138 		 * We need to consider whether we should reset any elements
16174 16139 		 * in the related data structures.
16175 16140 */
16176 16141 FCP_TRACE(fcp_logq, pptr->port_instbuf,
16177 16142 fcp_trace, FCP_BUF_LEVEL_6, 0,
16178 16143 "reusing pkt, flags %d", flags);
16179 16144 cmd = PKT2CMD(pkt);
16180 16145 if (cmd->cmd_fp_pkt->pkt_pd) {
16181 16146 cmd->cmd_fp_pkt->pkt_pd = NULL;
16182 16147 }
16183 16148 }
16184 16149
16185 16150 /*
16186 16151 * Second step: dma allocation/move
16187 16152 */
16188 16153 if (bp && bp->b_bcount != 0) {
16189 16154 /*
16190 16155 * Mark if it's read or write
16191 16156 */
16192 16157 if (bp->b_flags & B_READ) {
16193 16158 cmd->cmd_flags |= CFLAG_IS_READ;
16194 16159 } else {
16195 16160 cmd->cmd_flags &= ~CFLAG_IS_READ;
16196 16161 }
16197 16162
16198 16163 bp_mapin(bp);
16199 16164 cmd->cmd_fp_pkt->pkt_data = bp->b_un.b_addr;
16200 16165 cmd->cmd_fp_pkt->pkt_datalen = bp->b_bcount;
16201 16166 cmd->cmd_fp_pkt->pkt_data_resid = 0;
16202 16167 } else {
16203 16168 /*
16204 16169 * It seldom happens, except when CLUSTER or SCSI_VHCI wants
16205 16170 * to send zero-length read/write.
16206 16171 */
16207 16172 cmd->cmd_fp_pkt->pkt_data = NULL;
16208 16173 cmd->cmd_fp_pkt->pkt_datalen = 0;
16209 16174 }
16210 16175
16211 16176 return (pkt);
16212 16177 }
16213 16178
16214 16179 static void
16215 16180 fcp_pseudo_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16216 16181 {
16217 16182 fcp_port_t *pptr = ADDR2FCP(ap);
16218 16183
16219 16184 /*
16220 16185 	 * First we let the FCA uninitialize the private part.
16221 16186 */
16222 16187 (void) fc_ulp_uninit_packet(pptr->port_fp_handle,
16223 16188 PKT2CMD(pkt)->cmd_fp_pkt);
16224 16189
16225 16190 /*
16226 16191 * Then we uninitialize fc_packet.
16227 16192 */
16228 16193
16229 16194 /*
16230 16195 	 * Thirdly, we uninitialize fcp_pkt.
16231 16196 */
16232 16197
16233 16198 /*
16234 16199 * In the end, we free scsi_pkt.
16235 16200 */
16236 16201 scsi_hba_pkt_free(ap, pkt);
16237 16202 }
16238 16203
16239 16204 static int
16240 16205 fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt)
16241 16206 {
16242 16207 fcp_port_t *pptr = ADDR2FCP(ap);
16243 16208 fcp_lun_t *plun = ADDR2LUN(ap);
16244 16209 fcp_tgt_t *ptgt = plun->lun_tgt;
16245 16210 fcp_pkt_t *cmd = PKT2CMD(pkt);
16246 16211 fcp_cmd_t *fcmd = &cmd->cmd_fcp_cmd;
16247 16212 fc_packet_t *fpkt = cmd->cmd_fp_pkt;
16248 16213 int rval;
16249 16214
16250 16215 fpkt->pkt_pd = ptgt->tgt_pd_handle;
16251 16216 (void) fc_ulp_init_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt, 1);
16252 16217
16253 16218 /*
16254 16219 	 * First, we need to initialize fcp_pkt_t.
16255 16220 	 * Second, we need to initialize fcp_cmd_t.
16256 16221 */
16257 16222 bcopy(pkt->pkt_cdbp, fcmd->fcp_cdb, pkt->pkt_cdblen);
16258 16223 fcmd->fcp_data_len = fpkt->pkt_datalen;
16259 16224 fcmd->fcp_ent_addr = plun->lun_addr;
16260 16225 if (pkt->pkt_flags & FLAG_HTAG) {
16261 16226 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
16262 16227 } else if (pkt->pkt_flags & FLAG_OTAG) {
16263 16228 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
16264 16229 } else if (pkt->pkt_flags & FLAG_STAG) {
16265 16230 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
16266 16231 } else {
16267 16232 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
16268 16233 }
16269 16234
16270 16235 if (cmd->cmd_flags & CFLAG_IS_READ) {
16271 16236 fcmd->fcp_cntl.cntl_read_data = 1;
16272 16237 fcmd->fcp_cntl.cntl_write_data = 0;
16273 16238 } else {
16274 16239 fcmd->fcp_cntl.cntl_read_data = 0;
16275 16240 fcmd->fcp_cntl.cntl_write_data = 1;
16276 16241 }
16277 16242
16278 16243 /*
16279 16244 	 * Then we need to initialize fc_packet_t too.
16280 16245 */
16281 16246 fpkt->pkt_timeout = pkt->pkt_time + 2;
16282 16247 fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
16283 16248 fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
16284 16249 if (cmd->cmd_flags & CFLAG_IS_READ) {
16285 16250 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
16286 16251 } else {
16287 16252 fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
16288 16253 }
16289 16254
16290 16255 if (pkt->pkt_flags & FLAG_NOINTR) {
16291 16256 fpkt->pkt_comp = NULL;
16292 16257 fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
16293 16258 } else {
16294 16259 fpkt->pkt_comp = fcp_cmd_callback;
16295 16260 fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
16296 16261 if (pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
16297 16262 fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
16298 16263 }
16299 16264 }
16300 16265
16301 16266 /*
16302 16267 	 * Lastly, we need to initialize scsi_pkt.
16303 16268 */
16304 16269 pkt->pkt_reason = CMD_CMPLT;
16305 16270 pkt->pkt_state = 0;
16306 16271 pkt->pkt_statistics = 0;
16307 16272 pkt->pkt_resid = 0;
16308 16273
16309 16274 /*
16310 16275 * if interrupts aren't allowed (e.g. at dump time) then we'll
16311 16276 * have to do polled I/O
16312 16277 */
16313 16278 if (pkt->pkt_flags & FLAG_NOINTR) {
16314 16279 return (fcp_dopoll(pptr, cmd));
16315 16280 }
16316 16281
16317 16282 cmd->cmd_state = FCP_PKT_ISSUED;
16318 16283 rval = fcp_transport(pptr->port_fp_handle, fpkt, 0);
16319 16284 if (rval == FC_SUCCESS) {
16320 16285 return (TRAN_ACCEPT);
16321 16286 }
16322 16287
16323 16288 /*
16324 16289 * Need more consideration
16325 16290 *
16326 16291 * pkt->pkt_flags & FLAG_NOQUEUE could abort other pkt
16327 16292 */
16328 16293 cmd->cmd_state = FCP_PKT_IDLE;
16329 16294 if (rval == FC_TRAN_BUSY) {
16330 16295 return (TRAN_BUSY);
16331 16296 } else {
16332 16297 return (TRAN_FATAL_ERROR);
16333 16298 }
16334 16299 }
16335 16300
16336 16301 /*
16337 16302 * scsi_poll will always call tran_sync_pkt for pseudo FC-HBAs
16338 16303 * SCSA will initialize it to scsi_sync_cache_pkt for physical FC-HBAs
16339 16304 */
16340 16305 static void
16341 16306 fcp_pseudo_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16342 16307 {
16343 16308 FCP_TRACE(fcp_logq, "fcp_pseudo_sync_pkt", fcp_trace,
16344 16309 FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16345 16310 }
16346 16311
16347 16312 /*
16348 16313 * scsi_dmafree will always call tran_dmafree, when STATE_ARQ_DONE
16349 16314 */
16350 16315 static void
16351 16316 fcp_pseudo_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
16352 16317 {
16353 16318 FCP_TRACE(fcp_logq, "fcp_pseudo_dmafree", fcp_trace,
16354 16319 FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16355 16320 }
↓ open down ↓ |
3030 lines elided |
↑ open up ↑ |