Print this page
8368 remove warlock leftovers from usr/src/uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/fibre-channel/ulp/fcp.c
+++ new/usr/src/uts/common/io/fibre-channel/ulp/fcp.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 *
24 24 * Fibre Channel SCSI ULP Mapping driver
25 25 */
26 26
27 27 #include <sys/scsi/scsi.h>
28 28 #include <sys/types.h>
29 29 #include <sys/varargs.h>
30 30 #include <sys/devctl.h>
31 31 #include <sys/thread.h>
32 32 #include <sys/thread.h>
33 33 #include <sys/open.h>
34 34 #include <sys/file.h>
35 35 #include <sys/sunndi.h>
36 36 #include <sys/console.h>
37 37 #include <sys/proc.h>
38 38 #include <sys/time.h>
39 39 #include <sys/utsname.h>
40 40 #include <sys/scsi/impl/scsi_reset_notify.h>
41 41 #include <sys/ndi_impldefs.h>
42 42 #include <sys/byteorder.h>
43 43 #include <sys/fs/dv_node.h>
44 44 #include <sys/ctype.h>
45 45 #include <sys/sunmdi.h>
46 46
47 47 #include <sys/fibre-channel/fc.h>
48 48 #include <sys/fibre-channel/impl/fc_ulpif.h>
49 49 #include <sys/fibre-channel/ulp/fcpvar.h>
50 50
51 51 /*
52 52 * Discovery Process
53 53 * =================
54 54 *
55 55 * The discovery process is a major function of FCP. In order to help
56 56 * understand that function a flow diagram is given here. This diagram
57 57 * doesn't claim to cover all the cases and the events that can occur during
58 58 * the discovery process nor the subtleties of the code. The code paths shown
59 59 * are simplified. Its purpose is to help the reader (and potentially bug
60 60 * fixer) have an overall view of the logic of the code. For that reason the
61 61 * diagram covers the simple case of the line coming up cleanly or of a new
 62 62  * port attaching to FCP while the link is up. The reader must keep in mind
63 63 * that:
64 64 *
65 65 * - There are special cases where bringing devices online and offline
66 66 * is driven by Ioctl.
67 67 *
68 68 * - The behavior of the discovery process can be modified through the
69 69 * .conf file.
70 70 *
71 71 * - The line can go down and come back up at any time during the
72 72 * discovery process which explains some of the complexity of the code.
73 73 *
74 74 * ............................................................................
75 75 *
76 76 * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
77 77 *
78 78 *
79 79 * +-------------------------+
80 80 * fp/fctl module --->| fcp_port_attach |
81 81 * +-------------------------+
82 82 * | |
83 83 * | |
84 84 * | v
85 85 * | +-------------------------+
86 86 * | | fcp_handle_port_attach |
87 87 * | +-------------------------+
88 88 * | |
89 89 * | |
90 90 * +--------------------+ |
91 91 * | |
92 92 * v v
93 93 * +-------------------------+
94 94 * | fcp_statec_callback |
95 95 * +-------------------------+
96 96 * |
97 97 * |
98 98 * v
99 99 * +-------------------------+
100 100 * | fcp_handle_devices |
101 101 * +-------------------------+
102 102 * |
103 103 * |
104 104 * v
105 105 * +-------------------------+
106 106 * | fcp_handle_mapflags |
107 107 * +-------------------------+
108 108 * |
109 109 * |
110 110 * v
111 111 * +-------------------------+
112 112 * | fcp_send_els |
113 113 * | |
114 114 * | PLOGI or PRLI To all the|
115 115 * | reachable devices. |
116 116 * +-------------------------+
117 117 *
118 118 *
119 119 * ............................................................................
120 120 *
121 121 * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
122 122 * STEP 1 are called (it is actually the same function).
123 123 *
124 124 *
125 125 * +-------------------------+
126 126 * | fcp_icmd_callback |
127 127 * fp/fctl module --->| |
128 128 * | callback for PLOGI and |
129 129 * | PRLI. |
130 130 * +-------------------------+
131 131 * |
132 132 * |
133 133 * Received PLOGI Accept /-\ Received PRLI Accept
134 134 * _ _ _ _ _ _ / \_ _ _ _ _ _
135 135 * | \ / |
136 136 * | \-/ |
137 137 * | |
138 138 * v v
139 139 * +-------------------------+ +-------------------------+
140 140 * | fcp_send_els | | fcp_send_scsi |
141 141 * | | | |
142 142 * | PRLI | | REPORT_LUN |
143 143 * +-------------------------+ +-------------------------+
144 144 *
145 145 * ............................................................................
146 146 *
147 147 * STEP 3: The callback functions of the SCSI commands issued by FCP are called
148 148 * (It is actually the same function).
149 149 *
150 150 *
151 151 * +-------------------------+
152 152 * fp/fctl module ------->| fcp_scsi_callback |
153 153 * +-------------------------+
154 154 * |
155 155 * |
156 156 * |
157 157 * Receive REPORT_LUN reply /-\ Receive INQUIRY PAGE83 reply
158 158 * _ _ _ _ _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ _ _ _ _
159 159 * | \ / |
160 160 * | \-/ |
161 161 * | | |
162 162 * | Receive INQUIRY reply| |
163 163 * | | |
164 164 * v v v
165 165 * +------------------------+ +----------------------+ +----------------------+
166 166 * | fcp_handle_reportlun | | fcp_handle_inquiry | | fcp_handle_page83 |
167 167 * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
168 168 * +------------------------+ +----------------------+ +----------------------+
169 169 * | | |
170 170 * | | |
171 171 * | | |
172 172 * v v |
173 173 * +-----------------+ +-----------------+ |
174 174 * | fcp_send_scsi | | fcp_send_scsi | |
175 175 * | | | | |
176 176 * | INQUIRY | | INQUIRY PAGE83 | |
177 177 * | (To each LUN) | +-----------------+ |
178 178 * +-----------------+ |
179 179 * |
180 180 * v
181 181 * +------------------------+
182 182 * | fcp_call_finish_init |
183 183 * +------------------------+
184 184 * |
185 185 * v
186 186 * +-----------------------------+
187 187 * | fcp_call_finish_init_held |
188 188 * +-----------------------------+
189 189 * |
190 190 * |
191 191 * All LUNs scanned /-\
192 192 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ / \
193 193 * | \ /
194 194 * | \-/
195 195 * v |
196 196 * +------------------+ |
197 197 * | fcp_finish_tgt | |
198 198 * +------------------+ |
199 199 * | Target Not Offline and |
200 200 * Target Not Offline and | not marked and tgt_node_state |
201 201 * marked /-\ not FCP_TGT_NODE_ON_DEMAND |
202 202 * _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ |
203 203 * | \ / | |
204 204 * | \-/ | |
205 205 * v v |
206 206 * +----------------------------+ +-------------------+ |
207 207 * | fcp_offline_target | | fcp_create_luns | |
208 208 * | | +-------------------+ |
209 209 * | A structure fcp_tgt_elem | | |
210 210 * | is created and queued in | v |
211 211 * | the FCP port list | +-------------------+ |
212 212 * | port_offline_tgts. It | | fcp_pass_to_hp | |
213 213 * | will be unqueued by the | | | |
214 214 * | watchdog timer. | | Called for each | |
215 215 * +----------------------------+ | LUN. Dispatches | |
216 216 * | | fcp_hp_task | |
217 217 * | +-------------------+ |
218 218 * | | |
219 219 * | | |
220 220 * | | |
221 221 * | +---------------->|
222 222 * | |
223 223 * +---------------------------------------------->|
224 224 * |
225 225 * |
226 226 * All the targets (devices) have been scanned /-\
227 227 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ / \
228 228 * | \ /
229 229 * | \-/
230 230 * +-------------------------------------+ |
231 231 * | fcp_finish_init | |
232 232 * | | |
233 233 * | Signal broadcasts the condition | |
234 234 * | variable port_config_cv of the FCP | |
235 235 * | port. One potential code sequence | |
236 236 * | waiting on the condition variable | |
 237 237  * | is the code sequence handling |                |
238 238 * | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER| |
239 239 * | The other is in the function | |
240 240 * | fcp_reconfig_wait which is called | |
241 241 * | in the transmit path preventing IOs | |
242 242 * | from going through till the disco- | |
243 243 * | very process is over. | |
244 244 * +-------------------------------------+ |
245 245 * | |
246 246 * | |
247 247 * +--------------------------------->|
248 248 * |
249 249 * v
250 250 * Return
251 251 *
252 252 * ............................................................................
253 253 *
254 254 * STEP 4: The hot plug task is called (for each fcp_hp_elem).
255 255 *
256 256 *
257 257 * +-------------------------+
258 258 * | fcp_hp_task |
259 259 * +-------------------------+
260 260 * |
261 261 * |
262 262 * v
263 263 * +-------------------------+
264 264 * | fcp_trigger_lun |
265 265 * +-------------------------+
266 266 * |
267 267 * |
268 268 * v
269 269 * Bring offline /-\ Bring online
270 270 * _ _ _ _ _ _ _ _ _/ \_ _ _ _ _ _ _ _ _ _
271 271 * | \ / |
272 272 * | \-/ |
273 273 * v v
274 274 * +---------------------+ +-----------------------+
275 275 * | fcp_offline_child | | fcp_get_cip |
276 276 * +---------------------+ | |
277 277 * | Creates a dev_info_t |
278 278 * | or a mdi_pathinfo_t |
279 279 * | depending on whether |
280 280 * | mpxio is on or off. |
281 281 * +-----------------------+
282 282 * |
283 283 * |
284 284 * v
285 285 * +-----------------------+
286 286 * | fcp_online_child |
287 287 * | |
288 288 * | Set device online |
289 289 * | using NDI or MDI. |
290 290 * +-----------------------+
291 291 *
292 292 * ............................................................................
293 293 *
 294 294  * STEP 5: The watchdog timer expires. The watchdog timer does much more than
295 295 * what is described here. We only show the target offline path.
296 296 *
297 297 *
298 298 * +--------------------------+
299 299 * | fcp_watch |
300 300 * +--------------------------+
301 301 * |
302 302 * |
303 303 * v
304 304 * +--------------------------+
305 305 * | fcp_scan_offline_tgts |
306 306 * +--------------------------+
307 307 * |
308 308 * |
309 309 * v
310 310 * +--------------------------+
311 311 * | fcp_offline_target_now |
312 312 * +--------------------------+
313 313 * |
314 314 * |
315 315 * v
316 316 * +--------------------------+
317 317 * | fcp_offline_tgt_luns |
318 318 * +--------------------------+
319 319 * |
320 320 * |
321 321 * v
322 322 * +--------------------------+
323 323 * | fcp_offline_lun |
324 324 * +--------------------------+
325 325 * |
326 326 * |
327 327 * v
328 328 * +----------------------------------+
329 329 * | fcp_offline_lun_now |
330 330 * | |
331 331 * | A request (or two if mpxio) is |
332 332 * | sent to the hot plug task using |
333 333 * | a fcp_hp_elem structure. |
334 334 * +----------------------------------+
335 335 */
336 336
337 337 /*
338 338 * Functions registered with DDI framework
339 339 */
340 340 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
341 341 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
342 342 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
343 343 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
344 344 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
345 345 cred_t *credp, int *rval);
346 346
347 347 /*
348 348 * Functions registered with FC Transport framework
349 349 */
350 350 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
351 351 fc_attach_cmd_t cmd, uint32_t s_id);
352 352 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
353 353 fc_detach_cmd_t cmd);
354 354 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
355 355 int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
356 356 uint32_t claimed);
357 357 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
358 358 fc_unsol_buf_t *buf, uint32_t claimed);
359 359 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
360 360 fc_unsol_buf_t *buf, uint32_t claimed);
361 361 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
362 362 uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
363 363 uint32_t dev_cnt, uint32_t port_sid);
364 364
365 365 /*
366 366 * Functions registered with SCSA framework
367 367 */
368 368 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
369 369 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
370 370 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
371 371 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
372 372 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
373 373 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
374 374 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
375 375 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
376 376 static int fcp_scsi_reset(struct scsi_address *ap, int level);
377 377 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
378 378 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
379 379 int whom);
380 380 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
381 381 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
382 382 void (*callback)(caddr_t), caddr_t arg);
383 383 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
384 384 char *name, ddi_eventcookie_t *event_cookiep);
385 385 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
386 386 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
387 387 ddi_callback_id_t *cb_id);
388 388 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
389 389 ddi_callback_id_t cb_id);
390 390 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
391 391 ddi_eventcookie_t eventid, void *impldata);
392 392 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
393 393 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
394 394 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
395 395 ddi_bus_config_op_t op, void *arg);
396 396
397 397 /*
398 398 * Internal functions
399 399 */
400 400 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
401 401 int mode, int *rval);
402 402
403 403 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
404 404 int mode, int *rval);
405 405 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
406 406 struct fcp_scsi_cmd *fscsi, int mode);
407 407 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
408 408 caddr_t base_addr, int mode);
409 409 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
410 410
411 411 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
412 412 la_wwn_t *pwwn, int *ret_val, int *fc_status, int *fc_pkt_state,
413 413 int *fc_pkt_reason, int *fc_pkt_action);
414 414 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
415 415 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
416 416 static int fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status,
417 417 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
418 418 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
419 419 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
420 420 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
421 421 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
422 422
423 423 static void fcp_handle_devices(struct fcp_port *pptr,
424 424 fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
425 425 fcp_map_tag_t *map_tag, int cause);
426 426 static int fcp_handle_mapflags(struct fcp_port *pptr,
427 427 struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
428 428 int tgt_cnt, int cause);
429 429 static int fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause);
430 430 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
431 431 struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
432 432 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
433 433 int cause);
434 434 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
435 435 uint32_t state);
436 436 static struct fcp_port *fcp_get_port(opaque_t port_handle);
437 437 static void fcp_unsol_callback(fc_packet_t *fpkt);
438 438 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
439 439 uchar_t r_ctl, uchar_t type);
440 440 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
441 441 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
442 442 struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
443 443 int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
444 444 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
445 445 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
446 446 int nodma, int flags);
447 447 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
448 448 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
449 449 uchar_t *wwn);
450 450 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
451 451 uint32_t d_id);
452 452 static void fcp_icmd_callback(fc_packet_t *fpkt);
453 453 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
454 454 int len, int lcount, int tcount, int cause, uint32_t rscn_count);
455 455 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
456 456 static void fcp_scsi_callback(fc_packet_t *fpkt);
457 457 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
458 458 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
459 459 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
460 460 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
461 461 uint16_t lun_num);
462 462 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
463 463 int link_cnt, int tgt_cnt, int cause);
464 464 static void fcp_finish_init(struct fcp_port *pptr);
465 465 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
466 466 int tgt_cnt, int cause);
467 467 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
468 468 int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags);
469 469 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
470 470 int link_cnt, int tgt_cnt, int nowait, int flags);
471 471 static void fcp_offline_target_now(struct fcp_port *pptr,
472 472 struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
473 473 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
474 474 int tgt_cnt, int flags);
475 475 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
476 476 int nowait, int flags);
477 477 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
478 478 int tgt_cnt);
479 479 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
480 480 int tgt_cnt, int flags);
481 481 static void fcp_scan_offline_luns(struct fcp_port *pptr);
482 482 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
483 483 static void fcp_update_offline_flags(struct fcp_lun *plun);
484 484 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
485 485 static void fcp_abort_commands(struct fcp_pkt *head, struct
486 486 fcp_port *pptr);
487 487 static void fcp_cmd_callback(fc_packet_t *fpkt);
488 488 static void fcp_complete_pkt(fc_packet_t *fpkt);
489 489 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
490 490 struct fcp_port *pptr);
491 491 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
492 492 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
493 493 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
494 494 static void fcp_dealloc_lun(struct fcp_lun *plun);
495 495 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
496 496 fc_portmap_t *map_entry, int link_cnt);
497 497 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
498 498 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
499 499 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
500 500 int internal);
501 501 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
502 502 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
503 503 uint32_t s_id, int instance);
504 504 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
505 505 int instance);
506 506 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
507 507 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
508 508 int);
509 509 static void fcp_kmem_cache_destructor(struct scsi_pkt *, scsi_hba_tran_t *);
510 510 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
511 511 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
512 512 int flags);
513 513 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
514 514 static int fcp_reset_target(struct scsi_address *ap, int level);
515 515 static int fcp_commoncap(struct scsi_address *ap, char *cap,
516 516 int val, int tgtonly, int doset);
517 517 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
518 518 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
519 519 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
520 520 int sleep);
521 521 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
522 522 uint32_t s_id, fc_attach_cmd_t cmd, int instance);
523 523 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
524 524 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
525 525 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
526 526 int lcount, int tcount);
527 527 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
528 528 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
529 529 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
530 530 int tgt_cnt);
531 531 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
532 532 dev_info_t *pdip, caddr_t name);
533 533 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
534 534 int lcount, int tcount, int flags, int *circ);
535 535 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
536 536 int lcount, int tcount, int flags, int *circ);
537 537 static void fcp_remove_child(struct fcp_lun *plun);
538 538 static void fcp_watch(void *arg);
539 539 static void fcp_check_reset_delay(struct fcp_port *pptr);
540 540 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
541 541 struct fcp_lun *rlun, int tgt_cnt);
542 542 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
543 543 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
544 544 uchar_t *wwn, uint16_t lun);
545 545 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
546 546 struct fcp_lun *plun);
547 547 static void fcp_post_callback(struct fcp_pkt *cmd);
548 548 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
549 549 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
550 550 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
551 551 child_info_t *cip);
552 552 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
553 553 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
554 554 int tgt_cnt, int flags);
555 555 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
556 556 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
557 557 int tgt_cnt, int flags, int wait);
558 558 static void fcp_retransport_cmd(struct fcp_port *pptr,
559 559 struct fcp_pkt *cmd);
560 560 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
561 561 uint_t statistics);
562 562 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
563 563 static void fcp_update_targets(struct fcp_port *pptr,
564 564 fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
565 565 static int fcp_call_finish_init(struct fcp_port *pptr,
566 566 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
567 567 static int fcp_call_finish_init_held(struct fcp_port *pptr,
568 568 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
569 569 static void fcp_reconfigure_luns(void * tgt_handle);
570 570 static void fcp_free_targets(struct fcp_port *pptr);
571 571 static void fcp_free_target(struct fcp_tgt *ptgt);
572 572 static int fcp_is_retryable(struct fcp_ipkt *icmd);
573 573 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
574 574 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
575 575 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
576 576 static void fcp_print_error(fc_packet_t *fpkt);
577 577 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
578 578 struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
579 579 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
580 580 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
581 581 uint32_t *dev_cnt);
582 582 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
583 583 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
584 584 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
585 585 struct fcp_ioctl *, struct fcp_port **);
586 586 static char *fcp_get_lun_path(struct fcp_lun *plun);
587 587 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
588 588 int *rval);
589 589 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
590 590 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
591 591 static char *fcp_get_lun_path(struct fcp_lun *plun);
592 592 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
593 593 int *rval);
594 594 static void fcp_reconfig_wait(struct fcp_port *pptr);
595 595
596 596 /*
597 597 * New functions added for mpxio support
598 598 */
599 599 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
600 600 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
601 601 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
602 602 int tcount);
603 603 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
604 604 dev_info_t *pdip);
605 605 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
606 606 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
607 607 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
608 608 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
609 609 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
610 610 int what);
611 611 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
612 612 fc_packet_t *fpkt);
613 613 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
614 614
615 615 /*
616 616 * New functions added for lun masking support
617 617 */
618 618 static void fcp_read_blacklist(dev_info_t *dip,
619 619 struct fcp_black_list_entry **pplun_blacklist);
620 620 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
621 621 struct fcp_black_list_entry **pplun_blacklist);
622 622 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
623 623 struct fcp_black_list_entry **pplun_blacklist);
624 624 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
625 625 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
626 626
627 627 /*
628 628 * New functions to support software FCA (like fcoei)
629 629 */
630 630 static struct scsi_pkt *fcp_pseudo_init_pkt(
631 631 struct scsi_address *ap, struct scsi_pkt *pkt,
632 632 struct buf *bp, int cmdlen, int statuslen,
633 633 int tgtlen, int flags, int (*callback)(), caddr_t arg);
634 634 static void fcp_pseudo_destroy_pkt(
635 635 struct scsi_address *ap, struct scsi_pkt *pkt);
636 636 static void fcp_pseudo_sync_pkt(
637 637 struct scsi_address *ap, struct scsi_pkt *pkt);
638 638 static int fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt);
639 639 static void fcp_pseudo_dmafree(
640 640 struct scsi_address *ap, struct scsi_pkt *pkt);
641 641
642 642 extern struct mod_ops mod_driverops;
643 643 /*
644 644 * This variable is defined in modctl.c and set to '1' after the root driver
645 645 * and fs are loaded. It serves as an indication that the root filesystem can
646 646 * be used.
647 647 */
648 648 extern int modrootloaded;
649 649 /*
650 650 * This table contains strings associated with the SCSI sense key codes. It
651 651 * is used by FCP to print a clear explanation of the code returned in the
652 652 * sense information by a device.
653 653 */
654 654 extern char *sense_keys[];
655 655 /*
656 656 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
657 657 * under this device that the paths to a physical device are created when
658 658 * MPxIO is used.
659 659 */
660 660 extern dev_info_t *scsi_vhci_dip;
661 661
662 662 /*
663 663 * Report lun processing
664 664 */
665 665 #define FCP_LUN_ADDRESSING 0x80
666 666 #define FCP_PD_ADDRESSING 0x00
667 667 #define FCP_VOLUME_ADDRESSING 0x40
668 668
669 669 #define FCP_SVE_THROTTLE 0x28 /* Vicom */
670 670 #define MAX_INT_DMA 0x7fffffff
671 671 /*
672 672 * Property definitions
673 673 */
674 674 #define NODE_WWN_PROP (char *)fcp_node_wwn_prop
675 675 #define PORT_WWN_PROP (char *)fcp_port_wwn_prop
676 676 #define TARGET_PROP (char *)fcp_target_prop
677 677 #define LUN_PROP (char *)fcp_lun_prop
678 678 #define SAM_LUN_PROP (char *)fcp_sam_lun_prop
679 679 #define CONF_WWN_PROP (char *)fcp_conf_wwn_prop
680 680 #define OBP_BOOT_WWN (char *)fcp_obp_boot_wwn
681 681 #define MANUAL_CFG_ONLY (char *)fcp_manual_config_only
682 682 #define INIT_PORT_PROP (char *)fcp_init_port_prop
683 683 #define TGT_PORT_PROP (char *)fcp_tgt_port_prop
684 684 #define LUN_BLACKLIST_PROP (char *)fcp_lun_blacklist_prop
685 685 /*
686 686 * Short hand macros.
687 687 */
688 688 #define LUN_PORT (plun->lun_tgt->tgt_port)
689 689 #define LUN_TGT (plun->lun_tgt)
690 690
691 691 /*
692 692 * Driver private macros
693 693 */
694 694 #define FCP_ATOB(x) (((x) >= '0' && (x) <= '9') ? ((x) - '0') : \
695 695 ((x) >= 'a' && (x) <= 'f') ? \
696 696 ((x) - 'a' + 10) : ((x) - 'A' + 10))
697 697
698 698 #define FCP_MAX(a, b) ((a) > (b) ? (a) : (b))
699 699
700 700 #define FCP_N_NDI_EVENTS \
701 701 (sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))
702 702
703 703 #define FCP_LINK_STATE_CHANGED(p, c) \
704 704 ((p)->port_link_cnt != (c)->ipkt_link_cnt)
705 705
706 706 #define FCP_TGT_STATE_CHANGED(t, c) \
707 707 ((t)->tgt_change_cnt != (c)->ipkt_change_cnt)
708 708
709 709 #define FCP_STATE_CHANGED(p, t, c) \
710 710 (FCP_TGT_STATE_CHANGED(t, c))
711 711
712 712 #define FCP_MUST_RETRY(fpkt) \
713 713 ((fpkt)->pkt_state == FC_PKT_LOCAL_BSY || \
714 714 (fpkt)->pkt_state == FC_PKT_LOCAL_RJT || \
715 715 (fpkt)->pkt_state == FC_PKT_TRAN_BSY || \
716 716 (fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS || \
717 717 (fpkt)->pkt_state == FC_PKT_NPORT_BSY || \
718 718 (fpkt)->pkt_state == FC_PKT_FABRIC_BSY || \
719 719 (fpkt)->pkt_state == FC_PKT_PORT_OFFLINE || \
720 720 (fpkt)->pkt_reason == FC_REASON_OFFLINE)
721 721
722 722 #define FCP_SENSE_REPORTLUN_CHANGED(es) \
723 723 ((es)->es_key == KEY_UNIT_ATTENTION && \
724 724 (es)->es_add_code == 0x3f && \
725 725 (es)->es_qual_code == 0x0e)
726 726
727 727 #define FCP_SENSE_NO_LUN(es) \
728 728 ((es)->es_key == KEY_ILLEGAL_REQUEST && \
729 729 (es)->es_add_code == 0x25 && \
730 730 (es)->es_qual_code == 0x0)
731 731
732 732 #define FCP_VERSION "20091208-1.192"
733 733 #define FCP_NAME_VERSION "SunFC FCP v" FCP_VERSION
734 734
735 735 #define FCP_NUM_ELEMENTS(array) \
736 736 (sizeof (array) / sizeof ((array)[0]))
737 737
738 738 /*
739 739 * Debugging, Error reporting, and tracing
740 740 */
741 741 #define FCP_LOG_SIZE 1024 * 1024
742 742
743 743 #define FCP_LEVEL_1 0x00001 /* attach/detach PM CPR */
744 744 #define FCP_LEVEL_2 0x00002 /* failures/Invalid data */
745 745 #define FCP_LEVEL_3 0x00004 /* state change, discovery */
746 746 #define FCP_LEVEL_4 0x00008 /* ULP messages */
747 747 #define FCP_LEVEL_5 0x00010 /* ELS/SCSI cmds */
748 748 #define FCP_LEVEL_6 0x00020 /* Transport failures */
749 749 #define FCP_LEVEL_7 0x00040
750 750 #define FCP_LEVEL_8 0x00080 /* I/O tracing */
751 751 #define FCP_LEVEL_9 0x00100 /* I/O tracing */
752 752
753 753
754 754
755 755 /*
756 756 * Log contents to system messages file
757 757 */
758 758 #define FCP_MSG_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
759 759 #define FCP_MSG_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
760 760 #define FCP_MSG_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
761 761 #define FCP_MSG_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
762 762 #define FCP_MSG_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
763 763 #define FCP_MSG_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
764 764 #define FCP_MSG_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
765 765 #define FCP_MSG_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
766 766 #define FCP_MSG_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_MSG)
767 767
768 768
769 769 /*
770 770 * Log contents to trace buffer
771 771 */
772 772 #define FCP_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
773 773 #define FCP_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
774 774 #define FCP_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
775 775 #define FCP_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
776 776 #define FCP_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
777 777 #define FCP_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
778 778 #define FCP_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
779 779 #define FCP_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
780 780 #define FCP_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF)
781 781
782 782
783 783 /*
784 784 * Log contents to both system messages file and trace buffer
785 785 */
786 786 #define FCP_MSG_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF | \
787 787 FC_TRACE_LOG_MSG)
788 788 #define FCP_MSG_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF | \
789 789 FC_TRACE_LOG_MSG)
790 790 #define FCP_MSG_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF | \
791 791 FC_TRACE_LOG_MSG)
792 792 #define FCP_MSG_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF | \
793 793 FC_TRACE_LOG_MSG)
794 794 #define FCP_MSG_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF | \
795 795 FC_TRACE_LOG_MSG)
796 796 #define FCP_MSG_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF | \
797 797 FC_TRACE_LOG_MSG)
798 798 #define FCP_MSG_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF | \
799 799 FC_TRACE_LOG_MSG)
800 800 #define FCP_MSG_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF | \
801 801 FC_TRACE_LOG_MSG)
802 802 #define FCP_MSG_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF | \
803 803 FC_TRACE_LOG_MSG)
804 804 #ifdef DEBUG
805 805 #define FCP_DTRACE fc_trace_debug
806 806 #else
807 807 #define FCP_DTRACE
808 808 #endif
809 809
810 810 #define FCP_TRACE fc_trace_debug
811 811
812 812 static struct cb_ops fcp_cb_ops = {
813 813 fcp_open, /* open */
814 814 fcp_close, /* close */
815 815 nodev, /* strategy */
816 816 nodev, /* print */
817 817 nodev, /* dump */
818 818 nodev, /* read */
819 819 nodev, /* write */
820 820 fcp_ioctl, /* ioctl */
821 821 nodev, /* devmap */
822 822 nodev, /* mmap */
823 823 nodev, /* segmap */
824 824 nochpoll, /* chpoll */
825 825 ddi_prop_op, /* cb_prop_op */
826 826 0, /* streamtab */
827 827 D_NEW | D_MP | D_HOTPLUG, /* cb_flag */
828 828 CB_REV, /* rev */
829 829 nodev, /* aread */
830 830 nodev /* awrite */
831 831 };
832 832
833 833
834 834 static struct dev_ops fcp_ops = {
835 835 DEVO_REV,
836 836 0,
837 837 ddi_getinfo_1to1,
838 838 nulldev, /* identify */
839 839 nulldev, /* probe */
840 840 fcp_attach, /* attach and detach are mandatory */
841 841 fcp_detach,
842 842 nodev, /* reset */
843 843 &fcp_cb_ops, /* cb_ops */
844 844 NULL, /* bus_ops */
845 845 NULL, /* power */
846 846 };
847 847
848 848
849 849 char *fcp_version = FCP_NAME_VERSION;
850 850
851 851 static struct modldrv modldrv = {
852 852 &mod_driverops,
853 853 FCP_NAME_VERSION,
854 854 &fcp_ops
855 855 };
856 856
857 857
858 858 static struct modlinkage modlinkage = {
859 859 MODREV_1,
860 860 &modldrv,
861 861 NULL
862 862 };
863 863
864 864
865 865 static fc_ulp_modinfo_t fcp_modinfo = {
866 866 &fcp_modinfo, /* ulp_handle */
867 867 FCTL_ULP_MODREV_4, /* ulp_rev */
868 868 FC4_SCSI_FCP, /* ulp_type */
869 869 "fcp", /* ulp_name */
870 870 FCP_STATEC_MASK, /* ulp_statec_mask */
871 871 fcp_port_attach, /* ulp_port_attach */
872 872 fcp_port_detach, /* ulp_port_detach */
873 873 fcp_port_ioctl, /* ulp_port_ioctl */
874 874 fcp_els_callback, /* ulp_els_callback */
875 875 fcp_data_callback, /* ulp_data_callback */
876 876 fcp_statec_callback /* ulp_statec_callback */
877 877 };
878 878
879 879 #ifdef DEBUG
880 880 #define FCP_TRACE_DEFAULT (FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
881 881 FCP_LEVEL_2 | FCP_LEVEL_3 | \
882 882 FCP_LEVEL_4 | FCP_LEVEL_5 | \
883 883 FCP_LEVEL_6 | FCP_LEVEL_7)
884 884 #else
885 885 #define FCP_TRACE_DEFAULT (FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
886 886 FCP_LEVEL_2 | FCP_LEVEL_3 | \
887 887 FCP_LEVEL_4 | FCP_LEVEL_5 | \
888 888 FCP_LEVEL_6 | FCP_LEVEL_7)
889 889 #endif
890 890
891 891 /* FCP global variables */
892 892 int fcp_bus_config_debug = 0;
893 893 static int fcp_log_size = FCP_LOG_SIZE;
894 894 static int fcp_trace = FCP_TRACE_DEFAULT;
895 895 static fc_trace_logq_t *fcp_logq = NULL;
896 896 static struct fcp_black_list_entry *fcp_lun_blacklist = NULL;
897 897 /*
898 898 * The auto-configuration is set by default. The only way of disabling it is
899 899 * through the property MANUAL_CFG_ONLY in the fcp.conf file.
900 900 */
901 901 static int fcp_enable_auto_configuration = 1;
902 902 static int fcp_max_bus_config_retries = 4;
903 903 static int fcp_lun_ready_retry = 300;
904 904 /*
905 905 * The value assigned to the following variable has changed several times due
906 906 * to a problem with the data underruns reporting of some firmware(s). The
907 907 * current value of 50 gives a timeout value of 25 seconds for a max number
908 908 * of 256 LUNs.
909 909 */
910 910 static int fcp_max_target_retries = 50;
911 911 /*
912 912 * Watchdog variables
913 913 * ------------------
914 914 *
915 915 * fcp_watchdog_init
916 916 *
917 917 * Indicates if the watchdog timer is running or not. This is actually
918 918 * a counter of the number of Fibre Channel ports that attached. When
919 919 * the first port attaches the watchdog is started. When the last port
920 920 * detaches the watchdog timer is stopped.
921 921 *
922 922 * fcp_watchdog_time
923 923 *
924 924  *	This is the watchdog clock counter. It is incremented by
925 925  *	fcp_watchdog_timeout each time the watchdog timer expires.
926 926 *
927 927 * fcp_watchdog_timeout
928 928 *
929 929  *	Increment value of the variable fcp_watchdog_time as well as
930 930  *	the timeout value of the watchdog timer. The unit is 1 second. It
931 931 * is strange that this is not a #define but a variable since the code
932 932 * never changes this value. The reason why it can be said that the
933 933 * unit is 1 second is because the number of ticks for the watchdog
934 934 * timer is determined like this:
935 935 *
936 936 * fcp_watchdog_tick = fcp_watchdog_timeout *
937 937 * drv_usectohz(1000000);
938 938 *
939 939 * The value 1000000 is hard coded in the code.
940 940 *
941 941 * fcp_watchdog_tick
942 942 *
943 943 * Watchdog timer value in ticks.
944 944 */
945 945 static int fcp_watchdog_init = 0;
946 946 static int fcp_watchdog_time = 0;
947 947 static int fcp_watchdog_timeout = 1;
948 948 static int fcp_watchdog_tick;
949 949
950 950 /*
951 951 * fcp_offline_delay is a global variable to enable customisation of
952 952 * the timeout on link offlines or RSCNs. The default value is set
953 953 * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
954 954 * specified in FCP4 Chapter 11 (see www.t10.org).
955 955 *
956 956 * The variable fcp_offline_delay is specified in SECONDS.
957 957 *
958 958 * If we made this a static var then the user would not be able to
959 959 * change it. This variable is set in fcp_attach().
960 960 */
961 961 unsigned int fcp_offline_delay = FCP_OFFLINE_DELAY;
962 962
963 963 static void *fcp_softstate = NULL; /* for soft state */
964 964 static uchar_t fcp_oflag = FCP_IDLE; /* open flag */
965 965 static kmutex_t fcp_global_mutex;
966 966 static kmutex_t fcp_ioctl_mutex;
967 967 static dev_info_t *fcp_global_dip = NULL;
968 968 static timeout_id_t fcp_watchdog_id;
969 969 const char *fcp_lun_prop = "lun";
970 970 const char *fcp_sam_lun_prop = "sam-lun";
971 971 const char *fcp_target_prop = "target";
972 972 /*
973 973 * NOTE: consumers of "node-wwn" property include stmsboot in ON
974 974 * consolidation.
975 975 */
976 976 const char *fcp_node_wwn_prop = "node-wwn";
977 977 const char *fcp_port_wwn_prop = "port-wwn";
978 978 const char *fcp_conf_wwn_prop = "fc-port-wwn";
979 979 const char *fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
980 980 const char *fcp_manual_config_only = "manual_configuration_only";
981 981 const char *fcp_init_port_prop = "initiator-port";
982 982 const char *fcp_tgt_port_prop = "target-port";
983 983 const char *fcp_lun_blacklist_prop = "pwwn-lun-blacklist";
984 984
985 985 static struct fcp_port *fcp_port_head = NULL;
986 986 static ddi_eventcookie_t fcp_insert_eid;
987 987 static ddi_eventcookie_t fcp_remove_eid;
988 988
989 989 static ndi_event_definition_t fcp_ndi_event_defs[] = {
990 990 { FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
991 991 { FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
992 992 };
993 993
994 994 /*
995 995 * List of valid commands for the scsi_ioctl call
996 996 */
997 997 static uint8_t scsi_ioctl_list[] = {
998 998 SCMD_INQUIRY,
999 999 SCMD_REPORT_LUN,
1000 1000 SCMD_READ_CAPACITY
1001 1001 };
1002 1002
1003 1003 /*
1004 1004 * this is used to dummy up a report lun response for cases
1005 1005 * where the target doesn't support it
1006 1006 */
1007 1007 static uchar_t fcp_dummy_lun[] = {
1008 1008 0x00, /* MSB length (length = no of luns * 8) */
1009 1009 0x00,
1010 1010 0x00,
1011 1011 0x08, /* LSB length */
1012 1012 0x00, /* MSB reserved */
1013 1013 0x00,
1014 1014 0x00,
1015 1015 0x00, /* LSB reserved */
1016 1016 FCP_PD_ADDRESSING,
1017 1017 0x00, /* LUN is ZERO at the first level */
1018 1018 0x00,
1019 1019 0x00, /* second level is zero */
1020 1020 0x00,
1021 1021 0x00, /* third level is zero */
1022 1022 0x00,
1023 1023 0x00 /* fourth level is zero */
1024 1024 };
1025 1025
1026 1026 static uchar_t fcp_alpa_to_switch[] = {
1027 1027 0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
1028 1028 0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
1029 1029 0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
1030 1030 0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
1031 1031 0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
1032 1032 0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
1033 1033 0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
1034 1034 0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
1035 1035 0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
1036 1036 0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
1037 1037 0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
1038 1038 0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
1039 1039 0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
1040 1040 0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
1041 1041 0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
1042 1042 0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
1043 1043 0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
1044 1044 0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
↓ open down ↓ |
1044 lines elided |
↑ open up ↑ |
1045 1045 0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
1046 1046 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
1047 1047 0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
1048 1048 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
1049 1049 0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
1050 1050 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1051 1051 };
1052 1052
1053 1053 static caddr_t pid = "SESS01 ";
1054 1054
1055 -#if !defined(lint)
1056 -
1057 -_NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
1058 - fcp_port::fcp_next fcp_watchdog_id))
1059 -
1060 -_NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))
1061 -
1062 -_NOTE(SCHEME_PROTECTS_DATA("Unshared",
1063 - fcp_insert_eid
1064 - fcp_remove_eid
1065 - fcp_watchdog_time))
1066 -
1067 -_NOTE(SCHEME_PROTECTS_DATA("Unshared",
1068 - fcp_cb_ops
1069 - fcp_ops
1070 - callb_cpr))
1071 -
1072 -#endif /* lint */
1073 -
1074 1055 /*
1075 1056 * This table is used to determine whether or not it's safe to copy in
1076 1057 * the target node name for a lun. Since all luns behind the same target
1077 1058  * have the same wwnn, only targets that do not support multiple luns are
1078 1059 * eligible to be enumerated under mpxio if they aren't page83 compliant.
1079 1060 */
1080 1061
1081 1062 char *fcp_symmetric_disk_table[] = {
1082 1063 "SEAGATE ST",
1083 1064 "IBM DDYFT",
1084 1065 "SUNW SUNWGS", /* Daktari enclosure */
1085 1066 "SUN SENA", /* SES device */
1086 1067 "SUN SESS01" /* VICOM SVE box */
1087 1068 };
1088 1069
1089 1070 int fcp_symmetric_disk_table_size =
1090 1071 sizeof (fcp_symmetric_disk_table)/sizeof (char *);
1091 1072
1092 1073 /*
1093 1074  * This structure is bogus. scsi_hba_attach_setup() requires this information;
1094 1075  * the kernel will panic if you don't pass it in to the routine.
1095 1076 * Need to determine what the actual impact to the system is by providing
1096 1077 * this information if any. Since dma allocation is done in pkt_init it may
1097 1078 * not have any impact. These values are straight from the Writing Device
1098 1079 * Driver manual.
1099 1080 */
1100 1081 static ddi_dma_attr_t pseudo_fca_dma_attr = {
1101 1082 DMA_ATTR_V0, /* ddi_dma_attr version */
1102 1083 0, /* low address */
1103 1084 0xffffffff, /* high address */
1104 1085 0x00ffffff, /* counter upper bound */
1105 1086 1, /* alignment requirements */
1106 1087 0x3f, /* burst sizes */
1107 1088 1, /* minimum DMA access */
1108 1089 0xffffffff, /* maximum DMA access */
1109 1090 (1 << 24) - 1, /* segment boundary restrictions */
1110 1091 	1,			/* scatter/gather list length */
1111 1092 512, /* device granularity */
1112 1093 0 /* DMA flags */
1113 1094 };
1114 1095
1115 1096 /*
1116 1097 * The _init(9e) return value should be that of mod_install(9f). Under
1117 1098  * some circumstances, a failure may not be related to mod_install(9f) and
1118 1099 * one would then require a return value to indicate the failure. Looking
1119 1100 * at mod_install(9f), it is expected to return 0 for success and non-zero
1120 1101 * for failure. mod_install(9f) for device drivers, further goes down the
1121 1102 * calling chain and ends up in ddi_installdrv(), whose return values are
1122 1103 * DDI_SUCCESS and DDI_FAILURE - There are also other functions in the
1123 1104 * calling chain of mod_install(9f) which return values like EINVAL and
1124 1105 * in some even return -1.
1125 1106 *
1126 1107 * To work around the vagaries of the mod_install() calling chain, return
1127 1108 * either 0 or ENODEV depending on the success or failure of mod_install()
1128 1109 */
1129 1110 int
1130 1111 _init(void)
1131 1112 {
1132 1113 int rval;
1133 1114
1134 1115 /*
1135 1116 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
1136 1117 * before registering with the transport first.
1137 1118 */
1138 1119 if (ddi_soft_state_init(&fcp_softstate,
1139 1120 sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1140 1121 return (EINVAL);
1141 1122 }
1142 1123
1143 1124 mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1144 1125 mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1145 1126
1146 1127 if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1147 1128 cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1148 1129 mutex_destroy(&fcp_global_mutex);
1149 1130 mutex_destroy(&fcp_ioctl_mutex);
1150 1131 ddi_soft_state_fini(&fcp_softstate);
1151 1132 return (ENODEV);
1152 1133 }
1153 1134
1154 1135 fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1155 1136
1156 1137 if ((rval = mod_install(&modlinkage)) != 0) {
1157 1138 fc_trace_free_logq(fcp_logq);
1158 1139 (void) fc_ulp_remove(&fcp_modinfo);
1159 1140 mutex_destroy(&fcp_global_mutex);
1160 1141 mutex_destroy(&fcp_ioctl_mutex);
1161 1142 ddi_soft_state_fini(&fcp_softstate);
1162 1143 rval = ENODEV;
1163 1144 }
1164 1145
1165 1146 return (rval);
1166 1147 }
1167 1148
1168 1149
1169 1150 /*
1170 1151 * the system is done with us as a driver, so clean up
1171 1152 */
1172 1153 int
1173 1154 _fini(void)
1174 1155 {
1175 1156 int rval;
1176 1157
1177 1158 /*
1178 1159 * don't start cleaning up until we know that the module remove
1179 1160 * has worked -- if this works, then we know that each instance
1180 1161 * has successfully been DDI_DETACHed
1181 1162 */
1182 1163 if ((rval = mod_remove(&modlinkage)) != 0) {
1183 1164 return (rval);
1184 1165 }
1185 1166
1186 1167 (void) fc_ulp_remove(&fcp_modinfo);
1187 1168
1188 1169 ddi_soft_state_fini(&fcp_softstate);
1189 1170 mutex_destroy(&fcp_global_mutex);
1190 1171 mutex_destroy(&fcp_ioctl_mutex);
1191 1172 fc_trace_free_logq(fcp_logq);
1192 1173
1193 1174 return (rval);
1194 1175 }
1195 1176
1196 1177
1197 1178 int
1198 1179 _info(struct modinfo *modinfop)
1199 1180 {
1200 1181 return (mod_info(&modlinkage, modinfop));
1201 1182 }
1202 1183
1203 1184
1204 1185 /*
1205 1186 * attach the module
1206 1187 */
1207 1188 static int
1208 1189 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1209 1190 {
1210 1191 int rval = DDI_SUCCESS;
1211 1192
1212 1193 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1213 1194 FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1214 1195
1215 1196 if (cmd == DDI_ATTACH) {
1216 1197 /* The FCP pseudo device is created here. */
1217 1198 mutex_enter(&fcp_global_mutex);
1218 1199 fcp_global_dip = devi;
1219 1200 mutex_exit(&fcp_global_mutex);
1220 1201
1221 1202 if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1222 1203 0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1223 1204 ddi_report_dev(fcp_global_dip);
1224 1205 } else {
1225 1206 cmn_err(CE_WARN, "FCP: Cannot create minor node");
1226 1207 mutex_enter(&fcp_global_mutex);
1227 1208 fcp_global_dip = NULL;
1228 1209 mutex_exit(&fcp_global_mutex);
1229 1210
1230 1211 rval = DDI_FAILURE;
1231 1212 }
1232 1213 /*
1233 1214 * We check the fcp_offline_delay property at this
1234 1215 * point. This variable is global for the driver,
1235 1216 * not specific to an instance.
1236 1217 *
1237 1218 * We do not recommend setting the value to less
1238 1219 * than 10 seconds (RA_TOV_els), or greater than
1239 1220 * 60 seconds.
1240 1221 */
1241 1222 fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1242 1223 devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1243 1224 "fcp_offline_delay", FCP_OFFLINE_DELAY);
1244 1225 if ((fcp_offline_delay < 10) ||
1245 1226 (fcp_offline_delay > 60)) {
1246 1227 cmn_err(CE_WARN, "Setting fcp_offline_delay "
1247 1228 "to %d second(s). This is outside the "
1248 1229 "recommended range of 10..60 seconds.",
1249 1230 fcp_offline_delay);
1250 1231 }
1251 1232 }
1252 1233
1253 1234 return (rval);
1254 1235 }
1255 1236
1256 1237
1257 1238 /*ARGSUSED*/
1258 1239 static int
1259 1240 fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1260 1241 {
1261 1242 int res = DDI_SUCCESS;
1262 1243
1263 1244 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1264 1245 FCP_BUF_LEVEL_8, 0, "module detach: cmd=0x%x", cmd);
1265 1246
1266 1247 if (cmd == DDI_DETACH) {
1267 1248 /*
1268 1249 * Check if there are active ports/threads. If there
1269 1250 * are any, we will fail, else we will succeed (there
1270 1251 * should not be much to clean up)
1271 1252 */
1272 1253 mutex_enter(&fcp_global_mutex);
1273 1254 FCP_DTRACE(fcp_logq, "fcp",
1274 1255 fcp_trace, FCP_BUF_LEVEL_8, 0, "port_head=%p",
1275 1256 (void *) fcp_port_head);
1276 1257
1277 1258 if (fcp_port_head == NULL) {
1278 1259 ddi_remove_minor_node(fcp_global_dip, NULL);
1279 1260 fcp_global_dip = NULL;
1280 1261 mutex_exit(&fcp_global_mutex);
1281 1262 } else {
1282 1263 mutex_exit(&fcp_global_mutex);
1283 1264 res = DDI_FAILURE;
1284 1265 }
1285 1266 }
1286 1267 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1287 1268 FCP_BUF_LEVEL_8, 0, "module detach returning %d", res);
1288 1269
1289 1270 return (res);
1290 1271 }
1291 1272
1292 1273
1293 1274 /* ARGSUSED */
1294 1275 static int
1295 1276 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1296 1277 {
1297 1278 if (otype != OTYP_CHR) {
1298 1279 return (EINVAL);
1299 1280 }
1300 1281
1301 1282 /*
1302 1283 * Allow only root to talk;
1303 1284 */
1304 1285 if (drv_priv(credp)) {
1305 1286 return (EPERM);
1306 1287 }
1307 1288
1308 1289 mutex_enter(&fcp_global_mutex);
1309 1290 if (fcp_oflag & FCP_EXCL) {
1310 1291 mutex_exit(&fcp_global_mutex);
1311 1292 return (EBUSY);
1312 1293 }
1313 1294
1314 1295 if (flag & FEXCL) {
1315 1296 if (fcp_oflag & FCP_OPEN) {
1316 1297 mutex_exit(&fcp_global_mutex);
1317 1298 return (EBUSY);
1318 1299 }
1319 1300 fcp_oflag |= FCP_EXCL;
1320 1301 }
1321 1302 fcp_oflag |= FCP_OPEN;
1322 1303 mutex_exit(&fcp_global_mutex);
1323 1304
1324 1305 return (0);
1325 1306 }
1326 1307
1327 1308
1328 1309 /* ARGSUSED */
1329 1310 static int
1330 1311 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1331 1312 {
1332 1313 if (otype != OTYP_CHR) {
1333 1314 return (EINVAL);
1334 1315 }
1335 1316
1336 1317 mutex_enter(&fcp_global_mutex);
1337 1318 if (!(fcp_oflag & FCP_OPEN)) {
1338 1319 mutex_exit(&fcp_global_mutex);
1339 1320 return (ENODEV);
1340 1321 }
1341 1322 fcp_oflag = FCP_IDLE;
1342 1323 mutex_exit(&fcp_global_mutex);
1343 1324
1344 1325 return (0);
1345 1326 }
1346 1327
1347 1328
1348 1329 /*
1349 1330 * fcp_ioctl
1350 1331 * Entry point for the FCP ioctls
1351 1332 *
1352 1333 * Input:
1353 1334 * See ioctl(9E)
1354 1335 *
1355 1336 * Output:
1356 1337 * See ioctl(9E)
1357 1338 *
1358 1339 * Returns:
1359 1340 * See ioctl(9E)
1360 1341 *
1361 1342 * Context:
1362 1343 * Kernel context.
1363 1344 */
1364 1345 /* ARGSUSED */
1365 1346 static int
1366 1347 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1367 1348 int *rval)
1368 1349 {
1369 1350 int ret = 0;
1370 1351
1371 1352 mutex_enter(&fcp_global_mutex);
1372 1353 if (!(fcp_oflag & FCP_OPEN)) {
1373 1354 mutex_exit(&fcp_global_mutex);
1374 1355 return (ENXIO);
1375 1356 }
1376 1357 mutex_exit(&fcp_global_mutex);
1377 1358
1378 1359 switch (cmd) {
1379 1360 case FCP_TGT_INQUIRY:
1380 1361 case FCP_TGT_CREATE:
1381 1362 case FCP_TGT_DELETE:
1382 1363 ret = fcp_setup_device_data_ioctl(cmd,
1383 1364 (struct fcp_ioctl *)data, mode, rval);
1384 1365 break;
1385 1366
1386 1367 case FCP_TGT_SEND_SCSI:
1387 1368 mutex_enter(&fcp_ioctl_mutex);
1388 1369 ret = fcp_setup_scsi_ioctl(
1389 1370 (struct fcp_scsi_cmd *)data, mode, rval);
1390 1371 mutex_exit(&fcp_ioctl_mutex);
1391 1372 break;
1392 1373
1393 1374 case FCP_STATE_COUNT:
1394 1375 ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1395 1376 mode, rval);
1396 1377 break;
1397 1378 case FCP_GET_TARGET_MAPPINGS:
1398 1379 ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1399 1380 mode, rval);
1400 1381 break;
1401 1382 default:
1402 1383 fcp_log(CE_WARN, NULL,
1403 1384 "!Invalid ioctl opcode = 0x%x", cmd);
1404 1385 ret = EINVAL;
1405 1386 }
1406 1387
1407 1388 return (ret);
1408 1389 }
1409 1390
1410 1391
1411 1392 /*
1412 1393 * fcp_setup_device_data_ioctl
1413 1394 * Setup handler for the "device data" style of
1414 1395 * ioctl for FCP. See "fcp_util.h" for data structure
1415 1396 * definition.
1416 1397 *
1417 1398 * Input:
1418 1399 * cmd = FCP ioctl command
1419 1400 * data = ioctl data
1420 1401 * mode = See ioctl(9E)
1421 1402 *
1422 1403 * Output:
1423 1404 * data = ioctl data
1424 1405 * rval = return value - see ioctl(9E)
1425 1406 *
1426 1407 * Returns:
1427 1408 * See ioctl(9E)
1428 1409 *
1429 1410 * Context:
1430 1411 * Kernel context.
1431 1412 */
1432 1413 /* ARGSUSED */
1433 1414 static int
1434 1415 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1435 1416 int *rval)
1436 1417 {
1437 1418 struct fcp_port *pptr;
1438 1419 struct device_data *dev_data;
1439 1420 uint32_t link_cnt;
1440 1421 la_wwn_t *wwn_ptr = NULL;
1441 1422 struct fcp_tgt *ptgt = NULL;
1442 1423 struct fcp_lun *plun = NULL;
1443 1424 int i, error;
1444 1425 struct fcp_ioctl fioctl;
1445 1426
1446 1427 #ifdef _MULTI_DATAMODEL
1447 1428 switch (ddi_model_convert_from(mode & FMODELS)) {
1448 1429 case DDI_MODEL_ILP32: {
1449 1430 struct fcp32_ioctl f32_ioctl;
1450 1431
1451 1432 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1452 1433 sizeof (struct fcp32_ioctl), mode)) {
1453 1434 return (EFAULT);
1454 1435 }
1455 1436 fioctl.fp_minor = f32_ioctl.fp_minor;
1456 1437 fioctl.listlen = f32_ioctl.listlen;
1457 1438 fioctl.list = (caddr_t)(long)f32_ioctl.list;
1458 1439 break;
1459 1440 }
1460 1441 case DDI_MODEL_NONE:
1461 1442 if (ddi_copyin((void *)data, (void *)&fioctl,
1462 1443 sizeof (struct fcp_ioctl), mode)) {
1463 1444 return (EFAULT);
1464 1445 }
1465 1446 break;
1466 1447 }
1467 1448
1468 1449 #else /* _MULTI_DATAMODEL */
1469 1450 if (ddi_copyin((void *)data, (void *)&fioctl,
1470 1451 sizeof (struct fcp_ioctl), mode)) {
1471 1452 return (EFAULT);
1472 1453 }
1473 1454 #endif /* _MULTI_DATAMODEL */
1474 1455
1475 1456 /*
1476 1457 * Right now we can assume that the minor number matches with
1477 1458 * this instance of fp. If this changes we will need to
1478 1459 * revisit this logic.
1479 1460 */
1480 1461 mutex_enter(&fcp_global_mutex);
1481 1462 pptr = fcp_port_head;
1482 1463 while (pptr) {
1483 1464 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1484 1465 break;
1485 1466 } else {
1486 1467 pptr = pptr->port_next;
1487 1468 }
1488 1469 }
1489 1470 mutex_exit(&fcp_global_mutex);
1490 1471 if (pptr == NULL) {
1491 1472 return (ENXIO);
1492 1473 }
1493 1474 mutex_enter(&pptr->port_mutex);
1494 1475
1495 1476
1496 1477 if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1497 1478 fioctl.listlen, KM_NOSLEEP)) == NULL) {
1498 1479 mutex_exit(&pptr->port_mutex);
1499 1480 return (ENOMEM);
1500 1481 }
1501 1482
1502 1483 if (ddi_copyin(fioctl.list, dev_data,
1503 1484 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1504 1485 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1505 1486 mutex_exit(&pptr->port_mutex);
1506 1487 return (EFAULT);
1507 1488 }
1508 1489 link_cnt = pptr->port_link_cnt;
1509 1490
1510 1491 if (cmd == FCP_TGT_INQUIRY) {
1511 1492 wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1512 1493 if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1513 1494 sizeof (wwn_ptr->raw_wwn)) == 0) {
1514 1495 /* This ioctl is requesting INQ info of local HBA */
1515 1496 mutex_exit(&pptr->port_mutex);
1516 1497 dev_data[0].dev0_type = DTYPE_UNKNOWN;
1517 1498 dev_data[0].dev_status = 0;
1518 1499 if (ddi_copyout(dev_data, fioctl.list,
1519 1500 (sizeof (struct device_data)) * fioctl.listlen,
1520 1501 mode)) {
1521 1502 kmem_free(dev_data,
1522 1503 sizeof (*dev_data) * fioctl.listlen);
1523 1504 return (EFAULT);
1524 1505 }
1525 1506 kmem_free(dev_data,
1526 1507 sizeof (*dev_data) * fioctl.listlen);
1527 1508 #ifdef _MULTI_DATAMODEL
1528 1509 switch (ddi_model_convert_from(mode & FMODELS)) {
1529 1510 case DDI_MODEL_ILP32: {
1530 1511 struct fcp32_ioctl f32_ioctl;
1531 1512 f32_ioctl.fp_minor = fioctl.fp_minor;
1532 1513 f32_ioctl.listlen = fioctl.listlen;
1533 1514 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1534 1515 if (ddi_copyout((void *)&f32_ioctl,
1535 1516 (void *)data,
1536 1517 sizeof (struct fcp32_ioctl), mode)) {
1537 1518 return (EFAULT);
1538 1519 }
1539 1520 break;
1540 1521 }
1541 1522 case DDI_MODEL_NONE:
1542 1523 if (ddi_copyout((void *)&fioctl, (void *)data,
1543 1524 sizeof (struct fcp_ioctl), mode)) {
1544 1525 return (EFAULT);
1545 1526 }
1546 1527 break;
1547 1528 }
1548 1529 #else /* _MULTI_DATAMODEL */
1549 1530 if (ddi_copyout((void *)&fioctl, (void *)data,
1550 1531 sizeof (struct fcp_ioctl), mode)) {
1551 1532 return (EFAULT);
1552 1533 }
1553 1534 #endif /* _MULTI_DATAMODEL */
1554 1535 return (0);
1555 1536 }
1556 1537 }
1557 1538
1558 1539 if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1559 1540 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1560 1541 mutex_exit(&pptr->port_mutex);
1561 1542 return (ENXIO);
1562 1543 }
1563 1544
1564 1545 for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1565 1546 i++) {
1566 1547 wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1567 1548
1568 1549 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1569 1550
1570 1551
1571 1552 dev_data[i].dev_status = ENXIO;
1572 1553
1573 1554 if ((ptgt = fcp_lookup_target(pptr,
1574 1555 (uchar_t *)wwn_ptr)) == NULL) {
1575 1556 mutex_exit(&pptr->port_mutex);
1576 1557 if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1577 1558 wwn_ptr, &error, 0) == NULL) {
1578 1559 dev_data[i].dev_status = ENODEV;
1579 1560 mutex_enter(&pptr->port_mutex);
1580 1561 continue;
1581 1562 } else {
1582 1563
1583 1564 dev_data[i].dev_status = EAGAIN;
1584 1565
1585 1566 mutex_enter(&pptr->port_mutex);
1586 1567 continue;
1587 1568 }
1588 1569 } else {
1589 1570 mutex_enter(&ptgt->tgt_mutex);
1590 1571 if (ptgt->tgt_state & (FCP_TGT_MARK |
1591 1572 FCP_TGT_BUSY)) {
1592 1573 dev_data[i].dev_status = EAGAIN;
1593 1574 mutex_exit(&ptgt->tgt_mutex);
1594 1575 continue;
1595 1576 }
1596 1577
1597 1578 if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1598 1579 if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1599 1580 dev_data[i].dev_status = ENOTSUP;
1600 1581 } else {
1601 1582 dev_data[i].dev_status = ENXIO;
1602 1583 }
1603 1584 mutex_exit(&ptgt->tgt_mutex);
1604 1585 continue;
1605 1586 }
1606 1587
1607 1588 switch (cmd) {
1608 1589 case FCP_TGT_INQUIRY:
1609 1590 /*
1610 1591 * The reason we give device type of
1611 1592 * lun 0 only even though in some
1612 1593 * cases(like maxstrat) lun 0 device
1613 1594 * type may be 0x3f(invalid) is that
1614 1595 * for bridge boxes target will appear
1615 1596 * as luns and the first lun could be
1616 1597 * a device that utility may not care
1617 1598 * about (like a tape device).
1618 1599 */
1619 1600 dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1620 1601 dev_data[i].dev_status = 0;
1621 1602 mutex_exit(&ptgt->tgt_mutex);
1622 1603
1623 1604 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1624 1605 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1625 1606 } else {
1626 1607 dev_data[i].dev0_type = plun->lun_type;
1627 1608 }
1628 1609 mutex_enter(&ptgt->tgt_mutex);
1629 1610 break;
1630 1611
1631 1612 case FCP_TGT_CREATE:
1632 1613 mutex_exit(&ptgt->tgt_mutex);
1633 1614 mutex_exit(&pptr->port_mutex);
1634 1615
1635 1616 /*
1636 1617 * serialize state change call backs.
1637 1618 * only one call back will be handled
1638 1619 * at a time.
1639 1620 */
1640 1621 mutex_enter(&fcp_global_mutex);
1641 1622 if (fcp_oflag & FCP_BUSY) {
1642 1623 mutex_exit(&fcp_global_mutex);
1643 1624 if (dev_data) {
1644 1625 kmem_free(dev_data,
1645 1626 sizeof (*dev_data) *
1646 1627 fioctl.listlen);
1647 1628 }
1648 1629 return (EBUSY);
1649 1630 }
1650 1631 fcp_oflag |= FCP_BUSY;
1651 1632 mutex_exit(&fcp_global_mutex);
1652 1633
1653 1634 dev_data[i].dev_status =
1654 1635 fcp_create_on_demand(pptr,
1655 1636 wwn_ptr->raw_wwn);
1656 1637
1657 1638 if (dev_data[i].dev_status != 0) {
1658 1639 char buf[25];
1659 1640
1660 1641 for (i = 0; i < FC_WWN_SIZE; i++) {
1661 1642 (void) sprintf(&buf[i << 1],
1662 1643 "%02x",
1663 1644 wwn_ptr->raw_wwn[i]);
1664 1645 }
1665 1646
1666 1647 fcp_log(CE_WARN, pptr->port_dip,
1667 1648 "!Failed to create nodes for"
1668 1649 " pwwn=%s; error=%x", buf,
1669 1650 dev_data[i].dev_status);
1670 1651 }
1671 1652
1672 1653 /* allow state change call backs again */
1673 1654 mutex_enter(&fcp_global_mutex);
1674 1655 fcp_oflag &= ~FCP_BUSY;
1675 1656 mutex_exit(&fcp_global_mutex);
1676 1657
1677 1658 mutex_enter(&pptr->port_mutex);
1678 1659 mutex_enter(&ptgt->tgt_mutex);
1679 1660
1680 1661 break;
1681 1662
1682 1663 case FCP_TGT_DELETE:
1683 1664 break;
1684 1665
1685 1666 default:
1686 1667 fcp_log(CE_WARN, pptr->port_dip,
1687 1668 "!Invalid device data ioctl "
1688 1669 "opcode = 0x%x", cmd);
1689 1670 }
1690 1671 mutex_exit(&ptgt->tgt_mutex);
1691 1672 }
1692 1673 }
1693 1674 mutex_exit(&pptr->port_mutex);
1694 1675
1695 1676 if (ddi_copyout(dev_data, fioctl.list,
1696 1677 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1697 1678 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1698 1679 return (EFAULT);
1699 1680 }
1700 1681 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1701 1682
1702 1683 #ifdef _MULTI_DATAMODEL
1703 1684 switch (ddi_model_convert_from(mode & FMODELS)) {
1704 1685 case DDI_MODEL_ILP32: {
1705 1686 struct fcp32_ioctl f32_ioctl;
1706 1687
1707 1688 f32_ioctl.fp_minor = fioctl.fp_minor;
1708 1689 f32_ioctl.listlen = fioctl.listlen;
1709 1690 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1710 1691 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1711 1692 sizeof (struct fcp32_ioctl), mode)) {
1712 1693 return (EFAULT);
1713 1694 }
1714 1695 break;
1715 1696 }
1716 1697 case DDI_MODEL_NONE:
1717 1698 if (ddi_copyout((void *)&fioctl, (void *)data,
1718 1699 sizeof (struct fcp_ioctl), mode)) {
1719 1700 return (EFAULT);
1720 1701 }
1721 1702 break;
1722 1703 }
1723 1704 #else /* _MULTI_DATAMODEL */
1724 1705
1725 1706 if (ddi_copyout((void *)&fioctl, (void *)data,
1726 1707 sizeof (struct fcp_ioctl), mode)) {
1727 1708 return (EFAULT);
1728 1709 }
1729 1710 #endif /* _MULTI_DATAMODEL */
1730 1711
1731 1712 return (0);
1732 1713 }
1733 1714
/*
 * Fetch the target mappings (path, etc.) for all LUNs
 * on this port.
 *
 * Copies an fcp_ioctl request in from user space (handling the ILP32
 * data model when built with _MULTI_DATAMODEL), locates the fcp_port
 * whose instance number matches fioctl.fp_minor, and fills the
 * caller-supplied buffer (fioctl.list, fioctl.listlen bytes) with one
 * fc_hba_mapping_entry_t per online LUN on the port.
 *
 * Returns:
 *	0	success (mappings and the fcp_ioctl copied back out)
 *	EFAULT	ddi_copyin()/ddi_copyout() failure
 *	ENXIO	unknown fp instance, or buffer too small for the header
 *	ENOMEM	mapping buffer allocation failure
 */
/* ARGSUSED */
static int
fcp_get_target_mappings(struct fcp_ioctl *data,
    int mode, int *rval)
{
	struct fcp_port	    *pptr;
	fc_hba_target_mappings_t    *mappings;
	fc_hba_mapping_entry_t	    *map;
	struct fcp_tgt	    *ptgt = NULL;
	struct fcp_lun	    *plun = NULL;
	int		    i, mapIndex, mappingSize;
	int		    listlen;
	struct fcp_ioctl    fioctl;
	char		    *path;
	fcp_ent_addr_t	    sam_lun_addr;

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct fcp32_ioctl f32_ioctl;

		/* 32-bit caller: copy in the ILP32 layout and widen it */
		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
		    sizeof (struct fcp32_ioctl), mode)) {
			return (EFAULT);
		}
		fioctl.fp_minor = f32_ioctl.fp_minor;
		fioctl.listlen = f32_ioctl.listlen;
		fioctl.list = (caddr_t)(long)f32_ioctl.list;
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin((void *)data, (void *)&fioctl,
		    sizeof (struct fcp_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}

#else	/* _MULTI_DATAMODEL */
	if (ddi_copyin((void *)data, (void *)&fioctl,
	    sizeof (struct fcp_ioctl), mode)) {
		return (EFAULT);
	}
#endif	/* _MULTI_DATAMODEL */

	/*
	 * Right now we can assume that the minor number matches with
	 * this instance of fp. If this changes we will need to
	 * revisit this logic.
	 */
	mutex_enter(&fcp_global_mutex);
	pptr = fcp_port_head;
	while (pptr) {
		if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
			break;
		} else {
			pptr = pptr->port_next;
		}
	}
	/*
	 * NOTE(review): the global mutex is dropped before pptr is used
	 * below — presumably ports are never unlinked while the driver
	 * is open; confirm against the detach path.
	 */
	mutex_exit(&fcp_global_mutex);
	if (pptr == NULL) {
		cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
		    fioctl.fp_minor);
		return (ENXIO);
	}


	/* We use listlen to show the total buffer size */
	mappingSize = fioctl.listlen;

	/*
	 * Now calculate how many mapping entries will fit: the buffer
	 * holds one fc_hba_target_mappings_t header whose trailing
	 * array carries the entries, hence the size adjustment below.
	 */
	listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
	    - sizeof (fc_hba_target_mappings_t);
	if (listlen <= 0) {
		cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
		return (ENXIO);
	}
	listlen = listlen / sizeof (fc_hba_mapping_entry_t);

	/*
	 * NOTE(review): KM_SLEEP allocations never return NULL, so the
	 * ENOMEM branch is dead code; also mappingSize is taken
	 * unbounded from user space — verify an upper limit is enforced
	 * by the caller.
	 */
	if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
		return (ENOMEM);
	}
	mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;

	/* Now get to work */
	mapIndex = 0;

	mutex_enter(&pptr->port_mutex);
	/* Loop through all targets on this port */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {

			mutex_enter(&ptgt->tgt_mutex);

			/* Loop through all LUNs on this target */
			for (plun = ptgt->tgt_lun; plun != NULL;
			    plun = plun->lun_next) {
				if (plun->lun_state & FCP_LUN_OFFLINE) {
					continue;
				}

				path = fcp_get_lun_path(plun);
				if (path == NULL) {
					continue;
				}

				/*
				 * Buffer full: keep counting (so
				 * numLuns below reports how many
				 * entries were actually needed) but
				 * don't store, and free the path.
				 */
				if (mapIndex >= listlen) {
					mapIndex ++;
					kmem_free(path, MAXPATHLEN);
					continue;
				}
				map = &mappings->entries[mapIndex++];
				bcopy(path, map->targetDriver,
				    sizeof (map->targetDriver));
				map->d_id = ptgt->tgt_d_id;
				map->busNumber = 0;
				map->targetNumber = ptgt->tgt_d_id;
				map->osLUN = plun->lun_num;

				/*
				 * We had swapped lun when we stored it in
				 * lun_addr. We need to swap it back before
				 * returning it to user land
				 */

				sam_lun_addr.ent_addr_0 =
				    BE_16(plun->lun_addr.ent_addr_0);
				sam_lun_addr.ent_addr_1 =
				    BE_16(plun->lun_addr.ent_addr_1);
				sam_lun_addr.ent_addr_2 =
				    BE_16(plun->lun_addr.ent_addr_2);
				sam_lun_addr.ent_addr_3 =
				    BE_16(plun->lun_addr.ent_addr_3);

				bcopy(&sam_lun_addr, &map->samLUN,
				    FCP_LUN_SIZE);
				bcopy(ptgt->tgt_node_wwn.raw_wwn,
				    map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
				bcopy(ptgt->tgt_port_wwn.raw_wwn,
				    map->PortWWN.raw_wwn, sizeof (la_wwn_t));

				if (plun->lun_guid) {

					/* convert ascii wwn to bytes */
					fcp_ascii_to_wwn(plun->lun_guid,
					    map->guid, sizeof (map->guid));

					/*
					 * Warn (but continue) if the
					 * GUID had to be truncated to
					 * fit map->guid.
					 */
					if ((sizeof (map->guid)) <
					    plun->lun_guid_size / 2) {
						cmn_err(CE_WARN,
						    "fcp_get_target_mappings:"
						    "guid copy space "
						    "insufficient."
						    "Copy Truncation - "
						    "available %d; need %d",
						    (int)sizeof (map->guid),
						    (int)
						    plun->lun_guid_size / 2);
					}
				}
				kmem_free(path, MAXPATHLEN);
			}
			mutex_exit(&ptgt->tgt_mutex);
		}
	}
	mutex_exit(&pptr->port_mutex);
	/* Total LUNs seen, including those that did not fit (see above) */
	mappings->numLuns = mapIndex;

	if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
		kmem_free(mappings, mappingSize);
		return (EFAULT);
	}
	kmem_free(mappings, mappingSize);

	/* Return the (unchanged) fcp_ioctl in the caller's data model */
#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct fcp32_ioctl f32_ioctl;

		f32_ioctl.fp_minor = fioctl.fp_minor;
		f32_ioctl.listlen = fioctl.listlen;
		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
		    sizeof (struct fcp32_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyout((void *)&fioctl, (void *)data,
		    sizeof (struct fcp_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
#else	/* _MULTI_DATAMODEL */

	if (ddi_copyout((void *)&fioctl, (void *)data,
	    sizeof (struct fcp_ioctl), mode)) {
		return (EFAULT);
	}
#endif	/* _MULTI_DATAMODEL */

	return (0);
}
1944 1925
1945 1926 /*
1946 1927 * fcp_setup_scsi_ioctl
1947 1928 * Setup handler for the "scsi passthru" style of
1948 1929 * ioctl for FCP. See "fcp_util.h" for data structure
1949 1930 * definition.
1950 1931 *
1951 1932 * Input:
1952 1933 * u_fscsi = ioctl data (user address space)
1953 1934 * mode = See ioctl(9E)
1954 1935 *
1955 1936 * Output:
1956 1937 * u_fscsi = ioctl data (user address space)
1957 1938 * rval = return value - see ioctl(9E)
1958 1939 *
1959 1940 * Returns:
1960 1941 * 0 = OK
1961 1942 * EAGAIN = See errno.h
1962 1943 * EBUSY = See errno.h
1963 1944 * EFAULT = See errno.h
1964 1945 * EINTR = See errno.h
1965 1946 * EINVAL = See errno.h
1966 1947 * EIO = See errno.h
1967 1948 * ENOMEM = See errno.h
1968 1949 * ENXIO = See errno.h
1969 1950 *
1970 1951 * Context:
1971 1952 * Kernel context.
1972 1953 */
1973 1954 /* ARGSUSED */
1974 1955 static int
1975 1956 fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
1976 1957 int mode, int *rval)
1977 1958 {
1978 1959 int ret = 0;
1979 1960 int temp_ret;
1980 1961 caddr_t k_cdbbufaddr = NULL;
1981 1962 caddr_t k_bufaddr = NULL;
1982 1963 caddr_t k_rqbufaddr = NULL;
1983 1964 caddr_t u_cdbbufaddr;
1984 1965 caddr_t u_bufaddr;
1985 1966 caddr_t u_rqbufaddr;
1986 1967 struct fcp_scsi_cmd k_fscsi;
1987 1968
1988 1969 /*
1989 1970 * Get fcp_scsi_cmd array element from user address space
1990 1971 */
1991 1972 if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
1992 1973 != 0) {
1993 1974 return (ret);
1994 1975 }
1995 1976
1996 1977
1997 1978 /*
1998 1979 * Even though kmem_alloc() checks the validity of the
1999 1980 * buffer length, this check is needed when the
2000 1981 * kmem_flags set and the zero buffer length is passed.
2001 1982 */
2002 1983 if ((k_fscsi.scsi_cdblen <= 0) ||
2003 1984 (k_fscsi.scsi_buflen <= 0) ||
2004 1985 (k_fscsi.scsi_rqlen <= 0)) {
2005 1986 return (EINVAL);
2006 1987 }
2007 1988
2008 1989 /*
2009 1990 * Allocate data for fcp_scsi_cmd pointer fields
2010 1991 */
2011 1992 if (ret == 0) {
2012 1993 k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
2013 1994 k_bufaddr = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
2014 1995 k_rqbufaddr = kmem_alloc(k_fscsi.scsi_rqlen, KM_NOSLEEP);
2015 1996
2016 1997 if (k_cdbbufaddr == NULL ||
2017 1998 k_bufaddr == NULL ||
2018 1999 k_rqbufaddr == NULL) {
2019 2000 ret = ENOMEM;
2020 2001 }
2021 2002 }
2022 2003
2023 2004 /*
2024 2005 * Get fcp_scsi_cmd pointer fields from user
2025 2006 * address space
2026 2007 */
2027 2008 if (ret == 0) {
2028 2009 u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
2029 2010 u_bufaddr = k_fscsi.scsi_bufaddr;
2030 2011 u_rqbufaddr = k_fscsi.scsi_rqbufaddr;
2031 2012
2032 2013 if (ddi_copyin(u_cdbbufaddr,
2033 2014 k_cdbbufaddr,
2034 2015 k_fscsi.scsi_cdblen,
2035 2016 mode)) {
2036 2017 ret = EFAULT;
2037 2018 } else if (ddi_copyin(u_bufaddr,
2038 2019 k_bufaddr,
2039 2020 k_fscsi.scsi_buflen,
2040 2021 mode)) {
2041 2022 ret = EFAULT;
2042 2023 } else if (ddi_copyin(u_rqbufaddr,
2043 2024 k_rqbufaddr,
2044 2025 k_fscsi.scsi_rqlen,
2045 2026 mode)) {
2046 2027 ret = EFAULT;
2047 2028 }
2048 2029 }
2049 2030
2050 2031 /*
2051 2032 * Send scsi command (blocking)
2052 2033 */
2053 2034 if (ret == 0) {
2054 2035 /*
2055 2036 * Prior to sending the scsi command, the
2056 2037 * fcp_scsi_cmd data structure must contain kernel,
2057 2038 * not user, addresses.
2058 2039 */
2059 2040 k_fscsi.scsi_cdbbufaddr = k_cdbbufaddr;
2060 2041 k_fscsi.scsi_bufaddr = k_bufaddr;
2061 2042 k_fscsi.scsi_rqbufaddr = k_rqbufaddr;
2062 2043
2063 2044 ret = fcp_send_scsi_ioctl(&k_fscsi);
2064 2045
2065 2046 /*
2066 2047 * After sending the scsi command, the
2067 2048 * fcp_scsi_cmd data structure must contain user,
2068 2049 * not kernel, addresses.
2069 2050 */
2070 2051 k_fscsi.scsi_cdbbufaddr = u_cdbbufaddr;
2071 2052 k_fscsi.scsi_bufaddr = u_bufaddr;
2072 2053 k_fscsi.scsi_rqbufaddr = u_rqbufaddr;
2073 2054 }
2074 2055
2075 2056 /*
2076 2057 * Put fcp_scsi_cmd pointer fields to user address space
2077 2058 */
2078 2059 if (ret == 0) {
2079 2060 if (ddi_copyout(k_cdbbufaddr,
2080 2061 u_cdbbufaddr,
2081 2062 k_fscsi.scsi_cdblen,
2082 2063 mode)) {
2083 2064 ret = EFAULT;
2084 2065 } else if (ddi_copyout(k_bufaddr,
2085 2066 u_bufaddr,
2086 2067 k_fscsi.scsi_buflen,
2087 2068 mode)) {
2088 2069 ret = EFAULT;
2089 2070 } else if (ddi_copyout(k_rqbufaddr,
2090 2071 u_rqbufaddr,
2091 2072 k_fscsi.scsi_rqlen,
2092 2073 mode)) {
2093 2074 ret = EFAULT;
2094 2075 }
2095 2076 }
2096 2077
2097 2078 /*
2098 2079 * Free data for fcp_scsi_cmd pointer fields
2099 2080 */
2100 2081 if (k_cdbbufaddr != NULL) {
2101 2082 kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
2102 2083 }
2103 2084 if (k_bufaddr != NULL) {
2104 2085 kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
2105 2086 }
2106 2087 if (k_rqbufaddr != NULL) {
2107 2088 kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
2108 2089 }
2109 2090
2110 2091 /*
2111 2092 * Put fcp_scsi_cmd array element to user address space
2112 2093 */
2113 2094 temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
2114 2095 if (temp_ret != 0) {
2115 2096 ret = temp_ret;
2116 2097 }
2117 2098
2118 2099 /*
2119 2100 * Return status
2120 2101 */
2121 2102 return (ret);
2122 2103 }
2123 2104
2124 2105
2125 2106 /*
2126 2107 * fcp_copyin_scsi_cmd
2127 2108 * Copy in fcp_scsi_cmd data structure from user address space.
2128 2109 * The data may be in 32 bit or 64 bit modes.
2129 2110 *
2130 2111 * Input:
2131 2112 * base_addr = from address (user address space)
2132 2113 * mode = See ioctl(9E) and ddi_copyin(9F)
2133 2114 *
2134 2115 * Output:
2135 2116 * fscsi = to address (kernel address space)
2136 2117 *
2137 2118 * Returns:
2138 2119 * 0 = OK
2139 2120 * EFAULT = Error
2140 2121 *
2141 2122 * Context:
2142 2123 * Kernel context.
2143 2124 */
2144 2125 static int
2145 2126 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2146 2127 {
2147 2128 #ifdef _MULTI_DATAMODEL
2148 2129 struct fcp32_scsi_cmd f32scsi;
2149 2130
2150 2131 switch (ddi_model_convert_from(mode & FMODELS)) {
2151 2132 case DDI_MODEL_ILP32:
2152 2133 /*
2153 2134 * Copy data from user address space
2154 2135 */
2155 2136 if (ddi_copyin((void *)base_addr,
2156 2137 &f32scsi,
2157 2138 sizeof (struct fcp32_scsi_cmd),
2158 2139 mode)) {
2159 2140 return (EFAULT);
2160 2141 }
2161 2142 /*
2162 2143 * Convert from 32 bit to 64 bit
2163 2144 */
2164 2145 FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2165 2146 break;
2166 2147 case DDI_MODEL_NONE:
2167 2148 /*
2168 2149 * Copy data from user address space
2169 2150 */
2170 2151 if (ddi_copyin((void *)base_addr,
2171 2152 fscsi,
2172 2153 sizeof (struct fcp_scsi_cmd),
2173 2154 mode)) {
2174 2155 return (EFAULT);
2175 2156 }
2176 2157 break;
2177 2158 }
2178 2159 #else /* _MULTI_DATAMODEL */
2179 2160 /*
2180 2161 * Copy data from user address space
2181 2162 */
2182 2163 if (ddi_copyin((void *)base_addr,
2183 2164 fscsi,
2184 2165 sizeof (struct fcp_scsi_cmd),
2185 2166 mode)) {
2186 2167 return (EFAULT);
2187 2168 }
2188 2169 #endif /* _MULTI_DATAMODEL */
2189 2170
2190 2171 return (0);
2191 2172 }
2192 2173
2193 2174
2194 2175 /*
2195 2176 * fcp_copyout_scsi_cmd
2196 2177 * Copy out fcp_scsi_cmd data structure to user address space.
2197 2178 * The data may be in 32 bit or 64 bit modes.
2198 2179 *
2199 2180 * Input:
2200 2181 * fscsi = to address (kernel address space)
2201 2182 * mode = See ioctl(9E) and ddi_copyin(9F)
2202 2183 *
2203 2184 * Output:
2204 2185 * base_addr = from address (user address space)
2205 2186 *
2206 2187 * Returns:
2207 2188 * 0 = OK
2208 2189 * EFAULT = Error
2209 2190 *
2210 2191 * Context:
2211 2192 * Kernel context.
2212 2193 */
2213 2194 static int
2214 2195 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2215 2196 {
2216 2197 #ifdef _MULTI_DATAMODEL
2217 2198 struct fcp32_scsi_cmd f32scsi;
2218 2199
2219 2200 switch (ddi_model_convert_from(mode & FMODELS)) {
2220 2201 case DDI_MODEL_ILP32:
2221 2202 /*
2222 2203 * Convert from 64 bit to 32 bit
2223 2204 */
2224 2205 FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2225 2206 /*
2226 2207 * Copy data to user address space
2227 2208 */
2228 2209 if (ddi_copyout(&f32scsi,
2229 2210 (void *)base_addr,
2230 2211 sizeof (struct fcp32_scsi_cmd),
2231 2212 mode)) {
2232 2213 return (EFAULT);
2233 2214 }
2234 2215 break;
2235 2216 case DDI_MODEL_NONE:
2236 2217 /*
2237 2218 * Copy data to user address space
2238 2219 */
2239 2220 if (ddi_copyout(fscsi,
2240 2221 (void *)base_addr,
2241 2222 sizeof (struct fcp_scsi_cmd),
2242 2223 mode)) {
2243 2224 return (EFAULT);
2244 2225 }
2245 2226 break;
2246 2227 }
2247 2228 #else /* _MULTI_DATAMODEL */
2248 2229 /*
2249 2230 * Copy data to user address space
2250 2231 */
2251 2232 if (ddi_copyout(fscsi,
2252 2233 (void *)base_addr,
2253 2234 sizeof (struct fcp_scsi_cmd),
2254 2235 mode)) {
2255 2236 return (EFAULT);
2256 2237 }
2257 2238 #endif /* _MULTI_DATAMODEL */
2258 2239
2259 2240 return (0);
2260 2241 }
2261 2242
2262 2243
2263 2244 /*
2264 2245 * fcp_send_scsi_ioctl
2265 2246 * Sends the SCSI command in blocking mode.
2266 2247 *
2267 2248 * Input:
2268 2249 * fscsi = SCSI command data structure
2269 2250 *
2270 2251 * Output:
2271 2252 * fscsi = SCSI command data structure
2272 2253 *
2273 2254 * Returns:
2274 2255 * 0 = OK
2275 2256 * EAGAIN = See errno.h
2276 2257 * EBUSY = See errno.h
2277 2258 * EINTR = See errno.h
2278 2259 * EINVAL = See errno.h
2279 2260 * EIO = See errno.h
2280 2261 * ENOMEM = See errno.h
2281 2262 * ENXIO = See errno.h
2282 2263 *
2283 2264 * Context:
2284 2265 * Kernel context.
2285 2266 */
2286 2267 static int
2287 2268 fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
2288 2269 {
2289 2270 struct fcp_lun *plun = NULL;
2290 2271 struct fcp_port *pptr = NULL;
2291 2272 struct fcp_tgt *ptgt = NULL;
2292 2273 fc_packet_t *fpkt = NULL;
2293 2274 struct fcp_ipkt *icmd = NULL;
2294 2275 int target_created = FALSE;
2295 2276 fc_frame_hdr_t *hp;
2296 2277 struct fcp_cmd fcp_cmd;
2297 2278 struct fcp_cmd *fcmd;
2298 2279 union scsi_cdb *scsi_cdb;
2299 2280 la_wwn_t *wwn_ptr;
2300 2281 int nodma;
2301 2282 struct fcp_rsp *rsp;
2302 2283 struct fcp_rsp_info *rsp_info;
2303 2284 caddr_t rsp_sense;
2304 2285 int buf_len;
2305 2286 int info_len;
2306 2287 int sense_len;
2307 2288 struct scsi_extended_sense *sense_to = NULL;
2308 2289 timeout_id_t tid;
2309 2290 uint8_t reconfig_lun = FALSE;
2310 2291 uint8_t reconfig_pending = FALSE;
2311 2292 uint8_t scsi_cmd;
2312 2293 int rsp_len;
2313 2294 int cmd_index;
2314 2295 int fc_status;
2315 2296 int pkt_state;
2316 2297 int pkt_action;
2317 2298 int pkt_reason;
2318 2299 int ret, xport_retval = ~FC_SUCCESS;
2319 2300 int lcount;
2320 2301 int tcount;
2321 2302 int reconfig_status;
2322 2303 int port_busy = FALSE;
2323 2304 uchar_t *lun_string;
2324 2305
2325 2306 /*
2326 2307 * Check valid SCSI command
2327 2308 */
2328 2309 scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
2329 2310 ret = EINVAL;
2330 2311 for (cmd_index = 0;
2331 2312 cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
2332 2313 ret != 0;
2333 2314 cmd_index++) {
2334 2315 /*
2335 2316 * First byte of CDB is the SCSI command
2336 2317 */
2337 2318 if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
2338 2319 ret = 0;
2339 2320 }
2340 2321 }
2341 2322
2342 2323 /*
2343 2324 * Check inputs
2344 2325 */
2345 2326 if (fscsi->scsi_flags != FCP_SCSI_READ) {
2346 2327 ret = EINVAL;
2347 2328 } else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
2348 2329 /* no larger than */
2349 2330 ret = EINVAL;
2350 2331 }
2351 2332
2352 2333
2353 2334 /*
2354 2335 * Find FC port
2355 2336 */
2356 2337 if (ret == 0) {
2357 2338 /*
2358 2339 * Acquire global mutex
2359 2340 */
2360 2341 mutex_enter(&fcp_global_mutex);
2361 2342
2362 2343 pptr = fcp_port_head;
2363 2344 while (pptr) {
2364 2345 if (pptr->port_instance ==
2365 2346 (uint32_t)fscsi->scsi_fc_port_num) {
2366 2347 break;
2367 2348 } else {
2368 2349 pptr = pptr->port_next;
2369 2350 }
2370 2351 }
2371 2352
2372 2353 if (pptr == NULL) {
2373 2354 ret = ENXIO;
2374 2355 } else {
2375 2356 /*
2376 2357 * fc_ulp_busy_port can raise power
2377 2358 * so, we must not hold any mutexes involved in PM
2378 2359 */
2379 2360 mutex_exit(&fcp_global_mutex);
2380 2361 ret = fc_ulp_busy_port(pptr->port_fp_handle);
2381 2362 }
2382 2363
2383 2364 if (ret == 0) {
2384 2365
2385 2366 /* remember port is busy, so we will release later */
2386 2367 port_busy = TRUE;
2387 2368
2388 2369 /*
2389 2370 * If there is a reconfiguration in progress, wait
2390 2371 * for it to complete.
2391 2372 */
2392 2373
2393 2374 fcp_reconfig_wait(pptr);
2394 2375
2395 2376 /* reacquire mutexes in order */
2396 2377 mutex_enter(&fcp_global_mutex);
2397 2378 mutex_enter(&pptr->port_mutex);
2398 2379
2399 2380 /*
2400 2381 * Will port accept DMA?
2401 2382 */
2402 2383 nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
2403 2384 ? 1 : 0;
2404 2385
2405 2386 /*
2406 2387 * If init or offline, device not known
2407 2388 *
2408 2389 * If we are discovering (onlining), we can
2409 2390 * NOT obviously provide reliable data about
2410 2391 * devices until it is complete
2411 2392 */
2412 2393 if (pptr->port_state & (FCP_STATE_INIT |
2413 2394 FCP_STATE_OFFLINE)) {
2414 2395 ret = ENXIO;
2415 2396 } else if (pptr->port_state & FCP_STATE_ONLINING) {
2416 2397 ret = EBUSY;
2417 2398 } else {
2418 2399 /*
2419 2400 * Find target from pwwn
2420 2401 *
2421 2402 * The wwn must be put into a local
2422 2403 * variable to ensure alignment.
2423 2404 */
2424 2405 wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2425 2406 ptgt = fcp_lookup_target(pptr,
2426 2407 (uchar_t *)wwn_ptr);
2427 2408
2428 2409 /*
2429 2410 * If target not found,
2430 2411 */
2431 2412 if (ptgt == NULL) {
2432 2413 /*
2433 2414 * Note: Still have global &
2434 2415 * port mutexes
2435 2416 */
2436 2417 mutex_exit(&pptr->port_mutex);
2437 2418 ptgt = fcp_port_create_tgt(pptr,
2438 2419 wwn_ptr, &ret, &fc_status,
2439 2420 &pkt_state, &pkt_action,
2440 2421 &pkt_reason);
2441 2422 mutex_enter(&pptr->port_mutex);
2442 2423
2443 2424 fscsi->scsi_fc_status = fc_status;
2444 2425 fscsi->scsi_pkt_state =
2445 2426 (uchar_t)pkt_state;
2446 2427 fscsi->scsi_pkt_reason = pkt_reason;
2447 2428 fscsi->scsi_pkt_action =
2448 2429 (uchar_t)pkt_action;
2449 2430
2450 2431 if (ptgt != NULL) {
2451 2432 target_created = TRUE;
2452 2433 } else if (ret == 0) {
2453 2434 ret = ENOMEM;
2454 2435 }
2455 2436 }
2456 2437
2457 2438 if (ret == 0) {
2458 2439 /*
2459 2440 * Acquire target
2460 2441 */
2461 2442 mutex_enter(&ptgt->tgt_mutex);
2462 2443
2463 2444 /*
2464 2445 * If target is mark or busy,
2465 2446 * then target can not be used
2466 2447 */
2467 2448 if (ptgt->tgt_state &
2468 2449 (FCP_TGT_MARK |
2469 2450 FCP_TGT_BUSY)) {
2470 2451 ret = EBUSY;
2471 2452 } else {
2472 2453 /*
2473 2454 * Mark target as busy
2474 2455 */
2475 2456 ptgt->tgt_state |=
2476 2457 FCP_TGT_BUSY;
2477 2458 }
2478 2459
2479 2460 /*
2480 2461 * Release target
2481 2462 */
2482 2463 lcount = pptr->port_link_cnt;
2483 2464 tcount = ptgt->tgt_change_cnt;
2484 2465 mutex_exit(&ptgt->tgt_mutex);
2485 2466 }
2486 2467 }
2487 2468
2488 2469 /*
2489 2470 * Release port
2490 2471 */
2491 2472 mutex_exit(&pptr->port_mutex);
2492 2473 }
2493 2474
2494 2475 /*
2495 2476 * Release global mutex
2496 2477 */
2497 2478 mutex_exit(&fcp_global_mutex);
2498 2479 }
2499 2480
2500 2481 if (ret == 0) {
2501 2482 uint64_t belun = BE_64(fscsi->scsi_lun);
2502 2483
2503 2484 /*
2504 2485 * If it's a target device, find lun from pwwn
2505 2486 * The wwn must be put into a local
2506 2487 * variable to ensure alignment.
2507 2488 */
2508 2489 mutex_enter(&pptr->port_mutex);
2509 2490 wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2510 2491 if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
2511 2492 /* this is not a target */
2512 2493 fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
2513 2494 ret = ENXIO;
2514 2495 } else if ((belun << 16) != 0) {
2515 2496 /*
2516 2497 * Since fcp only support PD and LU addressing method
2517 2498 * so far, the last 6 bytes of a valid LUN are expected
2518 2499 * to be filled with 00h.
2519 2500 */
2520 2501 fscsi->scsi_fc_status = FC_INVALID_LUN;
2521 2502 cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
2522 2503 " method 0x%02x with LUN number 0x%016" PRIx64,
2523 2504 (uint8_t)(belun >> 62), belun);
2524 2505 ret = ENXIO;
2525 2506 } else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
2526 2507 (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
2527 2508 /*
2528 2509 * This is a SCSI target, but no LUN at this
2529 2510 * address.
2530 2511 *
2531 2512 * In the future, we may want to send this to
2532 2513 * the target, and let it respond
2533 2514 * appropriately
2534 2515 */
2535 2516 ret = ENXIO;
2536 2517 }
2537 2518 mutex_exit(&pptr->port_mutex);
2538 2519 }
2539 2520
2540 2521 /*
2541 2522 * Finished grabbing external resources
2542 2523 * Allocate internal packet (icmd)
2543 2524 */
2544 2525 if (ret == 0) {
2545 2526 /*
2546 2527 * Calc rsp len assuming rsp info included
2547 2528 */
2548 2529 rsp_len = sizeof (struct fcp_rsp) +
2549 2530 sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;
2550 2531
2551 2532 icmd = fcp_icmd_alloc(pptr, ptgt,
2552 2533 sizeof (struct fcp_cmd),
2553 2534 rsp_len,
2554 2535 fscsi->scsi_buflen,
2555 2536 nodma,
2556 2537 lcount, /* ipkt_link_cnt */
2557 2538 tcount, /* ipkt_change_cnt */
2558 2539 0, /* cause */
2559 2540 FC_INVALID_RSCN_COUNT); /* invalidate the count */
2560 2541
2561 2542 if (icmd == NULL) {
2562 2543 ret = ENOMEM;
2563 2544 } else {
2564 2545 /*
2565 2546 * Setup internal packet as sema sync
2566 2547 */
2567 2548 fcp_ipkt_sema_init(icmd);
2568 2549 }
2569 2550 }
2570 2551
2571 2552 if (ret == 0) {
2572 2553 /*
2573 2554 * Init fpkt pointer for use.
2574 2555 */
2575 2556
2576 2557 fpkt = icmd->ipkt_fpkt;
2577 2558
2578 2559 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
2579 2560 fpkt->pkt_tran_type = FC_PKT_FCP_READ; /* only rd for now */
2580 2561 fpkt->pkt_timeout = fscsi->scsi_timeout;
2581 2562
2582 2563 /*
2583 2564 * Init fcmd pointer for use by SCSI command
2584 2565 */
2585 2566
2586 2567 if (nodma) {
2587 2568 fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
2588 2569 } else {
2589 2570 fcmd = &fcp_cmd;
2590 2571 }
2591 2572 bzero(fcmd, sizeof (struct fcp_cmd));
2592 2573 ptgt = plun->lun_tgt;
2593 2574
2594 2575 lun_string = (uchar_t *)&fscsi->scsi_lun;
2595 2576
2596 2577 fcmd->fcp_ent_addr.ent_addr_0 =
2597 2578 BE_16(*(uint16_t *)&(lun_string[0]));
2598 2579 fcmd->fcp_ent_addr.ent_addr_1 =
2599 2580 BE_16(*(uint16_t *)&(lun_string[2]));
2600 2581 fcmd->fcp_ent_addr.ent_addr_2 =
2601 2582 BE_16(*(uint16_t *)&(lun_string[4]));
2602 2583 fcmd->fcp_ent_addr.ent_addr_3 =
2603 2584 BE_16(*(uint16_t *)&(lun_string[6]));
2604 2585
2605 2586 /*
2606 2587 * Setup internal packet(icmd)
2607 2588 */
2608 2589 icmd->ipkt_lun = plun;
2609 2590 icmd->ipkt_restart = 0;
2610 2591 icmd->ipkt_retries = 0;
2611 2592 icmd->ipkt_opcode = 0;
2612 2593
2613 2594 /*
2614 2595 * Init the frame HEADER Pointer for use
2615 2596 */
2616 2597 hp = &fpkt->pkt_cmd_fhdr;
2617 2598
2618 2599 hp->s_id = pptr->port_id;
2619 2600 hp->d_id = ptgt->tgt_d_id;
2620 2601 hp->r_ctl = R_CTL_COMMAND;
2621 2602 hp->type = FC_TYPE_SCSI_FCP;
2622 2603 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
2623 2604 hp->rsvd = 0;
2624 2605 hp->seq_id = 0;
2625 2606 hp->seq_cnt = 0;
2626 2607 hp->ox_id = 0xffff;
2627 2608 hp->rx_id = 0xffff;
2628 2609 hp->ro = 0;
2629 2610
2630 2611 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
2631 2612 fcmd->fcp_cntl.cntl_read_data = 1; /* only rd for now */
2632 2613 fcmd->fcp_cntl.cntl_write_data = 0;
2633 2614 fcmd->fcp_data_len = fscsi->scsi_buflen;
2634 2615
2635 2616 scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
2636 2617 bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
2637 2618 fscsi->scsi_cdblen);
2638 2619
2639 2620 if (!nodma) {
2640 2621 FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
2641 2622 fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
2642 2623 }
2643 2624
2644 2625 /*
2645 2626 * Send SCSI command to FC transport
2646 2627 */
2647 2628
2648 2629 if (ret == 0) {
2649 2630 mutex_enter(&ptgt->tgt_mutex);
2650 2631
2651 2632 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
2652 2633 mutex_exit(&ptgt->tgt_mutex);
2653 2634 fscsi->scsi_fc_status = xport_retval =
2654 2635 fc_ulp_transport(pptr->port_fp_handle,
2655 2636 fpkt);
2656 2637 if (fscsi->scsi_fc_status != FC_SUCCESS) {
2657 2638 ret = EIO;
2658 2639 }
2659 2640 } else {
2660 2641 mutex_exit(&ptgt->tgt_mutex);
2661 2642 ret = EBUSY;
2662 2643 }
2663 2644 }
2664 2645 }
2665 2646
2666 2647 /*
2667 2648 * Wait for completion only if fc_ulp_transport was called and it
2668 2649 * returned a success. This is the only time callback will happen.
2669 2650 * Otherwise, there is no point in waiting
2670 2651 */
2671 2652 if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2672 2653 ret = fcp_ipkt_sema_wait(icmd);
2673 2654 }
2674 2655
2675 2656 /*
2676 2657 * Copy data to IOCTL data structures
2677 2658 */
2678 2659 rsp = NULL;
2679 2660 if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2680 2661 rsp = (struct fcp_rsp *)fpkt->pkt_resp;
2681 2662
2682 2663 if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
2683 2664 fcp_log(CE_WARN, pptr->port_dip,
2684 2665 "!SCSI command to d_id=0x%x lun=0x%x"
2685 2666 " failed, Bad FCP response values:"
2686 2667 " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
2687 2668 " sts-rsvd2=%x, rsplen=%x, senselen=%x",
2688 2669 ptgt->tgt_d_id, plun->lun_num,
2689 2670 rsp->reserved_0, rsp->reserved_1,
2690 2671 rsp->fcp_u.fcp_status.reserved_0,
2691 2672 rsp->fcp_u.fcp_status.reserved_1,
2692 2673 rsp->fcp_response_len, rsp->fcp_sense_len);
2693 2674
2694 2675 ret = EIO;
2695 2676 }
2696 2677 }
2697 2678
2698 2679 if ((ret == 0) && (rsp != NULL)) {
2699 2680 /*
2700 2681 * Calc response lengths
2701 2682 */
2702 2683 sense_len = 0;
2703 2684 info_len = 0;
2704 2685
2705 2686 if (rsp->fcp_u.fcp_status.rsp_len_set) {
2706 2687 info_len = rsp->fcp_response_len;
2707 2688 }
2708 2689
2709 2690 rsp_info = (struct fcp_rsp_info *)
2710 2691 ((uint8_t *)rsp + sizeof (struct fcp_rsp));
2711 2692
2712 2693 /*
2713 2694 * Get SCSI status
2714 2695 */
2715 2696 fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
2716 2697 /*
2717 2698 * If a lun was just added or removed and the next command
2718 2699 * comes through this interface, we need to capture the check
2719 2700 * condition so we can discover the new topology.
2720 2701 */
2721 2702 if (fscsi->scsi_bufstatus != STATUS_GOOD &&
2722 2703 rsp->fcp_u.fcp_status.sense_len_set) {
2723 2704 sense_len = rsp->fcp_sense_len;
2724 2705 rsp_sense = (caddr_t)((uint8_t *)rsp_info + info_len);
2725 2706 sense_to = (struct scsi_extended_sense *)rsp_sense;
2726 2707 if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
2727 2708 (FCP_SENSE_NO_LUN(sense_to))) {
2728 2709 reconfig_lun = TRUE;
2729 2710 }
2730 2711 }
2731 2712
2732 2713 if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
2733 2714 (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
2734 2715 if (reconfig_lun == FALSE) {
2735 2716 reconfig_status =
2736 2717 fcp_is_reconfig_needed(ptgt, fpkt);
2737 2718 }
2738 2719
2739 2720 if ((reconfig_lun == TRUE) ||
2740 2721 (reconfig_status == TRUE)) {
2741 2722 mutex_enter(&ptgt->tgt_mutex);
2742 2723 if (ptgt->tgt_tid == NULL) {
2743 2724 /*
2744 2725 * Either we've been notified the
2745 2726 * REPORT_LUN data has changed, or
2746 2727 * we've determined on our own that
2747 2728 * we're out of date. Kick off
2748 2729 * rediscovery.
2749 2730 */
2750 2731 tid = timeout(fcp_reconfigure_luns,
2751 2732 (caddr_t)ptgt, drv_usectohz(1));
2752 2733
2753 2734 ptgt->tgt_tid = tid;
2754 2735 ptgt->tgt_state |= FCP_TGT_BUSY;
2755 2736 ret = EBUSY;
2756 2737 reconfig_pending = TRUE;
2757 2738 }
2758 2739 mutex_exit(&ptgt->tgt_mutex);
2759 2740 }
2760 2741 }
2761 2742
2762 2743 /*
2763 2744 * Calc residuals and buffer lengths
2764 2745 */
2765 2746
2766 2747 if (ret == 0) {
2767 2748 buf_len = fscsi->scsi_buflen;
2768 2749 fscsi->scsi_bufresid = 0;
2769 2750 if (rsp->fcp_u.fcp_status.resid_under) {
2770 2751 if (rsp->fcp_resid <= fscsi->scsi_buflen) {
2771 2752 fscsi->scsi_bufresid = rsp->fcp_resid;
2772 2753 } else {
2773 2754 cmn_err(CE_WARN, "fcp: bad residue %x "
2774 2755 "for txfer len %x", rsp->fcp_resid,
2775 2756 fscsi->scsi_buflen);
2776 2757 fscsi->scsi_bufresid =
2777 2758 fscsi->scsi_buflen;
2778 2759 }
2779 2760 buf_len -= fscsi->scsi_bufresid;
2780 2761 }
2781 2762 if (rsp->fcp_u.fcp_status.resid_over) {
2782 2763 fscsi->scsi_bufresid = -rsp->fcp_resid;
2783 2764 }
2784 2765
2785 2766 fscsi->scsi_rqresid = fscsi->scsi_rqlen - sense_len;
2786 2767 if (fscsi->scsi_rqlen < sense_len) {
2787 2768 sense_len = fscsi->scsi_rqlen;
2788 2769 }
2789 2770
2790 2771 fscsi->scsi_fc_rspcode = 0;
2791 2772 if (rsp->fcp_u.fcp_status.rsp_len_set) {
2792 2773 fscsi->scsi_fc_rspcode = rsp_info->rsp_code;
2793 2774 }
2794 2775 fscsi->scsi_pkt_state = fpkt->pkt_state;
2795 2776 fscsi->scsi_pkt_action = fpkt->pkt_action;
2796 2777 fscsi->scsi_pkt_reason = fpkt->pkt_reason;
2797 2778
2798 2779 /*
2799 2780 * Copy data and request sense
2800 2781 *
2801 2782 * Data must be copied by using the FCP_CP_IN macro.
2802 2783 * This will ensure the proper byte order since the data
2803 2784 * is being copied directly from the memory mapped
2804 2785 * device register.
2805 2786 *
2806 2787 * The response (and request sense) will be in the
2807 2788 * correct byte order. No special copy is necessary.
2808 2789 */
2809 2790
2810 2791 if (buf_len) {
2811 2792 FCP_CP_IN(fpkt->pkt_data,
2812 2793 fscsi->scsi_bufaddr,
2813 2794 fpkt->pkt_data_acc,
2814 2795 buf_len);
2815 2796 }
2816 2797 bcopy((void *)rsp_sense,
2817 2798 (void *)fscsi->scsi_rqbufaddr,
2818 2799 sense_len);
2819 2800 }
2820 2801 }
2821 2802
2822 2803 /*
2823 2804 * Cleanup transport data structures if icmd was alloc-ed
2824 2805 * So, cleanup happens in the same thread that icmd was alloc-ed
2825 2806 */
2826 2807 if (icmd != NULL) {
2827 2808 fcp_ipkt_sema_cleanup(icmd);
2828 2809 }
2829 2810
2830 2811 /* restore pm busy/idle status */
2831 2812 if (port_busy) {
2832 2813 fc_ulp_idle_port(pptr->port_fp_handle);
2833 2814 }
2834 2815
2835 2816 /*
2836 2817 * Cleanup target. if a reconfig is pending, don't clear the BUSY
2837 2818 * flag, it'll be cleared when the reconfig is complete.
2838 2819 */
2839 2820 if ((ptgt != NULL) && !reconfig_pending) {
2840 2821 /*
2841 2822 * If target was created,
2842 2823 */
2843 2824 if (target_created) {
2844 2825 mutex_enter(&ptgt->tgt_mutex);
2845 2826 ptgt->tgt_state &= ~FCP_TGT_BUSY;
2846 2827 mutex_exit(&ptgt->tgt_mutex);
2847 2828 } else {
2848 2829 /*
2849 2830 * De-mark target as busy
2850 2831 */
2851 2832 mutex_enter(&ptgt->tgt_mutex);
2852 2833 ptgt->tgt_state &= ~FCP_TGT_BUSY;
2853 2834 mutex_exit(&ptgt->tgt_mutex);
2854 2835 }
2855 2836 }
2856 2837 return (ret);
2857 2838 }
2858 2839
2859 2840
/*
 * Function:	fcp_is_reconfig_needed
 *
 * Description:	Compares the LUN inventory reported in a REPORT_LUN response
 *		against the LUNs currently attached to the target (skipping
 *		LUNs masked by the fcp_lun_blacklist) and decides whether a
 *		LUN reconfiguration is required.
 *
 * Argument:	ptgt		Target whose LUN list is being checked.
 *		fpkt		FC packet holding the REPORT_LUN response.
 *
 * Return Value: TRUE		Reconfiguration is needed.
 *		 FALSE		The known LUN set matches the response.
 */
static int
fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
    fc_packet_t *fpkt)
{
	uchar_t *lun_string;
	uint16_t lun_num, i;
	int num_luns;
	int actual_luns;
	int num_masked_luns;
	int lun_buflen;
	struct fcp_lun *plun = NULL;
	struct fcp_reportlun_resp *report_lun;
	uint8_t reconfig_needed = FALSE;
	uint8_t lun_exists = FALSE;
	/* NOTE(review): pptr appears unreferenced below — confirm no macro uses it */
	fcp_port_t *pptr = ptgt->tgt_port;

	report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);

	/* Copy the response out of the (possibly device-mapped) data buffer. */
	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
	    fpkt->pkt_datalen);

	/* get number of luns (which is supplied as LUNS * 8) */
	num_luns = BE_32(report_lun->num_lun) >> 3;

	/*
	 * Figure out exactly how many lun strings our response buffer
	 * can hold.
	 */
	lun_buflen = (fpkt->pkt_datalen -
	    2 * sizeof (uint32_t)) / sizeof (longlong_t);

	/*
	 * Is our response buffer full or not? We don't want to
	 * potentially walk beyond the number of luns we have.
	 */
	if (num_luns <= lun_buflen) {
		actual_luns = num_luns;
	} else {
		actual_luns = lun_buflen;
	}

	mutex_enter(&ptgt->tgt_mutex);

	/* Scan each lun to see if we have masked it. */
	num_masked_luns = 0;
	if (fcp_lun_blacklist != NULL) {
		for (i = 0; i < actual_luns; i++) {
			lun_string = (uchar_t *)&(report_lun->lun_string[i]);
			/* Only the addressing methods below carry a LUN number. */
			switch (lun_string[0] & 0xC0) {
			case FCP_LUN_ADDRESSING:
			case FCP_PD_ADDRESSING:
			case FCP_VOLUME_ADDRESSING:
				/* 14-bit LUN: low 6 bits of byte 0, all of byte 1. */
				lun_num = ((lun_string[0] & 0x3F) << 8)
				    | lun_string[1];
				if (fcp_should_mask(&ptgt->tgt_port_wwn,
				    lun_num) == TRUE) {
					num_masked_luns++;
				}
				break;
			default:
				break;
			}
		}
	}

	/*
	 * The quick and easy check. If the number of LUNs reported
	 * doesn't match the number we currently know about, we need
	 * to reconfigure.
	 */
	if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
		mutex_exit(&ptgt->tgt_mutex);
		kmem_free(report_lun, fpkt->pkt_datalen);
		return (TRUE);
	}

	/*
	 * If the quick and easy check doesn't turn up anything, we walk
	 * the list of luns from the REPORT_LUN response and look for
	 * any luns we don't know about. If we find one, we know we need
	 * to reconfigure. We will skip LUNs that are masked because of the
	 * blacklist.
	 */
	for (i = 0; i < actual_luns; i++) {
		lun_string = (uchar_t *)&(report_lun->lun_string[i]);
		lun_exists = FALSE;
		switch (lun_string[0] & 0xC0) {
		case FCP_LUN_ADDRESSING:
		case FCP_PD_ADDRESSING:
		case FCP_VOLUME_ADDRESSING:
			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];

			/* A masked LUN is treated as known so it is not flagged. */
			if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
			    &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
				lun_exists = TRUE;
				break;
			}

			for (plun = ptgt->tgt_lun; plun;
			    plun = plun->lun_next) {
				if (plun->lun_num == lun_num) {
					lun_exists = TRUE;
					break;
				}
			}
			break;
		default:
			break;
		}

		if (lun_exists == FALSE) {
			reconfig_needed = TRUE;
			break;
		}
	}

	mutex_exit(&ptgt->tgt_mutex);
	kmem_free(report_lun, fpkt->pkt_datalen);

	return (reconfig_needed);
}
2981 2962
2982 2963 /*
2983 2964 * This function is called by fcp_handle_page83 and uses inquiry response data
2984 2965 * stored in plun->lun_inq to determine whether or not a device is a member of
 * the table fcp_symmetric_disk_table. We return 0 if it is in the table,
2986 2967 * otherwise 1.
2987 2968 */
2988 2969 static int
2989 2970 fcp_symmetric_device_probe(struct fcp_lun *plun)
2990 2971 {
2991 2972 struct scsi_inquiry *stdinq = &plun->lun_inq;
2992 2973 char *devidptr;
2993 2974 int i, len;
2994 2975
2995 2976 for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
2996 2977 devidptr = fcp_symmetric_disk_table[i];
2997 2978 len = (int)strlen(devidptr);
2998 2979
2999 2980 if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
3000 2981 return (0);
3001 2982 }
3002 2983 }
3003 2984 return (1);
3004 2985 }
3005 2986
3006 2987
3007 2988 /*
3008 2989 * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl
3009 2990 * It basically returns the current count of # of state change callbacks
3010 2991 * i.e the value of tgt_change_cnt.
3011 2992 *
3012 2993 * INPUT:
3013 2994 * fcp_ioctl.fp_minor -> The minor # of the fp port
3014 2995 * fcp_ioctl.listlen -> 1
3015 2996 * fcp_ioctl.list -> Pointer to a 32 bit integer
3016 2997 */
/*ARGSUSED2*/
static int
fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
{
	int ret;
	uint32_t link_cnt;
	struct fcp_ioctl fioctl;
	struct fcp_port *pptr = NULL;

	/* Copy in the ioctl request and resolve the fp port it targets. */
	if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
	    &pptr)) != 0) {
		return (ret);
	}

	ASSERT(pptr != NULL);

	/* The interface mandates exactly one 32-bit slot in the list. */
	if (fioctl.listlen != 1) {
		return (EINVAL);
	}

	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & FCP_STATE_OFFLINE) {
		mutex_exit(&pptr->port_mutex);
		return (ENXIO);
	}

	/*
	 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
	 * When the fcp initially attaches to the port and there is nothing
	 * hanging out of the port or if there was a repeat offline state change
	 * callback (refer fcp_statec_callback() FC_STATE_OFFLINE case).
	 * In the latter case, port_tmp_cnt will be non-zero and that is how we
	 * will differentiate the 2 cases.
	 */
	if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
		mutex_exit(&pptr->port_mutex);
		return (ENXIO);
	}

	/* Snapshot the link count while the port mutex is still held. */
	link_cnt = pptr->port_link_cnt;
	mutex_exit(&pptr->port_mutex);

	/* Hand the count back through the user-supplied list pointer. */
	if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
		return (EFAULT);
	}

#ifdef	_MULTI_DATAMODEL
	/* Copy the ioctl structure back out in the caller's data model. */
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct fcp32_ioctl f32_ioctl;

		f32_ioctl.fp_minor = fioctl.fp_minor;
		f32_ioctl.listlen = fioctl.listlen;
		f32_ioctl.list = (caddr32_t)(long)fioctl.list;
		if (ddi_copyout((void *)&f32_ioctl, (void *)data,
		    sizeof (struct fcp32_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyout((void *)&fioctl, (void *)data,
		    sizeof (struct fcp_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}
#else	/* _MULTI_DATAMODEL */

	if (ddi_copyout((void *)&fioctl, (void *)data,
	    sizeof (struct fcp_ioctl), mode)) {
		return (EFAULT);
	}
#endif	/* _MULTI_DATAMODEL */

	return (0);
}
3094 3075
3095 3076 /*
3096 3077 * This function copies the fcp_ioctl structure passed in from user land
3097 3078 * into kernel land. Handles 32 bit applications.
3098 3079 */
/*ARGSUSED*/
static int
fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
    struct fcp_ioctl *fioctl, struct fcp_port **pptr)
{
	struct fcp_port *t_pptr;

#ifdef	_MULTI_DATAMODEL
	/* Copy the request in, widening from ILP32 layout if necessary. */
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		struct fcp32_ioctl f32_ioctl;

		if (ddi_copyin((void *)data, (void *)&f32_ioctl,
		    sizeof (struct fcp32_ioctl), mode)) {
			return (EFAULT);
		}
		fioctl->fp_minor = f32_ioctl.fp_minor;
		fioctl->listlen = f32_ioctl.listlen;
		fioctl->list = (caddr_t)(long)f32_ioctl.list;
		break;
	}
	case DDI_MODEL_NONE:
		if (ddi_copyin((void *)data, (void *)fioctl,
		    sizeof (struct fcp_ioctl), mode)) {
			return (EFAULT);
		}
		break;
	}

#else	/* _MULTI_DATAMODEL */
	if (ddi_copyin((void *)data, (void *)fioctl,
	    sizeof (struct fcp_ioctl), mode)) {
		return (EFAULT);
	}
#endif	/* _MULTI_DATAMODEL */

	/*
	 * Right now we can assume that the minor number matches with
	 * this instance of fp. If this changes we will need to
	 * revisit this logic.
	 */
	mutex_enter(&fcp_global_mutex);
	/* Walk the global port list looking for a matching instance. */
	t_pptr = fcp_port_head;
	while (t_pptr) {
		if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
			break;
		} else {
			t_pptr = t_pptr->port_next;
		}
	}
	*pptr = t_pptr;
	mutex_exit(&fcp_global_mutex);
	if (t_pptr == NULL) {
		return (ENXIO);
	}

	return (0);
}
3157 3138
3158 3139 /*
3159 3140 * Function: fcp_port_create_tgt
3160 3141 *
 * Description: As the name suggests this function creates the target context
 *		specified by the WWN provided by the caller. If the
3163 3144 * creation goes well and the target is known by fp/fctl a PLOGI
3164 3145 * followed by a PRLI are issued.
3165 3146 *
3166 3147 * Argument: pptr fcp port structure
3167 3148 * pwwn WWN of the target
3168 3149 * ret_val Address of the return code. It could be:
3169 3150 * EIO, ENOMEM or 0.
3170 3151 * fc_status PLOGI or PRLI status completion
3171 3152 * fc_pkt_state PLOGI or PRLI state completion
3172 3153 * fc_pkt_reason PLOGI or PRLI reason completion
3173 3154 * fc_pkt_action PLOGI or PRLI action completion
3174 3155 *
3175 3156 * Return Value: NULL if it failed
3176 3157 * Target structure address if it succeeds
3177 3158 */
static struct fcp_tgt *
fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
    int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
{
	struct fcp_tgt *ptgt = NULL;
	fc_portmap_t devlist;
	int lcount;
	int error;

	/*
	 * Entered with fcp_global_mutex held by the caller: it is dropped
	 * below for the (blocking) PLOGI/PRLI and re-acquired before return.
	 */
	*ret_val = 0;

	/*
	 * Check FC port device & get port map
	 */
	if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
	    &error, 1) == NULL) {
		*ret_val = EIO;
	} else {
		if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
		    &devlist) != FC_SUCCESS) {
			*ret_val = EIO;
		}
	}

	/* Set port map flags */
	devlist.map_type = PORT_DEVICE_USER_CREATE;

	/* Allocate target */
	if (*ret_val == 0) {
		lcount = pptr->port_link_cnt;
		ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			*ret_val = ENOMEM;
		} else {
			/* Setup target from the port map just obtained. */
			mutex_enter(&ptgt->tgt_mutex);

			ptgt->tgt_statec_cause = FCP_CAUSE_TGT_CHANGE;
			ptgt->tgt_tmp_cnt = 1;
			ptgt->tgt_d_id = devlist.map_did.port_id;
			ptgt->tgt_hard_addr =
			    devlist.map_hard_addr.hard_addr;
			ptgt->tgt_pd_handle = devlist.map_pd;
			ptgt->tgt_fca_dev = NULL;

			bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
			    FC_WWN_SIZE);
			bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
			    FC_WWN_SIZE);

			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	/* Release global mutex for PLOGI and PRLI */
	mutex_exit(&fcp_global_mutex);

	/* Send PLOGI (If necessary) */
	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}

	/* Send PRLI (If necessary) */
	if (*ret_val == 0) {
		*ret_val = fcp_tgt_send_prli(ptgt, fc_status,
		    fc_pkt_state, fc_pkt_reason, fc_pkt_action);
	}

	mutex_enter(&fcp_global_mutex);

	/* NOTE: on PLOGI/PRLI failure the allocated ptgt is still returned. */
	return (ptgt);
}
3253 3234
3254 3235 /*
3255 3236 * Function: fcp_tgt_send_plogi
3256 3237 *
3257 3238 * Description: This function sends a PLOGI to the target specified by the
3258 3239 * caller and waits till it completes.
3259 3240 *
3260 3241 * Argument: ptgt Target to send the plogi to.
3261 3242 * fc_status Status returned by fp/fctl in the PLOGI request.
3262 3243 * fc_pkt_state State returned by fp/fctl in the PLOGI request.
3263 3244 * fc_pkt_reason Reason returned by fp/fctl in the PLOGI request.
3264 3245 * fc_pkt_action Action returned by fp/fctl in the PLOGI request.
3265 3246 *
3266 3247 * Return Value: 0
3267 3248 * ENOMEM
3268 3249 * EIO
3269 3250 *
3270 3251 * Context: User context.
3271 3252 */
static int
fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
    int *fc_pkt_reason, int *fc_pkt_action)
{
	struct fcp_port *pptr;
	struct fcp_ipkt *icmd;
	struct fc_packet *fpkt;
	fc_frame_hdr_t *hp;
	struct la_els_logi logi;
	int tcount;
	int lcount;
	/* ~FC_SUCCESS guarantees "not transported yet" until fc_ulp_login runs */
	int ret, login_retval = ~FC_SUCCESS;

	ret = 0;

	pptr = ptgt->tgt_port;

	/* Counts used by fcp_icmd_alloc to detect link/target state changes. */
	lcount = pptr->port_link_cnt;
	tcount = ptgt->tgt_change_cnt;

	/* Alloc internal packet */
	icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
	    sizeof (la_els_logi_t), 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT);

	if (icmd == NULL) {
		ret = ENOMEM;
	} else {
		/*
		 * Setup internal packet as sema sync
		 */
		fcp_ipkt_sema_init(icmd);

		/*
		 * Setup internal packet (icmd)
		 */
		icmd->ipkt_lun = NULL;
		icmd->ipkt_restart = 0;
		icmd->ipkt_retries = 0;
		icmd->ipkt_opcode = LA_ELS_PLOGI;

		/*
		 * Setup fc_packet
		 */
		fpkt = icmd->ipkt_fpkt;

		fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
		fpkt->pkt_timeout = FCP_ELS_TIMEOUT;

		/*
		 * Setup FC frame header
		 */
		hp = &fpkt->pkt_cmd_fhdr;

		hp->s_id = pptr->port_id;	/* source ID */
		hp->d_id = ptgt->tgt_d_id;	/* dest ID */
		hp->r_ctl = R_CTL_ELS_REQ;
		hp->type = FC_TYPE_EXTENDED_LS;
		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->seq_id = 0;
		hp->rsvd = 0;
		hp->df_ctl = 0;
		hp->seq_cnt = 0;
		hp->ox_id = 0xffff;	/* i.e. none */
		hp->rx_id = 0xffff;	/* i.e. none */
		hp->ro = 0;

		/*
		 * Setup PLOGI
		 */
		bzero(&logi, sizeof (struct la_els_logi));
		logi.ls_code.ls_code = LA_ELS_PLOGI;

		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));

		/*
		 * Send PLOGI
		 */
		*fc_status = login_retval =
		    fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
		if (*fc_status != FC_SUCCESS) {
			ret = EIO;
		}
	}

	/*
	 * Wait for completion
	 */
	if ((ret == 0) && (login_retval == FC_SUCCESS)) {
		ret = fcp_ipkt_sema_wait(icmd);

		/* Report the completion details back to the caller. */
		*fc_pkt_state = fpkt->pkt_state;
		*fc_pkt_reason = fpkt->pkt_reason;
		*fc_pkt_action = fpkt->pkt_action;
	}

	/*
	 * Cleanup transport data structures if icmd was alloc-ed.
	 * NOTE(review): the completion callback (fcp_ipkt_sema_callback)
	 * only posts the semaphore, so cleanup always happens here in the
	 * requesting thread, whether or not fc_ulp_login() succeeded.
	 */
	if (icmd != NULL) {
		fcp_ipkt_sema_cleanup(icmd);
	}

	return (ret);
}
3382 3363
3383 3364 /*
3384 3365 * Function: fcp_tgt_send_prli
3385 3366 *
3386 3367 * Description: Does nothing as of today.
3387 3368 *
3388 3369 * Argument: ptgt Target to send the prli to.
3389 3370 * fc_status Status returned by fp/fctl in the PRLI request.
3390 3371 * fc_pkt_state State returned by fp/fctl in the PRLI request.
3391 3372 * fc_pkt_reason Reason returned by fp/fctl in the PRLI request.
3392 3373 * fc_pkt_action Action returned by fp/fctl in the PRLI request.
3393 3374 *
3394 3375 * Return Value: 0
3395 3376 */
/*ARGSUSED*/
static int
fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
    int *fc_pkt_reason, int *fc_pkt_action)
{
	/* No PRLI is issued today; report success so creation proceeds. */
	return (0);
}
3403 3384
3404 3385 /*
3405 3386 * Function: fcp_ipkt_sema_init
3406 3387 *
3407 3388 * Description: Initializes the semaphore contained in the internal packet.
3408 3389 *
3409 3390 * Argument: icmd Internal packet the semaphore of which must be
3410 3391 * initialized.
3411 3392 *
3412 3393 * Return Value: None
3413 3394 *
3414 3395 * Context: User context only.
3415 3396 */
3416 3397 static void
3417 3398 fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
3418 3399 {
3419 3400 struct fc_packet *fpkt;
3420 3401
3421 3402 fpkt = icmd->ipkt_fpkt;
3422 3403
3423 3404 /* Create semaphore for sync */
3424 3405 sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);
3425 3406
3426 3407 /* Setup the completion callback */
3427 3408 fpkt->pkt_comp = fcp_ipkt_sema_callback;
3428 3409 }
3429 3410
3430 3411 /*
3431 3412 * Function: fcp_ipkt_sema_wait
3432 3413 *
3433 3414 * Description: Wait on the semaphore embedded in the internal packet. The
3434 3415 * semaphore is released in the callback.
3435 3416 *
3436 3417 * Argument: icmd Internal packet to wait on for completion.
3437 3418 *
3438 3419 * Return Value: 0
3439 3420 * EIO
3440 3421 * EBUSY
3441 3422 * EAGAIN
3442 3423 *
3443 3424 * Context: User context only.
3444 3425 *
3445 3426 * This function does a conversion between the field pkt_state of the fc_packet
3446 3427 * embedded in the internal packet (icmd) and the code it returns.
3447 3428 */
3448 3429 static int
3449 3430 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3450 3431 {
3451 3432 struct fc_packet *fpkt;
3452 3433 int ret;
3453 3434
3454 3435 ret = EIO;
3455 3436 fpkt = icmd->ipkt_fpkt;
3456 3437
3457 3438 /*
3458 3439 * Wait on semaphore
3459 3440 */
3460 3441 sema_p(&(icmd->ipkt_sema));
3461 3442
3462 3443 /*
3463 3444 * Check the status of the FC packet
3464 3445 */
3465 3446 switch (fpkt->pkt_state) {
3466 3447 case FC_PKT_SUCCESS:
3467 3448 ret = 0;
3468 3449 break;
3469 3450 case FC_PKT_LOCAL_RJT:
3470 3451 switch (fpkt->pkt_reason) {
3471 3452 case FC_REASON_SEQ_TIMEOUT:
3472 3453 case FC_REASON_RX_BUF_TIMEOUT:
3473 3454 ret = EAGAIN;
3474 3455 break;
3475 3456 case FC_REASON_PKT_BUSY:
3476 3457 ret = EBUSY;
3477 3458 break;
3478 3459 }
3479 3460 break;
3480 3461 case FC_PKT_TIMEOUT:
3481 3462 ret = EAGAIN;
3482 3463 break;
3483 3464 case FC_PKT_LOCAL_BSY:
3484 3465 case FC_PKT_TRAN_BSY:
3485 3466 case FC_PKT_NPORT_BSY:
3486 3467 case FC_PKT_FABRIC_BSY:
3487 3468 ret = EBUSY;
3488 3469 break;
3489 3470 case FC_PKT_LS_RJT:
3490 3471 case FC_PKT_BA_RJT:
3491 3472 switch (fpkt->pkt_reason) {
3492 3473 case FC_REASON_LOGICAL_BSY:
3493 3474 ret = EBUSY;
3494 3475 break;
3495 3476 }
3496 3477 break;
3497 3478 case FC_PKT_FS_RJT:
3498 3479 switch (fpkt->pkt_reason) {
3499 3480 case FC_REASON_FS_LOGICAL_BUSY:
3500 3481 ret = EBUSY;
3501 3482 break;
3502 3483 }
3503 3484 break;
3504 3485 }
3505 3486
3506 3487 return (ret);
3507 3488 }
3508 3489
3509 3490 /*
3510 3491 * Function: fcp_ipkt_sema_callback
3511 3492 *
3512 3493 * Description: Registered as the completion callback function for the FC
3513 3494 * transport when the ipkt semaphore is used for sync. This will
3514 3495 * cleanup the used data structures, if necessary and wake up
3515 3496 * the user thread to complete the transaction.
3516 3497 *
3517 3498 * Argument: fpkt FC packet (points to the icmd)
3518 3499 *
3519 3500 * Return Value: None
3520 3501 *
3521 3502 * Context: User context only
3522 3503 */
3523 3504 static void
3524 3505 fcp_ipkt_sema_callback(struct fc_packet *fpkt)
3525 3506 {
3526 3507 struct fcp_ipkt *icmd;
3527 3508
3528 3509 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
3529 3510
3530 3511 /*
3531 3512 * Wake up user thread
3532 3513 */
3533 3514 sema_v(&(icmd->ipkt_sema));
3534 3515 }
3535 3516
3536 3517 /*
3537 3518 * Function: fcp_ipkt_sema_cleanup
3538 3519 *
3539 3520 * Description: Called to cleanup (if necessary) the data structures used
3540 3521 * when ipkt sema is used for sync. This function will detect
3541 3522 * whether the caller is the last thread (via counter) and
3542 3523 * cleanup only if necessary.
3543 3524 *
3544 3525 * Argument: icmd Internal command packet
3545 3526 *
3546 3527 * Return Value: None
3547 3528 *
3548 3529 * Context: User context only
3549 3530 */
static void
fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
{
	struct fcp_tgt *ptgt;
	struct fcp_port *pptr;

	ptgt = icmd->ipkt_tgt;
	pptr = icmd->ipkt_port;

	/*
	 * Acquire data structure
	 * NOTE(review): the function header mentions a "last thread" counter,
	 * but no counter is checked here — confirm whether that comment is
	 * stale or the check lives elsewhere.
	 */
	mutex_enter(&ptgt->tgt_mutex);

	/*
	 * Destroy semaphore (done under tgt_mutex)
	 */
	sema_destroy(&(icmd->ipkt_sema));

	/*
	 * Cleanup internal packet; fcp_icmd_free is called after dropping
	 * the target mutex.
	 */
	mutex_exit(&ptgt->tgt_mutex);
	fcp_icmd_free(pptr, icmd);
}
3575 3556
3576 3557 /*
3577 3558 * Function: fcp_port_attach
3578 3559 *
3579 3560 * Description: Called by the transport framework to resume, suspend or
3580 3561 * attach a new port.
3581 3562 *
3582 3563 * Argument: ulph Port handle
3583 3564 * *pinfo Port information
3584 3565 * cmd Command
3585 3566 * s_id Port ID
3586 3567 *
3587 3568 * Return Value: FC_FAILURE or FC_SUCCESS
3588 3569 */
3589 3570 /*ARGSUSED*/
3590 3571 static int
3591 3572 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3592 3573 fc_attach_cmd_t cmd, uint32_t s_id)
3593 3574 {
3594 3575 int instance;
3595 3576 int res = FC_FAILURE; /* default result */
3596 3577
3597 3578 ASSERT(pinfo != NULL);
3598 3579
3599 3580 instance = ddi_get_instance(pinfo->port_dip);
3600 3581
3601 3582 switch (cmd) {
3602 3583 case FC_CMD_ATTACH:
3603 3584 /*
3604 3585 * this port instance attaching for the first time (or after
3605 3586 * being detached before)
3606 3587 */
3607 3588 if (fcp_handle_port_attach(ulph, pinfo, s_id,
3608 3589 instance) == DDI_SUCCESS) {
3609 3590 res = FC_SUCCESS;
3610 3591 } else {
3611 3592 ASSERT(ddi_get_soft_state(fcp_softstate,
3612 3593 instance) == NULL);
3613 3594 }
3614 3595 break;
3615 3596
3616 3597 case FC_CMD_RESUME:
3617 3598 case FC_CMD_POWER_UP:
3618 3599 /*
3619 3600 * this port instance was attached and the suspended and
3620 3601 * will now be resumed
3621 3602 */
3622 3603 if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3623 3604 instance) == DDI_SUCCESS) {
3624 3605 res = FC_SUCCESS;
3625 3606 }
3626 3607 break;
3627 3608
3628 3609 default:
3629 3610 /* shouldn't happen */
3630 3611 FCP_TRACE(fcp_logq, "fcp",
3631 3612 fcp_trace, FCP_BUF_LEVEL_2, 0,
3632 3613 "port_attach: unknown cmdcommand: %d", cmd);
3633 3614 break;
3634 3615 }
3635 3616
3636 3617 /* return result */
3637 3618 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3638 3619 FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3639 3620
3640 3621 return (res);
3641 3622 }
3642 3623
3643 3624
3644 3625 /*
3645 3626 * detach or suspend this port instance
3646 3627 *
3647 3628 * acquires and releases the global mutex
3648 3629 *
3649 3630 * acquires and releases the mutex for this port
3650 3631 *
3651 3632 * acquires and releases the hotplug mutex for this port
3652 3633 */
3653 3634 /*ARGSUSED*/
3654 3635 static int
3655 3636 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
3656 3637 fc_detach_cmd_t cmd)
3657 3638 {
3658 3639 int flag;
3659 3640 int instance;
3660 3641 struct fcp_port *pptr;
3661 3642
3662 3643 instance = ddi_get_instance(info->port_dip);
3663 3644 pptr = ddi_get_soft_state(fcp_softstate, instance);
3664 3645
3665 3646 switch (cmd) {
3666 3647 case FC_CMD_SUSPEND:
3667 3648 FCP_DTRACE(fcp_logq, "fcp",
3668 3649 fcp_trace, FCP_BUF_LEVEL_8, 0,
3669 3650 "port suspend called for port %d", instance);
3670 3651 flag = FCP_STATE_SUSPENDED;
3671 3652 break;
3672 3653
3673 3654 case FC_CMD_POWER_DOWN:
3674 3655 FCP_DTRACE(fcp_logq, "fcp",
3675 3656 fcp_trace, FCP_BUF_LEVEL_8, 0,
3676 3657 "port power down called for port %d", instance);
3677 3658 flag = FCP_STATE_POWER_DOWN;
3678 3659 break;
3679 3660
3680 3661 case FC_CMD_DETACH:
3681 3662 FCP_DTRACE(fcp_logq, "fcp",
3682 3663 fcp_trace, FCP_BUF_LEVEL_8, 0,
3683 3664 "port detach called for port %d", instance);
3684 3665 flag = FCP_STATE_DETACHING;
3685 3666 break;
3686 3667
3687 3668 default:
3688 3669 /* shouldn't happen */
3689 3670 return (FC_FAILURE);
3690 3671 }
3691 3672 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3692 3673 FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");
3693 3674
3694 3675 return (fcp_handle_port_detach(pptr, flag, instance));
3695 3676 }
3696 3677
3697 3678
3698 3679 /*
3699 3680 * called for ioctls on the transport's devctl interface, and the transport
3700 3681 * has passed it to us
3701 3682 *
3702 3683 * this will only be called for device control ioctls (i.e. hotplugging stuff)
3703 3684 *
3704 3685 * return FC_SUCCESS if we decide to claim the ioctl,
3705 3686 * else return FC_UNCLAIMED
3706 3687 *
3707 3688 * *rval is set iff we decide to claim the ioctl
3708 3689 */
3709 3690 /*ARGSUSED*/
3710 3691 static int
3711 3692 fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
3712 3693 intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
3713 3694 {
3714 3695 int retval = FC_UNCLAIMED; /* return value */
3715 3696 struct fcp_port *pptr = NULL; /* our soft state */
3716 3697 struct devctl_iocdata *dcp = NULL; /* for devctl */
3717 3698 dev_info_t *cdip;
3718 3699 mdi_pathinfo_t *pip = NULL;
3719 3700 char *ndi_nm; /* NDI name */
3720 3701 char *ndi_addr; /* NDI addr */
3721 3702 int is_mpxio, circ;
3722 3703 int devi_entered = 0;
3723 3704 clock_t end_time;
3724 3705
3725 3706 ASSERT(rval != NULL);
3726 3707
3727 3708 FCP_DTRACE(fcp_logq, "fcp",
3728 3709 fcp_trace, FCP_BUF_LEVEL_8, 0,
3729 3710 "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);
3730 3711
3731 3712 /* if already claimed then forget it */
3732 3713 if (claimed) {
3733 3714 /*
3734 3715 * for now, if this ioctl has already been claimed, then
3735 3716 * we just ignore it
3736 3717 */
3737 3718 return (retval);
3738 3719 }
3739 3720
3740 3721 /* get our port info */
3741 3722 if ((pptr = fcp_get_port(port_handle)) == NULL) {
3742 3723 fcp_log(CE_WARN, NULL,
3743 3724 "!fcp:Invalid port handle handle in ioctl");
3744 3725 *rval = ENXIO;
3745 3726 return (retval);
3746 3727 }
3747 3728 is_mpxio = pptr->port_mpxio;
3748 3729
3749 3730 switch (cmd) {
3750 3731 case DEVCTL_BUS_GETSTATE:
3751 3732 case DEVCTL_BUS_QUIESCE:
3752 3733 case DEVCTL_BUS_UNQUIESCE:
3753 3734 case DEVCTL_BUS_RESET:
3754 3735 case DEVCTL_BUS_RESETALL:
3755 3736
3756 3737 case DEVCTL_BUS_DEV_CREATE:
3757 3738 if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3758 3739 return (retval);
3759 3740 }
3760 3741 break;
3761 3742
3762 3743 case DEVCTL_DEVICE_GETSTATE:
3763 3744 case DEVCTL_DEVICE_OFFLINE:
3764 3745 case DEVCTL_DEVICE_ONLINE:
3765 3746 case DEVCTL_DEVICE_REMOVE:
3766 3747 case DEVCTL_DEVICE_RESET:
3767 3748 if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3768 3749 return (retval);
3769 3750 }
3770 3751
3771 3752 ASSERT(dcp != NULL);
3772 3753
3773 3754 /* ensure we have a name and address */
3774 3755 if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
3775 3756 ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
3776 3757 FCP_TRACE(fcp_logq, pptr->port_instbuf,
3777 3758 fcp_trace, FCP_BUF_LEVEL_2, 0,
3778 3759 "ioctl: can't get name (%s) or addr (%s)",
3779 3760 ndi_nm ? ndi_nm : "<null ptr>",
3780 3761 ndi_addr ? ndi_addr : "<null ptr>");
3781 3762 ndi_dc_freehdl(dcp);
3782 3763 return (retval);
3783 3764 }
3784 3765
3785 3766
3786 3767 /* get our child's DIP */
3787 3768 ASSERT(pptr != NULL);
3788 3769 if (is_mpxio) {
3789 3770 mdi_devi_enter(pptr->port_dip, &circ);
3790 3771 } else {
3791 3772 ndi_devi_enter(pptr->port_dip, &circ);
3792 3773 }
3793 3774 devi_entered = 1;
3794 3775
3795 3776 if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
3796 3777 ndi_addr)) == NULL) {
3797 3778 /* Look for virtually enumerated devices. */
3798 3779 pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
3799 3780 if (pip == NULL ||
3800 3781 ((cdip = mdi_pi_get_client(pip)) == NULL)) {
3801 3782 *rval = ENXIO;
3802 3783 goto out;
3803 3784 }
3804 3785 }
3805 3786 break;
3806 3787
3807 3788 default:
3808 3789 *rval = ENOTTY;
3809 3790 return (retval);
3810 3791 }
3811 3792
3812 3793 /* this ioctl is ours -- process it */
3813 3794
3814 3795 retval = FC_SUCCESS; /* just means we claim the ioctl */
3815 3796
3816 3797 /* we assume it will be a success; else we'll set error value */
3817 3798 *rval = 0;
3818 3799
3819 3800
3820 3801 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
3821 3802 fcp_trace, FCP_BUF_LEVEL_8, 0,
3822 3803 "ioctl: claiming this one");
3823 3804
3824 3805 /* handle ioctls now */
3825 3806 switch (cmd) {
3826 3807 case DEVCTL_DEVICE_GETSTATE:
3827 3808 ASSERT(cdip != NULL);
3828 3809 ASSERT(dcp != NULL);
3829 3810 if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
3830 3811 *rval = EFAULT;
3831 3812 }
3832 3813 break;
3833 3814
3834 3815 case DEVCTL_DEVICE_REMOVE:
3835 3816 case DEVCTL_DEVICE_OFFLINE: {
3836 3817 int flag = 0;
3837 3818 int lcount;
3838 3819 int tcount;
3839 3820 struct fcp_pkt *head = NULL;
3840 3821 struct fcp_lun *plun;
3841 3822 child_info_t *cip = CIP(cdip);
3842 3823 int all = 1;
3843 3824 struct fcp_lun *tplun;
3844 3825 struct fcp_tgt *ptgt;
3845 3826
3846 3827 ASSERT(pptr != NULL);
3847 3828 ASSERT(cdip != NULL);
3848 3829
3849 3830 mutex_enter(&pptr->port_mutex);
3850 3831 if (pip != NULL) {
3851 3832 cip = CIP(pip);
3852 3833 }
3853 3834 if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3854 3835 mutex_exit(&pptr->port_mutex);
3855 3836 *rval = ENXIO;
3856 3837 break;
3857 3838 }
3858 3839
3859 3840 head = fcp_scan_commands(plun);
3860 3841 if (head != NULL) {
3861 3842 fcp_abort_commands(head, LUN_PORT);
3862 3843 }
3863 3844 lcount = pptr->port_link_cnt;
3864 3845 tcount = plun->lun_tgt->tgt_change_cnt;
3865 3846 mutex_exit(&pptr->port_mutex);
3866 3847
3867 3848 if (cmd == DEVCTL_DEVICE_REMOVE) {
3868 3849 flag = NDI_DEVI_REMOVE;
3869 3850 }
3870 3851
3871 3852 if (is_mpxio) {
3872 3853 mdi_devi_exit(pptr->port_dip, circ);
3873 3854 } else {
3874 3855 ndi_devi_exit(pptr->port_dip, circ);
3875 3856 }
3876 3857 devi_entered = 0;
3877 3858
3878 3859 *rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3879 3860 FCP_OFFLINE, lcount, tcount, flag);
3880 3861
3881 3862 if (*rval != NDI_SUCCESS) {
3882 3863 *rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
3883 3864 break;
3884 3865 }
3885 3866
3886 3867 fcp_update_offline_flags(plun);
3887 3868
3888 3869 ptgt = plun->lun_tgt;
3889 3870 mutex_enter(&ptgt->tgt_mutex);
3890 3871 for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
3891 3872 tplun->lun_next) {
3892 3873 mutex_enter(&tplun->lun_mutex);
3893 3874 if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
3894 3875 all = 0;
3895 3876 }
3896 3877 mutex_exit(&tplun->lun_mutex);
3897 3878 }
3898 3879
3899 3880 if (all) {
3900 3881 ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
3901 3882 /*
3902 3883 * The user is unconfiguring/offlining the device.
3903 3884 * If fabric and the auto configuration is set
3904 3885 * then make sure the user is the only one who
3905 3886 * can reconfigure the device.
3906 3887 */
3907 3888 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
3908 3889 fcp_enable_auto_configuration) {
3909 3890 ptgt->tgt_manual_config_only = 1;
3910 3891 }
3911 3892 }
3912 3893 mutex_exit(&ptgt->tgt_mutex);
3913 3894 break;
3914 3895 }
3915 3896
3916 3897 case DEVCTL_DEVICE_ONLINE: {
3917 3898 int lcount;
3918 3899 int tcount;
3919 3900 struct fcp_lun *plun;
3920 3901 child_info_t *cip = CIP(cdip);
3921 3902
3922 3903 ASSERT(cdip != NULL);
3923 3904 ASSERT(pptr != NULL);
3924 3905
3925 3906 mutex_enter(&pptr->port_mutex);
3926 3907 if (pip != NULL) {
3927 3908 cip = CIP(pip);
3928 3909 }
3929 3910 if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3930 3911 mutex_exit(&pptr->port_mutex);
3931 3912 *rval = ENXIO;
3932 3913 break;
3933 3914 }
3934 3915 lcount = pptr->port_link_cnt;
3935 3916 tcount = plun->lun_tgt->tgt_change_cnt;
3936 3917 mutex_exit(&pptr->port_mutex);
3937 3918
3938 3919 /*
3939 3920 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
3940 3921 * to allow the device attach to occur when the device is
3941 3922 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
3942 3923 * from the scsi_probe()).
3943 3924 */
3944 3925 mutex_enter(&LUN_TGT->tgt_mutex);
3945 3926 plun->lun_state |= FCP_LUN_ONLINING;
3946 3927 mutex_exit(&LUN_TGT->tgt_mutex);
3947 3928
3948 3929 if (is_mpxio) {
3949 3930 mdi_devi_exit(pptr->port_dip, circ);
3950 3931 } else {
3951 3932 ndi_devi_exit(pptr->port_dip, circ);
3952 3933 }
3953 3934 devi_entered = 0;
3954 3935
3955 3936 *rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3956 3937 FCP_ONLINE, lcount, tcount, 0);
3957 3938
3958 3939 if (*rval != NDI_SUCCESS) {
3959 3940 /* Reset the FCP_LUN_ONLINING bit */
3960 3941 mutex_enter(&LUN_TGT->tgt_mutex);
3961 3942 plun->lun_state &= ~FCP_LUN_ONLINING;
3962 3943 mutex_exit(&LUN_TGT->tgt_mutex);
3963 3944 *rval = EIO;
3964 3945 break;
3965 3946 }
3966 3947 mutex_enter(&LUN_TGT->tgt_mutex);
3967 3948 plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
3968 3949 FCP_LUN_ONLINING);
3969 3950 mutex_exit(&LUN_TGT->tgt_mutex);
3970 3951 break;
3971 3952 }
3972 3953
3973 3954 case DEVCTL_BUS_DEV_CREATE: {
3974 3955 uchar_t *bytes = NULL;
3975 3956 uint_t nbytes;
3976 3957 struct fcp_tgt *ptgt = NULL;
3977 3958 struct fcp_lun *plun = NULL;
3978 3959 dev_info_t *useless_dip = NULL;
3979 3960
3980 3961 *rval = ndi_dc_devi_create(dcp, pptr->port_dip,
3981 3962 DEVCTL_CONSTRUCT, &useless_dip);
3982 3963 if (*rval != 0 || useless_dip == NULL) {
3983 3964 break;
3984 3965 }
3985 3966
3986 3967 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
3987 3968 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
3988 3969 &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
3989 3970 *rval = EINVAL;
3990 3971 (void) ndi_devi_free(useless_dip);
3991 3972 if (bytes != NULL) {
3992 3973 ddi_prop_free(bytes);
3993 3974 }
3994 3975 break;
3995 3976 }
3996 3977
3997 3978 *rval = fcp_create_on_demand(pptr, bytes);
3998 3979 if (*rval == 0) {
3999 3980 mutex_enter(&pptr->port_mutex);
4000 3981 ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
4001 3982 if (ptgt) {
4002 3983 /*
4003 3984 * We now have a pointer to the target that
4004 3985 * was created. Lets point to the first LUN on
4005 3986 * this new target.
4006 3987 */
4007 3988 mutex_enter(&ptgt->tgt_mutex);
4008 3989
4009 3990 plun = ptgt->tgt_lun;
4010 3991 /*
4011 3992 * There may be stale/offline LUN entries on
4012 3993 * this list (this is by design) and so we have
4013 3994 * to make sure we point to the first online
4014 3995 * LUN
4015 3996 */
4016 3997 while (plun &&
4017 3998 plun->lun_state & FCP_LUN_OFFLINE) {
4018 3999 plun = plun->lun_next;
4019 4000 }
4020 4001
4021 4002 mutex_exit(&ptgt->tgt_mutex);
4022 4003 }
4023 4004 mutex_exit(&pptr->port_mutex);
4024 4005 }
4025 4006
4026 4007 if (*rval == 0 && ptgt && plun) {
4027 4008 mutex_enter(&plun->lun_mutex);
4028 4009 /*
4029 4010 * Allow up to fcp_lun_ready_retry seconds to
4030 4011 * configure all the luns behind the target.
4031 4012 *
4032 4013 * The intent here is to allow targets with long
4033 4014 * reboot/reset-recovery times to become available
4034 4015 * while limiting the maximum wait time for an
4035 4016 * unresponsive target.
4036 4017 */
4037 4018 end_time = ddi_get_lbolt() +
4038 4019 SEC_TO_TICK(fcp_lun_ready_retry);
4039 4020
4040 4021 while (ddi_get_lbolt() < end_time) {
4041 4022 retval = FC_SUCCESS;
4042 4023
4043 4024 /*
4044 4025 * The new ndi interfaces for on-demand creation
4045 4026 * are inflexible, Do some more work to pass on
4046 4027 * a path name of some LUN (design is broken !)
4047 4028 */
4048 4029 if (plun->lun_cip) {
4049 4030 if (plun->lun_mpxio == 0) {
4050 4031 cdip = DIP(plun->lun_cip);
4051 4032 } else {
4052 4033 cdip = mdi_pi_get_client(
4053 4034 PIP(plun->lun_cip));
4054 4035 }
4055 4036 if (cdip == NULL) {
4056 4037 *rval = ENXIO;
4057 4038 break;
4058 4039 }
4059 4040
4060 4041 if (!i_ddi_devi_attached(cdip)) {
4061 4042 mutex_exit(&plun->lun_mutex);
4062 4043 delay(drv_usectohz(1000000));
4063 4044 mutex_enter(&plun->lun_mutex);
4064 4045 } else {
4065 4046 /*
4066 4047 * This Lun is ready, lets
4067 4048 * check the next one.
4068 4049 */
4069 4050 mutex_exit(&plun->lun_mutex);
4070 4051 plun = plun->lun_next;
4071 4052 while (plun && (plun->lun_state
4072 4053 & FCP_LUN_OFFLINE)) {
4073 4054 plun = plun->lun_next;
4074 4055 }
4075 4056 if (!plun) {
4076 4057 break;
4077 4058 }
4078 4059 mutex_enter(&plun->lun_mutex);
4079 4060 }
4080 4061 } else {
4081 4062 /*
4082 4063 * lun_cip field for a valid lun
4083 4064 * should never be NULL. Fail the
4084 4065 * command.
4085 4066 */
4086 4067 *rval = ENXIO;
4087 4068 break;
4088 4069 }
4089 4070 }
4090 4071 if (plun) {
4091 4072 mutex_exit(&plun->lun_mutex);
4092 4073 } else {
4093 4074 char devnm[MAXNAMELEN];
4094 4075 int nmlen;
4095 4076
4096 4077 nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
4097 4078 ddi_node_name(cdip),
4098 4079 ddi_get_name_addr(cdip));
4099 4080
4100 4081 if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
4101 4082 0) {
4102 4083 *rval = EFAULT;
4103 4084 }
4104 4085 }
4105 4086 } else {
4106 4087 int i;
4107 4088 char buf[25];
4108 4089
4109 4090 for (i = 0; i < FC_WWN_SIZE; i++) {
4110 4091 (void) sprintf(&buf[i << 1], "%02x", bytes[i]);
4111 4092 }
4112 4093
4113 4094 fcp_log(CE_WARN, pptr->port_dip,
4114 4095 "!Failed to create nodes for pwwn=%s; error=%x",
4115 4096 buf, *rval);
4116 4097 }
4117 4098
4118 4099 (void) ndi_devi_free(useless_dip);
4119 4100 ddi_prop_free(bytes);
4120 4101 break;
4121 4102 }
4122 4103
4123 4104 case DEVCTL_DEVICE_RESET: {
4124 4105 struct fcp_lun *plun;
4125 4106 child_info_t *cip = CIP(cdip);
4126 4107
4127 4108 ASSERT(cdip != NULL);
4128 4109 ASSERT(pptr != NULL);
4129 4110 mutex_enter(&pptr->port_mutex);
4130 4111 if (pip != NULL) {
4131 4112 cip = CIP(pip);
4132 4113 }
4133 4114 if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
4134 4115 mutex_exit(&pptr->port_mutex);
4135 4116 *rval = ENXIO;
4136 4117 break;
4137 4118 }
4138 4119 mutex_exit(&pptr->port_mutex);
4139 4120
4140 4121 mutex_enter(&plun->lun_tgt->tgt_mutex);
4141 4122 if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
4142 4123 mutex_exit(&plun->lun_tgt->tgt_mutex);
4143 4124
4144 4125 *rval = ENXIO;
4145 4126 break;
4146 4127 }
4147 4128
4148 4129 if (plun->lun_sd == NULL) {
4149 4130 mutex_exit(&plun->lun_tgt->tgt_mutex);
4150 4131
4151 4132 *rval = ENXIO;
4152 4133 break;
4153 4134 }
4154 4135 mutex_exit(&plun->lun_tgt->tgt_mutex);
4155 4136
4156 4137 /*
4157 4138 * set up ap so that fcp_reset can figure out
4158 4139 * which target to reset
4159 4140 */
4160 4141 if (fcp_scsi_reset(&plun->lun_sd->sd_address,
4161 4142 RESET_TARGET) == FALSE) {
4162 4143 *rval = EIO;
4163 4144 }
4164 4145 break;
4165 4146 }
4166 4147
4167 4148 case DEVCTL_BUS_GETSTATE:
4168 4149 ASSERT(dcp != NULL);
4169 4150 ASSERT(pptr != NULL);
4170 4151 ASSERT(pptr->port_dip != NULL);
4171 4152 if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
4172 4153 NDI_SUCCESS) {
4173 4154 *rval = EFAULT;
4174 4155 }
4175 4156 break;
4176 4157
4177 4158 case DEVCTL_BUS_QUIESCE:
4178 4159 case DEVCTL_BUS_UNQUIESCE:
4179 4160 *rval = ENOTSUP;
4180 4161 break;
4181 4162
4182 4163 case DEVCTL_BUS_RESET:
4183 4164 case DEVCTL_BUS_RESETALL:
4184 4165 ASSERT(pptr != NULL);
4185 4166 (void) fcp_linkreset(pptr, NULL, KM_SLEEP);
4186 4167 break;
4187 4168
4188 4169 default:
4189 4170 ASSERT(dcp != NULL);
4190 4171 *rval = ENOTTY;
4191 4172 break;
4192 4173 }
4193 4174
4194 4175 /* all done -- clean up and return */
4195 4176 out: if (devi_entered) {
4196 4177 if (is_mpxio) {
4197 4178 mdi_devi_exit(pptr->port_dip, circ);
4198 4179 } else {
4199 4180 ndi_devi_exit(pptr->port_dip, circ);
4200 4181 }
4201 4182 }
4202 4183
4203 4184 if (dcp != NULL) {
4204 4185 ndi_dc_freehdl(dcp);
4205 4186 }
4206 4187
4207 4188 return (retval);
4208 4189 }
4209 4190
4210 4191
4211 4192 /*ARGSUSED*/
4212 4193 static int
4213 4194 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4214 4195 uint32_t claimed)
4215 4196 {
4216 4197 uchar_t r_ctl;
4217 4198 uchar_t ls_code;
4218 4199 struct fcp_port *pptr;
4219 4200
4220 4201 if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4221 4202 return (FC_UNCLAIMED);
4222 4203 }
4223 4204
4224 4205 mutex_enter(&pptr->port_mutex);
4225 4206 if (pptr->port_state & (FCP_STATE_DETACHING |
4226 4207 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4227 4208 mutex_exit(&pptr->port_mutex);
4228 4209 return (FC_UNCLAIMED);
4229 4210 }
4230 4211 mutex_exit(&pptr->port_mutex);
4231 4212
4232 4213 r_ctl = buf->ub_frame.r_ctl;
4233 4214
4234 4215 switch (r_ctl & R_CTL_ROUTING) {
4235 4216 case R_CTL_EXTENDED_SVC:
4236 4217 if (r_ctl == R_CTL_ELS_REQ) {
4237 4218 ls_code = buf->ub_buffer[0];
4238 4219
4239 4220 switch (ls_code) {
4240 4221 case LA_ELS_PRLI:
4241 4222 /*
4242 4223 * We really don't care if something fails.
4243 4224 * If the PRLI was not sent out, then the
4244 4225 * other end will time it out.
4245 4226 */
4246 4227 if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4247 4228 return (FC_SUCCESS);
4248 4229 }
4249 4230 return (FC_UNCLAIMED);
4250 4231 /* NOTREACHED */
4251 4232
4252 4233 default:
4253 4234 break;
4254 4235 }
4255 4236 }
4256 4237 /* FALLTHROUGH */
4257 4238
4258 4239 default:
4259 4240 return (FC_UNCLAIMED);
4260 4241 }
4261 4242 }
4262 4243
4263 4244
/*
 * Unsolicited-data callback from the transport.  FCP never consumes
 * unsolicited data frames, so this always declines the buffer and lets
 * the transport offer it to other ULPs.
 */
/*ARGSUSED*/
static int
fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
    uint32_t claimed)
{
	return (FC_UNCLAIMED);
}
4271 4252
4272 4253 /*
4273 4254 * Function: fcp_statec_callback
4274 4255 *
4275 4256 * Description: The purpose of this function is to handle a port state change.
4276 4257 * It is called from fp/fctl and, in a few instances, internally.
4277 4258 *
4278 4259 * Argument: ulph fp/fctl port handle
4279 4260 * port_handle fcp_port structure
4280 4261 * port_state Physical state of the port
4281 4262 * port_top Topology
4282 4263 * *devlist Pointer to the first entry of a table
4283 4264 * containing the remote ports that can be
4284 4265 * reached.
4285 4266 * dev_cnt Number of entries pointed by devlist.
4286 4267 * port_sid Port ID of the local port.
4287 4268 *
4288 4269 * Return Value: None
4289 4270 */
/*ARGSUSED*/
static void
fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
    uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
    uint32_t dev_cnt, uint32_t port_sid)
{
	uint32_t		link_count;
	int			map_len = 0;	/* bytes in map_tag, for kmem_free */
	struct fcp_port	*pptr;
	fcp_map_tag_t	*map_tag = NULL;	/* per-device change counts */

	if ((pptr = fcp_get_port(port_handle)) == NULL) {
		fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
		return;			/* nothing to work with! */
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "fcp_statec_callback: port state/dev_cnt/top ="
	    "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
	    dev_cnt, port_top);

	mutex_enter(&pptr->port_mutex);

	/*
	 * If a thread is in detach, don't do anything.
	 */
	if (pptr->port_state & (FCP_STATE_DETACHING |
	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
		mutex_exit(&pptr->port_mutex);
		return;
	}

	/*
	 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
	 * init_pkt is called, it knows whether or not the target's status
	 * (or pd) might be changing.
	 */

	if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
		pptr->port_state |= FCP_STATE_IN_CB_DEVC;
	}

	/*
	 * the transport doesn't allocate or probe unless being
	 * asked to by either the applications or ULPs
	 *
	 * in cases where the port is OFFLINE at the time of port
	 * attach callback and the link comes ONLINE later, for
	 * easier automatic node creation (i.e. without you having to
	 * go out and run the utility to perform LOGINs) the
	 * following conditional is helpful
	 */
	pptr->port_phys_state = port_state;

	if (dev_cnt) {
		/*
		 * Allocate one tag slot per remote port; the port mutex is
		 * dropped around the allocation since KM_NOSLEEP is used
		 * from interrupt-ish context and we must not hold the
		 * mutex across a potentially failing allocation path.
		 */
		mutex_exit(&pptr->port_mutex);

		map_len = sizeof (*map_tag) * dev_cnt;
		map_tag = kmem_alloc(map_len, KM_NOSLEEP);
		if (map_tag == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!fcp%d: failed to allocate for map tags; "
			    " state change will not be processed",
			    pptr->port_instance);

			mutex_enter(&pptr->port_mutex);
			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
			mutex_exit(&pptr->port_mutex);

			return;
		}

		mutex_enter(&pptr->port_mutex);
	}

	if (pptr->port_id != port_sid) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
		    port_sid);
		/*
		 * The local port changed ID. It is the first time a port ID
		 * is assigned or something drastic happened.  We might have
		 * been unplugged and replugged on another loop or fabric port
		 * or somebody grabbed the AL_PA we had or somebody rezoned
		 * the fabric we were plugged into.
		 */
		pptr->port_id = port_sid;
	}

	switch (FC_PORT_STATE_MASK(port_state)) {
	case FC_STATE_OFFLINE:
	case FC_STATE_RESET_REQUESTED:
		/*
		 * link has gone from online to offline -- just update the
		 * state of this port to BUSY and MARKed to go offline
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "link went offline");
		if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
			/*
			 * We were offline a while ago and this one
			 * seems to indicate that the loop has gone
			 * dead forever.
			 */
			pptr->port_tmp_cnt += dev_cnt;
			pptr->port_state &= ~FCP_STATE_OFFLINE;
			pptr->port_state |= FCP_STATE_INIT;
			link_count = pptr->port_link_cnt;
			fcp_handle_devices(pptr, devlist, dev_cnt,
			    link_count, map_tag, FCP_CAUSE_LINK_DOWN);
		} else {
			pptr->port_link_cnt++;
			ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
			fcp_update_state(pptr, (FCP_LUN_BUSY |
			    FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
			if (pptr->port_mpxio) {
				fcp_update_mpxio_path_verifybusy(pptr);
			}
			pptr->port_state |= FCP_STATE_OFFLINE;
			pptr->port_state &=
			    ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
			pptr->port_tmp_cnt = 0;
		}
		mutex_exit(&pptr->port_mutex);
		break;

	case FC_STATE_ONLINE:
	case FC_STATE_LIP:
	case FC_STATE_LIP_LBIT_SET:
		/*
		 * link has gone from offline to online
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "link went online");

		pptr->port_link_cnt++;

		/*
		 * Drain all outstanding internal packets before acting on
		 * the new link state; polls at 1-second intervals with the
		 * port mutex dropped so completions can make progress.
		 */
		while (pptr->port_ipkt_cnt) {
			mutex_exit(&pptr->port_mutex);
			delay(drv_usectohz(1000000));
			mutex_enter(&pptr->port_mutex);
		}

		pptr->port_topology = port_top;

		/*
		 * The state of the targets and luns accessible through this
		 * port is updated.
		 */
		fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
		    FCP_CAUSE_LINK_CHANGE);

		pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
		pptr->port_state |= FCP_STATE_ONLINING;
		pptr->port_tmp_cnt = dev_cnt;
		link_count = pptr->port_link_cnt;

		pptr->port_deadline = fcp_watchdog_time +
		    FCP_ICMD_DEADLINE;

		if (!dev_cnt) {
			/*
			 * We go directly to the online state if no remote
			 * ports were discovered.
			 */
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "No remote ports discovered");

			pptr->port_state &= ~FCP_STATE_ONLINING;
			pptr->port_state |= FCP_STATE_ONLINE;
		}

		switch (port_top) {
		case FC_TOP_FABRIC:
		case FC_TOP_PUBLIC_LOOP:
		case FC_TOP_PRIVATE_LOOP:
		case FC_TOP_PT_PT:

			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
				fcp_retry_ns_registry(pptr, port_sid);
			}

			fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
			    map_tag, FCP_CAUSE_LINK_CHANGE);
			break;

		default:
			/*
			 * We got here because we were provided with an unknown
			 * topology.
			 */
			if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
				pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
			}

			pptr->port_tmp_cnt -= dev_cnt;
			fcp_log(CE_WARN, pptr->port_dip,
			    "!unknown/unsupported topology (0x%x)", port_top);
			break;
		}
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "Notify ssd of the reset to reinstate the reservations");

		scsi_hba_reset_notify_callback(&pptr->port_mutex,
		    &pptr->port_reset_notify_listf);

		mutex_exit(&pptr->port_mutex);

		break;

	case FC_STATE_RESET:
		ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "RESET state, waiting for Offline/Online state_cb");
		mutex_exit(&pptr->port_mutex);
		break;

	case FC_STATE_DEVICE_CHANGE:
		/*
		 * We come here when an application has requested
		 * Dynamic node creation/deletion in Fabric connectivity.
		 */
		if (pptr->port_state & (FCP_STATE_OFFLINE |
		    FCP_STATE_INIT)) {
			/*
			 * This case can happen when the FCTL is in the
			 * process of giving us on online and the host on
			 * the other side issues a PLOGI/PLOGO. Ideally
			 * the state changes should be serialized unless
			 * they are opposite (online-offline).
			 * The transport will give us a final state change
			 * so we can ignore this for the time being.
			 */
			pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
			mutex_exit(&pptr->port_mutex);
			break;
		}

		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
			fcp_retry_ns_registry(pptr, port_sid);
		}

		/*
		 * Extend the deadline under steady state conditions
		 * to provide more time for the device-change-commands
		 */
		if (!pptr->port_ipkt_cnt) {
			pptr->port_deadline = fcp_watchdog_time +
			    FCP_ICMD_DEADLINE;
		}

		/*
		 * There is another race condition here, where if we were
		 * in ONLINEING state and a devices in the map logs out,
		 * fp will give another state change as DEVICE_CHANGE
		 * and OLD. This will result in that target being offlined.
		 * The pd_handle is freed. If from the first statec callback
		 * we were going to fire a PLOGI/PRLI, the system will
		 * panic in fc_ulp_transport with invalid pd_handle.
		 * The fix is to check for the link_cnt before issuing
		 * any command down.
		 */
		fcp_update_targets(pptr, devlist, dev_cnt,
		    FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);

		link_count = pptr->port_link_cnt;

		fcp_handle_devices(pptr, devlist, dev_cnt,
		    link_count, map_tag, FCP_CAUSE_TGT_CHANGE);

		pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;

		mutex_exit(&pptr->port_mutex);
		break;

	case FC_STATE_TARGET_PORT_RESET:
		if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
			fcp_retry_ns_registry(pptr, port_sid);
		}

		/* Do nothing else */
		mutex_exit(&pptr->port_mutex);
		break;

	default:
		fcp_log(CE_WARN, pptr->port_dip,
		    "!Invalid state change=0x%x", port_state);
		mutex_exit(&pptr->port_mutex);
		break;
	}

	/* every path above has dropped port_mutex by now */
	if (map_tag) {
		kmem_free(map_tag, map_len);
	}
}
4592 4573
4593 4574 /*
4594 4575 * Function: fcp_handle_devices
4595 4576 *
4596 4577 * Description: This function updates the devices currently known by
4597 4578 * walking the list provided by the caller. The list passed
4598 4579 * by the caller is supposed to be the list of reachable
4599 4580 * devices.
4600 4581 *
4601 4582 * Argument: *pptr Fcp port structure.
4602 4583 * *devlist Pointer to the first entry of a table
4603 4584 * containing the remote ports that can be
4604 4585 * reached.
4605 4586 * dev_cnt Number of entries pointed by devlist.
4606 4587 * link_cnt Link state count.
4607 4588 * *map_tag Array of fcp_map_tag_t structures.
4608 4589 * cause What caused this function to be called.
4609 4590 *
4610 4591 * Return Value: None
4611 4592 *
4612 4593 * Notes: The pptr->port_mutex must be held.
4613 4594 */
4614 4595 static void
4615 4596 fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
4616 4597 uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
4617 4598 {
4618 4599 int i;
4619 4600 int check_finish_init = 0;
4620 4601 fc_portmap_t *map_entry;
4621 4602 struct fcp_tgt *ptgt = NULL;
4622 4603
4623 4604 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4624 4605 fcp_trace, FCP_BUF_LEVEL_3, 0,
4625 4606 "fcp_handle_devices: called for %d dev(s)", dev_cnt);
4626 4607
4627 4608 if (dev_cnt) {
4628 4609 ASSERT(map_tag != NULL);
4629 4610 }
4630 4611
4631 4612 /*
4632 4613 * The following code goes through the list of remote ports that are
4633 4614 * accessible through this (pptr) local port (The list walked is the
4634 4615 * one provided by the caller which is the list of the remote ports
4635 4616 * currently reachable). It checks if any of them was already
4636 4617 * known by looking for the corresponding target structure based on
4637 4618 * the world wide name. If a target is part of the list it is tagged
4638 4619 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
4639 4620 *
4640 4621 * Old comment
4641 4622 * -----------
4642 4623 * Before we drop port mutex; we MUST get the tags updated; This
4643 4624 * two step process is somewhat slow, but more reliable.
4644 4625 */
4645 4626 for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4646 4627 map_entry = &(devlist[i]);
4647 4628
4648 4629 /*
4649 4630 * get ptr to this map entry in our port's
4650 4631 * list (if any)
4651 4632 */
4652 4633 ptgt = fcp_lookup_target(pptr,
4653 4634 (uchar_t *)&(map_entry->map_pwwn));
4654 4635
4655 4636 if (ptgt) {
4656 4637 map_tag[i] = ptgt->tgt_change_cnt;
4657 4638 if (cause == FCP_CAUSE_LINK_CHANGE) {
4658 4639 ptgt->tgt_aux_state = FCP_TGT_TAGGED;
4659 4640 }
4660 4641 }
4661 4642 }
4662 4643
4663 4644 /*
4664 4645 * At this point we know which devices of the new list were already
4665 4646 * known (The field tgt_aux_state of the target structure has been
4666 4647 * set to FCP_TGT_TAGGED).
4667 4648 *
4668 4649 * The following code goes through the list of targets currently known
4669 4650 * by the local port (the list is actually a hashing table). If a
4670 4651 * target is found and is not tagged, it means the target cannot
4671 4652 * be reached anymore through the local port (pptr). It is offlined.
4672 4653 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
4673 4654 */
4674 4655 for (i = 0; i < FCP_NUM_HASH; i++) {
4675 4656 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
4676 4657 ptgt = ptgt->tgt_next) {
4677 4658 mutex_enter(&ptgt->tgt_mutex);
4678 4659 if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
4679 4660 (cause == FCP_CAUSE_LINK_CHANGE) &&
4680 4661 !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4681 4662 fcp_offline_target_now(pptr, ptgt,
4682 4663 link_cnt, ptgt->tgt_change_cnt, 0);
4683 4664 }
4684 4665 mutex_exit(&ptgt->tgt_mutex);
4685 4666 }
4686 4667 }
4687 4668
4688 4669 /*
4689 4670 * At this point, the devices that were known but cannot be reached
4690 4671 * anymore, have most likely been offlined.
4691 4672 *
4692 4673 * The following section of code seems to go through the list of
4693 4674 * remote ports that can now be reached. For every single one it
4694 4675 * checks if it is already known or if it is a new port.
4695 4676 */
4696 4677 for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4697 4678
4698 4679 if (check_finish_init) {
4699 4680 ASSERT(i > 0);
4700 4681 (void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4701 4682 map_tag[i - 1], cause);
4702 4683 check_finish_init = 0;
4703 4684 }
4704 4685
4705 4686 /* get a pointer to this map entry */
4706 4687 map_entry = &(devlist[i]);
4707 4688
4708 4689 /*
4709 4690 * Check for the duplicate map entry flag. If we have marked
4710 4691 * this entry as a duplicate we skip it since the correct
4711 4692 * (perhaps even same) state change will be encountered
4712 4693 * later in the list.
4713 4694 */
4714 4695 if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
4715 4696 continue;
4716 4697 }
4717 4698
4718 4699 /* get ptr to this map entry in our port's list (if any) */
4719 4700 ptgt = fcp_lookup_target(pptr,
4720 4701 (uchar_t *)&(map_entry->map_pwwn));
4721 4702
4722 4703 if (ptgt) {
4723 4704 /*
4724 4705 * This device was already known. The field
4725 4706 * tgt_aux_state is reset (was probably set to
4726 4707 * FCP_TGT_TAGGED previously in this routine).
4727 4708 */
4728 4709 ptgt->tgt_aux_state = 0;
4729 4710 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4730 4711 fcp_trace, FCP_BUF_LEVEL_3, 0,
4731 4712 "handle_devices: map did/state/type/flags = "
4732 4713 "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
4733 4714 "tgt_state=%d",
4734 4715 map_entry->map_did.port_id, map_entry->map_state,
4735 4716 map_entry->map_type, map_entry->map_flags,
4736 4717 ptgt->tgt_d_id, ptgt->tgt_state);
4737 4718 }
4738 4719
4739 4720 if (map_entry->map_type == PORT_DEVICE_OLD ||
4740 4721 map_entry->map_type == PORT_DEVICE_NEW ||
4741 4722 map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED ||
4742 4723 map_entry->map_type == PORT_DEVICE_CHANGED) {
4743 4724 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4744 4725 fcp_trace, FCP_BUF_LEVEL_2, 0,
4745 4726 "map_type=%x, did = %x",
4746 4727 map_entry->map_type,
4747 4728 map_entry->map_did.port_id);
4748 4729 }
4749 4730
4750 4731 switch (map_entry->map_type) {
4751 4732 case PORT_DEVICE_NOCHANGE:
4752 4733 case PORT_DEVICE_USER_CREATE:
4753 4734 case PORT_DEVICE_USER_LOGIN:
4754 4735 case PORT_DEVICE_NEW:
4755 4736 case PORT_DEVICE_REPORTLUN_CHANGED:
4756 4737 FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);
4757 4738
4758 4739 if (fcp_handle_mapflags(pptr, ptgt, map_entry,
4759 4740 link_cnt, (ptgt) ? map_tag[i] : 0,
4760 4741 cause) == TRUE) {
4761 4742
4762 4743 FCP_TGT_TRACE(ptgt, map_tag[i],
4763 4744 FCP_TGT_TRACE_2);
4764 4745 check_finish_init++;
4765 4746 }
4766 4747 break;
4767 4748
4768 4749 case PORT_DEVICE_OLD:
4769 4750 if (ptgt != NULL) {
4770 4751 FCP_TGT_TRACE(ptgt, map_tag[i],
4771 4752 FCP_TGT_TRACE_3);
4772 4753
4773 4754 mutex_enter(&ptgt->tgt_mutex);
4774 4755 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4775 4756 /*
4776 4757 * Must do an in-line wait for I/Os
4777 4758 * to get drained
4778 4759 */
4779 4760 mutex_exit(&ptgt->tgt_mutex);
4780 4761 mutex_exit(&pptr->port_mutex);
4781 4762
4782 4763 mutex_enter(&ptgt->tgt_mutex);
4783 4764 while (ptgt->tgt_ipkt_cnt ||
4784 4765 fcp_outstanding_lun_cmds(ptgt)
4785 4766 == FC_SUCCESS) {
4786 4767 mutex_exit(&ptgt->tgt_mutex);
4787 4768 delay(drv_usectohz(1000000));
4788 4769 mutex_enter(&ptgt->tgt_mutex);
4789 4770 }
4790 4771 mutex_exit(&ptgt->tgt_mutex);
4791 4772
4792 4773 mutex_enter(&pptr->port_mutex);
4793 4774 mutex_enter(&ptgt->tgt_mutex);
4794 4775
4795 4776 (void) fcp_offline_target(pptr, ptgt,
4796 4777 link_cnt, map_tag[i], 0, 0);
4797 4778 }
4798 4779 mutex_exit(&ptgt->tgt_mutex);
4799 4780 }
4800 4781 check_finish_init++;
4801 4782 break;
4802 4783
4803 4784 case PORT_DEVICE_USER_DELETE:
4804 4785 case PORT_DEVICE_USER_LOGOUT:
4805 4786 if (ptgt != NULL) {
4806 4787 FCP_TGT_TRACE(ptgt, map_tag[i],
4807 4788 FCP_TGT_TRACE_4);
4808 4789
4809 4790 mutex_enter(&ptgt->tgt_mutex);
4810 4791 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4811 4792 (void) fcp_offline_target(pptr, ptgt,
4812 4793 link_cnt, map_tag[i], 1, 0);
4813 4794 }
4814 4795 mutex_exit(&ptgt->tgt_mutex);
4815 4796 }
4816 4797 check_finish_init++;
4817 4798 break;
4818 4799
4819 4800 case PORT_DEVICE_CHANGED:
4820 4801 if (ptgt != NULL) {
4821 4802 FCP_TGT_TRACE(ptgt, map_tag[i],
4822 4803 FCP_TGT_TRACE_5);
4823 4804
4824 4805 if (fcp_device_changed(pptr, ptgt,
4825 4806 map_entry, link_cnt, map_tag[i],
4826 4807 cause) == TRUE) {
4827 4808 check_finish_init++;
4828 4809 }
4829 4810 } else {
4830 4811 if (fcp_handle_mapflags(pptr, ptgt,
4831 4812 map_entry, link_cnt, 0, cause) == TRUE) {
4832 4813 check_finish_init++;
4833 4814 }
4834 4815 }
4835 4816 break;
4836 4817
4837 4818 default:
4838 4819 fcp_log(CE_WARN, pptr->port_dip,
4839 4820 "!Invalid map_type=0x%x", map_entry->map_type);
4840 4821 check_finish_init++;
4841 4822 break;
4842 4823 }
4843 4824 }
4844 4825
4845 4826 if (check_finish_init && pptr->port_link_cnt == link_cnt) {
4846 4827 ASSERT(i > 0);
4847 4828 (void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4848 4829 map_tag[i-1], cause);
4849 4830 } else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
4850 4831 fcp_offline_all(pptr, link_cnt, cause);
4851 4832 }
4852 4833 }
4853 4834
/*
 * Function: fcp_handle_reportlun_changed
 *
 * Description: Reacts to a REPORT_LUNS-changed condition on a target by
 *		(re)priming LUN 0 and issuing a fresh SCMD_REPORT_LUN to it.
 *		If no fcp_lun exists yet for LUN 0 one is allocated.  The
 *		rest of the (re)discovery is driven by the completion of
 *		the REPORT LUN command sent here.
 *
 * Argument:	*ptgt		Target whose LUN inventory changed.
 *		cause		What caused this function to be called.
 *
 * Return Value: TRUE	Failed (LUN 0 allocation or command send failed).
 *		 FALSE	REPORT LUN successfully issued.
 */
static int
fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause)
{
	struct fcp_lun	*plun;
	struct fcp_port	*pptr;
	int		rscn_count;
	int		lun0_newalloc;
	int		ret = TRUE;

	ASSERT(ptgt);
	pptr = ptgt->tgt_port;
	lun0_newalloc = 0;
	if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
		/*
		 * no LUN struct for LUN 0 yet exists,
		 * so create one
		 */
		plun = fcp_alloc_lun(ptgt);
		if (plun == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to allocate lun 0 for"
			    " D_ID=%x", ptgt->tgt_d_id);
			return (ret);
		}
		lun0_newalloc = 1;
	}

	mutex_enter(&ptgt->tgt_mutex);
	/*
	 * consider lun 0 as device not connected if it is
	 * offlined or newly allocated
	 */
	if ((plun->lun_state & FCP_LUN_OFFLINE) || lun0_newalloc) {
		plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
	}
	/* Mark LUN 0 busy and bring it out of the offline state. */
	plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
	plun->lun_state &= ~FCP_LUN_OFFLINE;
	/* Restart LUN accounting: only LUN 0 is known until REPORT LUN. */
	ptgt->tgt_lun_cnt = 1;
	ptgt->tgt_report_lun_cnt = 0;
	mutex_exit(&ptgt->tgt_mutex);

	/* Snapshot the RSCN count so the response can be validated later. */
	rscn_count = fc_ulp_get_rscn_count(pptr->port_fp_handle);
	if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
	    sizeof (struct fcp_reportlun_resp), pptr->port_link_cnt,
	    ptgt->tgt_change_cnt, cause, rscn_count) != DDI_SUCCESS) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!Failed to send REPORTLUN "
		    "to D_ID=%x", ptgt->tgt_d_id);
	} else {
		ret = FALSE;
	}

	return (ret);
}
4908 4889
4909 4890 /*
4910 4891 * Function: fcp_handle_mapflags
4911 4892 *
4912 4893 * Description: This function creates a target structure if the ptgt passed
4913 4894 * is NULL. It also kicks off the PLOGI if we are not logged
4914 4895 * into the target yet or the PRLI if we are logged into the
4915 4896 * target already. The rest of the treatment is done in the
4916 4897 * callbacks of the PLOGI or PRLI.
4917 4898 *
4918 4899 * Argument: *pptr FCP Port structure.
4919 4900 * *ptgt Target structure.
4920 4901 * *map_entry Array of fc_portmap_t structures.
4921 4902 * link_cnt Link state count.
4922 4903 * tgt_cnt Target state count.
4923 4904 * cause What caused this function to be called.
4924 4905 *
4925 4906 * Return Value: TRUE Failed
4926 4907 * FALSE Succeeded
4927 4908 *
4928 4909 * Notes: pptr->port_mutex must be owned.
4929 4910 */
static int
fcp_handle_mapflags(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
{
	int		lcount;
	int		tcount;
	int		ret = TRUE;
	int		alloc;
	struct fcp_ipkt	*icmd;
	struct fcp_lun	*pseq_lun = NULL;
	uchar_t		opcode;
	int		valid_ptgt_was_passed = FALSE;

	ASSERT(mutex_owned(&pptr->port_mutex));

	/*
	 * This case is possible where the FCTL has come up and done discovery
	 * before FCP was loaded and attached. FCTL would have discovered the
	 * devices and later the ULP came online. In this case ULP's would get
	 * PORT_DEVICE_NOCHANGE but target would be NULL.
	 */
	if (ptgt == NULL) {
		/* don't already have a target */
		/* port_mutex must be dropped across the allocation */
		mutex_exit(&pptr->port_mutex);
		ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
		mutex_enter(&pptr->port_mutex);

		if (ptgt == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!FC target allocation failed");
			return (ret);
		}
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_statec_cause = cause;
		ptgt->tgt_tmp_cnt = 1;
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		valid_ptgt_was_passed = TRUE;
	}

	/*
	 * Copy in the target parameters
	 */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_d_id = map_entry->map_did.port_id;
	ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
	ptgt->tgt_pd_handle = map_entry->map_pd;
	ptgt->tgt_fca_dev = NULL;

	/* Copy port and node WWNs */
	bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
	    FC_WWN_SIZE);
	bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
	    FC_WWN_SIZE);

	/*
	 * For an unchanged, logged-in, already-known target we may be able
	 * to skip rediscovery entirely -- unless it carries an online tape
	 * (sequential) LUN, in which case we bail out below.
	 */
	if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
	    (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
	    (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
	    valid_ptgt_was_passed) {
		/*
		 * determine if there are any tape LUNs on this target
		 */
		for (pseq_lun = ptgt->tgt_lun;
		    pseq_lun != NULL;
		    pseq_lun = pseq_lun->lun_next) {
			if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
			    !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
				fcp_update_tgt_state(ptgt, FCP_RESET,
				    FCP_LUN_MARK);
				mutex_exit(&ptgt->tgt_mutex);
				/* TRUE: skip this target */
				return (ret);
			}
		}
	}

	/*
	 * if UA'REPORT_LUN_CHANGED received,
	 * send out REPORT LUN promptly, skip PLOGI/PRLI process
	 */
	if (map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED) {
		ptgt->tgt_state &= ~(FCP_TGT_OFFLINE | FCP_TGT_MARK);
		/* both mutexes are dropped while sending the REPORT LUN */
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		ret = fcp_handle_reportlun_changed(ptgt, cause);

		mutex_enter(&pptr->port_mutex);
		return (ret);
	}

	/*
	 * If ptgt was NULL when this function was entered, then tgt_node_state
	 * was never specifically initialized but zeroed out which means
	 * FCP_TGT_NODE_NONE.
	 */
	switch (ptgt->tgt_node_state) {
	case FCP_TGT_NODE_NONE:
	case FCP_TGT_NODE_ON_DEMAND:
		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    !fcp_enable_auto_configuration &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    fcp_enable_auto_configuration &&
		    (ptgt->tgt_manual_config_only == 1) &&
		    map_entry->map_type != PORT_DEVICE_USER_CREATE) {
			/*
			 * If auto configuration is set and
			 * the tgt_manual_config_only flag is set then
			 * we only want the user to be able to change
			 * the state through create_on_demand.
			 */
			ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
		} else {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
		break;

	case FCP_TGT_NODE_PRESENT:
		break;
	}
	/*
	 * If we are booting from a fabric device, make sure we
	 * mark the node state appropriately for this target to be
	 * enumerated
	 */
	if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
		if (bcmp((caddr_t)pptr->port_boot_wwn,
		    (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
		    sizeof (ptgt->tgt_port_wwn)) == 0) {
			ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
		}
	}
	/* tgt_mutex is dropped across the trace call and retaken below */
	mutex_exit(&ptgt->tgt_mutex);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
	    map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
	    map_entry->map_rscn_info.ulp_rscn_count);

	mutex_enter(&ptgt->tgt_mutex);

	/*
	 * Reset target OFFLINE state and mark the target BUSY
	 */
	ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
	ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);

	tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
	lcount = link_cnt;

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/*
	 * if we are already logged in, then we do a PRLI, else
	 * we do a PLOGI first (to get logged in)
	 *
	 * We will not check if we are the PLOGI initiator
	 */
	opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
	    map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;

	/* one buffer size serves either ELS payload */
	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));

	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA, lcount, tcount,
	    cause, map_entry->map_rscn_info.ulp_rscn_count);

	if (icmd == NULL) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
		/*
		 * We've exited port_mutex before calling fcp_icmd_alloc,
		 * we need to make sure we reacquire it before returning.
		 */
		mutex_enter(&pptr->port_mutex);
		return (FALSE);
	}

	/* TRUE is only returned while target is intended skipped */
	ret = FALSE;
	/* discover info about this target */
	if ((fcp_send_els(pptr, ptgt, icmd, opcode,
	    lcount, tcount, cause)) == DDI_SUCCESS) {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
	} else {
		/* the send failed; release the packet we allocated */
		fcp_icmd_free(pptr, icmd);
		ret = TRUE;
	}
	mutex_enter(&pptr->port_mutex);

	return (ret);
}
5124 5105
5125 5106 /*
5126 5107 * Function: fcp_send_els
5127 5108 *
5128 5109 * Description: Sends an ELS to the target specified by the caller. Supports
5129 5110 * PLOGI and PRLI.
5130 5111 *
5131 5112 * Argument: *pptr Fcp port.
5132 5113 * *ptgt Target to send the ELS to.
5133 5114 * *icmd Internal packet
5134 5115 * opcode ELS opcode
5135 5116 * lcount Link state change counter
5136 5117 * tcount Target state change counter
5137 5118 * cause What caused the call
5138 5119 *
5139 5120 * Return Value: DDI_SUCCESS
5140 5121 * Others
5141 5122 */
static int
fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
{
	fc_packet_t	*fpkt;
	fc_frame_hdr_t	*hp;
	int		internal = 0;	/* nonzero: icmd was allocated here */
	int		alloc;
	int		cmd_len;
	int		resp_len;
	int		res = DDI_FAILURE; /* default result */
	int		rval = DDI_FAILURE;

	ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
	ASSERT(ptgt->tgt_port == pptr);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
	    (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");

	/* command and response payloads are the same size for either ELS */
	if (opcode == LA_ELS_PLOGI) {
		cmd_len = sizeof (la_els_logi_t);
		resp_len = sizeof (la_els_logi_t);
	} else {
		ASSERT(opcode == LA_ELS_PRLI);
		cmd_len = sizeof (la_els_prli_t);
		resp_len = sizeof (la_els_prli_t);
	}

	/*
	 * If the caller did not supply an internal packet, allocate one
	 * here and remember that fact so we free it on failure below.
	 */
	if (icmd == NULL) {
		alloc = FCP_MAX(sizeof (la_els_logi_t),
		    sizeof (la_els_prli_t));
		icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
		    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
		    lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
		if (icmd == NULL) {
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
			return (res);
		}
		internal++;
	}
	fpkt = icmd->ipkt_fpkt;

	fpkt->pkt_cmdlen = cmd_len;
	fpkt->pkt_rsplen = resp_len;
	fpkt->pkt_datalen = 0;
	icmd->ipkt_retries = 0;

	/* fill in fpkt info */
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;

	/* get ptr to frame hdr in fpkt */
	hp = &fpkt->pkt_cmd_fhdr;

	/*
	 * fill in frame hdr
	 */
	hp->r_ctl = R_CTL_ELS_REQ;
	hp->s_id = pptr->port_id;	/* source ID */
	hp->d_id = ptgt->tgt_d_id;	/* dest ID */
	hp->type = FC_TYPE_EXTENDED_LS;
	hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
	hp->seq_id = 0;
	hp->rsvd = 0;
	hp->df_ctl = 0;
	hp->seq_cnt = 0;
	hp->ox_id = 0xffff;	/* i.e. none */
	hp->rx_id = 0xffff;	/* i.e. none */
	hp->ro = 0;

	/*
	 * at this point we have a filled in cmd pkt
	 *
	 * fill in the respective info, then use the transport to send
	 * the packet
	 *
	 * for a PLOGI call fc_ulp_login(), and
	 * for a PRLI call fc_ulp_issue_els()
	 */
	switch (opcode) {
	case LA_ELS_PLOGI: {
		struct la_els_logi logi;

		bzero(&logi, sizeof (struct la_els_logi));

		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;
		logi.ls_code.ls_code = LA_ELS_PLOGI;
		logi.ls_code.mbz = 0;

		FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));

		icmd->ipkt_opcode = LA_ELS_PLOGI;

		/*
		 * Only send if no link/target state change occurred since
		 * the packet was set up; otherwise drop out quietly.
		 */
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

			mutex_exit(&pptr->port_mutex);

			rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}

			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);

			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PLOGI");
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "fcp_send_els1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
		}
		break;
	}

	case LA_ELS_PRLI: {
		struct la_els_prli prli;
		struct fcp_prli *fprli;

		bzero(&prli, sizeof (struct la_els_prli));

		hp = &fpkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_ELS_REQ;

		/* fill in PRLI cmd ELS fields */
		prli.ls_code = LA_ELS_PRLI;
		prli.page_length = 0x10;	/* huh? */
		prli.payload_length = sizeof (struct la_els_prli);

		icmd->ipkt_opcode = LA_ELS_PRLI;

		/* get ptr to PRLI service params */
		fprli = (struct fcp_prli *)prli.service_params;

		/* fill in service params */
		fprli->type = 0x08;
		fprli->resvd1 = 0;
		fprli->orig_process_assoc_valid = 0;
		fprli->resp_process_assoc_valid = 0;
		fprli->establish_image_pair = 1;
		fprli->resvd2 = 0;
		fprli->resvd3 = 0;
		fprli->obsolete_1 = 0;
		fprli->obsolete_2 = 0;
		fprli->data_overlay_allowed = 0;
		fprli->initiator_fn = 1;
		fprli->confirmed_compl_allowed = 1;

		/* advertise target function if the target ULP is loaded */
		if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
			fprli->target_fn = 1;
		} else {
			fprli->target_fn = 0;
		}

		fprli->retry = 1;
		fprli->read_xfer_rdy_disabled = 1;
		fprli->write_xfer_rdy_disabled = 0;

		FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));

		/* issue the PRLI request */

		/*
		 * As with PLOGI: re-check for a state change under
		 * port_mutex before handing the packet to the transport.
		 */
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {

			mutex_exit(&pptr->port_mutex);

			rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
			if (rval == FC_SUCCESS) {
				res = DDI_SUCCESS;
				break;
			}

			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);

			res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
			    rval, "PRLI");
		} else {
			mutex_exit(&pptr->port_mutex);
			FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
		}
		break;
	}

	default:
		fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
		break;
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_send_els: returning %d", res);

	/* free the packet only if we allocated it in this function */
	if (res != DDI_SUCCESS) {
		if (internal) {
			fcp_icmd_free(pptr, icmd);
		}
	}

	return (res);
}
5353 5334
5354 5335
5355 5336 /*
5356 5337 * called internally update the state of all of the tgts and each LUN
5357 5338 * for this port (i.e. each target known to be attached to this port)
5358 5339 * if they are not already offline
5359 5340 *
5360 5341 * must be called with the port mutex owned
5361 5342 *
5362 5343 * acquires and releases the target mutexes for each target attached
5363 5344 * to this port
5364 5345 */
5365 5346 void
5366 5347 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5367 5348 {
5368 5349 int i;
5369 5350 struct fcp_tgt *ptgt;
5370 5351
5371 5352 ASSERT(mutex_owned(&pptr->port_mutex));
5372 5353
5373 5354 for (i = 0; i < FCP_NUM_HASH; i++) {
5374 5355 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5375 5356 ptgt = ptgt->tgt_next) {
5376 5357 mutex_enter(&ptgt->tgt_mutex);
5377 5358 fcp_update_tgt_state(ptgt, FCP_SET, state);
5378 5359 ptgt->tgt_change_cnt++;
5379 5360 ptgt->tgt_statec_cause = cause;
5380 5361 ptgt->tgt_tmp_cnt = 1;
5381 5362 ptgt->tgt_done = 0;
5382 5363 mutex_exit(&ptgt->tgt_mutex);
5383 5364 }
5384 5365 }
5385 5366 }
5386 5367
5387 5368
5388 5369 static void
5389 5370 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5390 5371 {
5391 5372 int i;
5392 5373 int ndevs;
5393 5374 struct fcp_tgt *ptgt;
5394 5375
5395 5376 ASSERT(mutex_owned(&pptr->port_mutex));
5396 5377
5397 5378 for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5398 5379 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5399 5380 ptgt = ptgt->tgt_next) {
5400 5381 ndevs++;
5401 5382 }
5402 5383 }
5403 5384
5404 5385 if (ndevs == 0) {
5405 5386 return;
5406 5387 }
5407 5388 pptr->port_tmp_cnt = ndevs;
5408 5389
5409 5390 for (i = 0; i < FCP_NUM_HASH; i++) {
5410 5391 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5411 5392 ptgt = ptgt->tgt_next) {
5412 5393 (void) fcp_call_finish_init_held(pptr, ptgt,
5413 5394 lcount, ptgt->tgt_change_cnt, cause);
5414 5395 }
5415 5396 }
5416 5397 }
5417 5398
5418 5399 /*
5419 5400 * Function: fcp_update_tgt_state
5420 5401 *
5421 5402 * Description: This function updates the field tgt_state of a target. That
5422 5403 * field is a bitmap and which bit can be set or reset
5423 5404 * individually. The action applied to the target state is also
5424 5405 * applied to all the LUNs belonging to the target (provided the
5425 5406 * LUN is not offline). A side effect of applying the state
5426 5407 * modification to the target and the LUNs is the field tgt_trace
5427 5408 * of the target and lun_trace of the LUNs is set to zero.
5428 5409 *
5429 5410 *
5430 5411 * Argument: *ptgt Target structure.
5431 5412 * flag Flag indication what action to apply (set/reset).
5432 5413 * state State bits to update.
5433 5414 *
5434 5415 * Return Value: None
5435 5416 *
5436 5417 * Context: Interrupt, Kernel or User context.
5437 5418 * The mutex of the target (ptgt->tgt_mutex) must be owned when
5438 5419 * calling this function.
5439 5420 */
5440 5421 void
5441 5422 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5442 5423 {
5443 5424 struct fcp_lun *plun;
5444 5425
5445 5426 ASSERT(mutex_owned(&ptgt->tgt_mutex));
5446 5427
5447 5428 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5448 5429 /* The target is not offline. */
5449 5430 if (flag == FCP_SET) {
5450 5431 ptgt->tgt_state |= state;
5451 5432 ptgt->tgt_trace = 0;
5452 5433 } else {
5453 5434 ptgt->tgt_state &= ~state;
5454 5435 }
5455 5436
5456 5437 for (plun = ptgt->tgt_lun; plun != NULL;
5457 5438 plun = plun->lun_next) {
5458 5439 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5459 5440 /* The LUN is not offline. */
5460 5441 if (flag == FCP_SET) {
5461 5442 plun->lun_state |= state;
5462 5443 plun->lun_trace = 0;
5463 5444 } else {
5464 5445 plun->lun_state &= ~state;
5465 5446 }
5466 5447 }
5467 5448 }
5468 5449 }
5469 5450 }
5470 5451
/*
 * Function: fcp_update_lun_state
 *
 * Description: This function updates the field lun_state of a LUN. That
 *		field is a bitmap whose bits can be set or reset
 *		individually.
 *
 * Argument:	*plun		LUN structure.
 *		flag		Flag indication what action to apply (set/reset).
 *		state		State bits to update.
 *
 * Return Value: None
 *
 * Context:	Interrupt, Kernel or User context.
 *		The mutex of the target (ptgt->tgt_mutex) must be owned when
 *		calling this function.
 */
void
fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
{
	/* The LUN's state is protected by its target's mutex. */
	struct fcp_tgt	*ptgt = plun->lun_tgt;

	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	/*
	 * NOTE(review): lun_state is tested against FCP_TGT_OFFLINE here,
	 * not FCP_LUN_OFFLINE.  This is only correct if the two flags share
	 * the same bit value -- confirm against the definitions in fcpvar.h.
	 */
	if (!(plun->lun_state & FCP_TGT_OFFLINE)) {
		if (flag == FCP_SET) {
			plun->lun_state |= state;
		} else {
			plun->lun_state &= ~state;
		}
	}
}
5503 5484
5504 5485 /*
5505 5486 * Function: fcp_get_port
5506 5487 *
5507 5488 * Description: This function returns the fcp_port structure from the opaque
5508 5489 * handle passed by the caller. That opaque handle is the handle
5509 5490 * used by fp/fctl to identify a particular local port. That
5510 5491 * handle has been stored in the corresponding fcp_port
5511 5492 * structure. This function is going to walk the global list of
5512 5493 * fcp_port structures till one has a port_fp_handle that matches
5513 5494 * the handle passed by the caller. This function enters the
5514 5495 * mutex fcp_global_mutex while walking the global list and then
5515 5496 * releases it.
5516 5497 *
5517 5498 * Argument: port_handle Opaque handle that fp/fctl uses to identify a
5518 5499 * particular port.
5519 5500 *
5520 5501 * Return Value: NULL Not found.
5521 5502 * Not NULL Pointer to the fcp_port structure.
5522 5503 *
5523 5504 * Context: Interrupt, Kernel or User context.
5524 5505 */
5525 5506 static struct fcp_port *
5526 5507 fcp_get_port(opaque_t port_handle)
5527 5508 {
5528 5509 struct fcp_port *pptr;
5529 5510
5530 5511 ASSERT(port_handle != NULL);
5531 5512
5532 5513 mutex_enter(&fcp_global_mutex);
5533 5514 for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5534 5515 if (pptr->port_fp_handle == port_handle) {
5535 5516 break;
5536 5517 }
5537 5518 }
5538 5519 mutex_exit(&fcp_global_mutex);
5539 5520
5540 5521 return (pptr);
5541 5522 }
5542 5523
5543 5524
5544 5525 static void
5545 5526 fcp_unsol_callback(fc_packet_t *fpkt)
5546 5527 {
5547 5528 struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5548 5529 struct fcp_port *pptr = icmd->ipkt_port;
5549 5530
5550 5531 if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5551 5532 caddr_t state, reason, action, expln;
5552 5533
5553 5534 (void) fc_ulp_pkt_error(fpkt, &state, &reason,
5554 5535 &action, &expln);
5555 5536
5556 5537 fcp_log(CE_WARN, pptr->port_dip,
5557 5538 "!couldn't post response to unsolicited request: "
5558 5539 " state=%s reason=%s rx_id=%x ox_id=%x",
5559 5540 state, reason, fpkt->pkt_cmd_fhdr.ox_id,
5560 5541 fpkt->pkt_cmd_fhdr.rx_id);
5561 5542 }
5562 5543 fcp_icmd_free(pptr, icmd);
5563 5544 }
5564 5545
5565 5546
5566 5547 /*
5567 5548 * Perform general purpose preparation of a response to an unsolicited request
5568 5549 */
5569 5550 static void
5570 5551 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
5571 5552 uchar_t r_ctl, uchar_t type)
5572 5553 {
5573 5554 pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
5574 5555 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
5575 5556 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
5576 5557 pkt->pkt_cmd_fhdr.type = type;
5577 5558 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
5578 5559 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
5579 5560 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl;
5580 5561 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
5581 5562 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
5582 5563 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
5583 5564 pkt->pkt_cmd_fhdr.ro = 0;
5584 5565 pkt->pkt_cmd_fhdr.rsvd = 0;
5585 5566 pkt->pkt_comp = fcp_unsol_callback;
5586 5567 pkt->pkt_pd = NULL;
5587 5568 pkt->pkt_ub_resp_token = (opaque_t)buf;
5588 5569 }
5589 5570
5590 5571
/*
 * Function: fcp_unsol_prli
 *
 * Description: Handles an unsolicited PRLI received from a remote port.
 *		An internal packet is allocated and used to send a PRLI
 *		accept (LA_ELS_ACC) back through the transport.  On success
 *		the unsolicited buffer is released back to the transport;
 *		completion is handled by fcp_unsol_callback (set by
 *		fcp_unsol_resp_init).
 *
 * Argument:	*pptr	Fcp port that received the unsolicited PRLI.
 *		*buf	Unsolicited buffer holding the PRLI payload.
 *
 * Return Value: FC_SUCCESS	Response issued (or queued for retry).
 *		 FC_FAILURE	Allocation failed, link state changed or
 *				the ELS could not be issued.
 */
/*ARGSUSED*/
static int
fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
{
	fc_packet_t		*fpkt;
	struct la_els_prli	prli;
	struct fcp_prli		*fprli;
	struct fcp_ipkt		*icmd;
	struct la_els_prli	*from;
	struct fcp_prli		*orig;
	struct fcp_tgt		*ptgt;
	int			tcount = 0;
	int			lcount;

	from = (struct la_els_prli *)buf->ub_buffer;
	orig = (struct fcp_prli *)from->service_params;

	/*
	 * Snapshot the target change counter (if the sender is a known
	 * target) and the link counter; fcp_icmd_alloc() ties the internal
	 * packet to this link/target state.
	 */
	if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
	    NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		tcount = ptgt->tgt_change_cnt;
		mutex_exit(&ptgt->tgt_mutex);
	}

	mutex_enter(&pptr->port_mutex);
	lcount = pptr->port_link_cnt;
	mutex_exit(&pptr->port_mutex);

	if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
	    sizeof (la_els_prli_t), 0,
	    pptr->port_state & FCP_STATE_FCA_IS_NODMA,
	    lcount, tcount, 0, FC_INVALID_RSCN_COUNT)) == NULL) {
		return (FC_FAILURE);
	}

	fpkt = icmd->ipkt_fpkt;
	fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
	fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
	fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
	fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
	fpkt->pkt_rsplen = 0;
	fpkt->pkt_datalen = 0;

	icmd->ipkt_opcode = LA_ELS_PRLI;

	/* build the PRLI accept payload */
	bzero(&prli, sizeof (struct la_els_prli));
	fprli = (struct fcp_prli *)prli.service_params;
	prli.ls_code = LA_ELS_ACC;
	prli.page_length = 0x10;
	prli.payload_length = sizeof (struct la_els_prli);

	/* fill in service params */
	fprli->type = 0x08;
	fprli->resvd1 = 0;
	fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
	fprli->orig_process_associator = orig->orig_process_associator;
	fprli->resp_process_assoc_valid = 0;
	fprli->establish_image_pair = 1;
	fprli->resvd2 = 0;
	fprli->resvd3 = 0;
	fprli->obsolete_1 = 0;
	fprli->obsolete_2 = 0;
	fprli->data_overlay_allowed = 0;
	fprli->initiator_fn = 1;
	fprli->confirmed_compl_allowed = 1;

	/* advertise target capability only if the target ULP is loaded */
	if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
		fprli->target_fn = 1;
	} else {
		fprli->target_fn = 0;
	}

	fprli->retry = 1;
	fprli->read_xfer_rdy_disabled = 1;
	fprli->write_xfer_rdy_disabled = 0;

	/* save the unsol prli payload first */
	FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
	    fpkt->pkt_resp_acc, sizeof (struct la_els_prli));

	FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
	    fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));

	/* swap s_id/d_id etc. so the response goes back to the sender */
	fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);

	mutex_enter(&pptr->port_mutex);
	if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
		int rval;
		mutex_exit(&pptr->port_mutex);

		if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
		    FC_SUCCESS) {
			/*
			 * Transient link conditions: queue the packet for a
			 * later retry (only possible for known targets).
			 */
			if ((rval == FC_STATEC_BUSY || rval == FC_OFFLINE) &&
			    ptgt != NULL) {
				fcp_queue_ipkt(pptr, fpkt);
				return (FC_SUCCESS);
			}
			/* Let it timeout */
			fcp_icmd_free(pptr, icmd);
			return (FC_FAILURE);
		}
	} else {
		/* link state changed since the snapshot: give up */
		mutex_exit(&pptr->port_mutex);
		fcp_icmd_free(pptr, icmd);
		return (FC_FAILURE);
	}

	/* response is on its way; give the unsolicited buffer back */
	(void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);

	return (FC_SUCCESS);
}
5701 5682
/*
 * Function: fcp_icmd_alloc
 *
 * Description: This function allocates a fcp_ipkt structure. The pkt_comp
 *		field is initialized to fcp_icmd_callback.  Sometimes it is
 *		modified by the caller (such as fcp_send_scsi).  The
 *		structure is also tied to the state of the line and of the
 *		target at a particular time.  That link is established by
 *		setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
 *		and tcount which came respectively from pptr->link_cnt and
 *		ptgt->tgt_change_cnt.
 *
 * Argument:	*pptr		Fcp port.
 *		*ptgt		Target (destination of the command).
 *		cmd_len		Length of the command.
 *		resp_len	Length of the expected response.
 *		data_len	Length of the data.
 *		nodma		Indicates whether the command and response
 *				will be transferred through DMA or not.
 *		lcount		Link state change counter.
 *		tcount		Target state change counter.
 *		cause		Reason that lead to this call.
 *		rscn_count	RSCN count at the time of the call, or
 *				FC_INVALID_RSCN_COUNT if not applicable.
 *
 * Return Value: NULL		Failed.
 *		 Not NULL	Internal packet address.
 */
static struct fcp_ipkt *
fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
    int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
    uint32_t rscn_count)
{
	int		dma_setup = 0;	/* set once fcp_alloc_dma succeeds */
	fc_packet_t	*fpkt;
	struct fcp_ipkt	*icmd = NULL;

	/*
	 * One allocation holds the ipkt, the DMA cookie array and the FCA
	 * private area, laid out in that order (see pointer setup below).
	 */
	icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
	    pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
	    KM_NOSLEEP);
	if (icmd == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!internal packet allocation failed");
		return (NULL);
	}

	/*
	 * initialize the allocated packet
	 */
	icmd->ipkt_nodma = nodma;
	icmd->ipkt_next = icmd->ipkt_prev = NULL;
	icmd->ipkt_lun = NULL;

	icmd->ipkt_link_cnt = lcount;
	icmd->ipkt_change_cnt = tcount;
	icmd->ipkt_cause = cause;

	mutex_enter(&pptr->port_mutex);
	icmd->ipkt_port = pptr;
	mutex_exit(&pptr->port_mutex);

	/* keep track of amt of data to be sent in pkt */
	icmd->ipkt_cmdlen = cmd_len;
	icmd->ipkt_resplen = resp_len;
	icmd->ipkt_datalen = data_len;

	/* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
	icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);

	/* set pkt's private ptr to point to cmd pkt */
	icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;

	/* set FCA private ptr to memory just beyond */
	icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
	    ((char *)icmd + sizeof (struct fcp_ipkt) +
	    pptr->port_dmacookie_sz);

	/* get ptr to fpkt substruct and fill it in */
	fpkt = icmd->ipkt_fpkt;
	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
	    sizeof (struct fcp_ipkt));

	if (ptgt != NULL) {
		icmd->ipkt_tgt = ptgt;
		fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
	}

	fpkt->pkt_comp = fcp_icmd_callback;
	fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
	fpkt->pkt_cmdlen = cmd_len;
	fpkt->pkt_rsplen = resp_len;
	fpkt->pkt_datalen = data_len;

	/*
	 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
	 * rscn_count as fcp knows down to the transport. If a valid count was
	 * passed into this function, we allocate memory to actually pass down
	 * this info.
	 *
	 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
	 * basically mean that fcp will not be able to help transport
	 * distinguish if a new RSCN has come after fcp was last informed about
	 * it. In such cases, it might lead to the problem mentioned in CR/bug #
	 * 5068068 where the device might end up going offline in case of RSCN
	 * storms.
	 */
	fpkt->pkt_ulp_rscn_infop = NULL;
	if (rscn_count != FC_INVALID_RSCN_COUNT) {
		fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
		    sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
		if (fpkt->pkt_ulp_rscn_infop == NULL) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "Failed to alloc memory to pass rscn info");
		}
	}

	if (fpkt->pkt_ulp_rscn_infop != NULL) {
		fc_ulp_rscn_info_t *rscnp;

		rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
		rscnp->ulp_rscn_count = rscn_count;
	}

	if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
		goto fail;
	}
	dma_setup++;

	/*
	 * Must hold target mutex across setting of pkt_pd and call to
	 * fc_ulp_init_packet to ensure the handle to the target doesn't go
	 * away while we're not looking.
	 */
	if (ptgt != NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		fpkt->pkt_pd = ptgt->tgt_pd_handle;

		/* ask transport to do its initialization on this pkt */
		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
		    != FC_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fc_ulp_init_packet failed");
			mutex_exit(&ptgt->tgt_mutex);
			goto fail;
		}
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
		    != FC_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_6, 0,
			    "fc_ulp_init_packet failed");
			goto fail;
		}
	}

	/*
	 * Do not hand out new internal packets while the port is going
	 * away; undo the transport initialization and fail.
	 */
	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & (FCP_STATE_DETACHING |
	    FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
		int rval;

		mutex_exit(&pptr->port_mutex);

		rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
		ASSERT(rval == FC_SUCCESS);

		goto fail;
	}

	/* account for the outstanding packet on the target and the port */
	if (ptgt != NULL) {
		mutex_enter(&ptgt->tgt_mutex);
		ptgt->tgt_ipkt_cnt++;
		mutex_exit(&ptgt->tgt_mutex);
	}

	pptr->port_ipkt_cnt++;

	mutex_exit(&pptr->port_mutex);

	return (icmd);

fail:
	/* release everything acquired so far, in reverse order */
	if (fpkt->pkt_ulp_rscn_infop != NULL) {
		kmem_free(fpkt->pkt_ulp_rscn_infop,
		    sizeof (fc_ulp_rscn_info_t));
		fpkt->pkt_ulp_rscn_infop = NULL;
	}

	if (dma_setup) {
		fcp_free_dma(pptr, icmd);
	}
	kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
	    (size_t)pptr->port_dmacookie_sz);

	return (NULL);
}
5898 5879
5899 5880 /*
5900 5881 * Function: fcp_icmd_free
5901 5882 *
5902 5883 * Description: Frees the internal command passed by the caller.
5903 5884 *
5904 5885 * Argument: *pptr Fcp port.
5905 5886 * *icmd Internal packet to free.
5906 5887 *
5907 5888 * Return Value: None
5908 5889 */
5909 5890 static void
5910 5891 fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5911 5892 {
5912 5893 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
5913 5894
5914 5895 /* Let the underlying layers do their cleanup. */
5915 5896 (void) fc_ulp_uninit_packet(pptr->port_fp_handle,
5916 5897 icmd->ipkt_fpkt);
5917 5898
5918 5899 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
5919 5900 kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
5920 5901 sizeof (fc_ulp_rscn_info_t));
5921 5902 }
5922 5903
5923 5904 fcp_free_dma(pptr, icmd);
5924 5905
5925 5906 kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5926 5907 (size_t)pptr->port_dmacookie_sz);
5927 5908
5928 5909 mutex_enter(&pptr->port_mutex);
5929 5910
5930 5911 if (ptgt) {
5931 5912 mutex_enter(&ptgt->tgt_mutex);
5932 5913 ptgt->tgt_ipkt_cnt--;
5933 5914 mutex_exit(&ptgt->tgt_mutex);
5934 5915 }
5935 5916
5936 5917 pptr->port_ipkt_cnt--;
5937 5918 mutex_exit(&pptr->port_mutex);
5938 5919 }
5939 5920
/*
 * Function: fcp_alloc_dma
 *
 * Description: Allocates the DMA resources required for the internal
 *		packet: command/response buffers (either plain kernel
 *		memory when nodma, or DMA-able memory otherwise) and,
 *		when a data phase is present, a bound data buffer whose
 *		cookies are copied into fpkt->pkt_data_cookie.
 *
 * Argument:	*pptr	FCP port.
 *		*icmd	Internal FCP packet.
 *		nodma	Indicates if the Cmd and Resp will be DMAed.
 *		flags	Allocation flags (Sleep or NoSleep).
 *
 * Return Value: FC_SUCCESS
 *		 FC_NOMEM
 */
static int
fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
    int nodma, int flags)
{
	int		rval;
	size_t		real_size;
	uint_t		ccount;
	int		bound = 0;	/* data DMA handle bound */
	int		cmd_resp = 0;	/* cmd/resp DMA resources allocated */
	fc_packet_t	*fpkt;
	ddi_dma_cookie_t	pkt_data_cookie;
	ddi_dma_cookie_t	*cp;
	uint32_t	cnt;

	fpkt = &icmd->ipkt_fc_packet;

	ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
	    fpkt->pkt_resp_dma == NULL);

	icmd->ipkt_nodma = nodma;

	if (nodma) {
		/* no DVMA space: plain kernel buffers for cmd and resp */
		fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
		if (fpkt->pkt_cmd == NULL) {
			goto fail;
		}

		fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
		if (fpkt->pkt_resp == NULL) {
			goto fail;
		}
	} else {
		ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);

		rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
		if (rval == FC_FAILURE) {
			ASSERT(fpkt->pkt_cmd_dma == NULL &&
			    fpkt->pkt_resp_dma == NULL);
			goto fail;
		}
		cmd_resp++;
	}

	if ((fpkt->pkt_datalen != 0) &&
	    !(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
		/*
		 * set up DMA handle and memory for the data in this packet
		 */
		if (ddi_dma_alloc_handle(pptr->port_dip,
		    &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
		    NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
			goto fail;
		}

		if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
		    &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
		    &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
			goto fail;
		}

		/* was DMA mem size gotten < size asked for/needed ?? */
		if (real_size < fpkt->pkt_datalen) {
			goto fail;
		}

		/* bind DMA address and handle together */
		if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
		    NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
		    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
		    &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
			goto fail;
		}
		bound++;

		/* more cookies than the FCA can handle is a failure */
		if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
			goto fail;
		}

		fpkt->pkt_data_cookie_cnt = ccount;

		/*
		 * Copy the first cookie (already fetched by the bind) and
		 * then walk the rest into the cookie array that lives right
		 * after the ipkt (set up by fcp_icmd_alloc).
		 */
		cp = fpkt->pkt_data_cookie;
		*cp = pkt_data_cookie;
		cp++;

		for (cnt = 1; cnt < ccount; cnt++, cp++) {
			ddi_dma_nextcookie(fpkt->pkt_data_dma,
			    &pkt_data_cookie);
			*cp = pkt_data_cookie;
		}

	} else if (fpkt->pkt_datalen != 0) {
		/*
		 * If it's a pseudo FCA, then it can't support DMA even in
		 * SCSI data phase.
		 */
		fpkt->pkt_data = kmem_alloc(fpkt->pkt_datalen, flags);
		if (fpkt->pkt_data == NULL) {
			goto fail;
		}

	}

	return (FC_SUCCESS);

fail:
	/* unwind whatever was acquired, in reverse order */
	if (bound) {
		(void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
	}

	if (fpkt->pkt_data_dma) {
		if (fpkt->pkt_data) {
			ddi_dma_mem_free(&fpkt->pkt_data_acc);
		}
		ddi_dma_free_handle(&fpkt->pkt_data_dma);
	} else {
		if (fpkt->pkt_data) {
			kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
		}
	}

	if (nodma) {
		if (fpkt->pkt_cmd) {
			kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
		}
		if (fpkt->pkt_resp) {
			kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
		}
	} else {
		if (cmd_resp) {
			fcp_free_cmd_resp(pptr, fpkt);
		}
	}

	return (FC_NOMEM);
}
6090 6071
6091 6072
6092 6073 static void
6093 6074 fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
6094 6075 {
6095 6076 fc_packet_t *fpkt = icmd->ipkt_fpkt;
6096 6077
6097 6078 if (fpkt->pkt_data_dma) {
6098 6079 (void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6099 6080 if (fpkt->pkt_data) {
6100 6081 ddi_dma_mem_free(&fpkt->pkt_data_acc);
6101 6082 }
6102 6083 ddi_dma_free_handle(&fpkt->pkt_data_dma);
6103 6084 } else {
6104 6085 if (fpkt->pkt_data) {
6105 6086 kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6106 6087 }
6107 6088 /*
6108 6089 * Need we reset pkt_* to zero???
6109 6090 */
6110 6091 }
6111 6092
6112 6093 if (icmd->ipkt_nodma) {
6113 6094 if (fpkt->pkt_cmd) {
6114 6095 kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
6115 6096 }
6116 6097 if (fpkt->pkt_resp) {
6117 6098 kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
6118 6099 }
6119 6100 } else {
6120 6101 ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
6121 6102
6122 6103 fcp_free_cmd_resp(pptr, fpkt);
6123 6104 }
6124 6105 }
6125 6106
6126 6107 /*
6127 6108 * Function: fcp_lookup_target
6128 6109 *
6129 6110 * Description: Finds a target given a WWN.
6130 6111 *
6131 6112 * Argument: *pptr FCP port.
6132 6113 * *wwn World Wide Name of the device to look for.
6133 6114 *
6134 6115 * Return Value: NULL No target found
6135 6116 * Not NULL Target structure
6136 6117 *
6137 6118 * Context: Interrupt context.
6138 6119 * The mutex pptr->port_mutex must be owned.
6139 6120 */
6140 6121 /* ARGSUSED */
6141 6122 static struct fcp_tgt *
6142 6123 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
6143 6124 {
6144 6125 int hash;
6145 6126 struct fcp_tgt *ptgt;
6146 6127
6147 6128 ASSERT(mutex_owned(&pptr->port_mutex));
6148 6129
6149 6130 hash = FCP_HASH(wwn);
6150 6131
6151 6132 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
6152 6133 ptgt = ptgt->tgt_next) {
6153 6134 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
6154 6135 bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
6155 6136 sizeof (ptgt->tgt_port_wwn)) == 0) {
6156 6137 break;
6157 6138 }
6158 6139 }
6159 6140
6160 6141 return (ptgt);
6161 6142 }
6162 6143
6163 6144
6164 6145 /*
6165 6146 * Find target structure given a port identifier
6166 6147 */
6167 6148 static struct fcp_tgt *
6168 6149 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6169 6150 {
6170 6151 fc_portid_t port_id;
6171 6152 la_wwn_t pwwn;
6172 6153 struct fcp_tgt *ptgt = NULL;
6173 6154
6174 6155 port_id.priv_lilp_posit = 0;
6175 6156 port_id.port_id = d_id;
6176 6157 if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6177 6158 &pwwn) == FC_SUCCESS) {
6178 6159 mutex_enter(&pptr->port_mutex);
6179 6160 ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6180 6161 mutex_exit(&pptr->port_mutex);
6181 6162 }
6182 6163
6183 6164 return (ptgt);
6184 6165 }
6185 6166
6186 6167
/*
 * the packet completion callback routine for info cmd pkts
 *
 * this means fpkt pts to a response to either a PLOGI or a PRLI
 *
 * if there is an error an attempt is made to call a routine to resend
 * the command that failed
 *
 * On the success path a PLOGI completion triggers a PRLI (reusing the
 * same icmd), and a PRLI completion triggers a REPORT LUN to LUN 0.
 * Unless the packet was requeued or handed to another routine, the
 * icmd is freed and fcp_call_finish_init() is notified at the fail:
 * label.
 */
static void
fcp_icmd_callback(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct la_els_prli	*prli;
	struct la_els_prli	prli_s;
	struct fcp_prli		*fprli;
	struct fcp_lun	*plun;
	int		free_pkt = 1;	/* cleared when icmd ownership moves */
	int		rval;
	ls_code_t	resp;
	uchar_t		prli_acc = 0;
	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
	int		lun0_newalloc;

	icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;

	/* get ptrs to the port and target structs for the cmd */
	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;

	FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));

	/*
	 * For a PRLI, acceptance is also recognized from the ls_code of
	 * the command payload (prli_acc), not only the response ls_code.
	 */
	if (icmd->ipkt_opcode == LA_ELS_PRLI) {
		FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
		    sizeof (prli_s));
		prli_acc = (prli_s.ls_code == LA_ELS_ACC);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "ELS (%x) callback state=0x%x reason=0x%x for %x",
	    icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
	    ptgt->tgt_d_id);

	if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
	    ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {

		mutex_enter(&ptgt->tgt_mutex);
		if (ptgt->tgt_pd_handle == NULL) {
			/*
			 * in a fabric environment the port device handles
			 * get created only after successful LOGIN into the
			 * transport, so the transport makes this port
			 * device (pd) handle available in this packet, so
			 * save it now
			 */
			ASSERT(fpkt->pkt_pd != NULL);
			ptgt->tgt_pd_handle = fpkt->pkt_pd;
		}
		mutex_exit(&ptgt->tgt_mutex);

		/* which ELS cmd is this response for ?? */
		switch (icmd->ipkt_opcode) {
		case LA_ELS_PLOGI:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
			    ptgt->tgt_d_id,
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
			    *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));

			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
			    FCP_TGT_TRACE_15);

			/* Note that we are not allocating a new icmd */
			if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause) != DDI_SUCCESS) {
				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_16);
				goto fail;
			}
			break;

		case LA_ELS_PRLI:
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_5, 0,
			    "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);

			FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
			    FCP_TGT_TRACE_17);

			prli = &prli_s;

			FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
			    sizeof (prli_s));

			fprli = (struct fcp_prli *)prli->service_params;

			/* record the peer's initiator/target capability */
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_icap = fprli->initiator_fn;
			ptgt->tgt_tcap = fprli->target_fn;
			mutex_exit(&ptgt->tgt_mutex);

			if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
				/*
				 * this FCP device does not support target mode
				 */
				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_18);
				goto fail;
			}
			if (fprli->retry == 1) {
				fc_ulp_disable_relogin(pptr->port_fp_handle,
				    &ptgt->tgt_port_wwn);
			}

			/* target is no longer offline */
			mutex_enter(&pptr->port_mutex);
			mutex_enter(&ptgt->tgt_mutex);
			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
				    FCP_TGT_MARK);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,1: state change "
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				goto fail;
			}
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);

			/*
			 * lun 0 should always respond to inquiry, so
			 * get the LUN struct for LUN 0
			 *
			 * Currently we deal with first level of addressing.
			 * If / when we start supporting 0x device types
			 * (DTYPE_ARRAY_CTRL, i.e. array controllers)
			 * this logic will need revisiting.
			 */
			lun0_newalloc = 0;
			if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
				/*
				 * no LUN struct for LUN 0 yet exists,
				 * so create one
				 */
				plun = fcp_alloc_lun(ptgt);
				if (plun == NULL) {
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Failed to allocate lun 0 for"
					    " D_ID=%x", ptgt->tgt_d_id);
					goto fail;
				}
				lun0_newalloc = 1;
			}

			/* fill in LUN info */
			mutex_enter(&ptgt->tgt_mutex);
			/*
			 * consider lun 0 as device not connected if it is
			 * offlined or newly allocated
			 */
			if ((plun->lun_state & FCP_LUN_OFFLINE) ||
			    lun0_newalloc) {
				plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
			}
			plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
			plun->lun_state &= ~FCP_LUN_OFFLINE;
			ptgt->tgt_lun_cnt = 1;
			ptgt->tgt_report_lun_cnt = 0;
			mutex_exit(&ptgt->tgt_mutex);

			/* Retrieve the rscn count (if a valid one exists) */
			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
				rscn_count = ((fc_ulp_rscn_info_t *)
				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
				    ->ulp_rscn_count;
			} else {
				rscn_count = FC_INVALID_RSCN_COUNT;
			}

			/* send Report Lun request to target */
			if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
			    sizeof (struct fcp_reportlun_resp),
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
				mutex_enter(&pptr->port_mutex);
				if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Failed to send REPORT LUN to"
					    " D_ID=%x", ptgt->tgt_d_id);
				} else {
					FCP_TRACE(fcp_logq,
					    pptr->port_instbuf, fcp_trace,
					    FCP_BUF_LEVEL_5, 0,
					    "fcp_icmd_callback,2:state change"
					    " occured for D_ID=0x%x",
					    ptgt->tgt_d_id);
				}
				mutex_exit(&pptr->port_mutex);

				FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
				    FCP_TGT_TRACE_19);

				goto fail;
			} else {
				/* REPORT LUN allocated its own icmd */
				free_pkt = 0;
				fcp_icmd_free(pptr, icmd);
			}
			break;

		default:
			fcp_log(CE_WARN, pptr->port_dip,
			    "!fcp_icmd_callback Invalid opcode");
			goto fail;
		}

		return;
	}


	/*
	 * Other PLOGI failures are not retried as the
	 * transport does it already
	 */
	if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
		if (fcp_is_retryable(icmd) &&
		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {

			if (FCP_MUST_RETRY(fpkt)) {
				fcp_queue_ipkt(pptr, fpkt);
				return;
			}

			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "ELS PRLI is retried for d_id=0x%x, state=%x,"
			    " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
			    fpkt->pkt_reason);

			/*
			 * Retry by recalling the routine that
			 * originally queued this packet
			 */
			mutex_enter(&pptr->port_mutex);
			if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
				caddr_t msg;

				mutex_exit(&pptr->port_mutex);

				ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);

				if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
					fpkt->pkt_timeout +=
					    FCP_TIMEOUT_DELTA;
				}

				rval = fc_ulp_issue_els(pptr->port_fp_handle,
				    fpkt);
				if (rval == FC_SUCCESS) {
					return;
				}

				if (rval == FC_STATEC_BUSY ||
				    rval == FC_OFFLINE) {
					fcp_queue_ipkt(pptr, fpkt);
					return;
				}
				(void) fc_ulp_error(rval, &msg);

				fcp_log(CE_NOTE, pptr->port_dip,
				    "!ELS 0x%x failed to d_id=0x%x;"
				    " %s", icmd->ipkt_opcode,
				    ptgt->tgt_d_id, msg);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "fcp_icmd_callback,3: state change "
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&pptr->port_mutex);
			}
		}
	} else {
		if (fcp_is_retryable(icmd) &&
		    icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
			if (FCP_MUST_RETRY(fpkt)) {
				fcp_queue_ipkt(pptr, fpkt);
				return;
			}
		}
		mutex_enter(&pptr->port_mutex);
		if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
		    fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
			mutex_exit(&pptr->port_mutex);
			fcp_print_error(fpkt);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_icmd_callback,4: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&pptr->port_mutex);
		}
	}

fail:
	/* common exit: notify discovery bookkeeping and drop the icmd */
	if (free_pkt) {
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
	}
}
6503 6484
6504 6485
6505 6486 /*
6506 6487 * called internally to send an info cmd using the transport
6507 6488 *
6508 6489 * sends either an INQ or a REPORT_LUN
6509 6490 *
6510 6491 * when the packet is completed fcp_scsi_callback is called
6511 6492 */
6512 6493 static int
6513 6494 fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
6514 6495 int lcount, int tcount, int cause, uint32_t rscn_count)
6515 6496 {
6516 6497 int nodma;
6517 6498 struct fcp_ipkt *icmd;
6518 6499 struct fcp_tgt *ptgt;
6519 6500 struct fcp_port *pptr;
6520 6501 fc_frame_hdr_t *hp;
6521 6502 fc_packet_t *fpkt;
6522 6503 struct fcp_cmd fcp_cmd;
6523 6504 struct fcp_cmd *fcmd;
6524 6505 union scsi_cdb *scsi_cdb;
6525 6506
6526 6507 ASSERT(plun != NULL);
6527 6508
6528 6509 ptgt = plun->lun_tgt;
6529 6510 ASSERT(ptgt != NULL);
6530 6511
6531 6512 pptr = ptgt->tgt_port;
6532 6513 ASSERT(pptr != NULL);
6533 6514
6534 6515 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6535 6516 fcp_trace, FCP_BUF_LEVEL_5, 0,
6536 6517 "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);
6537 6518
6538 6519 nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
6539 6520 icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
6540 6521 FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
6541 6522 rscn_count);
6542 6523
6543 6524 if (icmd == NULL) {
6544 6525 return (DDI_FAILURE);
6545 6526 }
6546 6527
6547 6528 fpkt = icmd->ipkt_fpkt;
6548 6529 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
6549 6530 icmd->ipkt_retries = 0;
6550 6531 icmd->ipkt_opcode = opcode;
6551 6532 icmd->ipkt_lun = plun;
6552 6533
6553 6534 if (nodma) {
6554 6535 fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
6555 6536 } else {
6556 6537 fcmd = &fcp_cmd;
6557 6538 }
6558 6539 bzero(fcmd, sizeof (struct fcp_cmd));
6559 6540
6560 6541 fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;
6561 6542
6562 6543 hp = &fpkt->pkt_cmd_fhdr;
6563 6544
6564 6545 hp->s_id = pptr->port_id;
6565 6546 hp->d_id = ptgt->tgt_d_id;
6566 6547 hp->r_ctl = R_CTL_COMMAND;
6567 6548 hp->type = FC_TYPE_SCSI_FCP;
6568 6549 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
6569 6550 hp->rsvd = 0;
6570 6551 hp->seq_id = 0;
6571 6552 hp->seq_cnt = 0;
6572 6553 hp->ox_id = 0xffff;
6573 6554 hp->rx_id = 0xffff;
6574 6555 hp->ro = 0;
6575 6556
6576 6557 bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);
6577 6558
6578 6559 /*
6579 6560 * Request SCSI target for expedited processing
6580 6561 */
6581 6562
6582 6563 /*
6583 6564 * Set up for untagged queuing because we do not
6584 6565 * know if the fibre device supports queuing.
6585 6566 */
6586 6567 fcmd->fcp_cntl.cntl_reserved_0 = 0;
6587 6568 fcmd->fcp_cntl.cntl_reserved_1 = 0;
6588 6569 fcmd->fcp_cntl.cntl_reserved_2 = 0;
6589 6570 fcmd->fcp_cntl.cntl_reserved_3 = 0;
6590 6571 fcmd->fcp_cntl.cntl_reserved_4 = 0;
6591 6572 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
6592 6573 scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
6593 6574
6594 6575 switch (opcode) {
6595 6576 case SCMD_INQUIRY_PAGE83:
6596 6577 /*
6597 6578 * Prepare to get the Inquiry VPD page 83 information
6598 6579 */
6599 6580 fcmd->fcp_cntl.cntl_read_data = 1;
6600 6581 fcmd->fcp_cntl.cntl_write_data = 0;
6601 6582 fcmd->fcp_data_len = alloc_len;
6602 6583
6603 6584 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6604 6585 fpkt->pkt_comp = fcp_scsi_callback;
6605 6586
6606 6587 scsi_cdb->scc_cmd = SCMD_INQUIRY;
6607 6588 scsi_cdb->g0_addr2 = 0x01;
6608 6589 scsi_cdb->g0_addr1 = 0x83;
6609 6590 scsi_cdb->g0_count0 = (uchar_t)alloc_len;
6610 6591 break;
6611 6592
6612 6593 case SCMD_INQUIRY:
6613 6594 fcmd->fcp_cntl.cntl_read_data = 1;
6614 6595 fcmd->fcp_cntl.cntl_write_data = 0;
6615 6596 fcmd->fcp_data_len = alloc_len;
6616 6597
6617 6598 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6618 6599 fpkt->pkt_comp = fcp_scsi_callback;
6619 6600
6620 6601 scsi_cdb->scc_cmd = SCMD_INQUIRY;
6621 6602 scsi_cdb->g0_count0 = SUN_INQSIZE;
6622 6603 break;
6623 6604
6624 6605 case SCMD_REPORT_LUN: {
6625 6606 fc_portid_t d_id;
6626 6607 opaque_t fca_dev;
6627 6608
6628 6609 ASSERT(alloc_len >= 16);
6629 6610
6630 6611 d_id.priv_lilp_posit = 0;
6631 6612 d_id.port_id = ptgt->tgt_d_id;
6632 6613
6633 6614 fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);
6634 6615
6635 6616 mutex_enter(&ptgt->tgt_mutex);
6636 6617 ptgt->tgt_fca_dev = fca_dev;
6637 6618 mutex_exit(&ptgt->tgt_mutex);
6638 6619
6639 6620 fcmd->fcp_cntl.cntl_read_data = 1;
6640 6621 fcmd->fcp_cntl.cntl_write_data = 0;
6641 6622 fcmd->fcp_data_len = alloc_len;
6642 6623
6643 6624 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6644 6625 fpkt->pkt_comp = fcp_scsi_callback;
6645 6626
6646 6627 scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
6647 6628 scsi_cdb->scc5_count0 = alloc_len & 0xff;
6648 6629 scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
6649 6630 scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
6650 6631 scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
6651 6632 break;
6652 6633 }
6653 6634
6654 6635 default:
6655 6636 fcp_log(CE_WARN, pptr->port_dip,
6656 6637 "!fcp_send_scsi Invalid opcode");
6657 6638 break;
6658 6639 }
6659 6640
6660 6641 if (!nodma) {
6661 6642 FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
6662 6643 fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
6663 6644 }
6664 6645
6665 6646 mutex_enter(&pptr->port_mutex);
6666 6647 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6667 6648
6668 6649 mutex_exit(&pptr->port_mutex);
6669 6650 if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
6670 6651 FC_SUCCESS) {
6671 6652 fcp_icmd_free(pptr, icmd);
6672 6653 return (DDI_FAILURE);
6673 6654 }
6674 6655 return (DDI_SUCCESS);
6675 6656 } else {
6676 6657 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6677 6658 fcp_trace, FCP_BUF_LEVEL_2, 0,
6678 6659 "fcp_send_scsi,1: state change occured"
6679 6660 " for D_ID=0x%x", ptgt->tgt_d_id);
6680 6661 mutex_exit(&pptr->port_mutex);
6681 6662 fcp_icmd_free(pptr, icmd);
6682 6663 return (DDI_FAILURE);
6683 6664 }
6684 6665 }
6685 6666
6686 6667
6687 6668 /*
6688 6669 * called by fcp_scsi_callback to check to handle the case where
6689 6670 * REPORT_LUN returns ILLEGAL REQUEST or a UNIT ATTENTION
6690 6671 */
6691 6672 static int
6692 6673 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
6693 6674 {
6694 6675 uchar_t rqlen;
6695 6676 int rval = DDI_FAILURE;
6696 6677 struct scsi_extended_sense sense_info, *sense;
6697 6678 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
6698 6679 fpkt->pkt_ulp_private;
6699 6680 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
6700 6681 struct fcp_port *pptr = ptgt->tgt_port;
6701 6682
6702 6683 ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6703 6684
6704 6685 if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6705 6686 /*
6706 6687 * SCSI-II Reserve Release support. Some older FC drives return
6707 6688 * Reservation conflict for Report Luns command.
6708 6689 */
6709 6690 if (icmd->ipkt_nodma) {
6710 6691 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6711 6692 rsp->fcp_u.fcp_status.sense_len_set = 0;
6712 6693 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6713 6694 } else {
6714 6695 fcp_rsp_t new_resp;
6715 6696
6716 6697 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6717 6698 fpkt->pkt_resp_acc, sizeof (new_resp));
6718 6699
6719 6700 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6720 6701 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6721 6702 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6722 6703
6723 6704 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6724 6705 fpkt->pkt_resp_acc, sizeof (new_resp));
6725 6706 }
6726 6707
6727 6708 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6728 6709 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6729 6710
6730 6711 return (DDI_SUCCESS);
6731 6712 }
6732 6713
6733 6714 sense = &sense_info;
6734 6715 if (!rsp->fcp_u.fcp_status.sense_len_set) {
6735 6716 /* no need to continue if sense length is not set */
6736 6717 return (rval);
6737 6718 }
6738 6719
6739 6720 /* casting 64-bit integer to 8-bit */
6740 6721 rqlen = (uchar_t)min(rsp->fcp_sense_len,
6741 6722 sizeof (struct scsi_extended_sense));
6742 6723
6743 6724 if (rqlen < 14) {
6744 6725 /* no need to continue if request length isn't long enough */
6745 6726 return (rval);
6746 6727 }
6747 6728
6748 6729 if (icmd->ipkt_nodma) {
6749 6730 /*
6750 6731 * We can safely use fcp_response_len here since the
6751 6732 * only path that calls fcp_check_reportlun,
6752 6733 * fcp_scsi_callback, has already called
6753 6734 * fcp_validate_fcp_response.
6754 6735 */
6755 6736 sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6756 6737 sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6757 6738 } else {
6758 6739 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6759 6740 rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6760 6741 sizeof (struct scsi_extended_sense));
6761 6742 }
6762 6743
6763 6744 if (!FCP_SENSE_NO_LUN(sense)) {
6764 6745 mutex_enter(&ptgt->tgt_mutex);
6765 6746 /* clear the flag if any */
6766 6747 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6767 6748 mutex_exit(&ptgt->tgt_mutex);
6768 6749 }
6769 6750
6770 6751 if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6771 6752 (sense->es_add_code == 0x20)) {
6772 6753 if (icmd->ipkt_nodma) {
6773 6754 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6774 6755 rsp->fcp_u.fcp_status.sense_len_set = 0;
6775 6756 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6776 6757 } else {
6777 6758 fcp_rsp_t new_resp;
6778 6759
6779 6760 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6780 6761 fpkt->pkt_resp_acc, sizeof (new_resp));
6781 6762
6782 6763 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6783 6764 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6784 6765 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6785 6766
6786 6767 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6787 6768 fpkt->pkt_resp_acc, sizeof (new_resp));
6788 6769 }
6789 6770
6790 6771 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6791 6772 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6792 6773
6793 6774 return (DDI_SUCCESS);
6794 6775 }
6795 6776
6796 6777 /*
6797 6778 * This is for the STK library which returns a check condition,
6798 6779 * to indicate device is not ready, manual assistance needed.
6799 6780 * This is to a report lun command when the door is open.
6800 6781 */
6801 6782 if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6802 6783 if (icmd->ipkt_nodma) {
6803 6784 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6804 6785 rsp->fcp_u.fcp_status.sense_len_set = 0;
6805 6786 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6806 6787 } else {
6807 6788 fcp_rsp_t new_resp;
6808 6789
6809 6790 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6810 6791 fpkt->pkt_resp_acc, sizeof (new_resp));
6811 6792
6812 6793 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6813 6794 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6814 6795 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6815 6796
6816 6797 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6817 6798 fpkt->pkt_resp_acc, sizeof (new_resp));
6818 6799 }
6819 6800
6820 6801 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6821 6802 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6822 6803
6823 6804 return (DDI_SUCCESS);
6824 6805 }
6825 6806
6826 6807 if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6827 6808 (FCP_SENSE_NO_LUN(sense))) {
6828 6809 mutex_enter(&ptgt->tgt_mutex);
6829 6810 if ((FCP_SENSE_NO_LUN(sense)) &&
6830 6811 (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6831 6812 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6832 6813 mutex_exit(&ptgt->tgt_mutex);
6833 6814 /*
6834 6815 * reconfig was triggred by ILLEGAL REQUEST but
6835 6816 * got ILLEGAL REQUEST again
6836 6817 */
6837 6818 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6838 6819 fcp_trace, FCP_BUF_LEVEL_3, 0,
6839 6820 "!FCP: Unable to obtain Report Lun data"
6840 6821 " target=%x", ptgt->tgt_d_id);
6841 6822 } else {
6842 6823 if (ptgt->tgt_tid == NULL) {
6843 6824 timeout_id_t tid;
6844 6825 /*
6845 6826 * REPORT LUN data has changed. Kick off
6846 6827 * rediscovery
6847 6828 */
6848 6829 tid = timeout(fcp_reconfigure_luns,
6849 6830 (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6850 6831
6851 6832 ptgt->tgt_tid = tid;
6852 6833 ptgt->tgt_state |= FCP_TGT_BUSY;
6853 6834 }
6854 6835 if (FCP_SENSE_NO_LUN(sense)) {
6855 6836 ptgt->tgt_state |= FCP_TGT_ILLREQ;
6856 6837 }
6857 6838 mutex_exit(&ptgt->tgt_mutex);
6858 6839 if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6859 6840 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6860 6841 fcp_trace, FCP_BUF_LEVEL_3, 0,
6861 6842 "!FCP:Report Lun Has Changed"
6862 6843 " target=%x", ptgt->tgt_d_id);
6863 6844 } else if (FCP_SENSE_NO_LUN(sense)) {
6864 6845 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6865 6846 fcp_trace, FCP_BUF_LEVEL_3, 0,
6866 6847 "!FCP:LU Not Supported"
6867 6848 " target=%x", ptgt->tgt_d_id);
6868 6849 }
6869 6850 }
6870 6851 rval = DDI_SUCCESS;
6871 6852 }
6872 6853
6873 6854 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6874 6855 fcp_trace, FCP_BUF_LEVEL_5, 0,
6875 6856 "D_ID=%x, sense=%x, status=%x",
6876 6857 fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6877 6858 rsp->fcp_u.fcp_status.scsi_status);
6878 6859
6879 6860 return (rval);
6880 6861 }
6881 6862
/*
 * Function: fcp_scsi_callback
 *
 * Description: This is the callback routine set by fcp_send_scsi() after
 *		it calls fcp_icmd_alloc(). The SCSI commands completed here,
 *		autogenerated by FCP, are: REPORT_LUN, INQUIRY and
 *		INQUIRY_PAGE83.
 *
 * Argument:	*fpkt	FC packet used to convey the command
 *
 * Return Value: None
 */
static void
fcp_scsi_callback(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
	    fpkt->pkt_ulp_private;
	struct fcp_rsp_info	fcp_rsp_err, *bep;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	struct fcp_rsp	response, *rsp;

	ptgt = icmd->ipkt_tgt;
	pptr = ptgt->tgt_port;
	plun = icmd->ipkt_lun;

	/*
	 * Map the FCP response: directly when the FCA has no DVMA space,
	 * otherwise copy it in through the access handle.
	 */
	if (icmd->ipkt_nodma) {
		rsp = (struct fcp_rsp *)fpkt->pkt_resp;
	} else {
		rsp = &response;
		FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
		    sizeof (struct fcp_rsp));
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "SCSI callback state=0x%x for %x, op_code=0x%x, "
	    "status=%x, lun num=%x",
	    fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
	    rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);

	/*
	 * Pre-init LUN GUID with NWWN if it is not a device that
	 * supports multiple luns and we know it's not page83
	 * compliant. Although using a NWWN is not lun unique,
	 * we will be fine since there is only one lun behind the target
	 * in this case.
	 */
	if ((plun->lun_guid_size == 0) &&
	    (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
	    (fcp_symmetric_device_probe(plun) == 0)) {

		char ascii_wwn[FC_WWN_SIZE*2+1];
		fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
		(void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
	}

	/*
	 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
	 * when thay have more data than what is asked in CDB. An overrun
	 * is really when FCP_DL is smaller than the data length in CDB.
	 * In the case here we know that REPORT LUN command we formed within
	 * this binary has correct FCP_DL. So this OVERRUN is due to bad device
	 * behavior. In reality this is FC_SUCCESS.
	 */
	if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
	    (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
	    (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
		fpkt->pkt_state = FC_PKT_SUCCESS;
	}

	/* Transport-level failure: retry, or report and clean up. */
	if (fpkt->pkt_state != FC_PKT_SUCCESS) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "icmd failed with state=0x%x for %x", fpkt->pkt_state,
		    ptgt->tgt_d_id);

		if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
			/*
			 * Inquiry VPD page command on A5K SES devices would
			 * result in data CRC errors.
			 */
			if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
				(void) fcp_handle_page83(fpkt, icmd, 1);
				return;
			}
		}
		if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
		    FCP_MUST_RETRY(fpkt)) {
			fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
			fcp_retry_scsi_cmd(fpkt);
			return;
		}

		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
		    FCP_TGT_TRACE_20);

		/* Lock order: port_mutex before tgt_mutex. */
		mutex_enter(&pptr->port_mutex);
		mutex_enter(&ptgt->tgt_mutex);
		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
			fcp_print_error(fpkt);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_scsi_callback,1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
		}
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);

	/* Drop the command if the link/target state changed underneath us. */
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_scsi_callback,2: state change occured"
		    " for D_ID=0x%x", ptgt->tgt_d_id);
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}
	ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);

	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	/* Locate the FCP_RSP_INFO field that follows the FCP response. */
	if (icmd->ipkt_nodma) {
		bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
		    sizeof (struct fcp_rsp));
	} else {
		bep = &fcp_rsp_err;
		FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
		    fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
	}

	if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
		fcp_retry_scsi_cmd(fpkt);
		return;
	}

	if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
	    FCP_NO_FAILURE) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "rsp_code=0x%x, rsp_len_set=0x%x",
		    bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
		fcp_retry_scsi_cmd(fpkt);
		return;
	}

	/* Target is momentarily saturated: requeue for a later retry. */
	if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
	    rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
		fcp_queue_ipkt(pptr, fpkt);
		return;
	}

	/*
	 * Devices that do not support INQUIRY_PAGE83, return check condition
	 * with illegal request as per SCSI spec.
	 * Crossbridge is one such device and Daktari's SES node is another.
	 * We want to ideally enumerate these devices as a non-mpxio devices.
	 * SES nodes (Daktari only currently) are an exception to this.
	 */
	if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
	    (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
		    "check condition. May enumerate as non-mpxio device",
		    ptgt->tgt_d_id, plun->lun_type);

		/*
		 * If we let Daktari's SES be enumerated as a non-mpxio
		 * device, there will be a discrepency in that the other
		 * internal FC disks will get enumerated as mpxio devices.
		 * Applications like luxadm expect this to be consistent.
		 *
		 * So, we put in a hack here to check if this is an SES device
		 * and handle it here.
		 */
		if (plun->lun_type == DTYPE_ESI) {
			/*
			 * Since, pkt_state is actually FC_PKT_SUCCESS
			 * at this stage, we fake a failure here so that
			 * fcp_handle_page83 will create a device path using
			 * the WWN instead of the GUID which is not there anyway
			 */
			fpkt->pkt_state = FC_PKT_LOCAL_RJT;
			(void) fcp_handle_page83(fpkt, icmd, 1);
			return;
		}

		mutex_enter(&ptgt->tgt_mutex);
		plun->lun_state &= ~(FCP_LUN_OFFLINE |
		    FCP_LUN_MARK | FCP_LUN_BUSY);
		mutex_exit(&ptgt->tgt_mutex);

		(void) fcp_call_finish_init(pptr, ptgt,
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
		int rval = DDI_FAILURE;

		/*
		 * handle cases where report lun isn't supported
		 * by faking up our own REPORT_LUN response or
		 * UNIT ATTENTION
		 */
		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
			rval = fcp_check_reportlun(rsp, fpkt);

			/*
			 * fcp_check_reportlun might have modified the
			 * FCP response. Copy it in again to get an updated
			 * FCP response
			 */
			if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
				rsp = &response;

				FCP_CP_IN(fpkt->pkt_resp, rsp,
				    fpkt->pkt_resp_acc,
				    sizeof (struct fcp_rsp));
			}
		}

		if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
			if (rval == DDI_SUCCESS) {
				(void) fcp_call_finish_init(pptr, ptgt,
				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
				    icmd->ipkt_cause);
				fcp_icmd_free(pptr, icmd);
			} else {
				fcp_retry_scsi_cmd(fpkt);
			}

			return;
		}
	} else {
		if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
			mutex_enter(&ptgt->tgt_mutex);
			ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
			mutex_exit(&ptgt->tgt_mutex);
		}
	}

	ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
	/* Sync the data buffer for the CPU before the handlers read it. */
	if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
		(void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}

	/* Dispatch to the opcode-specific completion handler. */
	switch (icmd->ipkt_opcode) {
	case SCMD_INQUIRY:
		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
		fcp_handle_inquiry(fpkt, icmd);
		break;

	case SCMD_REPORT_LUN:
		FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
		    FCP_TGT_TRACE_22);
		fcp_handle_reportlun(fpkt, icmd);
		break;

	case SCMD_INQUIRY_PAGE83:
		FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
		(void) fcp_handle_page83(fpkt, icmd, 0);
		break;

	default:
		fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		break;
	}
}
7176 7157
7177 7158
7178 7159 static void
7179 7160 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7180 7161 {
7181 7162 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
7182 7163 fpkt->pkt_ulp_private;
7183 7164 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
7184 7165 struct fcp_port *pptr = ptgt->tgt_port;
7185 7166
7186 7167 if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7187 7168 fcp_is_retryable(icmd)) {
7188 7169 mutex_enter(&pptr->port_mutex);
7189 7170 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7190 7171 mutex_exit(&pptr->port_mutex);
7191 7172 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7192 7173 fcp_trace, FCP_BUF_LEVEL_3, 0,
7193 7174 "Retrying %s to %x; state=%x, reason=%x",
7194 7175 (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7195 7176 "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7196 7177 fpkt->pkt_state, fpkt->pkt_reason);
7197 7178
7198 7179 fcp_queue_ipkt(pptr, fpkt);
7199 7180 } else {
7200 7181 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7201 7182 fcp_trace, FCP_BUF_LEVEL_3, 0,
7202 7183 "fcp_retry_scsi_cmd,1: state change occured"
7203 7184 " for D_ID=0x%x", ptgt->tgt_d_id);
7204 7185 mutex_exit(&pptr->port_mutex);
7205 7186 (void) fcp_call_finish_init(pptr, ptgt,
7206 7187 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7207 7188 icmd->ipkt_cause);
7208 7189 fcp_icmd_free(pptr, icmd);
7209 7190 }
7210 7191 } else {
7211 7192 fcp_print_error(fpkt);
7212 7193 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7213 7194 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7214 7195 fcp_icmd_free(pptr, icmd);
7215 7196 }
7216 7197 }
7217 7198
7218 7199 /*
7219 7200 * Function: fcp_handle_page83
7220 7201 *
7221 7202 * Description: Treats the response to INQUIRY_PAGE83.
7222 7203 *
7223 7204 * Argument: *fpkt FC packet used to convey the command.
7224 7205 * *icmd Original fcp_ipkt structure.
7225 7206 * ignore_page83_data
7226 7207 * if it's 1, that means it's a special devices's
7227 7208 * page83 response, it should be enumerated under mpxio
7228 7209 *
7229 7210 * Return Value: None
7230 7211 */
7231 7212 static void
7232 7213 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7233 7214 int ignore_page83_data)
7234 7215 {
7235 7216 struct fcp_port *pptr;
7236 7217 struct fcp_lun *plun;
7237 7218 struct fcp_tgt *ptgt;
7238 7219 uchar_t dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7239 7220 int fail = 0;
7240 7221 ddi_devid_t devid;
7241 7222 char *guid = NULL;
7242 7223 int ret;
7243 7224
7244 7225 ASSERT(icmd != NULL && fpkt != NULL);
7245 7226
7246 7227 pptr = icmd->ipkt_port;
7247 7228 ptgt = icmd->ipkt_tgt;
7248 7229 plun = icmd->ipkt_lun;
7249 7230
7250 7231 if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7251 7232 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7252 7233
7253 7234 FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7254 7235 SCMD_MAX_INQUIRY_PAGE83_SIZE);
7255 7236
7256 7237 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7257 7238 fcp_trace, FCP_BUF_LEVEL_5, 0,
7258 7239 "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7259 7240 "dtype=0x%x, lun num=%x",
7260 7241 pptr->port_instance, ptgt->tgt_d_id,
7261 7242 dev_id_page[0], plun->lun_num);
7262 7243
7263 7244 ret = ddi_devid_scsi_encode(
7264 7245 DEVID_SCSI_ENCODE_VERSION_LATEST,
7265 7246 NULL, /* driver name */
7266 7247 (unsigned char *) &plun->lun_inq, /* standard inquiry */
7267 7248 sizeof (plun->lun_inq), /* size of standard inquiry */
7268 7249 NULL, /* page 80 data */
7269 7250 0, /* page 80 len */
7270 7251 dev_id_page, /* page 83 data */
7271 7252 SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7272 7253 &devid);
7273 7254
7274 7255 if (ret == DDI_SUCCESS) {
7275 7256
7276 7257 guid = ddi_devid_to_guid(devid);
7277 7258
7278 7259 if (guid) {
7279 7260 /*
7280 7261 * Check our current guid. If it's non null
7281 7262 * and it has changed, we need to copy it into
7282 7263 * lun_old_guid since we might still need it.
7283 7264 */
7284 7265 if (plun->lun_guid &&
7285 7266 strcmp(guid, plun->lun_guid)) {
7286 7267 unsigned int len;
7287 7268
7288 7269 /*
7289 7270 * If the guid of the LUN changes,
7290 7271 * reconfiguration should be triggered
7291 7272 * to reflect the changes.
7292 7273 * i.e. we should offline the LUN with
7293 7274 * the old guid, and online the LUN with
7294 7275 * the new guid.
7295 7276 */
7296 7277 plun->lun_state |= FCP_LUN_CHANGED;
7297 7278
7298 7279 if (plun->lun_old_guid) {
7299 7280 kmem_free(plun->lun_old_guid,
7300 7281 plun->lun_old_guid_size);
7301 7282 }
7302 7283
7303 7284 len = plun->lun_guid_size;
7304 7285 plun->lun_old_guid_size = len;
7305 7286
7306 7287 plun->lun_old_guid = kmem_zalloc(len,
7307 7288 KM_NOSLEEP);
7308 7289
7309 7290 if (plun->lun_old_guid) {
7310 7291 /*
7311 7292 * The alloc was successful then
7312 7293 * let's do the copy.
7313 7294 */
7314 7295 bcopy(plun->lun_guid,
7315 7296 plun->lun_old_guid, len);
7316 7297 } else {
7317 7298 fail = 1;
7318 7299 plun->lun_old_guid_size = 0;
7319 7300 }
7320 7301 }
7321 7302 if (!fail) {
7322 7303 if (fcp_copy_guid_2_lun_block(
7323 7304 plun, guid)) {
7324 7305 fail = 1;
7325 7306 }
7326 7307 }
7327 7308 ddi_devid_free_guid(guid);
7328 7309
7329 7310 } else {
7330 7311 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7331 7312 fcp_trace, FCP_BUF_LEVEL_2, 0,
7332 7313 "fcp_handle_page83: unable to create "
7333 7314 "GUID");
7334 7315
7335 7316 /* couldn't create good guid from devid */
7336 7317 fail = 1;
7337 7318 }
7338 7319 ddi_devid_free(devid);
7339 7320
7340 7321 } else if (ret == DDI_NOT_WELL_FORMED) {
7341 7322 /* NULL filled data for page 83 */
7342 7323 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7343 7324 fcp_trace, FCP_BUF_LEVEL_2, 0,
7344 7325 "fcp_handle_page83: retry GUID");
7345 7326
7346 7327 icmd->ipkt_retries = 0;
7347 7328 fcp_retry_scsi_cmd(fpkt);
7348 7329 return;
7349 7330 } else {
7350 7331 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7351 7332 fcp_trace, FCP_BUF_LEVEL_2, 0,
7352 7333 "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7353 7334 ret);
7354 7335 /*
7355 7336 * Since the page83 validation
7356 7337 * introduced late, we are being
7357 7338 * tolerant to the existing devices
7358 7339 * that already found to be working
7359 7340 * under mpxio, like A5200's SES device,
7360 7341 * its page83 response will not be standard-compliant,
7361 7342 * but we still want it to be enumerated under mpxio.
7362 7343 */
7363 7344 if (fcp_symmetric_device_probe(plun) != 0) {
7364 7345 fail = 1;
7365 7346 }
7366 7347 }
7367 7348
7368 7349 } else {
7369 7350 /* bad packet state */
7370 7351 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7371 7352
7372 7353 /*
7373 7354 * For some special devices (A5K SES and Daktari's SES devices),
7374 7355 * they should be enumerated under mpxio
7375 7356 * or "luxadm dis" will fail
7376 7357 */
7377 7358 if (ignore_page83_data) {
7378 7359 fail = 0;
7379 7360 } else {
7380 7361 fail = 1;
7381 7362 }
7382 7363 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7383 7364 fcp_trace, FCP_BUF_LEVEL_2, 0,
7384 7365 "!Devid page cmd failed. "
7385 7366 "fpkt_state: %x fpkt_reason: %x",
7386 7367 "ignore_page83: %d",
7387 7368 fpkt->pkt_state, fpkt->pkt_reason,
7388 7369 ignore_page83_data);
7389 7370 }
7390 7371
7391 7372 mutex_enter(&pptr->port_mutex);
7392 7373 mutex_enter(&plun->lun_mutex);
7393 7374 /*
7394 7375 * If lun_cip is not NULL, then we needn't update lun_mpxio to avoid
7395 7376 * mismatch between lun_cip and lun_mpxio.
7396 7377 */
7397 7378 if (plun->lun_cip == NULL) {
7398 7379 /*
7399 7380 * If we don't have a guid for this lun it's because we were
7400 7381 * unable to glean one from the page 83 response. Set the
7401 7382 * control flag to 0 here to make sure that we don't attempt to
7402 7383 * enumerate it under mpxio.
7403 7384 */
7404 7385 if (fail || pptr->port_mpxio == 0) {
7405 7386 plun->lun_mpxio = 0;
7406 7387 } else {
7407 7388 plun->lun_mpxio = 1;
7408 7389 }
7409 7390 }
7410 7391 mutex_exit(&plun->lun_mutex);
7411 7392 mutex_exit(&pptr->port_mutex);
7412 7393
7413 7394 mutex_enter(&ptgt->tgt_mutex);
7414 7395 plun->lun_state &=
7415 7396 ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7416 7397 mutex_exit(&ptgt->tgt_mutex);
7417 7398
7418 7399 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7419 7400 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7420 7401
7421 7402 fcp_icmd_free(pptr, icmd);
7422 7403 }
7423 7404
7424 7405 /*
7425 7406 * Function: fcp_handle_inquiry
7426 7407 *
7427 7408 * Description: Called by fcp_scsi_callback to handle the response to an
7428 7409 * INQUIRY request.
7429 7410 *
7430 7411 * Argument: *fpkt FC packet used to convey the command.
7431 7412 * *icmd Original fcp_ipkt structure.
7432 7413 *
7433 7414 * Return Value: None
7434 7415 */
7435 7416 static void
7436 7417 fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7437 7418 {
7438 7419 struct fcp_port *pptr;
7439 7420 struct fcp_lun *plun;
7440 7421 struct fcp_tgt *ptgt;
7441 7422 uchar_t dtype;
7442 7423 uchar_t pqual;
7443 7424 uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
7444 7425
7445 7426 ASSERT(icmd != NULL && fpkt != NULL);
7446 7427
7447 7428 pptr = icmd->ipkt_port;
7448 7429 ptgt = icmd->ipkt_tgt;
7449 7430 plun = icmd->ipkt_lun;
7450 7431
7451 7432 FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
7452 7433 sizeof (struct scsi_inquiry));
7453 7434
7454 7435 dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
7455 7436 pqual = plun->lun_inq.inq_dtype >> 5;
7456 7437
7457 7438 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7458 7439 fcp_trace, FCP_BUF_LEVEL_5, 0,
7459 7440 "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
7460 7441 "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
7461 7442 plun->lun_num, dtype, pqual);
7462 7443
7463 7444 if (pqual != 0) {
7464 7445 /*
7465 7446 * Non-zero peripheral qualifier
7466 7447 */
7467 7448 fcp_log(CE_CONT, pptr->port_dip,
7468 7449 "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7469 7450 "Device type=0x%x Peripheral qual=0x%x\n",
7470 7451 ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7471 7452
7472 7453 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7473 7454 fcp_trace, FCP_BUF_LEVEL_5, 0,
7474 7455 "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7475 7456 "Device type=0x%x Peripheral qual=0x%x\n",
7476 7457 ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7477 7458
7478 7459 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);
7479 7460
7480 7461 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7481 7462 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7482 7463 fcp_icmd_free(pptr, icmd);
7483 7464 return;
7484 7465 }
7485 7466
7486 7467 /*
7487 7468 * If the device is already initialized, check the dtype
7488 7469 * for a change. If it has changed then update the flags
7489 7470 * so the create_luns will offline the old device and
7490 7471 * create the new device. Refer to bug: 4764752
7491 7472 */
7492 7473 if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
7493 7474 plun->lun_state |= FCP_LUN_CHANGED;
7494 7475 }
7495 7476 plun->lun_type = plun->lun_inq.inq_dtype;
7496 7477
7497 7478 /*
7498 7479 * This code is setting/initializing the throttling in the FCA
7499 7480 * driver.
7500 7481 */
7501 7482 mutex_enter(&pptr->port_mutex);
7502 7483 if (!pptr->port_notify) {
7503 7484 if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
7504 7485 uint32_t cmd = 0;
7505 7486 cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
7506 7487 ((cmd & 0xFFFFFF00 >> 8) |
7507 7488 FCP_SVE_THROTTLE << 8));
7508 7489 pptr->port_notify = 1;
7509 7490 mutex_exit(&pptr->port_mutex);
7510 7491 (void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
7511 7492 mutex_enter(&pptr->port_mutex);
7512 7493 }
7513 7494 }
7514 7495
7515 7496 if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7516 7497 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7517 7498 fcp_trace, FCP_BUF_LEVEL_2, 0,
7518 7499 "fcp_handle_inquiry,1:state change occured"
7519 7500 " for D_ID=0x%x", ptgt->tgt_d_id);
7520 7501 mutex_exit(&pptr->port_mutex);
7521 7502
7522 7503 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
7523 7504 (void) fcp_call_finish_init(pptr, ptgt,
7524 7505 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7525 7506 icmd->ipkt_cause);
7526 7507 fcp_icmd_free(pptr, icmd);
7527 7508 return;
7528 7509 }
7529 7510 ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7530 7511 mutex_exit(&pptr->port_mutex);
7531 7512
7532 7513 /* Retrieve the rscn count (if a valid one exists) */
7533 7514 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7534 7515 rscn_count = ((fc_ulp_rscn_info_t *)
7535 7516 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
7536 7517 } else {
7537 7518 rscn_count = FC_INVALID_RSCN_COUNT;
7538 7519 }
7539 7520
7540 7521 if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
7541 7522 SCMD_MAX_INQUIRY_PAGE83_SIZE,
7542 7523 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7543 7524 icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7544 7525 fcp_log(CE_WARN, NULL, "!failed to send page 83");
7545 7526 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
7546 7527 (void) fcp_call_finish_init(pptr, ptgt,
7547 7528 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7548 7529 icmd->ipkt_cause);
7549 7530 }
7550 7531
7551 7532 /*
7552 7533 * Read Inquiry VPD Page 0x83 to uniquely
7553 7534 * identify this logical unit.
7554 7535 */
7555 7536 fcp_icmd_free(pptr, icmd);
7556 7537 }
7557 7538
7558 7539 /*
7559 7540 * Function: fcp_handle_reportlun
7560 7541 *
7561 7542 * Description: Called by fcp_scsi_callback to handle the response to a
7562 7543 * REPORT_LUN request.
7563 7544 *
7564 7545 * Argument: *fpkt FC packet used to convey the command.
7565 7546 * *icmd Original fcp_ipkt structure.
7566 7547 *
7567 7548 * Return Value: None
7568 7549 */
static void
fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
{
	int	i;
	int	nluns_claimed;
	int	nluns_bufmax;
	int	len;
	uint16_t	lun_num;
	uint32_t	rscn_count = FC_INVALID_RSCN_COUNT;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	struct fcp_reportlun_resp	*report_lun;

	pptr = icmd->ipkt_port;
	ptgt = icmd->ipkt_tgt;
	len = fpkt->pkt_datalen;

	/*
	 * Response too short to even carry the REPORT_LUN header, or no
	 * memory for a private copy: account for this target and bail.
	 */
	if ((len < FCP_LUN_HEADER) ||
	    ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/* Copy the payload out of the DMA buffer before parsing it. */
	FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
	    fpkt->pkt_datalen);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
	    pptr->port_instance, ptgt->tgt_d_id);

	/*
	 * Get the number of luns (which is supplied as LUNS * 8) the
	 * device claims it has.
	 */
	nluns_claimed = BE_32(report_lun->num_lun) >> 3;

	/*
	 * Get the maximum number of luns the buffer submitted can hold.
	 */
	nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;

	/*
	 * Due to limitations of certain hardware, we support only 16 bit LUNs
	 */
	if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
		kmem_free(report_lun, len);

		fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
		    " 0x%x number of LUNs for target=%x", nluns_claimed,
		    ptgt->tgt_d_id);

		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		fcp_icmd_free(pptr, icmd);
		return;
	}

	/*
	 * If there are more LUNs than we have allocated memory for,
	 * allocate more space and send down yet another report lun if
	 * the maximum number of attempts hasn't been reached.
	 */
	mutex_enter(&ptgt->tgt_mutex);

	if ((nluns_claimed > nluns_bufmax) &&
	    (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {

		/* NOTE: shadows the outer plun declaration on purpose. */
		struct fcp_lun *plun;

		ptgt->tgt_report_lun_cnt++;
		plun = ptgt->tgt_lun;
		ASSERT(plun != NULL);
		mutex_exit(&ptgt->tgt_mutex);

		kmem_free(report_lun, len);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "!Dynamically discovered %d LUNs for D_ID=%x",
		    nluns_claimed, ptgt->tgt_d_id);

		/* Retrieve the rscn count (if a valid one exists) */
		if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
			rscn_count = ((fc_ulp_rscn_info_t *)
			    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
			    ulp_rscn_count;
		} else {
			rscn_count = FC_INVALID_RSCN_COUNT;
		}

		/* Re-issue REPORT_LUN with a buffer sized for the claim. */
		if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
		    FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
		    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
		    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
			(void) fcp_call_finish_init(pptr, ptgt,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause);
		}

		fcp_icmd_free(pptr, icmd);
		return;
	}

	/* Retry budget exhausted: process only what fits and log the loss. */
	if (nluns_claimed > nluns_bufmax) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "Target=%x:%x:%x:%x:%x:%x:%x:%x"
		    " Number of LUNs lost=%x",
		    ptgt->tgt_port_wwn.raw_wwn[0],
		    ptgt->tgt_port_wwn.raw_wwn[1],
		    ptgt->tgt_port_wwn.raw_wwn[2],
		    ptgt->tgt_port_wwn.raw_wwn[3],
		    ptgt->tgt_port_wwn.raw_wwn[4],
		    ptgt->tgt_port_wwn.raw_wwn[5],
		    ptgt->tgt_port_wwn.raw_wwn[6],
		    ptgt->tgt_port_wwn.raw_wwn[7],
		    nluns_claimed - nluns_bufmax);

		nluns_claimed = nluns_bufmax;
	}
	ptgt->tgt_lun_cnt = nluns_claimed;

	/*
	 * Identify missing LUNs and print warning messages
	 */
	for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
		int offline;
		int exists = 0;

		offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;

		/* Search the response for this known LUN's number. */
		for (i = 0; i < nluns_claimed && exists == 0; i++) {
			uchar_t *lun_string;

			lun_string = (uchar_t *)&(report_lun->lun_string[i]);

			switch (lun_string[0] & 0xC0) {
			case FCP_LUN_ADDRESSING:
			case FCP_PD_ADDRESSING:
			case FCP_VOLUME_ADDRESSING:
				lun_num = ((lun_string[0] & 0x3F) << 8) |
				    lun_string[1];
				if (plun->lun_num == lun_num) {
					exists++;
					break;
				}
				break;

			default:
				break;
			}
		}

		if (!exists && !offline) {
			/*
			 * Known, online LUN no longer reported.  Re-acquire
			 * locks in port->tgt order before checking state.
			 */
			mutex_exit(&ptgt->tgt_mutex);

			mutex_enter(&pptr->port_mutex);
			mutex_enter(&ptgt->tgt_mutex);
			if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
				/*
				 * set disappear flag when device was connected
				 */
				if (!(plun->lun_state &
				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
					plun->lun_state |= FCP_LUN_DISAPPEARED;
				}
				/* Drop locks before logging. */
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				if (!(plun->lun_state &
				    FCP_LUN_DEVICE_NOT_CONNECTED)) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun=%x for target=%x disappeared",
					    plun->lun_num, ptgt->tgt_d_id);
				}
				mutex_enter(&ptgt->tgt_mutex);
			} else {
				/* Link/target state changed mid-flight. */
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_5, 0,
				    "fcp_handle_reportlun,1: state change"
				    " occured for D_ID=0x%x", ptgt->tgt_d_id);
				mutex_exit(&ptgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
				kmem_free(report_lun, len);
				(void) fcp_call_finish_init(pptr, ptgt,
				    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
				    icmd->ipkt_cause);
				fcp_icmd_free(pptr, icmd);
				return;
			}
		} else if (exists) {
			/*
			 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
			 * actually exists in REPORT_LUN response
			 */
			if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
				plun->lun_state &=
				    ~FCP_LUN_DEVICE_NOT_CONNECTED;
			}
			if (offline || plun->lun_num == 0) {
				if (plun->lun_state & FCP_LUN_DISAPPEARED) {
					plun->lun_state &= ~FCP_LUN_DISAPPEARED;
					mutex_exit(&ptgt->tgt_mutex);
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun=%x for target=%x reappeared",
					    plun->lun_num, ptgt->tgt_d_id);
					mutex_enter(&ptgt->tgt_mutex);
				}
			}
		}
	}

	/* tgt_tmp_cnt counts outstanding per-LUN discoveries; min 1. */
	ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
	mutex_exit(&ptgt->tgt_mutex);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
	    pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);

	/* scan each lun */
	for (i = 0; i < nluns_claimed; i++) {
		uchar_t	*lun_string;

		lun_string = (uchar_t *)&(report_lun->lun_string[i]);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_5, 0,
		    "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
		    " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
		    lun_string[0]);

		switch (lun_string[0] & 0xC0) {
		case FCP_LUN_ADDRESSING:
		case FCP_PD_ADDRESSING:
		case FCP_VOLUME_ADDRESSING:
			lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];

			/* We will skip masked LUNs because of the blacklist. */
			if (fcp_lun_blacklist != NULL) {
				mutex_enter(&ptgt->tgt_mutex);
				if (fcp_should_mask(&ptgt->tgt_port_wwn,
				    lun_num) == TRUE) {
					ptgt->tgt_lun_cnt--;
					mutex_exit(&ptgt->tgt_mutex);
					break;
				}
				mutex_exit(&ptgt->tgt_mutex);
			}

			/* see if this LUN is already allocated */
			if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
				plun = fcp_alloc_lun(ptgt);
				if (plun == NULL) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!Lun allocation failed"
					    " target=%x lun=%x",
					    ptgt->tgt_d_id, lun_num);
					break;
				}
			}

			mutex_enter(&plun->lun_tgt->tgt_mutex);
			/* convert to LUN */
			plun->lun_addr.ent_addr_0 =
			    BE_16(*(uint16_t *)&(lun_string[0]));
			plun->lun_addr.ent_addr_1 =
			    BE_16(*(uint16_t *)&(lun_string[2]));
			plun->lun_addr.ent_addr_2 =
			    BE_16(*(uint16_t *)&(lun_string[4]));
			plun->lun_addr.ent_addr_3 =
			    BE_16(*(uint16_t *)&(lun_string[6]));

			plun->lun_num = lun_num;
			plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
			plun->lun_state &= ~FCP_LUN_OFFLINE;
			mutex_exit(&plun->lun_tgt->tgt_mutex);

			/* Retrieve the rscn count (if a valid one exists) */
			if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
				rscn_count = ((fc_ulp_rscn_info_t *)
				    (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
				    ulp_rscn_count;
			} else {
				rscn_count = FC_INVALID_RSCN_COUNT;
			}

			/* Kick off INQUIRY; on success skip the finish_init. */
			if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
			    icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
			    icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
				mutex_enter(&pptr->port_mutex);
				mutex_enter(&plun->lun_tgt->tgt_mutex);
				if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
					fcp_log(CE_NOTE, pptr->port_dip,
					    "!failed to send INQUIRY"
					    " target=%x lun=%x",
					    ptgt->tgt_d_id, plun->lun_num);
				} else {
					FCP_TRACE(fcp_logq,
					    pptr->port_instbuf, fcp_trace,
					    FCP_BUF_LEVEL_5, 0,
					    "fcp_handle_reportlun,2: state"
					    " change occured for D_ID=0x%x",
					    ptgt->tgt_d_id);
				}
				mutex_exit(&plun->lun_tgt->tgt_mutex);
				mutex_exit(&pptr->port_mutex);
			} else {
				continue;
			}
			break;

		default:
			fcp_log(CE_WARN, NULL,
			    "!Unsupported LUN Addressing method %x "
			    "in response to REPORT_LUN", lun_string[0]);
			break;
		}

		/*
		 * each time through this loop we should decrement
		 * the tmp_cnt by one -- since we go through this loop
		 * one time for each LUN, the tmp_cnt should never be <=0
		 */
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
	}

	/* Target reported zero LUNs: still consume the tmp_cnt of 1. */
	if (i == 0) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
		(void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
		    icmd->ipkt_change_cnt, icmd->ipkt_cause);
	}

	kmem_free(report_lun, len);
	fcp_icmd_free(pptr, icmd);
}
7910 7891
7911 7892
7912 7893 /*
7913 7894 * called internally to return a LUN given a target and a LUN number
7914 7895 */
7915 7896 static struct fcp_lun *
7916 7897 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7917 7898 {
7918 7899 struct fcp_lun *plun;
7919 7900
7920 7901 mutex_enter(&ptgt->tgt_mutex);
7921 7902 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7922 7903 if (plun->lun_num == lun_num) {
7923 7904 mutex_exit(&ptgt->tgt_mutex);
7924 7905 return (plun);
7925 7906 }
7926 7907 }
7927 7908 mutex_exit(&ptgt->tgt_mutex);
7928 7909
7929 7910 return (NULL);
7930 7911 }
7931 7912
7932 7913
7933 7914 /*
7934 7915 * handle finishing one target for fcp_finish_init
7935 7916 *
7936 7917 * return true (non-zero) if we want finish_init to continue with the
7937 7918 * next target
7938 7919 *
7939 7920 * called with the port mutex held
7940 7921 */
/*ARGSUSED*/
static int
fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int cause)
{
	int	rval = 1;
	ASSERT(pptr != NULL);
	ASSERT(ptgt != NULL);

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_5, 0,
	    "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
	    ptgt->tgt_state);

	ASSERT(mutex_owned(&pptr->port_mutex));

	/* Bail out if a newer link reset or target change superseded us. */
	if ((pptr->port_link_cnt != link_cnt) ||
	    (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
		/*
		 * oh oh -- another link reset or target change
		 * must have occurred while we are in here
		 */
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);

		return (0);
	} else {
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
	}

	mutex_enter(&ptgt->tgt_mutex);

	if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
		/*
		 * tgt is not offline -- is it marked (i.e. needs
		 * to be offlined) ??
		 */
		if (ptgt->tgt_state & FCP_TGT_MARK) {
			/*
			 * this target not offline *and*
			 * marked
			 */
			ptgt->tgt_state &= ~FCP_TGT_MARK;
			rval = fcp_offline_target(pptr, ptgt, link_cnt,
			    tgt_cnt, 0, 0);
		} else {
			ptgt->tgt_state &= ~FCP_TGT_BUSY;

			/* create the LUNs */
			if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
				ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
				fcp_create_luns(ptgt, link_cnt, tgt_cnt,
				    cause);
				ptgt->tgt_device_created = 1;
			} else {
				/* on-demand node: just clear LUN busy state */
				fcp_update_tgt_state(ptgt, FCP_RESET,
				    FCP_LUN_BUSY);
			}
		}
	}

	mutex_exit(&ptgt->tgt_mutex);

	return (rval);
}
8005 7986
8006 7987
8007 7988 /*
8008 7989 * this routine is called to finish port initialization
8009 7990 *
8010 7991 * Each port has a "temp" counter -- when a state change happens (e.g.
8011 7992 * port online), the temp count is set to the number of devices in the map.
8012 7993 * Then, as each device gets "discovered", the temp counter is decremented
8013 7994 * by one. When this count reaches zero we know that all of the devices
8014 7995 * in the map have been discovered (or an error has occurred), so we can
8015 7996 * then finish initialization -- which is done by this routine (well, this
8016 7997 * and fcp-finish_tgt())
8017 7998 *
8018 7999 * acquires and releases the global mutex
8019 8000 *
8020 8001 * called with the port mutex owned
8021 8002 */
static void
fcp_finish_init(struct fcp_port *pptr)
{
#ifdef	DEBUG
	/* Record the caller's stack for post-mortem analysis of this path. */
	bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
	pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
	    FCP_STACK_DEPTH);
#endif /* DEBUG */

	ASSERT(mutex_owned(&pptr->port_mutex));

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
	    " entering; ipkt count=%d", pptr->port_ipkt_cnt);

	/*
	 * Promote ONLINING -> ONLINE, but only if the port is not in the
	 * middle of suspend/detach/power-down.
	 */
	if ((pptr->port_state & FCP_STATE_ONLINING) &&
	    !(pptr->port_state & (FCP_STATE_SUSPENDED |
	    FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
		pptr->port_state &= ~FCP_STATE_ONLINING;
		pptr->port_state |= FCP_STATE_ONLINE;
	}

	/* Wake up threads waiting on config done */
	cv_broadcast(&pptr->port_config_cv);
}
8047 8028
8048 8029
8049 8030 /*
8050 8031 * called from fcp_finish_init to create the LUNs for a target
8051 8032 *
8052 8033 * called with the port mutex owned
8053 8034 */
static void
fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
{
	struct fcp_lun	*plun;
	struct fcp_port	*pptr;
	child_info_t	*cip = NULL;

	ASSERT(ptgt != NULL);
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	pptr = ptgt->tgt_port;

	ASSERT(pptr != NULL);

	/* scan all LUNs for this target */
	for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
		if (plun->lun_state & FCP_LUN_OFFLINE) {
			continue;
		}

		/* Marked LUNs were not seen in the latest discovery. */
		if (plun->lun_state & FCP_LUN_MARK) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_create_luns: offlining marked LUN!");
			fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
			continue;
		}

		plun->lun_state &= ~FCP_LUN_BUSY;

		/*
		 * There are conditions in which FCP_LUN_INIT flag is cleared
		 * but we have a valid plun->lun_cip. To cover this case also
		 * CLEAR_BUSY whenever we have a valid lun_cip.
		 */
		if (plun->lun_mpxio && plun->lun_cip &&
		    (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
		    0, 0))) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_create_luns: enable lun %p failed!",
			    plun);
		}

		/* Already initialized and unchanged: nothing to create. */
		if (plun->lun_state & FCP_LUN_INIT &&
		    !(plun->lun_state & FCP_LUN_CHANGED)) {
			continue;
		}

		if (cause == FCP_CAUSE_USER_CREATE) {
			continue;
		}

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_6, 0,
		    "create_luns: passing ONLINE elem to HP thread");

		/*
		 * If lun has changed, prepare for offlining the old path.
		 * Do not offline the old path right now, since it may be
		 * still opened.
		 */
		if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
			fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
		}

		/* pass an ONLINE element to the hotplug thread */
		if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
		    link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {

			/*
			 * We can not synchronous attach (i.e pass
			 * NDI_ONLINE_ATTACH) here as we might be
			 * coming from an interrupt or callback
			 * thread.
			 */
			if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
			    link_cnt, tgt_cnt, 0, 0)) {
				fcp_log(CE_CONT, pptr->port_dip,
				    "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
				    plun->lun_tgt->tgt_d_id, plun->lun_num);
			}
		}
	}
}
8140 8121
8141 8122
8142 8123 /*
8143 8124 * function to online/offline devices
8144 8125 */
8145 8126 static int
8146 8127 fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
8147 8128 int online, int lcount, int tcount, int flags)
8148 8129 {
8149 8130 int rval = NDI_FAILURE;
8150 8131 int circ;
8151 8132 child_info_t *ccip;
8152 8133 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
8153 8134 int is_mpxio = pptr->port_mpxio;
8154 8135 dev_info_t *cdip, *pdip;
8155 8136 char *devname;
8156 8137
8157 8138 if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
8158 8139 /*
8159 8140 * When this event gets serviced, lun_cip and lun_mpxio
8160 8141 * has changed, so it should be invalidated now.
8161 8142 */
8162 8143 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
8163 8144 FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
8164 8145 "plun: %p, cip: %p, what:%d", plun, cip, online);
8165 8146 return (rval);
8166 8147 }
8167 8148
8168 8149 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8169 8150 fcp_trace, FCP_BUF_LEVEL_2, 0,
8170 8151 "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
8171 8152 "flags=%x mpxio=%x\n",
8172 8153 plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
8173 8154 plun->lun_mpxio);
8174 8155
8175 8156 /*
8176 8157 * lun_mpxio needs checking here because we can end up in a race
8177 8158 * condition where this task has been dispatched while lun_mpxio is
8178 8159 * set, but an earlier FCP_ONLINE task for the same LUN tried to
8179 8160 * enable MPXIO for the LUN, but was unable to, and hence cleared
8180 8161 * the flag. We rely on the serialization of the tasks here. We return
8181 8162 * NDI_SUCCESS so any callers continue without reporting spurious
8182 8163 * errors, and the still think we're an MPXIO LUN.
8183 8164 */
8184 8165
8185 8166 if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
8186 8167 online == FCP_MPXIO_PATH_SET_BUSY) {
8187 8168 if (plun->lun_mpxio) {
8188 8169 rval = fcp_update_mpxio_path(plun, cip, online);
8189 8170 } else {
8190 8171 rval = NDI_SUCCESS;
8191 8172 }
8192 8173 return (rval);
8193 8174 }
8194 8175
8195 8176 /*
8196 8177 * Explicit devfs_clean() due to ndi_devi_offline() not
8197 8178 * executing devfs_clean() if parent lock is held.
8198 8179 */
8199 8180 ASSERT(!servicing_interrupt());
8200 8181 if (online == FCP_OFFLINE) {
8201 8182 if (plun->lun_mpxio == 0) {
8202 8183 if (plun->lun_cip == cip) {
8203 8184 cdip = DIP(plun->lun_cip);
8204 8185 } else {
8205 8186 cdip = DIP(cip);
8206 8187 }
8207 8188 } else if ((plun->lun_cip == cip) && plun->lun_cip) {
8208 8189 cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8209 8190 } else if ((plun->lun_cip != cip) && cip) {
8210 8191 /*
8211 8192 * This means a DTYPE/GUID change, we shall get the
8212 8193 * dip of the old cip instead of the current lun_cip.
8213 8194 */
8214 8195 cdip = mdi_pi_get_client(PIP(cip));
8215 8196 }
8216 8197 if (cdip) {
8217 8198 if (i_ddi_devi_attached(cdip)) {
8218 8199 pdip = ddi_get_parent(cdip);
8219 8200 devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
8220 8201 ndi_devi_enter(pdip, &circ);
8221 8202 (void) ddi_deviname(cdip, devname);
8222 8203 /*
8223 8204 * Release parent lock before calling
8224 8205 * devfs_clean().
8225 8206 */
8226 8207 ndi_devi_exit(pdip, circ);
8227 8208 (void) devfs_clean(pdip, devname + 1,
8228 8209 DV_CLEAN_FORCE);
8229 8210 kmem_free(devname, MAXNAMELEN + 1);
8230 8211 }
8231 8212 }
8232 8213 }
8233 8214
8234 8215 if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
8235 8216 return (NDI_FAILURE);
8236 8217 }
8237 8218
8238 8219 if (is_mpxio) {
8239 8220 mdi_devi_enter(pptr->port_dip, &circ);
8240 8221 } else {
8241 8222 ndi_devi_enter(pptr->port_dip, &circ);
8242 8223 }
8243 8224
8244 8225 mutex_enter(&pptr->port_mutex);
8245 8226 mutex_enter(&plun->lun_mutex);
8246 8227
8247 8228 if (online == FCP_ONLINE) {
8248 8229 ccip = fcp_get_cip(plun, cip, lcount, tcount);
8249 8230 if (ccip == NULL) {
8250 8231 goto fail;
8251 8232 }
8252 8233 } else {
8253 8234 if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
8254 8235 goto fail;
8255 8236 }
8256 8237 ccip = cip;
8257 8238 }
8258 8239
8259 8240 if (online == FCP_ONLINE) {
8260 8241 rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
8261 8242 &circ);
8262 8243 fc_ulp_log_device_event(pptr->port_fp_handle,
8263 8244 FC_ULP_DEVICE_ONLINE);
8264 8245 } else {
8265 8246 rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
8266 8247 &circ);
8267 8248 fc_ulp_log_device_event(pptr->port_fp_handle,
8268 8249 FC_ULP_DEVICE_OFFLINE);
8269 8250 }
8270 8251
8271 8252 fail: mutex_exit(&plun->lun_mutex);
8272 8253 mutex_exit(&pptr->port_mutex);
8273 8254
8274 8255 if (is_mpxio) {
8275 8256 mdi_devi_exit(pptr->port_dip, circ);
8276 8257 } else {
8277 8258 ndi_devi_exit(pptr->port_dip, circ);
8278 8259 }
8279 8260
8280 8261 fc_ulp_idle_port(pptr->port_fp_handle);
8281 8262
8282 8263 return (rval);
8283 8264 }
8284 8265
8285 8266
8286 8267 /*
8287 8268 * take a target offline by taking all of its LUNs offline
8288 8269 */
/*ARGSUSED*/
static int
fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int nowait, int flags)
{
	struct fcp_tgt_elem	*elem;

	ASSERT(mutex_owned(&pptr->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));

	/* Stale request: counts moved on since this was initiated. */
	if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
	    ptgt->tgt_change_cnt)) {
		/* tgt_mutex is dropped around the trace macro. */
		mutex_exit(&ptgt->tgt_mutex);
		FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
		mutex_enter(&ptgt->tgt_mutex);

		return (0);
	}

	ptgt->tgt_pd_handle = NULL;
	mutex_exit(&ptgt->tgt_mutex);
	FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
	mutex_enter(&ptgt->tgt_mutex);

	tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;

	/*
	 * Queue a deferred-offline element if possible; otherwise (no tcap
	 * or allocation failure) offline the target immediately.
	 */
	if (ptgt->tgt_tcap &&
	    (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
		elem->flags = flags;
		elem->time = fcp_watchdog_time;
		if (nowait == 0) {
			elem->time += fcp_offline_delay;
		}
		elem->ptgt = ptgt;
		elem->link_cnt = link_cnt;
		elem->tgt_cnt = tgt_cnt;
		elem->next = pptr->port_offline_tgts;
		pptr->port_offline_tgts = elem;
	} else {
		fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
	}

	return (1);
}
8335 8316
8336 8317
/*
 * Immediately offline a target: re-enable relogin for its WWN, mark the
 * target offline, drop its port device handle and offline all of its LUNs.
 * Called with both port_mutex and tgt_mutex held.
 */
static void
fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int link_cnt, int tgt_cnt, int flags)
{
	ASSERT(mutex_owned(&pptr->port_mutex));
	ASSERT(mutex_owned(&ptgt->tgt_mutex));

	fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
	ptgt->tgt_state = FCP_TGT_OFFLINE;
	ptgt->tgt_pd_handle = NULL;
	fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
}
8349 8330
8350 8331
8351 8332 static void
8352 8333 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
8353 8334 int flags)
8354 8335 {
8355 8336 struct fcp_lun *plun;
8356 8337
8357 8338 ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
8358 8339 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8359 8340
8360 8341 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8361 8342 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
8362 8343 fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
8363 8344 }
8364 8345 }
8365 8346 }
8366 8347
8367 8348
8368 8349 /*
8369 8350 * take a LUN offline
8370 8351 *
8371 8352 * enters and leaves with the target mutex held, releasing it in the process
8372 8353 *
8373 8354 * allocates memory in non-sleep mode
8374 8355 */
8375 8356 static void
8376 8357 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8377 8358 int nowait, int flags)
8378 8359 {
8379 8360 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
8380 8361 struct fcp_lun_elem *elem;
8381 8362
8382 8363 ASSERT(plun != NULL);
8383 8364 ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8384 8365
8385 8366 if (nowait) {
8386 8367 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8387 8368 return;
8388 8369 }
8389 8370
8390 8371 if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8391 8372 elem->flags = flags;
8392 8373 elem->time = fcp_watchdog_time;
8393 8374 if (nowait == 0) {
8394 8375 elem->time += fcp_offline_delay;
8395 8376 }
8396 8377 elem->plun = plun;
8397 8378 elem->link_cnt = link_cnt;
8398 8379 elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
8399 8380 elem->next = pptr->port_offline_luns;
8400 8381 pptr->port_offline_luns = elem;
8401 8382 } else {
8402 8383 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8403 8384 }
8404 8385 }
8405 8386
8406 8387
/*
 * Abort all outstanding commands on plun and, for an MPxIO LUN, clear the
 * path-busy state and disable the path in preparation for offlining.
 * Called with the target mutex held; it is dropped and re-acquired around
 * the command scan/abort and the mdi call.
 */
static void
fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
{
	struct fcp_pkt	*head = NULL;

	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	mutex_exit(&LUN_TGT->tgt_mutex);

	/* Collect and abort every command still queued for this LUN. */
	head = fcp_scan_commands(plun);
	if (head != NULL) {
		fcp_abort_commands(head, LUN_PORT);
	}

	mutex_enter(&LUN_TGT->tgt_mutex);

	if (plun->lun_cip && plun->lun_mpxio) {
		/*
		 * Intimate MPxIO lun busy is cleared
		 */
		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
		    FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
		    0, 0)) {
			fcp_log(CE_NOTE, LUN_PORT->port_dip,
			    "Can not ENABLE LUN; D_ID=%x, LUN=%x",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
		/*
		 * Intimate MPxIO that the lun is now marked for offline
		 */
		mutex_exit(&LUN_TGT->tgt_mutex);
		(void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
		mutex_enter(&LUN_TGT->tgt_mutex);
	}
}
8442 8423
/*
 * Offline a LUN immediately: update the LUN state flags, abort its
 * queued commands, and pass an FCP_OFFLINE element to the hotplug
 * thread for the device-tree work.
 *
 * Enters and leaves with the target mutex held; it is dropped and
 * re-acquired around fcp_update_offline_flags() (which takes the
 * target mutex itself).
 */
static void
fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
    int flags)
{
	ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));

	mutex_exit(&LUN_TGT->tgt_mutex);
	fcp_update_offline_flags(plun);
	mutex_enter(&LUN_TGT->tgt_mutex);

	fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);

	FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_4, 0,
	    "offline_lun: passing OFFLINE elem to HP thread");

	/* Only LUNs that have a child device node need the HP thread. */
	if (plun->lun_cip) {
		fcp_log(CE_NOTE, LUN_PORT->port_dip,
		    "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
		    plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
		    LUN_TGT->tgt_trace);

		if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
		    link_cnt, tgt_cnt, flags, 0)) {
			fcp_log(CE_CONT, LUN_PORT->port_dip,
			    "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
			    LUN_TGT->tgt_d_id, plun->lun_num);
		}
	}
}
8473 8454
/*
 * Walk the port's delayed-offline LUN list and offline every element
 * whose time has expired.  Expired elements are unlinked and freed
 * whether or not the offline is still applicable: a link or target
 * change since the element was queued makes it stale.
 *
 * Called with the port mutex held.
 */
static void
fcp_scan_offline_luns(struct fcp_port *pptr)
{
	struct fcp_lun_elem *elem;
	struct fcp_lun_elem *prev;
	struct fcp_lun_elem *next;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	prev = NULL;
	elem = pptr->port_offline_luns;
	while (elem) {
		next = elem->next;
		if (elem->time <= fcp_watchdog_time) {
			int changed = 1;
			struct fcp_tgt *ptgt = elem->plun->lun_tgt;

			mutex_enter(&ptgt->tgt_mutex);
			/* Stale if link or target changed since queued. */
			if (pptr->port_link_cnt == elem->link_cnt &&
			    ptgt->tgt_change_cnt == elem->tgt_cnt) {
				changed = 0;
			}

			/*
			 * NOTE(review): lun_state is tested against
			 * FCP_TGT_OFFLINE, not FCP_LUN_OFFLINE — verify
			 * the flag values make this intentional.
			 */
			if (!changed &&
			    !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
				fcp_offline_lun_now(elem->plun,
				    elem->link_cnt, elem->tgt_cnt, elem->flags);
			}
			mutex_exit(&ptgt->tgt_mutex);

			kmem_free(elem, sizeof (*elem));

			/* Unlink the expired element from the list. */
			if (prev) {
				prev->next = next;
			} else {
				pptr->port_offline_luns = next;
			}
		} else {
			prev = elem;
		}
		elem = next;
	}
}
8517 8498
8518 8499
/*
 * Walk the port's delayed-offline target list and offline every element
 * whose time has expired.  Expired elements are unlinked and freed
 * whether or not the offline is still applicable (an "outdated" element
 * refers to a target that changed again after it was queued).
 *
 * Called with the port mutex held.
 */
static void
fcp_scan_offline_tgts(struct fcp_port *pptr)
{
	struct fcp_tgt_elem *elem;
	struct fcp_tgt_elem *prev;
	struct fcp_tgt_elem *next;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	prev = NULL;
	elem = pptr->port_offline_tgts;
	while (elem) {
		next = elem->next;
		if (elem->time <= fcp_watchdog_time) {
			int outdated = 1;
			struct fcp_tgt *ptgt = elem->ptgt;

			mutex_enter(&ptgt->tgt_mutex);

			if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
				/* No change on tgt since elem was created. */
				outdated = 0;
			} else if (ptgt->tgt_change_cnt == elem->tgt_cnt + 1 &&
			    pptr->port_link_cnt == elem->link_cnt + 1 &&
			    ptgt->tgt_statec_cause == FCP_CAUSE_LINK_DOWN) {
				/*
				 * Exactly one thing happened to the target
				 * inbetween: the local port went offline.
				 * For fp the remote port is already gone so
				 * it will not tell us again to offline the
				 * target. We must offline it now.
				 */
				outdated = 0;
			}

			if (!outdated && !(ptgt->tgt_state &
			    FCP_TGT_OFFLINE)) {
				fcp_offline_target_now(pptr,
				    ptgt, elem->link_cnt, elem->tgt_cnt,
				    elem->flags);
			}

			mutex_exit(&ptgt->tgt_mutex);

			kmem_free(elem, sizeof (*elem));

			/* Unlink the expired element from the list. */
			if (prev) {
				prev->next = next;
			} else {
				pptr->port_offline_tgts = next;
			}
		} else {
			prev = elem;
		}
		elem = next;
	}
}
8576 8557
8577 8558
/*
 * Mark a LUN offline in its state flags and, if a child device node had
 * been created for it (FCP_SCSI_LUN_TGT_INIT), run the FCAL_REMOVE_EVENT
 * NDI callbacks on that child so interested drivers are notified.
 *
 * Takes and releases both the target mutex and the lun mutex; note the
 * asymmetric release order between the two branches below (the event
 * branch drops tgt_mutex before resolving the child node).
 */
static void
fcp_update_offline_flags(struct fcp_lun *plun)
{
	struct fcp_port *pptr = LUN_PORT;
	ASSERT(plun != NULL);

	mutex_enter(&LUN_TGT->tgt_mutex);
	plun->lun_state |= FCP_LUN_OFFLINE;
	plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);

	mutex_enter(&plun->lun_mutex);
	if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
		dev_info_t *cdip = NULL;

		mutex_exit(&LUN_TGT->tgt_mutex);

		/* Resolve the child dev_info node (direct or via MPxIO). */
		if (plun->lun_mpxio == 0) {
			cdip = DIP(plun->lun_cip);
		} else if (plun->lun_cip) {
			cdip = mdi_pi_get_client(PIP(plun->lun_cip));
		}

		mutex_exit(&plun->lun_mutex);
		if (cdip) {
			/* Fire the removal-event callbacks on the child. */
			(void) ndi_event_retrieve_cookie(
			    pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
			    &fcp_remove_eid, NDI_EVENT_NOPASS);
			(void) ndi_event_run_callbacks(
			    pptr->port_ndi_event_hdl, cdip,
			    fcp_remove_eid, NULL);
		}
	} else {
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&LUN_TGT->tgt_mutex);
	}
}
8614 8595
8615 8596
/*
 * Scan all of the command pkts for this port, moving pkts that
 * match our LUN onto our own list (headed by "head").
 *
 * Commands already issued to the FCA (FCP_PKT_ISSUED) or polled
 * (FLAG_NOINTR) are left on the port queue.  Returns the head of the
 * unlinked singly-linked list, or NULL if no matching command was
 * queued.  Takes the port packet mutex.
 */
static struct fcp_pkt *
fcp_scan_commands(struct fcp_lun *plun)
{
	struct fcp_port *pptr = LUN_PORT;

	struct fcp_pkt *cmd = NULL;	/* pkt cmd ptr */
	struct fcp_pkt *ncmd = NULL;	/* next pkt ptr */
	struct fcp_pkt *pcmd = NULL;	/* the previous command */

	struct fcp_pkt *head = NULL;	/* head of our list */
	struct fcp_pkt *tail = NULL;	/* tail of our list */

	int cmds_found = 0;

	mutex_enter(&pptr->port_pkt_mutex);
	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
		struct fcp_lun *tlun =
		    ADDR2LUN(&cmd->cmd_pkt->pkt_address);

		ncmd = cmd->cmd_next;	/* set next command */

		/*
		 * if this pkt is for a different LUN or the
		 * command is sent down, skip it.
		 */
		if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
		    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
			pcmd = cmd;
			continue;
		}
		cmds_found++;
		/* unlink cmd from the port queue ... */
		if (pcmd != NULL) {
			ASSERT(pptr->port_pkt_head != cmd);
			pcmd->cmd_next = cmd->cmd_next;
		} else {
			ASSERT(cmd == pptr->port_pkt_head);
			pptr->port_pkt_head = cmd->cmd_next;
		}

		/* ... fix the tail pointer if cmd was the last element ... */
		if (cmd == pptr->port_pkt_tail) {
			pptr->port_pkt_tail = pcmd;
			if (pcmd) {
				pcmd->cmd_next = NULL;
			}
		}

		/* ... and append it to our private list. */
		if (head == NULL) {
			head = tail = cmd;
		} else {
			ASSERT(tail != NULL);

			tail->cmd_next = cmd;
			tail = cmd;
		}
		cmd->cmd_next = NULL;
	}
	mutex_exit(&pptr->port_pkt_mutex);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "scan commands: %d cmd(s) found", cmds_found);

	return (head);
}
8684 8665
8685 8666
/*
 * Abort all the commands in the command queue
 *
 * Completes every pkt on "head" with CMD_DEV_GONE so the target driver
 * neither retries nor requeues it; the LUN is about to be offlined.
 *
 * Called with the port mutex held; the mutex is dropped around each
 * completion callback and re-acquired.
 */
static void
fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
{
	struct fcp_pkt *cmd = NULL;	/* pkt cmd ptr */
	struct fcp_pkt *ncmd = NULL;	/* next pkt ptr */

	ASSERT(mutex_owned(&pptr->port_mutex));

	/* scan through the pkts and invalidate them */
	for (cmd = head; cmd != NULL; cmd = ncmd) {
		struct scsi_pkt *pkt = cmd->cmd_pkt;

		ncmd = cmd->cmd_next;
		ASSERT(pkt != NULL);

		/*
		 * The lun is going to be marked offline. Indicate
		 * the target driver not to requeue or retry this command
		 * as the device is going to be offlined pretty soon.
		 */
		pkt->pkt_reason = CMD_DEV_GONE;
		pkt->pkt_statistics = 0;
		pkt->pkt_state = 0;

		/* reset cmd flags/state */
		cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
		cmd->cmd_state = FCP_PKT_IDLE;

		/*
		 * ensure we have a packet completion routine,
		 * then call it.
		 */
		ASSERT(pkt->pkt_comp != NULL);

		/* the completion callback may block: drop the port mutex */
		mutex_exit(&pptr->port_mutex);
		fcp_post_callback(cmd);
		mutex_enter(&pptr->port_mutex);
	}
}
8728 8709
8729 8710
8730 8711 /*
8731 8712 * the pkt_comp callback for command packets
8732 8713 */
8733 8714 static void
8734 8715 fcp_cmd_callback(fc_packet_t *fpkt)
8735 8716 {
8736 8717 struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
8737 8718 struct scsi_pkt *pkt = cmd->cmd_pkt;
8738 8719 struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
8739 8720
8740 8721 ASSERT(cmd->cmd_state != FCP_PKT_IDLE);
8741 8722
8742 8723 if (cmd->cmd_state == FCP_PKT_IDLE) {
8743 8724 cmn_err(CE_PANIC, "Packet already completed %p",
8744 8725 (void *)cmd);
8745 8726 }
8746 8727
8747 8728 /*
8748 8729 * Watch thread should be freeing the packet, ignore the pkt.
8749 8730 */
8750 8731 if (cmd->cmd_state == FCP_PKT_ABORTING) {
8751 8732 fcp_log(CE_CONT, pptr->port_dip,
8752 8733 "!FCP: Pkt completed while aborting\n");
8753 8734 return;
8754 8735 }
8755 8736 cmd->cmd_state = FCP_PKT_IDLE;
8756 8737
8757 8738 fcp_complete_pkt(fpkt);
8758 8739
8759 8740 #ifdef DEBUG
8760 8741 mutex_enter(&pptr->port_pkt_mutex);
8761 8742 pptr->port_npkts--;
8762 8743 mutex_exit(&pptr->port_pkt_mutex);
8763 8744 #endif /* DEBUG */
8764 8745
8765 8746 fcp_post_callback(cmd);
8766 8747 }
8767 8748
8768 8749
/*
 * Translate an FC packet completion (fpkt) into the SCSI pkt state,
 * reason, residual, status byte and (when present) auto-request-sense
 * data that the target driver expects.
 *
 * On FC_PKT_SUCCESS the FCP response IU is decoded: residuals, FCP
 * protocol errors (rsp_len_set) and sense data (sense_len_set) are
 * handled; sense keys indicating a LUN inventory change kick off a
 * LUN reconfiguration.  Any other fpkt->pkt_state is mapped to a
 * best-effort CMD_* reason / STAT_* statistics combination.
 */
static void
fcp_complete_pkt(fc_packet_t *fpkt)
{
	int error = 0;
	struct fcp_pkt *cmd = (struct fcp_pkt *)
	    fpkt->pkt_ulp_private;
	struct scsi_pkt *pkt = cmd->cmd_pkt;
	struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
	struct fcp_lun *plun;
	struct fcp_tgt *ptgt;
	struct fcp_rsp *rsp;
	struct scsi_address save;	/* only valid under DEBUG */

#ifdef	DEBUG
	save = pkt->pkt_address;
#endif /* DEBUG */

	rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;

	if (fpkt->pkt_state == FC_PKT_SUCCESS) {
		/* Copy the response IU out of DMA space if needed. */
		if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));
		}

		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_GOT_STATUS;

		pkt->pkt_resid = 0;

		if (fpkt->pkt_datalen) {
			pkt->pkt_state |= STATE_XFERRED_DATA;
			/* a data residual forces the slow path below */
			if (fpkt->pkt_data_resid) {
				error++;
			}
		}

		/* Propagate the SCSI status byte to the target driver. */
		if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
		    rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
			/*
			 * The next two checks make sure that if there
			 * is no sense data or a valid response and
			 * the command came back with check condition,
			 * the command should be retried.
			 */
			if (!rsp->fcp_u.fcp_status.rsp_len_set &&
			    !rsp->fcp_u.fcp_status.sense_len_set) {
				pkt->pkt_state &= ~STATE_XFERRED_DATA;
				pkt->pkt_resid = cmd->cmd_dmacount;
			}
		}

		/* Fast path: clean completion, nothing more to decode. */
		if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
			return;
		}

		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		/*
		 * Update the transfer resid, if appropriate
		 */
		if (rsp->fcp_u.fcp_status.resid_over ||
		    rsp->fcp_u.fcp_status.resid_under) {
			pkt->pkt_resid = rsp->fcp_resid;
		}

		/*
		 * First see if we got a FCP protocol error.
		 */
		if (rsp->fcp_u.fcp_status.rsp_len_set) {
			struct fcp_rsp_info *bep;
			bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
			    sizeof (struct fcp_rsp));

			/* Reject responses with out-of-range lengths. */
			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len, rsp->fcp_sense_len);

				return;
			}

			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp), bep,
				    fpkt->pkt_resp_acc,
				    sizeof (struct fcp_rsp_info));
			}

			/* Any FCP-level failure becomes a transport error. */
			if (bep->rsp_code != FCP_NO_FAILURE) {
				child_info_t *cip;

				pkt->pkt_reason = CMD_TRAN_ERR;

				mutex_enter(&plun->lun_mutex);
				cip = plun->lun_cip;
				mutex_exit(&plun->lun_mutex);

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "FCP response error on cmd=%p"
				    " target=0x%x, cip=%p", cmd,
				    ptgt->tgt_d_id, cip);
			}
		}

		/*
		 * See if we got a SCSI error with sense data
		 */
		if (rsp->fcp_u.fcp_status.sense_len_set) {
			uchar_t rqlen;
			caddr_t sense_from;
			child_info_t *cip;
			timeout_id_t tid;
			struct scsi_arq_status *arq;
			struct scsi_extended_sense *sense_to;

			arq = (struct scsi_arq_status *)pkt->pkt_scbp;
			sense_to = &arq->sts_sensedata;

			/* Clamp to what the ARQ area can hold. */
			rqlen = (uchar_t)min(rsp->fcp_sense_len,
			    sizeof (struct scsi_extended_sense));

			/* Sense data follows the rsp IU and rsp info. */
			sense_from = (caddr_t)fpkt->pkt_resp +
			    sizeof (struct fcp_rsp) + rsp->fcp_response_len;

			if (fcp_validate_fcp_response(rsp, pptr) !=
			    FC_SUCCESS) {
				pkt->pkt_reason = CMD_CMPLT;
				*(pkt->pkt_scbp) = STATUS_CHECK;

				fcp_log(CE_WARN, pptr->port_dip,
				    "!SCSI command to d_id=0x%x lun=0x%x"
				    " failed, Bad FCP response values:"
				    " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
				    " sts-rsvd2=%x, rsplen=%x, senselen=%x",
				    ptgt->tgt_d_id, plun->lun_num,
				    rsp->reserved_0, rsp->reserved_1,
				    rsp->fcp_u.fcp_status.reserved_0,
				    rsp->fcp_u.fcp_status.reserved_1,
				    rsp->fcp_response_len, rsp->fcp_sense_len);

				return;
			}

			/*
			 * copy in sense information
			 */
			if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
				FCP_CP_IN(sense_from, sense_to,
				    fpkt->pkt_resp_acc, rqlen);
			} else {
				bcopy(sense_from, sense_to, rqlen);
			}

			/*
			 * A REPORT LUNS DATA HAS CHANGED / LU NOT
			 * SUPPORTED sense key means the LUN inventory
			 * changed: schedule a reconfiguration (once).
			 */
			if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
			    (FCP_SENSE_NO_LUN(sense_to))) {
				mutex_enter(&ptgt->tgt_mutex);
				if (ptgt->tgt_tid == NULL) {
					/*
					 * Kick off rediscovery
					 */
					tid = timeout(fcp_reconfigure_luns,
					    (caddr_t)ptgt, drv_usectohz(1));

					ptgt->tgt_tid = tid;
					ptgt->tgt_state |= FCP_TGT_BUSY;
				}
				mutex_exit(&ptgt->tgt_mutex);
				if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
					FCP_TRACE(fcp_logq, pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: Report Lun Has Changed"
					    " target=%x", ptgt->tgt_d_id);
				} else if (FCP_SENSE_NO_LUN(sense_to)) {
					FCP_TRACE(fcp_logq, pptr->port_instbuf,
					    fcp_trace, FCP_BUF_LEVEL_3, 0,
					    "!FCP: LU Not Supported"
					    " target=%x", ptgt->tgt_d_id);
				}
			}
			ASSERT(pkt->pkt_scbp != NULL);

			/* Fill in the auto-request-sense status block. */
			pkt->pkt_state |= STATE_ARQ_DONE;

			arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;

			*((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
			arq->sts_rqpkt_reason = 0;
			arq->sts_rqpkt_statistics = 0;

			arq->sts_rqpkt_state = STATE_GOT_BUS |
			    STATE_GOT_TARGET | STATE_SENT_CMD |
			    STATE_GOT_STATUS | STATE_ARQ_DONE |
			    STATE_XFERRED_DATA;

			mutex_enter(&plun->lun_mutex);
			cip = plun->lun_cip;
			mutex_exit(&plun->lun_mutex);

			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_8, 0,
			    "SCSI Check condition on cmd=%p target=0x%x"
			    " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
			    " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
			    cmd->cmd_fcp_cmd.fcp_cdb[0],
			    rsp->fcp_u.fcp_status.scsi_status,
			    sense_to->es_key, sense_to->es_add_code,
			    sense_to->es_qual_code);
		}
	} else {
		/* Transport-level failure: no usable FCP response IU. */
		plun = ADDR2LUN(&pkt->pkt_address);
		ptgt = plun->lun_tgt;
		ASSERT(ptgt != NULL);

		/*
		 * Work harder to translate errors into target driver
		 * understandable ones. Note with despair that the target
		 * drivers don't decode pkt_state and pkt_reason exhaustively
		 * They resort to using the big hammer most often, which
		 * may not get fixed in the life time of this driver.
		 */
		pkt->pkt_state = 0;
		pkt->pkt_statistics = 0;

		switch (fpkt->pkt_state) {
		case FC_PKT_TRAN_ERROR:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OVERRUN:
				pkt->pkt_reason = CMD_CMD_OVR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_XCHG_BSY: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT;	/* Lie */

				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_ABORTED:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_ABORT_FAILED:
				pkt->pkt_reason = CMD_ABORT_FAIL;
				break;

			case FC_REASON_NO_SEQ_INIT:
			case FC_REASON_CRC_ERROR:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;
			default:
				pkt->pkt_reason = CMD_TRAN_ERR;
				break;
			}
			break;

		case FC_PKT_PORT_OFFLINE: {
			dev_info_t *cdip = NULL;
			caddr_t ptr;

			if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
				FCP_DTRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_8, 0,
				    "SCSI cmd; LOGIN REQUIRED from FCA for %x",
				    ptgt->tgt_d_id);
			}

			/* Resolve the child node (direct or via MPxIO). */
			mutex_enter(&plun->lun_mutex);
			if (plun->lun_mpxio == 0) {
				cdip = DIP(plun->lun_cip);
			} else if (plun->lun_cip) {
				cdip = mdi_pi_get_client(PIP(plun->lun_cip));
			}

			mutex_exit(&plun->lun_mutex);

			if (cdip) {
				(void) ndi_event_retrieve_cookie(
				    pptr->port_ndi_event_hdl, cdip,
				    FCAL_REMOVE_EVENT, &fcp_remove_eid,
				    NDI_EVENT_NOPASS);
				(void) ndi_event_run_callbacks(
				    pptr->port_ndi_event_hdl, cdip,
				    fcp_remove_eid, NULL);
			}

			/*
			 * If the link goes off-line for a lip,
			 * this will cause a error to the ST SG
			 * SGEN drivers. By setting BUSY we will
			 * give the drivers the chance to retry
			 * before it blows of the job. ST will
			 * remember how many times it has retried.
			 */

			if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
			    (plun->lun_type == DTYPE_CHANGER)) {
				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
			} else {
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;
			}
			break;
		}

		case FC_PKT_TRAN_BSY:
			/*
			 * Use the ssd Qfull handling here.
			 *
			 * NOTE(review): unlike the other branches,
			 * pkt_scbp is dereferenced without a NULL
			 * check here — confirm it cannot be NULL on
			 * this path.
			 */
			*pkt->pkt_scbp = STATUS_INTERMEDIATE;
			pkt->pkt_state = STATE_GOT_BUS;
			break;

		case FC_PKT_TIMEOUT:
			pkt->pkt_reason = CMD_TIMEOUT;
			if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
				pkt->pkt_statistics |= STAT_TIMEOUT;
			} else {
				pkt->pkt_statistics |= STAT_ABORTED;
			}
			break;

		case FC_PKT_LOCAL_RJT:
			switch (fpkt->pkt_reason) {
			case FC_REASON_OFFLINE: {
				dev_info_t *cdip = NULL;

				mutex_enter(&plun->lun_mutex);
				if (plun->lun_mpxio == 0) {
					cdip = DIP(plun->lun_cip);
				} else if (plun->lun_cip) {
					cdip = mdi_pi_get_client(
					    PIP(plun->lun_cip));
				}
				mutex_exit(&plun->lun_mutex);

				if (cdip) {
					(void) ndi_event_retrieve_cookie(
					    pptr->port_ndi_event_hdl, cdip,
					    FCAL_REMOVE_EVENT,
					    &fcp_remove_eid,
					    NDI_EVENT_NOPASS);
					(void) ndi_event_run_callbacks(
					    pptr->port_ndi_event_hdl,
					    cdip, fcp_remove_eid, NULL);
				}

				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_BUS_RESET;

				break;
			}

			case FC_REASON_NOMEM:
			case FC_REASON_QFULL: {
				caddr_t ptr;

				pkt->pkt_reason = CMD_CMPLT;	/* Lie */
				ptr = (caddr_t)pkt->pkt_scbp;
				if (ptr) {
					*ptr = STATUS_BUSY;
				}
				break;
			}

			case FC_REASON_DMA_ERROR:
				pkt->pkt_reason = CMD_DMA_DERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			case FC_REASON_CRC_ERROR:
			case FC_REASON_UNDERRUN: {
				uchar_t status;
				/*
				 * Work around for Bugid: 4240945.
				 * IB on A5k doesn't set the Underrun bit
				 * in the fcp status, when it is transferring
				 * less than requested amount of data. Work
				 * around the ses problem to keep luxadm
				 * happy till ibfirmware is fixed.
				 */
				if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
					FCP_CP_IN(fpkt->pkt_resp, rsp,
					    fpkt->pkt_resp_acc,
					    sizeof (struct fcp_rsp));
				}
				status = rsp->fcp_u.fcp_status.scsi_status;
				if (((plun->lun_type & DTYPE_MASK) ==
				    DTYPE_ESI) && (status == STATUS_GOOD)) {
					pkt->pkt_reason = CMD_CMPLT;
					*pkt->pkt_scbp = status;
					pkt->pkt_resid = 0;
				} else {
					pkt->pkt_reason = CMD_TRAN_ERR;
					pkt->pkt_statistics |= STAT_ABORTED;
				}
				break;
			}

			case FC_REASON_NO_CONNECTION:
			case FC_REASON_UNSUPPORTED:
			case FC_REASON_ILLEGAL_REQ:
			case FC_REASON_BAD_SID:
			case FC_REASON_DIAG_BUSY:
			case FC_REASON_FCAL_OPN_FAIL:
			case FC_REASON_BAD_XID:
			default:
				pkt->pkt_reason = CMD_TRAN_ERR;
				pkt->pkt_statistics |= STAT_ABORTED;
				break;

			}
			break;

		case FC_PKT_NPORT_RJT:
		case FC_PKT_FABRIC_RJT:
		case FC_PKT_NPORT_BSY:
		case FC_PKT_FABRIC_BSY:
		default:
			FCP_DTRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_8, 0,
			    "FC Status 0x%x, reason 0x%x",
			    fpkt->pkt_state, fpkt->pkt_reason);
			pkt->pkt_reason = CMD_TRAN_ERR;
			pkt->pkt_statistics |= STAT_ABORTED;
			break;
		}

		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_9, 0,
		    "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
		    " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
		    fpkt->pkt_reason);
	}

	/* save is only set under DEBUG; ASSERT compiles away otherwise. */
	ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
}
9232 9213
9233 9214
9234 9215 static int
9235 9216 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9236 9217 {
9237 9218 if (rsp->reserved_0 || rsp->reserved_1 ||
9238 9219 rsp->fcp_u.fcp_status.reserved_0 ||
9239 9220 rsp->fcp_u.fcp_status.reserved_1) {
9240 9221 /*
9241 9222 * These reserved fields should ideally be zero. FCP-2 does say
9242 9223 * that the recipient need not check for reserved fields to be
9243 9224 * zero. If they are not zero, we will not make a fuss about it
9244 9225 * - just log it (in debug to both trace buffer and messages
9245 9226 * file and to trace buffer only in non-debug) and move on.
9246 9227 *
9247 9228 * Non-zero reserved fields were seen with minnows.
9248 9229 *
9249 9230 * qlc takes care of some of this but we cannot assume that all
9250 9231 * FCAs will do so.
9251 9232 */
9252 9233 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9253 9234 FCP_BUF_LEVEL_5, 0,
9254 9235 "Got fcp response packet with non-zero reserved fields "
9255 9236 "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9256 9237 "status.reserved_0:0x%x, status.reserved_1:0x%x",
9257 9238 rsp->reserved_0, rsp->reserved_1,
9258 9239 rsp->fcp_u.fcp_status.reserved_0,
9259 9240 rsp->fcp_u.fcp_status.reserved_1);
9260 9241 }
9261 9242
9262 9243 if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9263 9244 (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9264 9245 return (FC_FAILURE);
9265 9246 }
9266 9247
9267 9248 if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9268 9249 (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9269 9250 sizeof (struct fcp_rsp))) {
9270 9251 return (FC_FAILURE);
9271 9252 }
9272 9253
9273 9254 return (FC_SUCCESS);
9274 9255 }
9275 9256
9276 9257
/*
 * This is called when there is a change in the device state. The case we're
 * handling here is: if the d_id does not match, offline this tgt and online
 * a new tgt with the new d_id. Called from fcp_handle_devices with
 * port_mutex held.
 */
static int
fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
{
	ASSERT(mutex_owned(&pptr->port_mutex));

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_3, 0,
	    "Starting fcp_device_changed...");

	/*
	 * The two cases handled here are: the device changed its d_id,
	 * or (on external topologies) it changed its hard address.
	 */
	if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
	    (FC_TOP_EXTERNAL(pptr->port_topology) &&
	    (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {

		/* offline this target */
		mutex_enter(&ptgt->tgt_mutex);
		if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
			(void) fcp_offline_target(pptr, ptgt, link_cnt,
			    0, 1, NDI_DEVI_REMOVE);
		}
		mutex_exit(&ptgt->tgt_mutex);

		fcp_log(CE_NOTE, pptr->port_dip,
		    "Change in target properties: Old D_ID=%x New D_ID=%x"
		    " Old HA=%x New HA=%x", ptgt->tgt_d_id,
		    map_entry->map_did.port_id, ptgt->tgt_hard_addr,
		    map_entry->map_hard_addr.hard_addr);
	}

	/* bring the device (back) online per the new map entry */
	return (fcp_handle_mapflags(pptr, ptgt, map_entry,
	    link_cnt, tgt_cnt, cause));
}
9319 9300
9320 9301 /*
9321 9302 * Function: fcp_alloc_lun
9322 9303 *
9323 9304 * Description: Creates a new lun structure and adds it to the list
9324 9305 * of luns of the target.
9325 9306 *
9326 9307 * Argument: ptgt Target the lun will belong to.
9327 9308 *
9328 9309 * Return Value: NULL Failed
9329 9310 * Not NULL Succeeded
9330 9311 *
9331 9312 * Context: Kernel context
9332 9313 */
9333 9314 static struct fcp_lun *
9334 9315 fcp_alloc_lun(struct fcp_tgt *ptgt)
9335 9316 {
9336 9317 struct fcp_lun *plun;
9337 9318
9338 9319 plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9339 9320 if (plun != NULL) {
9340 9321 /*
9341 9322 * Initialize the mutex before putting in the target list
9342 9323 * especially before releasing the target mutex.
9343 9324 */
9344 9325 mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9345 9326 plun->lun_tgt = ptgt;
9346 9327
9347 9328 mutex_enter(&ptgt->tgt_mutex);
9348 9329 plun->lun_next = ptgt->tgt_lun;
9349 9330 ptgt->tgt_lun = plun;
9350 9331 plun->lun_old_guid = NULL;
9351 9332 plun->lun_old_guid_size = 0;
9352 9333 mutex_exit(&ptgt->tgt_mutex);
9353 9334 }
9354 9335
9355 9336 return (plun);
9356 9337 }
9357 9338
9358 9339 /*
9359 9340 * Function: fcp_dealloc_lun
9360 9341 *
9361 9342 * Description: Frees the LUN structure passed by the caller.
9362 9343 *
9363 9344 * Argument: plun LUN structure to free.
9364 9345 *
9365 9346 * Return Value: None
9366 9347 *
9367 9348 * Context: Kernel context.
9368 9349 */
9369 9350 static void
9370 9351 fcp_dealloc_lun(struct fcp_lun *plun)
9371 9352 {
9372 9353 mutex_enter(&plun->lun_mutex);
9373 9354 if (plun->lun_cip) {
9374 9355 fcp_remove_child(plun);
9375 9356 }
9376 9357 mutex_exit(&plun->lun_mutex);
9377 9358
9378 9359 mutex_destroy(&plun->lun_mutex);
9379 9360 if (plun->lun_guid) {
9380 9361 kmem_free(plun->lun_guid, plun->lun_guid_size);
9381 9362 }
9382 9363 if (plun->lun_old_guid) {
9383 9364 kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
9384 9365 }
9385 9366 kmem_free(plun, sizeof (*plun));
9386 9367 }
9387 9368
9388 9369 /*
9389 9370 * Function: fcp_alloc_tgt
9390 9371 *
9391 9372 * Description: Creates a new target structure and adds it to the port
9392 9373 * hash list.
9393 9374 *
9394 9375 * Argument: pptr fcp port structure
9395 9376 * *map_entry entry describing the target to create
9396 9377 * link_cnt Link state change counter
9397 9378 *
9398 9379 * Return Value: NULL Failed
9399 9380 * Not NULL Succeeded
9400 9381 *
9401 9382 * Context: Kernel context.
9402 9383 */
9403 9384 static struct fcp_tgt *
9404 9385 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
9405 9386 {
9406 9387 int hash;
9407 9388 uchar_t *wwn;
9408 9389 struct fcp_tgt *ptgt;
9409 9390
9410 9391 ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
9411 9392 if (ptgt != NULL) {
9412 9393 mutex_enter(&pptr->port_mutex);
9413 9394 if (link_cnt != pptr->port_link_cnt) {
9414 9395 /*
9415 9396 * oh oh -- another link reset
9416 9397 * in progress -- give up
9417 9398 */
9418 9399 mutex_exit(&pptr->port_mutex);
9419 9400 kmem_free(ptgt, sizeof (*ptgt));
9420 9401 ptgt = NULL;
9421 9402 } else {
9422 9403 /*
9423 9404 * initialize the mutex before putting in the port
9424 9405 * wwn list, especially before releasing the port
9425 9406 * mutex.
9426 9407 */
9427 9408 mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);
9428 9409
9429 9410 /* add new target entry to the port's hash list */
9430 9411 wwn = (uchar_t *)&map_entry->map_pwwn;
9431 9412 hash = FCP_HASH(wwn);
9432 9413
9433 9414 ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
9434 9415 pptr->port_tgt_hash_table[hash] = ptgt;
9435 9416
9436 9417 /* save cross-ptr */
9437 9418 ptgt->tgt_port = pptr;
9438 9419
9439 9420 ptgt->tgt_change_cnt = 1;
9440 9421
9441 9422 /* initialize the target manual_config_only flag */
9442 9423 if (fcp_enable_auto_configuration) {
9443 9424 ptgt->tgt_manual_config_only = 0;
9444 9425 } else {
9445 9426 ptgt->tgt_manual_config_only = 1;
9446 9427 }
9447 9428
9448 9429 mutex_exit(&pptr->port_mutex);
9449 9430 }
9450 9431 }
9451 9432
9452 9433 return (ptgt);
9453 9434 }
9454 9435
9455 9436 /*
9456 9437 * Function: fcp_dealloc_tgt
9457 9438 *
9458 9439 * Description: Frees the target structure passed by the caller.
9459 9440 *
9460 9441 * Argument: ptgt Target structure to free.
9461 9442 *
9462 9443 * Return Value: None
9463 9444 *
9464 9445 * Context: Kernel context.
9465 9446 */
9466 9447 static void
9467 9448 fcp_dealloc_tgt(struct fcp_tgt *ptgt)
9468 9449 {
9469 9450 mutex_destroy(&ptgt->tgt_mutex);
9470 9451 kmem_free(ptgt, sizeof (*ptgt));
9471 9452 }
9472 9453
9473 9454
9474 9455 /*
9475 9456 * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry
9476 9457 *
9477 9458 * Device discovery commands will not be retried for-ever as
9478 9459 * this will have repercussions on other devices that need to
9479 9460 * be submitted to the hotplug thread. After a quick glance
9480 9461 * at the SCSI-3 spec, it was found that the spec doesn't
9481 9462 * mandate a forever retry, rather recommends a delayed retry.
9482 9463 *
9483 9464 * Since Photon IB is single threaded, STATUS_BUSY is common
9484 9465 * in a 4+initiator environment. Make sure the total time
 * spent on retries (including command timeout) does not
 * exceed 60 seconds.
9487 9468 */
9488 9469 static void
9489 9470 fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
9490 9471 {
9491 9472 struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9492 9473 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
9493 9474
9494 9475 mutex_enter(&pptr->port_mutex);
9495 9476 mutex_enter(&ptgt->tgt_mutex);
9496 9477 if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
9497 9478 FCP_TRACE(fcp_logq, pptr->port_instbuf,
9498 9479 fcp_trace, FCP_BUF_LEVEL_2, 0,
9499 9480 "fcp_queue_ipkt,1:state change occured"
9500 9481 " for D_ID=0x%x", ptgt->tgt_d_id);
9501 9482 mutex_exit(&ptgt->tgt_mutex);
9502 9483 mutex_exit(&pptr->port_mutex);
9503 9484 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
9504 9485 icmd->ipkt_change_cnt, icmd->ipkt_cause);
9505 9486 fcp_icmd_free(pptr, icmd);
9506 9487 return;
9507 9488 }
9508 9489 mutex_exit(&ptgt->tgt_mutex);
9509 9490
9510 9491 icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;
9511 9492
9512 9493 if (pptr->port_ipkt_list != NULL) {
9513 9494 /* add pkt to front of doubly-linked list */
9514 9495 pptr->port_ipkt_list->ipkt_prev = icmd;
9515 9496 icmd->ipkt_next = pptr->port_ipkt_list;
9516 9497 pptr->port_ipkt_list = icmd;
9517 9498 icmd->ipkt_prev = NULL;
9518 9499 } else {
9519 9500 /* this is the first/only pkt on the list */
9520 9501 pptr->port_ipkt_list = icmd;
9521 9502 icmd->ipkt_next = NULL;
9522 9503 icmd->ipkt_prev = NULL;
9523 9504 }
9524 9505 mutex_exit(&pptr->port_mutex);
9525 9506 }
9526 9507
9527 9508 /*
9528 9509 * Function: fcp_transport
9529 9510 *
 * Description: This function submits the Fibre Channel packet to the transport
9531 9512 * layer by calling fc_ulp_transport(). If fc_ulp_transport()
9532 9513 * fails the submission, the treatment depends on the value of
9533 9514 * the variable internal.
9534 9515 *
9535 9516 * Argument: port_handle fp/fctl port handle.
9536 9517 * *fpkt Packet to submit to the transport layer.
9537 9518 * internal Not zero when it's an internal packet.
9538 9519 *
9539 9520 * Return Value: FC_TRAN_BUSY
9540 9521 * FC_STATEC_BUSY
9541 9522 * FC_OFFLINE
9542 9523 * FC_LOGINREQ
9543 9524 * FC_DEVICE_BUSY
9544 9525 * FC_SUCCESS
9545 9526 */
9546 9527 static int
9547 9528 fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
9548 9529 {
9549 9530 int rval;
9550 9531
9551 9532 rval = fc_ulp_transport(port_handle, fpkt);
9552 9533 if (rval == FC_SUCCESS) {
9553 9534 return (rval);
9554 9535 }
9555 9536
9556 9537 /*
9557 9538 * LUN isn't marked BUSY or OFFLINE, so we got here to transport
9558 9539 * a command, if the underlying modules see that there is a state
9559 9540 * change, or if a port is OFFLINE, that means, that state change
9560 9541 * hasn't reached FCP yet, so re-queue the command for deferred
9561 9542 * submission.
9562 9543 */
9563 9544 if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
9564 9545 (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
9565 9546 (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
9566 9547 /*
9567 9548 * Defer packet re-submission. Life hang is possible on
9568 9549 * internal commands if the port driver sends FC_STATEC_BUSY
9569 9550 * for ever, but that shouldn't happen in a good environment.
9570 9551 * Limiting re-transport for internal commands is probably a
9571 9552 * good idea..
9572 9553 * A race condition can happen when a port sees barrage of
9573 9554 * link transitions offline to online. If the FCTL has
9574 9555 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
9575 9556 * internal commands should be queued to do the discovery.
9576 9557 * The race condition is when an online comes and FCP starts
9577 9558 * its internal discovery and the link goes offline. It is
9578 9559 * possible that the statec_callback has not reached FCP
9579 9560 * and FCP is carrying on with its internal discovery.
9580 9561 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
9581 9562 * that the link has gone offline. At this point FCP should
9582 9563 * drop all the internal commands and wait for the
9583 9564 * statec_callback. It will be facilitated by incrementing
9584 9565 * port_link_cnt.
9585 9566 *
9586 9567 * For external commands, the (FC)pkt_timeout is decremented
9587 9568 * by the QUEUE Delay added by our driver, Care is taken to
9588 9569 * ensure that it doesn't become zero (zero means no timeout)
9589 9570 * If the time expires right inside driver queue itself,
9590 9571 * the watch thread will return it to the original caller
9591 9572 * indicating that the command has timed-out.
9592 9573 */
9593 9574 if (internal) {
9594 9575 char *op;
9595 9576 struct fcp_ipkt *icmd;
9596 9577
9597 9578 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9598 9579 switch (icmd->ipkt_opcode) {
9599 9580 case SCMD_REPORT_LUN:
9600 9581 op = "REPORT LUN";
9601 9582 break;
9602 9583
9603 9584 case SCMD_INQUIRY:
9604 9585 op = "INQUIRY";
9605 9586 break;
9606 9587
9607 9588 case SCMD_INQUIRY_PAGE83:
9608 9589 op = "INQUIRY-83";
9609 9590 break;
9610 9591
9611 9592 default:
9612 9593 op = "Internal SCSI COMMAND";
9613 9594 break;
9614 9595 }
9615 9596
9616 9597 if (fcp_handle_ipkt_errors(icmd->ipkt_port,
9617 9598 icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
9618 9599 rval = FC_SUCCESS;
9619 9600 }
9620 9601 } else {
9621 9602 struct fcp_pkt *cmd;
9622 9603 struct fcp_port *pptr;
9623 9604
9624 9605 cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
9625 9606 cmd->cmd_state = FCP_PKT_IDLE;
9626 9607 pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);
9627 9608
9628 9609 if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
9629 9610 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9630 9611 fcp_trace, FCP_BUF_LEVEL_9, 0,
9631 9612 "fcp_transport: xport busy for pkt %p",
9632 9613 cmd->cmd_pkt);
9633 9614 rval = FC_TRAN_BUSY;
9634 9615 } else {
9635 9616 fcp_queue_pkt(pptr, cmd);
9636 9617 rval = FC_SUCCESS;
9637 9618 }
9638 9619 }
9639 9620 }
9640 9621
9641 9622 return (rval);
9642 9623 }
9643 9624
9644 9625 /*VARARGS3*/
9645 9626 static void
9646 9627 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9647 9628 {
9648 9629 char buf[256];
9649 9630 va_list ap;
9650 9631
9651 9632 if (dip == NULL) {
9652 9633 dip = fcp_global_dip;
9653 9634 }
9654 9635
9655 9636 va_start(ap, fmt);
9656 9637 (void) vsprintf(buf, fmt, ap);
9657 9638 va_end(ap);
9658 9639
9659 9640 scsi_log(dip, "fcp", level, buf);
9660 9641 }
9661 9642
9662 9643 /*
9663 9644 * This function retries NS registry of FC4 type.
9664 9645 * It assumes that fcp_mutex is held.
9665 9646 * The function does nothing if topology is not fabric
9666 9647 * So, the topology has to be set before this function can be called
9667 9648 */
9668 9649 static void
9669 9650 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9670 9651 {
9671 9652 int rval;
9672 9653
9673 9654 ASSERT(MUTEX_HELD(&pptr->port_mutex));
9674 9655
9675 9656 if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9676 9657 ((pptr->port_topology != FC_TOP_FABRIC) &&
9677 9658 (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9678 9659 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9679 9660 pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9680 9661 }
9681 9662 return;
9682 9663 }
9683 9664 mutex_exit(&pptr->port_mutex);
9684 9665 rval = fcp_do_ns_registry(pptr, s_id);
9685 9666 mutex_enter(&pptr->port_mutex);
9686 9667
9687 9668 if (rval == 0) {
9688 9669 /* Registry successful. Reset flag */
9689 9670 pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9690 9671 }
9691 9672 }
9692 9673
9693 9674 /*
9694 9675 * This function registers the ULP with the switch by calling transport i/f
9695 9676 */
9696 9677 static int
9697 9678 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9698 9679 {
9699 9680 fc_ns_cmd_t ns_cmd;
9700 9681 ns_rfc_type_t rfc;
9701 9682 uint32_t types[8];
9702 9683
9703 9684 /*
9704 9685 * Prepare the Name server structure to
9705 9686 * register with the transport in case of
9706 9687 * Fabric configuration.
9707 9688 */
9708 9689 bzero(&rfc, sizeof (rfc));
9709 9690 bzero(types, sizeof (types));
9710 9691
9711 9692 types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9712 9693 (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
9713 9694
9714 9695 rfc.rfc_port_id.port_id = s_id;
9715 9696 bcopy(types, rfc.rfc_types, sizeof (types));
9716 9697
9717 9698 ns_cmd.ns_flags = 0;
9718 9699 ns_cmd.ns_cmd = NS_RFT_ID;
9719 9700 ns_cmd.ns_req_len = sizeof (rfc);
9720 9701 ns_cmd.ns_req_payload = (caddr_t)&rfc;
9721 9702 ns_cmd.ns_resp_len = 0;
9722 9703 ns_cmd.ns_resp_payload = NULL;
9723 9704
9724 9705 /*
9725 9706 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9726 9707 */
9727 9708 if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9728 9709 fcp_log(CE_WARN, pptr->port_dip,
9729 9710 "!ns_registry: failed name server registration");
9730 9711 return (1);
9731 9712 }
9732 9713
9733 9714 return (0);
9734 9715 }
9735 9716
9736 9717 /*
9737 9718 * Function: fcp_handle_port_attach
9738 9719 *
9739 9720 * Description: This function is called from fcp_port_attach() to attach a
9740 9721 * new port. This routine does the following:
9741 9722 *
9742 9723 * 1) Allocates an fcp_port structure and initializes it.
 *		2) Tries to register the new FC-4 (FCP) capability with the name
9744 9725 * server.
9745 9726 * 3) Kicks off the enumeration of the targets/luns visible
9746 9727 * through this new port. That is done by calling
9747 9728 * fcp_statec_callback() if the port is online.
9748 9729 *
9749 9730 * Argument: ulph fp/fctl port handle.
9750 9731 * *pinfo Port information.
9751 9732 * s_id Port ID.
9752 9733 * instance Device instance number for the local port
9753 9734 * (returned by ddi_get_instance()).
9754 9735 *
9755 9736 * Return Value: DDI_SUCCESS
9756 9737 * DDI_FAILURE
9757 9738 *
9758 9739 * Context: User and Kernel context.
9759 9740 */
/*ARGSUSED*/
int
fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
    uint32_t s_id, int instance)
{
	int			res = DDI_FAILURE;
	scsi_hba_tran_t		*tran;
	int			mutex_initted = FALSE;
	int			hba_attached = FALSE;
	int			soft_state_linked = FALSE;
	int			event_bind = FALSE;
	struct fcp_port		*pptr;
	fc_portmap_t		*tmp_list = NULL;
	uint32_t		max_cnt, alloc_cnt;
	uchar_t			*boot_wwn = NULL;
	uint_t			nbytes;
	int			manual_cfg;

	/*
	 * this port instance attaching for the first time (or after
	 * being detached before)
	 */
	FCP_TRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);

	if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fcp: Softstate struct alloc failed"
		    "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
		    instance);
		return (res);
	}

	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
		/* this shouldn't happen */
		ddi_soft_state_free(fcp_softstate, instance);
		cmn_err(CE_WARN, "fcp: bad soft state");
		return (res);
	}

	(void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);

	/*
	 * Make a copy of ulp_port_info as fctl allocates
	 * a temp struct.
	 */
	(void) fcp_cp_pinfo(pptr, pinfo);

	/*
	 * Check for manual_configuration_only property.
	 * Enable manual configuration if the property is
	 * set to 1, otherwise disable manual configuration.
	 */
	if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
	    MANUAL_CFG_ONLY,
	    -1)) != -1) {
		if (manual_cfg == 1) {
			char	*pathname;
			pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
			(void) ddi_pathname(pptr->port_dip, pathname);
			cmn_err(CE_NOTE,
			    "%s (%s%d) %s is enabled via %s.conf.",
			    pathname,
			    ddi_driver_name(pptr->port_dip),
			    ddi_get_instance(pptr->port_dip),
			    MANUAL_CFG_ONLY,
			    ddi_driver_name(pptr->port_dip));
			fcp_enable_auto_configuration = 0;
			kmem_free(pathname, MAXPATHLEN);
		}
	}
	pptr->port_link_cnt = 1;
	pptr->port_id = s_id;
	pptr->port_instance = instance;
	pptr->port_state = FCP_STATE_INIT;
	if (pinfo->port_acc_attr == NULL) {
		/*
		 * The corresponding FCA doesn't support DMA at all
		 */
		pptr->port_state |= FCP_STATE_FCA_IS_NODMA;
	}

	if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
		/*
		 * If FCA supports DMA in SCSI data phase, we need to
		 * preallocate dma cookies, so stash the cookie size.
		 */
		pptr->port_dmacookie_sz = sizeof (ddi_dma_cookie_t) *
		    pptr->port_data_dma_attr.dma_attr_sgllen;
	}

	/*
	 * The two mutexes of fcp_port are initialized. The variable
	 * mutex_initted is incremented to remember that fact. That variable
	 * is checked when the routine fails and the mutexes have to be
	 * destroyed.
	 */
	mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_initted++;

	/*
	 * The SCSI tran structure is allocated and initialized now.
	 */
	if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: scsi_hba_tran_alloc failed", instance);
		goto fail;
	}

	/* link in the transport structure then fill it in */
	pptr->port_tran = tran;
	tran->tran_hba_private = pptr;
	tran->tran_tgt_init = fcp_scsi_tgt_init;
	tran->tran_tgt_probe = NULL;
	tran->tran_tgt_free = fcp_scsi_tgt_free;
	tran->tran_start = fcp_scsi_start;
	tran->tran_reset = fcp_scsi_reset;
	tran->tran_abort = fcp_scsi_abort;
	tran->tran_getcap = fcp_scsi_getcap;
	tran->tran_setcap = fcp_scsi_setcap;
	tran->tran_init_pkt = NULL;
	tran->tran_destroy_pkt = NULL;
	tran->tran_dmafree = NULL;
	tran->tran_sync_pkt = NULL;
	tran->tran_reset_notify = fcp_scsi_reset_notify;
	tran->tran_get_bus_addr = fcp_scsi_get_bus_addr;
	tran->tran_get_name = fcp_scsi_get_name;
	tran->tran_clear_aca = NULL;
	tran->tran_clear_task_set = NULL;
	tran->tran_terminate_task = NULL;
	tran->tran_get_eventcookie = fcp_scsi_bus_get_eventcookie;
	tran->tran_add_eventcall = fcp_scsi_bus_add_eventcall;
	tran->tran_remove_eventcall = fcp_scsi_bus_remove_eventcall;
	tran->tran_post_event = fcp_scsi_bus_post_event;
	tran->tran_quiesce = NULL;
	tran->tran_unquiesce = NULL;
	tran->tran_bus_reset = NULL;
	tran->tran_bus_config = fcp_scsi_bus_config;
	tran->tran_bus_unconfig = fcp_scsi_bus_unconfig;
	tran->tran_bus_power = NULL;
	tran->tran_interconnect_type = INTERCONNECT_FABRIC;

	tran->tran_pkt_constructor = fcp_kmem_cache_constructor;
	tran->tran_pkt_destructor = fcp_kmem_cache_destructor;
	tran->tran_setup_pkt = fcp_pkt_setup;
	tran->tran_teardown_pkt = fcp_pkt_teardown;
	tran->tran_hba_len = pptr->port_priv_pkt_len +
	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
	if (pptr->port_state & FCP_STATE_FCA_IS_NODMA) {
		/*
		 * If FCA don't support DMA, then we use different vectors to
		 * minimize the effects on DMA code flow path
		 */
		tran->tran_start = fcp_pseudo_start;
		tran->tran_init_pkt = fcp_pseudo_init_pkt;
		tran->tran_destroy_pkt = fcp_pseudo_destroy_pkt;
		tran->tran_sync_pkt = fcp_pseudo_sync_pkt;
		tran->tran_dmafree = fcp_pseudo_dmafree;
		tran->tran_setup_pkt = NULL;
		tran->tran_teardown_pkt = NULL;
		tran->tran_pkt_constructor = NULL;
		tran->tran_pkt_destructor = NULL;
		pptr->port_data_dma_attr = pseudo_fca_dma_attr;
	}

	/*
	 * Allocate an ndi event handle
	 */
	pptr->port_ndi_event_defs = (ndi_event_definition_t *)
	    kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);

	bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
	    sizeof (fcp_ndi_event_defs));

	(void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
	    &pptr->port_ndi_event_hdl, NDI_SLEEP);

	pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
	pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
	pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;

	if (DEVI_IS_ATTACHING(pptr->port_dip) &&
	    (ndi_event_bind_set(pptr->port_ndi_event_hdl,
	    &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
		goto fail;
	}
	event_bind++;	/* Checked in fail case */

	if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
	    tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
	    != DDI_SUCCESS) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: scsi_hba_attach_setup failed", instance);
		goto fail;
	}
	hba_attached++;	/* Checked in fail case */

	pptr->port_mpxio = 0;
	if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
	    MDI_SUCCESS) {
		pptr->port_mpxio++;
	}

	/*
	 * The following code is putting the new port structure in the global
	 * list of ports and, if it is the first port to attach, it starts the
	 * fcp_watchdog_tick.
	 *
	 * Why put this new port in the global before we are done attaching it?
	 * We are actually making the structure globally known before we are
	 * done attaching it. The reason for that is: because of the code that
	 * follows. At this point the resources to handle the port are
	 * allocated. This function is now going to do the following:
	 *
	 * 1) It is going to try to register with the name server advertising
	 *    the new FCP capability of the port.
	 * 2) It is going to play the role of the fp/fctl layer by building
	 *    a list of worldwide names reachable through this port and call
	 *    itself on fcp_statec_callback().  That requires the port to
	 *    be part of the global list.
	 */
	mutex_enter(&fcp_global_mutex);
	if (fcp_port_head == NULL) {
		fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
	}
	pptr->port_next = fcp_port_head;
	fcp_port_head = pptr;
	soft_state_linked++;

	if (fcp_watchdog_init++ == 0) {
		fcp_watchdog_tick = fcp_watchdog_timeout *
		    drv_usectohz(1000000);
		fcp_watchdog_id = timeout(fcp_watch, NULL,
		    fcp_watchdog_tick);
	}
	mutex_exit(&fcp_global_mutex);

	/*
	 * Here an attempt is made to register with the name server, the new
	 * FCP capability.  That is done using an RFT_ID to the name server.
	 * It is done synchronously.  The function fcp_do_ns_registry()
	 * doesn't return till the name server responded.
	 * On failures, just ignore it for now and it will get retried during
	 * state change callbacks. We'll set a flag to show this failure
	 */
	if (fcp_do_ns_registry(pptr, s_id)) {
		mutex_enter(&pptr->port_mutex);
		pptr->port_state |= FCP_STATE_NS_REG_FAILED;
		mutex_exit(&pptr->port_mutex);
	} else {
		mutex_enter(&pptr->port_mutex);
		pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
		mutex_exit(&pptr->port_mutex);
	}

	/*
	 * Lookup for boot WWN property
	 */
	if (modrootloaded != 1) {
		if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
		    ddi_get_parent(pinfo->port_dip),
		    DDI_PROP_DONTPASS, OBP_BOOT_WWN,
		    &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
		    (nbytes == FC_WWN_SIZE)) {
			bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
		}
		if (boot_wwn) {
			ddi_prop_free(boot_wwn);
		}
	}

	/*
	 * Handle various topologies and link states.
	 */
	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
	case FC_STATE_OFFLINE:

		/*
		 * we're attaching a port where the link is offline
		 *
		 * Wait for ONLINE, at which time a state
		 * change will cause a statec_callback
		 *
		 * in the mean time, do not do anything
		 */
		res = DDI_SUCCESS;
		pptr->port_state |= FCP_STATE_OFFLINE;
		break;

	case FC_STATE_ONLINE: {
		if (pptr->port_topology == FC_TOP_UNKNOWN) {
			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
			res = DDI_SUCCESS;
			break;
		}
		/*
		 * discover devices and create nodes (a private
		 * loop or point-to-point)
		 */
		ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);

		/*
		 * At this point we are going to build a list of all the ports
		 * that can be reached through this local port.  It looks like
		 * we cannot handle more than FCP_MAX_DEVICES per local port
		 * (128).
		 */
		if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
		    sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
		    KM_NOSLEEP)) == NULL) {
			fcp_log(CE_WARN, pptr->port_dip,
			    "!fcp%d: failed to allocate portmap",
			    instance);
			goto fail;
		}

		/*
		 * fc_ulp_getportmap() is going to provide us with the list of
		 * remote ports in the buffer we just allocated.  The way the
		 * list is going to be retrieved depends on the topology.
		 * However, if we are connected to a Fabric, a name server
		 * request may be sent to get the list of FCP capable ports.
		 * It should be noted that in that case the request is
		 * synchronous.  This means we are stuck here till the name
		 * server replies.  A lot of things can change during that time
		 * and including, may be, being called on
		 * fcp_statec_callback() for different reasons. I'm not sure
		 * the code can handle that.
		 */
		max_cnt = FCP_MAX_DEVICES;
		alloc_cnt = FCP_MAX_DEVICES;
		if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
		    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
		    FC_SUCCESS) {
			caddr_t msg;

			(void) fc_ulp_error(res, &msg);

			/*
			 * this just means the transport is
			 * busy perhaps building a portmap so,
			 * for now, succeed this port attach
			 * when the transport has a new map,
			 * it'll send us a state change then
			 */
			fcp_log(CE_WARN, pptr->port_dip,
			    "!failed to get port map : %s", msg);

			res = DDI_SUCCESS;
			break;	/* go return result */
		}
		if (max_cnt > alloc_cnt) {
			alloc_cnt = max_cnt;
		}

		/*
		 * We are now going to call fcp_statec_callback() ourselves.
		 * By issuing this call we are trying to kick off the enumera-
		 * tion process.
		 */
		/*
		 * let the state change callback do the SCSI device
		 * discovery and create the devinfos
		 */
		fcp_statec_callback(ulph, pptr->port_fp_handle,
		    pptr->port_phys_state, pptr->port_topology, tmp_list,
		    max_cnt, pptr->port_id);

		res = DDI_SUCCESS;
		break;
	}

	default:
		/* unknown port state */
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: invalid port state at attach=0x%x",
		    instance, pptr->port_phys_state);

		mutex_enter(&pptr->port_mutex);
		pptr->port_phys_state = FCP_STATE_OFFLINE;
		mutex_exit(&pptr->port_mutex);

		res = DDI_SUCCESS;
		break;
	}

	/* free temp list if used */
	if (tmp_list != NULL) {
		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
	}

	/* note the attach time */
	pptr->port_attach_time = ddi_get_lbolt64();

	/* all done */
	return (res);

	/* a failure we have to clean up after */
fail:
	fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");

	if (soft_state_linked) {
		/* remove this fcp_port from the linked list */
		(void) fcp_soft_state_unlink(pptr);
	}

	/* unbind and free event set */
	if (pptr->port_ndi_event_hdl) {
		if (event_bind) {
			(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
			    &pptr->port_ndi_events, NDI_SLEEP);
		}
		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
	}

	if (pptr->port_ndi_event_defs) {
		(void) kmem_free(pptr->port_ndi_event_defs,
		    sizeof (fcp_ndi_event_defs));
	}

	/*
	 * Clean up mpxio stuff
	 */
	if (pptr->port_mpxio) {
		(void) mdi_phci_unregister(pptr->port_dip, 0);
		pptr->port_mpxio--;
	}

	/* undo SCSI HBA setup */
	if (hba_attached) {
		(void) scsi_hba_detach(pptr->port_dip);
	}
	if (pptr->port_tran != NULL) {
		scsi_hba_tran_free(pptr->port_tran);
	}

	mutex_enter(&fcp_global_mutex);

	/*
	 * We check soft_state_linked, because it is incremented right before
	 * we call increment fcp_watchdog_init.  Therefore, we know if
	 * soft_state_linked is still FALSE, we do not want to decrement
	 * fcp_watchdog_init or possibly call untimeout.
	 */

	if (soft_state_linked) {
		if (--fcp_watchdog_init == 0) {
			timeout_id_t	tid = fcp_watchdog_id;

			mutex_exit(&fcp_global_mutex);
			(void) untimeout(tid);
		} else {
			mutex_exit(&fcp_global_mutex);
		}
	} else {
		mutex_exit(&fcp_global_mutex);
	}

	if (mutex_initted) {
		mutex_destroy(&pptr->port_mutex);
		mutex_destroy(&pptr->port_pkt_mutex);
	}

	if (tmp_list != NULL) {
		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
	}

	/* this makes pptr invalid */
	ddi_soft_state_free(fcp_softstate, instance);

	return (DDI_FAILURE);
}
10238 10214
10239 10215
/*
 * Function: fcp_handle_port_detach
 *
 * Description: Takes a port offline on behalf of DDI_DETACH, DDI_SUSPEND
 *		or a power-down request.  Waits (up to FCP_ICMD_DEADLINE
 *		seconds) for in-flight reconfiguration/internal packets to
 *		drain, marks all LUNs busy, and stops the watchdog timer if
 *		this was the last active port.
 *
 * Argument:	*pptr		FCP port to take offline.
 *		flag		FCP_STATE_DETACHING, FCP_STATE_SUSPENDED or
 *				FCP_STATE_POWER_DOWN (reason for the call).
 *		instance	Soft state instance; used by the cleanup
 *				path when detaching.
 *
 * Return Value: FC_SUCCESS
 *		 FC_FAILURE	port busy: an MDI operation is in flight or
 *				outstanding work did not drain in time.
 */
static int
fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
{
	int count = 0;

	mutex_enter(&pptr->port_mutex);

	/*
	 * if the port is powered down or suspended, nothing else
	 * to do; just return.
	 */
	if (flag != FCP_STATE_DETACHING) {
		if (pptr->port_state & (FCP_STATE_POWER_DOWN |
		    FCP_STATE_SUSPENDED)) {
			pptr->port_state |= flag;
			mutex_exit(&pptr->port_mutex);
			return (FC_SUCCESS);
		}
	}

	/* an MDI/pathinfo operation is in progress; cannot proceed now */
	if (pptr->port_state & FCP_STATE_IN_MDI) {
		mutex_exit(&pptr->port_mutex);
		return (FC_FAILURE);
	}

	FCP_TRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_2, 0,
	    "fcp_handle_port_detach: port is detaching");

	pptr->port_state |= flag;

	/*
	 * Wait for any ongoing reconfig/ipkt to complete, that
	 * ensures the freeing to targets/luns is safe.
	 * No more ref to this port should happen from statec/ioctl
	 * after that as it was removed from the global port list.
	 */
	while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
		/*
		 * Let's give sufficient time for reconfig/ipkt
		 * to complete.
		 */
		if (count++ >= FCP_ICMD_DEADLINE) {
			break;
		}
		/* drop the port lock while sleeping so work can drain */
		mutex_exit(&pptr->port_mutex);
		delay(drv_usectohz(1000000));
		mutex_enter(&pptr->port_mutex);
	}

	/*
	 * if the driver is still busy then fail to
	 * suspend/power down.
	 */
	if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
	    (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
		pptr->port_state &= ~flag;
		mutex_exit(&pptr->port_mutex);
		return (FC_FAILURE);
	}

	if (flag == FCP_STATE_DETACHING) {
		/* remove from the global port list; no new refs after this */
		pptr = fcp_soft_state_unlink(pptr);
		ASSERT(pptr != NULL);
	}

	pptr->port_link_cnt++;
	pptr->port_state |= FCP_STATE_OFFLINE;
	pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);

	/* mark every LUN busy, as if the link had gone down */
	fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
	    FCP_CAUSE_LINK_DOWN);
	mutex_exit(&pptr->port_mutex);

	/* kill watch dog timer if we're the last */
	mutex_enter(&fcp_global_mutex);
	if (--fcp_watchdog_init == 0) {
		timeout_id_t tid = fcp_watchdog_id;

		/* untimeout() may sleep; never call it with the lock held */
		mutex_exit(&fcp_global_mutex);
		(void) untimeout(tid);
	} else {
		mutex_exit(&fcp_global_mutex);
	}

	/* clean up the port structures */
	if (flag == FCP_STATE_DETACHING) {
		fcp_cleanup_port(pptr, instance);
	}

	return (FC_SUCCESS);
}
10332 10308
10333 10309
/*
 * Function: fcp_cleanup_port
 *
 * Description: Releases every resource attached to a port soft state:
 *		the NDI event set and handle, the target/LUN structures and
 *		their devinfos, the MPxIO pHCI registration, the SCSA
 *		transport, the port mutexes and finally the soft state
 *		itself.  Must only be called once the port is offline and
 *		removed from the global port list (fcp_handle_port_detach).
 *
 * Argument:	*pptr		FCP port being torn down.
 *		instance	Soft state instance to free.
 *
 * Return Value: None
 */
static void
fcp_cleanup_port(struct fcp_port *pptr, int instance)
{
	ASSERT(pptr != NULL);

	/* unbind and free event set */
	if (pptr->port_ndi_event_hdl) {
		(void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
		    &pptr->port_ndi_events, NDI_SLEEP);
		(void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
	}

	if (pptr->port_ndi_event_defs) {
		(void) kmem_free(pptr->port_ndi_event_defs,
		    sizeof (fcp_ndi_event_defs));
	}

	/* free the lun/target structures and devinfos */
	fcp_free_targets(pptr);

	/*
	 * Clean up mpxio stuff
	 */
	if (pptr->port_mpxio) {
		(void) mdi_phci_unregister(pptr->port_dip, 0);
		pptr->port_mpxio--;
	}

	/* clean up SCSA stuff */
	(void) scsi_hba_detach(pptr->port_dip);
	if (pptr->port_tran != NULL) {
		scsi_hba_tran_free(pptr->port_tran);
	}

#ifdef KSTATS_CODE
	/* clean up kstats */
	if (pptr->fcp_ksp != NULL) {
		kstat_delete(pptr->fcp_ksp);
	}
#endif

	/* clean up soft state mutexes/condition variables */
	mutex_destroy(&pptr->port_mutex);
	mutex_destroy(&pptr->port_pkt_mutex);

	/* all done with soft state; this makes pptr invalid */
	ddi_soft_state_free(fcp_softstate, instance);
}
10382 10358
10383 10359 /*
10384 10360 * Function: fcp_kmem_cache_constructor
10385 10361 *
10386 10362 * Description: This function allocates and initializes the resources required
10387 10363 * to build a scsi_pkt structure the target driver. The result
10388 10364 * of the allocation and initialization will be cached in the
10389 10365 * memory cache. As DMA resources may be allocated here, that
10390 10366 * means DMA resources will be tied up in the cache manager.
10391 10367 * This is a tradeoff that has been made for performance reasons.
10392 10368 *
 * Argument:	*pkt		scsi_pkt whose HA-private area is
 *				preinitialized.
 *		*tran		SCSA transport handle; tran_hba_private
 *				holds the FCP port structure (fcp_port).
 *		kmflags		Value passed to kmem_cache_alloc() and
 *				propagated to the constructor.
10397 10373 *
10398 10374 * Return Value: 0 Allocation/Initialization was successful.
10399 10375 * -1 Allocation or Initialization failed.
10400 10376 *
10401 10377 *
10402 10378 * If the returned value is 0, the buffer is initialized like this:
10403 10379 *
10404 10380 * +================================+
10405 10381 * +----> | struct scsi_pkt |
10406 10382 * | | |
10407 10383 * | +--- | pkt_ha_private |
10408 10384 * | | | |
10409 10385 * | | +================================+
10410 10386 * | |
10411 10387 * | | +================================+
10412 10388 * | +--> | struct fcp_pkt | <---------+
10413 10389 * | | | |
10414 10390 * +----- | cmd_pkt | |
10415 10391 * | cmd_fp_pkt | ---+ |
10416 10392 * +-------->| cmd_fcp_rsp[] | | |
10417 10393 * | +--->| cmd_fcp_cmd[] | | |
10418 10394 * | | |--------------------------------| | |
10419 10395 * | | | struct fc_packet | <--+ |
10420 10396 * | | | | |
10421 10397 * | | | pkt_ulp_private | ----------+
10422 10398 * | | | pkt_fca_private | -----+
10423 10399 * | | | pkt_data_cookie | ---+ |
10424 10400 * | | | pkt_cmdlen | | |
10425 10401 * | |(a) | pkt_rsplen | | |
10426 10402 * | +----| .......... pkt_cmd ........... | ---|-|---------------+
10427 10403 * | (b) | pkt_cmd_cookie | ---|-|----------+ |
10428 10404 * +---------| .......... pkt_resp .......... | ---|-|------+ | |
10429 10405 * | pkt_resp_cookie | ---|-|--+ | | |
10430 10406 * | pkt_cmd_dma | | | | | | |
10431 10407 * | pkt_cmd_acc | | | | | | |
10432 10408 * +================================+ | | | | | |
10433 10409 * | dma_cookies | <--+ | | | | |
10434 10410 * | | | | | | |
10435 10411 * +================================+ | | | | |
10436 10412 * | fca_private | <----+ | | | |
10437 10413 * | | | | | |
10438 10414 * +================================+ | | | |
10439 10415 * | | | |
10440 10416 * | | | |
10441 10417 * +================================+ (d) | | | |
10442 10418 * | fcp_resp cookies | <-------+ | | |
10443 10419 * | | | | |
10444 10420 * +================================+ | | |
10445 10421 * | | |
10446 10422 * +================================+ (d) | | |
10447 10423 * | fcp_resp | <-----------+ | |
10448 10424 * | (DMA resources associated) | | |
10449 10425 * +================================+ | |
10450 10426 * | |
10451 10427 * | |
10452 10428 * | |
10453 10429 * +================================+ (c) | |
10454 10430 * | fcp_cmd cookies | <---------------+ |
10455 10431 * | | |
10456 10432 * +================================+ |
10457 10433 * |
10458 10434 * +================================+ (c) |
10459 10435 * | fcp_cmd | <--------------------+
10460 10436 * | (DMA resources associated) |
10461 10437 * +================================+
10462 10438 *
10463 10439 * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10464 10440 * (b) Only if DMA is NOT used for the FCP_RESP buffer
10465 10441 * (c) Only if DMA is used for the FCP_CMD buffer.
10466 10442 * (d) Only if DMA is used for the FCP_RESP buffer
10467 10443 */
static int
fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
    int kmflags)
{
	struct fcp_pkt	*cmd;
	struct fcp_port	*pptr;
	fc_packet_t	*fpkt;

	pptr = (struct fcp_port *)tran->tran_hba_private;
	cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
	/* the whole HA-private area (fcp_pkt + cookies + fca) is ours */
	bzero(cmd, tran->tran_hba_len);

	cmd->cmd_pkt = pkt;
	pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
	fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
	cmd->cmd_fp_pkt = fpkt;

	/* cross-link scsi_pkt, fcp_pkt and fc_packet (see diagram above) */
	cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
	cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
	/* FCA private area starts past the fcp_pkt and the data cookies */
	cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);

	/* data cookie array lives immediately after the fcp_pkt */
	fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
	    sizeof (struct fcp_pkt));

	fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
	fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;

	if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
		/*
		 * The underlying HBA doesn't want to DMA the fcp_cmd or
		 * fcp_resp. The transfer of information will be done by
		 * bcopy.
		 * The naming of the flags (that is actually a value) is
		 * unfortunate. FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
		 * DMA" but instead "NO DMA".
		 */
		fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
		fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
		fpkt->pkt_resp = cmd->cmd_fcp_rsp;
	} else {
		/*
		 * The underlying HBA will dma the fcp_cmd buffer and fcp_resp
		 * buffer. A buffer is allocated for each one the ddi_dma_*
		 * interfaces.
		 */
		if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
			return (-1);
		}
	}

	return (0);
}
10521 10497
10522 10498 /*
10523 10499 * Function: fcp_kmem_cache_destructor
10524 10500 *
10525 10501 * Description: Called by the destructor of the cache managed by SCSA.
10526 10502 * All the resources pre-allocated in fcp_pkt_constructor
10527 10503 * and the data also pre-initialized in fcp_pkt_constructor
10528 10504 * are freed and uninitialized here.
10529 10505 *
10530 10506 * Argument: *buf Memory to uninitialize.
10531 10507 * *arg FCP port structure (fcp_port).
10532 10508 *
10533 10509 * Return Value: None
10534 10510 *
10535 10511 * Context: kernel
10536 10512 */
10537 10513 static void
10538 10514 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10539 10515 {
10540 10516 struct fcp_pkt *cmd;
10541 10517 struct fcp_port *pptr;
10542 10518
10543 10519 pptr = (struct fcp_port *)(tran->tran_hba_private);
10544 10520 cmd = pkt->pkt_ha_private;
10545 10521
10546 10522 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10547 10523 /*
10548 10524 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10549 10525 * buffer and DMA resources allocated to do so are released.
10550 10526 */
10551 10527 fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10552 10528 }
10553 10529 }
10554 10530
10555 10531 /*
10556 10532 * Function: fcp_alloc_cmd_resp
10557 10533 *
10558 10534 * Description: This function allocated an FCP_CMD and FCP_RESP buffer that
10559 10535 * will be DMAed by the HBA. The buffer is allocated applying
10560 10536 * the DMA requirements for the HBA. The buffers allocated will
10561 10537 * also be bound. DMA resources are allocated in the process.
10562 10538 * They will be released by fcp_free_cmd_resp().
10563 10539 *
10564 10540 * Argument: *pptr FCP port.
10565 10541 * *fpkt fc packet for which the cmd and resp packet should be
10566 10542 * allocated.
10567 10543 * flags Allocation flags.
10568 10544 *
10569 10545 * Return Value: FC_FAILURE
10570 10546 * FC_SUCCESS
10571 10547 *
10572 10548 * Context: User or Kernel context only if flags == KM_SLEEP.
10573 10549 * Interrupt context if the KM_SLEEP is not specified.
10574 10550 */
10575 10551 static int
10576 10552 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10577 10553 {
10578 10554 int rval;
10579 10555 int cmd_len;
10580 10556 int resp_len;
10581 10557 ulong_t real_len;
10582 10558 int (*cb) (caddr_t);
10583 10559 ddi_dma_cookie_t pkt_cookie;
10584 10560 ddi_dma_cookie_t *cp;
10585 10561 uint32_t cnt;
10586 10562
10587 10563 cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10588 10564
10589 10565 cmd_len = fpkt->pkt_cmdlen;
10590 10566 resp_len = fpkt->pkt_rsplen;
10591 10567
10592 10568 ASSERT(fpkt->pkt_cmd_dma == NULL);
10593 10569
10594 10570 /* Allocation of a DMA handle used in subsequent calls. */
10595 10571 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10596 10572 cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10597 10573 return (FC_FAILURE);
10598 10574 }
10599 10575
10600 10576 /* A buffer is allocated that satisfies the DMA requirements. */
10601 10577 rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10602 10578 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10603 10579 (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10604 10580
10605 10581 if (rval != DDI_SUCCESS) {
10606 10582 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10607 10583 return (FC_FAILURE);
10608 10584 }
10609 10585
10610 10586 if (real_len < cmd_len) {
10611 10587 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10612 10588 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10613 10589 return (FC_FAILURE);
10614 10590 }
10615 10591
10616 10592 /* The buffer allocated is DMA bound. */
10617 10593 rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10618 10594 fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10619 10595 cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10620 10596
10621 10597 if (rval != DDI_DMA_MAPPED) {
10622 10598 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10623 10599 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10624 10600 return (FC_FAILURE);
10625 10601 }
10626 10602
10627 10603 if (fpkt->pkt_cmd_cookie_cnt >
10628 10604 pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10629 10605 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10630 10606 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10631 10607 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10632 10608 return (FC_FAILURE);
10633 10609 }
10634 10610
10635 10611 ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10636 10612
10637 10613 /*
10638 10614 * The buffer where the scatter/gather list is going to be built is
10639 10615 * allocated.
10640 10616 */
10641 10617 cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10642 10618 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10643 10619 KM_NOSLEEP);
10644 10620
10645 10621 if (cp == NULL) {
10646 10622 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10647 10623 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10648 10624 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10649 10625 return (FC_FAILURE);
10650 10626 }
10651 10627
10652 10628 /*
10653 10629 * The scatter/gather list for the buffer we just allocated is built
10654 10630 * here.
10655 10631 */
10656 10632 *cp = pkt_cookie;
10657 10633 cp++;
10658 10634
10659 10635 for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10660 10636 ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10661 10637 &pkt_cookie);
10662 10638 *cp = pkt_cookie;
10663 10639 }
10664 10640
10665 10641 ASSERT(fpkt->pkt_resp_dma == NULL);
10666 10642 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10667 10643 cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10668 10644 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10669 10645 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10670 10646 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10671 10647 return (FC_FAILURE);
10672 10648 }
10673 10649
10674 10650 rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10675 10651 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10676 10652 (caddr_t *)&fpkt->pkt_resp, &real_len,
10677 10653 &fpkt->pkt_resp_acc);
10678 10654
10679 10655 if (rval != DDI_SUCCESS) {
10680 10656 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10681 10657 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10682 10658 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10683 10659 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10684 10660 kmem_free(fpkt->pkt_cmd_cookie,
10685 10661 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10686 10662 return (FC_FAILURE);
10687 10663 }
10688 10664
10689 10665 if (real_len < resp_len) {
10690 10666 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10691 10667 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10692 10668 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10693 10669 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10694 10670 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10695 10671 kmem_free(fpkt->pkt_cmd_cookie,
10696 10672 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10697 10673 return (FC_FAILURE);
10698 10674 }
10699 10675
10700 10676 rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10701 10677 fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10702 10678 cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10703 10679
10704 10680 if (rval != DDI_DMA_MAPPED) {
10705 10681 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10706 10682 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10707 10683 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10708 10684 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10709 10685 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10710 10686 kmem_free(fpkt->pkt_cmd_cookie,
10711 10687 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10712 10688 return (FC_FAILURE);
10713 10689 }
10714 10690
10715 10691 if (fpkt->pkt_resp_cookie_cnt >
10716 10692 pptr->port_resp_dma_attr.dma_attr_sgllen) {
10717 10693 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10718 10694 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10719 10695 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10720 10696 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10721 10697 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10722 10698 kmem_free(fpkt->pkt_cmd_cookie,
10723 10699 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10724 10700 return (FC_FAILURE);
10725 10701 }
10726 10702
10727 10703 ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10728 10704
10729 10705 cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10730 10706 fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10731 10707 KM_NOSLEEP);
10732 10708
10733 10709 if (cp == NULL) {
10734 10710 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10735 10711 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10736 10712 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10737 10713 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10738 10714 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10739 10715 kmem_free(fpkt->pkt_cmd_cookie,
10740 10716 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10741 10717 return (FC_FAILURE);
10742 10718 }
10743 10719
10744 10720 *cp = pkt_cookie;
10745 10721 cp++;
10746 10722
10747 10723 for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10748 10724 ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10749 10725 &pkt_cookie);
10750 10726 *cp = pkt_cookie;
10751 10727 }
10752 10728
10753 10729 return (FC_SUCCESS);
10754 10730 }
10755 10731
10756 10732 /*
10757 10733 * Function: fcp_free_cmd_resp
10758 10734 *
10759 10735 * Description: This function releases the FCP_CMD and FCP_RESP buffer
10760 10736 * allocated by fcp_alloc_cmd_resp() and all the resources
10761 10737 * associated with them. That includes the DMA resources and the
10762 10738 * buffer allocated for the cookies of each one of them.
10763 10739 *
10764 10740 * Argument: *pptr FCP port context.
10765 10741 * *fpkt fc packet containing the cmd and resp packet
10766 10742 * to be released.
10767 10743 *
10768 10744 * Return Value: None
10769 10745 *
10770 10746 * Context: Interrupt, User and Kernel context.
10771 10747 */
10772 10748 /* ARGSUSED */
10773 10749 static void
10774 10750 fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
10775 10751 {
10776 10752 ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
10777 10753
10778 10754 if (fpkt->pkt_resp_dma) {
10779 10755 (void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
10780 10756 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10781 10757 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10782 10758 }
10783 10759
10784 10760 if (fpkt->pkt_resp_cookie) {
10785 10761 kmem_free(fpkt->pkt_resp_cookie,
10786 10762 fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
10787 10763 fpkt->pkt_resp_cookie = NULL;
10788 10764 }
10789 10765
10790 10766 if (fpkt->pkt_cmd_dma) {
10791 10767 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10792 10768 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10793 10769 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10794 10770 }
10795 10771
10796 10772 if (fpkt->pkt_cmd_cookie) {
10797 10773 kmem_free(fpkt->pkt_cmd_cookie,
10798 10774 fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
10799 10775 fpkt->pkt_cmd_cookie = NULL;
10800 10776 }
10801 10777 }
10802 10778
10803 10779
/*
 * called by the transport to do our own target initialization
 * for a physical (non-MPxIO) child: looks up the LUN matching the
 * child's port-wwn/lun properties and links it to the scsi_device.
 *
 * can acquire and release the global mutex
 *
 * Returns DDI_SUCCESS, DDI_NOT_WELL_FORMED (missing/bad properties) or
 * DDI_FAILURE (no matching LUN known to this port).
 */
/* ARGSUSED */
static int
fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	uchar_t			*bytes;
	uint_t			nbytes;
	uint16_t		lun_num;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	struct fcp_port	*pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;

	ASSERT(pptr != NULL);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
	    FCP_BUF_LEVEL_8, 0,
	    "fcp_phys_tgt_init: called for %s (instance %d)",
	    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));

	/* get our port WWN property */
	bytes = NULL;
	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
	    (nbytes != FC_WWN_SIZE)) {
		/* no port WWN property */
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
		    " for %s (instance %d): bytes=%p nbytes=%x",
		    ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
		    nbytes);

		/* lookup may have succeeded with the wrong size; free it */
		if (bytes != NULL) {
			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		}

		return (DDI_NOT_WELL_FORMED);
	}
	ASSERT(bytes != NULL);

	/* 0xFFFF is the "property not found" default, not a valid LUN */
	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
	    LUN_PROP, 0xFFFF);
	if (lun_num == 0xFFFF) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_NOT_WELL_FORMED);
	}

	/* find the LUN this child names; lock order: port, then target */
	mutex_enter(&pptr->port_mutex);
	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
		mutex_exit(&pptr->port_mutex);
		FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_8, 0,
		    "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_FAILURE);
	}

	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
	    FC_WWN_SIZE) == 0);
	ASSERT(plun->lun_num == lun_num);

	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);

	ptgt = plun->lun_tgt;

	/* bind the LUN to the scsi_device under the target lock */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_tgt_count++;
	scsi_device_hba_private_set(sd, plun);
	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
	plun->lun_sd = sd;
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	return (DDI_SUCCESS);
}
10894 10870
/*
 * Target initialization for an MPxIO (virtual) child.  Same flow as
 * fcp_phys_tgt_init() but additionally requires the scsi_device to carry
 * a pathinfo node (sd_pathinfo).
 *
 * can acquire and release the global mutex
 */
/*ARGSUSED*/
static int
fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	uchar_t			*bytes;
	uint_t			nbytes;
	uint16_t		lun_num;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	struct fcp_port	*pptr = (struct fcp_port *)
	    hba_tran->tran_hba_private;
	child_info_t		*cip;

	ASSERT(pptr != NULL);

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0,
	    "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
	    " (tgt_dip %p)", ddi_get_name(tgt_dip),
	    ddi_get_instance(tgt_dip), hba_dip, tgt_dip);

	/* a virtual child must come with a pathinfo node */
	cip = (child_info_t *)sd->sd_pathinfo;
	if (cip == NULL) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		return (DDI_NOT_WELL_FORMED);
	}

	/* get our port WWN property */
	bytes = NULL;
	if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
	    PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
	    (nbytes != FC_WWN_SIZE)) {
		/* lookup may have succeeded with the wrong size; free it */
		if (bytes) {
			scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		}
		return (DDI_NOT_WELL_FORMED);
	}

	ASSERT(bytes != NULL);

	/* 0xFFFF is the "property not found" default, not a valid LUN */
	lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
	    LUN_PROP, 0xFFFF);
	if (lun_num == 0xFFFF) {
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_NOT_WELL_FORMED);
	}

	/* find the LUN this child names; lock order: port, then target */
	mutex_enter(&pptr->port_mutex);
	if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
		mutex_exit(&pptr->port_mutex);
		FCP_DTRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_8, 0,
		    "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
		    " for %s (instance %d)", ddi_get_name(tgt_dip),
		    ddi_get_instance(tgt_dip));

		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
		return (DDI_FAILURE);
	}

	ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
	    FC_WWN_SIZE) == 0);
	ASSERT(plun->lun_num == lun_num);

	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);

	ptgt = plun->lun_tgt;

	/* bind the LUN to the scsi_device under the target lock */
	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_tgt_count++;
	scsi_device_hba_private_set(sd, plun);
	plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
	plun->lun_sd = sd;
	mutex_exit(&ptgt->tgt_mutex);
	mutex_exit(&pptr->port_mutex);

	return (DDI_SUCCESS);
}
10985 10961
10986 10962
10987 10963 /*
10988 10964 * called by the transport to do our own target initialization
10989 10965 *
10990 10966 * can acquire and release the global mutex
10991 10967 */
10992 10968 /* ARGSUSED */
10993 10969 static int
10994 10970 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10995 10971 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10996 10972 {
10997 10973 struct fcp_port *pptr = (struct fcp_port *)
10998 10974 hba_tran->tran_hba_private;
10999 10975 int rval;
11000 10976
11001 10977 ASSERT(pptr != NULL);
11002 10978
11003 10979 /*
11004 10980 * Child node is getting initialized. Look at the mpxio component
11005 10981 * type on the child device to see if this device is mpxio managed
11006 10982 * or not.
11007 10983 */
11008 10984 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
11009 10985 rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11010 10986 } else {
11011 10987 rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11012 10988 }
11013 10989
11014 10990 return (rval);
11015 10991 }
11016 10992
11017 10993
11018 10994 /* ARGSUSED */
11019 10995 static void
11020 10996 fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
11021 10997 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
11022 10998 {
11023 10999 struct fcp_lun *plun = scsi_device_hba_private_get(sd);
11024 11000 struct fcp_tgt *ptgt;
11025 11001
11026 11002 FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
11027 11003 fcp_trace, FCP_BUF_LEVEL_8, 0,
11028 11004 "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
11029 11005 ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
11030 11006 ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
11031 11007
11032 11008 if (plun == NULL) {
11033 11009 return;
11034 11010 }
11035 11011 ptgt = plun->lun_tgt;
11036 11012
11037 11013 ASSERT(ptgt != NULL);
11038 11014
11039 11015 mutex_enter(&ptgt->tgt_mutex);
11040 11016 ASSERT(plun->lun_tgt_count > 0);
11041 11017
11042 11018 if (--plun->lun_tgt_count == 0) {
11043 11019 plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
11044 11020 }
11045 11021 plun->lun_sd = NULL;
11046 11022 mutex_exit(&ptgt->tgt_mutex);
11047 11023 }
11048 11024
11049 11025 /*
11050 11026 * Function: fcp_scsi_start
11051 11027 *
11052 11028 * Description: This function is called by the target driver to request a
11053 11029 * command to be sent.
11054 11030 *
11055 11031 * Argument: *ap SCSI address of the device.
11056 11032 * *pkt SCSI packet containing the cmd to send.
11057 11033 *
11058 11034 * Return Value: TRAN_ACCEPT
11059 11035 * TRAN_BUSY
11060 11036 * TRAN_BADPKT
11061 11037 * TRAN_FATAL_ERROR
11062 11038 */
11063 11039 static int
11064 11040 fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
11065 11041 {
11066 11042 struct fcp_port *pptr = ADDR2FCP(ap);
11067 11043 struct fcp_lun *plun = ADDR2LUN(ap);
11068 11044 struct fcp_pkt *cmd = PKT2CMD(pkt);
11069 11045 struct fcp_tgt *ptgt = plun->lun_tgt;
11070 11046 int rval;
11071 11047
11072 11048 /* ensure command isn't already issued */
11073 11049 ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
11074 11050
11075 11051 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11076 11052 fcp_trace, FCP_BUF_LEVEL_9, 0,
11077 11053 "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);
11078 11054
11079 11055 /*
11080 11056 * It is strange that we enter the fcp_port mutex and the target
11081 11057 * mutex to check the lun state (which has a mutex of its own).
11082 11058 */
11083 11059 mutex_enter(&pptr->port_mutex);
11084 11060 mutex_enter(&ptgt->tgt_mutex);
11085 11061
11086 11062 /*
11087 11063 * If the device is offline and is not in the process of coming
11088 11064 * online, fail the request.
11089 11065 */
11090 11066
11091 11067 if ((plun->lun_state & FCP_LUN_OFFLINE) &&
11092 11068 !(plun->lun_state & FCP_LUN_ONLINING)) {
11093 11069 mutex_exit(&ptgt->tgt_mutex);
11094 11070 mutex_exit(&pptr->port_mutex);
11095 11071
11096 11072 if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
11097 11073 pkt->pkt_reason = CMD_DEV_GONE;
11098 11074 }
11099 11075
11100 11076 return (TRAN_FATAL_ERROR);
11101 11077 }
11102 11078 cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;
11103 11079
11104 11080 /*
11105 11081 * If we are suspended, kernel is trying to dump, so don't
11106 11082 * block, fail or defer requests - send them down right away.
11107 11083 * NOTE: If we are in panic (i.e. trying to dump), we can't
11108 11084 * assume we have been suspended. There is hardware such as
11109 11085 * the v880 that doesn't do PM. Thus, the check for
11110 11086 * ddi_in_panic.
11111 11087 *
11112 11088 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
11113 11089 * of changing. So, if we can queue the packet, do it. Eventually,
11114 11090 * either the device will have gone away or changed and we can fail
11115 11091 * the request, or we can proceed if the device didn't change.
11116 11092 *
11117 11093 * If the pd in the target or the packet is NULL it's probably
11118 11094 * because the device has gone away, we allow the request to be
11119 11095 * put on the internal queue here in case the device comes back within
11120 11096 * the offline timeout. fctl will fix up the pd's if the tgt_pd_handle
11121 11097 * has gone NULL, while fcp deals cases where pkt_pd is NULL. pkt_pd
11122 11098 * could be NULL because the device was disappearing during or since
11123 11099 * packet initialization.
11124 11100 */
11125 11101
11126 11102 if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
11127 11103 FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
11128 11104 (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
11129 11105 (ptgt->tgt_pd_handle == NULL) ||
11130 11106 (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
11131 11107 /*
11132 11108 * If ((LUN is busy AND
11133 11109 * LUN not suspended AND
11134 11110 * The system is not in panic state) OR
11135 11111 * (The port is coming up))
11136 11112 *
11137 11113 * We check to see if the any of the flags FLAG_NOINTR or
11138 11114 * FLAG_NOQUEUE is set. If one of them is set the value
11139 11115 * returned will be TRAN_BUSY. If not, the request is queued.
11140 11116 */
11141 11117 mutex_exit(&ptgt->tgt_mutex);
11142 11118 mutex_exit(&pptr->port_mutex);
11143 11119
11144 11120 /* see if using interrupts is allowed (so queueing'll work) */
11145 11121 if (pkt->pkt_flags & FLAG_NOINTR) {
11146 11122 pkt->pkt_resid = 0;
11147 11123 return (TRAN_BUSY);
11148 11124 }
11149 11125 if (pkt->pkt_flags & FLAG_NOQUEUE) {
11150 11126 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11151 11127 fcp_trace, FCP_BUF_LEVEL_9, 0,
11152 11128 "fcp_scsi_start: lun busy for pkt %p", pkt);
11153 11129 return (TRAN_BUSY);
11154 11130 }
11155 11131 #ifdef DEBUG
11156 11132 mutex_enter(&pptr->port_pkt_mutex);
11157 11133 pptr->port_npkts++;
11158 11134 mutex_exit(&pptr->port_pkt_mutex);
11159 11135 #endif /* DEBUG */
11160 11136
11161 11137 /* got queue up the pkt for later */
11162 11138 fcp_queue_pkt(pptr, cmd);
11163 11139 return (TRAN_ACCEPT);
11164 11140 }
11165 11141 cmd->cmd_state = FCP_PKT_ISSUED;
11166 11142
11167 11143 mutex_exit(&ptgt->tgt_mutex);
11168 11144 mutex_exit(&pptr->port_mutex);
11169 11145
11170 11146 /*
11171 11147 * Now that we released the mutexes, what was protected by them can
11172 11148 * change.
11173 11149 */
11174 11150
11175 11151 /*
11176 11152 * If there is a reconfiguration in progress, wait for it to complete.
11177 11153 */
11178 11154 fcp_reconfig_wait(pptr);
11179 11155
11180 11156 cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
11181 11157 pkt->pkt_time : 0;
11182 11158
11183 11159 /* prepare the packet */
11184 11160
11185 11161 fcp_prepare_pkt(pptr, cmd, plun);
11186 11162
11187 11163 if (cmd->cmd_pkt->pkt_time) {
11188 11164 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11189 11165 } else {
11190 11166 cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11191 11167 }
11192 11168
11193 11169 /*
11194 11170 * if interrupts aren't allowed (e.g. at dump time) then we'll
11195 11171 * have to do polled I/O
11196 11172 */
11197 11173 if (pkt->pkt_flags & FLAG_NOINTR) {
11198 11174 cmd->cmd_state &= ~FCP_PKT_ISSUED;
11199 11175 return (fcp_dopoll(pptr, cmd));
11200 11176 }
11201 11177
11202 11178 #ifdef DEBUG
11203 11179 mutex_enter(&pptr->port_pkt_mutex);
11204 11180 pptr->port_npkts++;
11205 11181 mutex_exit(&pptr->port_pkt_mutex);
11206 11182 #endif /* DEBUG */
11207 11183
11208 11184 rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
11209 11185 if (rval == FC_SUCCESS) {
11210 11186 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11211 11187 fcp_trace, FCP_BUF_LEVEL_9, 0,
11212 11188 "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
11213 11189 return (TRAN_ACCEPT);
11214 11190 }
11215 11191
11216 11192 cmd->cmd_state = FCP_PKT_IDLE;
11217 11193
11218 11194 #ifdef DEBUG
11219 11195 mutex_enter(&pptr->port_pkt_mutex);
11220 11196 pptr->port_npkts--;
11221 11197 mutex_exit(&pptr->port_pkt_mutex);
11222 11198 #endif /* DEBUG */
11223 11199
11224 11200 /*
11225 11201 * For lack of clearer definitions, choose
11226 11202 * between TRAN_BUSY and TRAN_FATAL_ERROR.
11227 11203 */
11228 11204
11229 11205 if (rval == FC_TRAN_BUSY) {
11230 11206 pkt->pkt_resid = 0;
11231 11207 rval = TRAN_BUSY;
11232 11208 } else {
11233 11209 mutex_enter(&ptgt->tgt_mutex);
11234 11210 if (plun->lun_state & FCP_LUN_OFFLINE) {
11235 11211 child_info_t *cip;
11236 11212
11237 11213 mutex_enter(&plun->lun_mutex);
11238 11214 cip = plun->lun_cip;
11239 11215 mutex_exit(&plun->lun_mutex);
11240 11216
11241 11217 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11242 11218 fcp_trace, FCP_BUF_LEVEL_6, 0,
11243 11219 "fcp_transport failed 2 for %x: %x; dip=%p",
11244 11220 plun->lun_tgt->tgt_d_id, rval, cip);
11245 11221
11246 11222 rval = TRAN_FATAL_ERROR;
11247 11223 } else {
11248 11224 if (pkt->pkt_flags & FLAG_NOQUEUE) {
11249 11225 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11250 11226 fcp_trace, FCP_BUF_LEVEL_9, 0,
11251 11227 "fcp_scsi_start: FC_BUSY for pkt %p",
11252 11228 pkt);
11253 11229 rval = TRAN_BUSY;
11254 11230 } else {
11255 11231 rval = TRAN_ACCEPT;
11256 11232 fcp_queue_pkt(pptr, cmd);
11257 11233 }
11258 11234 }
11259 11235 mutex_exit(&ptgt->tgt_mutex);
11260 11236 }
11261 11237
11262 11238 return (rval);
11263 11239 }
11264 11240
11265 11241 /*
11266 11242 * called by the transport to abort a packet
11267 11243 */
11268 11244 /*ARGSUSED*/
11269 11245 static int
11270 11246 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11271 11247 {
11272 11248 int tgt_cnt;
11273 11249 struct fcp_port *pptr = ADDR2FCP(ap);
11274 11250 struct fcp_lun *plun = ADDR2LUN(ap);
11275 11251 struct fcp_tgt *ptgt = plun->lun_tgt;
11276 11252
11277 11253 if (pkt == NULL) {
11278 11254 if (ptgt) {
11279 11255 mutex_enter(&ptgt->tgt_mutex);
11280 11256 tgt_cnt = ptgt->tgt_change_cnt;
11281 11257 mutex_exit(&ptgt->tgt_mutex);
11282 11258 fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11283 11259 return (TRUE);
11284 11260 }
11285 11261 }
11286 11262 return (FALSE);
11287 11263 }
11288 11264
11289 11265
11290 11266 /*
11291 11267 * Perform reset
11292 11268 */
11293 11269 int
11294 11270 fcp_scsi_reset(struct scsi_address *ap, int level)
11295 11271 {
11296 11272 int rval = 0;
11297 11273 struct fcp_port *pptr = ADDR2FCP(ap);
11298 11274 struct fcp_lun *plun = ADDR2LUN(ap);
11299 11275 struct fcp_tgt *ptgt = plun->lun_tgt;
11300 11276
11301 11277 if (level == RESET_ALL) {
11302 11278 if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
11303 11279 rval = 1;
11304 11280 }
11305 11281 } else if (level == RESET_TARGET || level == RESET_LUN) {
11306 11282 /*
11307 11283 * If we are in the middle of discovery, return
11308 11284 * SUCCESS as this target will be rediscovered
11309 11285 * anyway
11310 11286 */
11311 11287 mutex_enter(&ptgt->tgt_mutex);
11312 11288 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11313 11289 mutex_exit(&ptgt->tgt_mutex);
11314 11290 return (1);
11315 11291 }
11316 11292 mutex_exit(&ptgt->tgt_mutex);
11317 11293
11318 11294 if (fcp_reset_target(ap, level) == FC_SUCCESS) {
11319 11295 rval = 1;
11320 11296 }
11321 11297 }
11322 11298 return (rval);
11323 11299 }
11324 11300
11325 11301
11326 11302 /*
11327 11303 * called by the framework to get a SCSI capability
11328 11304 */
11329 11305 static int
11330 11306 fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
11331 11307 {
11332 11308 return (fcp_commoncap(ap, cap, 0, whom, 0));
11333 11309 }
11334 11310
11335 11311
11336 11312 /*
11337 11313 * called by the framework to set a SCSI capability
11338 11314 */
11339 11315 static int
11340 11316 fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
11341 11317 {
11342 11318 return (fcp_commoncap(ap, cap, value, whom, 1));
11343 11319 }
11344 11320
11345 11321 /*
11346 11322 * Function: fcp_pkt_setup
11347 11323 *
11348 11324 * Description: This function sets up the scsi_pkt structure passed by the
11349 11325 * caller. This function assumes fcp_pkt_constructor has been
11350 11326 * called previously for the packet passed by the caller. If
11351 11327 * successful this call will have the following results:
11352 11328 *
11353 11329 * - The resources needed that will be constant through out
11354 11330 * the whole transaction are allocated.
11355 11331 * - The fields that will be constant through out the whole
11356 11332 * transaction are initialized.
11357 11333 * - The scsi packet will be linked to the LUN structure
11358 11334 * addressed by the transaction.
11359 11335 *
11360 11336 * Argument:
11361 11337 * *pkt Pointer to a scsi_pkt structure.
11362 11338 * callback
11363 11339 * arg
11364 11340 *
11365 11341 * Return Value: 0 Success
11366 11342 * !0 Failure
11367 11343 *
11368 11344 * Context: Kernel context or interrupt context
11369 11345 */
11370 11346 /* ARGSUSED */
11371 11347 static int
11372 11348 fcp_pkt_setup(struct scsi_pkt *pkt,
11373 11349 int (*callback)(caddr_t arg),
11374 11350 caddr_t arg)
11375 11351 {
11376 11352 struct fcp_pkt *cmd;
11377 11353 struct fcp_port *pptr;
11378 11354 struct fcp_lun *plun;
11379 11355 struct fcp_tgt *ptgt;
11380 11356 int kf;
11381 11357 fc_packet_t *fpkt;
11382 11358 fc_frame_hdr_t *hp;
11383 11359
11384 11360 pptr = ADDR2FCP(&pkt->pkt_address);
11385 11361 plun = ADDR2LUN(&pkt->pkt_address);
11386 11362 ptgt = plun->lun_tgt;
11387 11363
11388 11364 cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11389 11365 fpkt = cmd->cmd_fp_pkt;
11390 11366
11391 11367 /*
11392 11368 * this request is for dma allocation only
11393 11369 */
11394 11370 /*
11395 11371 * First step of fcp_scsi_init_pkt: pkt allocation
11396 11372 * We determine if the caller is willing to wait for the
11397 11373 * resources.
11398 11374 */
11399 11375 kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
11400 11376
11401 11377 /*
11402 11378 * Selective zeroing of the pkt.
11403 11379 */
11404 11380 cmd->cmd_back = NULL;
11405 11381 cmd->cmd_next = NULL;
11406 11382
11407 11383 /*
11408 11384 * Zero out fcp command
11409 11385 */
11410 11386 bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));
11411 11387
11412 11388 cmd->cmd_state = FCP_PKT_IDLE;
11413 11389
11414 11390 fpkt = cmd->cmd_fp_pkt;
11415 11391 fpkt->pkt_data_acc = NULL;
11416 11392
11417 11393 /*
11418 11394 * When port_state is FCP_STATE_OFFLINE, remote_port (tgt_pd_handle)
11419 11395 * could be destroyed. We need fail pkt_setup.
11420 11396 */
11421 11397 if (pptr->port_state & FCP_STATE_OFFLINE) {
11422 11398 return (-1);
11423 11399 }
11424 11400
11425 11401 mutex_enter(&ptgt->tgt_mutex);
11426 11402 fpkt->pkt_pd = ptgt->tgt_pd_handle;
11427 11403
11428 11404 if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
11429 11405 != FC_SUCCESS) {
11430 11406 mutex_exit(&ptgt->tgt_mutex);
11431 11407 return (-1);
11432 11408 }
11433 11409
11434 11410 mutex_exit(&ptgt->tgt_mutex);
11435 11411
11436 11412 /* Fill in the Fabric Channel Header */
11437 11413 hp = &fpkt->pkt_cmd_fhdr;
11438 11414 hp->r_ctl = R_CTL_COMMAND;
11439 11415 hp->rsvd = 0;
11440 11416 hp->type = FC_TYPE_SCSI_FCP;
11441 11417 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
11442 11418 hp->seq_id = 0;
11443 11419 hp->df_ctl = 0;
11444 11420 hp->seq_cnt = 0;
11445 11421 hp->ox_id = 0xffff;
11446 11422 hp->rx_id = 0xffff;
11447 11423 hp->ro = 0;
11448 11424
11449 11425 /*
11450 11426 * A doubly linked list (cmd_forw, cmd_back) is built
11451 11427 * out of every allocated packet on a per-lun basis
11452 11428 *
11453 11429 * The packets are maintained in the list so as to satisfy
11454 11430 * scsi_abort() requests. At present (which is unlikely to
11455 11431 * change in the future) nobody performs a real scsi_abort
11456 11432 * in the SCSI target drivers (as they don't keep the packets
11457 11433 * after doing scsi_transport - so they don't know how to
11458 11434 * abort a packet other than sending a NULL to abort all
11459 11435 * outstanding packets)
11460 11436 */
11461 11437 mutex_enter(&plun->lun_mutex);
11462 11438 if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
11463 11439 plun->lun_pkt_head->cmd_back = cmd;
11464 11440 } else {
11465 11441 plun->lun_pkt_tail = cmd;
11466 11442 }
11467 11443 plun->lun_pkt_head = cmd;
11468 11444 mutex_exit(&plun->lun_mutex);
11469 11445 return (0);
11470 11446 }
11471 11447
11472 11448 /*
11473 11449 * Function: fcp_pkt_teardown
11474 11450 *
11475 11451 * Description: This function releases a scsi_pkt structure and all the
11476 11452 * resources attached to it.
11477 11453 *
11478 11454 * Argument: *pkt Pointer to a scsi_pkt structure.
11479 11455 *
11480 11456 * Return Value: None
11481 11457 *
11482 11458 * Context: User, Kernel or Interrupt context.
11483 11459 */
11484 11460 static void
11485 11461 fcp_pkt_teardown(struct scsi_pkt *pkt)
11486 11462 {
11487 11463 struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
11488 11464 struct fcp_lun *plun = ADDR2LUN(&pkt->pkt_address);
11489 11465 struct fcp_pkt *cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11490 11466
11491 11467 /*
11492 11468 * Remove the packet from the per-lun list
11493 11469 */
11494 11470 mutex_enter(&plun->lun_mutex);
11495 11471 if (cmd->cmd_back) {
11496 11472 ASSERT(cmd != plun->lun_pkt_head);
11497 11473 cmd->cmd_back->cmd_forw = cmd->cmd_forw;
11498 11474 } else {
11499 11475 ASSERT(cmd == plun->lun_pkt_head);
11500 11476 plun->lun_pkt_head = cmd->cmd_forw;
11501 11477 }
11502 11478
11503 11479 if (cmd->cmd_forw) {
11504 11480 cmd->cmd_forw->cmd_back = cmd->cmd_back;
11505 11481 } else {
11506 11482 ASSERT(cmd == plun->lun_pkt_tail);
11507 11483 plun->lun_pkt_tail = cmd->cmd_back;
11508 11484 }
11509 11485
11510 11486 mutex_exit(&plun->lun_mutex);
11511 11487
11512 11488 (void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
11513 11489 }
11514 11490
11515 11491 /*
11516 11492 * Routine for reset notification setup, to register or cancel.
11517 11493 * This function is called by SCSA
11518 11494 */
11519 11495 /*ARGSUSED*/
11520 11496 static int
11521 11497 fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
11522 11498 void (*callback)(caddr_t), caddr_t arg)
11523 11499 {
11524 11500 struct fcp_port *pptr = ADDR2FCP(ap);
11525 11501
11526 11502 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
11527 11503 &pptr->port_mutex, &pptr->port_reset_notify_listf));
11528 11504 }
11529 11505
11530 11506
11531 11507 static int
11532 11508 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11533 11509 ddi_eventcookie_t *event_cookiep)
11534 11510 {
11535 11511 struct fcp_port *pptr = fcp_dip2port(dip);
11536 11512
11537 11513 if (pptr == NULL) {
11538 11514 return (DDI_FAILURE);
11539 11515 }
11540 11516
11541 11517 return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11542 11518 event_cookiep, NDI_EVENT_NOPASS));
11543 11519 }
11544 11520
11545 11521
11546 11522 static int
11547 11523 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11548 11524 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11549 11525 ddi_callback_id_t *cb_id)
11550 11526 {
11551 11527 struct fcp_port *pptr = fcp_dip2port(dip);
11552 11528
11553 11529 if (pptr == NULL) {
11554 11530 return (DDI_FAILURE);
11555 11531 }
11556 11532
11557 11533 return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11558 11534 eventid, callback, arg, NDI_SLEEP, cb_id));
11559 11535 }
11560 11536
11561 11537
11562 11538 static int
11563 11539 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11564 11540 {
11565 11541
11566 11542 struct fcp_port *pptr = fcp_dip2port(dip);
11567 11543
11568 11544 if (pptr == NULL) {
11569 11545 return (DDI_FAILURE);
11570 11546 }
11571 11547 return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11572 11548 }
11573 11549
11574 11550
11575 11551 /*
11576 11552 * called by the transport to post an event
11577 11553 */
11578 11554 static int
11579 11555 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11580 11556 ddi_eventcookie_t eventid, void *impldata)
11581 11557 {
11582 11558 struct fcp_port *pptr = fcp_dip2port(dip);
11583 11559
11584 11560 if (pptr == NULL) {
11585 11561 return (DDI_FAILURE);
11586 11562 }
11587 11563
11588 11564 return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11589 11565 eventid, impldata));
11590 11566 }
11591 11567
11592 11568
11593 11569 /*
11594 11570 * A target in in many cases in Fibre Channel has a one to one relation
11595 11571 * with a port identifier (which is also known as D_ID and also as AL_PA
11596 11572 * in private Loop) On Fibre Channel-to-SCSI bridge boxes a target reset
11597 11573 * will most likely result in resetting all LUNs (which means a reset will
11598 11574 * occur on all the SCSI devices connected at the other end of the bridge)
11599 11575 * That is the latest favorite topic for discussion, for, one can debate as
11600 11576 * hot as one likes and come up with arguably a best solution to one's
11601 11577 * satisfaction
11602 11578 *
11603 11579 * To stay on track and not digress much, here are the problems stated
11604 11580 * briefly:
11605 11581 *
11606 11582 * SCSA doesn't define RESET_LUN, It defines RESET_TARGET, but the
11607 11583 * target drivers use RESET_TARGET even if their instance is on a
11608 11584 * LUN. Doesn't that sound a bit broken ?
11609 11585 *
11610 11586 * FCP SCSI (the current spec) only defines RESET TARGET in the
11611 11587 * control fields of an FCP_CMND structure. It should have been
11612 11588 * fixed right there, giving flexibility to the initiators to
11613 11589 * minimize havoc that could be caused by resetting a target.
11614 11590 */
11615 11591 static int
11616 11592 fcp_reset_target(struct scsi_address *ap, int level)
11617 11593 {
11618 11594 int rval = FC_FAILURE;
11619 11595 char lun_id[25];
11620 11596 struct fcp_port *pptr = ADDR2FCP(ap);
11621 11597 struct fcp_lun *plun = ADDR2LUN(ap);
11622 11598 struct fcp_tgt *ptgt = plun->lun_tgt;
11623 11599 struct scsi_pkt *pkt;
11624 11600 struct fcp_pkt *cmd;
11625 11601 struct fcp_rsp *rsp;
11626 11602 uint32_t tgt_cnt;
11627 11603 struct fcp_rsp_info *rsp_info;
11628 11604 struct fcp_reset_elem *p;
11629 11605 int bval;
11630 11606
11631 11607 if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
11632 11608 KM_NOSLEEP)) == NULL) {
11633 11609 return (rval);
11634 11610 }
11635 11611
11636 11612 mutex_enter(&ptgt->tgt_mutex);
11637 11613 if (level == RESET_TARGET) {
11638 11614 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11639 11615 mutex_exit(&ptgt->tgt_mutex);
11640 11616 kmem_free(p, sizeof (struct fcp_reset_elem));
11641 11617 return (rval);
11642 11618 }
11643 11619 fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
11644 11620 (void) strcpy(lun_id, " ");
11645 11621 } else {
11646 11622 if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
11647 11623 mutex_exit(&ptgt->tgt_mutex);
11648 11624 kmem_free(p, sizeof (struct fcp_reset_elem));
11649 11625 return (rval);
11650 11626 }
11651 11627 fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
11652 11628
11653 11629 (void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
11654 11630 }
11655 11631 tgt_cnt = ptgt->tgt_change_cnt;
11656 11632
11657 11633 mutex_exit(&ptgt->tgt_mutex);
11658 11634
11659 11635 if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
11660 11636 0, 0, NULL, 0)) == NULL) {
11661 11637 kmem_free(p, sizeof (struct fcp_reset_elem));
11662 11638 mutex_enter(&ptgt->tgt_mutex);
11663 11639 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11664 11640 mutex_exit(&ptgt->tgt_mutex);
11665 11641 return (rval);
11666 11642 }
11667 11643 pkt->pkt_time = FCP_POLL_TIMEOUT;
11668 11644
11669 11645 /* fill in cmd part of packet */
11670 11646 cmd = PKT2CMD(pkt);
11671 11647 if (level == RESET_TARGET) {
11672 11648 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
11673 11649 } else {
11674 11650 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
11675 11651 }
11676 11652 cmd->cmd_fp_pkt->pkt_comp = NULL;
11677 11653 cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
11678 11654
11679 11655 /* prepare a packet for transport */
11680 11656 fcp_prepare_pkt(pptr, cmd, plun);
11681 11657
11682 11658 if (cmd->cmd_pkt->pkt_time) {
11683 11659 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11684 11660 } else {
11685 11661 cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11686 11662 }
11687 11663
11688 11664 (void) fc_ulp_busy_port(pptr->port_fp_handle);
11689 11665 bval = fcp_dopoll(pptr, cmd);
11690 11666 fc_ulp_idle_port(pptr->port_fp_handle);
11691 11667
11692 11668 /* submit the packet */
11693 11669 if (bval == TRAN_ACCEPT) {
11694 11670 int error = 3;
11695 11671
11696 11672 rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
11697 11673 rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
11698 11674 sizeof (struct fcp_rsp));
11699 11675
11700 11676 if (rsp->fcp_u.fcp_status.rsp_len_set) {
11701 11677 if (fcp_validate_fcp_response(rsp, pptr) ==
11702 11678 FC_SUCCESS) {
11703 11679 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
11704 11680 FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
11705 11681 sizeof (struct fcp_rsp), rsp_info,
11706 11682 cmd->cmd_fp_pkt->pkt_resp_acc,
11707 11683 sizeof (struct fcp_rsp_info));
11708 11684 }
11709 11685 if (rsp_info->rsp_code == FCP_NO_FAILURE) {
11710 11686 rval = FC_SUCCESS;
11711 11687 error = 0;
11712 11688 } else {
11713 11689 error = 1;
11714 11690 }
11715 11691 } else {
11716 11692 error = 2;
11717 11693 }
11718 11694 }
11719 11695
11720 11696 switch (error) {
11721 11697 case 0:
11722 11698 fcp_log(CE_WARN, pptr->port_dip,
11723 11699 "!FCP: WWN 0x%08x%08x %s reset successfully",
11724 11700 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11725 11701 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11726 11702 break;
11727 11703
11728 11704 case 1:
11729 11705 fcp_log(CE_WARN, pptr->port_dip,
11730 11706 "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11731 11707 " response code=%x",
11732 11708 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11733 11709 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11734 11710 rsp_info->rsp_code);
11735 11711 break;
11736 11712
11737 11713 case 2:
11738 11714 fcp_log(CE_WARN, pptr->port_dip,
11739 11715 "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11740 11716 " Bad FCP response values: rsvd1=%x,"
11741 11717 " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
11742 11718 " rsplen=%x, senselen=%x",
11743 11719 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11744 11720 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11745 11721 rsp->reserved_0, rsp->reserved_1,
11746 11722 rsp->fcp_u.fcp_status.reserved_0,
11747 11723 rsp->fcp_u.fcp_status.reserved_1,
11748 11724 rsp->fcp_response_len, rsp->fcp_sense_len);
11749 11725 break;
11750 11726
11751 11727 default:
11752 11728 fcp_log(CE_WARN, pptr->port_dip,
11753 11729 "!FCP: Reset to WWN 0x%08x%08x %s failed",
11754 11730 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11755 11731 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11756 11732 break;
11757 11733 }
11758 11734 }
11759 11735 scsi_destroy_pkt(pkt);
11760 11736
11761 11737 if (rval == FC_FAILURE) {
11762 11738 mutex_enter(&ptgt->tgt_mutex);
11763 11739 if (level == RESET_TARGET) {
11764 11740 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11765 11741 } else {
11766 11742 fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
11767 11743 }
11768 11744 mutex_exit(&ptgt->tgt_mutex);
11769 11745 kmem_free(p, sizeof (struct fcp_reset_elem));
11770 11746 return (rval);
11771 11747 }
11772 11748
11773 11749 mutex_enter(&pptr->port_mutex);
11774 11750 if (level == RESET_TARGET) {
11775 11751 p->tgt = ptgt;
11776 11752 p->lun = NULL;
11777 11753 } else {
11778 11754 p->tgt = NULL;
11779 11755 p->lun = plun;
11780 11756 }
11781 11757 p->tgt = ptgt;
11782 11758 p->tgt_cnt = tgt_cnt;
11783 11759 p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
11784 11760 p->next = pptr->port_reset_list;
11785 11761 pptr->port_reset_list = p;
11786 11762
11787 11763 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11788 11764 fcp_trace, FCP_BUF_LEVEL_3, 0,
11789 11765 "Notify ssd of the reset to reinstate the reservations");
11790 11766
11791 11767 scsi_hba_reset_notify_callback(&pptr->port_mutex,
11792 11768 &pptr->port_reset_notify_listf);
11793 11769
11794 11770 mutex_exit(&pptr->port_mutex);
11795 11771
11796 11772 return (rval);
11797 11773 }
11798 11774
11799 11775
11800 11776 /*
11801 11777 * called by fcp_getcap and fcp_setcap to get and set (respectively)
11802 11778 * SCSI capabilities
11803 11779 */
11804 11780 /* ARGSUSED */
11805 11781 static int
11806 11782 fcp_commoncap(struct scsi_address *ap, char *cap,
11807 11783 int val, int tgtonly, int doset)
11808 11784 {
11809 11785 struct fcp_port *pptr = ADDR2FCP(ap);
11810 11786 struct fcp_lun *plun = ADDR2LUN(ap);
11811 11787 struct fcp_tgt *ptgt = plun->lun_tgt;
11812 11788 int cidx;
11813 11789 int rval = FALSE;
11814 11790
11815 11791 if (cap == (char *)0) {
11816 11792 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11817 11793 fcp_trace, FCP_BUF_LEVEL_3, 0,
11818 11794 "fcp_commoncap: invalid arg");
11819 11795 return (rval);
11820 11796 }
11821 11797
11822 11798 if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
11823 11799 return (UNDEFINED);
11824 11800 }
11825 11801
11826 11802 /*
11827 11803 * Process setcap request.
11828 11804 */
11829 11805 if (doset) {
11830 11806 /*
11831 11807 * At present, we can only set binary (0/1) values
11832 11808 */
11833 11809 switch (cidx) {
11834 11810 case SCSI_CAP_ARQ:
11835 11811 if (val == 0) {
11836 11812 rval = FALSE;
11837 11813 } else {
11838 11814 rval = TRUE;
11839 11815 }
11840 11816 break;
11841 11817
11842 11818 case SCSI_CAP_LUN_RESET:
11843 11819 if (val) {
11844 11820 plun->lun_cap |= FCP_LUN_CAP_RESET;
11845 11821 } else {
11846 11822 plun->lun_cap &= ~FCP_LUN_CAP_RESET;
11847 11823 }
11848 11824 rval = TRUE;
11849 11825 break;
11850 11826
11851 11827 case SCSI_CAP_SECTOR_SIZE:
11852 11828 rval = TRUE;
11853 11829 break;
11854 11830 default:
11855 11831 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11856 11832 fcp_trace, FCP_BUF_LEVEL_4, 0,
11857 11833 "fcp_setcap: unsupported %d", cidx);
11858 11834 rval = UNDEFINED;
11859 11835 break;
11860 11836 }
11861 11837
11862 11838 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11863 11839 fcp_trace, FCP_BUF_LEVEL_5, 0,
11864 11840 "set cap: cap=%s, val/tgtonly/doset/rval = "
11865 11841 "0x%x/0x%x/0x%x/%d",
11866 11842 cap, val, tgtonly, doset, rval);
11867 11843
11868 11844 } else {
11869 11845 /*
11870 11846 * Process getcap request.
11871 11847 */
11872 11848 switch (cidx) {
11873 11849 case SCSI_CAP_DMA_MAX:
11874 11850 rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;
11875 11851
11876 11852 /*
11877 11853 * Need to make an adjustment qlc is uint_t 64
11878 11854 * st is int, so we will make the adjustment here
11879 11855 * being as nobody wants to touch this.
11880 11856 * It still leaves the max single block length
11881 11857 * of 2 gig. This should last .
11882 11858 */
11883 11859
11884 11860 if (rval == -1) {
11885 11861 rval = MAX_INT_DMA;
11886 11862 }
11887 11863
11888 11864 break;
11889 11865
11890 11866 case SCSI_CAP_INITIATOR_ID:
11891 11867 rval = pptr->port_id;
11892 11868 break;
11893 11869
11894 11870 case SCSI_CAP_ARQ:
11895 11871 case SCSI_CAP_RESET_NOTIFICATION:
11896 11872 case SCSI_CAP_TAGGED_QING:
11897 11873 rval = TRUE;
11898 11874 break;
11899 11875
11900 11876 case SCSI_CAP_SCSI_VERSION:
11901 11877 rval = 3;
11902 11878 break;
11903 11879
11904 11880 case SCSI_CAP_INTERCONNECT_TYPE:
11905 11881 if (FC_TOP_EXTERNAL(pptr->port_topology) ||
11906 11882 (ptgt->tgt_hard_addr == 0)) {
11907 11883 rval = INTERCONNECT_FABRIC;
11908 11884 } else {
11909 11885 rval = INTERCONNECT_FIBRE;
11910 11886 }
11911 11887 break;
11912 11888
11913 11889 case SCSI_CAP_LUN_RESET:
11914 11890 rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
11915 11891 TRUE : FALSE;
11916 11892 break;
11917 11893
11918 11894 default:
11919 11895 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11920 11896 fcp_trace, FCP_BUF_LEVEL_4, 0,
11921 11897 "fcp_getcap: unsupported %d", cidx);
11922 11898 rval = UNDEFINED;
11923 11899 break;
11924 11900 }
11925 11901
11926 11902 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11927 11903 fcp_trace, FCP_BUF_LEVEL_8, 0,
11928 11904 "get cap: cap=%s, val/tgtonly/doset/rval = "
11929 11905 "0x%x/0x%x/0x%x/%d",
11930 11906 cap, val, tgtonly, doset, rval);
11931 11907 }
11932 11908
11933 11909 return (rval);
11934 11910 }
11935 11911
11936 11912 /*
11937 11913 * called by the transport to get the port-wwn and lun
11938 11914 * properties of this device, and to create a "name" based on them
11939 11915 *
11940 11916 * these properties don't exist on sun4m
11941 11917 *
11942 11918 * return 1 for success else return 0
11943 11919 */
11944 11920 /* ARGSUSED */
11945 11921 static int
11946 11922 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11947 11923 {
11948 11924 int i;
11949 11925 int *lun;
11950 11926 int numChars;
11951 11927 uint_t nlun;
11952 11928 uint_t count;
11953 11929 uint_t nbytes;
11954 11930 uchar_t *bytes;
11955 11931 uint16_t lun_num;
11956 11932 uint32_t tgt_id;
11957 11933 char **conf_wwn;
11958 11934 char tbuf[(FC_WWN_SIZE << 1) + 1];
11959 11935 uchar_t barray[FC_WWN_SIZE];
11960 11936 dev_info_t *tgt_dip;
11961 11937 struct fcp_tgt *ptgt;
11962 11938 struct fcp_port *pptr;
11963 11939 struct fcp_lun *plun;
11964 11940
11965 11941 ASSERT(sd != NULL);
11966 11942 ASSERT(name != NULL);
11967 11943
11968 11944 tgt_dip = sd->sd_dev;
11969 11945 pptr = ddi_get_soft_state(fcp_softstate,
11970 11946 ddi_get_instance(ddi_get_parent(tgt_dip)));
11971 11947 if (pptr == NULL) {
11972 11948 return (0);
11973 11949 }
11974 11950
11975 11951 ASSERT(tgt_dip != NULL);
11976 11952
11977 11953 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11978 11954 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11979 11955 LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11980 11956 name[0] = '\0';
11981 11957 return (0);
11982 11958 }
11983 11959
11984 11960 if (nlun == 0) {
11985 11961 ddi_prop_free(lun);
11986 11962 return (0);
11987 11963 }
11988 11964
11989 11965 lun_num = lun[0];
11990 11966 ddi_prop_free(lun);
11991 11967
11992 11968 /*
11993 11969 * Lookup for .conf WWN property
11994 11970 */
11995 11971 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
11996 11972 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
11997 11973 &conf_wwn, &count) == DDI_PROP_SUCCESS) {
11998 11974 ASSERT(count >= 1);
11999 11975
12000 11976 fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
12001 11977 ddi_prop_free(conf_wwn);
12002 11978 mutex_enter(&pptr->port_mutex);
12003 11979 if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
12004 11980 mutex_exit(&pptr->port_mutex);
12005 11981 return (0);
12006 11982 }
12007 11983 ptgt = plun->lun_tgt;
12008 11984 mutex_exit(&pptr->port_mutex);
12009 11985
12010 11986 (void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
12011 11987 tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
12012 11988
12013 11989 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12014 11990 ptgt->tgt_hard_addr != 0) {
12015 11991 tgt_id = (uint32_t)fcp_alpa_to_switch[
12016 11992 ptgt->tgt_hard_addr];
12017 11993 } else {
12018 11994 tgt_id = ptgt->tgt_d_id;
12019 11995 }
12020 11996
12021 11997 (void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
12022 11998 TARGET_PROP, tgt_id);
12023 11999 }
12024 12000
12025 12001 /* get the our port-wwn property */
12026 12002 bytes = NULL;
12027 12003 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
12028 12004 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12029 12005 &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
12030 12006 if (bytes != NULL) {
12031 12007 ddi_prop_free(bytes);
12032 12008 }
12033 12009 return (0);
12034 12010 }
12035 12011
12036 12012 for (i = 0; i < FC_WWN_SIZE; i++) {
12037 12013 (void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
12038 12014 }
12039 12015
12040 12016 /* Stick in the address of the form "wWWN,LUN" */
12041 12017 numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
12042 12018
12043 12019 ASSERT(numChars < len);
12044 12020 if (numChars >= len) {
12045 12021 fcp_log(CE_WARN, pptr->port_dip,
12046 12022 "!fcp_scsi_get_name: "
12047 12023 "name parameter length too small, it needs to be %d",
12048 12024 numChars+1);
12049 12025 }
12050 12026
12051 12027 ddi_prop_free(bytes);
12052 12028
12053 12029 return (1);
12054 12030 }
12055 12031
12056 12032
12057 12033 /*
12058 12034 * called by the transport to get the SCSI target id value, returning
12059 12035 * it in "name"
12060 12036 *
12061 12037 * this isn't needed/used on sun4m
12062 12038 *
12063 12039 * return 1 for success else return 0
12064 12040 */
12065 12041 /* ARGSUSED */
12066 12042 static int
12067 12043 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
12068 12044 {
12069 12045 struct fcp_lun *plun = ADDR2LUN(&sd->sd_address);
12070 12046 struct fcp_tgt *ptgt;
12071 12047 int numChars;
12072 12048
12073 12049 if (plun == NULL) {
12074 12050 return (0);
12075 12051 }
12076 12052
12077 12053 if ((ptgt = plun->lun_tgt) == NULL) {
12078 12054 return (0);
12079 12055 }
12080 12056
12081 12057 numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
12082 12058
12083 12059 ASSERT(numChars < len);
12084 12060 if (numChars >= len) {
12085 12061 fcp_log(CE_WARN, NULL,
12086 12062 "!fcp_scsi_get_bus_addr: "
12087 12063 "name parameter length too small, it needs to be %d",
12088 12064 numChars+1);
12089 12065 }
12090 12066
12091 12067 return (1);
12092 12068 }
12093 12069
12094 12070
/*
 * called internally to reset the link where the specified port lives
 *
 * pptr:  port whose link is to be reset
 * ap:    optional SCSI address; when non-NULL on an external (fabric/
 *        public loop) topology, the reset is directed at that target's
 *        port WWN, otherwise a local link reset (zeroed WWN) is issued
 * sleep: allocation/sleep flag passed through to fc_ulp_linkreset()
 *
 * Returns FC_SUCCESS/FC_FAILURE (or whatever fc_ulp_linkreset() returns).
 */
static int
fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
{
	la_wwn_t	wwn;
	struct fcp_lun	*plun;
	struct fcp_tgt	*ptgt;

	/* disable restart of lip if we're suspended */
	mutex_enter(&pptr->port_mutex);

	if (pptr->port_state & (FCP_STATE_SUSPENDED |
	    FCP_STATE_POWER_DOWN)) {
		/* suspended/powered-down: refuse to disturb the link */
		mutex_exit(&pptr->port_mutex);
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_linkreset, fcp%d: link reset "
		    "disabled due to DDI_SUSPEND",
		    ddi_get_instance(pptr->port_dip));
		return (FC_FAILURE);
	}

	/*
	 * Port is offline or already coming online; a reset would be
	 * redundant, so report success without doing anything.
	 */
	if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
		mutex_exit(&pptr->port_mutex);
		return (FC_SUCCESS);
	}

	FCP_DTRACE(fcp_logq, pptr->port_instbuf,
	    fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");

	/*
	 * If ap == NULL assume local link reset.
	 */
	if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
		/* direct the reset at the addressed target's port WWN */
		plun = ADDR2LUN(ap);
		ptgt = plun->lun_tgt;
		bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
	} else {
		/* zeroed WWN requests a local link reset */
		bzero((caddr_t)&wwn, sizeof (wwn));
	}
	mutex_exit(&pptr->port_mutex);

	return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
}
12141 12117
12142 12118
/*
 * called from fcp_port_attach() to resume a port
 * return DDI_* success/failure status
 * acquires and releases the global mutex
 * acquires and releases the port mutex
 *
 * cmd is either FC_CMD_RESUME (DDI_RESUME) or FC_CMD_POWER_UP; after the
 * state flags are adjusted the port is re-initialized and, if the link is
 * up, device discovery is re-driven via fcp_statec_callback().
 */
/*ARGSUSED*/

static int
fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
    uint32_t s_id, fc_attach_cmd_t cmd, int instance)
{
	int		res = DDI_FAILURE; /* default result */
	struct fcp_port	*pptr; /* port state ptr */
	uint32_t	alloc_cnt;
	uint32_t	max_cnt;
	fc_portmap_t	*tmp_list = NULL;

	FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
	    FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
	    instance);

	if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
		cmn_err(CE_WARN, "fcp: bad soft state");
		return (res);
	}

	mutex_enter(&pptr->port_mutex);
	switch (cmd) {
	case FC_CMD_RESUME:
		/* resume must not race a pending power-down */
		ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
		pptr->port_state &= ~FCP_STATE_SUSPENDED;
		break;

	case FC_CMD_POWER_UP:
		/*
		 * If the port is DDI_SUSPENded, defer rediscovery
		 * until DDI_RESUME occurs
		 */
		if (pptr->port_state & FCP_STATE_SUSPENDED) {
			pptr->port_state &= ~FCP_STATE_POWER_DOWN;
			mutex_exit(&pptr->port_mutex);
			return (DDI_SUCCESS);
		}
		pptr->port_state &= ~FCP_STATE_POWER_DOWN;
		/* last case: falls out of the switch to common re-init */
	}
	/* reset the port to a clean initial state with the new S_ID */
	pptr->port_id = s_id;
	pptr->port_state = FCP_STATE_INIT;
	mutex_exit(&pptr->port_mutex);

	/*
	 * Make a copy of ulp_port_info as fctl allocates
	 * a temp struct.
	 */
	(void) fcp_cp_pinfo(pptr, pinfo);

	/* first resumed port restarts the driver-wide watchdog timer */
	mutex_enter(&fcp_global_mutex);
	if (fcp_watchdog_init++ == 0) {
		fcp_watchdog_tick = fcp_watchdog_timeout *
		    drv_usectohz(1000000);
		fcp_watchdog_id = timeout(fcp_watch,
		    NULL, fcp_watchdog_tick);
	}
	mutex_exit(&fcp_global_mutex);

	/*
	 * Handle various topologies and link states.
	 */
	switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
	case FC_STATE_OFFLINE:
		/*
		 * Wait for ONLINE, at which time a state
		 * change will cause a statec_callback
		 */
		res = DDI_SUCCESS;
		break;

	case FC_STATE_ONLINE:

		if (pptr->port_topology == FC_TOP_UNKNOWN) {
			/* kick the link so the topology gets resolved */
			(void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
			res = DDI_SUCCESS;
			break;
		}

		if (FC_TOP_EXTERNAL(pptr->port_topology) &&
		    !fcp_enable_auto_configuration) {
			/* build the map from the driver.conf bindings */
			tmp_list = fcp_construct_map(pptr, &alloc_cnt);
			if (tmp_list == NULL) {
				if (!alloc_cnt) {
					/* empty map is not an error */
					res = DDI_SUCCESS;
				}
				break;
			}
			max_cnt = alloc_cnt;
		} else {
			ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);

			alloc_cnt = FCP_MAX_DEVICES;

			if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
			    (sizeof (fc_portmap_t)) * alloc_cnt,
			    KM_NOSLEEP)) == NULL) {
				fcp_log(CE_WARN, pptr->port_dip,
				    "!fcp%d: failed to allocate portmap",
				    instance);
				break;
			}

			/* ask the transport for the current port map */
			max_cnt = alloc_cnt;
			if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
			    &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
			    FC_SUCCESS) {
				caddr_t msg;

				(void) fc_ulp_error(res, &msg);

				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_2, 0,
				    "resume failed getportmap: reason=0x%x",
				    res);

				fcp_log(CE_WARN, pptr->port_dip,
				    "!failed to get port map : %s", msg);
				break;
			}
			/*
			 * fc_ulp_getportmap() may have reallocated the
			 * list; remember the larger size for kmem_free.
			 */
			if (max_cnt > alloc_cnt) {
				alloc_cnt = max_cnt;
			}
		}

		/*
		 * do the SCSI device discovery and create
		 * the devinfos
		 */
		fcp_statec_callback(ulph, pptr->port_fp_handle,
		    pptr->port_phys_state, pptr->port_topology, tmp_list,
		    max_cnt, pptr->port_id);

		res = DDI_SUCCESS;
		break;

	default:
		/* unexpected physical state: force OFFLINE and carry on */
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: invalid port state at attach=0x%x",
		    instance, pptr->port_phys_state);

		mutex_enter(&pptr->port_mutex);
		pptr->port_phys_state = FCP_STATE_OFFLINE;
		mutex_exit(&pptr->port_mutex);
		res = DDI_SUCCESS;

		break;
	}

	if (tmp_list != NULL) {
		kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
	}

	return (res);
}
12304 12280
12305 12281
/*
 * fcp_cp_pinfo
 *	Copy the fields of the fc_ulp_port_info_t handed up by fctl into
 *	the per-port soft state.  fctl passes a temporary structure, so
 *	everything needed later must be copied out here.
 */
static void
fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
{
	pptr->port_fp_modlinkage = *pinfo->port_linkage;
	pptr->port_dip = pinfo->port_dip;
	pptr->port_fp_handle = pinfo->port_handle;
	if (pinfo->port_acc_attr != NULL) {
		/*
		 * FCA supports DMA
		 */
		pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
		pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
		pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
		pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
	}
	pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
	pptr->port_max_exch = pinfo->port_fca_max_exch;
	pptr->port_phys_state = pinfo->port_state;
	pptr->port_topology = pinfo->port_flags;
	pptr->port_reset_action = pinfo->port_reset_action;
	pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
	pptr->port_fcp_dma = pinfo->port_fcp_dma;
	bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
	bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));

	/* Clear FMA caps to avoid fm-capability ereport */
	if (pptr->port_cmd_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
		pptr->port_cmd_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
	if (pptr->port_data_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
		pptr->port_data_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
	if (pptr->port_resp_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
		pptr->port_resp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
}
12339 12315
12340 12316 /*
12341 12317 * If the elements wait field is set to 1 then
12342 12318 * another thread is waiting for the operation to complete. Once
12343 12319 * it is complete, the waiting thread is signaled and the element is
12344 12320 * freed by the waiting thread. If the elements wait field is set to 0
12345 12321 * the element is freed.
12346 12322 */
12347 12323 static void
12348 12324 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12349 12325 {
12350 12326 ASSERT(elem != NULL);
12351 12327 mutex_enter(&elem->mutex);
12352 12328 elem->result = result;
12353 12329 if (elem->wait) {
12354 12330 elem->wait = 0;
12355 12331 cv_signal(&elem->cv);
12356 12332 mutex_exit(&elem->mutex);
12357 12333 } else {
12358 12334 mutex_exit(&elem->mutex);
12359 12335 cv_destroy(&elem->cv);
12360 12336 mutex_destroy(&elem->mutex);
12361 12337 kmem_free(elem, sizeof (struct fcp_hp_elem));
12362 12338 }
12363 12339 }
12364 12340
12365 12341 /*
12366 12342 * This function is invoked from the taskq thread to allocate
12367 12343 * devinfo nodes and to online/offline them.
12368 12344 */
12369 12345 static void
12370 12346 fcp_hp_task(void *arg)
12371 12347 {
12372 12348 struct fcp_hp_elem *elem = (struct fcp_hp_elem *)arg;
12373 12349 struct fcp_lun *plun = elem->lun;
12374 12350 struct fcp_port *pptr = elem->port;
12375 12351 int result;
12376 12352
12377 12353 ASSERT(elem->what == FCP_ONLINE ||
12378 12354 elem->what == FCP_OFFLINE ||
12379 12355 elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
12380 12356 elem->what == FCP_MPXIO_PATH_SET_BUSY);
12381 12357
12382 12358 mutex_enter(&pptr->port_mutex);
12383 12359 mutex_enter(&plun->lun_mutex);
12384 12360 if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
12385 12361 plun->lun_event_count != elem->event_cnt) ||
12386 12362 pptr->port_state & (FCP_STATE_SUSPENDED |
12387 12363 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
12388 12364 mutex_exit(&plun->lun_mutex);
12389 12365 mutex_exit(&pptr->port_mutex);
12390 12366 fcp_process_elem(elem, NDI_FAILURE);
12391 12367 return;
12392 12368 }
12393 12369 mutex_exit(&plun->lun_mutex);
12394 12370 mutex_exit(&pptr->port_mutex);
12395 12371
12396 12372 result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
12397 12373 elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
12398 12374 fcp_process_elem(elem, result);
12399 12375 }
12400 12376
12401 12377
12402 12378 static child_info_t *
12403 12379 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12404 12380 int tcount)
12405 12381 {
12406 12382 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12407 12383
12408 12384 if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12409 12385 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12410 12386
12411 12387 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12412 12388 /*
12413 12389 * Child has not been created yet. Create the child device
12414 12390 * based on the per-Lun flags.
12415 12391 */
12416 12392 if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12417 12393 plun->lun_cip =
12418 12394 CIP(fcp_create_dip(plun, lcount, tcount));
12419 12395 plun->lun_mpxio = 0;
12420 12396 } else {
12421 12397 plun->lun_cip =
12422 12398 CIP(fcp_create_pip(plun, lcount, tcount));
12423 12399 plun->lun_mpxio = 1;
12424 12400 }
12425 12401 } else {
12426 12402 plun->lun_cip = cip;
12427 12403 }
12428 12404
12429 12405 return (plun->lun_cip);
12430 12406 }
12431 12407
12432 12408
12433 12409 static int
12434 12410 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12435 12411 {
12436 12412 int rval = FC_FAILURE;
12437 12413 dev_info_t *pdip;
12438 12414 struct dev_info *dip;
12439 12415 int circular;
12440 12416
12441 12417 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12442 12418
12443 12419 pdip = plun->lun_tgt->tgt_port->port_dip;
12444 12420
12445 12421 if (plun->lun_cip == NULL) {
12446 12422 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12447 12423 fcp_trace, FCP_BUF_LEVEL_3, 0,
12448 12424 "fcp_is_dip_present: plun->lun_cip is NULL: "
12449 12425 "plun: %p lun state: %x num: %d target state: %x",
12450 12426 plun, plun->lun_state, plun->lun_num,
12451 12427 plun->lun_tgt->tgt_port->port_state);
12452 12428 return (rval);
12453 12429 }
12454 12430 ndi_devi_enter(pdip, &circular);
12455 12431 dip = DEVI(pdip)->devi_child;
12456 12432 while (dip) {
12457 12433 if (dip == DEVI(cdip)) {
12458 12434 rval = FC_SUCCESS;
12459 12435 break;
12460 12436 }
12461 12437 dip = dip->devi_sibling;
12462 12438 }
12463 12439 ndi_devi_exit(pdip, circular);
12464 12440 return (rval);
12465 12441 }
12466 12442
12467 12443 static int
12468 12444 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12469 12445 {
12470 12446 int rval = FC_FAILURE;
12471 12447
12472 12448 ASSERT(plun != NULL);
12473 12449 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12474 12450
12475 12451 if (plun->lun_mpxio == 0) {
12476 12452 rval = fcp_is_dip_present(plun, DIP(cip));
12477 12453 } else {
12478 12454 rval = fcp_is_pip_present(plun, PIP(cip));
12479 12455 }
12480 12456
12481 12457 return (rval);
12482 12458 }
12483 12459
/*
 * Function: fcp_create_dip
 *
 * Description: Creates a dev_info_t structure for the LUN specified by the
 *		caller.
 *
 * Argument:	plun		Lun structure
 *		link_cnt	Link state count.
 *		tgt_cnt		Target state change count.
 *
 * Return Value: NULL if it failed
 *		 dev_info_t structure address if it succeeded
 *
 * Context: Kernel context
 *
 * Called with both the lun mutex and the port mutex held; both are
 * temporarily dropped while offlining a stale node below.
 */
static dev_info_t *
fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
{
	int			failure = 0;
	uint32_t		tgt_id;
	uint64_t		sam_lun;
	struct fcp_tgt		*ptgt = plun->lun_tgt;
	struct fcp_port		*pptr = ptgt->tgt_port;
	dev_info_t		*pdip = pptr->port_dip;
	dev_info_t		*cdip = NULL;
	dev_info_t		*old_dip = DIP(plun->lun_cip);
	char			*nname = NULL;
	char			**compatible = NULL;
	int			ncompatible;
	char			*scsi_binding_set;
	char			t_pwwn[17];

	ASSERT(MUTEX_HELD(&plun->lun_mutex));
	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	/* get the 'scsi-binding-set' property */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
	    &scsi_binding_set) != DDI_PROP_SUCCESS) {
		scsi_binding_set = NULL;
	}

	/* determine the node name and compatible */
	scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
	    plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
	if (scsi_binding_set) {
		ddi_prop_free(scsi_binding_set);
	}

	/* no node name means no driver binding exists for this device */
	if (nname == NULL) {
#ifdef DEBUG
		cmn_err(CE_WARN, "%s%d: no driver for "
		    "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
		    " compatible: %s",
		    ddi_driver_name(pdip), ddi_get_instance(pdip),
		    ptgt->tgt_port_wwn.raw_wwn[0],
		    ptgt->tgt_port_wwn.raw_wwn[1],
		    ptgt->tgt_port_wwn.raw_wwn[2],
		    ptgt->tgt_port_wwn.raw_wwn[3],
		    ptgt->tgt_port_wwn.raw_wwn[4],
		    ptgt->tgt_port_wwn.raw_wwn[5],
		    ptgt->tgt_port_wwn.raw_wwn[6],
		    ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
		    *compatible);
#endif /* DEBUG */
		failure++;
		goto end_of_fcp_create_dip;
	}

	cdip = fcp_find_existing_dip(plun, pdip, nname);

	/*
	 * if the old_dip does not match the cdip, that means there is
	 * some property change. since we'll be using the cdip, we need
	 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
	 * then the dtype for the device has been updated. Offline the
	 * the old device and create a new device with the new device type
	 * Refer to bug: 4764752
	 */
	if (old_dip && (cdip != old_dip ||
	    plun->lun_state & FCP_LUN_CHANGED)) {
		plun->lun_state &= ~(FCP_LUN_INIT);
		/*
		 * Drop both locks around fcp_pass_to_hp() since the
		 * hotplug path takes them in its own order.
		 */
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		mutex_enter(&ptgt->tgt_mutex);
		(void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
		    link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
		mutex_exit(&ptgt->tgt_mutex);

#ifdef DEBUG
		if (cdip != NULL) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "Old dip=%p; New dip=%p don't match", old_dip,
			    cdip);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "Old dip=%p; New dip=NULL don't match", old_dip);
		}
#endif

		mutex_enter(&pptr->port_mutex);
		mutex_enter(&plun->lun_mutex);
	}

	/* allocate a fresh node when none exists or the device changed */
	if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
		plun->lun_state &= ~(FCP_LUN_CHANGED);
		if (ndi_devi_alloc(pptr->port_dip, nname,
		    DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
			failure++;
			goto end_of_fcp_create_dip;
		}
	}

	/*
	 * Previously all the properties for the devinfo were destroyed here
	 * with a call to ndi_prop_remove_all(). Since this may cause loss of
	 * the devid property (and other properties established by the target
	 * driver or framework) which the code does not always recreate, this
	 * call was removed.
	 * This opens a theoretical possibility that we may return with a
	 * stale devid on the node if the scsi entity behind the fibre channel
	 * lun has changed.
	 */

	/* decorate the node with compatible */
	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
	    ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
	    ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	/* target port WWN also goes on as a NUL-terminated hex string */
	fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
	t_pwwn[16] = '\0';
	if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
	    != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	/*
	 * If there is no hard address - We might have to deal with
	 * that by using WWN - Having said that it is important to
	 * recognize this problem early so ssd can be informed of
	 * the right interconnect type.
	 */
	if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
		tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
	} else {
		tgt_id = ptgt->tgt_d_id;
	}

	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
	    tgt_id) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

	if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
	    (int)plun->lun_num) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}
	/* SAM LUN is the 8-byte FCP LUN address packed into an int64 */
	bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
	if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
	    sam_lun) != DDI_PROP_SUCCESS) {
		failure++;
		goto end_of_fcp_create_dip;
	}

end_of_fcp_create_dip:
	scsi_hba_nodename_compatible_free(nname, compatible);

	/* on any failure, tear down whatever node was built */
	if (cdip != NULL && failure) {
		(void) ndi_prop_remove_all(cdip);
		(void) ndi_devi_free(cdip);
		cdip = NULL;
	}

	return (cdip);
}
12679 12655
12680 12656 /*
12681 12657 * Function: fcp_create_pip
12682 12658 *
12683 12659 * Description: Creates a Path Id for the LUN specified by the caller.
12684 12660 *
12685 12661 * Argument: plun Lun structure
12686 12662 * link_cnt Link state count.
12687 12663 * tgt_cnt Target state count.
12688 12664 *
12689 12665 * Return Value: NULL if it failed
12690 12666 * mdi_pathinfo_t structure address if it succeeded
12691 12667 *
12692 12668 * Context: Kernel context
12693 12669 */
12694 12670 static mdi_pathinfo_t *
12695 12671 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12696 12672 {
12697 12673 int i;
12698 12674 char buf[MAXNAMELEN];
12699 12675 char uaddr[MAXNAMELEN];
12700 12676 int failure = 0;
12701 12677 uint32_t tgt_id;
12702 12678 uint64_t sam_lun;
12703 12679 struct fcp_tgt *ptgt = plun->lun_tgt;
12704 12680 struct fcp_port *pptr = ptgt->tgt_port;
12705 12681 dev_info_t *pdip = pptr->port_dip;
12706 12682 mdi_pathinfo_t *pip = NULL;
12707 12683 mdi_pathinfo_t *old_pip = PIP(plun->lun_cip);
12708 12684 char *nname = NULL;
12709 12685 char **compatible = NULL;
12710 12686 int ncompatible;
12711 12687 char *scsi_binding_set;
12712 12688 char t_pwwn[17];
12713 12689
12714 12690 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12715 12691 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12716 12692
12717 12693 scsi_binding_set = "vhci";
12718 12694
12719 12695 /* determine the node name and compatible */
12720 12696 scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12721 12697 plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12722 12698
12723 12699 if (nname == NULL) {
12724 12700 #ifdef DEBUG
12725 12701 cmn_err(CE_WARN, "fcp_create_dip: %s%d: no driver for "
12726 12702 "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12727 12703 " compatible: %s",
12728 12704 ddi_driver_name(pdip), ddi_get_instance(pdip),
12729 12705 ptgt->tgt_port_wwn.raw_wwn[0],
12730 12706 ptgt->tgt_port_wwn.raw_wwn[1],
12731 12707 ptgt->tgt_port_wwn.raw_wwn[2],
12732 12708 ptgt->tgt_port_wwn.raw_wwn[3],
12733 12709 ptgt->tgt_port_wwn.raw_wwn[4],
12734 12710 ptgt->tgt_port_wwn.raw_wwn[5],
12735 12711 ptgt->tgt_port_wwn.raw_wwn[6],
12736 12712 ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12737 12713 *compatible);
12738 12714 #endif /* DEBUG */
12739 12715 failure++;
12740 12716 goto end_of_fcp_create_pip;
12741 12717 }
12742 12718
12743 12719 pip = fcp_find_existing_pip(plun, pdip);
12744 12720
12745 12721 /*
12746 12722 * if the old_dip does not match the cdip, that means there is
12747 12723 * some property change. since we'll be using the cdip, we need
12748 12724 * to offline the old_dip. If the state contains FCP_LUN_CHANGED
12749 12725 * then the dtype for the device has been updated. Offline the
12750 12726 * the old device and create a new device with the new device type
12751 12727 * Refer to bug: 4764752
12752 12728 */
12753 12729 if (old_pip && (pip != old_pip ||
12754 12730 plun->lun_state & FCP_LUN_CHANGED)) {
12755 12731 plun->lun_state &= ~(FCP_LUN_INIT);
12756 12732 mutex_exit(&plun->lun_mutex);
12757 12733 mutex_exit(&pptr->port_mutex);
12758 12734
12759 12735 mutex_enter(&ptgt->tgt_mutex);
12760 12736 (void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12761 12737 FCP_OFFLINE, lcount, tcount,
12762 12738 NDI_DEVI_REMOVE, 0);
12763 12739 mutex_exit(&ptgt->tgt_mutex);
12764 12740
12765 12741 if (pip != NULL) {
12766 12742 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12767 12743 fcp_trace, FCP_BUF_LEVEL_2, 0,
12768 12744 "Old pip=%p; New pip=%p don't match",
12769 12745 old_pip, pip);
12770 12746 } else {
12771 12747 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12772 12748 fcp_trace, FCP_BUF_LEVEL_2, 0,
12773 12749 "Old pip=%p; New pip=NULL don't match",
12774 12750 old_pip);
12775 12751 }
12776 12752
12777 12753 mutex_enter(&pptr->port_mutex);
12778 12754 mutex_enter(&plun->lun_mutex);
12779 12755 }
12780 12756
12781 12757 /*
12782 12758 * Since FC_WWN_SIZE is 8 bytes and its not like the
12783 12759 * lun_guid_size which is dependent on the target, I don't
12784 12760 * believe the same trancation happens here UNLESS the standards
12785 12761 * change the FC_WWN_SIZE value to something larger than
12786 12762 * MAXNAMELEN(currently 255 bytes).
12787 12763 */
12788 12764
12789 12765 for (i = 0; i < FC_WWN_SIZE; i++) {
12790 12766 (void) sprintf(&buf[i << 1], "%02x",
12791 12767 ptgt->tgt_port_wwn.raw_wwn[i]);
12792 12768 }
12793 12769
12794 12770 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12795 12771 buf, plun->lun_num);
12796 12772
12797 12773 if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12798 12774 /*
12799 12775 * Release the locks before calling into
12800 12776 * mdi_pi_alloc_compatible() since this can result in a
12801 12777 * callback into fcp which can result in a deadlock
12802 12778 * (see bug # 4870272).
12803 12779 *
12804 12780 * Basically, what we are trying to avoid is the scenario where
12805 12781 * one thread does ndi_devi_enter() and tries to grab
12806 12782 * fcp_mutex and another does it the other way round.
12807 12783 *
12808 12784 * But before we do that, make sure that nobody releases the
12809 12785 * port in the meantime. We can do this by setting a flag.
12810 12786 */
12811 12787 plun->lun_state &= ~(FCP_LUN_CHANGED);
12812 12788 pptr->port_state |= FCP_STATE_IN_MDI;
12813 12789 mutex_exit(&plun->lun_mutex);
12814 12790 mutex_exit(&pptr->port_mutex);
12815 12791 if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12816 12792 uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12817 12793 fcp_log(CE_WARN, pptr->port_dip,
12818 12794 "!path alloc failed:0x%x", plun);
12819 12795 mutex_enter(&pptr->port_mutex);
12820 12796 mutex_enter(&plun->lun_mutex);
12821 12797 pptr->port_state &= ~FCP_STATE_IN_MDI;
12822 12798 failure++;
12823 12799 goto end_of_fcp_create_pip;
12824 12800 }
12825 12801 mutex_enter(&pptr->port_mutex);
12826 12802 mutex_enter(&plun->lun_mutex);
12827 12803 pptr->port_state &= ~FCP_STATE_IN_MDI;
12828 12804 } else {
12829 12805 (void) mdi_prop_remove(pip, NULL);
12830 12806 }
12831 12807
12832 12808 mdi_pi_set_phci_private(pip, (caddr_t)plun);
12833 12809
12834 12810 if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12835 12811 ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12836 12812 != DDI_PROP_SUCCESS) {
12837 12813 failure++;
12838 12814 goto end_of_fcp_create_pip;
12839 12815 }
12840 12816
12841 12817 if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12842 12818 ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12843 12819 != DDI_PROP_SUCCESS) {
12844 12820 failure++;
12845 12821 goto end_of_fcp_create_pip;
12846 12822 }
12847 12823
12848 12824 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12849 12825 t_pwwn[16] = '\0';
12850 12826 if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12851 12827 != DDI_PROP_SUCCESS) {
12852 12828 failure++;
12853 12829 goto end_of_fcp_create_pip;
12854 12830 }
12855 12831
12856 12832 /*
12857 12833 * If there is no hard address - We might have to deal with
12858 12834 * that by using WWN - Having said that it is important to
12859 12835 * recognize this problem early so ssd can be informed of
12860 12836 * the right interconnect type.
12861 12837 */
12862 12838 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12863 12839 ptgt->tgt_hard_addr != 0) {
12864 12840 tgt_id = (uint32_t)
12865 12841 fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12866 12842 } else {
12867 12843 tgt_id = ptgt->tgt_d_id;
12868 12844 }
12869 12845
12870 12846 if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12871 12847 != DDI_PROP_SUCCESS) {
12872 12848 failure++;
12873 12849 goto end_of_fcp_create_pip;
12874 12850 }
12875 12851
12876 12852 if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12877 12853 != DDI_PROP_SUCCESS) {
12878 12854 failure++;
12879 12855 goto end_of_fcp_create_pip;
12880 12856 }
12881 12857 bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12882 12858 if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12883 12859 != DDI_PROP_SUCCESS) {
12884 12860 failure++;
12885 12861 goto end_of_fcp_create_pip;
12886 12862 }
12887 12863
12888 12864 end_of_fcp_create_pip:
12889 12865 scsi_hba_nodename_compatible_free(nname, compatible);
12890 12866
12891 12867 if (pip != NULL && failure) {
12892 12868 (void) mdi_prop_remove(pip, NULL);
12893 12869 mutex_exit(&plun->lun_mutex);
12894 12870 mutex_exit(&pptr->port_mutex);
12895 12871 (void) mdi_pi_free(pip, 0);
12896 12872 mutex_enter(&pptr->port_mutex);
12897 12873 mutex_enter(&plun->lun_mutex);
12898 12874 pip = NULL;
12899 12875 }
12900 12876
12901 12877 return (pip);
12902 12878 }
12903 12879
/*
 * fcp_find_existing_dip
 *	Walk the existing children of pdip looking for a devinfo node that
 *	matches this LUN, so an existing node can be reused instead of
 *	creating a duplicate.  A child matches when all of the following
 *	properties agree with the LUN/target: node name, node WWN,
 *	port WWN, target id and LUN number.
 *
 *	Argument: plun	LUN being looked up.
 *		  pdip	parent devinfo under which to search.
 *		  name	node name the child must carry.
 *
 *	Return Value: matching child dev_info_t, or NULL if none found.
 *
 *	Context: acquires/releases the parent's devi lock via
 *	ndi_devi_enter/ndi_devi_exit for the duration of the walk.
 */
static dev_info_t *
fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
{
	uint_t nbytes;
	uchar_t *bytes;
	uint_t nwords;
	uint32_t tgt_id;
	int *words;
	dev_info_t *cdip;
	dev_info_t *ndip;
	struct fcp_tgt *ptgt = plun->lun_tgt;
	struct fcp_port *pptr = ptgt->tgt_port;
	int circular;

	ndi_devi_enter(pdip, &circular);

	/* Snapshot the next sibling first: cdip may not match and be skipped */
	ndip = (dev_info_t *)DEVI(pdip)->devi_child;
	while ((cdip = ndip) != NULL) {
		ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;

		if (strcmp(DEVI(cdip)->devi_node_name, name)) {
			continue;
		}

		/* Compare the node WWN property against the target's. */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
		    &nbytes) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
			if (bytes != NULL) {
				ddi_prop_free(bytes);
			}
			continue;
		}
		ASSERT(bytes != NULL);

		if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
			ddi_prop_free(bytes);
			continue;
		}

		ddi_prop_free(bytes);

		/* Compare the port WWN property against the target's. */
		if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
		    &nbytes) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nbytes != FC_WWN_SIZE || bytes == NULL) {
			if (bytes != NULL) {
				ddi_prop_free(bytes);
			}
			continue;
		}
		ASSERT(bytes != NULL);

		if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
			ddi_prop_free(bytes);
			continue;
		}

		ddi_prop_free(bytes);

		/* Compare the target id property. */
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
		    &nwords) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nwords != 1 || words == NULL) {
			if (words != NULL) {
				ddi_prop_free(words);
			}
			continue;
		}
		ASSERT(words != NULL);

		/*
		 * If there is no hard address - We might have to deal with
		 * that by using WWN - Having said that it is important to
		 * recognize this problem early so ssd can be informed of
		 * the right interconnect type.
		 */
		if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
		    ptgt->tgt_hard_addr != 0) {
			/* Private loop with a hard address: use the ALPA map */
			tgt_id =
			    (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
		} else {
			tgt_id = ptgt->tgt_d_id;
		}

		if (tgt_id != (uint32_t)*words) {
			ddi_prop_free(words);
			continue;
		}
		ddi_prop_free(words);

		/* Finally compare the LUN number property. */
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
		    &nwords) != DDI_PROP_SUCCESS) {
			continue;
		}

		if (nwords != 1 || words == NULL) {
			if (words != NULL) {
				ddi_prop_free(words);
			}
			continue;
		}
		ASSERT(words != NULL);

		if (plun->lun_num == (uint16_t)*words) {
			/* Full match; cdip is the node we want. */
			ddi_prop_free(words);
			break;
		}
		ddi_prop_free(words);
	}
	ndi_devi_exit(pdip, circular);

	/* cdip is NULL when the walk exhausted all children without a match */
	return (cdip);
}
13028 13004
13029 13005
13030 13006 static int
13031 13007 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
13032 13008 {
13033 13009 dev_info_t *pdip;
13034 13010 char buf[MAXNAMELEN];
13035 13011 char uaddr[MAXNAMELEN];
13036 13012 int rval = FC_FAILURE;
13037 13013
13038 13014 ASSERT(MUTEX_HELD(&plun->lun_mutex));
13039 13015
13040 13016 pdip = plun->lun_tgt->tgt_port->port_dip;
13041 13017
13042 13018 /*
13043 13019 * Check if pip (and not plun->lun_cip) is NULL. plun->lun_cip can be
13044 13020 * non-NULL even when the LUN is not there as in the case when a LUN is
13045 13021 * configured and then deleted on the device end (for T3/T4 case). In
13046 13022 * such cases, pip will be NULL.
13047 13023 *
13048 13024 * If the device generates an RSCN, it will end up getting offlined when
13049 13025 * it disappeared and a new LUN will get created when it is rediscovered
13050 13026 * on the device. If we check for lun_cip here, the LUN will not end
13051 13027 * up getting onlined since this function will end up returning a
13052 13028 * FC_SUCCESS.
13053 13029 *
13054 13030 * The behavior is different on other devices. For instance, on a HDS,
13055 13031 * there was no RSCN generated by the device but the next I/O generated
13056 13032 * a check condition and rediscovery got triggered that way. So, in
13057 13033 * such cases, this path will not be exercised
13058 13034 */
13059 13035 if (pip == NULL) {
13060 13036 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
13061 13037 fcp_trace, FCP_BUF_LEVEL_4, 0,
13062 13038 "fcp_is_pip_present: plun->lun_cip is NULL: "
13063 13039 "plun: %p lun state: %x num: %d target state: %x",
13064 13040 plun, plun->lun_state, plun->lun_num,
13065 13041 plun->lun_tgt->tgt_port->port_state);
13066 13042 return (rval);
13067 13043 }
13068 13044
13069 13045 fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
13070 13046
13071 13047 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13072 13048
13073 13049 if (plun->lun_old_guid) {
13074 13050 if (mdi_pi_find(pdip, plun->lun_old_guid, uaddr) == pip) {
13075 13051 rval = FC_SUCCESS;
13076 13052 }
13077 13053 } else {
13078 13054 if (mdi_pi_find(pdip, plun->lun_guid, uaddr) == pip) {
13079 13055 rval = FC_SUCCESS;
13080 13056 }
13081 13057 }
13082 13058 return (rval);
13083 13059 }
13084 13060
13085 13061 static mdi_pathinfo_t *
13086 13062 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
13087 13063 {
13088 13064 char buf[MAXNAMELEN];
13089 13065 char uaddr[MAXNAMELEN];
13090 13066 mdi_pathinfo_t *pip;
13091 13067 struct fcp_tgt *ptgt = plun->lun_tgt;
13092 13068 struct fcp_port *pptr = ptgt->tgt_port;
13093 13069
13094 13070 ASSERT(MUTEX_HELD(&pptr->port_mutex));
13095 13071
13096 13072 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
13097 13073 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13098 13074
13099 13075 pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
13100 13076
13101 13077 return (pip);
13102 13078 }
13103 13079
13104 13080
/*
 * fcp_online_child
 *	Online the child node (devinfo or pathinfo) of a LUN.  For
 *	non-MPxIO LUNs this binds/onlines the devinfo node; for MPxIO
 *	LUNs it onlines the pathinfo node, falling back to legacy
 *	(non-MPxIO) enumeration if MPxIO reports MDI_NOT_SUPPORTED.
 *
 *	Argument: plun		LUN whose child is to be onlined.
 *		  cip		child node (DIP or PIP, per lun_mpxio).
 *		  lcount	link state change count.
 *		  tcount	target state change count.
 *		  flags		NDI/MDI online flags.
 *		  circ		phci circular-dependency cookie, needed to
 *				exit/re-enter the phci around mdi_pi_online.
 *
 *	Return Value: NDI_SUCCESS or NDI_FAILURE.
 *
 *	Context: called (and returns) with port_mutex and lun_mutex held;
 *	both are dropped around the online calls and re-acquired before
 *	returning.
 */
static int
fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags, int *circ)
{
	int rval;
	struct fcp_port *pptr = plun->lun_tgt->tgt_port;
	struct fcp_tgt *ptgt = plun->lun_tgt;
	dev_info_t *cdip = NULL;

	ASSERT(MUTEX_HELD(&pptr->port_mutex));
	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	if (plun->lun_cip == NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_online_child: plun->lun_cip is NULL: "
		    "plun: %p state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (NDI_FAILURE);
	}
again:
	if (plun->lun_mpxio == 0) {
		/* Legacy (non-MPxIO) path: online the devinfo node. */
		cdip = DIP(cip);
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "!Invoking ndi_devi_online for %s: target=%x lun=%x",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);

		/*
		 * We could check for FCP_LUN_INIT here but chances
		 * of getting here when it's already in FCP_LUN_INIT
		 * is rare and a duplicate ndi_devi_online wouldn't
		 * hurt either (as the node would already have been
		 * in CF2)
		 */
		if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
			/* Parent not attached yet: bind only, don't online */
			rval = ndi_devi_bind_driver(cdip, flags);
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "!Invoking ndi_devi_bind_driver: rval=%d", rval);
		} else {
			rval = ndi_devi_online(cdip, flags);
		}

		/*
		 * We log the message into trace buffer if the device
		 * is "ses" and into syslog for any other device
		 * type. This is to prevent the ndi_devi_online failure
		 * message that appears for V880/A5K ses devices.
		 */
		if (rval == NDI_SUCCESS) {
			mutex_enter(&ptgt->tgt_mutex);
			plun->lun_state |= FCP_LUN_INIT;
			mutex_exit(&ptgt->tgt_mutex);
		} else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
			fcp_log(CE_NOTE, pptr->port_dip,
			    "!ndi_devi_online:"
			    " failed for %s: target=%x lun=%x %x",
			    ddi_get_name(cdip), ptgt->tgt_d_id,
			    plun->lun_num, rval);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    " !ndi_devi_online:"
			    " failed for %s: target=%x lun=%x %x",
			    ddi_get_name(cdip), ptgt->tgt_d_id,
			    plun->lun_num, rval);
		}
	} else {
		/* MPxIO path: online the pathinfo node. */
		cdip = mdi_pi_get_client(PIP(cip));
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "!Invoking mdi_pi_online for %s: target=%x lun=%x",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);

		/*
		 * Hold path and exit phci to avoid deadlock with power
		 * management code during mdi_pi_online.
		 */
		mdi_hold_path(PIP(cip));
		mdi_devi_exit_phci(pptr->port_dip, *circ);

		rval = mdi_pi_online(PIP(cip), flags);

		mdi_devi_enter_phci(pptr->port_dip, circ);
		mdi_rele_path(PIP(cip));

		if (rval == MDI_SUCCESS) {
			mutex_enter(&ptgt->tgt_mutex);
			plun->lun_state |= FCP_LUN_INIT;
			mutex_exit(&ptgt->tgt_mutex);

			/*
			 * Clear MPxIO path permanent disable in case
			 * fcp hotplug dropped the offline event.
			 */
			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);

		} else if (rval == MDI_NOT_SUPPORTED) {
			child_info_t *old_cip = cip;

			/*
			 * MPxIO does not support this device yet.
			 * Enumerate in legacy mode.
			 */
			mutex_enter(&pptr->port_mutex);
			mutex_enter(&plun->lun_mutex);
			plun->lun_mpxio = 0;
			plun->lun_cip = NULL;
			cdip = fcp_create_dip(plun, lcount, tcount);
			plun->lun_cip = cip = CIP(cdip);
			if (cip == NULL) {
				fcp_log(CE_WARN, pptr->port_dip,
				    "!fcp_online_child: "
				    "Create devinfo failed for LU=%p", plun);
				mutex_exit(&plun->lun_mutex);

				mutex_enter(&ptgt->tgt_mutex);
				plun->lun_state |= FCP_LUN_OFFLINE;
				mutex_exit(&ptgt->tgt_mutex);

				mutex_exit(&pptr->port_mutex);

				/*
				 * free the mdi_pathinfo node
				 */
				(void) mdi_pi_free(PIP(old_cip), 0);
			} else {
				FCP_TRACE(fcp_logq, pptr->port_instbuf,
				    fcp_trace, FCP_BUF_LEVEL_3, 0,
				    "fcp_online_child: creating devinfo "
				    "node 0x%p for plun 0x%p",
				    cip, plun);
				mutex_exit(&plun->lun_mutex);
				mutex_exit(&pptr->port_mutex);
				/*
				 * free the mdi_pathinfo node
				 */
				(void) mdi_pi_free(PIP(old_cip), 0);
				mutex_enter(&pptr->port_mutex);
				mutex_enter(&plun->lun_mutex);
				/* Retry the online on the legacy path. */
				goto again;
			}
		} else {
			if (cdip) {
				fcp_log(CE_NOTE, pptr->port_dip,
				    "!fcp_online_child: mdi_pi_online:"
				    " failed for %s: target=%x lun=%x %x",
				    ddi_get_name(cdip), ptgt->tgt_d_id,
				    plun->lun_num, rval);
			}
		}
		/* Normalize the MDI result to the NDI convention. */
		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
	}

	if (rval == NDI_SUCCESS) {
		if (cdip) {
			/* Deliver the FCAL insert event to the new child. */
			(void) ndi_event_retrieve_cookie(
			    pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
			    &fcp_insert_eid, NDI_EVENT_NOPASS);
			(void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
			    cdip, fcp_insert_eid, NULL);
		}
	}
	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);
	return (rval);
}
13280 13256
/*
 * fcp_offline_child
 *	Offline the child node (devinfo or pathinfo) of a LUN.  For
 *	non-MPxIO LUNs this calls ndi_devi_offline; for MPxIO LUNs it
 *	calls mdi_pi_offline with the phci exited to avoid the power
 *	management deadlock.  On a successful offline with
 *	NDI_DEVI_REMOVE, the LUN's reference to the child and any stale
 *	old GUID are cleared, and for MPxIO the pathinfo node is freed.
 *
 *	Argument: plun		LUN whose child is to be offlined.
 *		  cip		child node (DIP or PIP, per lun_mpxio).
 *		  lcount	link state change count (unused).
 *		  tcount	target state change count (unused).
 *		  flags		NDI/MDI offline flags.
 *		  circ		phci circular-dependency cookie.
 *
 *	Return Value: NDI_SUCCESS or NDI_FAILURE.
 *
 *	Context: called (and returns) with port_mutex and lun_mutex held;
 *	both are dropped around the offline calls.
 */
/* ARGSUSED */
static int
fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
    int tcount, int flags, int *circ)
{
	int rval;
	int lun_mpxio;
	struct fcp_port *pptr = plun->lun_tgt->tgt_port;
	struct fcp_tgt *ptgt = plun->lun_tgt;
	dev_info_t *cdip;

	ASSERT(MUTEX_HELD(&plun->lun_mutex));
	ASSERT(MUTEX_HELD(&pptr->port_mutex));

	if (plun->lun_cip == NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0,
		    "fcp_offline_child: plun->lun_cip is NULL: "
		    "plun: %p lun state: %x num: %d target state: %x",
		    plun, plun->lun_state, plun->lun_num,
		    plun->lun_tgt->tgt_port->port_state);
		return (NDI_FAILURE);
	}

	/*
	 * We will use this value twice. Make a copy to be sure we use
	 * the same value in both places.
	 */
	lun_mpxio = plun->lun_mpxio;

	if (lun_mpxio == 0) {
		/* Legacy (non-MPxIO) path: offline the devinfo node. */
		cdip = DIP(cip);
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);
		rval = ndi_devi_offline(DIP(cip), flags);
		if (rval != NDI_SUCCESS) {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_offline_child: ndi_devi_offline failed "
			    "rval=%x cip=%p", rval, cip);
		}
	} else {
		/* MPxIO path: offline the pathinfo node. */
		cdip = mdi_pi_get_client(PIP(cip));
		mutex_exit(&plun->lun_mutex);
		mutex_exit(&pptr->port_mutex);

		/*
		 * Exit phci to avoid deadlock with power management code
		 * during mdi_pi_offline
		 */
		mdi_hold_path(PIP(cip));
		mdi_devi_exit_phci(pptr->port_dip, *circ);

		rval = mdi_pi_offline(PIP(cip), flags);

		mdi_devi_enter_phci(pptr->port_dip, circ);
		mdi_rele_path(PIP(cip));

		/* Normalize the MDI result to the NDI convention. */
		rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
	}

	mutex_enter(&ptgt->tgt_mutex);
	plun->lun_state &= ~FCP_LUN_INIT;
	mutex_exit(&ptgt->tgt_mutex);

	if (rval == NDI_SUCCESS) {
		/* Suppress the failure trace below on success. */
		cdip = NULL;
		if (flags & NDI_DEVI_REMOVE) {
			mutex_enter(&plun->lun_mutex);
			/*
			 * If the guid of the LUN changes, lun_cip will not
			 * equal to cip, and after offlining the LUN with the
			 * old guid, we should keep lun_cip since it's the cip
			 * of the LUN with the new guid.
			 * Otherwise remove our reference to child node.
			 *
			 * This must be done before the child node is freed,
			 * otherwise other threads could see a stale lun_cip
			 * pointer.
			 */
			if (plun->lun_cip == cip) {
				plun->lun_cip = NULL;
			}
			if (plun->lun_old_guid) {
				kmem_free(plun->lun_old_guid,
				    plun->lun_old_guid_size);
				plun->lun_old_guid = NULL;
				plun->lun_old_guid_size = 0;
			}
			mutex_exit(&plun->lun_mutex);
		}
	}

	if (lun_mpxio != 0) {
		if (rval == NDI_SUCCESS) {
			/*
			 * Clear MPxIO path permanent disable as the path is
			 * already offlined.
			 */
			(void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);

			if (flags & NDI_DEVI_REMOVE) {
				(void) mdi_pi_free(PIP(cip), 0);
			}
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "fcp_offline_child: mdi_pi_offline failed "
			    "rval=%x cip=%p", rval, cip);
		}
	}

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&plun->lun_mutex);

	if (cdip) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
		    " target=%x lun=%x", "ndi_offline",
		    ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
	}

	return (rval);
}
13405 13381
/*
 * fcp_remove_child
 *	Tear down and free the child node of a LUN, if it is still
 *	present.  For non-MPxIO LUNs the devinfo node is freed directly;
 *	for MPxIO LUNs the pathinfo node is offlined (with all three
 *	driver mutexes dropped and the phci exited, to avoid the power
 *	management deadlock) and then freed.  In either case lun_cip is
 *	cleared.
 *
 *	Argument: plun	LUN whose child is removed.
 *
 *	Context: called (and returns) with lun_mutex held; the MPxIO
 *	branch also expects (and temporarily drops) tgt_mutex and
 *	port_mutex.
 */
static void
fcp_remove_child(struct fcp_lun *plun)
{
	child_info_t *cip;
	int circ;

	ASSERT(MUTEX_HELD(&plun->lun_mutex));

	if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
		if (plun->lun_mpxio == 0) {
			/* Legacy node: strip properties and free it. */
			(void) ndi_prop_remove_all(DIP(plun->lun_cip));
			(void) ndi_devi_free(DIP(plun->lun_cip));
			plun->lun_cip = NULL;
		} else {
			/*
			 * Clear reference to the child node in the lun.
			 * This must be done before freeing it with mdi_pi_free
			 * and with lun_mutex held so that other threads always
			 * see either valid lun_cip or NULL when holding
			 * lun_mutex. We keep a copy in cip.
			 */
			cip = plun->lun_cip;
			plun->lun_cip = NULL;

			mutex_exit(&plun->lun_mutex);
			mutex_exit(&plun->lun_tgt->tgt_mutex);
			mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);

			mdi_devi_enter(
			    plun->lun_tgt->tgt_port->port_dip, &circ);

			/*
			 * Exit phci to avoid deadlock with power management
			 * code during mdi_pi_offline
			 */
			mdi_hold_path(PIP(cip));
			mdi_devi_exit_phci(
			    plun->lun_tgt->tgt_port->port_dip, circ);
			(void) mdi_pi_offline(PIP(cip),
			    NDI_DEVI_REMOVE);
			mdi_devi_enter_phci(
			    plun->lun_tgt->tgt_port->port_dip, &circ);
			mdi_rele_path(PIP(cip));

			mdi_devi_exit(
			    plun->lun_tgt->tgt_port->port_dip, circ);

			FCP_TRACE(fcp_logq,
			    plun->lun_tgt->tgt_port->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_3, 0,
			    "lun=%p pip freed %p", plun, cip);

			(void) mdi_prop_remove(PIP(cip), NULL);
			(void) mdi_pi_free(PIP(cip), 0);

			/* Re-acquire the mutexes in the original order. */
			mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
			mutex_enter(&plun->lun_tgt->tgt_mutex);
			mutex_enter(&plun->lun_mutex);
		}
	} else {
		/* Child already gone; just drop our stale reference. */
		plun->lun_cip = NULL;
	}
}
13469 13445
13470 13446 /*
13471 13447 * called when a timeout occurs
13472 13448 *
13473 13449 * can be scheduled during an attach or resume (if not already running)
13474 13450 *
13475 13451 * one timeout is set up for all ports
13476 13452 *
13477 13453 * acquires and releases the global mutex
13478 13454 */
13479 13455 /*ARGSUSED*/
13480 13456 static void
13481 13457 fcp_watch(void *arg)
13482 13458 {
13483 13459 struct fcp_port *pptr;
13484 13460 struct fcp_ipkt *icmd;
13485 13461 struct fcp_ipkt *nicmd;
13486 13462 struct fcp_pkt *cmd;
13487 13463 struct fcp_pkt *ncmd;
13488 13464 struct fcp_pkt *tail;
13489 13465 struct fcp_pkt *pcmd;
13490 13466 struct fcp_pkt *save_head;
13491 13467 struct fcp_port *save_port;
13492 13468
13493 13469 /* increment global watchdog time */
13494 13470 fcp_watchdog_time += fcp_watchdog_timeout;
13495 13471
13496 13472 mutex_enter(&fcp_global_mutex);
13497 13473
13498 13474 /* scan each port in our list */
13499 13475 for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
13500 13476 save_port = fcp_port_head;
13501 13477 pptr->port_state |= FCP_STATE_IN_WATCHDOG;
13502 13478 mutex_exit(&fcp_global_mutex);
13503 13479
13504 13480 mutex_enter(&pptr->port_mutex);
13505 13481 if (pptr->port_ipkt_list == NULL &&
13506 13482 (pptr->port_state & (FCP_STATE_SUSPENDED |
13507 13483 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
13508 13484 pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13509 13485 mutex_exit(&pptr->port_mutex);
13510 13486 mutex_enter(&fcp_global_mutex);
13511 13487 goto end_of_watchdog;
13512 13488 }
13513 13489
13514 13490 /*
13515 13491 * We check if a list of targets need to be offlined.
13516 13492 */
13517 13493 if (pptr->port_offline_tgts) {
13518 13494 fcp_scan_offline_tgts(pptr);
13519 13495 }
13520 13496
13521 13497 /*
13522 13498 * We check if a list of luns need to be offlined.
13523 13499 */
13524 13500 if (pptr->port_offline_luns) {
13525 13501 fcp_scan_offline_luns(pptr);
13526 13502 }
13527 13503
13528 13504 /*
13529 13505 * We check if a list of targets or luns need to be reset.
13530 13506 */
13531 13507 if (pptr->port_reset_list) {
13532 13508 fcp_check_reset_delay(pptr);
13533 13509 }
13534 13510
13535 13511 mutex_exit(&pptr->port_mutex);
13536 13512
13537 13513 /*
13538 13514 * This is where the pending commands (pkt) are checked for
13539 13515 * timeout.
13540 13516 */
13541 13517 mutex_enter(&pptr->port_pkt_mutex);
13542 13518 tail = pptr->port_pkt_tail;
13543 13519
13544 13520 for (pcmd = NULL, cmd = pptr->port_pkt_head;
13545 13521 cmd != NULL; cmd = ncmd) {
13546 13522 ncmd = cmd->cmd_next;
13547 13523 /*
13548 13524 * If a command is in this queue the bit CFLAG_IN_QUEUE
13549 13525 * must be set.
13550 13526 */
13551 13527 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
13552 13528 /*
13553 13529 * FCP_INVALID_TIMEOUT will be set for those
13554 13530 * command that need to be failed. Mostly those
13555 13531 * cmds that could not be queued down for the
13556 13532 * "timeout" value. cmd->cmd_timeout is used
13557 13533 * to try and requeue the command regularly.
13558 13534 */
13559 13535 if (cmd->cmd_timeout >= fcp_watchdog_time) {
13560 13536 /*
13561 13537 * This command hasn't timed out yet. Let's
13562 13538 * go to the next one.
13563 13539 */
13564 13540 pcmd = cmd;
13565 13541 goto end_of_loop;
13566 13542 }
13567 13543
13568 13544 if (cmd == pptr->port_pkt_head) {
13569 13545 ASSERT(pcmd == NULL);
13570 13546 pptr->port_pkt_head = cmd->cmd_next;
13571 13547 } else {
13572 13548 ASSERT(pcmd != NULL);
13573 13549 pcmd->cmd_next = cmd->cmd_next;
13574 13550 }
13575 13551
13576 13552 if (cmd == pptr->port_pkt_tail) {
13577 13553 ASSERT(cmd->cmd_next == NULL);
13578 13554 pptr->port_pkt_tail = pcmd;
13579 13555 if (pcmd) {
13580 13556 pcmd->cmd_next = NULL;
13581 13557 }
13582 13558 }
13583 13559 cmd->cmd_next = NULL;
13584 13560
13585 13561 /*
13586 13562 * save the current head before dropping the
13587 13563 * mutex - If the head doesn't remain the
13588 13564 * same after re acquiring the mutex, just
13589 13565 * bail out and revisit on next tick.
13590 13566 *
13591 13567 * PS: The tail pointer can change as the commands
13592 13568 * get requeued after failure to retransport
13593 13569 */
13594 13570 save_head = pptr->port_pkt_head;
13595 13571 mutex_exit(&pptr->port_pkt_mutex);
13596 13572
13597 13573 if (cmd->cmd_fp_pkt->pkt_timeout ==
13598 13574 FCP_INVALID_TIMEOUT) {
13599 13575 struct scsi_pkt *pkt = cmd->cmd_pkt;
13600 13576 struct fcp_lun *plun;
13601 13577 struct fcp_tgt *ptgt;
13602 13578
13603 13579 plun = ADDR2LUN(&pkt->pkt_address);
13604 13580 ptgt = plun->lun_tgt;
13605 13581
13606 13582 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13607 13583 fcp_trace, FCP_BUF_LEVEL_2, 0,
13608 13584 "SCSI cmd 0x%x to D_ID=%x timed out",
13609 13585 pkt->pkt_cdbp[0], ptgt->tgt_d_id);
13610 13586
13611 13587 cmd->cmd_state == FCP_PKT_ABORTING ?
13612 13588 fcp_fail_cmd(cmd, CMD_RESET,
13613 13589 STAT_DEV_RESET) : fcp_fail_cmd(cmd,
13614 13590 CMD_TIMEOUT, STAT_ABORTED);
13615 13591 } else {
13616 13592 fcp_retransport_cmd(pptr, cmd);
13617 13593 }
13618 13594 mutex_enter(&pptr->port_pkt_mutex);
13619 13595 if (save_head && save_head != pptr->port_pkt_head) {
13620 13596 /*
13621 13597 * Looks like linked list got changed (mostly
13622 13598 * happens when an an OFFLINE LUN code starts
13623 13599 * returning overflow queue commands in
13624 13600 * parallel. So bail out and revisit during
13625 13601 * next tick
13626 13602 */
13627 13603 break;
13628 13604 }
13629 13605 end_of_loop:
13630 13606 /*
13631 13607 * Scan only upto the previously known tail pointer
13632 13608 * to avoid excessive processing - lots of new packets
13633 13609 * could have been added to the tail or the old ones
13634 13610 * re-queued.
13635 13611 */
13636 13612 if (cmd == tail) {
13637 13613 break;
13638 13614 }
13639 13615 }
13640 13616 mutex_exit(&pptr->port_pkt_mutex);
13641 13617
13642 13618 mutex_enter(&pptr->port_mutex);
13643 13619 for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
13644 13620 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
13645 13621
13646 13622 nicmd = icmd->ipkt_next;
13647 13623 if ((icmd->ipkt_restart != 0) &&
13648 13624 (icmd->ipkt_restart >= fcp_watchdog_time)) {
13649 13625 /* packet has not timed out */
13650 13626 continue;
13651 13627 }
13652 13628
13653 13629 /* time for packet re-transport */
13654 13630 if (icmd == pptr->port_ipkt_list) {
13655 13631 pptr->port_ipkt_list = icmd->ipkt_next;
13656 13632 if (pptr->port_ipkt_list) {
13657 13633 pptr->port_ipkt_list->ipkt_prev =
13658 13634 NULL;
13659 13635 }
13660 13636 } else {
13661 13637 icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
13662 13638 if (icmd->ipkt_next) {
13663 13639 icmd->ipkt_next->ipkt_prev =
13664 13640 icmd->ipkt_prev;
13665 13641 }
13666 13642 }
13667 13643 icmd->ipkt_next = NULL;
13668 13644 icmd->ipkt_prev = NULL;
13669 13645 mutex_exit(&pptr->port_mutex);
13670 13646
13671 13647 if (fcp_is_retryable(icmd)) {
13672 13648 fc_ulp_rscn_info_t *rscnp =
13673 13649 (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
13674 13650 pkt_ulp_rscn_infop;
13675 13651
13676 13652 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13677 13653 fcp_trace, FCP_BUF_LEVEL_2, 0,
13678 13654 "%x to D_ID=%x Retrying..",
13679 13655 icmd->ipkt_opcode,
13680 13656 icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);
13681 13657
13682 13658 /*
13683 13659 * Update the RSCN count in the packet
13684 13660 * before resending.
13685 13661 */
13686 13662
13687 13663 if (rscnp != NULL) {
13688 13664 rscnp->ulp_rscn_count =
13689 13665 fc_ulp_get_rscn_count(pptr->
13690 13666 port_fp_handle);
13691 13667 }
13692 13668
13693 13669 mutex_enter(&pptr->port_mutex);
13694 13670 mutex_enter(&ptgt->tgt_mutex);
13695 13671 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
13696 13672 mutex_exit(&ptgt->tgt_mutex);
13697 13673 mutex_exit(&pptr->port_mutex);
13698 13674 switch (icmd->ipkt_opcode) {
13699 13675 int rval;
13700 13676 case LA_ELS_PLOGI:
13701 13677 if ((rval = fc_ulp_login(
13702 13678 pptr->port_fp_handle,
13703 13679 &icmd->ipkt_fpkt, 1)) ==
13704 13680 FC_SUCCESS) {
13705 13681 mutex_enter(
13706 13682 &pptr->port_mutex);
13707 13683 continue;
13708 13684 }
13709 13685 if (fcp_handle_ipkt_errors(
13710 13686 pptr, ptgt, icmd, rval,
13711 13687 "PLOGI") == DDI_SUCCESS) {
13712 13688 mutex_enter(
13713 13689 &pptr->port_mutex);
13714 13690 continue;
13715 13691 }
13716 13692 break;
13717 13693
13718 13694 case LA_ELS_PRLI:
13719 13695 if ((rval = fc_ulp_issue_els(
13720 13696 pptr->port_fp_handle,
13721 13697 icmd->ipkt_fpkt)) ==
13722 13698 FC_SUCCESS) {
13723 13699 mutex_enter(
13724 13700 &pptr->port_mutex);
13725 13701 continue;
13726 13702 }
13727 13703 if (fcp_handle_ipkt_errors(
13728 13704 pptr, ptgt, icmd, rval,
13729 13705 "PRLI") == DDI_SUCCESS) {
13730 13706 mutex_enter(
13731 13707 &pptr->port_mutex);
13732 13708 continue;
13733 13709 }
13734 13710 break;
13735 13711
13736 13712 default:
13737 13713 if ((rval = fcp_transport(
13738 13714 pptr->port_fp_handle,
13739 13715 icmd->ipkt_fpkt, 1)) ==
13740 13716 FC_SUCCESS) {
13741 13717 mutex_enter(
13742 13718 &pptr->port_mutex);
13743 13719 continue;
13744 13720 }
13745 13721 if (fcp_handle_ipkt_errors(
13746 13722 pptr, ptgt, icmd, rval,
13747 13723 "PRLI") == DDI_SUCCESS) {
13748 13724 mutex_enter(
13749 13725 &pptr->port_mutex);
13750 13726 continue;
13751 13727 }
13752 13728 break;
13753 13729 }
13754 13730 } else {
13755 13731 mutex_exit(&ptgt->tgt_mutex);
13756 13732 mutex_exit(&pptr->port_mutex);
13757 13733 }
13758 13734 } else {
13759 13735 fcp_print_error(icmd->ipkt_fpkt);
13760 13736 }
13761 13737
13762 13738 (void) fcp_call_finish_init(pptr, ptgt,
13763 13739 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
13764 13740 icmd->ipkt_cause);
13765 13741 fcp_icmd_free(pptr, icmd);
13766 13742 mutex_enter(&pptr->port_mutex);
13767 13743 }
13768 13744
13769 13745 pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13770 13746 mutex_exit(&pptr->port_mutex);
13771 13747 mutex_enter(&fcp_global_mutex);
13772 13748
13773 13749 end_of_watchdog:
13774 13750 /*
13775 13751 * Bail out early before getting into trouble
13776 13752 */
13777 13753 if (save_port != fcp_port_head) {
13778 13754 break;
13779 13755 }
13780 13756 }
13781 13757
13782 13758 if (fcp_watchdog_init > 0) {
13783 13759 /* reschedule timeout to go again */
13784 13760 fcp_watchdog_id =
13785 13761 timeout(fcp_watch, NULL, fcp_watchdog_tick);
13786 13762 }
13787 13763 mutex_exit(&fcp_global_mutex);
13788 13764 }
13789 13765
13790 13766
/*
 * Walk the port's reset-delay list (port_reset_list) and, for each entry
 * that is due, remove it, restore the BUSY state on the affected target or
 * LUN, and abort all outstanding commands for it via fcp_abort_all().
 *
 * Called with port_mutex held; the mutex is dropped and re-acquired around
 * fcp_abort_all(), which is why the list unlink below re-validates the
 * list head instead of trusting cached pointers.
 */
static void
fcp_check_reset_delay(struct fcp_port *pptr)
{
	uint32_t tgt_cnt;
	int level;
	struct fcp_tgt *ptgt;
	struct fcp_lun *plun;
	struct fcp_reset_elem *cur = NULL;
	struct fcp_reset_elem *next = NULL;
	struct fcp_reset_elem *prev = NULL;

	ASSERT(mutex_owned(&pptr->port_mutex));

	next = pptr->port_reset_list;
	while ((cur = next) != NULL) {
		next = cur->next;

		/*
		 * Entry not yet due for processing: leave it on the list.
		 * NOTE(review): the comparison keeps entries whose timeout
		 * is below fcp_watchdog_time -- confirm intended polarity
		 * against how cur->timeout is initialized.
		 */
		if (cur->timeout < fcp_watchdog_time) {
			prev = cur;
			continue;
		}

		ptgt = cur->tgt;
		plun = cur->lun;
		tgt_cnt = cur->tgt_cnt;

		/* A target pointer means a target-level reset, else LUN. */
		if (ptgt) {
			level = RESET_TARGET;
		} else {
			ASSERT(plun != NULL);
			level = RESET_LUN;
			ptgt = plun->lun_tgt;
		}
		if (prev) {
			prev->next = next;
		} else {
			/*
			 * Because we drop port mutex while doing aborts for
			 * packets, we can't rely on reset_list pointing to
			 * our head
			 */
			if (cur == pptr->port_reset_list) {
				pptr->port_reset_list = next;
			} else {
				struct fcp_reset_elem *which;

				/* re-find cur's predecessor from the head */
				which = pptr->port_reset_list;
				while (which && which->next != cur) {
					which = which->next;
				}
				ASSERT(which != NULL);

				which->next = next;
				prev = which;
			}
		}

		kmem_free(cur, sizeof (*cur));

		/*
		 * Only act if the target generation hasn't changed since
		 * the reset element was queued.
		 */
		if (tgt_cnt == ptgt->tgt_change_cnt) {
			mutex_enter(&ptgt->tgt_mutex);
			if (level == RESET_TARGET) {
				fcp_update_tgt_state(ptgt,
				    FCP_RESET, FCP_LUN_BUSY);
			} else {
				fcp_update_lun_state(plun,
				    FCP_RESET, FCP_LUN_BUSY);
			}
			mutex_exit(&ptgt->tgt_mutex);

			/* fcp_abort_all() must run without port_mutex */
			mutex_exit(&pptr->port_mutex);
			fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
			mutex_enter(&pptr->port_mutex);
		}
	}
}
13867 13843
13868 13844
/*
 * Abort all commands for a target (ttgt) or a single LUN (rlun) after a
 * reset, identified by generation tgt_cnt.
 *
 * Phase 1: unlink every matching command from the port's overflow queue
 * (port_pkt_head/tail) onto a private list, then complete each with
 * CMD_RESET/STAT_DEV_RESET if the target generation still matches.
 *
 * Phase 2 (only when the FCA does not flush its own queue): walk each LUN's
 * in-flight list and fc_ulp_abort() every ISSUED command; failed aborts are
 * parked on the overflow queue so fcp_watch can time them out.
 *
 * Called without port_mutex held (the caller drops it first).
 */
static void
fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
    struct fcp_lun *rlun, int tgt_cnt)
{
	int rval;
	struct fcp_lun *tlun, *nlun;
	struct fcp_pkt *pcmd = NULL, *ncmd = NULL,
	    *cmd = NULL, *head = NULL,
	    *tail = NULL;

	/* Phase 1: harvest matching commands off the overflow queue. */
	mutex_enter(&pptr->port_pkt_mutex);
	for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
		struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
		struct fcp_tgt *ptgt = plun->lun_tgt;

		ncmd = cmd->cmd_next;

		/* pcmd tracks the previous command left on the queue */
		if (ptgt != ttgt && plun != rlun) {
			pcmd = cmd;
			continue;
		}

		if (pcmd != NULL) {
			ASSERT(pptr->port_pkt_head != cmd);
			pcmd->cmd_next = ncmd;
		} else {
			ASSERT(cmd == pptr->port_pkt_head);
			pptr->port_pkt_head = ncmd;
		}
		if (pptr->port_pkt_tail == cmd) {
			ASSERT(cmd->cmd_next == NULL);
			pptr->port_pkt_tail = pcmd;
			if (pcmd != NULL) {
				pcmd->cmd_next = NULL;
			}
		}

		/* append to the private head/tail list */
		if (head == NULL) {
			head = tail = cmd;
		} else {
			ASSERT(tail != NULL);
			tail->cmd_next = cmd;
			tail = cmd;
		}
		cmd->cmd_next = NULL;
	}
	mutex_exit(&pptr->port_pkt_mutex);

	/*
	 * Complete the harvested commands with a reset indication.
	 * NOTE(review): ttgt is dereferenced here before the NULL fix-up
	 * below -- appears to rely on callers passing ttgt != NULL whenever
	 * queued commands can match; confirm all call sites.
	 */
	for (cmd = head; cmd != NULL; cmd = ncmd) {
		struct scsi_pkt *pkt = cmd->cmd_pkt;

		ncmd = cmd->cmd_next;
		ASSERT(pkt != NULL);

		mutex_enter(&pptr->port_mutex);
		if (ttgt->tgt_change_cnt == tgt_cnt) {
			mutex_exit(&pptr->port_mutex);
			cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
			pkt->pkt_reason = CMD_RESET;
			pkt->pkt_statistics |= STAT_DEV_RESET;
			cmd->cmd_state = FCP_PKT_IDLE;
			fcp_post_callback(cmd);
		} else {
			mutex_exit(&pptr->port_mutex);
		}
	}

	/*
	 * If the FCA will return all the commands in its queue then our
	 * work is easy, just return.
	 */

	if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
		return;
	}

	/*
	 * For RESET_LUN get hold of target pointer
	 */
	if (ttgt == NULL) {
		ASSERT(rlun != NULL);

		ttgt = rlun->lun_tgt;

		ASSERT(ttgt != NULL);
	}

	/*
	 * There are some severe race conditions here.
	 * While we are trying to abort the pkt, it might be completing
	 * so mark it aborted and if the abort does not succeed then
	 * handle it in the watch thread.
	 */
	mutex_enter(&ttgt->tgt_mutex);
	nlun = ttgt->tgt_lun;
	mutex_exit(&ttgt->tgt_mutex);
	while ((tlun = nlun) != NULL) {
		int restart = 0;
		/* for a LUN-level reset, skip every other LUN */
		if (rlun && rlun != tlun) {
			mutex_enter(&ttgt->tgt_mutex);
			nlun = tlun->lun_next;
			mutex_exit(&ttgt->tgt_mutex);
			continue;
		}
		mutex_enter(&tlun->lun_mutex);
		cmd = tlun->lun_pkt_head;
		while (cmd != NULL) {
			if (cmd->cmd_state == FCP_PKT_ISSUED) {
				struct scsi_pkt *pkt;

				/* list may change while lun_mutex is dropped */
				restart = 1;
				cmd->cmd_state = FCP_PKT_ABORTING;
				mutex_exit(&tlun->lun_mutex);
				rval = fc_ulp_abort(pptr->port_fp_handle,
				    cmd->cmd_fp_pkt, KM_SLEEP);
				if (rval == FC_SUCCESS) {
					pkt = cmd->cmd_pkt;
					pkt->pkt_reason = CMD_RESET;
					pkt->pkt_statistics |= STAT_DEV_RESET;
					cmd->cmd_state = FCP_PKT_IDLE;
					fcp_post_callback(cmd);
				} else {
					caddr_t msg;

					(void) fc_ulp_error(rval, &msg);

					/*
					 * This part is tricky. The abort
					 * failed and now the command could
					 * be completing. The cmd_state ==
					 * FCP_PKT_ABORTING should save
					 * us in fcp_cmd_callback. If we
					 * are already aborting ignore the
					 * command in fcp_cmd_callback.
					 * Here we leave this packet for 20
					 * sec to be aborted in the
					 * fcp_watch thread.
					 */
					fcp_log(CE_WARN, pptr->port_dip,
					    "!Abort failed after reset %s",
					    msg);

					cmd->cmd_timeout =
					    fcp_watchdog_time +
					    cmd->cmd_pkt->pkt_time +
					    FCP_FAILED_DELAY;

					cmd->cmd_fp_pkt->pkt_timeout =
					    FCP_INVALID_TIMEOUT;
					/*
					 * This is a hack, cmd is put in the
					 * overflow queue so that it can be
					 * timed out finally
					 */
					cmd->cmd_flags |= CFLAG_IN_QUEUE;

					mutex_enter(&pptr->port_pkt_mutex);
					if (pptr->port_pkt_head) {
						ASSERT(pptr->port_pkt_tail
						    != NULL);
						pptr->port_pkt_tail->cmd_next
						    = cmd;
						pptr->port_pkt_tail = cmd;
					} else {
						ASSERT(pptr->port_pkt_tail
						    == NULL);
						pptr->port_pkt_head =
						    pptr->port_pkt_tail
						    = cmd;
					}
					cmd->cmd_next = NULL;
					mutex_exit(&pptr->port_pkt_mutex);
				}
				/* restart scan from the (possibly new) head */
				mutex_enter(&tlun->lun_mutex);
				cmd = tlun->lun_pkt_head;
			} else {
				cmd = cmd->cmd_forw;
			}
		}
		mutex_exit(&tlun->lun_mutex);

		/* if we dropped lun_mutex, rescan from the first LUN */
		mutex_enter(&ttgt->tgt_mutex);
		restart == 1 ? (nlun = ttgt->tgt_lun) : (nlun = tlun->lun_next);
		mutex_exit(&ttgt->tgt_mutex);

		/* stop if a new generation invalidated this whole pass */
		mutex_enter(&pptr->port_mutex);
		if (tgt_cnt != ttgt->tgt_change_cnt) {
			mutex_exit(&pptr->port_mutex);
			return;
		} else {
			mutex_exit(&pptr->port_mutex);
		}
	}
}
14063 14039
14064 14040
14065 14041 /*
14066 14042 * unlink the soft state, returning the soft state found (if any)
14067 14043 *
14068 14044 * acquires and releases the global mutex
14069 14045 */
14070 14046 struct fcp_port *
14071 14047 fcp_soft_state_unlink(struct fcp_port *pptr)
14072 14048 {
14073 14049 struct fcp_port *hptr; /* ptr index */
14074 14050 struct fcp_port *tptr; /* prev hptr */
14075 14051
14076 14052 mutex_enter(&fcp_global_mutex);
14077 14053 for (hptr = fcp_port_head, tptr = NULL;
14078 14054 hptr != NULL;
14079 14055 tptr = hptr, hptr = hptr->port_next) {
14080 14056 if (hptr == pptr) {
14081 14057 /* we found a match -- remove this item */
14082 14058 if (tptr == NULL) {
14083 14059 /* we're at the head of the list */
14084 14060 fcp_port_head = hptr->port_next;
14085 14061 } else {
14086 14062 tptr->port_next = hptr->port_next;
14087 14063 }
14088 14064 break; /* success */
14089 14065 }
14090 14066 }
14091 14067 if (fcp_port_head == NULL) {
14092 14068 fcp_cleanup_blacklist(&fcp_lun_blacklist);
14093 14069 }
14094 14070 mutex_exit(&fcp_global_mutex);
14095 14071 return (hptr);
14096 14072 }
14097 14073
14098 14074
14099 14075 /*
14100 14076 * called by fcp_scsi_hba_tgt_init to find a LUN given a
14101 14077 * WWN and a LUN number
14102 14078 */
14103 14079 /* ARGSUSED */
14104 14080 static struct fcp_lun *
14105 14081 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
14106 14082 {
14107 14083 int hash;
14108 14084 struct fcp_tgt *ptgt;
14109 14085 struct fcp_lun *plun;
14110 14086
14111 14087 ASSERT(mutex_owned(&pptr->port_mutex));
14112 14088
14113 14089 hash = FCP_HASH(wwn);
14114 14090 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
14115 14091 ptgt = ptgt->tgt_next) {
14116 14092 if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
14117 14093 sizeof (ptgt->tgt_port_wwn)) == 0) {
14118 14094 mutex_enter(&ptgt->tgt_mutex);
14119 14095 for (plun = ptgt->tgt_lun;
14120 14096 plun != NULL;
14121 14097 plun = plun->lun_next) {
14122 14098 if (plun->lun_num == lun) {
14123 14099 mutex_exit(&ptgt->tgt_mutex);
14124 14100 return (plun);
14125 14101 }
14126 14102 }
14127 14103 mutex_exit(&ptgt->tgt_mutex);
14128 14104 return (NULL);
14129 14105 }
14130 14106 }
14131 14107 return (NULL);
14132 14108 }
14133 14109
/*
 * Function: fcp_prepare_pkt
 *
 * Description: This function prepares the SCSI cmd pkt, passed by the caller,
 *		for fcp_start(). It binds the data or partially maps it.
 *		Builds the FCP header and starts the initialization of the
 *		Fibre Channel header.
 *
 * Argument:	*pptr		FCP port.
 *		*cmd		FCP packet.
 *		*plun		LUN the command will be sent to.
 *
 * Context:	User, Kernel and Interrupt context.
 */
static void
fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
    struct fcp_lun *plun)
{
	fc_packet_t *fpkt = cmd->cmd_fp_pkt;
	struct fcp_tgt *ptgt = plun->lun_tgt;
	struct fcp_cmd *fcmd = &cmd->cmd_fcp_cmd;

	/* a completion routine is mandatory unless the I/O is polled */
	ASSERT(cmd->cmd_pkt->pkt_comp ||
	    (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));

	if (cmd->cmd_pkt->pkt_numcookies) {
		/*
		 * There is a data phase: derive the transfer direction
		 * from the DMA binding flags.
		 */
		if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
			fcmd->fcp_cntl.cntl_read_data = 1;
			fcmd->fcp_cntl.cntl_write_data = 0;
			fpkt->pkt_tran_type = FC_PKT_FCP_READ;
		} else {
			fcmd->fcp_cntl.cntl_read_data = 0;
			fcmd->fcp_cntl.cntl_write_data = 1;
			fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
		}

		fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;

		fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
		ASSERT(fpkt->pkt_data_cookie_cnt <=
		    pptr->port_data_dma_attr.dma_attr_sgllen);

		cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;

		/* FCA needs pkt_datalen to be set */
		fpkt->pkt_datalen = cmd->cmd_dmacount;
		fcmd->fcp_data_len = cmd->cmd_dmacount;
	} else {
		/* no data phase */
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 0;
		fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
		fpkt->pkt_datalen = 0;
		fcmd->fcp_data_len = 0;
	}

	/* set up the Tagged Queuing type */
	if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
	} else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
	} else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
	} else {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	}

	fcmd->fcp_ent_addr = plun->lun_addr;

	/* copy the FCP command out to DVMA space when the FCA uses it */
	if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
		FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
		    fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
	} else {
		ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
	}

	/* reset the SCSI packet's completion fields for this attempt */
	cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
	cmd->cmd_pkt->pkt_state = 0;
	cmd->cmd_pkt->pkt_statistics = 0;
	cmd->cmd_pkt->pkt_resid = 0;

	cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;

	if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
		/* polled I/O: no completion callback */
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
		fpkt->pkt_comp = NULL;
	} else {
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
		if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
		}
		fpkt->pkt_comp = fcp_cmd_callback;
	}

	mutex_enter(&pptr->port_mutex);
	if (pptr->port_state & FCP_STATE_SUSPENDED) {
		fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
	}
	mutex_exit(&pptr->port_mutex);

	/* FC frame header: destination is the target, source is the port */
	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;

	/*
	 * Save a few kernel cycles here
	 */
	fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
}
14243 14217
/*
 * Complete a command back to SCSA by invoking the SCSI packet's
 * completion callback via scsi_hba_pkt_comp().
 */
static void
fcp_post_callback(struct fcp_pkt *cmd)
{
	scsi_hba_pkt_comp(cmd->cmd_pkt);
}
14249 14223
14250 14224
14251 14225 /*
14252 14226 * called to do polled I/O by fcp_start()
14253 14227 *
14254 14228 * return a transport status value, i.e. TRAN_ACCECPT for success
14255 14229 */
14256 14230 static int
14257 14231 fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
14258 14232 {
14259 14233 int rval;
14260 14234
14261 14235 #ifdef DEBUG
14262 14236 mutex_enter(&pptr->port_pkt_mutex);
14263 14237 pptr->port_npkts++;
14264 14238 mutex_exit(&pptr->port_pkt_mutex);
14265 14239 #endif /* DEBUG */
14266 14240
14267 14241 if (cmd->cmd_fp_pkt->pkt_timeout) {
14268 14242 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
14269 14243 } else {
14270 14244 cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
14271 14245 }
14272 14246
14273 14247 ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);
14274 14248
14275 14249 cmd->cmd_state = FCP_PKT_ISSUED;
14276 14250
14277 14251 rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);
14278 14252
14279 14253 #ifdef DEBUG
14280 14254 mutex_enter(&pptr->port_pkt_mutex);
14281 14255 pptr->port_npkts--;
14282 14256 mutex_exit(&pptr->port_pkt_mutex);
14283 14257 #endif /* DEBUG */
14284 14258
14285 14259 cmd->cmd_state = FCP_PKT_IDLE;
14286 14260
14287 14261 switch (rval) {
14288 14262 case FC_SUCCESS:
14289 14263 if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
14290 14264 fcp_complete_pkt(cmd->cmd_fp_pkt);
14291 14265 rval = TRAN_ACCEPT;
14292 14266 } else {
14293 14267 rval = TRAN_FATAL_ERROR;
14294 14268 }
14295 14269 break;
14296 14270
14297 14271 case FC_TRAN_BUSY:
14298 14272 rval = TRAN_BUSY;
14299 14273 cmd->cmd_pkt->pkt_resid = 0;
14300 14274 break;
14301 14275
14302 14276 case FC_BADPACKET:
14303 14277 rval = TRAN_BADPKT;
14304 14278 break;
14305 14279
14306 14280 default:
14307 14281 rval = TRAN_FATAL_ERROR;
14308 14282 break;
14309 14283 }
14310 14284
14311 14285 return (rval);
14312 14286 }
14313 14287
14314 14288
14315 14289 /*
14316 14290 * called by some of the following transport-called routines to convert
14317 14291 * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14318 14292 */
14319 14293 static struct fcp_port *
14320 14294 fcp_dip2port(dev_info_t *dip)
14321 14295 {
14322 14296 int instance;
14323 14297
14324 14298 instance = ddi_get_instance(dip);
14325 14299 return (ddi_get_soft_state(fcp_softstate, instance));
14326 14300 }
14327 14301
14328 14302
14329 14303 /*
14330 14304 * called internally to return a LUN given a dip
14331 14305 */
14332 14306 struct fcp_lun *
14333 14307 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14334 14308 {
14335 14309 struct fcp_tgt *ptgt;
14336 14310 struct fcp_lun *plun;
14337 14311 int i;
14338 14312
14339 14313
14340 14314 ASSERT(mutex_owned(&pptr->port_mutex));
14341 14315
14342 14316 for (i = 0; i < FCP_NUM_HASH; i++) {
14343 14317 for (ptgt = pptr->port_tgt_hash_table[i];
14344 14318 ptgt != NULL;
14345 14319 ptgt = ptgt->tgt_next) {
14346 14320 mutex_enter(&ptgt->tgt_mutex);
14347 14321 for (plun = ptgt->tgt_lun; plun != NULL;
14348 14322 plun = plun->lun_next) {
14349 14323 mutex_enter(&plun->lun_mutex);
14350 14324 if (plun->lun_cip == cip) {
14351 14325 mutex_exit(&plun->lun_mutex);
14352 14326 mutex_exit(&ptgt->tgt_mutex);
14353 14327 return (plun); /* match found */
14354 14328 }
14355 14329 mutex_exit(&plun->lun_mutex);
14356 14330 }
14357 14331 mutex_exit(&ptgt->tgt_mutex);
14358 14332 }
14359 14333 }
14360 14334 return (NULL); /* no LUN found */
14361 14335 }
14362 14336
14363 14337 /*
14364 14338 * pass an element to the hotplug list, kick the hotplug thread
14365 14339 * and wait for the element to get processed by the hotplug thread.
14366 14340 * on return the element is freed.
14367 14341 *
14368 14342 * return zero success and non-zero on failure
14369 14343 *
14370 14344 * acquires/releases the target mutex
14371 14345 *
14372 14346 */
14373 14347 static int
14374 14348 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14375 14349 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14376 14350 {
14377 14351 struct fcp_hp_elem *elem;
14378 14352 int rval;
14379 14353
14380 14354 mutex_enter(&plun->lun_tgt->tgt_mutex);
14381 14355 if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14382 14356 what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14383 14357 mutex_exit(&plun->lun_tgt->tgt_mutex);
14384 14358 fcp_log(CE_CONT, pptr->port_dip,
14385 14359 "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14386 14360 what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14387 14361 return (NDI_FAILURE);
14388 14362 }
14389 14363 mutex_exit(&plun->lun_tgt->tgt_mutex);
14390 14364 mutex_enter(&elem->mutex);
14391 14365 if (elem->wait) {
14392 14366 while (elem->wait) {
14393 14367 cv_wait(&elem->cv, &elem->mutex);
14394 14368 }
14395 14369 }
14396 14370 rval = (elem->result);
14397 14371 mutex_exit(&elem->mutex);
14398 14372 mutex_destroy(&elem->mutex);
14399 14373 cv_destroy(&elem->cv);
14400 14374 kmem_free(elem, sizeof (struct fcp_hp_elem));
14401 14375 return (rval);
14402 14376 }
14403 14377
14404 14378 /*
14405 14379 * pass an element to the hotplug list, and then
14406 14380 * kick the hotplug thread
14407 14381 *
14408 14382 * return Boolean success, i.e. non-zero if all goes well, else zero on error
14409 14383 *
14410 14384 * acquires/releases the hotplug mutex
14411 14385 *
14412 14386 * called with the target mutex owned
14413 14387 *
14414 14388 * memory acquired in NOSLEEP mode
14415 14389 * NOTE: if wait is set to 1 then the caller is responsible for waiting on
14416 14390 * for the hp daemon to process the request and is responsible for
14417 14391 * freeing the element
14418 14392 */
14419 14393 static struct fcp_hp_elem *
14420 14394 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14421 14395 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14422 14396 {
14423 14397 struct fcp_hp_elem *elem;
14424 14398 dev_info_t *pdip;
14425 14399
14426 14400 ASSERT(pptr != NULL);
14427 14401 ASSERT(plun != NULL);
14428 14402 ASSERT(plun->lun_tgt != NULL);
14429 14403 ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14430 14404
14431 14405 /* create space for a hotplug element */
14432 14406 if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14433 14407 == NULL) {
14434 14408 fcp_log(CE_WARN, NULL,
14435 14409 "!can't allocate memory for hotplug element");
14436 14410 return (NULL);
14437 14411 }
14438 14412
14439 14413 /* fill in hotplug element */
14440 14414 elem->port = pptr;
14441 14415 elem->lun = plun;
14442 14416 elem->cip = cip;
14443 14417 elem->old_lun_mpxio = plun->lun_mpxio;
14444 14418 elem->what = what;
14445 14419 elem->flags = flags;
14446 14420 elem->link_cnt = link_cnt;
14447 14421 elem->tgt_cnt = tgt_cnt;
14448 14422 elem->wait = wait;
14449 14423 mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14450 14424 cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14451 14425
14452 14426 /* schedule the hotplug task */
14453 14427 pdip = pptr->port_dip;
14454 14428 mutex_enter(&plun->lun_mutex);
14455 14429 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14456 14430 plun->lun_event_count++;
14457 14431 elem->event_cnt = plun->lun_event_count;
14458 14432 }
14459 14433 mutex_exit(&plun->lun_mutex);
14460 14434 if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14461 14435 (void *)elem, KM_NOSLEEP) == NULL) {
14462 14436 mutex_enter(&plun->lun_mutex);
14463 14437 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14464 14438 plun->lun_event_count--;
14465 14439 }
14466 14440 mutex_exit(&plun->lun_mutex);
14467 14441 kmem_free(elem, sizeof (*elem));
14468 14442 return (0);
14469 14443 }
14470 14444
14471 14445 return (elem);
14472 14446 }
14473 14447
14474 14448
/*
 * Re-issue a command that currently sits on the port's overflow queue
 * (CFLAG_IN_QUEUE is set).  If the LUN is neither busy nor offline and the
 * port is not onlining, the packet is re-prepared and handed back to the
 * FC transport; otherwise, or when the transport call fails, the packet is
 * put back on the overflow queue via fcp_queue_pkt().
 */
static void
fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
{
	int rval;
	struct scsi_address *ap;
	struct fcp_lun *plun;
	struct fcp_tgt *ptgt;
	fc_packet_t *fpkt;

	ap = &cmd->cmd_pkt->pkt_address;
	plun = ADDR2LUN(ap);
	ptgt = plun->lun_tgt;

	ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);

	cmd->cmd_state = FCP_PKT_IDLE;

	mutex_enter(&pptr->port_mutex);
	mutex_enter(&ptgt->tgt_mutex);
	if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
	    (!(pptr->port_state & FCP_STATE_ONLINING))) {
		fc_ulp_rscn_info_t *rscnp;

		cmd->cmd_state = FCP_PKT_ISSUED;

		/*
		 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
		 * originally NULL, hence we try to set it to the pd pointed
		 * to by the SCSI device we're trying to get to.
		 */

		fpkt = cmd->cmd_fp_pkt;
		if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
			fpkt->pkt_pd = ptgt->tgt_pd_handle;
			/*
			 * We need to notify the transport that we now have a
			 * reference to the remote port handle.
			 */
			fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
		}

		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);

		ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);

		/* prepare the packet */

		fcp_prepare_pkt(pptr, cmd, plun);

		rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
		    pkt_ulp_rscn_infop;

		/* zero pkt_time means no watchdog deadline for this cmd */
		cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
		    fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;

		if (rscnp != NULL) {
			rscnp->ulp_rscn_count =
			    fc_ulp_get_rscn_count(pptr->
			    port_fp_handle);
		}

		rval = fcp_transport(pptr->port_fp_handle,
		    cmd->cmd_fp_pkt, 0);

		if (rval == FC_SUCCESS) {
			return;
		}
		/*
		 * NOTE(review): cmd_state holds state values rather than a
		 * bit mask elsewhere in this file; clearing the
		 * FCP_PKT_ISSUED bits appears intended to return the packet
		 * to FCP_PKT_IDLE before requeueing -- confirm.
		 */
		cmd->cmd_state &= ~FCP_PKT_ISSUED;
	} else {
		mutex_exit(&ptgt->tgt_mutex);
		mutex_exit(&pptr->port_mutex);
	}

	/* could not (re)issue: park on the overflow queue */
	fcp_queue_pkt(pptr, cmd);
}
14551 14525
14552 14526
14553 14527 static void
14554 14528 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14555 14529 {
14556 14530 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14557 14531
14558 14532 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14559 14533 cmd->cmd_state = FCP_PKT_IDLE;
14560 14534
14561 14535 cmd->cmd_pkt->pkt_reason = reason;
14562 14536 cmd->cmd_pkt->pkt_state = 0;
14563 14537 cmd->cmd_pkt->pkt_statistics = statistics;
14564 14538
14565 14539 fcp_post_callback(cmd);
14566 14540 }
14567 14541
14568 14542 /*
14569 14543 * Function: fcp_queue_pkt
14570 14544 *
14571 14545 * Description: This function queues the packet passed by the caller into
14572 14546 * the list of packets of the FCP port.
14573 14547 *
14574 14548 * Argument: *pptr FCP port.
14575 14549 * *cmd FCP packet to queue.
14576 14550 *
14577 14551 * Return Value: None
14578 14552 *
14579 14553 * Context: User, Kernel and Interrupt context.
14580 14554 */
14581 14555 static void
14582 14556 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14583 14557 {
14584 14558 ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == NULL);
14585 14559
14586 14560 mutex_enter(&pptr->port_pkt_mutex);
14587 14561 cmd->cmd_flags |= CFLAG_IN_QUEUE;
14588 14562 ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14589 14563 cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14590 14564
14591 14565 /*
14592 14566 * zero pkt_time means hang around for ever
14593 14567 */
14594 14568 if (cmd->cmd_pkt->pkt_time) {
14595 14569 if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14596 14570 cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14597 14571 } else {
14598 14572 /*
14599 14573 * Indicate the watch thread to fail the
14600 14574 * command by setting it to highest value
14601 14575 */
14602 14576 cmd->cmd_timeout = fcp_watchdog_time;
14603 14577 cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14604 14578 }
14605 14579 }
14606 14580
14607 14581 if (pptr->port_pkt_head) {
14608 14582 ASSERT(pptr->port_pkt_tail != NULL);
14609 14583
14610 14584 pptr->port_pkt_tail->cmd_next = cmd;
14611 14585 pptr->port_pkt_tail = cmd;
14612 14586 } else {
14613 14587 ASSERT(pptr->port_pkt_tail == NULL);
14614 14588
14615 14589 pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14616 14590 }
14617 14591 cmd->cmd_next = NULL;
14618 14592 mutex_exit(&pptr->port_pkt_mutex);
14619 14593 }
14620 14594
14621 14595 /*
14622 14596 * Function: fcp_update_targets
14623 14597 *
14624 14598 * Description: This function applies the specified change of state to all
14625 14599 * the targets listed. The operation applied is 'set'.
14626 14600 *
14627 14601 * Argument: *pptr FCP port.
14628 14602 * *dev_list Array of fc_portmap_t structures.
14629 14603 * count Length of dev_list.
14630 14604 * state State bits to update.
14631 14605 * cause Reason for the update.
14632 14606 *
14633 14607 * Return Value: None
14634 14608 *
14635 14609 * Context: User, Kernel and Interrupt context.
14636 14610 * The mutex pptr->port_mutex must be held.
14637 14611 */
14638 14612 static void
14639 14613 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14640 14614 uint32_t count, uint32_t state, int cause)
14641 14615 {
14642 14616 fc_portmap_t *map_entry;
14643 14617 struct fcp_tgt *ptgt;
14644 14618
14645 14619 ASSERT(MUTEX_HELD(&pptr->port_mutex));
14646 14620
14647 14621 while (count--) {
14648 14622 map_entry = &(dev_list[count]);
14649 14623 ptgt = fcp_lookup_target(pptr,
14650 14624 (uchar_t *)&(map_entry->map_pwwn));
14651 14625 if (ptgt == NULL) {
14652 14626 continue;
14653 14627 }
14654 14628
14655 14629 mutex_enter(&ptgt->tgt_mutex);
14656 14630 ptgt->tgt_trace = 0;
14657 14631 ptgt->tgt_change_cnt++;
14658 14632 ptgt->tgt_statec_cause = cause;
14659 14633 ptgt->tgt_tmp_cnt = 1;
14660 14634 fcp_update_tgt_state(ptgt, FCP_SET, state);
14661 14635 mutex_exit(&ptgt->tgt_mutex);
14662 14636 }
14663 14637 }
14664 14638
14665 14639 static int
14666 14640 fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14667 14641 int lcount, int tcount, int cause)
14668 14642 {
14669 14643 int rval;
14670 14644
14671 14645 mutex_enter(&pptr->port_mutex);
14672 14646 rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
14673 14647 mutex_exit(&pptr->port_mutex);
14674 14648
14675 14649 return (rval);
14676 14650 }
14677 14651
14678 14652
/*
 * Drain the discovery "work in progress" counters.  Decrements the
 * per-target tgt_tmp_cnt and, once all targets have finished after a link
 * change/down, the per-port port_tmp_cnt; calls fcp_finish_tgt() and
 * fcp_finish_init() at the respective zero crossings.
 *
 * Called with port_mutex held.  Returns FCP_NO_CHANGE, or FCP_DEV_CHANGE
 * when the target generation (tgt_change_cnt) no longer matches tcount.
 */
static int
fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    int lcount, int tcount, int cause)
{
	int finish_init = 0;
	int finish_tgt = 0;
	int do_finish_init = 0;
	int rval = FCP_NO_CHANGE;

	/* only link events can complete port-wide initialization */
	if (cause == FCP_CAUSE_LINK_CHANGE ||
	    cause == FCP_CAUSE_LINK_DOWN) {
		do_finish_init = 1;
	}

	if (ptgt != NULL) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0,
		    "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
		    " cause = %d, d_id = 0x%x, tgt_done = %d",
		    pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
		    pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
		    ptgt->tgt_d_id, ptgt->tgt_done);

		mutex_enter(&ptgt->tgt_mutex);

		if (tcount && (ptgt->tgt_change_cnt != tcount)) {
			/* target generation changed underneath the caller */
			rval = FCP_DEV_CHANGE;
			if (do_finish_init && ptgt->tgt_done == 0) {
				ptgt->tgt_done++;
				finish_init = 1;
			}
		} else {
			if (--ptgt->tgt_tmp_cnt <= 0) {
				/* last outstanding unit of work for tgt */
				ptgt->tgt_tmp_cnt = 0;
				finish_tgt = 1;

				if (do_finish_init) {
					finish_init = 1;
				}
			}
		}
		mutex_exit(&ptgt->tgt_mutex);
	} else {
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_2, 0,
		    "Call Finish Init for NO target");

		if (do_finish_init) {
			finish_init = 1;
		}
	}

	if (finish_tgt) {
		ASSERT(ptgt != NULL);

		mutex_enter(&ptgt->tgt_mutex);
#ifdef DEBUG
		/* record who drained the count, for debugging */
		bzero(ptgt->tgt_tmp_cnt_stack,
		    sizeof (ptgt->tgt_tmp_cnt_stack));

		ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
		    FCP_STACK_DEPTH);
#endif /* DEBUG */
		mutex_exit(&ptgt->tgt_mutex);

		(void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
	}

	/* only finish the port if the link generation is still current */
	if (finish_init && lcount == pptr->port_link_cnt) {
		ASSERT(pptr->port_tmp_cnt > 0);
		if (--pptr->port_tmp_cnt == 0) {
			fcp_finish_init(pptr);
		}
	} else if (lcount != pptr->port_link_cnt) {
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_2, 0,
		    "fcp_call_finish_init_held,1: state change occured"
		    " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
	}

	return (rval);
}
14761 14735
/*
 * Function:	fcp_reconfigure_luns
 *
 * Description:	Timeout callback (armed via ptgt->tgt_tid) that simulates
 *		a device state change for one target so that its LUNs get
 *		rediscovered: a single-entry portmap describing the target
 *		is built and fed to fcp_statec_callback() with the type
 *		PORT_DEVICE_REPORTLUN_CHANGED.
 *
 * Argument:	tgt_handle	Opaque pointer to the struct fcp_tgt.
 *
 * Context:	Kernel (timeout) context.
 */
static void
fcp_reconfigure_luns(void * tgt_handle)
{
	uint32_t	dev_cnt;
	fc_portmap_t	*devlist;
	struct fcp_tgt	*ptgt = (struct fcp_tgt *)tgt_handle;
	struct fcp_port	*pptr = ptgt->tgt_port;

	/*
	 * If the timer that fires this off got canceled too late, the
	 * target could have been destroyed.
	 *
	 * NOTE(review): tgt_tid is read here without tgt_mutex, while
	 * fcp_free_target() clears it under the mutex -- presumably an
	 * accepted benign race; confirm before relying on it.
	 */

	if (ptgt->tgt_tid == NULL) {
		return;
	}

	/* KM_NOSLEEP: we are in timeout context; bail out on failure. */
	devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
	if (devlist == NULL) {
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: failed to allocate for portmap",
		    pptr->port_instance);
		return;
	}

	/* Describe this one target as a logged-in, REPORTLUN-changed dev. */
	dev_cnt = 1;
	devlist->map_pd = ptgt->tgt_pd_handle;
	devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
	devlist->map_did.port_id = ptgt->tgt_d_id;

	bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
	bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);

	devlist->map_state = PORT_DEVICE_LOGGED_IN;
	devlist->map_type = PORT_DEVICE_REPORTLUN_CHANGED;
	devlist->map_flags = 0;

	/* Hand the synthetic state change to the normal discovery path. */
	fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
	    pptr->port_topology, devlist, dev_cnt, pptr->port_id);

	/*
	 * Clear the tgt_tid after no more references to
	 * the fcp_tgt
	 */
	mutex_enter(&ptgt->tgt_mutex);
	ptgt->tgt_tid = NULL;
	mutex_exit(&ptgt->tgt_mutex);

	kmem_free(devlist, sizeof (*devlist));
}
14812 14786
14813 14787
14814 14788 static void
14815 14789 fcp_free_targets(struct fcp_port *pptr)
14816 14790 {
14817 14791 int i;
14818 14792 struct fcp_tgt *ptgt;
14819 14793
14820 14794 mutex_enter(&pptr->port_mutex);
14821 14795 for (i = 0; i < FCP_NUM_HASH; i++) {
14822 14796 ptgt = pptr->port_tgt_hash_table[i];
14823 14797 while (ptgt != NULL) {
14824 14798 struct fcp_tgt *next_tgt = ptgt->tgt_next;
14825 14799
14826 14800 fcp_free_target(ptgt);
14827 14801 ptgt = next_tgt;
14828 14802 }
14829 14803 }
14830 14804 mutex_exit(&pptr->port_mutex);
14831 14805 }
14832 14806
14833 14807
/*
 * Function:	fcp_free_target
 *
 * Description:	Releases one target: cancels its pending reconfigure
 *		timeout (if any), deallocates every LUN on its list and
 *		finally deallocates the target structure itself.
 *
 * Argument:	*ptgt		FCP target to free.
 */
static void
fcp_free_target(struct fcp_tgt *ptgt)
{
	struct fcp_lun	*plun;
	timeout_id_t	tid;

	mutex_enter(&ptgt->tgt_mutex);
	tid = ptgt->tgt_tid;

	/*
	 * Cancel any pending timeouts for this target.
	 */

	if (tid != NULL) {
		/*
		 * Set tgt_tid to NULL first to avoid a race in the callback.
		 * If tgt_tid is NULL, the callback will simply return.
		 */
		ptgt->tgt_tid = NULL;
		/*
		 * tgt_mutex must be dropped around untimeout(): the
		 * callback (fcp_reconfigure_luns) takes the same mutex,
		 * and untimeout() waits for a running callback.
		 */
		mutex_exit(&ptgt->tgt_mutex);
		(void) untimeout(tid);
		mutex_enter(&ptgt->tgt_mutex);
	}

	/* Free every LUN, remembering each next pointer before dealloc. */
	plun = ptgt->tgt_lun;
	while (plun != NULL) {
		struct fcp_lun *next_lun = plun->lun_next;

		fcp_dealloc_lun(plun);
		plun = next_lun;
	}

	mutex_exit(&ptgt->tgt_mutex);
	fcp_dealloc_tgt(ptgt);
}
14869 14843
14870 14844 /*
14871 14845 * Function: fcp_is_retryable
14872 14846 *
14873 14847 * Description: Indicates if the internal packet is retryable.
14874 14848 *
14875 14849 * Argument: *icmd FCP internal packet.
14876 14850 *
14877 14851 * Return Value: 0 Not retryable
14878 14852 * 1 Retryable
14879 14853 *
14880 14854 * Context: User, Kernel and Interrupt context
14881 14855 */
14882 14856 static int
14883 14857 fcp_is_retryable(struct fcp_ipkt *icmd)
14884 14858 {
14885 14859 if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14886 14860 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
14887 14861 return (0);
14888 14862 }
14889 14863
14890 14864 return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14891 14865 icmd->ipkt_port->port_deadline) ? 1 : 0);
14892 14866 }
14893 14867
14894 14868 /*
14895 14869 * Function: fcp_create_on_demand
14896 14870 *
14897 14871 * Argument: *pptr FCP port.
14898 14872 * *pwwn Port WWN.
14899 14873 *
14900 14874 * Return Value: 0 Success
14901 14875 * EIO
14902 14876 * ENOMEM
14903 14877 * EBUSY
14904 14878 * EINVAL
14905 14879 *
14906 14880 * Context: User and Kernel context
14907 14881 */
14908 14882 static int
14909 14883 fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
14910 14884 {
14911 14885 int wait_ms;
14912 14886 int tcount;
14913 14887 int lcount;
14914 14888 int ret;
14915 14889 int error;
14916 14890 int rval = EIO;
14917 14891 int ntries;
14918 14892 fc_portmap_t *devlist;
14919 14893 opaque_t pd;
14920 14894 struct fcp_lun *plun;
14921 14895 struct fcp_tgt *ptgt;
14922 14896 int old_manual = 0;
14923 14897
14924 14898 /* Allocates the fc_portmap_t structure. */
14925 14899 devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);
14926 14900
14927 14901 /*
14928 14902 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown
14929 14903 * in the commented statement below:
14930 14904 *
14931 14905 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
14932 14906 *
14933 14907 * Below, the deadline for the discovery process is set.
14934 14908 */
14935 14909 mutex_enter(&pptr->port_mutex);
14936 14910 pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
14937 14911 mutex_exit(&pptr->port_mutex);
14938 14912
14939 14913 /*
14940 14914 * We try to find the remote port based on the WWN provided by the
14941 14915 * caller. We actually ask fp/fctl if it has it.
14942 14916 */
14943 14917 pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
14944 14918 (la_wwn_t *)pwwn, &error, 1);
14945 14919
14946 14920 if (pd == NULL) {
14947 14921 kmem_free(devlist, sizeof (*devlist));
14948 14922 return (rval);
14949 14923 }
14950 14924
14951 14925 /*
14952 14926 * The remote port was found. We ask fp/fctl to update our
14953 14927 * fc_portmap_t structure.
14954 14928 */
14955 14929 ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
14956 14930 (la_wwn_t *)pwwn, devlist);
14957 14931 if (ret != FC_SUCCESS) {
14958 14932 kmem_free(devlist, sizeof (*devlist));
14959 14933 return (rval);
14960 14934 }
14961 14935
14962 14936 /*
14963 14937 * The map flag field is set to indicates that the creation is being
14964 14938 * done at the user request (Ioclt probably luxadm or cfgadm).
14965 14939 */
14966 14940 devlist->map_type = PORT_DEVICE_USER_CREATE;
14967 14941
14968 14942 mutex_enter(&pptr->port_mutex);
14969 14943
14970 14944 /*
14971 14945 * We check to see if fcp already has a target that describes the
14972 14946 * device being created. If not it is created.
14973 14947 */
14974 14948 ptgt = fcp_lookup_target(pptr, pwwn);
14975 14949 if (ptgt == NULL) {
14976 14950 lcount = pptr->port_link_cnt;
14977 14951 mutex_exit(&pptr->port_mutex);
14978 14952
14979 14953 ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
14980 14954 if (ptgt == NULL) {
14981 14955 fcp_log(CE_WARN, pptr->port_dip,
14982 14956 "!FC target allocation failed");
14983 14957 return (ENOMEM);
14984 14958 }
14985 14959
14986 14960 mutex_enter(&pptr->port_mutex);
14987 14961 }
14988 14962
14989 14963 mutex_enter(&ptgt->tgt_mutex);
14990 14964 ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
14991 14965 ptgt->tgt_tmp_cnt = 1;
14992 14966 ptgt->tgt_device_created = 0;
14993 14967 /*
14994 14968 * If fabric and auto config is set but the target was
14995 14969 * manually unconfigured then reset to the manual_config_only to
14996 14970 * 0 so the device will get configured.
14997 14971 */
14998 14972 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14999 14973 fcp_enable_auto_configuration &&
15000 14974 ptgt->tgt_manual_config_only == 1) {
15001 14975 old_manual = 1;
15002 14976 ptgt->tgt_manual_config_only = 0;
15003 14977 }
15004 14978 mutex_exit(&ptgt->tgt_mutex);
15005 14979
15006 14980 fcp_update_targets(pptr, devlist, 1,
15007 14981 FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);
15008 14982
15009 14983 lcount = pptr->port_link_cnt;
15010 14984 tcount = ptgt->tgt_change_cnt;
15011 14985
15012 14986 if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
15013 14987 tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
15014 14988 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15015 14989 fcp_enable_auto_configuration && old_manual) {
15016 14990 mutex_enter(&ptgt->tgt_mutex);
15017 14991 ptgt->tgt_manual_config_only = 1;
15018 14992 mutex_exit(&ptgt->tgt_mutex);
15019 14993 }
15020 14994
15021 14995 if (pptr->port_link_cnt != lcount ||
15022 14996 ptgt->tgt_change_cnt != tcount) {
15023 14997 rval = EBUSY;
15024 14998 }
15025 14999 mutex_exit(&pptr->port_mutex);
15026 15000
15027 15001 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15028 15002 FCP_BUF_LEVEL_3, 0,
15029 15003 "fcp_create_on_demand: mapflags ptgt=%x, "
15030 15004 "lcount=%x::port_link_cnt=%x, "
15031 15005 "tcount=%x: tgt_change_cnt=%x, rval=%x",
15032 15006 ptgt, lcount, pptr->port_link_cnt,
15033 15007 tcount, ptgt->tgt_change_cnt, rval);
15034 15008 return (rval);
15035 15009 }
15036 15010
15037 15011 /*
15038 15012 * Due to lack of synchronization mechanisms, we perform
15039 15013 * periodic monitoring of our request; Because requests
15040 15014 * get dropped when another one supercedes (either because
15041 15015 * of a link change or a target change), it is difficult to
15042 15016 * provide a clean synchronization mechanism (such as a
15043 15017 * semaphore or a conditional variable) without exhaustively
15044 15018 * rewriting the mainline discovery code of this driver.
15045 15019 */
15046 15020 wait_ms = 500;
15047 15021
15048 15022 ntries = fcp_max_target_retries;
15049 15023
15050 15024 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15051 15025 FCP_BUF_LEVEL_3, 0,
15052 15026 "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
15053 15027 "lcount=%x::port_link_cnt=%x, "
15054 15028 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15055 15029 "tgt_tmp_cnt =%x",
15056 15030 ntries, ptgt, lcount, pptr->port_link_cnt,
15057 15031 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15058 15032 ptgt->tgt_tmp_cnt);
15059 15033
15060 15034 mutex_enter(&ptgt->tgt_mutex);
15061 15035 while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
15062 15036 ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
15063 15037 mutex_exit(&ptgt->tgt_mutex);
15064 15038 mutex_exit(&pptr->port_mutex);
15065 15039
15066 15040 delay(drv_usectohz(wait_ms * 1000));
15067 15041
15068 15042 mutex_enter(&pptr->port_mutex);
15069 15043 mutex_enter(&ptgt->tgt_mutex);
15070 15044 }
15071 15045
15072 15046
15073 15047 if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
15074 15048 rval = EBUSY;
15075 15049 } else {
15076 15050 if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
15077 15051 FCP_TGT_NODE_PRESENT) {
15078 15052 rval = 0;
15079 15053 }
15080 15054 }
15081 15055
15082 15056 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15083 15057 FCP_BUF_LEVEL_3, 0,
15084 15058 "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
15085 15059 "lcount=%x::port_link_cnt=%x, "
15086 15060 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15087 15061 "tgt_tmp_cnt =%x",
15088 15062 ntries, ptgt, lcount, pptr->port_link_cnt,
15089 15063 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15090 15064 ptgt->tgt_tmp_cnt);
15091 15065
15092 15066 if (rval) {
15093 15067 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15094 15068 fcp_enable_auto_configuration && old_manual) {
15095 15069 ptgt->tgt_manual_config_only = 1;
15096 15070 }
15097 15071 mutex_exit(&ptgt->tgt_mutex);
15098 15072 mutex_exit(&pptr->port_mutex);
15099 15073 kmem_free(devlist, sizeof (*devlist));
15100 15074
15101 15075 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15102 15076 FCP_BUF_LEVEL_3, 0,
15103 15077 "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
15104 15078 "lcount=%x::port_link_cnt=%x, "
15105 15079 "tcount=%x::tgt_change_cnt=%x, rval=%x, "
15106 15080 "tgt_device_created=%x, tgt D_ID=%x",
15107 15081 ntries, ptgt, lcount, pptr->port_link_cnt,
15108 15082 tcount, ptgt->tgt_change_cnt, rval,
15109 15083 ptgt->tgt_device_created, ptgt->tgt_d_id);
15110 15084 return (rval);
15111 15085 }
15112 15086
15113 15087 if ((plun = ptgt->tgt_lun) != NULL) {
15114 15088 tcount = plun->lun_tgt->tgt_change_cnt;
15115 15089 } else {
15116 15090 rval = EINVAL;
15117 15091 }
15118 15092 lcount = pptr->port_link_cnt;
15119 15093
15120 15094 /*
15121 15095 * Configuring the target with no LUNs will fail. We
15122 15096 * should reset the node state so that it is not
15123 15097 * automatically configured when the LUNs are added
15124 15098 * to this target.
15125 15099 */
15126 15100 if (ptgt->tgt_lun_cnt == 0) {
15127 15101 ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
15128 15102 }
15129 15103 mutex_exit(&ptgt->tgt_mutex);
15130 15104 mutex_exit(&pptr->port_mutex);
15131 15105
15132 15106 while (plun) {
15133 15107 child_info_t *cip;
15134 15108
15135 15109 mutex_enter(&plun->lun_mutex);
15136 15110 cip = plun->lun_cip;
15137 15111 mutex_exit(&plun->lun_mutex);
15138 15112
15139 15113 mutex_enter(&ptgt->tgt_mutex);
15140 15114 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
15141 15115 mutex_exit(&ptgt->tgt_mutex);
15142 15116
15143 15117 rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
15144 15118 FCP_ONLINE, lcount, tcount,
15145 15119 NDI_ONLINE_ATTACH);
15146 15120 if (rval != NDI_SUCCESS) {
15147 15121 FCP_TRACE(fcp_logq,
15148 15122 pptr->port_instbuf, fcp_trace,
15149 15123 FCP_BUF_LEVEL_3, 0,
15150 15124 "fcp_create_on_demand: "
15151 15125 "pass_to_hp_and_wait failed "
15152 15126 "rval=%x", rval);
15153 15127 rval = EIO;
15154 15128 } else {
15155 15129 mutex_enter(&LUN_TGT->tgt_mutex);
15156 15130 plun->lun_state &= ~(FCP_LUN_OFFLINE |
15157 15131 FCP_LUN_BUSY);
15158 15132 mutex_exit(&LUN_TGT->tgt_mutex);
15159 15133 }
15160 15134 mutex_enter(&ptgt->tgt_mutex);
15161 15135 }
15162 15136
15163 15137 plun = plun->lun_next;
15164 15138 mutex_exit(&ptgt->tgt_mutex);
15165 15139 }
15166 15140
15167 15141 kmem_free(devlist, sizeof (*devlist));
15168 15142
15169 15143 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15170 15144 fcp_enable_auto_configuration && old_manual) {
15171 15145 mutex_enter(&ptgt->tgt_mutex);
15172 15146 /* if successful then set manual to 0 */
15173 15147 if (rval == 0) {
15174 15148 ptgt->tgt_manual_config_only = 0;
15175 15149 } else {
15176 15150 /* reset to 1 so the user has to do the config */
15177 15151 ptgt->tgt_manual_config_only = 1;
15178 15152 }
15179 15153 mutex_exit(&ptgt->tgt_mutex);
15180 15154 }
15181 15155
15182 15156 return (rval);
15183 15157 }
15184 15158
15185 15159
15186 15160 static void
15187 15161 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
15188 15162 {
15189 15163 int count;
15190 15164 uchar_t byte;
15191 15165
15192 15166 count = 0;
15193 15167 while (*string) {
15194 15168 byte = FCP_ATOB(*string); string++;
15195 15169 byte = byte << 4 | FCP_ATOB(*string); string++;
15196 15170 bytes[count++] = byte;
15197 15171
15198 15172 if (count >= byte_len) {
15199 15173 break;
15200 15174 }
15201 15175 }
15202 15176 }
15203 15177
15204 15178 static void
15205 15179 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
15206 15180 {
15207 15181 int i;
15208 15182
15209 15183 for (i = 0; i < FC_WWN_SIZE; i++) {
15210 15184 (void) sprintf(string + (i * 2),
15211 15185 "%02x", wwn[i]);
15212 15186 }
15213 15187
15214 15188 }
15215 15189
/*
 * Function:	fcp_print_error
 *
 * Description:	Logs a human-readable diagnostic for a failed internal
 *		packet.  A printf-style template is built into a scratch
 *		buffer according to the opcode, then extended with either
 *		the SCSI/FCP response details (for SCSI commands that
 *		completed at the FC level) or the transport-level
 *		state/reason, and finally emitted via fcp_log().
 *
 * Argument:	*fpkt		Failed fc_packet_t; its pkt_ulp_private
 *				points to the fcp_ipkt.
 */
static void
fcp_print_error(fc_packet_t *fpkt)
{
	struct fcp_ipkt	*icmd = (struct fcp_ipkt *)
	    fpkt->pkt_ulp_private;
	struct fcp_port	*pptr;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;
	caddr_t		buf;
	int		scsi_cmd = 0;

	ptgt = icmd->ipkt_tgt;
	plun = icmd->ipkt_lun;
	pptr = ptgt->tgt_port;

	/* Message scratch buffer; silently skip logging if unavailable. */
	buf = kmem_zalloc(256, KM_NOSLEEP);
	if (buf == NULL) {
		return;
	}

	/*
	 * Note: the "%%x" sequences below deliberately produce literal
	 * "%x" conversions in buf, which fcp_log() later expands with
	 * the arguments passed alongside buf.
	 */
	switch (icmd->ipkt_opcode) {
	case SCMD_REPORT_LUN:
		(void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY_PAGE83:
		(void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case SCMD_INQUIRY:
		(void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
		    " lun=0x%%x failed");
		scsi_cmd++;
		break;

	case LA_ELS_PLOGI:
		(void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
		break;

	case LA_ELS_PRLI:
		(void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
		break;
	}

	if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
		struct fcp_rsp response, *rsp;
		uchar_t asc, ascq;
		caddr_t sense_key = NULL;
		struct fcp_rsp_info fcp_rsp_err, *bep;

		/*
		 * ipkt_nodma: response is directly addressable; otherwise
		 * it must be copied out of the DMA buffer via FCP_CP_IN.
		 */
		if (icmd->ipkt_nodma) {
			rsp = (struct fcp_rsp *)fpkt->pkt_resp;
			bep = (struct fcp_rsp_info *)((caddr_t)rsp +
			    sizeof (struct fcp_rsp));
		} else {
			rsp = &response;
			bep = &fcp_rsp_err;

			FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp));

			FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
			    bep, fpkt->pkt_resp_acc,
			    sizeof (struct fcp_rsp_info));
		}


		if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
			(void) sprintf(buf + strlen(buf),
			    " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
			    " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
			    " senselen=%%x. Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
			    rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
			    rsp->fcp_u.fcp_status.reserved_1,
			    rsp->fcp_response_len, rsp->fcp_sense_len);

			kmem_free(buf, 256);
			return;
		}

		if (rsp->fcp_u.fcp_status.rsp_len_set &&
		    bep->rsp_code != FCP_NO_FAILURE) {
			(void) sprintf(buf + strlen(buf),
			    " FCP Response code = 0x%x", bep->rsp_code);
		}

		if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
			struct scsi_extended_sense sense_info, *sense_ptr;

			/* Sense data follows the response info in pkt_resp. */
			if (icmd->ipkt_nodma) {
				sense_ptr = (struct scsi_extended_sense *)
				    ((caddr_t)fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len);
			} else {
				sense_ptr = &sense_info;

				FCP_CP_IN(fpkt->pkt_resp +
				    sizeof (struct fcp_rsp) +
				    rsp->fcp_response_len, &sense_info,
				    fpkt->pkt_resp_acc,
				    sizeof (struct scsi_extended_sense));
			}

			if (sense_ptr->es_key < NUM_SENSE_KEYS +
			    NUM_IMPL_SENSE_KEYS) {
				sense_key = sense_keys[sense_ptr->es_key];
			} else {
				sense_key = "Undefined";
			}

			asc = sense_ptr->es_add_code;
			ascq = sense_ptr->es_qual_code;

			(void) sprintf(buf + strlen(buf),
			    ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
			    " Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, sense_key,
			    asc, ascq);
		} else {
			(void) sprintf(buf + strlen(buf),
			    " : SCSI status=%%x. Giving up");

			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num,
			    rsp->fcp_u.fcp_status.scsi_status);
		}
	} else {
		/* Transport-level failure: report FC state and reason. */
		caddr_t state, reason, action, expln;

		(void) fc_ulp_pkt_error(fpkt, &state, &reason,
		    &action, &expln);

		(void) sprintf(buf + strlen(buf), ": State:%%s,"
		    " Reason:%%s. Giving up");

		if (scsi_cmd) {
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, plun->lun_num, state, reason);
		} else {
			fcp_log(CE_WARN, pptr->port_dip, buf,
			    ptgt->tgt_d_id, state, reason);
		}
	}

	kmem_free(buf, 256);
}
15372 15346
15373 15347
/*
 * Function:	fcp_handle_ipkt_errors
 *
 * Description:	Dispatches on the transport return code of a failed
 *		internal packet: busy/offline codes are requeued for
 *		retry (within the retry budget and port deadline), a PRLI
 *		rejected with FC_LOGINREQ is turned into a PLOGI, and
 *		anything else is logged as a hard failure unless a state
 *		change explains it.
 *
 * Argument:	*pptr		FCP port.
 *		*ptgt		FCP target the packet was aimed at.
 *		*icmd		The failed internal packet.
 *		rval		Transport error code (FC_*).
 *		op		Human-readable operation name for logging.
 *
 * Return Value: DDI_SUCCESS	Packet was requeued/retried.
 *		DDI_FAILURE	Given up on.
 */
static int
fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
    struct fcp_ipkt *icmd, int rval, caddr_t op)
{
	int	ret = DDI_FAILURE;
	char	*error;

	switch (rval) {
	case FC_DEVICE_BUSY_NEW_RSCN:
		/*
		 * This means that there was a new RSCN that the transport
		 * knows about (which the ULP *may* know about too) but the
		 * pkt that was sent down was related to an older RSCN. So, we
		 * are just going to reset the retry count and deadline and
		 * continue to retry. The idea is that transport is currently
		 * working on the new RSCN and will soon let the ULPs know
		 * about it and when it does the existing logic will kick in
		 * where it will change the tcount to indicate that something
		 * changed on the target. So, rediscovery will start and there
		 * will not be an infinite retry.
		 *
		 * For a full flow of how the RSCN info is transferred back and
		 * forth, see fp.c
		 */
		icmd->ipkt_retries = 0;
		icmd->ipkt_port->port_deadline = fcp_watchdog_time +
		    FCP_ICMD_DEADLINE;

		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		/* FALLTHROUGH */

	case FC_STATEC_BUSY:
	case FC_DEVICE_BUSY:
	case FC_PBUSY:
	case FC_FBUSY:
	case FC_TRAN_BUSY:
	case FC_OFFLINE:
		FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
		    FCP_BUF_LEVEL_3, 0,
		    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
		    rval, ptgt->tgt_d_id);
		/* Transient conditions: requeue while still retryable. */
		if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
		    fcp_is_retryable(icmd)) {
			fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
			ret = DDI_SUCCESS;
		}
		break;

	case FC_LOGINREQ:
		/*
		 * FC_LOGINREQ used to be handled just like all the cases
		 * above. It has been changed to handled a PRLI that fails
		 * with FC_LOGINREQ different than other ipkts that fail
		 * with FC_LOGINREQ. If a PRLI fails with FC_LOGINREQ it is
		 * a simple matter to turn it into a PLOGI instead, so that's
		 * exactly what we do here.
		 */
		if (icmd->ipkt_opcode == LA_ELS_PRLI) {
			ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
			    icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
			    icmd->ipkt_change_cnt, icmd->ipkt_cause);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
			    FCP_BUF_LEVEL_3, 0,
			    "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
			    rval, ptgt->tgt_d_id);
			if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
			    fcp_is_retryable(icmd)) {
				fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
				ret = DDI_SUCCESS;
			}
		}
		break;

	default:
		mutex_enter(&pptr->port_mutex);
		mutex_enter(&ptgt->tgt_mutex);
		/*
		 * Only warn when no state change occurred; if one did,
		 * rediscovery will handle it and the failure is expected.
		 */
		if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);

			(void) fc_ulp_error(rval, &error);
			fcp_log(CE_WARN, pptr->port_dip,
			    "!Failed to send %s to D_ID=%x error=%s",
			    op, ptgt->tgt_d_id, error);
		} else {
			FCP_TRACE(fcp_logq, pptr->port_instbuf,
			    fcp_trace, FCP_BUF_LEVEL_2, 0,
			    "fcp_handle_ipkt_errors,1: state change occured"
			    " for D_ID=0x%x", ptgt->tgt_d_id);
			mutex_exit(&ptgt->tgt_mutex);
			mutex_exit(&pptr->port_mutex);
		}
		break;
	}

	return (ret);
}
15475 15449
15476 15450
15477 15451 /*
15478 15452 * Check of outstanding commands on any LUN for this target
15479 15453 */
15480 15454 static int
15481 15455 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15482 15456 {
15483 15457 struct fcp_lun *plun;
15484 15458 struct fcp_pkt *cmd;
15485 15459
15486 15460 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15487 15461 mutex_enter(&plun->lun_mutex);
15488 15462 for (cmd = plun->lun_pkt_head; cmd != NULL;
15489 15463 cmd = cmd->cmd_forw) {
15490 15464 if (cmd->cmd_state == FCP_PKT_ISSUED) {
15491 15465 mutex_exit(&plun->lun_mutex);
15492 15466 return (FC_SUCCESS);
15493 15467 }
15494 15468 }
15495 15469 mutex_exit(&plun->lun_mutex);
15496 15470 }
15497 15471
15498 15472 return (FC_FAILURE);
15499 15473 }
15500 15474
/*
 * Function:	fcp_construct_map
 *
 * Description:	Builds a portmap array describing every non-orphan target
 *		on the port.  For each target, fp/fctl is asked to fill
 *		in the entry; if it cannot, the entry is synthesized here
 *		and marked PORT_DEVICE_OLD/PORT_DEVICE_INVALID.
 *
 * Argument:	*pptr		FCP port.
 *		*dev_cnt	Out: number of entries in the returned array.
 *
 * Return Value: Pointer to the allocated array (caller must free
 *		sizeof (fc_portmap_t) * *dev_cnt bytes), or NULL if the
 *		KM_NOSLEEP allocation failed.
 */
static fc_portmap_t *
fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
{
	int		i;
	fc_portmap_t	*devlist;
	fc_portmap_t	*devptr = NULL;
	struct fcp_tgt	*ptgt;

	mutex_enter(&pptr->port_mutex);
	/* First pass: count the non-orphan targets to size the array. */
	for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
				++*dev_cnt;
			}
		}
	}

	/*
	 * NOTE(review): if *dev_cnt is 0 this is a zero-size allocation --
	 * confirm kmem_zalloc(0, KM_NOSLEEP) behaves as the error path
	 * expects here.
	 */
	devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
	    KM_NOSLEEP);
	if (devlist == NULL) {
		mutex_exit(&pptr->port_mutex);
		fcp_log(CE_WARN, pptr->port_dip,
		    "!fcp%d: failed to allocate for portmap for construct map",
		    pptr->port_instance);
		return (devptr);
	}

	/* Second pass: fill one entry per non-orphan target. */
	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
				int ret;

				ret = fc_ulp_pwwn_to_portmap(
				    pptr->port_fp_handle,
				    (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
				    devlist);

				/* fp/fctl filled the entry for us. */
				if (ret == FC_SUCCESS) {
					devlist++;
					continue;
				}

				/*
				 * fp/fctl doesn't know this target; fill
				 * the entry by hand as an old/invalid one.
				 */
				devlist->map_pd = NULL;
				devlist->map_did.port_id = ptgt->tgt_d_id;
				devlist->map_hard_addr.hard_addr =
				    ptgt->tgt_hard_addr;

				devlist->map_state = PORT_DEVICE_INVALID;
				devlist->map_type = PORT_DEVICE_OLD;

				bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
				    &devlist->map_nwwn, FC_WWN_SIZE);

				bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
				    &devlist->map_pwwn, FC_WWN_SIZE);

				devlist++;
			}
		}
	}

	mutex_exit(&pptr->port_mutex);

	return (devptr);
}
/*
 * Function:	fcp_update_mpxio_path_verifybusy
 *
 * Description:	Informs MPxIO that every busy MPxIO-managed LUN on the
 *		port cannot accept regular I/O, by asking the hotplug
 *		path (fcp_pass_to_hp) to set each such path busy.
 *
 * Argument:	*pptr		FCP port whose LUNs are examined.
 */
static void
fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
{
	int		i;
	struct fcp_tgt	*ptgt;
	struct fcp_lun	*plun;

	for (i = 0; i < FCP_NUM_HASH; i++) {
		for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
		    ptgt = ptgt->tgt_next) {
			mutex_enter(&ptgt->tgt_mutex);
			for (plun = ptgt->tgt_lun; plun != NULL;
			    plun = plun->lun_next) {
				/* Only MPxIO-managed LUNs marked busy. */
				if (plun->lun_mpxio &&
				    plun->lun_state & FCP_LUN_BUSY) {
					if (!fcp_pass_to_hp(pptr, plun,
					    plun->lun_cip,
					    FCP_MPXIO_PATH_SET_BUSY,
					    pptr->port_link_cnt,
					    ptgt->tgt_change_cnt, 0, 0)) {
						FCP_TRACE(fcp_logq,
						    pptr->port_instbuf,
						    fcp_trace,
						    FCP_BUF_LEVEL_2, 0,
						    "path_verifybusy: "
						    "disable lun %p failed!",
						    plun);
					}
				}
			}
			mutex_exit(&ptgt->tgt_mutex);
		}
	}
}
15605 15579
15606 15580 static int
15607 15581 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
15608 15582 {
15609 15583 dev_info_t *cdip = NULL;
15610 15584 dev_info_t *pdip = NULL;
15611 15585
15612 15586 ASSERT(plun);
15613 15587
15614 15588 mutex_enter(&plun->lun_mutex);
15615 15589 if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
15616 15590 mutex_exit(&plun->lun_mutex);
15617 15591 return (NDI_FAILURE);
15618 15592 }
15619 15593 mutex_exit(&plun->lun_mutex);
15620 15594 cdip = mdi_pi_get_client(PIP(cip));
15621 15595 pdip = mdi_pi_get_phci(PIP(cip));
15622 15596
15623 15597 ASSERT(cdip != NULL);
15624 15598 ASSERT(pdip != NULL);
15625 15599
15626 15600 if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
15627 15601 /* LUN ready for IO */
15628 15602 (void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15629 15603 } else {
15630 15604 /* LUN busy to accept IO */
15631 15605 (void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15632 15606 }
15633 15607 return (NDI_SUCCESS);
15634 15608 }
15635 15609
/*
 * Function:	fcp_get_lun_path
 *
 * Description:	Returns the devinfo path of the LUN's device node (the
 *		MPxIO client node when the LUN is under MPxIO).
 *
 * Argument:	*plun		LUN to resolve (may be NULL).
 *
 * Return Value: Caller must free the returned string of MAXPATHLEN len.
 *		If the device is offline (-1 instance number) NULL
 *		will be returned.
 */
static char *
fcp_get_lun_path(struct fcp_lun *plun)
{
	dev_info_t	*dip = NULL;
	char		*path = NULL;
	mdi_pathinfo_t	*pip = NULL;

	if (plun == NULL) {
		return (NULL);
	}

	mutex_enter(&plun->lun_mutex);
	if (plun->lun_mpxio == 0) {
		/* Non-MPxIO: the child is a plain devinfo node. */
		dip = DIP(plun->lun_cip);
		mutex_exit(&plun->lun_mutex);
	} else {
		/*
		 * lun_cip must be accessed with lun_mutex held. Here
		 * plun->lun_cip either points to a valid node or it is NULL.
		 * Make a copy so that we can release lun_mutex.
		 */
		pip = PIP(plun->lun_cip);

		/*
		 * Increase ref count on the path so that we can release
		 * lun_mutex and still be sure that the pathinfo node (and thus
		 * also the client) is not deallocated. If pip is NULL, this
		 * has no effect.
		 */
		mdi_hold_path(pip);

		mutex_exit(&plun->lun_mutex);

		/* Get the client. If pip is NULL, we get NULL. */
		dip = mdi_pi_get_client(pip);
	}

	if (dip == NULL)
		goto out;
	if (ddi_get_instance(dip) < 0)
		goto out;

	/*
	 * KM_SLEEP allocation; the NULL check below is defensive --
	 * presumably KM_SLEEP cannot return NULL here, confirm against
	 * kmem_alloc(9F).
	 */
	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (path == NULL)
		goto out;

	(void) ddi_pathname(dip, path);

	/* Clean up. */
out:
	if (pip != NULL)
		mdi_rele_path(pip);

	/*
	 * In reality, the user wants a fully valid path (one they can open)
	 * but this string is lacking the mount point, and the minor node.
	 * It would be nice if we could "figure these out" somehow
	 * and fill them in. Otherwise, the userland code has to understand
	 * driver specific details of which minor node is the "best" or
	 * "right" one to expose. (Ex: which slice is the whole disk, or
	 * which tape doesn't rewind)
	 */
	return (path);
}
15705 15679
/*
 * fcp_scsi_bus_config
 *
 * bus_config(9E) entry point for the fcp nexus.
 *
 * BUS_CONFIG_ONE: retry ndi_busop_bus_config() up to
 * fcp_max_bus_config_retries times (fabric devices needed for root may
 * not have reported in yet), then drain the parent's taskq so that
 * pending node creation completes and try once more.
 *
 * BUS_CONFIG_DRIVER / BUS_CONFIG_ALL: wait until all devices report in
 * (port_tmp_cnt == 0) or FCP_INIT_WAIT_TIMEOUT expires, drain the
 * taskq, then configure.
 *
 * Returns the ndi_busop_bus_config() result, or NDI_FAILURE for an
 * unrecognized operation.
 */
static int
fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	int64_t reset_delay;
	int rval, retry = 0;
	struct fcp_port *pptr = fcp_dip2port(parent);

	/*
	 * Ticks remaining in the post-attach settling window, clamped
	 * at zero once the window has passed.
	 */
	reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
	    (ddi_get_lbolt64() - pptr->port_attach_time);
	if (reset_delay < 0) {
		reset_delay = 0;
	}

	if (fcp_bus_config_debug) {
		flag |= NDI_DEVI_DEBUG;
	}

	switch (op) {
	case BUS_CONFIG_ONE:
		/*
		 * Retry the command since we need to ensure
		 * the fabric devices are available for root
		 */
		while (retry++ < fcp_max_bus_config_retries) {
			rval = (ndi_busop_bus_config(parent,
			    flag | NDI_MDI_FALLBACK, op,
			    arg, childp, (clock_t)reset_delay));
			if (rval == 0) {
				return (rval);
			}
		}

		/*
		 * drain taskq to make sure nodes are created and then
		 * try again.
		 */
		taskq_wait(DEVI(parent)->devi_taskq);
		return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
		    op, arg, childp, 0));

	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL: {
		/*
		 * delay till all devices report in (port_tmp_cnt == 0)
		 * or FCP_INIT_WAIT_TIMEOUT
		 */
		mutex_enter(&pptr->port_mutex);
		while ((reset_delay > 0) && pptr->port_tmp_cnt) {
			(void) cv_timedwait(&pptr->port_config_cv,
			    &pptr->port_mutex,
			    ddi_get_lbolt() + (clock_t)reset_delay);
			/* Recompute how much of the window remains. */
			reset_delay =
			    (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
			    (ddi_get_lbolt64() - pptr->port_attach_time);
		}
		mutex_exit(&pptr->port_mutex);
		/* drain taskq to make sure nodes are created */
		taskq_wait(DEVI(parent)->devi_taskq);
		return (ndi_busop_bus_config(parent, flag, op,
		    arg, childp, 0));
	}

	default:
		return (NDI_FAILURE);
	}
	/*NOTREACHED*/
}
15774 15748
15775 15749 static int
15776 15750 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15777 15751 ddi_bus_config_op_t op, void *arg)
15778 15752 {
15779 15753 if (fcp_bus_config_debug) {
15780 15754 flag |= NDI_DEVI_DEBUG;
15781 15755 }
15782 15756
15783 15757 return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15784 15758 }
15785 15759
15786 15760
15787 15761 /*
15788 15762 * Routine to copy GUID into the lun structure.
15789 15763 * returns 0 if copy was successful and 1 if encountered a
15790 15764 * failure and did not copy the guid.
15791 15765 */
15792 15766 static int
15793 15767 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
15794 15768 {
15795 15769
15796 15770 int retval = 0;
15797 15771
15798 15772 /* add one for the null terminator */
15799 15773 const unsigned int len = strlen(guidp) + 1;
15800 15774
15801 15775 if ((guidp == NULL) || (plun == NULL)) {
15802 15776 return (1);
15803 15777 }
15804 15778
15805 15779 /*
15806 15780 * if the plun->lun_guid already has been allocated,
15807 15781 * then check the size. if the size is exact, reuse
15808 15782 * it....if not free it an allocate the required size.
15809 15783 * The reallocation should NOT typically happen
15810 15784 * unless the GUIDs reported changes between passes.
15811 15785 * We free up and alloc again even if the
15812 15786 * size was more than required. This is due to the
15813 15787 * fact that the field lun_guid_size - serves
15814 15788 * dual role of indicating the size of the wwn
15815 15789 * size and ALSO the allocation size.
15816 15790 */
15817 15791 if (plun->lun_guid) {
15818 15792 if (plun->lun_guid_size != len) {
15819 15793 /*
15820 15794 * free the allocated memory and
15821 15795 * initialize the field
15822 15796 * lun_guid_size to 0.
15823 15797 */
15824 15798 kmem_free(plun->lun_guid, plun->lun_guid_size);
15825 15799 plun->lun_guid = NULL;
15826 15800 plun->lun_guid_size = 0;
15827 15801 }
15828 15802 }
15829 15803 /*
15830 15804 * alloc only if not already done.
15831 15805 */
15832 15806 if (plun->lun_guid == NULL) {
15833 15807 plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15834 15808 if (plun->lun_guid == NULL) {
15835 15809 cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block:"
15836 15810 "Unable to allocate"
15837 15811 "Memory for GUID!!! size %d", len);
15838 15812 retval = 1;
15839 15813 } else {
15840 15814 plun->lun_guid_size = len;
15841 15815 }
15842 15816 }
15843 15817 if (plun->lun_guid) {
15844 15818 /*
15845 15819 * now copy the GUID
15846 15820 */
15847 15821 bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15848 15822 }
15849 15823 return (retval);
15850 15824 }
15851 15825
/*
 * fcp_reconfig_wait
 *
 * Wait for a rediscovery/reconfiguration to complete before continuing.
 *
 * pptr: port whose reconfiguration (tracked via port_tmp_cnt) we wait
 * on.  Blocks at most FCP_INIT_WAIT_TIMEOUT; always returns without
 * error, even if devices are still outstanding.
 */

static void
fcp_reconfig_wait(struct fcp_port *pptr)
{
	clock_t		reconfig_start, wait_timeout;

	/*
	 * Quick check. If pptr->port_tmp_cnt is 0, there is no
	 * reconfiguration in progress.
	 */

	mutex_enter(&pptr->port_mutex);
	if (pptr->port_tmp_cnt == 0) {
		mutex_exit(&pptr->port_mutex);
		return;
	}
	mutex_exit(&pptr->port_mutex);

	/*
	 * If we cause a reconfig by raising power, delay until all devices
	 * report in (port_tmp_cnt returns to 0)
	 */

	reconfig_start = ddi_get_lbolt();
	wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);

	mutex_enter(&pptr->port_mutex);

	/* Sleep on port_config_cv until all devices report in or timeout. */
	while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
	    pptr->port_tmp_cnt) {

		(void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
		    reconfig_start + wait_timeout);
	}

	mutex_exit(&pptr->port_mutex);

	/*
	 * Even if fcp_tmp_count isn't 0, continue without error. The port
	 * we want may still be ok. If not, it will error out later
	 */
}
15899 15873
15900 15874 /*
15901 15875 * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
15902 15876 * We rely on the fcp_global_mutex to provide protection against changes to
15903 15877 * the fcp_lun_blacklist.
15904 15878 *
15905 15879 * You can describe a list of target port WWNs and LUN numbers which will
15906 15880 * not be configured. LUN numbers will be interpreted as decimal. White
15907 15881 * spaces and ',' can be used in the list of LUN numbers.
15908 15882 *
15909 15883 * To prevent LUNs 1 and 2 from being configured for target
15910 15884 * port 510000f010fd92a1 and target port 510000e012079df1, set:
15911 15885 *
15912 15886 * pwwn-lun-blacklist=
15913 15887 * "510000f010fd92a1,1,2",
15914 15888 * "510000e012079df1,1,2";
15915 15889 */
15916 15890 static void
15917 15891 fcp_read_blacklist(dev_info_t *dip,
15918 15892 struct fcp_black_list_entry **pplun_blacklist)
15919 15893 {
15920 15894 char **prop_array = NULL;
15921 15895 char *curr_pwwn = NULL;
15922 15896 char *curr_lun = NULL;
15923 15897 uint32_t prop_item = 0;
15924 15898 int idx = 0;
15925 15899 int len = 0;
15926 15900
15927 15901 ASSERT(mutex_owned(&fcp_global_mutex));
15928 15902 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
15929 15903 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
15930 15904 LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
15931 15905 return;
15932 15906 }
15933 15907
15934 15908 for (idx = 0; idx < prop_item; idx++) {
15935 15909
15936 15910 curr_pwwn = prop_array[idx];
15937 15911 while (*curr_pwwn == ' ') {
15938 15912 curr_pwwn++;
15939 15913 }
15940 15914 if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
15941 15915 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15942 15916 ", please check.", curr_pwwn);
15943 15917 continue;
15944 15918 }
15945 15919 if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
15946 15920 (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
15947 15921 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15948 15922 ", please check.", curr_pwwn);
15949 15923 continue;
15950 15924 }
15951 15925 for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
15952 15926 if (isxdigit(curr_pwwn[len]) != TRUE) {
15953 15927 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
15954 15928 "blacklist, please check.", curr_pwwn);
15955 15929 break;
15956 15930 }
15957 15931 }
15958 15932 if (len != sizeof (la_wwn_t) * 2) {
15959 15933 continue;
15960 15934 }
15961 15935
15962 15936 curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
15963 15937 *(curr_lun - 1) = '\0';
15964 15938 fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
15965 15939 }
15966 15940
15967 15941 ddi_prop_free(prop_array);
15968 15942 }
15969 15943
/*
 * Get the masking info about one remote target port designated by wwn.
 * Lun ids could be separated by ',' or white spaces.
 *
 * curr_pwwn: NUL-terminated ASCII WWN (used only in error messages).
 * curr_lun:  remainder of the property string holding the LUN numbers.
 * Each valid decimal LUN is recorded via fcp_add_one_mask().
 */
static void
fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
    struct fcp_black_list_entry **pplun_blacklist)
{
	int idx = 0;
	uint32_t offset = 0;
	unsigned long lun_id = 0;
	char lunid_buf[16];
	char *pend = NULL;
	int illegal_digit = 0;

	/* One token (delimited by ',', ' ' or end of string) per pass. */
	while (offset < strlen(curr_lun)) {
		/* idx = token length; count any non-digit characters. */
		while ((curr_lun[offset + idx] != ',') &&
		    (curr_lun[offset + idx] != '\0') &&
		    (curr_lun[offset + idx] != ' ')) {
			if (isdigit(curr_lun[offset + idx]) == 0) {
				illegal_digit++;
			}
			idx++;
		}
		if (illegal_digit > 0) {
			offset += (idx+1); /* To the start of next lun */
			idx = 0;
			illegal_digit = 0;
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check digits.",
			    curr_lun, curr_pwwn);
			continue;
		}
		/* Token must fit in lunid_buf, including the terminator. */
		if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check the length of LUN#.",
			    curr_lun, curr_pwwn);
			break;
		}
		if (idx == 0) { /* ignore ' ' or ',' or '\0' */
			offset++;
			continue;
		}

		bcopy(curr_lun + offset, lunid_buf, idx);
		lunid_buf[idx] = '\0';
		/* Decimal conversion; on success record one mask entry. */
		if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
			fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
		} else {
			fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
			    "the blacklist, please check %s.",
			    curr_lun, curr_pwwn, lunid_buf);
		}
		offset += (idx+1); /* To the start of next lun */
		idx = 0;
	}
}
16027 16001
16028 16002 /*
16029 16003 * Add one masking record
16030 16004 */
16031 16005 static void
16032 16006 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
16033 16007 struct fcp_black_list_entry **pplun_blacklist)
16034 16008 {
16035 16009 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
16036 16010 struct fcp_black_list_entry *new_entry = NULL;
16037 16011 la_wwn_t wwn;
16038 16012
16039 16013 fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
16040 16014 while (tmp_entry) {
16041 16015 if ((bcmp(&tmp_entry->wwn, &wwn,
16042 16016 sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
16043 16017 return;
16044 16018 }
16045 16019
16046 16020 tmp_entry = tmp_entry->next;
16047 16021 }
16048 16022
16049 16023 /* add to black list */
16050 16024 new_entry = (struct fcp_black_list_entry *)kmem_zalloc
16051 16025 (sizeof (struct fcp_black_list_entry), KM_SLEEP);
16052 16026 bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
16053 16027 new_entry->lun = lun_id;
16054 16028 new_entry->masked = 0;
16055 16029 new_entry->next = *pplun_blacklist;
16056 16030 *pplun_blacklist = new_entry;
16057 16031 }
16058 16032
16059 16033 /*
16060 16034 * Check if we should mask the specified lun of this fcp_tgt
16061 16035 */
16062 16036 static int
16063 16037 fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id)
16064 16038 {
16065 16039 struct fcp_black_list_entry *remote_port;
16066 16040
16067 16041 remote_port = fcp_lun_blacklist;
16068 16042 while (remote_port != NULL) {
16069 16043 if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
16070 16044 if (remote_port->lun == lun_id) {
16071 16045 remote_port->masked++;
16072 16046 if (remote_port->masked == 1) {
16073 16047 fcp_log(CE_NOTE, NULL, "LUN %d of port "
16074 16048 "%02x%02x%02x%02x%02x%02x%02x%02x "
16075 16049 "is masked due to black listing.\n",
16076 16050 lun_id, wwn->raw_wwn[0],
16077 16051 wwn->raw_wwn[1], wwn->raw_wwn[2],
16078 16052 wwn->raw_wwn[3], wwn->raw_wwn[4],
16079 16053 wwn->raw_wwn[5], wwn->raw_wwn[6],
16080 16054 wwn->raw_wwn[7]);
16081 16055 }
16082 16056 return (TRUE);
16083 16057 }
16084 16058 }
16085 16059 remote_port = remote_port->next;
16086 16060 }
16087 16061 return (FALSE);
16088 16062 }
16089 16063
16090 16064 /*
16091 16065 * Release all allocated resources
16092 16066 */
16093 16067 static void
16094 16068 fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist)
16095 16069 {
16096 16070 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
16097 16071 struct fcp_black_list_entry *current_entry = NULL;
16098 16072
16099 16073 ASSERT(mutex_owned(&fcp_global_mutex));
16100 16074 /*
16101 16075 * Traverse all luns
16102 16076 */
16103 16077 while (tmp_entry) {
16104 16078 current_entry = tmp_entry;
16105 16079 tmp_entry = tmp_entry->next;
16106 16080 kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
16107 16081 }
16108 16082 *pplun_blacklist = NULL;
16109 16083 }
16110 16084
/*
 * In fcp module,
 * pkt@scsi_pkt, cmd@fcp_pkt, icmd@fcp_ipkt, fpkt@fc_packet, pptr@fcp_port
 *
 * tran_init_pkt(9E)-style entry point for pseudo FC HBAs: allocates (or
 * reuses) a scsi_pkt with the fcp_pkt plus FCA private area laid out
 * behind it in a single allocation, then maps in the data buffer.
 */
static struct scsi_pkt *
fcp_pseudo_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
	fcp_port_t	*pptr = ADDR2FCP(ap);
	fcp_pkt_t	*cmd = NULL;
	fc_frame_hdr_t	*hp;

	/*
	 * First step: get the packet
	 */
	if (pkt == NULL) {
		pkt = scsi_hba_pkt_alloc(pptr->port_dip, ap, cmdlen, statuslen,
		    tgtlen, sizeof (fcp_pkt_t) + pptr->port_priv_pkt_len,
		    callback, arg);
		if (pkt == NULL) {
			return (NULL);
		}

		/*
		 * All fields in scsi_pkt will be initialized properly or
		 * set to zero. We need do nothing for scsi_pkt.
		 */
		/*
		 * But it's our responsibility to link other related data
		 * structures. Their initialization will be done, just
		 * before the scsi_pkt will be sent to FCA.
		 */
		cmd = PKT2CMD(pkt);
		cmd->cmd_pkt = pkt;
		cmd->cmd_fp_pkt = &cmd->cmd_fc_packet;
		/*
		 * fc_packet_t: the FCA private area follows the fcp_pkt
		 * in the same scsi_hba_pkt_alloc() allocation.
		 */
		cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
		cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
		    sizeof (struct fcp_pkt));
		cmd->cmd_fp_pkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
		cmd->cmd_fp_pkt->pkt_cmdlen = sizeof (struct fcp_cmd);
		cmd->cmd_fp_pkt->pkt_resp = cmd->cmd_fcp_rsp;
		cmd->cmd_fp_pkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
		/*
		 * Fill in the Fabric Channel Header
		 */
		hp = &cmd->cmd_fp_pkt->pkt_cmd_fhdr;
		hp->r_ctl = R_CTL_COMMAND;
		hp->rsvd = 0;
		hp->type = FC_TYPE_SCSI_FCP;
		hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
		hp->seq_id = 0;
		hp->df_ctl = 0;
		hp->seq_cnt = 0;
		hp->ox_id = 0xffff;
		hp->rx_id = 0xffff;
		hp->ro = 0;
	} else {
		/*
		 * Packet reuse: only the stale port device handle is
		 * cleared.  NOTE(review): other fields are deliberately
		 * left as-is -- confirm nothing else needs resetting.
		 */
		FCP_TRACE(fcp_logq, pptr->port_instbuf,
		    fcp_trace, FCP_BUF_LEVEL_6, 0,
		    "reusing pkt, flags %d", flags);
		cmd = PKT2CMD(pkt);
		if (cmd->cmd_fp_pkt->pkt_pd) {
			cmd->cmd_fp_pkt->pkt_pd = NULL;
		}
	}

	/*
	 * Second step: dma allocation/move
	 */
	if (bp && bp->b_bcount != 0) {
		/*
		 * Mark if it's read or write
		 */
		if (bp->b_flags & B_READ) {
			cmd->cmd_flags |= CFLAG_IS_READ;
		} else {
			cmd->cmd_flags &= ~CFLAG_IS_READ;
		}

		/* Map the buffer into kernel virtual address space. */
		bp_mapin(bp);
		cmd->cmd_fp_pkt->pkt_data = bp->b_un.b_addr;
		cmd->cmd_fp_pkt->pkt_datalen = bp->b_bcount;
		cmd->cmd_fp_pkt->pkt_data_resid = 0;
	} else {
		/*
		 * It seldom happens, except when CLUSTER or SCSI_VHCI wants
		 * to send zero-length read/write.
		 */
		cmd->cmd_fp_pkt->pkt_data = NULL;
		cmd->cmd_fp_pkt->pkt_datalen = 0;
	}

	return (pkt);
}
16213 16187
16214 16188 static void
16215 16189 fcp_pseudo_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16216 16190 {
16217 16191 fcp_port_t *pptr = ADDR2FCP(ap);
16218 16192
16219 16193 /*
16220 16194 * First we let FCA to uninitilize private part.
16221 16195 */
16222 16196 (void) fc_ulp_uninit_packet(pptr->port_fp_handle,
16223 16197 PKT2CMD(pkt)->cmd_fp_pkt);
16224 16198
16225 16199 /*
16226 16200 * Then we uninitialize fc_packet.
16227 16201 */
16228 16202
16229 16203 /*
16230 16204 * Thirdly, we uninitializae fcp_pkt.
16231 16205 */
16232 16206
16233 16207 /*
16234 16208 * In the end, we free scsi_pkt.
16235 16209 */
16236 16210 scsi_hba_pkt_free(ap, pkt);
16237 16211 }
16238 16212
/*
 * fcp_pseudo_start
 *
 * tran_start(9E) entry point for pseudo FC HBAs.  Builds the FCP
 * command, the fc_packet, and the scsi_pkt result fields, then hands
 * the command to the transport.  FLAG_NOINTR commands are executed by
 * polling via fcp_dopoll().
 *
 * Returns TRAN_ACCEPT, TRAN_BUSY or TRAN_FATAL_ERROR (or the
 * fcp_dopoll() result for polled commands).
 */
static int
fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	fcp_port_t	*pptr = ADDR2FCP(ap);
	fcp_lun_t	*plun = ADDR2LUN(ap);
	fcp_tgt_t	*ptgt = plun->lun_tgt;
	fcp_pkt_t	*cmd = PKT2CMD(pkt);
	fcp_cmd_t	*fcmd = &cmd->cmd_fcp_cmd;
	fc_packet_t	*fpkt = cmd->cmd_fp_pkt;
	int		rval;

	fpkt->pkt_pd = ptgt->tgt_pd_handle;
	(void) fc_ulp_init_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt, 1);

	/*
	 * Build the FCP command: copy the CDB, set the data length and
	 * LUN address, and map the scsi_pkt tag flags onto the FCP
	 * task-queueing type.
	 */
	bcopy(pkt->pkt_cdbp, fcmd->fcp_cdb, pkt->pkt_cdblen);
	fcmd->fcp_data_len = fpkt->pkt_datalen;
	fcmd->fcp_ent_addr = plun->lun_addr;
	if (pkt->pkt_flags & FLAG_HTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
	} else if (pkt->pkt_flags & FLAG_OTAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
	} else if (pkt->pkt_flags & FLAG_STAG) {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
	} else {
		fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
	}

	/* Direction bits: CFLAG_IS_READ was set in fcp_pseudo_init_pkt(). */
	if (cmd->cmd_flags & CFLAG_IS_READ) {
		fcmd->fcp_cntl.cntl_read_data = 1;
		fcmd->fcp_cntl.cntl_write_data = 0;
	} else {
		fcmd->fcp_cntl.cntl_read_data = 0;
		fcmd->fcp_cntl.cntl_write_data = 1;
	}

	/*
	 * Then we need initialize fc_packet_t too.  The transport
	 * timeout is padded 2s beyond the SCSI timeout -- presumably so
	 * the command times out at the SCSI layer first; TODO confirm.
	 */
	fpkt->pkt_timeout = pkt->pkt_time + 2;
	fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
	fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
	if (cmd->cmd_flags & CFLAG_IS_READ) {
		fpkt->pkt_tran_type = FC_PKT_FCP_READ;
	} else {
		fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
	}

	if (pkt->pkt_flags & FLAG_NOINTR) {
		fpkt->pkt_comp = NULL;
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
	} else {
		fpkt->pkt_comp = fcp_cmd_callback;
		fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
		if (pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
			fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
		}
	}

	/*
	 * Lastly, we need initialize scsi_pkt
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;
	pkt->pkt_resid = 0;

	/*
	 * if interrupts aren't allowed (e.g. at dump time) then we'll
	 * have to do polled I/O
	 */
	if (pkt->pkt_flags & FLAG_NOINTR) {
		return (fcp_dopoll(pptr, cmd));
	}

	cmd->cmd_state = FCP_PKT_ISSUED;
	rval = fcp_transport(pptr->port_fp_handle, fpkt, 0);
	if (rval == FC_SUCCESS) {
		return (TRAN_ACCEPT);
	}

	/*
	 * Need more consideration
	 *
	 * pkt->pkt_flags & FLAG_NOQUEUE could abort other pkt
	 */
	cmd->cmd_state = FCP_PKT_IDLE;
	if (rval == FC_TRAN_BUSY) {
		return (TRAN_BUSY);
	} else {
		return (TRAN_FATAL_ERROR);
	}
}
16335 16309
/*
 * scsi_poll will always call tran_sync_pkt for pseudo FC-HBAs
 * SCSA will initialize it to scsi_sync_cache_pkt for physical FC-HBAs
 *
 * Intentionally a no-op apart from tracing: pseudo FC HBAs have no DMA
 * handles bound here to synchronize.
 */
static void
fcp_pseudo_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	FCP_TRACE(fcp_logq, "fcp_pseudo_sync_pkt", fcp_trace,
	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
}
16346 16320
/*
 * scsi_dmafree will always call tran_dmafree, when STATE_ARQ_DONE
 *
 * Intentionally a no-op apart from tracing: there are no DMA resources
 * allocated by this driver's pseudo init-pkt path to release.
 */
static void
fcp_pseudo_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	FCP_TRACE(fcp_logq, "fcp_pseudo_dmafree", fcp_trace,
	    FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
}
↓ open down ↓ |
2104 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX