7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/common/io/scsi/targets/sd.c
+++ new/usr/src/uts/common/io/scsi/targets/sd.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25 /*
26 26 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
27 27 * Copyright (c) 2012 by Delphix. All rights reserved.
28 28 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
29 29 * Copyright 2012 DEY Storage Systems, Inc. All rights reserved.
30 30 */
31 31 /*
32 32 * Copyright 2011 cyril.galibern@opensvc.com
33 33 */
34 34
35 35 /*
36 36 * SCSI disk target driver.
37 37 */
38 38 #include <sys/scsi/scsi.h>
39 39 #include <sys/dkbad.h>
40 40 #include <sys/dklabel.h>
41 41 #include <sys/dkio.h>
42 42 #include <sys/fdio.h>
43 43 #include <sys/cdio.h>
44 44 #include <sys/mhd.h>
45 45 #include <sys/vtoc.h>
46 46 #include <sys/dktp/fdisk.h>
47 47 #include <sys/kstat.h>
48 48 #include <sys/vtrace.h>
49 49 #include <sys/note.h>
50 50 #include <sys/thread.h>
51 51 #include <sys/proc.h>
52 52 #include <sys/efi_partition.h>
53 53 #include <sys/var.h>
54 54 #include <sys/aio_req.h>
55 55
56 56 #ifdef __lock_lint
57 57 #define _LP64
58 58 #define __amd64
59 59 #endif
60 60
61 61 #if (defined(__fibre))
62 62 /* Note: is there a leadville version of the following? */
63 63 #include <sys/fc4/fcal_linkapp.h>
64 64 #endif
65 65 #include <sys/taskq.h>
66 66 #include <sys/uuid.h>
67 67 #include <sys/byteorder.h>
68 68 #include <sys/sdt.h>
69 69
70 70 #include "sd_xbuf.h"
71 71
72 72 #include <sys/scsi/targets/sddef.h>
73 73 #include <sys/cmlb.h>
74 74 #include <sys/sysevent/eventdefs.h>
75 75 #include <sys/sysevent/dev.h>
76 76
77 77 #include <sys/fm/protocol.h>
78 78
79 79 /*
80 80 * Loadable module info.
81 81 */
82 82 #if (defined(__fibre))
83 83 #define SD_MODULE_NAME "SCSI SSA/FCAL Disk Driver"
84 84 #else /* !__fibre */
85 85 #define SD_MODULE_NAME "SCSI Disk Driver"
86 86 #endif /* !__fibre */
87 87
88 88 /*
89 89 * Define the interconnect type, to allow the driver to distinguish
90 90 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
91 91 *
92 92 * This is really for backward compatibility. In the future, the driver
93 93 * should actually check the "interconnect-type" property as reported by
94 94 * the HBA; however at present this property is not defined by all HBAs,
95 95 * so we will use this #define (1) to permit the driver to run in
96 96 * backward-compatibility mode; and (2) to print a notification message
97 97 * if an FC HBA does not support the "interconnect-type" property. The
98 98 * behavior of the driver will be to assume parallel SCSI behaviors unless
99 99 * the "interconnect-type" property is defined by the HBA **AND** has a
100 100 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
101 101 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
102 102 * Channel behaviors (as per the old ssd). (Note that the
103 103 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
104 104 * will result in the driver assuming parallel SCSI behaviors.)
105 105 *
106 106 * (see common/sys/scsi/impl/services.h)
107 107 *
108 108 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
109 109 * since some FC HBAs may already support that, and there is some code in
110 110 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
111 111 * default would confuse that code, and besides things should work fine
112 112 * anyways if the FC HBA already reports INTERCONNECT_FABRIC for the
113 113 * "interconnect_type" property.
114 114 *
115 115 */
116 116 #if (defined(__fibre))
117 117 #define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_FIBRE
118 118 #else
119 119 #define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_PARALLEL
120 120 #endif
121 121
122 122 /*
123 123 * The name of the driver, established from the module name in _init.
124 124 */
125 125 static char *sd_label = NULL;
126 126
127 127 /*
128 128 * Driver name is unfortunately prefixed on some driver.conf properties.
129 129 */
130 130 #if (defined(__fibre))
131 131 #define sd_max_xfer_size ssd_max_xfer_size
132 132 #define sd_config_list ssd_config_list
133 133 static char *sd_max_xfer_size = "ssd_max_xfer_size";
134 134 static char *sd_config_list = "ssd-config-list";
135 135 #else
136 136 static char *sd_max_xfer_size = "sd_max_xfer_size";
137 137 static char *sd_config_list = "sd-config-list";
138 138 #endif
139 139
140 140 /*
141 141 * Driver global variables
142 142 */
143 143
144 144 #if (defined(__fibre))
145 145 /*
146 146 * These #defines are to avoid namespace collisions that occur because this
147 147 * code is currently used to compile two separate driver modules: sd and ssd.
148 148 * All global variables need to be treated this way (even if declared static)
149 149 * in order to allow the debugger to resolve the names properly.
150 150 * It is anticipated that in the near future the ssd module will be obsoleted,
151 151 * at which time this namespace issue should go away.
152 152 */
153 153 #define sd_state ssd_state
154 154 #define sd_io_time ssd_io_time
155 155 #define sd_failfast_enable ssd_failfast_enable
156 156 #define sd_ua_retry_count ssd_ua_retry_count
157 157 #define sd_report_pfa ssd_report_pfa
158 158 #define sd_max_throttle ssd_max_throttle
159 159 #define sd_min_throttle ssd_min_throttle
160 160 #define sd_rot_delay ssd_rot_delay
161 161
162 162 #define sd_retry_on_reservation_conflict \
163 163 ssd_retry_on_reservation_conflict
164 164 #define sd_reinstate_resv_delay ssd_reinstate_resv_delay
165 165 #define sd_resv_conflict_name ssd_resv_conflict_name
166 166
167 167 #define sd_component_mask ssd_component_mask
168 168 #define sd_level_mask ssd_level_mask
169 169 #define sd_debug_un ssd_debug_un
170 170 #define sd_error_level ssd_error_level
171 171
172 172 #define sd_xbuf_active_limit ssd_xbuf_active_limit
173 173 #define sd_xbuf_reserve_limit ssd_xbuf_reserve_limit
174 174
175 175 #define sd_tr ssd_tr
176 176 #define sd_reset_throttle_timeout ssd_reset_throttle_timeout
177 177 #define sd_qfull_throttle_timeout ssd_qfull_throttle_timeout
178 178 #define sd_qfull_throttle_enable ssd_qfull_throttle_enable
179 179 #define sd_check_media_time ssd_check_media_time
180 180 #define sd_wait_cmds_complete ssd_wait_cmds_complete
181 181 #define sd_label_mutex ssd_label_mutex
182 182 #define sd_detach_mutex ssd_detach_mutex
183 183 #define sd_log_buf ssd_log_buf
184 184 #define sd_log_mutex ssd_log_mutex
185 185
186 186 #define sd_disk_table ssd_disk_table
187 187 #define sd_disk_table_size ssd_disk_table_size
188 188 #define sd_sense_mutex ssd_sense_mutex
189 189 #define sd_cdbtab ssd_cdbtab
190 190
191 191 #define sd_cb_ops ssd_cb_ops
192 192 #define sd_ops ssd_ops
193 193 #define sd_additional_codes ssd_additional_codes
194 194 #define sd_tgops ssd_tgops
195 195
196 196 #define sd_minor_data ssd_minor_data
197 197 #define sd_minor_data_efi ssd_minor_data_efi
198 198
199 199 #define sd_tq ssd_tq
200 200 #define sd_wmr_tq ssd_wmr_tq
201 201 #define sd_taskq_name ssd_taskq_name
202 202 #define sd_wmr_taskq_name ssd_wmr_taskq_name
203 203 #define sd_taskq_minalloc ssd_taskq_minalloc
204 204 #define sd_taskq_maxalloc ssd_taskq_maxalloc
205 205
206 206 #define sd_dump_format_string ssd_dump_format_string
207 207
208 208 #define sd_iostart_chain ssd_iostart_chain
209 209 #define sd_iodone_chain ssd_iodone_chain
210 210
211 211 #define sd_pm_idletime ssd_pm_idletime
212 212
213 213 #define sd_force_pm_supported ssd_force_pm_supported
214 214
215 215 #define sd_dtype_optical_bind ssd_dtype_optical_bind
216 216
217 217 #define sd_ssc_init ssd_ssc_init
218 218 #define sd_ssc_send ssd_ssc_send
219 219 #define sd_ssc_fini ssd_ssc_fini
220 220 #define sd_ssc_assessment ssd_ssc_assessment
221 221 #define sd_ssc_post ssd_ssc_post
222 222 #define sd_ssc_print ssd_ssc_print
223 223 #define sd_ssc_ereport_post ssd_ssc_ereport_post
224 224 #define sd_ssc_set_info ssd_ssc_set_info
225 225 #define sd_ssc_extract_info ssd_ssc_extract_info
226 226
227 227 #endif
228 228
229 229 #ifdef SDDEBUG
230 230 int sd_force_pm_supported = 0;
231 231 #endif /* SDDEBUG */
232 232
233 233 void *sd_state = NULL;
234 234 int sd_io_time = SD_IO_TIME;
235 235 int sd_failfast_enable = 1;
236 236 int sd_ua_retry_count = SD_UA_RETRY_COUNT;
237 237 int sd_report_pfa = 1;
238 238 int sd_max_throttle = SD_MAX_THROTTLE;
239 239 int sd_min_throttle = SD_MIN_THROTTLE;
240 240 int sd_rot_delay = 4; /* Default 4ms Rotation delay */
241 241 int sd_qfull_throttle_enable = TRUE;
242 242
243 243 int sd_retry_on_reservation_conflict = 1;
244 244 int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
245 245 _NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))
246 246
247 247 static int sd_dtype_optical_bind = -1;
248 248
249 249 /* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
250 250 static char *sd_resv_conflict_name = "sd_retry_on_reservation_conflict";
251 251
252 252 /*
253 253 * Global data for debug logging. To enable debug printing, sd_component_mask
254 254 * and sd_level_mask should be set to the desired bit patterns as outlined in
255 255 * sddef.h.
256 256 */
257 257 uint_t sd_component_mask = 0x0;
258 258 uint_t sd_level_mask = 0x0;
259 259 struct sd_lun *sd_debug_un = NULL;
260 260 uint_t sd_error_level = SCSI_ERR_RETRYABLE;
261 261
262 262 /* Note: these may go away in the future... */
263 263 static uint32_t sd_xbuf_active_limit = 512;
264 264 static uint32_t sd_xbuf_reserve_limit = 16;
265 265
266 -static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, 0, 0, 0 };
266 +static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, {0} };
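This hunk is the class of warning the issue title refers to: with -Wno-missing-braces removed from Makefile.uts, GCC's -Wmissing-braces flags initializers that rely on brace elision for nested aggregates. The old initializer spelled out three scalar zeros for the members of sd_tr's embedded aggregate; the new one braces that sub-object explicitly, and its remaining members are implicitly zeroed. A minimal standalone analogue (the struct shapes here are hypothetical, chosen only to reproduce the warning):

	struct inner { int a; int b; int c; };
	struct outer { void *p; void *q; void *r; struct inner i; };

	/* Warns under -Wmissing-braces: the zeros reach i.a, i.b,
	 * and i.c only through brace elision. */
	static struct outer t_old = { NULL, NULL, NULL, 0, 0, 0 };

	/* Clean: the nested aggregate gets its own braces, and any
	 * members of i left unnamed are implicitly zero. */
	static struct outer t_new = { NULL, NULL, NULL, {0} };

Both forms produce an identical object; only the diagnostic changes.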
267 267
268 268 /*
269 269 * Timer value used to reset the throttle after it has been reduced
270 270 * (typically in response to TRAN_BUSY or STATUS_QFULL)
271 271 */
272 272 static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT;
273 273 static int sd_qfull_throttle_timeout = SD_QFULL_THROTTLE_TIMEOUT;
274 274
275 275 /*
276 276 * Interval value associated with the media change scsi watch.
277 277 */
278 278 static int sd_check_media_time = 3000000;
279 279
280 280 /*
281 281 * Wait value used for in progress operations during a DDI_SUSPEND
282 282 */
283 283 static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE;
284 284
285 285 /*
286 286 * sd_label_mutex protects a static buffer used in the disk label
287 287 * component of the driver
288 288 */
289 289 static kmutex_t sd_label_mutex;
290 290
291 291 /*
292 292 * sd_detach_mutex protects un_layer_count, un_detach_count, and
293 293 * un_opens_in_progress in the sd_lun structure.
294 294 */
295 295 static kmutex_t sd_detach_mutex;
296 296
297 297 _NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
298 298 sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))
299 299
300 300 /*
301 301 * Global buffer and mutex for debug logging
302 302 */
303 303 static char sd_log_buf[1024];
304 304 static kmutex_t sd_log_mutex;
305 305
306 306 /*
307 307 * Structs and globals for recording attached lun information.
308 308 * This maintains a chain. Each node in the chain represents a SCSI controller.
309 309 * The structure records the number of luns attached to each target connected
310 310 * with the controller.
311 311 * For parallel scsi device only.
312 312 */
313 313 struct sd_scsi_hba_tgt_lun {
314 314 struct sd_scsi_hba_tgt_lun *next;
315 315 dev_info_t *pdip;
316 316 int nlun[NTARGETS_WIDE];
317 317 };
318 318
319 319 /*
320 320 * Flag to indicate the lun is attached or detached
321 321 */
322 322 #define SD_SCSI_LUN_ATTACH 0
323 323 #define SD_SCSI_LUN_DETACH 1
324 324
325 325 static kmutex_t sd_scsi_target_lun_mutex;
326 326 static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL;
327 327
328 328 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
329 329 sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))
330 330
331 331 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
332 332 sd_scsi_target_lun_head))
333 333
334 334 /*
335 335 * "Smart" Probe Caching structs, globals, #defines, etc.
336 336 * For parallel scsi and non-self-identify device only.
337 337 */
338 338
339 339 /*
340 340 * The following resources and routines are implemented to support
341 341 * "smart" probing, which caches the scsi_probe() results in an array,
342 342 * in order to help avoid long probe times.
343 343 */
344 344 struct sd_scsi_probe_cache {
345 345 struct sd_scsi_probe_cache *next;
346 346 dev_info_t *pdip;
347 347 int cache[NTARGETS_WIDE];
348 348 };
349 349
350 350 static kmutex_t sd_scsi_probe_cache_mutex;
351 351 static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;
352 352
353 353 /*
354 354 * Really we only need protection on the head of the linked list, but
355 355 * better safe than sorry.
356 356 */
357 357 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
358 358 sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))
359 359
360 360 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
361 361 sd_scsi_probe_cache_head))
362 362
363 363 /*
364 364 * Power attribute table
365 365 */
366 366 static sd_power_attr_ss sd_pwr_ss = {
367 367 { "NAME=spindle-motor", "0=off", "1=on", NULL },
368 368 {0, 100},
369 369 {30, 0},
370 370 {20000, 0}
371 371 };
372 372
373 373 static sd_power_attr_pc sd_pwr_pc = {
374 374 { "NAME=spindle-motor", "0=stopped", "1=standby", "2=idle",
375 375 "3=active", NULL },
376 376 {0, 0, 0, 100},
377 377 {90, 90, 20, 0},
378 378 {15000, 15000, 1000, 0}
379 379 };
380 380
381 381 /*
382 382 * Power level to power condition
383 383 */
384 384 static int sd_pl2pc[] = {
385 385 SD_TARGET_START_VALID,
386 386 SD_TARGET_STANDBY,
387 387 SD_TARGET_IDLE,
388 388 SD_TARGET_ACTIVE
389 389 };
390 390
391 391 /*
392 392 * Vendor specific data name property declarations
393 393 */
394 394
395 395 #if defined(__fibre) || defined(__i386) || defined(__amd64)
396 396
397 397 static sd_tunables seagate_properties = {
398 398 SEAGATE_THROTTLE_VALUE,
399 399 0,
400 400 0,
401 401 0,
402 402 0,
403 403 0,
404 404 0,
405 405 0,
406 406 0
407 407 };
408 408
409 409
410 410 static sd_tunables fujitsu_properties = {
411 411 FUJITSU_THROTTLE_VALUE,
412 412 0,
413 413 0,
414 414 0,
415 415 0,
416 416 0,
417 417 0,
418 418 0,
419 419 0
420 420 };
421 421
422 422 static sd_tunables ibm_properties = {
423 423 IBM_THROTTLE_VALUE,
424 424 0,
425 425 0,
426 426 0,
427 427 0,
428 428 0,
429 429 0,
430 430 0,
431 431 0
432 432 };
433 433
434 434 static sd_tunables purple_properties = {
435 435 PURPLE_THROTTLE_VALUE,
436 436 0,
437 437 0,
438 438 PURPLE_BUSY_RETRIES,
439 439 PURPLE_RESET_RETRY_COUNT,
440 440 PURPLE_RESERVE_RELEASE_TIME,
441 441 0,
442 442 0,
443 443 0
444 444 };
445 445
446 446 static sd_tunables sve_properties = {
447 447 SVE_THROTTLE_VALUE,
448 448 0,
449 449 0,
450 450 SVE_BUSY_RETRIES,
451 451 SVE_RESET_RETRY_COUNT,
452 452 SVE_RESERVE_RELEASE_TIME,
453 453 SVE_MIN_THROTTLE_VALUE,
454 454 SVE_DISKSORT_DISABLED_FLAG,
455 455 0
456 456 };
457 457
458 458 static sd_tunables maserati_properties = {
459 459 0,
460 460 0,
461 461 0,
462 462 0,
463 463 0,
464 464 0,
465 465 0,
466 466 MASERATI_DISKSORT_DISABLED_FLAG,
467 467 MASERATI_LUN_RESET_ENABLED_FLAG
468 468 };
469 469
470 470 static sd_tunables pirus_properties = {
471 471 PIRUS_THROTTLE_VALUE,
472 472 0,
473 473 PIRUS_NRR_COUNT,
474 474 PIRUS_BUSY_RETRIES,
475 475 PIRUS_RESET_RETRY_COUNT,
476 476 0,
477 477 PIRUS_MIN_THROTTLE_VALUE,
478 478 PIRUS_DISKSORT_DISABLED_FLAG,
479 479 PIRUS_LUN_RESET_ENABLED_FLAG
480 480 };
481 481
482 482 #endif
483 483
484 484 #if (defined(__sparc) && !defined(__fibre)) || \
485 485 (defined(__i386) || defined(__amd64))
486 486
487 487
488 488 static sd_tunables elite_properties = {
489 489 ELITE_THROTTLE_VALUE,
490 490 0,
491 491 0,
492 492 0,
493 493 0,
494 494 0,
495 495 0,
496 496 0,
497 497 0
498 498 };
499 499
500 500 static sd_tunables st31200n_properties = {
501 501 ST31200N_THROTTLE_VALUE,
502 502 0,
503 503 0,
504 504 0,
505 505 0,
506 506 0,
507 507 0,
508 508 0,
509 509 0
510 510 };
511 511
512 512 #endif /* Fibre or not */
513 513
514 514 static sd_tunables lsi_properties_scsi = {
515 515 LSI_THROTTLE_VALUE,
516 516 0,
517 517 LSI_NOTREADY_RETRIES,
518 518 0,
519 519 0,
520 520 0,
521 521 0,
522 522 0,
523 523 0
524 524 };
525 525
526 526 static sd_tunables symbios_properties = {
527 527 SYMBIOS_THROTTLE_VALUE,
528 528 0,
529 529 SYMBIOS_NOTREADY_RETRIES,
530 530 0,
531 531 0,
532 532 0,
533 533 0,
534 534 0,
535 535 0
536 536 };
537 537
538 538 static sd_tunables lsi_properties = {
539 539 0,
540 540 0,
541 541 LSI_NOTREADY_RETRIES,
542 542 0,
543 543 0,
544 544 0,
545 545 0,
546 546 0,
547 547 0
548 548 };
549 549
550 550 static sd_tunables lsi_oem_properties = {
551 551 0,
552 552 0,
553 553 LSI_OEM_NOTREADY_RETRIES,
554 554 0,
555 555 0,
556 556 0,
557 557 0,
558 558 0,
559 559 0,
560 560 1
561 561 };
562 562
563 563
564 564
565 565 #if (defined(SD_PROP_TST))
566 566
567 567 #define SD_TST_CTYPE_VAL CTYPE_CDROM
568 568 #define SD_TST_THROTTLE_VAL 16
569 569 #define SD_TST_NOTREADY_VAL 12
570 570 #define SD_TST_BUSY_VAL 60
571 571 #define SD_TST_RST_RETRY_VAL 36
572 572 #define SD_TST_RSV_REL_TIME 60
573 573
574 574 static sd_tunables tst_properties = {
575 575 SD_TST_THROTTLE_VAL,
576 576 SD_TST_CTYPE_VAL,
577 577 SD_TST_NOTREADY_VAL,
578 578 SD_TST_BUSY_VAL,
579 579 SD_TST_RST_RETRY_VAL,
580 580 SD_TST_RSV_REL_TIME,
581 581 0,
582 582 0,
583 583 0
584 584 };
585 585 #endif
586 586
587 587 /* This is similar to the ANSI toupper implementation */
588 588 #define SD_TOUPPER(C) (((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))
589 589
590 590 /*
591 591 * Static Driver Configuration Table
592 592 *
593 593 * This is the table of disks which need throttle adjustment (or, perhaps
594 594 * something else as defined by the flags at a future time.) device_id
595 595 * is a string consisting of concatenated vid (vendor), pid (product/model)
596 596 * and revision strings as defined in the scsi_inquiry structure. Offsets of
597 597 * the parts of the string are as defined by the sizes in the scsi_inquiry
598 598 * structure. Device type is searched as far as the device_id string is
599 599 * defined. Flags defines which values are to be set in the driver from the
600 600 * properties list.
601 601 *
602 602 * Entries below which begin and end with a "*" are a special case.
603 603 * These do not have a specific vendor, and the string which follows
604 604 * can appear anywhere in the 16 byte PID portion of the inquiry data.
605 605 *
606 606 * Entries below which begin and end with a " " (blank) are a special
607 607 * case. The comparison function will treat multiple consecutive blanks
608 608 * as equivalent to a single blank. For example, this causes a
609 609 * sd_disk_table entry of " NEC CDROM " to match a device's id string
610 610 * of "NEC CDROM".
611 611 *
612 612 * Note: The MD21 controller type has been obsoleted.
613 613 * ST318202F is a Legacy device
614 614 * MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
615 615 * made with an FC connection. The entries here are a legacy.
616 616 */
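The blank-equivalence rule above can be read as: any run of blanks, including leading and trailing ones, compares like a single separator. A minimal sketch of a comparison under that reading; this is not the driver's sd_blank_cmp, whose exact boundary handling may differ:

	static int
	blank_collapse_match(const char *a, const char *b)
	{
		/* Leading blanks on either side are ignored. */
		while (*a == ' ')
			a++;
		while (*b == ' ')
			b++;
		while (*a != '\0' && *b != '\0') {
			if (*a == ' ' && *b == ' ') {
				/* A run of blanks matches a run of blanks. */
				while (*a == ' ')
					a++;
				while (*b == ' ')
					b++;
			} else if (*a == *b) {
				a++;
				b++;
			} else {
				return (0);
			}
		}
		/* Trailing blanks on either side are ignored. */
		while (*a == ' ')
			a++;
		while (*b == ' ')
			b++;
		return (*a == '\0' && *b == '\0');
	}

Under this reading, a table entry of " NEC CDROM " matches a device id of "NEC CDROM", as the comment describes.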
617 617 static sd_disk_config_t sd_disk_table[] = {
618 618 #if defined(__fibre) || defined(__i386) || defined(__amd64)
619 619 { "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
620 620 { "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
621 621 { "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
622 622 { "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
623 623 { "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
624 624 { "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
625 625 { "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
626 626 { "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
627 627 { "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
628 628 { "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
629 629 { "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
630 630 { "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
631 631 { "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
632 632 { "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
633 633 { "FUJITSU MAG3091F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
634 634 { "FUJITSU MAG3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
635 635 { "FUJITSU MAA3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
636 636 { "FUJITSU MAF3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
637 637 { "FUJITSU MAL3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
638 638 { "FUJITSU MAL3738F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
639 639 { "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
640 640 { "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
641 641 { "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
642 642 { "IBM DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
643 643 { "IBM DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
644 644 { "IBM IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
645 645 { "IBM IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
646 646 { "IBM 1724-100", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
647 647 { "IBM 1726-2xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
648 648 { "IBM 1726-22x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
649 649 { "IBM 1726-4xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
650 650 { "IBM 1726-42x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
651 651 { "IBM 1726-3xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
652 652 { "IBM 3526", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
653 653 { "IBM 3542", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
654 654 { "IBM 3552", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
655 655 { "IBM 1722", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
656 656 { "IBM 1742", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
657 657 { "IBM 1815", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
658 658 { "IBM FAStT", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
659 659 { "IBM 1814", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
660 660 { "IBM 1814-200", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
661 661 { "IBM 1818", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
662 662 { "DELL MD3000", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
663 663 { "DELL MD3000i", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
664 664 { "LSI INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
665 665 { "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
666 666 { "SGI TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
667 667 { "SGI IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
668 668 { "*CSM100_*", SD_CONF_BSET_NRR_COUNT |
669 669 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
670 670 { "*CSM200_*", SD_CONF_BSET_NRR_COUNT |
671 671 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
672 672 { "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
673 673 { "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
674 674 { "SUN T3", SD_CONF_BSET_THROTTLE |
675 675 SD_CONF_BSET_BSY_RETRY_COUNT|
676 676 SD_CONF_BSET_RST_RETRIES|
677 677 SD_CONF_BSET_RSV_REL_TIME,
678 678 &purple_properties },
679 679 { "SUN SESS01", SD_CONF_BSET_THROTTLE |
680 680 SD_CONF_BSET_BSY_RETRY_COUNT|
681 681 SD_CONF_BSET_RST_RETRIES|
682 682 SD_CONF_BSET_RSV_REL_TIME|
683 683 SD_CONF_BSET_MIN_THROTTLE|
684 684 SD_CONF_BSET_DISKSORT_DISABLED,
685 685 &sve_properties },
686 686 { "SUN T4", SD_CONF_BSET_THROTTLE |
687 687 SD_CONF_BSET_BSY_RETRY_COUNT|
688 688 SD_CONF_BSET_RST_RETRIES|
689 689 SD_CONF_BSET_RSV_REL_TIME,
690 690 &purple_properties },
691 691 { "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
692 692 SD_CONF_BSET_LUN_RESET_ENABLED,
693 693 &maserati_properties },
694 694 { "SUN SE6920", SD_CONF_BSET_THROTTLE |
695 695 SD_CONF_BSET_NRR_COUNT|
696 696 SD_CONF_BSET_BSY_RETRY_COUNT|
697 697 SD_CONF_BSET_RST_RETRIES|
698 698 SD_CONF_BSET_MIN_THROTTLE|
699 699 SD_CONF_BSET_DISKSORT_DISABLED|
700 700 SD_CONF_BSET_LUN_RESET_ENABLED,
701 701 &pirus_properties },
702 702 { "SUN SE6940", SD_CONF_BSET_THROTTLE |
703 703 SD_CONF_BSET_NRR_COUNT|
704 704 SD_CONF_BSET_BSY_RETRY_COUNT|
705 705 SD_CONF_BSET_RST_RETRIES|
706 706 SD_CONF_BSET_MIN_THROTTLE|
707 707 SD_CONF_BSET_DISKSORT_DISABLED|
708 708 SD_CONF_BSET_LUN_RESET_ENABLED,
709 709 &pirus_properties },
710 710 { "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE |
711 711 SD_CONF_BSET_NRR_COUNT|
712 712 SD_CONF_BSET_BSY_RETRY_COUNT|
713 713 SD_CONF_BSET_RST_RETRIES|
714 714 SD_CONF_BSET_MIN_THROTTLE|
715 715 SD_CONF_BSET_DISKSORT_DISABLED|
716 716 SD_CONF_BSET_LUN_RESET_ENABLED,
717 717 &pirus_properties },
718 718 { "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE |
719 719 SD_CONF_BSET_NRR_COUNT|
720 720 SD_CONF_BSET_BSY_RETRY_COUNT|
721 721 SD_CONF_BSET_RST_RETRIES|
722 722 SD_CONF_BSET_MIN_THROTTLE|
723 723 SD_CONF_BSET_DISKSORT_DISABLED|
724 724 SD_CONF_BSET_LUN_RESET_ENABLED,
725 725 &pirus_properties },
726 726 { "SUN PSX1000", SD_CONF_BSET_THROTTLE |
727 727 SD_CONF_BSET_NRR_COUNT|
728 728 SD_CONF_BSET_BSY_RETRY_COUNT|
729 729 SD_CONF_BSET_RST_RETRIES|
730 730 SD_CONF_BSET_MIN_THROTTLE|
731 731 SD_CONF_BSET_DISKSORT_DISABLED|
732 732 SD_CONF_BSET_LUN_RESET_ENABLED,
733 733 &pirus_properties },
734 734 { "SUN SE6330", SD_CONF_BSET_THROTTLE |
735 735 SD_CONF_BSET_NRR_COUNT|
736 736 SD_CONF_BSET_BSY_RETRY_COUNT|
737 737 SD_CONF_BSET_RST_RETRIES|
738 738 SD_CONF_BSET_MIN_THROTTLE|
739 739 SD_CONF_BSET_DISKSORT_DISABLED|
740 740 SD_CONF_BSET_LUN_RESET_ENABLED,
741 741 &pirus_properties },
742 742 { "SUN STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
743 743 { "SUN SUN_6180", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
744 744 { "STK OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
745 745 { "STK OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
746 746 { "STK BladeCtlr", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
747 747 { "STK FLEXLINE", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
748 748 { "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
749 749 #endif /* fibre or NON-sparc platforms */
750 750 #if ((defined(__sparc) && !defined(__fibre)) ||\
751 751 (defined(__i386) || defined(__amd64)))
752 752 { "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
753 753 { "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
754 754 { "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
755 755 { "CONNER CP30540", SD_CONF_BSET_NOCACHE, NULL },
756 756 { "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
757 757 { "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
758 758 { "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
759 759 { "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
760 760 { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
761 761 { "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
762 762 { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
763 763 { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
764 764 { "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
765 765 &symbios_properties },
766 766 { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
767 767 &lsi_properties_scsi },
768 768 #if defined(__i386) || defined(__amd64)
769 769 { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
770 770 | SD_CONF_BSET_READSUB_BCD
771 771 | SD_CONF_BSET_READ_TOC_ADDR_BCD
772 772 | SD_CONF_BSET_NO_READ_HEADER
773 773 | SD_CONF_BSET_READ_CD_XD4), NULL },
774 774
775 775 { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
776 776 | SD_CONF_BSET_READSUB_BCD
777 777 | SD_CONF_BSET_READ_TOC_ADDR_BCD
778 778 | SD_CONF_BSET_NO_READ_HEADER
779 779 | SD_CONF_BSET_READ_CD_XD4), NULL },
780 780 #endif /* __i386 || __amd64 */
781 781 #endif /* sparc NON-fibre or NON-sparc platforms */
782 782
783 783 #if (defined(SD_PROP_TST))
784 784 { "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE
785 785 | SD_CONF_BSET_CTYPE
786 786 | SD_CONF_BSET_NRR_COUNT
787 787 | SD_CONF_BSET_FAB_DEVID
788 788 | SD_CONF_BSET_NOCACHE
789 789 | SD_CONF_BSET_BSY_RETRY_COUNT
790 790 | SD_CONF_BSET_PLAYMSF_BCD
791 791 | SD_CONF_BSET_READSUB_BCD
792 792 | SD_CONF_BSET_READ_TOC_TRK_BCD
793 793 | SD_CONF_BSET_READ_TOC_ADDR_BCD
794 794 | SD_CONF_BSET_NO_READ_HEADER
795 795 | SD_CONF_BSET_READ_CD_XD4
796 796 | SD_CONF_BSET_RST_RETRIES
797 797 | SD_CONF_BSET_RSV_REL_TIME
798 798 | SD_CONF_BSET_TUR_CHECK), &tst_properties},
799 799 #endif
800 800 };
801 801
802 802 static const int sd_disk_table_size =
803 803 sizeof (sd_disk_table)/ sizeof (sd_disk_config_t);
804 804
805 805 /*
806 806 * Emulation mode disk drive VID/PID table
807 807 */
808 808 static char sd_flash_dev_table[][25] = {
809 809 "ATA MARVELL SD88SA02",
810 810 "MARVELL SD88SA02",
811 811 "TOSHIBA THNSNV05",
812 812 };
813 813
814 814 static const int sd_flash_dev_table_size =
815 815 sizeof (sd_flash_dev_table) / sizeof (sd_flash_dev_table[0]);
816 816
817 817 #define SD_INTERCONNECT_PARALLEL 0
818 818 #define SD_INTERCONNECT_FABRIC 1
819 819 #define SD_INTERCONNECT_FIBRE 2
820 820 #define SD_INTERCONNECT_SSA 3
821 821 #define SD_INTERCONNECT_SATA 4
822 822 #define SD_INTERCONNECT_SAS 5
823 823
824 824 #define SD_IS_PARALLEL_SCSI(un) \
825 825 ((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
826 826 #define SD_IS_SERIAL(un) \
827 827 (((un)->un_interconnect_type == SD_INTERCONNECT_SATA) ||\
828 828 ((un)->un_interconnect_type == SD_INTERCONNECT_SAS))
829 829
830 830 /*
831 831 * Definitions used by device id registration routines
832 832 */
833 833 #define VPD_HEAD_OFFSET 3 /* size of head for vpd page */
834 834 #define VPD_PAGE_LENGTH 3 /* offset for page length data */
835 835 #define VPD_MODE_PAGE 1 /* offset into vpd pg for "page code" */
836 836
837 -static kmutex_t sd_sense_mutex = {0};
837 +static kmutex_t sd_sense_mutex = {{NULL}};
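The second hunk is the same class of fix. kmutex_t is an opaque type whose first member is itself an aggregate, so a fully-braced zero initializer needs two brace levels; {0} elides the inner braces and trips -Wmissing-braces. The layout below is an assumption for illustration only (the real definition lives in sys/mutex.h):

	typedef struct mutex {
		void	*_opaque[1];	/* assumed shape, illustration only */
	} kmutex_t;

	static kmutex_t m_old = {0};		/* warns: inner braces elided */
	static kmutex_t m_new = {{NULL}};	/* fully braced, warning-free */

Both initializers zero the object; as with sd_tr above, only the diagnostic changes.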
838 838
839 839 /*
840 840 * Macros for updates of the driver state
841 841 */
842 842 #define New_state(un, s) \
843 843 (un)->un_last_state = (un)->un_state, (un)->un_state = (s)
844 844 #define Restore_state(un) \
845 845 { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
846 846
847 847 static struct sd_cdbinfo sd_cdbtab[] = {
848 848 { CDB_GROUP0, 0x00, 0x1FFFFF, 0xFF, },
849 849 { CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, },
850 850 { CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },
851 851 { CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
852 852 };
853 853
854 854 /*
855 855 * Specifies the number of seconds that must have elapsed since the last
856 856 * cmd. has completed for a device to be declared idle to the PM framework.
857 857 */
858 858 static int sd_pm_idletime = 1;
859 859
860 860 /*
861 861 * Internal function prototypes
862 862 */
863 863
864 864 #if (defined(__fibre))
865 865 /*
866 866 * These #defines are to avoid namespace collisions that occur because this
867 867 * code is currently used to compile two separate driver modules: sd and ssd.
868 868 * All function names need to be treated this way (even if declared static)
869 869 * in order to allow the debugger to resolve the names properly.
870 870 * It is anticipated that in the near future the ssd module will be obsoleted,
871 871 * at which time this ugliness should go away.
872 872 */
873 873 #define sd_log_trace ssd_log_trace
874 874 #define sd_log_info ssd_log_info
875 875 #define sd_log_err ssd_log_err
876 876 #define sdprobe ssdprobe
877 877 #define sdinfo ssdinfo
878 878 #define sd_prop_op ssd_prop_op
879 879 #define sd_scsi_probe_cache_init ssd_scsi_probe_cache_init
880 880 #define sd_scsi_probe_cache_fini ssd_scsi_probe_cache_fini
881 881 #define sd_scsi_clear_probe_cache ssd_scsi_clear_probe_cache
882 882 #define sd_scsi_probe_with_cache ssd_scsi_probe_with_cache
883 883 #define sd_scsi_target_lun_init ssd_scsi_target_lun_init
884 884 #define sd_scsi_target_lun_fini ssd_scsi_target_lun_fini
885 885 #define sd_scsi_get_target_lun_count ssd_scsi_get_target_lun_count
886 886 #define sd_scsi_update_lun_on_target ssd_scsi_update_lun_on_target
887 887 #define sd_spin_up_unit ssd_spin_up_unit
888 888 #define sd_enable_descr_sense ssd_enable_descr_sense
889 889 #define sd_reenable_dsense_task ssd_reenable_dsense_task
890 890 #define sd_set_mmc_caps ssd_set_mmc_caps
891 891 #define sd_read_unit_properties ssd_read_unit_properties
892 892 #define sd_process_sdconf_file ssd_process_sdconf_file
893 893 #define sd_process_sdconf_table ssd_process_sdconf_table
894 894 #define sd_sdconf_id_match ssd_sdconf_id_match
895 895 #define sd_blank_cmp ssd_blank_cmp
896 896 #define sd_chk_vers1_data ssd_chk_vers1_data
897 897 #define sd_set_vers1_properties ssd_set_vers1_properties
898 898 #define sd_check_solid_state ssd_check_solid_state
899 899 #define sd_check_emulation_mode ssd_check_emulation_mode
900 900
901 901 #define sd_get_physical_geometry ssd_get_physical_geometry
902 902 #define sd_get_virtual_geometry ssd_get_virtual_geometry
903 903 #define sd_update_block_info ssd_update_block_info
904 904 #define sd_register_devid ssd_register_devid
905 905 #define sd_get_devid ssd_get_devid
906 906 #define sd_create_devid ssd_create_devid
907 907 #define sd_write_deviceid ssd_write_deviceid
908 908 #define sd_check_vpd_page_support ssd_check_vpd_page_support
909 909 #define sd_setup_pm ssd_setup_pm
910 910 #define sd_create_pm_components ssd_create_pm_components
911 911 #define sd_ddi_suspend ssd_ddi_suspend
912 912 #define sd_ddi_resume ssd_ddi_resume
913 913 #define sd_pm_state_change ssd_pm_state_change
914 914 #define sdpower ssdpower
915 915 #define sdattach ssdattach
916 916 #define sddetach ssddetach
917 917 #define sd_unit_attach ssd_unit_attach
918 918 #define sd_unit_detach ssd_unit_detach
919 919 #define sd_set_unit_attributes ssd_set_unit_attributes
920 920 #define sd_create_errstats ssd_create_errstats
921 921 #define sd_set_errstats ssd_set_errstats
922 922 #define sd_set_pstats ssd_set_pstats
923 923 #define sddump ssddump
924 924 #define sd_scsi_poll ssd_scsi_poll
925 925 #define sd_send_polled_RQS ssd_send_polled_RQS
926 926 #define sd_ddi_scsi_poll ssd_ddi_scsi_poll
927 927 #define sd_init_event_callbacks ssd_init_event_callbacks
928 928 #define sd_event_callback ssd_event_callback
929 929 #define sd_cache_control ssd_cache_control
930 930 #define sd_get_write_cache_enabled ssd_get_write_cache_enabled
931 931 #define sd_get_nv_sup ssd_get_nv_sup
932 932 #define sd_make_device ssd_make_device
933 933 #define sdopen ssdopen
934 934 #define sdclose ssdclose
935 935 #define sd_ready_and_valid ssd_ready_and_valid
936 936 #define sdmin ssdmin
937 937 #define sdread ssdread
938 938 #define sdwrite ssdwrite
939 939 #define sdaread ssdaread
940 940 #define sdawrite ssdawrite
941 941 #define sdstrategy ssdstrategy
942 942 #define sdioctl ssdioctl
943 943 #define sd_mapblockaddr_iostart ssd_mapblockaddr_iostart
944 944 #define sd_mapblocksize_iostart ssd_mapblocksize_iostart
945 945 #define sd_checksum_iostart ssd_checksum_iostart
946 946 #define sd_checksum_uscsi_iostart ssd_checksum_uscsi_iostart
947 947 #define sd_pm_iostart ssd_pm_iostart
948 948 #define sd_core_iostart ssd_core_iostart
949 949 #define sd_mapblockaddr_iodone ssd_mapblockaddr_iodone
950 950 #define sd_mapblocksize_iodone ssd_mapblocksize_iodone
951 951 #define sd_checksum_iodone ssd_checksum_iodone
952 952 #define sd_checksum_uscsi_iodone ssd_checksum_uscsi_iodone
953 953 #define sd_pm_iodone ssd_pm_iodone
954 954 #define sd_initpkt_for_buf ssd_initpkt_for_buf
955 955 #define sd_destroypkt_for_buf ssd_destroypkt_for_buf
956 956 #define sd_setup_rw_pkt ssd_setup_rw_pkt
957 957 #define sd_setup_next_rw_pkt ssd_setup_next_rw_pkt
958 958 #define sd_buf_iodone ssd_buf_iodone
959 959 #define sd_uscsi_strategy ssd_uscsi_strategy
960 960 #define sd_initpkt_for_uscsi ssd_initpkt_for_uscsi
961 961 #define sd_destroypkt_for_uscsi ssd_destroypkt_for_uscsi
962 962 #define sd_uscsi_iodone ssd_uscsi_iodone
963 963 #define sd_xbuf_strategy ssd_xbuf_strategy
964 964 #define sd_xbuf_init ssd_xbuf_init
965 965 #define sd_pm_entry ssd_pm_entry
966 966 #define sd_pm_exit ssd_pm_exit
967 967
968 968 #define sd_pm_idletimeout_handler ssd_pm_idletimeout_handler
969 969 #define sd_pm_timeout_handler ssd_pm_timeout_handler
970 970
971 971 #define sd_add_buf_to_waitq ssd_add_buf_to_waitq
972 972 #define sdintr ssdintr
973 973 #define sd_start_cmds ssd_start_cmds
974 974 #define sd_send_scsi_cmd ssd_send_scsi_cmd
975 975 #define sd_bioclone_alloc ssd_bioclone_alloc
976 976 #define sd_bioclone_free ssd_bioclone_free
977 977 #define sd_shadow_buf_alloc ssd_shadow_buf_alloc
978 978 #define sd_shadow_buf_free ssd_shadow_buf_free
979 979 #define sd_print_transport_rejected_message \
980 980 ssd_print_transport_rejected_message
981 981 #define sd_retry_command ssd_retry_command
982 982 #define sd_set_retry_bp ssd_set_retry_bp
983 983 #define sd_send_request_sense_command ssd_send_request_sense_command
984 984 #define sd_start_retry_command ssd_start_retry_command
985 985 #define sd_start_direct_priority_command \
986 986 ssd_start_direct_priority_command
987 987 #define sd_return_failed_command ssd_return_failed_command
988 988 #define sd_return_failed_command_no_restart \
989 989 ssd_return_failed_command_no_restart
990 990 #define sd_return_command ssd_return_command
991 991 #define sd_sync_with_callback ssd_sync_with_callback
992 992 #define sdrunout ssdrunout
993 993 #define sd_mark_rqs_busy ssd_mark_rqs_busy
994 994 #define sd_mark_rqs_idle ssd_mark_rqs_idle
995 995 #define sd_reduce_throttle ssd_reduce_throttle
996 996 #define sd_restore_throttle ssd_restore_throttle
997 997 #define sd_print_incomplete_msg ssd_print_incomplete_msg
998 998 #define sd_init_cdb_limits ssd_init_cdb_limits
999 999 #define sd_pkt_status_good ssd_pkt_status_good
1000 1000 #define sd_pkt_status_check_condition ssd_pkt_status_check_condition
1001 1001 #define sd_pkt_status_busy ssd_pkt_status_busy
1002 1002 #define sd_pkt_status_reservation_conflict \
1003 1003 ssd_pkt_status_reservation_conflict
1004 1004 #define sd_pkt_status_qfull ssd_pkt_status_qfull
1005 1005 #define sd_handle_request_sense ssd_handle_request_sense
1006 1006 #define sd_handle_auto_request_sense ssd_handle_auto_request_sense
1007 1007 #define sd_print_sense_failed_msg ssd_print_sense_failed_msg
1008 1008 #define sd_validate_sense_data ssd_validate_sense_data
1009 1009 #define sd_decode_sense ssd_decode_sense
1010 1010 #define sd_print_sense_msg ssd_print_sense_msg
1011 1011 #define sd_sense_key_no_sense ssd_sense_key_no_sense
1012 1012 #define sd_sense_key_recoverable_error ssd_sense_key_recoverable_error
1013 1013 #define sd_sense_key_not_ready ssd_sense_key_not_ready
1014 1014 #define sd_sense_key_medium_or_hardware_error \
1015 1015 ssd_sense_key_medium_or_hardware_error
1016 1016 #define sd_sense_key_illegal_request ssd_sense_key_illegal_request
1017 1017 #define sd_sense_key_unit_attention ssd_sense_key_unit_attention
1018 1018 #define sd_sense_key_fail_command ssd_sense_key_fail_command
1019 1019 #define sd_sense_key_blank_check ssd_sense_key_blank_check
1020 1020 #define sd_sense_key_aborted_command ssd_sense_key_aborted_command
1021 1021 #define sd_sense_key_default ssd_sense_key_default
1022 1022 #define sd_print_retry_msg ssd_print_retry_msg
1023 1023 #define sd_print_cmd_incomplete_msg ssd_print_cmd_incomplete_msg
1024 1024 #define sd_pkt_reason_cmd_incomplete ssd_pkt_reason_cmd_incomplete
1025 1025 #define sd_pkt_reason_cmd_tran_err ssd_pkt_reason_cmd_tran_err
1026 1026 #define sd_pkt_reason_cmd_reset ssd_pkt_reason_cmd_reset
1027 1027 #define sd_pkt_reason_cmd_aborted ssd_pkt_reason_cmd_aborted
1028 1028 #define sd_pkt_reason_cmd_timeout ssd_pkt_reason_cmd_timeout
1029 1029 #define sd_pkt_reason_cmd_unx_bus_free ssd_pkt_reason_cmd_unx_bus_free
1030 1030 #define sd_pkt_reason_cmd_tag_reject ssd_pkt_reason_cmd_tag_reject
1031 1031 #define sd_pkt_reason_default ssd_pkt_reason_default
1032 1032 #define sd_reset_target ssd_reset_target
1033 1033 #define sd_start_stop_unit_callback ssd_start_stop_unit_callback
1034 1034 #define sd_start_stop_unit_task ssd_start_stop_unit_task
1035 1035 #define sd_taskq_create ssd_taskq_create
1036 1036 #define sd_taskq_delete ssd_taskq_delete
1037 1037 #define sd_target_change_task ssd_target_change_task
1038 1038 #define sd_log_dev_status_event ssd_log_dev_status_event
1039 1039 #define sd_log_lun_expansion_event ssd_log_lun_expansion_event
1040 1040 #define sd_log_eject_request_event ssd_log_eject_request_event
1041 1041 #define sd_media_change_task ssd_media_change_task
1042 1042 #define sd_handle_mchange ssd_handle_mchange
1043 1043 #define sd_send_scsi_DOORLOCK ssd_send_scsi_DOORLOCK
1044 1044 #define sd_send_scsi_READ_CAPACITY ssd_send_scsi_READ_CAPACITY
1045 1045 #define sd_send_scsi_READ_CAPACITY_16 ssd_send_scsi_READ_CAPACITY_16
1046 1046 #define sd_send_scsi_GET_CONFIGURATION ssd_send_scsi_GET_CONFIGURATION
1047 1047 #define sd_send_scsi_feature_GET_CONFIGURATION \
1048 1048 sd_send_scsi_feature_GET_CONFIGURATION
1049 1049 #define sd_send_scsi_START_STOP_UNIT ssd_send_scsi_START_STOP_UNIT
1050 1050 #define sd_send_scsi_INQUIRY ssd_send_scsi_INQUIRY
1051 1051 #define sd_send_scsi_TEST_UNIT_READY ssd_send_scsi_TEST_UNIT_READY
1052 1052 #define sd_send_scsi_PERSISTENT_RESERVE_IN \
1053 1053 ssd_send_scsi_PERSISTENT_RESERVE_IN
1054 1054 #define sd_send_scsi_PERSISTENT_RESERVE_OUT \
1055 1055 ssd_send_scsi_PERSISTENT_RESERVE_OUT
1056 1056 #define sd_send_scsi_SYNCHRONIZE_CACHE ssd_send_scsi_SYNCHRONIZE_CACHE
1057 1057 #define sd_send_scsi_SYNCHRONIZE_CACHE_biodone \
1058 1058 ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
1059 1059 #define sd_send_scsi_MODE_SENSE ssd_send_scsi_MODE_SENSE
1060 1060 #define sd_send_scsi_MODE_SELECT ssd_send_scsi_MODE_SELECT
1061 1061 #define sd_send_scsi_RDWR ssd_send_scsi_RDWR
1062 1062 #define sd_send_scsi_LOG_SENSE ssd_send_scsi_LOG_SENSE
1063 1063 #define sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION \
1064 1064 ssd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
1065 1065 #define sd_gesn_media_data_valid ssd_gesn_media_data_valid
1066 1066 #define sd_alloc_rqs ssd_alloc_rqs
1067 1067 #define sd_free_rqs ssd_free_rqs
1068 1068 #define sd_dump_memory ssd_dump_memory
1069 1069 #define sd_get_media_info_com ssd_get_media_info_com
1070 1070 #define sd_get_media_info ssd_get_media_info
1071 1071 #define sd_get_media_info_ext ssd_get_media_info_ext
1072 1072 #define sd_dkio_ctrl_info ssd_dkio_ctrl_info
1073 1073 #define sd_nvpair_str_decode ssd_nvpair_str_decode
1074 1074 #define sd_strtok_r ssd_strtok_r
1075 1075 #define sd_set_properties ssd_set_properties
1076 1076 #define sd_get_tunables_from_conf ssd_get_tunables_from_conf
1077 1077 #define sd_setup_next_xfer ssd_setup_next_xfer
1078 1078 #define sd_dkio_get_temp ssd_dkio_get_temp
1079 1079 #define sd_check_mhd ssd_check_mhd
1080 1080 #define sd_mhd_watch_cb ssd_mhd_watch_cb
1081 1081 #define sd_mhd_watch_incomplete ssd_mhd_watch_incomplete
1082 1082 #define sd_sname ssd_sname
1083 1083 #define sd_mhd_resvd_recover ssd_mhd_resvd_recover
1084 1084 #define sd_resv_reclaim_thread ssd_resv_reclaim_thread
1085 1085 #define sd_take_ownership ssd_take_ownership
1086 1086 #define sd_reserve_release ssd_reserve_release
1087 1087 #define sd_rmv_resv_reclaim_req ssd_rmv_resv_reclaim_req
1088 1088 #define sd_mhd_reset_notify_cb ssd_mhd_reset_notify_cb
1089 1089 #define sd_persistent_reservation_in_read_keys \
1090 1090 ssd_persistent_reservation_in_read_keys
1091 1091 #define sd_persistent_reservation_in_read_resv \
1092 1092 ssd_persistent_reservation_in_read_resv
1093 1093 #define sd_mhdioc_takeown ssd_mhdioc_takeown
1094 1094 #define sd_mhdioc_failfast ssd_mhdioc_failfast
1095 1095 #define sd_mhdioc_release ssd_mhdioc_release
1096 1096 #define sd_mhdioc_register_devid ssd_mhdioc_register_devid
1097 1097 #define sd_mhdioc_inkeys ssd_mhdioc_inkeys
1098 1098 #define sd_mhdioc_inresv ssd_mhdioc_inresv
1099 1099 #define sr_change_blkmode ssr_change_blkmode
1100 1100 #define sr_change_speed ssr_change_speed
1101 1101 #define sr_atapi_change_speed ssr_atapi_change_speed
1102 1102 #define sr_pause_resume ssr_pause_resume
1103 1103 #define sr_play_msf ssr_play_msf
1104 1104 #define sr_play_trkind ssr_play_trkind
1105 1105 #define sr_read_all_subcodes ssr_read_all_subcodes
1106 1106 #define sr_read_subchannel ssr_read_subchannel
1107 1107 #define sr_read_tocentry ssr_read_tocentry
1108 1108 #define sr_read_tochdr ssr_read_tochdr
1109 1109 #define sr_read_cdda ssr_read_cdda
1110 1110 #define sr_read_cdxa ssr_read_cdxa
1111 1111 #define sr_read_mode1 ssr_read_mode1
1112 1112 #define sr_read_mode2 ssr_read_mode2
1113 1113 #define sr_read_cd_mode2 ssr_read_cd_mode2
1114 1114 #define sr_sector_mode ssr_sector_mode
1115 1115 #define sr_eject ssr_eject
1116 1116 #define sr_ejected ssr_ejected
1117 1117 #define sr_check_wp ssr_check_wp
1118 1118 #define sd_watch_request_submit ssd_watch_request_submit
1119 1119 #define sd_check_media ssd_check_media
1120 1120 #define sd_media_watch_cb ssd_media_watch_cb
1121 1121 #define sd_delayed_cv_broadcast ssd_delayed_cv_broadcast
1122 1122 #define sr_volume_ctrl ssr_volume_ctrl
1123 1123 #define sr_read_sony_session_offset ssr_read_sony_session_offset
1124 1124 #define sd_log_page_supported ssd_log_page_supported
1125 1125 #define sd_check_for_writable_cd ssd_check_for_writable_cd
1126 1126 #define sd_wm_cache_constructor ssd_wm_cache_constructor
1127 1127 #define sd_wm_cache_destructor ssd_wm_cache_destructor
1128 1128 #define sd_range_lock ssd_range_lock
1129 1129 #define sd_get_range ssd_get_range
1130 1130 #define sd_free_inlist_wmap ssd_free_inlist_wmap
1131 1131 #define sd_range_unlock ssd_range_unlock
1132 1132 #define sd_read_modify_write_task ssd_read_modify_write_task
1133 1133 #define sddump_do_read_of_rmw ssddump_do_read_of_rmw
1134 1134
1135 1135 #define sd_iostart_chain ssd_iostart_chain
1136 1136 #define sd_iodone_chain ssd_iodone_chain
1137 1137 #define sd_initpkt_map ssd_initpkt_map
1138 1138 #define sd_destroypkt_map ssd_destroypkt_map
1139 1139 #define sd_chain_type_map ssd_chain_type_map
1140 1140 #define sd_chain_index_map ssd_chain_index_map
1141 1141
1142 1142 #define sd_failfast_flushctl ssd_failfast_flushctl
1143 1143 #define sd_failfast_flushq ssd_failfast_flushq
1144 1144 #define sd_failfast_flushq_callback ssd_failfast_flushq_callback
1145 1145
1146 1146 #define sd_is_lsi ssd_is_lsi
1147 1147 #define sd_tg_rdwr ssd_tg_rdwr
1148 1148 #define sd_tg_getinfo ssd_tg_getinfo
1149 1149 #define sd_rmw_msg_print_handler ssd_rmw_msg_print_handler
1150 1150
1151 1151 #endif /* #if (defined(__fibre)) */
1152 1152
1153 1153
1154 1154 int _init(void);
1155 1155 int _fini(void);
1156 1156 int _info(struct modinfo *modinfop);
1157 1157
1158 1158 /*PRINTFLIKE3*/
1159 1159 static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
1160 1160 /*PRINTFLIKE3*/
1161 1161 static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
1162 1162 /*PRINTFLIKE3*/
1163 1163 static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);
1164 1164
1165 1165 static int sdprobe(dev_info_t *devi);
1166 1166 static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
1167 1167 void **result);
1168 1168 static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1169 1169 int mod_flags, char *name, caddr_t valuep, int *lengthp);
1170 1170
1171 1171 /*
1172 1172 * Smart probe for parallel scsi
1173 1173 */
1174 1174 static void sd_scsi_probe_cache_init(void);
1175 1175 static void sd_scsi_probe_cache_fini(void);
1176 1176 static void sd_scsi_clear_probe_cache(void);
1177 1177 static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());
1178 1178
1179 1179 /*
1180 1180 * Attached luns on target for parallel scsi
1181 1181 */
1182 1182 static void sd_scsi_target_lun_init(void);
1183 1183 static void sd_scsi_target_lun_fini(void);
1184 1184 static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
1185 1185 static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);
1186 1186
1187 1187 static int sd_spin_up_unit(sd_ssc_t *ssc);
1188 1188
1189 1189 /*
1190 1190 * Using sd_ssc_init to establish sd_ssc_t struct
1191 1191 * Using sd_ssc_send to send uscsi internal command
1192 1192 * Using sd_ssc_fini to free sd_ssc_t struct
1193 1193 */
1194 1194 static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
1195 1195 static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
1196 1196 int flag, enum uio_seg dataspace, int path_flag);
1197 1197 static void sd_ssc_fini(sd_ssc_t *ssc);
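A hedged sketch of the init/send/fini lifecycle just described (the uscsi setup and the FKIOCTL/UIO_SYSSPACE/SD_PATH_DIRECT/SD_FMT_STANDARD arguments are illustrative assumptions, not a specific call site from this driver; sd_ssc_assessment is declared just below):

	sd_ssc_t		*ssc;
	struct uscsi_cmd	ucmd;
	int			rval;

	ssc = sd_ssc_init(un);
	bzero(&ucmd, sizeof (ucmd));
	/* ... fill in the CDB, data buffer, timeout, and flags ... */
	rval = sd_ssc_send(ssc, &ucmd, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_DIRECT);
	/* Mark how the outcome should be assessed before teardown. */
	sd_ssc_assessment(ssc, SD_FMT_STANDARD);
	sd_ssc_fini(ssc);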
1198 1198
1199 1199 /*
1200 1200 * Using sd_ssc_assessment to set correct type-of-assessment
1201 1201 * Using sd_ssc_post to post ereport & system log
1202 1202 * sd_ssc_post will call sd_ssc_print to print system log
1203 1203 * sd_ssc_post will call sd_ssd_ereport_post to post ereport
1204 1204 */
1205 1205 static void sd_ssc_assessment(sd_ssc_t *ssc,
1206 1206 enum sd_type_assessment tp_assess);
1207 1207
1208 1208 static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
1209 1209 static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
1210 1210 static void sd_ssc_ereport_post(sd_ssc_t *ssc,
1211 1211 enum sd_driver_assessment drv_assess);
1212 1212
1213 1213 /*
1214 1214 * Using sd_ssc_set_info to mark an un-decodable-data error.
1215 1215 * Using sd_ssc_extract_info to transfer information from internal
1216 1216 * data structures to sd_ssc_t.
1217 1217 */
1218 1218 static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp,
1219 1219 const char *fmt, ...);
1220 1220 static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
1221 1221 struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);
1222 1222
1223 1223 static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
1224 1224 enum uio_seg dataspace, int path_flag);
1225 1225
1226 1226 #ifdef _LP64
1227 1227 static void sd_enable_descr_sense(sd_ssc_t *ssc);
1228 1228 static void sd_reenable_dsense_task(void *arg);
1229 1229 #endif /* _LP64 */
1230 1230
1231 1231 static void sd_set_mmc_caps(sd_ssc_t *ssc);
1232 1232
1233 1233 static void sd_read_unit_properties(struct sd_lun *un);
1234 1234 static int sd_process_sdconf_file(struct sd_lun *un);
1235 1235 static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
1236 1236 static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
1237 1237 static void sd_set_properties(struct sd_lun *un, char *name, char *value);
1238 1238 static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
1239 1239 int *data_list, sd_tunables *values);
1240 1240 static void sd_process_sdconf_table(struct sd_lun *un);
1241 1241 static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
1242 1242 static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
1243 1243 static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
1244 1244 int list_len, char *dataname_ptr);
1245 1245 static void sd_set_vers1_properties(struct sd_lun *un, int flags,
1246 1246 sd_tunables *prop_list);
1247 1247
1248 1248 static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
1249 1249 int reservation_flag);
1250 1250 static int sd_get_devid(sd_ssc_t *ssc);
1251 1251 static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
1252 1252 static int sd_write_deviceid(sd_ssc_t *ssc);
1253 1253 static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
1254 1254 static int sd_check_vpd_page_support(sd_ssc_t *ssc);
1255 1255
1256 1256 static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
1257 1257 static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);
1258 1258
1259 1259 static int sd_ddi_suspend(dev_info_t *devi);
1260 1260 static int sd_ddi_resume(dev_info_t *devi);
1261 1261 static int sd_pm_state_change(struct sd_lun *un, int level, int flag);
1262 1262 static int sdpower(dev_info_t *devi, int component, int level);
1263 1263
1264 1264 static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
1265 1265 static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
1266 1266 static int sd_unit_attach(dev_info_t *devi);
1267 1267 static int sd_unit_detach(dev_info_t *devi);
1268 1268
1269 1269 static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
1270 1270 static void sd_create_errstats(struct sd_lun *un, int instance);
1271 1271 static void sd_set_errstats(struct sd_lun *un);
1272 1272 static void sd_set_pstats(struct sd_lun *un);
1273 1273
1274 1274 static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
1275 1275 static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
1276 1276 static int sd_send_polled_RQS(struct sd_lun *un);
1277 1277 static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);
1278 1278
1279 1279 #if (defined(__fibre))
1280 1280 /*
1281 1281 * Event callbacks (photon)
1282 1282 */
1283 1283 static void sd_init_event_callbacks(struct sd_lun *un);
1284 1284 static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
1285 1285 #endif
1286 1286
1287 1287 /*
1288 1288 * Defines for sd_cache_control
1289 1289 */
1290 1290
1291 1291 #define SD_CACHE_ENABLE 1
1292 1292 #define SD_CACHE_DISABLE 0
1293 1293 #define SD_CACHE_NOCHANGE -1
1294 1294
1295 1295 static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
1296 1296 static int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
1297 1297 static void sd_get_nv_sup(sd_ssc_t *ssc);
1298 1298 static dev_t sd_make_device(dev_info_t *devi);
1299 1299 static void sd_check_solid_state(sd_ssc_t *ssc);
1300 1300 static void sd_check_emulation_mode(sd_ssc_t *ssc);
1301 1301 static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
1302 1302 uint64_t capacity);
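A one-line example of the SD_CACHE_* flag convention above, at a hypothetical call site: pass SD_CACHE_NOCHANGE for any cache setting that should be left alone.

	/* Enable the write cache; leave read-cache-disable untouched. */
	(void) sd_cache_control(ssc, SD_CACHE_NOCHANGE, SD_CACHE_ENABLE);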
1303 1303
1304 1304 /*
1305 1305 * Driver entry point functions.
1306 1306 */
1307 1307 static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
1308 1308 static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
1309 1309 static int sd_ready_and_valid(sd_ssc_t *ssc, int part);
1310 1310
1311 1311 static void sdmin(struct buf *bp);
1312 1312 static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
1313 1313 static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
1314 1314 static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
1315 1315 static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);
1316 1316
1317 1317 static int sdstrategy(struct buf *bp);
1318 1318 static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);
1319 1319
1320 1320 /*
1321 1321 * Function prototypes for layering functions in the iostart chain.
1322 1322 */
1323 1323 static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
1324 1324 struct buf *bp);
1325 1325 static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
1326 1326 struct buf *bp);
1327 1327 static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
1328 1328 static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
1329 1329 struct buf *bp);
1330 1330 static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
1331 1331 static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);
1332 1332
1333 1333 /*
1334 1334 * Function prototypes for layering functions in the iodone chain.
1335 1335 */
1336 1336 static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
1337 1337 static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
1338 1338 static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
1339 1339 struct buf *bp);
1340 1340 static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
1341 1341 struct buf *bp);
1342 1342 static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
1343 1343 static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
1344 1344 struct buf *bp);
1345 1345 static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);
1346 1346
1347 1347 /*
1348 1348 * Prototypes for functions to support buf(9S) based IO.
1349 1349 */
1350 1350 static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
1351 1351 static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
1352 1352 static void sd_destroypkt_for_buf(struct buf *);
1353 1353 static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
1354 1354 struct buf *bp, int flags,
1355 1355 int (*callback)(caddr_t), caddr_t callback_arg,
1356 1356 diskaddr_t lba, uint32_t blockcount);
1357 1357 static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
1358 1358 struct buf *bp, diskaddr_t lba, uint32_t blockcount);
1359 1359
1360 1360 /*
1361 1361 * Prototypes for functions to support USCSI IO.
1362 1362 */
1363 1363 static int sd_uscsi_strategy(struct buf *bp);
1364 1364 static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
1365 1365 static void sd_destroypkt_for_uscsi(struct buf *);
1366 1366
1367 1367 static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
1368 1368 uchar_t chain_type, void *pktinfop);
1369 1369
1370 1370 static int sd_pm_entry(struct sd_lun *un);
1371 1371 static void sd_pm_exit(struct sd_lun *un);
1372 1372
1373 1373 static void sd_pm_idletimeout_handler(void *arg);
1374 1374
1375 1375 /*
1376 1376 * sd_core internal functions (used at the sd_core_io layer).
1377 1377 */
1378 1378 static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
1379 1379 static void sdintr(struct scsi_pkt *pktp);
1380 1380 static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);
1381 1381
1382 1382 static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
1383 1383 enum uio_seg dataspace, int path_flag);
1384 1384
1385 1385 static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
1386 1386 daddr_t blkno, int (*func)(struct buf *));
1387 1387 static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
1388 1388 uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
1389 1389 static void sd_bioclone_free(struct buf *bp);
1390 1390 static void sd_shadow_buf_free(struct buf *bp);
1391 1391
1392 1392 static void sd_print_transport_rejected_message(struct sd_lun *un,
1393 1393 struct sd_xbuf *xp, int code);
1394 1394 static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
1395 1395 void *arg, int code);
1396 1396 static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
1397 1397 void *arg, int code);
1398 1398 static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
1399 1399 void *arg, int code);
1400 1400
1401 1401 static void sd_retry_command(struct sd_lun *un, struct buf *bp,
1402 1402 int retry_check_flag,
1403 1403 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
1404 1404 int c),
1405 1405 void *user_arg, int failure_code, clock_t retry_delay,
1406 1406 void (*statp)(kstat_io_t *));
1407 1407
1408 1408 static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
1409 1409 clock_t retry_delay, void (*statp)(kstat_io_t *));
1410 1410
1411 1411 static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
1412 1412 struct scsi_pkt *pktp);
1413 1413 static void sd_start_retry_command(void *arg);
1414 1414 static void sd_start_direct_priority_command(void *arg);
1415 1415 static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
1416 1416 int errcode);
1417 1417 static void sd_return_failed_command_no_restart(struct sd_lun *un,
1418 1418 struct buf *bp, int errcode);
1419 1419 static void sd_return_command(struct sd_lun *un, struct buf *bp);
1420 1420 static void sd_sync_with_callback(struct sd_lun *un);
1421 1421 static int sdrunout(caddr_t arg);
1422 1422
1423 1423 static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
1424 1424 static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);
1425 1425
1426 1426 static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
1427 1427 static void sd_restore_throttle(void *arg);
1428 1428
1429 1429 static void sd_init_cdb_limits(struct sd_lun *un);
1430 1430
1431 1431 static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
1432 1432 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1433 1433
1434 1434 /*
1435 1435 * Error handling functions
1436 1436 */
1437 1437 static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
1438 1438 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1439 1439 static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
1440 1440 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1441 1441 static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
1442 1442 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1443 1443 static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
1444 1444 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1445 1445
1446 1446 static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
1447 1447 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1448 1448 static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
1449 1449 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1450 1450 static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
1451 1451 struct sd_xbuf *xp, size_t actual_len);
1452 1452 static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
1453 1453 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1454 1454
1455 1455 static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
1456 1456 void *arg, int code);
1457 1457
1458 1458 static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
1459 1459 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1460 1460 static void sd_sense_key_recoverable_error(struct sd_lun *un,
1461 1461 uint8_t *sense_datap,
1462 1462 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1463 1463 static void sd_sense_key_not_ready(struct sd_lun *un,
1464 1464 uint8_t *sense_datap,
1465 1465 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1466 1466 static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
1467 1467 uint8_t *sense_datap,
1468 1468 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1469 1469 static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
1470 1470 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1471 1471 static void sd_sense_key_unit_attention(struct sd_lun *un,
1472 1472 uint8_t *sense_datap,
1473 1473 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1474 1474 static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
1475 1475 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1476 1476 static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
1477 1477 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1478 1478 static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
1479 1479 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1480 1480 static void sd_sense_key_default(struct sd_lun *un,
1481 1481 uint8_t *sense_datap,
1482 1482 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1483 1483
1484 1484 static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
1485 1485 void *arg, int flag);
1486 1486
1487 1487 static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
1488 1488 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1489 1489 static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
1490 1490 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1491 1491 static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
1492 1492 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1493 1493 static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
1494 1494 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1495 1495 static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
1496 1496 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1497 1497 static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
1498 1498 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1499 1499 static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
1500 1500 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1501 1501 static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
1502 1502 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1503 1503
1504 1504 static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);
1505 1505
1506 1506 static void sd_start_stop_unit_callback(void *arg);
1507 1507 static void sd_start_stop_unit_task(void *arg);
1508 1508
1509 1509 static void sd_taskq_create(void);
1510 1510 static void sd_taskq_delete(void);
1511 1511 static void sd_target_change_task(void *arg);
1512 1512 static void sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag);
1513 1513 static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
1514 1514 static void sd_log_eject_request_event(struct sd_lun *un, int km_flag);
1515 1515 static void sd_media_change_task(void *arg);
1516 1516
1517 1517 static int sd_handle_mchange(struct sd_lun *un);
1518 1518 static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag);
1519 1519 static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp,
1520 1520 uint32_t *lbap, int path_flag);
1521 1521 static int sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
1522 1522 uint32_t *lbap, uint32_t *psp, int path_flag);
1523 1523 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag,
1524 1524 int flag, int path_flag);
1525 1525 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr,
1526 1526 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
1527 1527 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag);
1528 1528 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc,
1529 1529 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
1530 1530 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc,
1531 1531 uchar_t usr_cmd, uchar_t *usr_bufp);
1532 1532 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
1533 1533 struct dk_callback *dkc);
1534 1534 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
1535 1535 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc,
1536 1536 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
1537 1537 uchar_t *bufaddr, uint_t buflen, int path_flag);
1538 1538 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc,
1539 1539 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
1540 1540 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
1541 1541 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize,
1542 1542 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
1543 1543 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize,
1544 1544 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
1545 1545 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
1546 1546 size_t buflen, daddr_t start_block, int path_flag);
1547 1547 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \
1548 1548 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \
1549 1549 path_flag)
1550 1550 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\
1551 1551 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\
1552 1552 path_flag)
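/*
 * Usage sketch (hypothetical fragment): the wrappers above simply fix the
 * command byte, so a read of "buflen" bytes starting at "start_block" via
 * the standard path would be:
 *
 *	rval = sd_send_scsi_READ(ssc, bufaddr, buflen, start_block,
 *	    SD_PATH_STANDARD);
 */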
1553 1553
1554 1554 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr,
1555 1555 uint16_t buflen, uchar_t page_code, uchar_t page_control,
1556 1556 uint16_t param_ptr, int path_flag);
1557 1557 static int sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc,
1558 1558 uchar_t *bufaddr, size_t buflen, uchar_t class_req);
1559 1559 static boolean_t sd_gesn_media_data_valid(uchar_t *data);
1560 1560
1561 1561 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
1562 1562 static void sd_free_rqs(struct sd_lun *un);
1563 1563
1564 1564 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
1565 1565 uchar_t *data, int len, int fmt);
1566 1566 static void sd_panic_for_res_conflict(struct sd_lun *un);
1567 1567
1568 1568 /*
1569 1569 * Disk Ioctl Function Prototypes
1570 1570 */
1571 1571 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
1572 1572 static int sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag);
1573 1573 static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
1574 1574 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);
1575 1575
1576 1576 /*
1577 1577 * Multi-host Ioctl Prototypes
1578 1578 */
1579 1579 static int sd_check_mhd(dev_t dev, int interval);
1580 1580 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
1581 1581 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
1582 1582 static char *sd_sname(uchar_t status);
1583 1583 static void sd_mhd_resvd_recover(void *arg);
1584 1584 static void sd_resv_reclaim_thread();
1585 1585 static int sd_take_ownership(dev_t dev, struct mhioctkown *p);
1586 1586 static int sd_reserve_release(dev_t dev, int cmd);
1587 1587 static void sd_rmv_resv_reclaim_req(dev_t dev);
1588 1588 static void sd_mhd_reset_notify_cb(caddr_t arg);
1589 1589 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un,
1590 1590 mhioc_inkeys_t *usrp, int flag);
1591 1591 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un,
1592 1592 mhioc_inresvs_t *usrp, int flag);
1593 1593 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag);
1594 1594 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag);
1595 1595 static int sd_mhdioc_release(dev_t dev);
1596 1596 static int sd_mhdioc_register_devid(dev_t dev);
1597 1597 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag);
1598 1598 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag);
1599 1599
1600 1600 /*
1601 1601 * SCSI removable prototypes
1602 1602 */
1603 1603 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag);
1604 1604 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
1605 1605 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
1606 1606 static int sr_pause_resume(dev_t dev, int mode);
1607 1607 static int sr_play_msf(dev_t dev, caddr_t data, int flag);
1608 1608 static int sr_play_trkind(dev_t dev, caddr_t data, int flag);
1609 1609 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag);
1610 1610 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag);
1611 1611 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag);
1612 1612 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag);
1613 1613 static int sr_read_cdda(dev_t dev, caddr_t data, int flag);
1614 1614 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag);
1615 1615 static int sr_read_mode1(dev_t dev, caddr_t data, int flag);
1616 1616 static int sr_read_mode2(dev_t dev, caddr_t data, int flag);
1617 1617 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag);
1618 1618 static int sr_sector_mode(dev_t dev, uint32_t blksize);
1619 1619 static int sr_eject(dev_t dev);
1620 1620 static void sr_ejected(register struct sd_lun *un);
1621 1621 static int sr_check_wp(dev_t dev);
1622 1622 static opaque_t sd_watch_request_submit(struct sd_lun *un);
1623 1623 static int sd_check_media(dev_t dev, enum dkio_state state);
1624 1624 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
1625 1625 static void sd_delayed_cv_broadcast(void *arg);
1626 1626 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag);
1627 1627 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag);
1628 1628
1629 1629 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page);
1630 1630
1631 1631 /*
1632 1632 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions.
1633 1633 */
1634 1634 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag);
1635 1635 static int sd_wm_cache_constructor(void *wm, void *un, int flags);
1636 1636 static void sd_wm_cache_destructor(void *wm, void *un);
1637 1637 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb,
1638 1638 daddr_t endb, ushort_t typ);
1639 1639 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb,
1640 1640 daddr_t endb);
1641 1641 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp);
1642 1642 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm);
1643 1643 static void sd_read_modify_write_task(void * arg);
1644 1644 static int
1645 1645 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
1646 1646 struct buf **bpp);
1647 1647
1648 1648
1649 1649 /*
1650 1650 * Function prototypes for failfast support.
1651 1651 */
1652 1652 static void sd_failfast_flushq(struct sd_lun *un);
1653 1653 static int sd_failfast_flushq_callback(struct buf *bp);
1654 1654
1655 1655 /*
1656 1656 * Function prototypes to check for lsi devices
1657 1657 */
1658 1658 static void sd_is_lsi(struct sd_lun *un);
1659 1659
1660 1660 /*
1661 1661 * Function prototypes for partial DMA support
1662 1662 */
1663 1663 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
1664 1664 struct scsi_pkt *pkt, struct sd_xbuf *xp);
1665 1665
1666 1666
1667 1667 /* Function prototypes for cmlb */
1668 1668 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
1669 1669 diskaddr_t start_block, size_t reqlength, void *tg_cookie);
1670 1670
1671 1671 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie);
1672 1672
1673 1673 /*
1674 1674 * For printing RMW warning message timely
1675 1675 */
1676 1676 static void sd_rmw_msg_print_handler(void *arg);
1677 1677
1678 1678 /*
1679 1679 * Constants for failfast support:
1680 1680 *
1681 1681 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO
1682 1682 * failfast processing being performed.
1683 1683 *
1684 1684 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing
1685 1685 * failfast processing on all bufs with B_FAILFAST set.
1686 1686 */
1687 1687
1688 1688 #define SD_FAILFAST_INACTIVE 0
1689 1689 #define SD_FAILFAST_ACTIVE 1
1690 1690
1691 1691 /*
1692 1692 * Bitmask to control behavior of buf(9S) flushes when a transition to
1693 1693 * the failfast state occurs. Optional bits include:
1694 1694 *
1695 1695 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that
1696 1696 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will
1697 1697 * be flushed.
1698 1698 *
1699 1699 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the
1700 1700 * driver, in addition to the regular wait queue. This includes the xbuf
1701 1701 * queues. When clear, only the driver's wait queue will be flushed.
1702 1702 */
1703 1703 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01
1704 1704 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02
1705 1705
1706 1706 /*
1707 1707 * The default behavior is to only flush bufs that have B_FAILFAST set, but
1708 1708 * to flush all queues within the driver.
1709 1709 */
1710 1710 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES;
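/*
 * Tuning sketch (hypothetical values): both optional bits can be combined,
 * e.g. at boot time via /etc/system:
 *
 *	set sd:sd_failfast_flushctl = 0x3
 *
 * which is equivalent to setting SD_FAILFAST_FLUSH_ALL_BUFS |
 * SD_FAILFAST_FLUSH_ALL_QUEUES, i.e. flush every buf in every queue on a
 * transition to the failfast state.
 */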
1711 1711
1712 1712
1713 1713 /*
1714 1714 * SD Testing Fault Injection
1715 1715 */
1716 1716 #ifdef SD_FAULT_INJECTION
1717 1717 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un);
1718 1718 static void sd_faultinjection(struct scsi_pkt *pktp);
1719 1719 static void sd_injection_log(char *buf, struct sd_lun *un);
1720 1720 #endif
1721 1721
1722 1722 /*
1723 1723 * Device driver ops vector
1724 1724 */
1725 1725 static struct cb_ops sd_cb_ops = {
1726 1726 sdopen, /* open */
1727 1727 sdclose, /* close */
1728 1728 sdstrategy, /* strategy */
1729 1729 nodev, /* print */
1730 1730 sddump, /* dump */
1731 1731 sdread, /* read */
1732 1732 sdwrite, /* write */
1733 1733 sdioctl, /* ioctl */
1734 1734 nodev, /* devmap */
1735 1735 nodev, /* mmap */
1736 1736 nodev, /* segmap */
1737 1737 nochpoll, /* poll */
1738 1738 sd_prop_op, /* cb_prop_op */
1739 1739 0, /* streamtab */
1740 1740 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */
1741 1741 CB_REV, /* cb_rev */
1742 1742 sdaread, /* async I/O read entry point */
1743 1743 sdawrite /* async I/O write entry point */
1744 1744 };
1745 1745
1746 1746 struct dev_ops sd_ops = {
1747 1747 DEVO_REV, /* devo_rev, */
1748 1748 0, /* refcnt */
1749 1749 sdinfo, /* info */
1750 1750 nulldev, /* identify */
1751 1751 sdprobe, /* probe */
1752 1752 sdattach, /* attach */
1753 1753 sddetach, /* detach */
1754 1754 nodev, /* reset */
1755 1755 &sd_cb_ops, /* driver operations */
1756 1756 NULL, /* bus operations */
1757 1757 sdpower, /* power */
1758 1758 ddi_quiesce_not_needed, /* quiesce */
1759 1759 };
1760 1760
1761 1761 /*
1762 1762 * This is the loadable module wrapper.
1763 1763 */
(916 lines elided)
1764 1764 #include <sys/modctl.h>
1765 1765
1766 1766 #ifndef XPV_HVM_DRIVER
1767 1767 static struct modldrv modldrv = {
1768 1768 &mod_driverops, /* Type of module. This one is a driver */
1769 1769 SD_MODULE_NAME, /* Module name. */
1770 1770 &sd_ops /* driver ops */
1771 1771 };
1772 1772
1773 1773 static struct modlinkage modlinkage = {
1774 - MODREV_1, &modldrv, NULL
1774 + MODREV_1, { &modldrv, NULL }
1775 1775 };
1776 1776
1777 1777 #else /* XPV_HVM_DRIVER */
1778 1778 static struct modlmisc modlmisc = {
1779 1779 &mod_miscops, /* Type of module. This one is a misc */
1780 1780 "HVM " SD_MODULE_NAME, /* Module name. */
1781 1781 };
1782 1782
1783 1783 static struct modlinkage modlinkage = {
1784 - MODREV_1, &modlmisc, NULL
1784 + MODREV_1, { &modlmisc, NULL }
1785 1785 };
1786 1786
1787 1787 #endif /* XPV_HVM_DRIVER */
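/*
 * Note on the brace change above (the subject of this review): struct
 * modlinkage carries its linkage list as an array member (roughly
 * "void *ml_linkage[N];" in <sys/modctl.h>), so once -Wno-missing-braces is
 * removed from Makefile.uts, gcc's -Wmissing-braces requires each
 * sub-aggregate in the initializer to be braced explicitly:
 *
 *	static struct modlinkage modlinkage = {
 *		MODREV_1,		-- ml_rev
 *		{ &modldrv, NULL }	-- ml_linkage[] braced as an array
 *	};
 *
 * The sd_additional_codes[] initializer below is adjusted for the same
 * reason: each scsi_asq_key_strings entry gets its own braces.
 */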
1788 1788
1789 1789 static cmlb_tg_ops_t sd_tgops = {
1790 1790 TG_DK_OPS_VERSION_1,
1791 1791 sd_tg_rdwr,
1792 1792 sd_tg_getinfo
1793 1793 };
1794 1794
1795 1795 static struct scsi_asq_key_strings sd_additional_codes[] = {
1796 - 0x81, 0, "Logical Unit is Reserved",
1797 - 0x85, 0, "Audio Address Not Valid",
1798 - 0xb6, 0, "Media Load Mechanism Failed",
1799 - 0xB9, 0, "Audio Play Operation Aborted",
1800 - 0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1801 - 0x53, 2, "Medium removal prevented",
1802 - 0x6f, 0, "Authentication failed during key exchange",
1803 - 0x6f, 1, "Key not present",
1804 - 0x6f, 2, "Key not established",
1805 - 0x6f, 3, "Read without proper authentication",
1806 - 0x6f, 4, "Mismatched region to this logical unit",
1807 - 0x6f, 5, "Region reset count error",
1808 - 0xffff, 0x0, NULL
1796 + { 0x81, 0, "Logical Unit is Reserved" },
1797 + { 0x85, 0, "Audio Address Not Valid" },
1798 + { 0xb6, 0, "Media Load Mechanism Failed" },
1799 + { 0xB9, 0, "Audio Play Operation Aborted" },
1800 + { 0xbf, 0, "Buffer Overflow for Read All Subcodes Command" },
1801 + { 0x53, 2, "Medium removal prevented" },
1802 + { 0x6f, 0, "Authentication failed during key exchange" },
1803 + { 0x6f, 1, "Key not present" },
1804 + { 0x6f, 2, "Key not established" },
1805 + { 0x6f, 3, "Read without proper authentication" },
1806 + { 0x6f, 4, "Mismatched region to this logical unit" },
1807 + { 0x6f, 5, "Region reset count error" },
1808 + { 0xffff, 0x0, NULL }
1809 1809 };
1810 1810
1811 1811
1812 1812 /*
1813 1813 * Struct for passing printing information for sense data messages
1814 1814 */
1815 1815 struct sd_sense_info {
1816 1816 int ssi_severity;
1817 1817 int ssi_pfa_flag;
1818 1818 };
1819 1819
1820 1820 /*
1821 1821 * Table of function pointers for iostart-side routines. Separate "chains"
1822 1822 * of layered function calls are formed by placing the function pointers
1823 1823 * sequentially in the desired order. Functions are called according to an
1824 1824 * incrementing table index ordering. The last function in each chain must
1825 1825 * be sd_core_iostart(). The corresponding iodone-side routines are expected
1826 1826 * in the sd_iodone_chain[] array.
1827 1827 *
1828 1828 * Note: It may seem more natural to organize both the iostart and iodone
1829 1829 * functions together, into an array of structures (or some similar
1830 1830 * organization) with a common index, rather than two separate arrays which
1831 1831 * must be maintained in synchronization. The purpose of this division is
1832 1832  * to achieve improved performance: individual arrays allow for more
1833 1833 * effective cache line utilization on certain platforms.
1834 1834 */
1835 1835
1836 1836 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp);
1837 1837
1838 1838
1839 1839 static sd_chain_t sd_iostart_chain[] = {
1840 1840
1841 1841 /* Chain for buf IO for disk drive targets (PM enabled) */
1842 1842 sd_mapblockaddr_iostart, /* Index: 0 */
1843 1843 sd_pm_iostart, /* Index: 1 */
1844 1844 sd_core_iostart, /* Index: 2 */
1845 1845
1846 1846 /* Chain for buf IO for disk drive targets (PM disabled) */
1847 1847 sd_mapblockaddr_iostart, /* Index: 3 */
1848 1848 sd_core_iostart, /* Index: 4 */
1849 1849
1850 1850 /*
1851 1851 * Chain for buf IO for removable-media or large sector size
1852 1852 * disk drive targets with RMW needed (PM enabled)
1853 1853 */
1854 1854 sd_mapblockaddr_iostart, /* Index: 5 */
1855 1855 sd_mapblocksize_iostart, /* Index: 6 */
1856 1856 sd_pm_iostart, /* Index: 7 */
1857 1857 sd_core_iostart, /* Index: 8 */
1858 1858
1859 1859 /*
1860 1860 * Chain for buf IO for removable-media or large sector size
1861 1861 * disk drive targets with RMW needed (PM disabled)
1862 1862 */
1863 1863 sd_mapblockaddr_iostart, /* Index: 9 */
1864 1864 sd_mapblocksize_iostart, /* Index: 10 */
1865 1865 sd_core_iostart, /* Index: 11 */
1866 1866
1867 1867 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1868 1868 sd_mapblockaddr_iostart, /* Index: 12 */
1869 1869 sd_checksum_iostart, /* Index: 13 */
1870 1870 sd_pm_iostart, /* Index: 14 */
1871 1871 sd_core_iostart, /* Index: 15 */
1872 1872
1873 1873 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1874 1874 sd_mapblockaddr_iostart, /* Index: 16 */
1875 1875 sd_checksum_iostart, /* Index: 17 */
1876 1876 sd_core_iostart, /* Index: 18 */
1877 1877
1878 1878 /* Chain for USCSI commands (all targets) */
1879 1879 sd_pm_iostart, /* Index: 19 */
1880 1880 sd_core_iostart, /* Index: 20 */
1881 1881
1882 1882 /* Chain for checksumming USCSI commands (all targets) */
1883 1883 sd_checksum_uscsi_iostart, /* Index: 21 */
1884 1884 sd_pm_iostart, /* Index: 22 */
1885 1885 sd_core_iostart, /* Index: 23 */
1886 1886
1887 1887 /* Chain for "direct" USCSI commands (all targets) */
1888 1888 sd_core_iostart, /* Index: 24 */
1889 1889
1890 1890 /* Chain for "direct priority" USCSI commands (all targets) */
1891 1891 sd_core_iostart, /* Index: 25 */
1892 1892
1893 1893 /*
1894 1894 * Chain for buf IO for large sector size disk drive targets
1895 1895 * with RMW needed with checksumming (PM enabled)
1896 1896 */
1897 1897 sd_mapblockaddr_iostart, /* Index: 26 */
1898 1898 sd_mapblocksize_iostart, /* Index: 27 */
1899 1899 sd_checksum_iostart, /* Index: 28 */
1900 1900 sd_pm_iostart, /* Index: 29 */
1901 1901 sd_core_iostart, /* Index: 30 */
1902 1902
1903 1903 /*
1904 1904 * Chain for buf IO for large sector size disk drive targets
1905 1905 * with RMW needed with checksumming (PM disabled)
1906 1906 */
1907 1907 sd_mapblockaddr_iostart, /* Index: 31 */
1908 1908 sd_mapblocksize_iostart, /* Index: 32 */
1909 1909 sd_checksum_iostart, /* Index: 33 */
1910 1910 sd_core_iostart, /* Index: 34 */
1911 1911
1912 1912 };
1913 1913
1914 1914 /*
1915 1915 * Macros to locate the first function of each iostart chain in the
1916 1916 * sd_iostart_chain[] array. These are located by the index in the array.
1917 1917 */
1918 1918 #define SD_CHAIN_DISK_IOSTART 0
1919 1919 #define SD_CHAIN_DISK_IOSTART_NO_PM 3
1920 1920 #define SD_CHAIN_MSS_DISK_IOSTART 5
1921 1921 #define SD_CHAIN_RMMEDIA_IOSTART 5
1922 1922 #define SD_CHAIN_MSS_DISK_IOSTART_NO_PM 9
1923 1923 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9
1924 1924 #define SD_CHAIN_CHKSUM_IOSTART 12
1925 1925 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16
1926 1926 #define SD_CHAIN_USCSI_CMD_IOSTART 19
1927 1927 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21
1928 1928 #define SD_CHAIN_DIRECT_CMD_IOSTART 24
1929 1929 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25
1930 1930 #define SD_CHAIN_MSS_CHKSUM_IOSTART 26
1931 1931 #define SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM 31
1932 1932
1933 1933
1934 1934 /*
1935 1935 * Table of function pointers for the iodone-side routines for the driver-
1936 1936 * internal layering mechanism. The calling sequence for iodone routines
1937 1937 * uses a decrementing table index, so the last routine called in a chain
1938 1938 * must be at the lowest array index location for that chain. The last
1939 1939 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs)
1940 1940 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering
1941 1941 * of the functions in an iodone side chain must correspond to the ordering
1942 1942 * of the iostart routines for that chain. Note that there is no iodone
1943 1943 * side routine that corresponds to sd_core_iostart(), so there is no
1944 1944 * entry in the table for this.
1945 1945 */
1946 1946
1947 1947 static sd_chain_t sd_iodone_chain[] = {
1948 1948
1949 1949 /* Chain for buf IO for disk drive targets (PM enabled) */
1950 1950 sd_buf_iodone, /* Index: 0 */
1951 1951 sd_mapblockaddr_iodone, /* Index: 1 */
1952 1952 sd_pm_iodone, /* Index: 2 */
1953 1953
1954 1954 /* Chain for buf IO for disk drive targets (PM disabled) */
1955 1955 sd_buf_iodone, /* Index: 3 */
1956 1956 sd_mapblockaddr_iodone, /* Index: 4 */
1957 1957
1958 1958 /*
1959 1959 * Chain for buf IO for removable-media or large sector size
1960 1960 * disk drive targets with RMW needed (PM enabled)
1961 1961 */
1962 1962 sd_buf_iodone, /* Index: 5 */
1963 1963 sd_mapblockaddr_iodone, /* Index: 6 */
1964 1964 sd_mapblocksize_iodone, /* Index: 7 */
1965 1965 sd_pm_iodone, /* Index: 8 */
1966 1966
1967 1967 /*
1968 1968 * Chain for buf IO for removable-media or large sector size
1969 1969 * disk drive targets with RMW needed (PM disabled)
1970 1970 */
1971 1971 sd_buf_iodone, /* Index: 9 */
1972 1972 sd_mapblockaddr_iodone, /* Index: 10 */
1973 1973 sd_mapblocksize_iodone, /* Index: 11 */
1974 1974
1975 1975 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1976 1976 sd_buf_iodone, /* Index: 12 */
1977 1977 sd_mapblockaddr_iodone, /* Index: 13 */
1978 1978 sd_checksum_iodone, /* Index: 14 */
1979 1979 sd_pm_iodone, /* Index: 15 */
1980 1980
1981 1981 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1982 1982 sd_buf_iodone, /* Index: 16 */
1983 1983 sd_mapblockaddr_iodone, /* Index: 17 */
1984 1984 sd_checksum_iodone, /* Index: 18 */
1985 1985
1986 1986 /* Chain for USCSI commands (non-checksum targets) */
1987 1987 sd_uscsi_iodone, /* Index: 19 */
1988 1988 sd_pm_iodone, /* Index: 20 */
1989 1989
1990 1990 /* Chain for USCSI commands (checksum targets) */
1991 1991 sd_uscsi_iodone, /* Index: 21 */
1992 1992 sd_checksum_uscsi_iodone, /* Index: 22 */
1993 1993 	sd_pm_iodone,			/* Index: 23 */
1994 1994
1995 1995 /* Chain for "direct" USCSI commands (all targets) */
1996 1996 sd_uscsi_iodone, /* Index: 24 */
1997 1997
1998 1998 /* Chain for "direct priority" USCSI commands (all targets) */
1999 1999 sd_uscsi_iodone, /* Index: 25 */
2000 2000
2001 2001 /*
2002 2002 * Chain for buf IO for large sector size disk drive targets
2003 2003 * with checksumming (PM enabled)
2004 2004 */
2005 2005 sd_buf_iodone, /* Index: 26 */
2006 2006 sd_mapblockaddr_iodone, /* Index: 27 */
2007 2007 sd_mapblocksize_iodone, /* Index: 28 */
2008 2008 sd_checksum_iodone, /* Index: 29 */
2009 2009 sd_pm_iodone, /* Index: 30 */
2010 2010
2011 2011 /*
2012 2012 * Chain for buf IO for large sector size disk drive targets
2013 2013 * with checksumming (PM disabled)
2014 2014 */
2015 2015 sd_buf_iodone, /* Index: 31 */
2016 2016 sd_mapblockaddr_iodone, /* Index: 32 */
2017 2017 sd_mapblocksize_iodone, /* Index: 33 */
2018 2018 sd_checksum_iodone, /* Index: 34 */
2019 2019 };
2020 2020
2021 2021
2022 2022 /*
2023 2023 * Macros to locate the "first" function in the sd_iodone_chain[] array for
2024 2024 * each iodone-side chain. These are located by the array index, but as the
2025 2025 * iodone side functions are called in a decrementing-index order, the
2026 2026 * highest index number in each chain must be specified (as these correspond
2027 2027 * to the first function in the iodone chain that will be called by the core
2028 2028 * at IO completion time).
2029 2029 */
2030 2030
2031 2031 #define SD_CHAIN_DISK_IODONE 2
2032 2032 #define SD_CHAIN_DISK_IODONE_NO_PM 4
2033 2033 #define SD_CHAIN_RMMEDIA_IODONE 8
2034 2034 #define SD_CHAIN_MSS_DISK_IODONE 8
2035 2035 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11
2036 2036 #define SD_CHAIN_MSS_DISK_IODONE_NO_PM 11
2037 2037 #define SD_CHAIN_CHKSUM_IODONE 15
2038 2038 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18
2039 2039 #define SD_CHAIN_USCSI_CMD_IODONE 20
2040 2040 #define SD_CHAIN_USCSI_CHKSUM_IODONE 22
2041 2041 #define SD_CHAIN_DIRECT_CMD_IODONE 24
2042 2042 #define SD_CHAIN_PRIORITY_CMD_IODONE 25
2043 2043 #define SD_CHAIN_MSS_CHKSUM_IODONE 30
2044 2044 #define SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM 34
2045 2045
2046 2046
2047 2047
2048 2048 /*
2049 2049 * Array to map a layering chain index to the appropriate initpkt routine.
2050 2050 * The redundant entries are present so that the index used for accessing
2051 2051 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2052 2052 * with this table as well.
2053 2053 */
2054 2054 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
2055 2055
2056 2056 static sd_initpkt_t sd_initpkt_map[] = {
2057 2057
2058 2058 /* Chain for buf IO for disk drive targets (PM enabled) */
2059 2059 sd_initpkt_for_buf, /* Index: 0 */
2060 2060 sd_initpkt_for_buf, /* Index: 1 */
2061 2061 sd_initpkt_for_buf, /* Index: 2 */
2062 2062
2063 2063 /* Chain for buf IO for disk drive targets (PM disabled) */
2064 2064 sd_initpkt_for_buf, /* Index: 3 */
2065 2065 sd_initpkt_for_buf, /* Index: 4 */
2066 2066
2067 2067 /*
2068 2068 * Chain for buf IO for removable-media or large sector size
2069 2069 * disk drive targets (PM enabled)
2070 2070 */
2071 2071 sd_initpkt_for_buf, /* Index: 5 */
2072 2072 sd_initpkt_for_buf, /* Index: 6 */
2073 2073 sd_initpkt_for_buf, /* Index: 7 */
2074 2074 sd_initpkt_for_buf, /* Index: 8 */
2075 2075
2076 2076 /*
2077 2077 * Chain for buf IO for removable-media or large sector size
2078 2078 * disk drive targets (PM disabled)
2079 2079 */
2080 2080 sd_initpkt_for_buf, /* Index: 9 */
2081 2081 sd_initpkt_for_buf, /* Index: 10 */
2082 2082 sd_initpkt_for_buf, /* Index: 11 */
2083 2083
2084 2084 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2085 2085 sd_initpkt_for_buf, /* Index: 12 */
2086 2086 sd_initpkt_for_buf, /* Index: 13 */
2087 2087 sd_initpkt_for_buf, /* Index: 14 */
2088 2088 sd_initpkt_for_buf, /* Index: 15 */
2089 2089
2090 2090 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2091 2091 sd_initpkt_for_buf, /* Index: 16 */
2092 2092 sd_initpkt_for_buf, /* Index: 17 */
2093 2093 sd_initpkt_for_buf, /* Index: 18 */
2094 2094
2095 2095 /* Chain for USCSI commands (non-checksum targets) */
2096 2096 sd_initpkt_for_uscsi, /* Index: 19 */
2097 2097 sd_initpkt_for_uscsi, /* Index: 20 */
2098 2098
2099 2099 /* Chain for USCSI commands (checksum targets) */
2100 2100 sd_initpkt_for_uscsi, /* Index: 21 */
2101 2101 sd_initpkt_for_uscsi, /* Index: 22 */
2102 2102 	sd_initpkt_for_uscsi,		/* Index: 23 */
2103 2103
2104 2104 /* Chain for "direct" USCSI commands (all targets) */
2105 2105 sd_initpkt_for_uscsi, /* Index: 24 */
2106 2106
2107 2107 /* Chain for "direct priority" USCSI commands (all targets) */
2108 2108 sd_initpkt_for_uscsi, /* Index: 25 */
2109 2109
2110 2110 /*
2111 2111 * Chain for buf IO for large sector size disk drive targets
2112 2112 * with checksumming (PM enabled)
2113 2113 */
2114 2114 sd_initpkt_for_buf, /* Index: 26 */
2115 2115 sd_initpkt_for_buf, /* Index: 27 */
2116 2116 sd_initpkt_for_buf, /* Index: 28 */
2117 2117 sd_initpkt_for_buf, /* Index: 29 */
2118 2118 sd_initpkt_for_buf, /* Index: 30 */
2119 2119
2120 2120 /*
2121 2121 * Chain for buf IO for large sector size disk drive targets
2122 2122 * with checksumming (PM disabled)
2123 2123 */
2124 2124 sd_initpkt_for_buf, /* Index: 31 */
2125 2125 sd_initpkt_for_buf, /* Index: 32 */
2126 2126 sd_initpkt_for_buf, /* Index: 33 */
2127 2127 sd_initpkt_for_buf, /* Index: 34 */
2128 2128 };
2129 2129
2130 2130
2131 2131 /*
2132 2132  * Array to map a layering chain index to the appropriate destroypkt routine.
2133 2133 * The redundant entries are present so that the index used for accessing
2134 2134 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2135 2135 * with this table as well.
2136 2136 */
2137 2137 typedef void (*sd_destroypkt_t)(struct buf *);
2138 2138
2139 2139 static sd_destroypkt_t sd_destroypkt_map[] = {
2140 2140
2141 2141 /* Chain for buf IO for disk drive targets (PM enabled) */
2142 2142 sd_destroypkt_for_buf, /* Index: 0 */
2143 2143 sd_destroypkt_for_buf, /* Index: 1 */
2144 2144 sd_destroypkt_for_buf, /* Index: 2 */
2145 2145
2146 2146 /* Chain for buf IO for disk drive targets (PM disabled) */
2147 2147 sd_destroypkt_for_buf, /* Index: 3 */
2148 2148 sd_destroypkt_for_buf, /* Index: 4 */
2149 2149
2150 2150 /*
2151 2151 * Chain for buf IO for removable-media or large sector size
2152 2152 * disk drive targets (PM enabled)
2153 2153 */
2154 2154 sd_destroypkt_for_buf, /* Index: 5 */
2155 2155 sd_destroypkt_for_buf, /* Index: 6 */
2156 2156 sd_destroypkt_for_buf, /* Index: 7 */
2157 2157 sd_destroypkt_for_buf, /* Index: 8 */
2158 2158
2159 2159 /*
2160 2160 * Chain for buf IO for removable-media or large sector size
2161 2161 * disk drive targets (PM disabled)
2162 2162 */
2163 2163 sd_destroypkt_for_buf, /* Index: 9 */
2164 2164 sd_destroypkt_for_buf, /* Index: 10 */
2165 2165 sd_destroypkt_for_buf, /* Index: 11 */
2166 2166
2167 2167 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2168 2168 sd_destroypkt_for_buf, /* Index: 12 */
2169 2169 sd_destroypkt_for_buf, /* Index: 13 */
2170 2170 sd_destroypkt_for_buf, /* Index: 14 */
2171 2171 sd_destroypkt_for_buf, /* Index: 15 */
2172 2172
2173 2173 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2174 2174 sd_destroypkt_for_buf, /* Index: 16 */
2175 2175 sd_destroypkt_for_buf, /* Index: 17 */
2176 2176 sd_destroypkt_for_buf, /* Index: 18 */
2177 2177
2178 2178 /* Chain for USCSI commands (non-checksum targets) */
2179 2179 sd_destroypkt_for_uscsi, /* Index: 19 */
2180 2180 sd_destroypkt_for_uscsi, /* Index: 20 */
2181 2181
2182 2182 /* Chain for USCSI commands (checksum targets) */
2183 2183 sd_destroypkt_for_uscsi, /* Index: 21 */
2184 2184 sd_destroypkt_for_uscsi, /* Index: 22 */
2185 2185 	sd_destroypkt_for_uscsi,	/* Index: 23 */
2186 2186
2187 2187 /* Chain for "direct" USCSI commands (all targets) */
2188 2188 sd_destroypkt_for_uscsi, /* Index: 24 */
2189 2189
2190 2190 /* Chain for "direct priority" USCSI commands (all targets) */
2191 2191 sd_destroypkt_for_uscsi, /* Index: 25 */
2192 2192
2193 2193 /*
2194 2194 * Chain for buf IO for large sector size disk drive targets
2195 2195  * with checksumming (PM enabled)
2196 2196 */
2197 2197 sd_destroypkt_for_buf, /* Index: 26 */
2198 2198 sd_destroypkt_for_buf, /* Index: 27 */
2199 2199 sd_destroypkt_for_buf, /* Index: 28 */
2200 2200 sd_destroypkt_for_buf, /* Index: 29 */
2201 2201 sd_destroypkt_for_buf, /* Index: 30 */
2202 2202
2203 2203 /*
2204 2204 * Chain for buf IO for large sector size disk drive targets
2205 2205  * with checksumming (PM disabled)
2206 2206 */
2207 2207 sd_destroypkt_for_buf, /* Index: 31 */
2208 2208 sd_destroypkt_for_buf, /* Index: 32 */
2209 2209 sd_destroypkt_for_buf, /* Index: 33 */
2210 2210 sd_destroypkt_for_buf, /* Index: 34 */
2211 2211 };
2212 2212
2213 2213
2214 2214
2215 2215 /*
2216 2216 * Array to map a layering chain index to the appropriate chain "type".
2217 2217 * The chain type indicates a specific property/usage of the chain.
2218 2218 * The redundant entries are present so that the index used for accessing
2219 2219 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2220 2220 * with this table as well.
2221 2221 */
2222 2222
2223 2223 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */
2224 2224 #define SD_CHAIN_BUFIO 1 /* regular buf IO */
2225 2225 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */
2226 2226 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */
2227 2227 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */
2228 2228 /* (for error recovery) */
2229 2229
2230 2230 static int sd_chain_type_map[] = {
2231 2231
2232 2232 /* Chain for buf IO for disk drive targets (PM enabled) */
2233 2233 SD_CHAIN_BUFIO, /* Index: 0 */
2234 2234 SD_CHAIN_BUFIO, /* Index: 1 */
2235 2235 SD_CHAIN_BUFIO, /* Index: 2 */
2236 2236
2237 2237 /* Chain for buf IO for disk drive targets (PM disabled) */
2238 2238 SD_CHAIN_BUFIO, /* Index: 3 */
2239 2239 SD_CHAIN_BUFIO, /* Index: 4 */
2240 2240
2241 2241 /*
2242 2242 * Chain for buf IO for removable-media or large sector size
2243 2243 * disk drive targets (PM enabled)
2244 2244 */
2245 2245 SD_CHAIN_BUFIO, /* Index: 5 */
2246 2246 SD_CHAIN_BUFIO, /* Index: 6 */
2247 2247 SD_CHAIN_BUFIO, /* Index: 7 */
2248 2248 SD_CHAIN_BUFIO, /* Index: 8 */
2249 2249
2250 2250 /*
2251 2251 * Chain for buf IO for removable-media or large sector size
2252 2252 * disk drive targets (PM disabled)
2253 2253 */
2254 2254 SD_CHAIN_BUFIO, /* Index: 9 */
2255 2255 SD_CHAIN_BUFIO, /* Index: 10 */
2256 2256 SD_CHAIN_BUFIO, /* Index: 11 */
2257 2257
2258 2258 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2259 2259 SD_CHAIN_BUFIO, /* Index: 12 */
2260 2260 SD_CHAIN_BUFIO, /* Index: 13 */
2261 2261 SD_CHAIN_BUFIO, /* Index: 14 */
2262 2262 SD_CHAIN_BUFIO, /* Index: 15 */
2263 2263
2264 2264 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2265 2265 SD_CHAIN_BUFIO, /* Index: 16 */
2266 2266 SD_CHAIN_BUFIO, /* Index: 17 */
2267 2267 SD_CHAIN_BUFIO, /* Index: 18 */
2268 2268
2269 2269 /* Chain for USCSI commands (non-checksum targets) */
2270 2270 SD_CHAIN_USCSI, /* Index: 19 */
2271 2271 SD_CHAIN_USCSI, /* Index: 20 */
2272 2272
2273 2273 /* Chain for USCSI commands (checksum targets) */
2274 2274 SD_CHAIN_USCSI, /* Index: 21 */
2275 2275 SD_CHAIN_USCSI, /* Index: 22 */
2276 2276 SD_CHAIN_USCSI, /* Index: 23 */
2277 2277
2278 2278 /* Chain for "direct" USCSI commands (all targets) */
2279 2279 SD_CHAIN_DIRECT, /* Index: 24 */
2280 2280
2281 2281 /* Chain for "direct priority" USCSI commands (all targets) */
2282 2282 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */
2283 2283
2284 2284 /*
2285 2285 * Chain for buf IO for large sector size disk drive targets
2286 2286 * with checksumming (PM enabled)
2287 2287 */
2288 2288 SD_CHAIN_BUFIO, /* Index: 26 */
2289 2289 SD_CHAIN_BUFIO, /* Index: 27 */
2290 2290 SD_CHAIN_BUFIO, /* Index: 28 */
2291 2291 SD_CHAIN_BUFIO, /* Index: 29 */
2292 2292 SD_CHAIN_BUFIO, /* Index: 30 */
2293 2293
2294 2294 /*
2295 2295 * Chain for buf IO for large sector size disk drive targets
2296 2296 * with checksumming (PM disabled)
2297 2297 */
2298 2298 SD_CHAIN_BUFIO, /* Index: 31 */
2299 2299 SD_CHAIN_BUFIO, /* Index: 32 */
2300 2300 SD_CHAIN_BUFIO, /* Index: 33 */
2301 2301 SD_CHAIN_BUFIO, /* Index: 34 */
2302 2302 };
2303 2303
2304 2304
2305 2305 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
2306 2306 #define SD_IS_BUFIO(xp) \
2307 2307 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2308 2308
2309 2309 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2310 2310 #define SD_IS_DIRECT_PRIORITY(xp) \
2311 2311 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
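/*
 * Usage sketch (hypothetical fragment): completion and error-handling code
 * can key off the originating chain of an xbuf, e.g.:
 *
 *	if (SD_IS_BUFIO(xp)) {
 *		... buf(9S) completion handling ...
 *	}
 */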
2312 2312
2313 2313
2314 2314
2315 2315 /*
2316 2316 * Struct, array, and macros to map a specific chain to the appropriate
2317 2317 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2318 2318 *
2319 2319 * The sd_chain_index_map[] array is used at attach time to set the various
2320 2320 * un_xxx_chain type members of the sd_lun softstate to the specific layering
2321 2321 * chain to be used with the instance. This allows different instances to use
2322 2322  * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2323 2323  * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2324 2324  * values at sd_xbuf init time, this allows (1) layering chains to be changed
2325 2325  * dynamically and without the use of locking; and (2) a layer to update the
2326 2326 * xb_chain_io[start|done] member in a given xbuf with its current index value,
2327 2327 * to allow for deferred processing of an IO within the same chain from a
2328 2328 * different execution context.
2329 2329 */
2330 2330
2331 2331 struct sd_chain_index {
2332 2332 int sci_iostart_index;
2333 2333 int sci_iodone_index;
2334 2334 };
2335 2335
2336 2336 static struct sd_chain_index sd_chain_index_map[] = {
2337 2337 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE },
2338 2338 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM },
2339 2339 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE },
2340 2340 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM },
2341 2341 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE },
2342 2342 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM },
2343 2343 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE },
2344 2344 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE },
2345 2345 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE },
2346 2346 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE },
2347 2347 { SD_CHAIN_MSS_CHKSUM_IOSTART, SD_CHAIN_MSS_CHKSUM_IODONE },
2348 2348 { SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM, SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM },
2349 2349
2350 2350 };
2351 2351
2352 2352
2353 2353 /*
2354 2354 * The following are indexes into the sd_chain_index_map[] array.
2355 2355 */
2356 2356
2357 2357 /* un->un_buf_chain_type must be set to one of these */
2358 2358 #define SD_CHAIN_INFO_DISK 0
2359 2359 #define SD_CHAIN_INFO_DISK_NO_PM 1
2360 2360 #define SD_CHAIN_INFO_RMMEDIA 2
2361 2361 #define SD_CHAIN_INFO_MSS_DISK 2
2362 2362 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3
2363 2363 #define SD_CHAIN_INFO_MSS_DSK_NO_PM 3
2364 2364 #define SD_CHAIN_INFO_CHKSUM 4
2365 2365 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5
2366 2366 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM 10
2367 2367 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM_NO_PM 11
2368 2368
2369 2369 /* un->un_uscsi_chain_type must be set to one of these */
2370 2370 #define SD_CHAIN_INFO_USCSI_CMD 6
2371 2371 /* USCSI with PM disabled is the same as DIRECT */
2372 2372 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8
2373 2373 #define SD_CHAIN_INFO_USCSI_CHKSUM 7
2374 2374
2375 2375 /* un->un_direct_chain_type must be set to one of these */
2376 2376 #define SD_CHAIN_INFO_DIRECT_CMD 8
2377 2377
2378 2378 /* un->un_priority_chain_type must be set to one of these */
2379 2379 #define SD_CHAIN_INFO_PRIORITY_CMD 9
2380 2380
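/*
 * Selection sketch (hypothetical fragment): sd_unit_attach() records one of
 * the SD_CHAIN_INFO_* values in the per-instance soft state, and the xbuf
 * init code later resolves it through sd_chain_index_map[], e.g.:
 *
 *	un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
 *	...
 *	index = sd_chain_index_map[chain_type].sci_iostart_index;
 *
 * (See sd_unit_attach() and sd_xbuf_init() for the real assignments.)
 */
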
2381 2381 /* size for devid inquiries */
2382 2382 #define MAX_INQUIRY_SIZE 0xF0
2383 2383
2384 2384 /*
2385 2385 * Macros used by functions to pass a given buf(9S) struct along to the
2386 2386 * next function in the layering chain for further processing.
2387 2387 *
2388 2388 * In the following macros, passing more than three arguments to the called
2389 2389 * routines causes the optimizer for the SPARC compiler to stop doing tail
2390 2390  * call elimination, which results in significant performance degradation.
2391 2391 */
2392 2392 #define SD_BEGIN_IOSTART(index, un, bp) \
2393 2393 ((*(sd_iostart_chain[index]))(index, un, bp))
2394 2394
2395 2395 #define SD_BEGIN_IODONE(index, un, bp) \
2396 2396 ((*(sd_iodone_chain[index]))(index, un, bp))
2397 2397
2398 2398 #define SD_NEXT_IOSTART(index, un, bp) \
2399 2399 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp))
2400 2400
2401 2401 #define SD_NEXT_IODONE(index, un, bp) \
2402 2402 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp))
2403 2403
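/*
 * Layering sketch (sd_example_iostart is hypothetical; see sd_pm_iostart()
 * et al. for the real pattern): an iostart-side layer performs its
 * processing and then passes the buf down the chain, while its iodone-side
 * counterpart later walks back up via SD_NEXT_IODONE():
 *
 *	static void
 *	sd_example_iostart(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		... per-layer processing of bp ...
 *		SD_NEXT_IOSTART(index, un, bp);
 *	}
 */
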
2404 2404 /*
2405 2405 * Function: _init
2406 2406 *
2407 2407 * Description: This is the driver _init(9E) entry point.
2408 2408 *
2409 2409 * Return Code: Returns the value from mod_install(9F) or
2410 2410 * ddi_soft_state_init(9F) as appropriate.
2411 2411 *
2412 2412 * Context: Called when driver module loaded.
2413 2413 */
2414 2414
2415 2415 int
2416 2416 _init(void)
2417 2417 {
2418 2418 int err;
2419 2419
2420 2420 /* establish driver name from module name */
2421 2421 sd_label = (char *)mod_modname(&modlinkage);
2422 2422
2423 2423 #ifndef XPV_HVM_DRIVER
2424 2424 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
2425 2425 SD_MAXUNIT);
2426 2426 if (err != 0) {
2427 2427 return (err);
2428 2428 }
2429 2429
2430 2430 #else /* XPV_HVM_DRIVER */
2431 2431 /* Remove the leading "hvm_" from the module name */
2432 2432 ASSERT(strncmp(sd_label, "hvm_", strlen("hvm_")) == 0);
2433 2433 sd_label += strlen("hvm_");
2434 2434
2435 2435 #endif /* XPV_HVM_DRIVER */
2436 2436
2437 2437 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
2438 2438 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
2439 2439 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);
2440 2440
2441 2441 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
2442 2442 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
2443 2443 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);
2444 2444
2445 2445 /*
2446 2446 	 * it's ok to init here even for fibre devices
2447 2447 */
2448 2448 sd_scsi_probe_cache_init();
2449 2449
2450 2450 sd_scsi_target_lun_init();
2451 2451
2452 2452 /*
2453 2453 * Creating taskq before mod_install ensures that all callers (threads)
2454 2454 * that enter the module after a successful mod_install encounter
2455 2455 * a valid taskq.
2456 2456 */
2457 2457 sd_taskq_create();
2458 2458
2459 2459 err = mod_install(&modlinkage);
2460 2460 if (err != 0) {
2461 2461 /* delete taskq if install fails */
2462 2462 sd_taskq_delete();
2463 2463
2464 2464 mutex_destroy(&sd_detach_mutex);
2465 2465 mutex_destroy(&sd_log_mutex);
2466 2466 mutex_destroy(&sd_label_mutex);
2467 2467
2468 2468 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2469 2469 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2470 2470 cv_destroy(&sd_tr.srq_inprocess_cv);
2471 2471
2472 2472 sd_scsi_probe_cache_fini();
2473 2473
2474 2474 sd_scsi_target_lun_fini();
2475 2475
2476 2476 #ifndef XPV_HVM_DRIVER
2477 2477 ddi_soft_state_fini(&sd_state);
2478 2478 #endif /* !XPV_HVM_DRIVER */
2479 2479 return (err);
2480 2480 }
2481 2481
2482 2482 return (err);
2483 2483 }
2484 2484
2485 2485
2486 2486 /*
2487 2487 * Function: _fini
2488 2488 *
2489 2489 * Description: This is the driver _fini(9E) entry point.
2490 2490 *
2491 2491 * Return Code: Returns the value from mod_remove(9F)
2492 2492 *
2493 2493 * Context: Called when driver module is unloaded.
2494 2494 */
2495 2495
2496 2496 int
2497 2497 _fini(void)
2498 2498 {
2499 2499 int err;
2500 2500
2501 2501 if ((err = mod_remove(&modlinkage)) != 0) {
2502 2502 return (err);
2503 2503 }
2504 2504
2505 2505 sd_taskq_delete();
2506 2506
2507 2507 mutex_destroy(&sd_detach_mutex);
2508 2508 mutex_destroy(&sd_log_mutex);
2509 2509 mutex_destroy(&sd_label_mutex);
2510 2510 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2511 2511
2512 2512 sd_scsi_probe_cache_fini();
2513 2513
2514 2514 sd_scsi_target_lun_fini();
2515 2515
2516 2516 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2517 2517 cv_destroy(&sd_tr.srq_inprocess_cv);
2518 2518
2519 2519 #ifndef XPV_HVM_DRIVER
2520 2520 ddi_soft_state_fini(&sd_state);
2521 2521 #endif /* !XPV_HVM_DRIVER */
2522 2522
2523 2523 return (err);
2524 2524 }
2525 2525
2526 2526
2527 2527 /*
2528 2528 * Function: _info
2529 2529 *
2530 2530 * Description: This is the driver _info(9E) entry point.
2531 2531 *
2532 2532 * Arguments: modinfop - pointer to the driver modinfo structure
2533 2533 *
2534 2534 * Return Code: Returns the value from mod_info(9F).
2535 2535 *
2536 2536 * Context: Kernel thread context
2537 2537 */
2538 2538
2539 2539 int
2540 2540 _info(struct modinfo *modinfop)
2541 2541 {
2542 2542 return (mod_info(&modlinkage, modinfop));
2543 2543 }
2544 2544
2545 2545
2546 2546 /*
2547 2547 * The following routines implement the driver message logging facility.
2548 2548  * They provide component- and level-based debug output filtering.
2549 2549 * Output may also be restricted to messages for a single instance by
2550 2550 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
2551 2551 * to NULL, then messages for all instances are printed.
2552 2552 *
2553 2553 * These routines have been cloned from each other due to the language
2554 2554 * constraints of macros and variable argument list processing.
2555 2555 */
2556 2556
2557 2557
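/*
 * Debugging sketch (mask values are illustrative; see sddef.h for the
 * actual SD_LOGMASK_* and component definitions): to see only error-level
 * messages from one component of a single instance, one might set, e.g.
 * from mdb -kw:
 *
 *	> sd_component_mask/W 1
 *	> sd_level_mask/W 1
 *	> sd_debug_un/Z <soft-state-pointer>
 */
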
2558 2558 /*
2559 2559 * Function: sd_log_err
2560 2560 *
2561 2561 * Description: This routine is called by the SD_ERROR macro for debug
2562 2562 * logging of error conditions.
2563 2563 *
2564 2564 * Arguments: comp - driver component being logged
2565 2565  *		un   - pointer to the driver soft state (unit) structure
2566 2566 * fmt - error string and format to be logged
2567 2567 */
2568 2568
2569 2569 static void
2570 2570 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
2571 2571 {
2572 2572 va_list ap;
2573 2573 dev_info_t *dev;
2574 2574
2575 2575 ASSERT(un != NULL);
2576 2576 dev = SD_DEVINFO(un);
2577 2577 ASSERT(dev != NULL);
2578 2578
2579 2579 /*
2580 2580 * Filter messages based on the global component and level masks.
2581 2581 * Also print if un matches the value of sd_debug_un, or if
2582 2582 * sd_debug_un is set to NULL.
2583 2583 */
2584 2584 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2585 2585 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2586 2586 mutex_enter(&sd_log_mutex);
2587 2587 va_start(ap, fmt);
2588 2588 (void) vsprintf(sd_log_buf, fmt, ap);
2589 2589 va_end(ap);
2590 2590 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2591 2591 mutex_exit(&sd_log_mutex);
2592 2592 }
2593 2593 #ifdef SD_FAULT_INJECTION
2594 2594 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2595 2595 if (un->sd_injection_mask & comp) {
2596 2596 mutex_enter(&sd_log_mutex);
2597 2597 va_start(ap, fmt);
2598 2598 (void) vsprintf(sd_log_buf, fmt, ap);
2599 2599 va_end(ap);
2600 2600 sd_injection_log(sd_log_buf, un);
2601 2601 mutex_exit(&sd_log_mutex);
2602 2602 }
2603 2603 #endif
2604 2604 }
2605 2605
2606 2606
2607 2607 /*
2608 2608 * Function: sd_log_info
2609 2609 *
2610 2610 * Description: This routine is called by the SD_INFO macro for debug
2611 2611 * logging of general purpose informational conditions.
2612 2612 *
2613 2613 * Arguments: comp - driver component being logged
2614 2614  *		un   - pointer to the driver soft state (unit) structure
2615 2615 * fmt - info string and format to be logged
2616 2616 */
2617 2617
2618 2618 static void
2619 2619 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2620 2620 {
2621 2621 va_list ap;
2622 2622 dev_info_t *dev;
2623 2623
2624 2624 ASSERT(un != NULL);
2625 2625 dev = SD_DEVINFO(un);
2626 2626 ASSERT(dev != NULL);
2627 2627
2628 2628 /*
2629 2629 * Filter messages based on the global component and level masks.
2630 2630 * Also print if un matches the value of sd_debug_un, or if
2631 2631 * sd_debug_un is set to NULL.
2632 2632 */
2633 2633 if ((sd_component_mask & component) &&
2634 2634 (sd_level_mask & SD_LOGMASK_INFO) &&
2635 2635 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2636 2636 mutex_enter(&sd_log_mutex);
2637 2637 va_start(ap, fmt);
2638 2638 (void) vsprintf(sd_log_buf, fmt, ap);
2639 2639 va_end(ap);
2640 2640 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2641 2641 mutex_exit(&sd_log_mutex);
2642 2642 }
2643 2643 #ifdef SD_FAULT_INJECTION
2644 2644 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2645 2645 if (un->sd_injection_mask & component) {
2646 2646 mutex_enter(&sd_log_mutex);
2647 2647 va_start(ap, fmt);
2648 2648 (void) vsprintf(sd_log_buf, fmt, ap);
2649 2649 va_end(ap);
2650 2650 sd_injection_log(sd_log_buf, un);
2651 2651 mutex_exit(&sd_log_mutex);
2652 2652 }
2653 2653 #endif
2654 2654 }
2655 2655
2656 2656
2657 2657 /*
2658 2658 * Function: sd_log_trace
2659 2659 *
2660 2660 * Description: This routine is called by the SD_TRACE macro for debug
2661 2661 * logging of trace conditions (i.e. function entry/exit).
2662 2662 *
2663 2663  * Arguments: component - driver component being logged
2664 2664  *		un        - pointer to driver soft state (unit) structure
2665 2665 * fmt - trace string and format to be logged
2666 2666 */
2667 2667
2668 2668 static void
2669 2669 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2670 2670 {
2671 2671 va_list ap;
2672 2672 dev_info_t *dev;
2673 2673
2674 2674 ASSERT(un != NULL);
2675 2675 dev = SD_DEVINFO(un);
2676 2676 ASSERT(dev != NULL);
2677 2677
2678 2678 /*
2679 2679 * Filter messages based on the global component and level masks.
2680 2680 * Also print if un matches the value of sd_debug_un, or if
2681 2681 * sd_debug_un is set to NULL.
2682 2682 */
2683 2683 if ((sd_component_mask & component) &&
2684 2684 (sd_level_mask & SD_LOGMASK_TRACE) &&
2685 2685 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2686 2686 mutex_enter(&sd_log_mutex);
2687 2687 va_start(ap, fmt);
2688 2688 (void) vsprintf(sd_log_buf, fmt, ap);
2689 2689 va_end(ap);
2690 2690 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2691 2691 mutex_exit(&sd_log_mutex);
2692 2692 }
2693 2693 #ifdef SD_FAULT_INJECTION
2694 2694 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2695 2695 if (un->sd_injection_mask & component) {
2696 2696 mutex_enter(&sd_log_mutex);
2697 2697 va_start(ap, fmt);
2698 2698 (void) vsprintf(sd_log_buf, fmt, ap);
2699 2699 va_end(ap);
2700 2700 sd_injection_log(sd_log_buf, un);
2701 2701 mutex_exit(&sd_log_mutex);
2702 2702 }
2703 2703 #endif
2704 2704 }
2705 2705
2706 2706
2707 2707 /*
2708 2708 * Function: sdprobe
2709 2709 *
2710 2710 * Description: This is the driver probe(9e) entry point function.
2711 2711 *
2712 2712 * Arguments: devi - opaque device info handle
2713 2713 *
2714 2714 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful.
2715 2715 * DDI_PROBE_FAILURE: If the probe failed.
2716 2716 * DDI_PROBE_PARTIAL: If the instance is not present now,
2717 2717 * but may be present in the future.
2718 2718 */
2719 2719
2720 2720 static int
2721 2721 sdprobe(dev_info_t *devi)
2722 2722 {
2723 2723 struct scsi_device *devp;
2724 2724 int rval;
2725 2725 #ifndef XPV_HVM_DRIVER
2726 2726 int instance = ddi_get_instance(devi);
2727 2727 #endif /* !XPV_HVM_DRIVER */
2728 2728
2729 2729 /*
2730 2730 * if it wasn't for pln, sdprobe could actually be nulldev
2731 2731 * in the "__fibre" case.
2732 2732 */
2733 2733 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) {
2734 2734 return (DDI_PROBE_DONTCARE);
2735 2735 }
2736 2736
2737 2737 devp = ddi_get_driver_private(devi);
2738 2738
2739 2739 if (devp == NULL) {
2740 2740 		/* Oops... nexus driver is misconfigured... */
2741 2741 return (DDI_PROBE_FAILURE);
2742 2742 }
2743 2743
2744 2744 #ifndef XPV_HVM_DRIVER
2745 2745 if (ddi_get_soft_state(sd_state, instance) != NULL) {
2746 2746 return (DDI_PROBE_PARTIAL);
2747 2747 }
2748 2748 #endif /* !XPV_HVM_DRIVER */
2749 2749
2750 2750 /*
2751 2751 * Call the SCSA utility probe routine to see if we actually
2752 2752 * have a target at this SCSI nexus.
2753 2753 */
2754 2754 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) {
2755 2755 case SCSIPROBE_EXISTS:
2756 2756 switch (devp->sd_inq->inq_dtype) {
2757 2757 case DTYPE_DIRECT:
2758 2758 rval = DDI_PROBE_SUCCESS;
2759 2759 break;
2760 2760 case DTYPE_RODIRECT:
2761 2761 /* CDs etc. Can be removable media */
2762 2762 rval = DDI_PROBE_SUCCESS;
2763 2763 break;
2764 2764 case DTYPE_OPTICAL:
2765 2765 /*
2766 2766 			 * Rewritable optical drive HP115AA
2767 2767 * Can also be removable media
2768 2768 */
2769 2769
2770 2770 /*
2771 2771 			 * Do not attempt to bind to DTYPE_OPTICAL if
2772 2772 			 * pre-Solaris 9 SPARC sd behavior is required.
2773 2773 			 *
2774 2774 			 * If this is the first time through and sd_dtype_optical_bind
2775 2775 			 * has not been set in /etc/system, check the properties.
2776 2776 */
2777 2777
2778 2778 if (sd_dtype_optical_bind < 0) {
2779 2779 sd_dtype_optical_bind = ddi_prop_get_int
2780 2780 (DDI_DEV_T_ANY, devi, 0,
2781 2781 "optical-device-bind", 1);
2782 2782 }
2783 2783
2784 2784 if (sd_dtype_optical_bind == 0) {
2785 2785 rval = DDI_PROBE_FAILURE;
2786 2786 } else {
2787 2787 rval = DDI_PROBE_SUCCESS;
2788 2788 }
2789 2789 break;
2790 2790
2791 2791 case DTYPE_NOTPRESENT:
2792 2792 default:
2793 2793 rval = DDI_PROBE_FAILURE;
2794 2794 break;
2795 2795 }
2796 2796 break;
2797 2797 default:
2798 2798 rval = DDI_PROBE_PARTIAL;
2799 2799 break;
2800 2800 }
2801 2801
2802 2802 /*
2803 2803 * This routine checks for resource allocation prior to freeing,
2804 2804 * so it will take care of the "smart probing" case where a
2805 2805 * scsi_probe() may or may not have been issued and will *not*
2806 2806 * free previously-freed resources.
2807 2807 */
2808 2808 scsi_unprobe(devp);
2809 2809 return (rval);
2810 2810 }
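
As the DTYPE_OPTICAL comment above notes, the binding decision can be
pre-seeded from /etc/system. A hedged sketch using the standard
"set module:variable" syntax; the value 0 refuses the bind, matching the
sd_dtype_optical_bind == 0 test above, while leaving the variable unset falls
back to the "optical-device-bind" property (default 1, binding enabled):

	set sd:sd_dtype_optical_bind = 0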
2811 2811
2812 2812
2813 2813 /*
2814 2814 * Function: sdinfo
2815 2815 *
2816 2816 * Description: This is the driver getinfo(9e) entry point function.
2817 2817 * Given the device number, return the devinfo pointer from
2818 2818 * the scsi_device structure or the instance number
2819 2819 * associated with the dev_t.
2820 2820 *
2821 2821 * Arguments: dip - pointer to device info structure
2822 2822 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO,
2823 2823 * DDI_INFO_DEVT2INSTANCE)
2824 2824 * arg - driver dev_t
2825 2825 * resultp - user buffer for request response
2826 2826 *
2827 2827 * Return Code: DDI_SUCCESS
2828 2828 * DDI_FAILURE
2829 2829 */
2830 2830 /* ARGSUSED */
2831 2831 static int
2832 2832 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
2833 2833 {
2834 2834 struct sd_lun *un;
2835 2835 dev_t dev;
2836 2836 int instance;
2837 2837 int error;
2838 2838
2839 2839 switch (infocmd) {
2840 2840 case DDI_INFO_DEVT2DEVINFO:
2841 2841 dev = (dev_t)arg;
2842 2842 instance = SDUNIT(dev);
2843 2843 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
2844 2844 return (DDI_FAILURE);
2845 2845 }
2846 2846 *result = (void *) SD_DEVINFO(un);
2847 2847 error = DDI_SUCCESS;
2848 2848 break;
2849 2849 case DDI_INFO_DEVT2INSTANCE:
2850 2850 dev = (dev_t)arg;
2851 2851 instance = SDUNIT(dev);
2852 2852 *result = (void *)(uintptr_t)instance;
2853 2853 error = DDI_SUCCESS;
2854 2854 break;
2855 2855 default:
2856 2856 error = DDI_FAILURE;
2857 2857 }
2858 2858 return (error);
2859 2859 }
2860 2860
2861 2861 /*
2862 2862 * Function: sd_prop_op
2863 2863 *
2864 2864 * Description: This is the driver prop_op(9e) entry point function.
2865 2865 * Return the number of blocks for the partition in question
2866 2866 * or forward the request to the property facilities.
2867 2867 *
2868 2868 * Arguments: dev - device number
2869 2869 * dip - pointer to device info structure
2870 2870 * prop_op - property operator
2871 2871 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent
2872 2872 * name - pointer to property name
2873 2873 * valuep - pointer or address of the user buffer
2874 2874 * lengthp - property length
2875 2875 *
2876 2876 * Return Code: DDI_PROP_SUCCESS
2877 2877 * DDI_PROP_NOT_FOUND
2878 2878 * DDI_PROP_UNDEFINED
2879 2879 * DDI_PROP_NO_MEMORY
2880 2880 * DDI_PROP_BUF_TOO_SMALL
2881 2881 */
2882 2882
2883 2883 static int
2884 2884 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
2885 2885 char *name, caddr_t valuep, int *lengthp)
2886 2886 {
2887 2887 struct sd_lun *un;
2888 2888
2889 2889 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL)
2890 2890 return (ddi_prop_op(dev, dip, prop_op, mod_flags,
2891 2891 name, valuep, lengthp));
2892 2892
2893 2893 return (cmlb_prop_op(un->un_cmlbhandle,
2894 2894 dev, dip, prop_op, mod_flags, name, valuep, lengthp,
2895 2895 SDPART(dev), (void *)SD_PATH_DIRECT));
2896 2896 }
2897 2897
2898 2898 /*
2899 2899 * The following functions are for smart probing:
2900 2900 * sd_scsi_probe_cache_init()
2901 2901 * sd_scsi_probe_cache_fini()
2902 2902 * sd_scsi_clear_probe_cache()
2903 2903 * sd_scsi_probe_with_cache()
2904 2904 */
2905 2905
2906 2906 /*
2907 2907 * Function: sd_scsi_probe_cache_init
2908 2908 *
2909 2909 * Description: Initializes the probe response cache mutex and head pointer.
2910 2910 *
2911 2911 * Context: Kernel thread context
2912 2912 */
2913 2913
2914 2914 static void
2915 2915 sd_scsi_probe_cache_init(void)
2916 2916 {
2917 2917 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL);
2918 2918 sd_scsi_probe_cache_head = NULL;
2919 2919 }
2920 2920
2921 2921
2922 2922 /*
2923 2923 * Function: sd_scsi_probe_cache_fini
2924 2924 *
2925 2925 * Description: Frees all resources associated with the probe response cache.
2926 2926 *
2927 2927 * Context: Kernel thread context
2928 2928 */
2929 2929
2930 2930 static void
2931 2931 sd_scsi_probe_cache_fini(void)
2932 2932 {
2933 2933 struct sd_scsi_probe_cache *cp;
2934 2934 struct sd_scsi_probe_cache *ncp;
2935 2935
2936 2936 /* Clean up our smart probing linked list */
2937 2937 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) {
2938 2938 ncp = cp->next;
2939 2939 kmem_free(cp, sizeof (struct sd_scsi_probe_cache));
2940 2940 }
2941 2941 sd_scsi_probe_cache_head = NULL;
2942 2942 mutex_destroy(&sd_scsi_probe_cache_mutex);
2943 2943 }
2944 2944
2945 2945
2946 2946 /*
2947 2947 * Function: sd_scsi_clear_probe_cache
2948 2948 *
2949 2949 * Description: This routine clears the probe response cache. This is
2950 2950 * done when open() returns ENXIO so that when deferred
2951 2951 * attach is attempted (possibly after a device has been
2952 2952 * turned on) we will retry the probe. Since we don't know
2953 2953 * which target we failed to open, we just clear the
2954 2954 * entire cache.
2955 2955 *
2956 2956 * Context: Kernel thread context
2957 2957 */
2958 2958
2959 2959 static void
2960 2960 sd_scsi_clear_probe_cache(void)
2961 2961 {
2962 2962 struct sd_scsi_probe_cache *cp;
2963 2963 int i;
2964 2964
2965 2965 mutex_enter(&sd_scsi_probe_cache_mutex);
2966 2966 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
2967 2967 /*
2968 2968 * Reset all entries to SCSIPROBE_EXISTS. This will
2969 2969 * force probing to be performed the next time
2970 2970 * sd_scsi_probe_with_cache is called.
2971 2971 */
2972 2972 for (i = 0; i < NTARGETS_WIDE; i++) {
2973 2973 cp->cache[i] = SCSIPROBE_EXISTS;
2974 2974 }
2975 2975 }
2976 2976 mutex_exit(&sd_scsi_probe_cache_mutex);
2977 2977 }
2978 2978
2979 2979
2980 2980 /*
2981 2981 * Function: sd_scsi_probe_with_cache
2982 2982 *
2983 2983 * Description: This routine implements support for a scsi device probe
2984 2984 * with cache. The driver maintains a cache of the target
2985 2985 * responses to scsi probes. If we get no response from a
2986 2986 * target during a probe inquiry, we remember that, and we
2987 2987 * avoid additional calls to scsi_probe on non-zero LUNs
2988 2988 * on the same target until the cache is cleared. By doing
2989 2989 * so we avoid the 1/4 sec selection timeout for nonzero
2990 2990 * LUNs. lun0 of a target is always probed.
2991 2991 *
2992 2992 * Arguments: devp - Pointer to a scsi_device(9S) structure
2993 2993 * waitfunc - indicates what the allocator routines should
2994 2994 * do when resources are not available. This value
2995 2995 * is passed on to scsi_probe() when that routine
2996 2996 * is called.
2997 2997 *
2998 2998  * Return Code: SCSIPROBE_NORESP if a NORESP is in the probe response
2999 2999  *		cache; otherwise the value returned by scsi_probe(9F).
3000 3000 *
3001 3001 * Context: Kernel thread context
3002 3002 */
3003 3003
3004 3004 static int
3005 3005 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
3006 3006 {
3007 3007 struct sd_scsi_probe_cache *cp;
3008 3008 dev_info_t *pdip = ddi_get_parent(devp->sd_dev);
3009 3009 int lun, tgt;
3010 3010
3011 3011 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
3012 3012 SCSI_ADDR_PROP_LUN, 0);
3013 3013 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
3014 3014 SCSI_ADDR_PROP_TARGET, -1);
3015 3015
3016 3016 /* Make sure caching enabled and target in range */
3017 3017 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) {
3018 3018 /* do it the old way (no cache) */
3019 3019 return (scsi_probe(devp, waitfn));
3020 3020 }
3021 3021
3022 3022 mutex_enter(&sd_scsi_probe_cache_mutex);
3023 3023
3024 3024 /* Find the cache for this scsi bus instance */
3025 3025 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
3026 3026 if (cp->pdip == pdip) {
3027 3027 break;
3028 3028 }
3029 3029 }
3030 3030
3031 3031 /* If we can't find a cache for this pdip, create one */
3032 3032 if (cp == NULL) {
3033 3033 int i;
3034 3034
3035 3035 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache),
3036 3036 KM_SLEEP);
3037 3037 cp->pdip = pdip;
3038 3038 cp->next = sd_scsi_probe_cache_head;
3039 3039 sd_scsi_probe_cache_head = cp;
3040 3040 for (i = 0; i < NTARGETS_WIDE; i++) {
3041 3041 cp->cache[i] = SCSIPROBE_EXISTS;
3042 3042 }
3043 3043 }
3044 3044
3045 3045 mutex_exit(&sd_scsi_probe_cache_mutex);
3046 3046
3047 3047 /* Recompute the cache for this target if LUN zero */
3048 3048 if (lun == 0) {
3049 3049 cp->cache[tgt] = SCSIPROBE_EXISTS;
3050 3050 }
3051 3051
3052 3052 /* Don't probe if cache remembers a NORESP from a previous LUN. */
3053 3053 if (cp->cache[tgt] != SCSIPROBE_EXISTS) {
3054 3054 return (SCSIPROBE_NORESP);
3055 3055 }
3056 3056
3057 3057 /* Do the actual probe; save & return the result */
3058 3058 return (cp->cache[tgt] = scsi_probe(devp, waitfn));
3059 3059 }
3060 3060
3061 3061
3062 3062 /*
3063 3063 * Function: sd_scsi_target_lun_init
3064 3064 *
3065 3065 * Description: Initializes the attached lun chain mutex and head pointer.
3066 3066 *
3067 3067 * Context: Kernel thread context
3068 3068 */
3069 3069
3070 3070 static void
3071 3071 sd_scsi_target_lun_init(void)
3072 3072 {
3073 3073 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
3074 3074 sd_scsi_target_lun_head = NULL;
3075 3075 }
3076 3076
3077 3077
3078 3078 /*
3079 3079 * Function: sd_scsi_target_lun_fini
3080 3080 *
3081 3081 * Description: Frees all resources associated with the attached lun
3082 3082 * chain
3083 3083 *
3084 3084 * Context: Kernel thread context
3085 3085 */
3086 3086
3087 3087 static void
3088 3088 sd_scsi_target_lun_fini(void)
3089 3089 {
3090 3090 struct sd_scsi_hba_tgt_lun *cp;
3091 3091 struct sd_scsi_hba_tgt_lun *ncp;
3092 3092
3093 3093 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) {
3094 3094 ncp = cp->next;
3095 3095 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
3096 3096 }
3097 3097 sd_scsi_target_lun_head = NULL;
3098 3098 mutex_destroy(&sd_scsi_target_lun_mutex);
3099 3099 }
3100 3100
3101 3101
3102 3102 /*
3103 3103 * Function: sd_scsi_get_target_lun_count
3104 3104 *
3105 3105  * Description: This routine checks the attached lun chain to see how
3106 3106  *		many luns are attached on the required SCSI controller and
3107 3107  *		target. Currently, some capabilities, such as tagged queueing,
3108 3108  *		are supported by the HBA on a per-target basis, so all luns
3109 3109  *		in a target share the same capabilities. Based on this
3110 3110  *		assumption, sd should only set these capabilities once per
3111 3111  *		target. This function is called when sd needs to decide how
3112 3112  *		many luns are already attached on a target.
3113 3113 *
3114 3114 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
3115 3115 * controller device.
3116 3116 * target - The target ID on the controller's SCSI bus.
3117 3117 *
3118 3118 * Return Code: The number of luns attached on the required target and
3119 3119 * controller.
3120 3120 * -1 if target ID is not in parallel SCSI scope or the given
3121 3121 * dip is not in the chain.
3122 3122 *
3123 3123 * Context: Kernel thread context
3124 3124 */
3125 3125
3126 3126 static int
3127 3127 sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
3128 3128 {
3129 3129 struct sd_scsi_hba_tgt_lun *cp;
3130 3130
3131 3131 if ((target < 0) || (target >= NTARGETS_WIDE)) {
3132 3132 return (-1);
3133 3133 }
3134 3134
3135 3135 mutex_enter(&sd_scsi_target_lun_mutex);
3136 3136
3137 3137 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
3138 3138 if (cp->pdip == dip) {
3139 3139 break;
3140 3140 }
3141 3141 }
3142 3142
3143 3143 mutex_exit(&sd_scsi_target_lun_mutex);
3144 3144
3145 3145 if (cp == NULL) {
3146 3146 return (-1);
3147 3147 }
3148 3148
3149 3149 return (cp->nlun[target]);
3150 3150 }
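
A hypothetical caller sketch of the per-target capability policy described
above; the surrounding attach logic and the variable names (pdip, tgt) are
assumptions, not taken from this file:

	/*
	 * Set the shared per-target capabilities (e.g. tagged queueing)
	 * only when this is the first lun attached on the target; a
	 * return of -1 (unknown) is treated the same as "first".
	 */
	if (sd_scsi_get_target_lun_count(pdip, tgt) < 2) {
		/* negotiate the per-target capabilities here */
	}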
3151 3151
3152 3152
3153 3153 /*
3154 3154 * Function: sd_scsi_update_lun_on_target
3155 3155 *
3156 3156 * Description: This routine is used to update the attached lun chain when a
3157 3157 * lun is attached or detached on a target.
3158 3158 *
3159 3159 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
3160 3160 * controller device.
3161 3161 * target - The target ID on the controller's SCSI bus.
3162 3162  *		flag - indicates whether the lun is attached or detached.
3163 3163 *
3164 3164 * Context: Kernel thread context
3165 3165 */
3166 3166
3167 3167 static void
3168 3168 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
3169 3169 {
3170 3170 struct sd_scsi_hba_tgt_lun *cp;
3171 3171
3172 3172 mutex_enter(&sd_scsi_target_lun_mutex);
3173 3173
3174 3174 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
3175 3175 if (cp->pdip == dip) {
3176 3176 break;
3177 3177 }
3178 3178 }
3179 3179
3180 3180 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
3181 3181 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
3182 3182 KM_SLEEP);
3183 3183 cp->pdip = dip;
3184 3184 cp->next = sd_scsi_target_lun_head;
3185 3185 sd_scsi_target_lun_head = cp;
3186 3186 }
3187 3187
3188 3188 mutex_exit(&sd_scsi_target_lun_mutex);
3189 3189
3190 3190 if (cp != NULL) {
3191 3191 if (flag == SD_SCSI_LUN_ATTACH) {
3192 3192 			cp->nlun[target]++;
3193 3193 		} else {
3194 3194 			cp->nlun[target]--;
3195 3195 }
3196 3196 }
3197 3197 }
3198 3198
3199 3199
3200 3200 /*
3201 3201 * Function: sd_spin_up_unit
3202 3202 *
3203 3203 * Description: Issues the following commands to spin-up the device:
3204 3204 * START STOP UNIT, and INQUIRY.
3205 3205 *
3206 3206 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3207 3207 * structure for this target.
3208 3208 *
3209 3209 * Return Code: 0 - success
3210 3210 * EIO - failure
3211 3211 * EACCES - reservation conflict
3212 3212 *
3213 3213 * Context: Kernel thread context
3214 3214 */
3215 3215
3216 3216 static int
3217 3217 sd_spin_up_unit(sd_ssc_t *ssc)
3218 3218 {
3219 3219 size_t resid = 0;
3220 3220 int has_conflict = FALSE;
3221 3221 uchar_t *bufaddr;
3222 3222 int status;
3223 3223 struct sd_lun *un;
3224 3224
3225 3225 ASSERT(ssc != NULL);
3226 3226 un = ssc->ssc_un;
3227 3227 ASSERT(un != NULL);
3228 3228
3229 3229 /*
3230 3230 * Send a throwaway START UNIT command.
3231 3231 *
3232 3232 * If we fail on this, we don't care presently what precisely
3233 3233 * is wrong. EMC's arrays will also fail this with a check
3234 3234 * condition (0x2/0x4/0x3) if the device is "inactive," but
3235 3235 * we don't want to fail the attach because it may become
3236 3236 * "active" later.
3237 3237 	 * We don't know if the power condition is supported at this
3238 3238 	 * stage, so use the START STOP bit.
3239 3239 */
3240 3240 status = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
3241 3241 SD_TARGET_START, SD_PATH_DIRECT);
3242 3242
3243 3243 if (status != 0) {
3244 3244 if (status == EACCES)
3245 3245 has_conflict = TRUE;
3246 3246 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3247 3247 }
3248 3248
3249 3249 /*
3250 3250 * Send another INQUIRY command to the target. This is necessary for
3251 3251 * non-removable media direct access devices because their INQUIRY data
3252 3252 * may not be fully qualified until they are spun up (perhaps via the
3253 3253 	 * START command above). Note: This seems to be needed for some
3254 3254 	 * legacy devices only. The INQUIRY command should succeed even if a
3255 3255 * Reservation Conflict is present.
3256 3256 */
3257 3257 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
3258 3258
3259 3259 if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid)
3260 3260 != 0) {
3261 3261 kmem_free(bufaddr, SUN_INQSIZE);
3262 3262 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
3263 3263 return (EIO);
3264 3264 }
3265 3265
3266 3266 /*
3267 3267 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
3268 3268 * Note that this routine does not return a failure here even if the
3269 3269 * INQUIRY command did not return any data. This is a legacy behavior.
3270 3270 */
3271 3271 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
3272 3272 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
3273 3273 }
3274 3274
3275 3275 kmem_free(bufaddr, SUN_INQSIZE);
3276 3276
3277 3277 /* If we hit a reservation conflict above, tell the caller. */
3278 3278 if (has_conflict == TRUE) {
3279 3279 return (EACCES);
3280 3280 }
3281 3281
3282 3282 return (0);
3283 3283 }
3284 3284
3285 3285 #ifdef _LP64
3286 3286 /*
3287 3287 * Function: sd_enable_descr_sense
3288 3288 *
3289 3289 * Description: This routine attempts to select descriptor sense format
3290 3290 * using the Control mode page. Devices that support 64 bit
3291 3291 * LBAs (for >2TB luns) should also implement descriptor
3292 3292  *		sense data, so we call this function whenever we see
3293 3293  *		a lun larger than 2TB. If for some reason the device
3294 3294  *		supports 64 bit LBAs but doesn't support descriptor sense,
3295 3295  *		presumably the mode select will fail. Everything will
3296 3296 * continue to work normally except that we will not get
3297 3297 * complete sense data for commands that fail with an LBA
3298 3298 * larger than 32 bits.
3299 3299 *
3300 3300 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3301 3301 * structure for this target.
3302 3302 *
3303 3303 * Context: Kernel thread context only
3304 3304 */
3305 3305
3306 3306 static void
3307 3307 sd_enable_descr_sense(sd_ssc_t *ssc)
3308 3308 {
3309 3309 uchar_t *header;
3310 3310 struct mode_control_scsi3 *ctrl_bufp;
3311 3311 size_t buflen;
3312 3312 size_t bd_len;
3313 3313 int status;
3314 3314 struct sd_lun *un;
3315 3315
3316 3316 ASSERT(ssc != NULL);
3317 3317 un = ssc->ssc_un;
3318 3318 ASSERT(un != NULL);
3319 3319
3320 3320 /*
3321 3321 * Read MODE SENSE page 0xA, Control Mode Page
3322 3322 */
3323 3323 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
3324 3324 sizeof (struct mode_control_scsi3);
3325 3325 header = kmem_zalloc(buflen, KM_SLEEP);
3326 3326
3327 3327 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
3328 3328 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT);
3329 3329
3330 3330 if (status != 0) {
3331 3331 SD_ERROR(SD_LOG_COMMON, un,
3332 3332 "sd_enable_descr_sense: mode sense ctrl page failed\n");
3333 3333 goto eds_exit;
3334 3334 }
3335 3335
3336 3336 /*
3337 3337 * Determine size of Block Descriptors in order to locate
3338 3338 	 * the mode page data. ATAPI devices return 0; SCSI devices
3339 3339 * should return MODE_BLK_DESC_LENGTH.
3340 3340 */
3341 3341 bd_len = ((struct mode_header *)header)->bdesc_length;
3342 3342
3343 3343 /* Clear the mode data length field for MODE SELECT */
3344 3344 ((struct mode_header *)header)->length = 0;
3345 3345
3346 3346 ctrl_bufp = (struct mode_control_scsi3 *)
3347 3347 (header + MODE_HEADER_LENGTH + bd_len);
3348 3348
3349 3349 /*
3350 3350 * If the page length is smaller than the expected value,
3351 3351 * the target device doesn't support D_SENSE. Bail out here.
3352 3352 */
3353 3353 if (ctrl_bufp->mode_page.length <
3354 3354 sizeof (struct mode_control_scsi3) - 2) {
3355 3355 SD_ERROR(SD_LOG_COMMON, un,
3356 3356 "sd_enable_descr_sense: enable D_SENSE failed\n");
3357 3357 goto eds_exit;
3358 3358 }
3359 3359
3360 3360 /*
3361 3361 * Clear PS bit for MODE SELECT
3362 3362 */
3363 3363 ctrl_bufp->mode_page.ps = 0;
3364 3364
3365 3365 /*
3366 3366 * Set D_SENSE to enable descriptor sense format.
3367 3367 */
3368 3368 ctrl_bufp->d_sense = 1;
3369 3369
3370 3370 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3371 3371
3372 3372 /*
3373 3373 * Use MODE SELECT to commit the change to the D_SENSE bit
3374 3374 */
3375 3375 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header,
3376 3376 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT);
3377 3377
3378 3378 if (status != 0) {
3379 3379 SD_INFO(SD_LOG_COMMON, un,
3380 3380 "sd_enable_descr_sense: mode select ctrl page failed\n");
3381 3381 } else {
3382 3382 kmem_free(header, buflen);
3383 3383 return;
3384 3384 }
3385 3385
3386 3386 eds_exit:
3387 3387 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3388 3388 kmem_free(header, buflen);
3389 3389 }
3390 3390
3391 3391 /*
3392 3392 * Function: sd_reenable_dsense_task
3393 3393 *
3394 3394 * Description: Re-enable descriptor sense after device or bus reset
3395 3395 *
3396 3396 * Context: Executes in a taskq() thread context
3397 3397 */
3398 3398 static void
3399 3399 sd_reenable_dsense_task(void *arg)
3400 3400 {
3401 3401 struct sd_lun *un = arg;
3402 3402 sd_ssc_t *ssc;
3403 3403
3404 3404 ASSERT(un != NULL);
3405 3405
3406 3406 ssc = sd_ssc_init(un);
3407 3407 sd_enable_descr_sense(ssc);
3408 3408 sd_ssc_fini(ssc);
3409 3409 }
3410 3410 #endif /* _LP64 */
3411 3411
3412 3412 /*
3413 3413 * Function: sd_set_mmc_caps
3414 3414 *
3415 3415 * Description: This routine determines if the device is MMC compliant and if
3416 3416 * the device supports CDDA via a mode sense of the CDVD
3417 3417 * capabilities mode page. Also checks if the device is a
3418 3418 * dvdram writable device.
3419 3419 *
3420 3420 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3421 3421 * structure for this target.
3422 3422 *
3423 3423 * Context: Kernel thread context only
3424 3424 */
3425 3425
3426 3426 static void
3427 3427 sd_set_mmc_caps(sd_ssc_t *ssc)
3428 3428 {
3429 3429 struct mode_header_grp2 *sense_mhp;
3430 3430 uchar_t *sense_page;
3431 3431 caddr_t buf;
3432 3432 int bd_len;
3433 3433 int status;
3434 3434 struct uscsi_cmd com;
3435 3435 int rtn;
3436 3436 uchar_t *out_data_rw, *out_data_hd;
3437 3437 uchar_t *rqbuf_rw, *rqbuf_hd;
3438 3438 uchar_t *out_data_gesn;
3439 3439 int gesn_len;
3440 3440 struct sd_lun *un;
3441 3441
3442 3442 ASSERT(ssc != NULL);
3443 3443 un = ssc->ssc_un;
3444 3444 ASSERT(un != NULL);
3445 3445
3446 3446 /*
3447 3447 	 * The flags which will be set in this function are: mmc compliant,
3448 3448 	 * dvdram writable device, and cdda support. Initialize them to FALSE;
3449 3449 	 * if a capability is detected, it will be set to TRUE.
3450 3450 */
3451 3451 un->un_f_mmc_cap = FALSE;
3452 3452 un->un_f_dvdram_writable_device = FALSE;
3453 3453 un->un_f_cfg_cdda = FALSE;
3454 3454
3455 3455 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3456 3456 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
3457 3457 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT);
3458 3458
3459 3459 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3460 3460
3461 3461 if (status != 0) {
3462 3462 /* command failed; just return */
3463 3463 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3464 3464 return;
3465 3465 }
3466 3466 /*
3467 3467 * If the mode sense request for the CDROM CAPABILITIES
3468 3468 	 * page (0x2A) succeeds, the device is assumed to be MMC.
3469 3469 */
3470 3470 un->un_f_mmc_cap = TRUE;
3471 3471
3472 3472 /* See if GET STATUS EVENT NOTIFICATION is supported */
3473 3473 if (un->un_f_mmc_gesn_polling) {
3474 3474 gesn_len = SD_GESN_HEADER_LEN + SD_GESN_MEDIA_DATA_LEN;
3475 3475 out_data_gesn = kmem_zalloc(gesn_len, KM_SLEEP);
3476 3476
3477 3477 rtn = sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(ssc,
3478 3478 out_data_gesn, gesn_len, 1 << SD_GESN_MEDIA_CLASS);
3479 3479
3480 3480 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3481 3481
3482 3482 if ((rtn != 0) || !sd_gesn_media_data_valid(out_data_gesn)) {
3483 3483 un->un_f_mmc_gesn_polling = FALSE;
3484 3484 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3485 3485 "sd_set_mmc_caps: gesn not supported "
3486 3486 "%d %x %x %x %x\n", rtn,
3487 3487 out_data_gesn[0], out_data_gesn[1],
3488 3488 out_data_gesn[2], out_data_gesn[3]);
3489 3489 }
3490 3490
3491 3491 kmem_free(out_data_gesn, gesn_len);
3492 3492 }
3493 3493
3494 3494 /* Get to the page data */
3495 3495 sense_mhp = (struct mode_header_grp2 *)buf;
3496 3496 bd_len = (sense_mhp->bdesc_length_hi << 8) |
3497 3497 sense_mhp->bdesc_length_lo;
3498 3498 if (bd_len > MODE_BLK_DESC_LENGTH) {
3499 3499 /*
3500 3500 * We did not get back the expected block descriptor
3501 3501 * length so we cannot determine if the device supports
3502 3502 * CDDA. However, we still indicate the device is MMC
3503 3503 * according to the successful response to the page
3504 3504 * 0x2A mode sense request.
3505 3505 */
3506 3506 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3507 3507 "sd_set_mmc_caps: Mode Sense returned "
3508 3508 "invalid block descriptor length\n");
3509 3509 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3510 3510 return;
3511 3511 }
3512 3512
3513 3513 /* See if read CDDA is supported */
3514 3514 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 +
3515 3515 bd_len);
3516 3516 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE;
3517 3517
3518 3518 /* See if writing DVD RAM is supported. */
3519 3519 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE;
3520 3520 if (un->un_f_dvdram_writable_device == TRUE) {
3521 3521 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3522 3522 return;
3523 3523 }
3524 3524
3525 3525 /*
3526 3526 * If the device presents DVD or CD capabilities in the mode
3527 3527 	 * page, we can return here since an RRD will not have
3528 3528 * these capabilities.
3529 3529 */
3530 3530 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3531 3531 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3532 3532 return;
3533 3533 }
3534 3534 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3535 3535
3536 3536 /*
3537 3537 * If un->un_f_dvdram_writable_device is still FALSE,
3538 3538 	 * check for a Removable Rigid Disk (RRD). An RRD
3539 3539 * device is identified by the features RANDOM_WRITABLE and
3540 3540 * HARDWARE_DEFECT_MANAGEMENT.
3541 3541 */
3542 3542 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3543 3543 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3544 3544
3545 3545 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
3546 3546 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3547 3547 RANDOM_WRITABLE, SD_PATH_STANDARD);
3548 3548
3549 3549 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3550 3550
3551 3551 if (rtn != 0) {
3552 3552 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3553 3553 kmem_free(rqbuf_rw, SENSE_LENGTH);
3554 3554 return;
3555 3555 }
3556 3556
3557 3557 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3558 3558 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3559 3559
3560 3560 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
3561 3561 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3562 3562 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD);
3563 3563
3564 3564 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3565 3565
3566 3566 if (rtn == 0) {
3567 3567 /*
3568 3568 * We have good information, check for random writable
3569 3569 * and hardware defect features.
3570 3570 */
3571 3571 if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3572 3572 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) {
3573 3573 un->un_f_dvdram_writable_device = TRUE;
3574 3574 }
3575 3575 }
3576 3576
3577 3577 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3578 3578 kmem_free(rqbuf_rw, SENSE_LENGTH);
3579 3579 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3580 3580 kmem_free(rqbuf_hd, SENSE_LENGTH);
3581 3581 }
3582 3582
3583 3583 /*
3584 3584 * Function: sd_check_for_writable_cd
3585 3585 *
3586 3586 * Description: This routine determines if the media in the device is
3587 3587  *		writable or not. It uses the GET CONFIGURATION command (0x46)
3588 3588  *		to determine if the media is writable.
3589 3589 *
3590 3590  * Arguments: ssc - ssc contains pointer to driver soft state (unit) structure
3591 3591 * path_flag - SD_PATH_DIRECT to use the USCSI "direct"
3592 3592 * chain and the normal command waitq, or
3593 3593 * SD_PATH_DIRECT_PRIORITY to use the USCSI
3594 3594 * "direct" chain and bypass the normal command
3595 3595 * waitq.
3596 3596 *
3597 3597 * Context: Never called at interrupt context.
3598 3598 */
3599 3599
3600 3600 static void
3601 3601 sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag)
3602 3602 {
3603 3603 struct uscsi_cmd com;
3604 3604 uchar_t *out_data;
3605 3605 uchar_t *rqbuf;
3606 3606 int rtn;
3607 3607 uchar_t *out_data_rw, *out_data_hd;
3608 3608 uchar_t *rqbuf_rw, *rqbuf_hd;
3609 3609 struct mode_header_grp2 *sense_mhp;
3610 3610 uchar_t *sense_page;
3611 3611 caddr_t buf;
3612 3612 int bd_len;
3613 3613 int status;
3614 3614 struct sd_lun *un;
3615 3615
3616 3616 ASSERT(ssc != NULL);
3617 3617 un = ssc->ssc_un;
3618 3618 ASSERT(un != NULL);
3619 3619 ASSERT(mutex_owned(SD_MUTEX(un)));
3620 3620
3621 3621 /*
3622 3622 	 * Initialize writable media to FALSE; it is set to TRUE only if
3623 3623 	 * the configuration info tells us the media is writable.
3624 3624 */
3625 3625 un->un_f_mmc_writable_media = FALSE;
3626 3626 mutex_exit(SD_MUTEX(un));
3627 3627
3628 3628 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
3629 3629 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3630 3630
3631 3631 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH,
3632 3632 out_data, SD_PROFILE_HEADER_LEN, path_flag);
3633 3633
3634 3634 if (rtn != 0)
3635 3635 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3636 3636
3637 3637 mutex_enter(SD_MUTEX(un));
3638 3638 if (rtn == 0) {
3639 3639 /*
3640 3640 * We have good information, check for writable DVD.
3641 3641 */
3642 3642 if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
3643 3643 un->un_f_mmc_writable_media = TRUE;
3644 3644 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3645 3645 kmem_free(rqbuf, SENSE_LENGTH);
3646 3646 return;
3647 3647 }
3648 3648 }
3649 3649
3650 3650 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3651 3651 kmem_free(rqbuf, SENSE_LENGTH);
3652 3652
3653 3653 /*
3654 3654 * Determine if this is a RRD type device.
3655 3655 */
3656 3656 mutex_exit(SD_MUTEX(un));
3657 3657 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3658 3658 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
3659 3659 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag);
3660 3660
3661 3661 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3662 3662
3663 3663 mutex_enter(SD_MUTEX(un));
3664 3664 if (status != 0) {
3665 3665 /* command failed; just return */
3666 3666 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3667 3667 return;
3668 3668 }
3669 3669
3670 3670 /* Get to the page data */
3671 3671 sense_mhp = (struct mode_header_grp2 *)buf;
3672 3672 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
3673 3673 if (bd_len > MODE_BLK_DESC_LENGTH) {
3674 3674 /*
3675 3675 * We did not get back the expected block descriptor length so
3676 3676 * we cannot check the mode page.
3677 3677 */
3678 3678 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3679 3679 "sd_check_for_writable_cd: Mode Sense returned "
3680 3680 "invalid block descriptor length\n");
3681 3681 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3682 3682 return;
3683 3683 }
3684 3684
3685 3685 /*
3686 3686 * If the device presents DVD or CD capabilities in the mode
3687 3687 	 * page, we can return here since an RRD device will not have
3688 3688 * these capabilities.
3689 3689 */
3690 3690 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
3691 3691 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3692 3692 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3693 3693 return;
3694 3694 }
3695 3695 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3696 3696
3697 3697 /*
3698 3698 * If un->un_f_mmc_writable_media is still FALSE,
3699 3699 	 * check for RRD type media. An RRD device is identified
3700 3700 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
3701 3701 */
3702 3702 mutex_exit(SD_MUTEX(un));
3703 3703 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3704 3704 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3705 3705
3706 3706 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
3707 3707 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3708 3708 RANDOM_WRITABLE, path_flag);
3709 3709
3710 3710 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3711 3711 if (rtn != 0) {
3712 3712 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3713 3713 kmem_free(rqbuf_rw, SENSE_LENGTH);
3714 3714 mutex_enter(SD_MUTEX(un));
3715 3715 return;
3716 3716 }
3717 3717
3718 3718 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3719 3719 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3720 3720
3721 3721 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
3722 3722 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3723 3723 HARDWARE_DEFECT_MANAGEMENT, path_flag);
3724 3724
3725 3725 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3726 3726 mutex_enter(SD_MUTEX(un));
3727 3727 if (rtn == 0) {
3728 3728 /*
3729 3729 * We have good information, check for random writable
3730 3730 * and hardware defect features as current.
3731 3731 */
3732 3732 if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3733 3733 (out_data_rw[10] & 0x1) &&
3734 3734 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
3735 3735 (out_data_hd[10] & 0x1)) {
3736 3736 un->un_f_mmc_writable_media = TRUE;
3737 3737 }
3738 3738 }
3739 3739
3740 3740 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3741 3741 kmem_free(rqbuf_rw, SENSE_LENGTH);
3742 3742 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3743 3743 kmem_free(rqbuf_hd, SENSE_LENGTH);
3744 3744 }
3745 3745
3746 3746 /*
3747 3747 * Function: sd_read_unit_properties
3748 3748 *
3749 3749 * Description: The following implements a property lookup mechanism.
3750 3750 * Properties for particular disks (keyed on vendor, model
3751 3751 * and rev numbers) are sought in the sd.conf file via
3752 3752 * sd_process_sdconf_file(), and if not found there, are
3753 3753 * looked for in a list hardcoded in this driver via
3754 3754  *		sd_process_sdconf_table(). Once located, the properties
3755 3755 * are used to update the driver unit structure.
3756 3756 *
3757 3757 * Arguments: un - driver soft state (unit) structure
3758 3758 */
3759 3759
3760 3760 static void
3761 3761 sd_read_unit_properties(struct sd_lun *un)
3762 3762 {
3763 3763 /*
3764 3764 * sd_process_sdconf_file returns SD_FAILURE if it cannot find
3765 3765 * the "sd-config-list" property (from the sd.conf file) or if
3766 3766 * there was not a match for the inquiry vid/pid. If this event
3767 3767 * occurs the static driver configuration table is searched for
3768 3768 * a match.
3769 3769 */
3770 3770 ASSERT(un != NULL);
3771 3771 if (sd_process_sdconf_file(un) == SD_FAILURE) {
3772 3772 sd_process_sdconf_table(un);
3773 3773 }
3774 3774
3775 3775 /* check for LSI device */
3776 3776 sd_is_lsi(un);
3777 3777
3778 3778
3779 3779 }
3780 3780
3781 3781
3782 3782 /*
3783 3783 * Function: sd_process_sdconf_file
3784 3784 *
3785 3785 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the
3786 3786 * driver's config file (ie, sd.conf) and update the driver
3787 3787 * soft state structure accordingly.
3788 3788 *
3789 3789 * Arguments: un - driver soft state (unit) structure
3790 3790 *
3791 3791 * Return Code: SD_SUCCESS - The properties were successfully set according
3792 3792 * to the driver configuration file.
3793 3793 * SD_FAILURE - The driver config list was not obtained or
3794 3794 * there was no vid/pid match. This indicates that
3795 3795 * the static config table should be used.
3796 3796 *
3797 3797 * The config file has a property, "sd-config-list". Currently we support
3798 3798 * two kinds of formats. For both formats, the value of this property
3799 3799 * is a list of duplets:
3800 3800 *
3801 3801 * sd-config-list=
3802 3802 * <duplet>,
3803 3803 * [,<duplet>]*;
3804 3804 *
3805 3805 * For the improved format, where
3806 3806 *
3807 3807 * <duplet>:= "<vid+pid>","<tunable-list>"
3808 3808 *
3809 3809 * and
3810 3810 *
3811 3811 * <tunable-list>:= <tunable> [, <tunable> ]*;
3812 3812 * <tunable> = <name> : <value>
3813 3813 *
3814 3814 * The <vid+pid> is the string that is returned by the target device on a
3815 3815 * SCSI inquiry command, the <tunable-list> contains one or more tunables
3816 3816 * to apply to all target devices with the specified <vid+pid>.
3817 3817 *
3818 3818 * Each <tunable> is a "<name> : <value>" pair.
3819 3819 *
3820 3820 * For the old format, the structure of each duplet is as follows:
3821 3821 *
3822 3822 * <duplet>:= "<vid+pid>","<data-property-name_list>"
3823 3823 *
3824 3824 * The first entry of the duplet is the device ID string (the concatenated
3825 3825 * vid & pid; not to be confused with a device_id). This is defined in
3826 3826 * the same way as in the sd_disk_table.
3827 3827 *
3828 3828 * The second part of the duplet is a string that identifies a
3829 3829 * data-property-name-list. The data-property-name-list is defined as
3830 3830 * follows:
3831 3831 *
3832 3832 * <data-property-name-list>:=<data-property-name> [<data-property-name>]
3833 3833 *
3834 3834 * The syntax of <data-property-name> depends on the <version> field.
3835 3835 *
3836 3836 * If version = SD_CONF_VERSION_1 we have the following syntax:
3837 3837 *
3838 3838 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
3839 3839 *
3840 3840  * where the prop0 value will be used to set prop0 if bit0 is set in
3841 3841  * the flags, prop1 if bit1 is set, etc., and N = SD_CONF_MAX_ITEMS - 1.
3842 3842 *
3843 3843 */
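
As an illustration only, hypothetical sd.conf entries for both formats might
look like the following. The vid/pid strings and values are invented; the
tunable names match those accepted by sd_set_properties() below, and the
version-1 example assumes the per-bit positional layout read by
sd_get_tunables_from_conf(), with bit 0 of the flags selecting the throttle:

	# Improved format: "<vid+pid>","<name>:<value>, ..."
	sd-config-list =
	    "ACME    MagicDisk400", "retries-busy:12, throttle-max:64, disksort:false",
	    "ACME    MagicDisk500", "physical-block-size:4096, cache-nonvolatile:true";

	# Old format: the duplet names a separate data property
	sd-config-list = "ACME    MagicDisk600", "acme-disk-data";
	acme-disk-data = 1, 0x1, 48;	# version 1, flags bit0 set, throttle 48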
3844 3844
3845 3845 static int
3846 3846 sd_process_sdconf_file(struct sd_lun *un)
3847 3847 {
3848 3848 char **config_list = NULL;
3849 3849 uint_t nelements;
3850 3850 char *vidptr;
3851 3851 int vidlen;
3852 3852 char *dnlist_ptr;
3853 3853 char *dataname_ptr;
3854 3854 char *dataname_lasts;
3855 3855 int *data_list = NULL;
3856 3856 uint_t data_list_len;
3857 3857 int rval = SD_FAILURE;
3858 3858 int i;
3859 3859
3860 3860 ASSERT(un != NULL);
3861 3861
3862 3862 /* Obtain the configuration list associated with the .conf file */
3863 3863 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un),
3864 3864 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list,
3865 3865 &config_list, &nelements) != DDI_PROP_SUCCESS) {
3866 3866 return (SD_FAILURE);
3867 3867 }
3868 3868
3869 3869 /*
3870 3870 * Compare vids in each duplet to the inquiry vid - if a match is
3871 3871 * made, get the data value and update the soft state structure
3872 3872 * accordingly.
3873 3873 *
3874 3874 	 * Each duplet should appear as a pair of strings; return SD_FAILURE
3875 3875 	 * otherwise.
3876 3876 */
3877 3877 if (nelements & 1) {
3878 3878 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3879 3879 "sd-config-list should show as pairs of strings.\n");
3880 3880 if (config_list)
3881 3881 ddi_prop_free(config_list);
3882 3882 return (SD_FAILURE);
3883 3883 }
3884 3884
3885 3885 for (i = 0; i < nelements; i += 2) {
3886 3886 /*
3887 3887 * Note: The assumption here is that each vid entry is on
3888 3888 * a unique line from its associated duplet.
3889 3889 */
3890 3890 vidptr = config_list[i];
3891 3891 vidlen = (int)strlen(vidptr);
3892 3892 if (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS) {
3893 3893 continue;
3894 3894 }
3895 3895
3896 3896 /*
3897 3897 		 * dnlist contains 1 or more blank-separated
3898 3898 * data-property-name entries
3899 3899 */
3900 3900 dnlist_ptr = config_list[i + 1];
3901 3901
3902 3902 if (strchr(dnlist_ptr, ':') != NULL) {
3903 3903 /*
3904 3904 * Decode the improved format sd-config-list.
3905 3905 */
3906 3906 sd_nvpair_str_decode(un, dnlist_ptr);
3907 3907 } else {
3908 3908 /*
3909 3909 * The old format sd-config-list, loop through all
3910 3910 * data-property-name entries in the
3911 3911 * data-property-name-list
3912 3912 * setting the properties for each.
3913 3913 */
3914 3914 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t",
3915 3915 &dataname_lasts); dataname_ptr != NULL;
3916 3916 dataname_ptr = sd_strtok_r(NULL, " \t",
3917 3917 &dataname_lasts)) {
3918 3918 int version;
3919 3919
3920 3920 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3921 3921 "sd_process_sdconf_file: disk:%s, "
3922 3922 "data:%s\n", vidptr, dataname_ptr);
3923 3923
3924 3924 /* Get the data list */
3925 3925 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
3926 3926 SD_DEVINFO(un), 0, dataname_ptr, &data_list,
3927 3927 &data_list_len) != DDI_PROP_SUCCESS) {
3928 3928 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3929 3929 "sd_process_sdconf_file: data "
3930 3930 "property (%s) has no value\n",
3931 3931 dataname_ptr);
3932 3932 continue;
3933 3933 }
3934 3934
3935 3935 version = data_list[0];
3936 3936
3937 3937 if (version == SD_CONF_VERSION_1) {
3938 3938 sd_tunables values;
3939 3939
3940 3940 /* Set the properties */
3941 3941 if (sd_chk_vers1_data(un, data_list[1],
3942 3942 &data_list[2], data_list_len,
3943 3943 dataname_ptr) == SD_SUCCESS) {
3944 3944 sd_get_tunables_from_conf(un,
3945 3945 data_list[1], &data_list[2],
3946 3946 &values);
3947 3947 sd_set_vers1_properties(un,
3948 3948 data_list[1], &values);
3949 3949 rval = SD_SUCCESS;
3950 3950 } else {
3951 3951 rval = SD_FAILURE;
3952 3952 }
3953 3953 } else {
3954 3954 scsi_log(SD_DEVINFO(un), sd_label,
3955 3955 CE_WARN, "data property %s version "
3956 3956 "0x%x is invalid.",
3957 3957 dataname_ptr, version);
3958 3958 rval = SD_FAILURE;
3959 3959 }
3960 3960 if (data_list)
3961 3961 ddi_prop_free(data_list);
3962 3962 }
3963 3963 }
3964 3964 }
3965 3965
3966 3966 /* free up the memory allocated by ddi_prop_lookup_string_array(). */
3967 3967 if (config_list) {
3968 3968 ddi_prop_free(config_list);
3969 3969 }
3970 3970
3971 3971 return (rval);
3972 3972 }
3973 3973
3974 3974 /*
3975 3975 * Function: sd_nvpair_str_decode()
3976 3976 *
3977 3977  * Description: Parse the improved-format sd-config-list to extract
3978 3978  *		each tunable entry, which is a name:value pair.
3979 3979  *		Then call sd_set_properties() to set each property.
3980 3980 *
3981 3981 * Arguments: un - driver soft state (unit) structure
3982 3982 * nvpair_str - the tunable list
3983 3983 */
3984 3984 static void
3985 3985 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str)
3986 3986 {
3987 3987 char *nv, *name, *value, *token;
3988 3988 char *nv_lasts, *v_lasts, *x_lasts;
3989 3989
3990 3990 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL;
3991 3991 nv = sd_strtok_r(NULL, ",", &nv_lasts)) {
3992 3992 token = sd_strtok_r(nv, ":", &v_lasts);
3993 3993 name = sd_strtok_r(token, " \t", &x_lasts);
3994 3994 token = sd_strtok_r(NULL, ":", &v_lasts);
3995 3995 value = sd_strtok_r(token, " \t", &x_lasts);
3996 3996 if (name == NULL || value == NULL) {
3997 3997 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3998 3998 "sd_nvpair_str_decode: "
3999 3999 "name or value is not valid!\n");
4000 4000 } else {
4001 4001 sd_set_properties(un, name, value);
4002 4002 }
4003 4003 }
4004 4004 }
4005 4005
4006 4006 /*
4007 4007 * Function: sd_strtok_r()
4008 4008 *
4009 4009  * Description: This function uses strpbrk and strspn to break the
4010 4010  *		string into tokens across sequential calls, in the manner of
4011 4011  *		strtok_r(3C). It returns NULL when no non-separator characters
4012 4012  *		remain. The string argument is NULL on all but the first call.
4013 4013 */
4014 4014 static char *
4015 4015 sd_strtok_r(char *string, const char *sepset, char **lasts)
4016 4016 {
4017 4017 char *q, *r;
4018 4018
4019 4019 /* First or subsequent call */
4020 4020 if (string == NULL)
4021 4021 string = *lasts;
4022 4022
4023 4023 if (string == NULL)
4024 4024 return (NULL);
4025 4025
4026 4026 /* Skip leading separators */
4027 4027 q = string + strspn(string, sepset);
4028 4028
4029 4029 if (*q == '\0')
4030 4030 return (NULL);
4031 4031
4032 4032 if ((r = strpbrk(q, sepset)) == NULL)
4033 4033 *lasts = NULL;
4034 4034 else {
4035 4035 *r = '\0';
4036 4036 *lasts = r + 1;
4037 4037 }
4038 4038 return (q);
4039 4039 }
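
A minimal sketch of how sd_strtok_r() is driven, mirroring the loop in
sd_nvpair_str_decode() above; the input string is hypothetical. Note that the
function writes '\0' over separators, so the buffer must be writable:

	char nvpairs[] = "retries-busy : 12, cache-nonvolatile : true";
	char *nv, *nv_lasts;

	/* The first call passes the buffer; subsequent calls pass NULL. */
	for (nv = sd_strtok_r(nvpairs, ",", &nv_lasts); nv != NULL;
	    nv = sd_strtok_r(NULL, ",", &nv_lasts)) {
		/* nv is "retries-busy : 12", then " cache-nonvolatile : true" */
	}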
4040 4040
4041 4041 /*
4042 4042 * Function: sd_set_properties()
4043 4043 *
4044 4044 * Description: Set device properties based on the improved
4045 4045 * format sd-config-list.
4046 4046 *
4047 4047 * Arguments: un - driver soft state (unit) structure
4048 4048 * name - supported tunable name
4049 4049 * value - tunable value
4050 4050 */
4051 4051 static void
4052 4052 sd_set_properties(struct sd_lun *un, char *name, char *value)
4053 4053 {
4054 4054 char *endptr = NULL;
4055 4055 long val = 0;
4056 4056
4057 4057 if (strcasecmp(name, "cache-nonvolatile") == 0) {
4058 4058 if (strcasecmp(value, "true") == 0) {
4059 4059 un->un_f_suppress_cache_flush = TRUE;
4060 4060 } else if (strcasecmp(value, "false") == 0) {
4061 4061 un->un_f_suppress_cache_flush = FALSE;
4062 4062 } else {
4063 4063 goto value_invalid;
4064 4064 }
4065 4065 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4066 4066 "suppress_cache_flush flag set to %d\n",
4067 4067 un->un_f_suppress_cache_flush);
4068 4068 return;
4069 4069 }
4070 4070
4071 4071 if (strcasecmp(name, "controller-type") == 0) {
4072 4072 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4073 4073 un->un_ctype = val;
4074 4074 } else {
4075 4075 goto value_invalid;
4076 4076 }
4077 4077 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4078 4078 "ctype set to %d\n", un->un_ctype);
4079 4079 return;
4080 4080 }
4081 4081
4082 4082 if (strcasecmp(name, "delay-busy") == 0) {
4083 4083 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4084 4084 un->un_busy_timeout = drv_usectohz(val / 1000);
4085 4085 } else {
4086 4086 goto value_invalid;
4087 4087 }
4088 4088 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4089 4089 "busy_timeout set to %d\n", un->un_busy_timeout);
4090 4090 return;
4091 4091 }
4092 4092
4093 4093 if (strcasecmp(name, "disksort") == 0) {
4094 4094 if (strcasecmp(value, "true") == 0) {
4095 4095 un->un_f_disksort_disabled = FALSE;
4096 4096 } else if (strcasecmp(value, "false") == 0) {
4097 4097 un->un_f_disksort_disabled = TRUE;
4098 4098 } else {
4099 4099 goto value_invalid;
4100 4100 }
4101 4101 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4102 4102 "disksort disabled flag set to %d\n",
4103 4103 un->un_f_disksort_disabled);
4104 4104 return;
4105 4105 }
4106 4106
4107 4107 if (strcasecmp(name, "power-condition") == 0) {
4108 4108 if (strcasecmp(value, "true") == 0) {
4109 4109 un->un_f_power_condition_disabled = FALSE;
4110 4110 } else if (strcasecmp(value, "false") == 0) {
4111 4111 un->un_f_power_condition_disabled = TRUE;
4112 4112 } else {
4113 4113 goto value_invalid;
4114 4114 }
4115 4115 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4116 4116 "power condition disabled flag set to %d\n",
4117 4117 un->un_f_power_condition_disabled);
4118 4118 return;
4119 4119 }
4120 4120
4121 4121 if (strcasecmp(name, "timeout-releasereservation") == 0) {
4122 4122 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4123 4123 un->un_reserve_release_time = val;
4124 4124 } else {
4125 4125 goto value_invalid;
4126 4126 }
4127 4127 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4128 4128 "reservation release timeout set to %d\n",
4129 4129 un->un_reserve_release_time);
4130 4130 return;
4131 4131 }
4132 4132
4133 4133 if (strcasecmp(name, "reset-lun") == 0) {
4134 4134 if (strcasecmp(value, "true") == 0) {
4135 4135 un->un_f_lun_reset_enabled = TRUE;
4136 4136 } else if (strcasecmp(value, "false") == 0) {
4137 4137 un->un_f_lun_reset_enabled = FALSE;
4138 4138 } else {
4139 4139 goto value_invalid;
4140 4140 }
4141 4141 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4142 4142 "lun reset enabled flag set to %d\n",
4143 4143 un->un_f_lun_reset_enabled);
4144 4144 return;
4145 4145 }
4146 4146
4147 4147 if (strcasecmp(name, "retries-busy") == 0) {
4148 4148 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4149 4149 un->un_busy_retry_count = val;
4150 4150 } else {
4151 4151 goto value_invalid;
4152 4152 }
4153 4153 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4154 4154 "busy retry count set to %d\n", un->un_busy_retry_count);
4155 4155 return;
4156 4156 }
4157 4157
4158 4158 if (strcasecmp(name, "retries-timeout") == 0) {
4159 4159 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4160 4160 un->un_retry_count = val;
4161 4161 } else {
4162 4162 goto value_invalid;
4163 4163 }
4164 4164 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4165 4165 "timeout retry count set to %d\n", un->un_retry_count);
4166 4166 return;
4167 4167 }
4168 4168
4169 4169 if (strcasecmp(name, "retries-notready") == 0) {
4170 4170 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4171 4171 un->un_notready_retry_count = val;
4172 4172 } else {
4173 4173 goto value_invalid;
4174 4174 }
4175 4175 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4176 4176 "notready retry count set to %d\n",
4177 4177 un->un_notready_retry_count);
4178 4178 return;
4179 4179 }
4180 4180
4181 4181 if (strcasecmp(name, "retries-reset") == 0) {
4182 4182 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4183 4183 un->un_reset_retry_count = val;
4184 4184 } else {
4185 4185 goto value_invalid;
4186 4186 }
4187 4187 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4188 4188 "reset retry count set to %d\n",
4189 4189 un->un_reset_retry_count);
4190 4190 return;
4191 4191 }
4192 4192
4193 4193 if (strcasecmp(name, "throttle-max") == 0) {
4194 4194 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4195 4195 un->un_saved_throttle = un->un_throttle = val;
4196 4196 } else {
4197 4197 goto value_invalid;
4198 4198 }
4199 4199 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4200 4200 "throttle set to %d\n", un->un_throttle);
4201 4201 }
4202 4202
4203 4203 if (strcasecmp(name, "throttle-min") == 0) {
4204 4204 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4205 4205 un->un_min_throttle = val;
4206 4206 } else {
4207 4207 goto value_invalid;
4208 4208 }
4209 4209 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4210 4210 "min throttle set to %d\n", un->un_min_throttle);
4211 4211 }
4212 4212
4213 4213 if (strcasecmp(name, "rmw-type") == 0) {
4214 4214 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4215 4215 un->un_f_rmw_type = val;
4216 4216 } else {
4217 4217 goto value_invalid;
4218 4218 }
4219 4219 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4220 4220 "RMW type set to %d\n", un->un_f_rmw_type);
4221 4221 }
4222 4222
4223 4223 if (strcasecmp(name, "physical-block-size") == 0) {
4224 4224 if (ddi_strtol(value, &endptr, 0, &val) == 0 &&
4225 4225 ISP2(val) && val >= un->un_tgt_blocksize &&
4226 4226 val >= un->un_sys_blocksize) {
4227 4227 un->un_phy_blocksize = val;
4228 4228 } else {
4229 4229 goto value_invalid;
4230 4230 }
4231 4231 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4232 4232 "physical block size set to %d\n", un->un_phy_blocksize);
4233 4233 }
4234 4234
4235 4235 if (strcasecmp(name, "retries-victim") == 0) {
4236 4236 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4237 4237 un->un_victim_retry_count = val;
4238 4238 } else {
4239 4239 goto value_invalid;
4240 4240 }
4241 4241 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4242 4242 "victim retry count set to %d\n",
4243 4243 un->un_victim_retry_count);
4244 4244 return;
4245 4245 }
4246 4246
4247 4247 /*
4248 4248 * Validate the throttle values.
4249 4249 * If any of the numbers are invalid, set everything to defaults.
4250 4250 */
4251 4251 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
4252 4252 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
4253 4253 (un->un_min_throttle > un->un_throttle)) {
4254 4254 un->un_saved_throttle = un->un_throttle = sd_max_throttle;
4255 4255 un->un_min_throttle = sd_min_throttle;
4256 4256 }
4257 4257
4258 4258 if (strcasecmp(name, "mmc-gesn-polling") == 0) {
4259 4259 if (strcasecmp(value, "true") == 0) {
4260 4260 un->un_f_mmc_gesn_polling = TRUE;
4261 4261 } else if (strcasecmp(value, "false") == 0) {
4262 4262 un->un_f_mmc_gesn_polling = FALSE;
4263 4263 } else {
4264 4264 goto value_invalid;
4265 4265 }
4266 4266 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4267 4267 "mmc-gesn-polling set to %d\n",
4268 4268 un->un_f_mmc_gesn_polling);
4269 4269 }
4270 4270
4271 4271 return;
4272 4272
4273 4273 value_invalid:
4274 4274 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4275 4275 "value of prop %s is invalid\n", name);
4276 4276 }
4277 4277
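/*
 * Illustrative example (not part of this change): a version-2 sd.conf
 * tunable entry such as
 *
 *	sd-config-list = "ATA     VBOX HARDDISK", "physical-block-size:4096";
 *
 * is decoded into name:value pairs, so sd_set_properties() above would
 * be invoked with name "physical-block-size" and value "4096".
 */
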
4278 4278 /*
4279 4279 * Function: sd_get_tunables_from_conf()
4280 4280  *
4281 4281  * Description: This function reads the data list from the sd.conf file
4282 4282  *		and pulls the values that can have numeric values as
4283 4283  *		arguments, placing them in the appropriate sd_tunables
4284 4284  *		member.
4285 4285  *		Since the order of the data list members varies across
4286 4286  *		platforms, this function reads them from the data list in
4287 4287  *		a platform-specific order and places them into the correct
4288 4288  *		sd_tunables member that is consistent across all platforms.
4289 4289 */
4290 4290 static void
4291 4291 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list,
4292 4292 sd_tunables *values)
4293 4293 {
4294 4294 int i;
4295 4295 int mask;
4296 4296
4297 4297 bzero(values, sizeof (sd_tunables));
4298 4298
4299 4299 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
4300 4300
4301 4301 mask = 1 << i;
4302 4302 if (mask > flags) {
4303 4303 break;
4304 4304 }
4305 4305
4306 4306 switch (mask & flags) {
4307 4307 case 0: /* This mask bit not set in flags */
4308 4308 continue;
4309 4309 case SD_CONF_BSET_THROTTLE:
4310 4310 values->sdt_throttle = data_list[i];
4311 4311 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4312 4312 "sd_get_tunables_from_conf: throttle = %d\n",
4313 4313 values->sdt_throttle);
4314 4314 break;
4315 4315 case SD_CONF_BSET_CTYPE:
4316 4316 values->sdt_ctype = data_list[i];
4317 4317 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4318 4318 "sd_get_tunables_from_conf: ctype = %d\n",
4319 4319 values->sdt_ctype);
4320 4320 break;
4321 4321 case SD_CONF_BSET_NRR_COUNT:
4322 4322 values->sdt_not_rdy_retries = data_list[i];
4323 4323 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4324 4324 "sd_get_tunables_from_conf: not_rdy_retries = %d\n",
4325 4325 values->sdt_not_rdy_retries);
4326 4326 break;
4327 4327 case SD_CONF_BSET_BSY_RETRY_COUNT:
4328 4328 values->sdt_busy_retries = data_list[i];
4329 4329 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4330 4330 "sd_get_tunables_from_conf: busy_retries = %d\n",
4331 4331 values->sdt_busy_retries);
4332 4332 break;
4333 4333 case SD_CONF_BSET_RST_RETRIES:
4334 4334 values->sdt_reset_retries = data_list[i];
4335 4335 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4336 4336 "sd_get_tunables_from_conf: reset_retries = %d\n",
4337 4337 values->sdt_reset_retries);
4338 4338 break;
4339 4339 case SD_CONF_BSET_RSV_REL_TIME:
4340 4340 values->sdt_reserv_rel_time = data_list[i];
4341 4341 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4342 4342 "sd_get_tunables_from_conf: reserv_rel_time = %d\n",
4343 4343 values->sdt_reserv_rel_time);
4344 4344 break;
4345 4345 case SD_CONF_BSET_MIN_THROTTLE:
4346 4346 values->sdt_min_throttle = data_list[i];
4347 4347 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4348 4348 "sd_get_tunables_from_conf: min_throttle = %d\n",
4349 4349 values->sdt_min_throttle);
4350 4350 break;
4351 4351 case SD_CONF_BSET_DISKSORT_DISABLED:
4352 4352 values->sdt_disk_sort_dis = data_list[i];
4353 4353 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4354 4354 "sd_get_tunables_from_conf: disk_sort_dis = %d\n",
4355 4355 values->sdt_disk_sort_dis);
4356 4356 break;
4357 4357 case SD_CONF_BSET_LUN_RESET_ENABLED:
4358 4358 values->sdt_lun_reset_enable = data_list[i];
4359 4359 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4360 4360 			    "sd_get_tunables_from_conf: lun_reset_enable = %d\n",
4361 4361 			    values->sdt_lun_reset_enable);
4362 4362 break;
4363 4363 case SD_CONF_BSET_CACHE_IS_NV:
4364 4364 values->sdt_suppress_cache_flush = data_list[i];
4365 4365 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4366 4366 			    "sd_get_tunables_from_conf: "
4367 4367 			    "suppress_cache_flush = %d\n",
4368 4368 			    values->sdt_suppress_cache_flush);
4369 4369 break;
4370 4370 case SD_CONF_BSET_PC_DISABLED:
4371 4371 			values->sdt_power_condition_dis = data_list[i];
4372 4372 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4373 4373 "sd_get_tunables_from_conf: power_condition_dis = "
4374 4374 "%d\n", values->sdt_power_condition_dis);
4375 4375 break;
4376 4376 }
4377 4377 }
4378 4378 }
4379 4379
4380 4380 /*
4381 4381 * Function: sd_process_sdconf_table
4382 4382 *
4383 4383 * Description: Search the static configuration table for a match on the
4384 4384 * inquiry vid/pid and update the driver soft state structure
4385 4385 * according to the table property values for the device.
4386 4386 *
4387 4387 * The form of a configuration table entry is:
4388 4388 * <vid+pid>,<flags>,<property-data>
4389 4389 * "SEAGATE ST42400N",1,0x40000,
4390 4390 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1;
4391 4391 *
4392 4392 * Arguments: un - driver soft state (unit) structure
4393 4393 */
4394 4394
4395 4395 static void
4396 4396 sd_process_sdconf_table(struct sd_lun *un)
4397 4397 {
4398 4398 char *id = NULL;
4399 4399 int table_index;
4400 4400 int idlen;
4401 4401
4402 4402 ASSERT(un != NULL);
4403 4403 for (table_index = 0; table_index < sd_disk_table_size;
4404 4404 table_index++) {
4405 4405 id = sd_disk_table[table_index].device_id;
4406 4406 idlen = strlen(id);
4407 4407
4408 4408 /*
4409 4409 * The static configuration table currently does not
4410 4410 * implement version 10 properties. Additionally,
4411 4411 * multiple data-property-name entries are not
4412 4412 * implemented in the static configuration table.
4413 4413 */
4414 4414 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
4415 4415 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4416 4416 "sd_process_sdconf_table: disk %s\n", id);
4417 4417 sd_set_vers1_properties(un,
4418 4418 sd_disk_table[table_index].flags,
4419 4419 sd_disk_table[table_index].properties);
4420 4420 break;
4421 4421 }
4422 4422 }
4423 4423 }
4424 4424
4425 4425
4426 4426 /*
4427 4427 * Function: sd_sdconf_id_match
4428 4428 *
4429 4429  * Description: This local function implements a case insensitive vid/pid
4430 4430 * comparison as well as the boundary cases of wild card and
4431 4431 * multiple blanks.
4432 4432 *
4433 4433 * Note: An implicit assumption made here is that the scsi
4434 4434 * inquiry structure will always keep the vid, pid and
4435 4435 * revision strings in consecutive sequence, so they can be
4436 4436 * read as a single string. If this assumption is not the
4437 4437 * case, a separate string, to be used for the check, needs
4438 4438 * to be built with these strings concatenated.
4439 4439 *
4440 4440 * Arguments: un - driver soft state (unit) structure
4441 4441 * id - table or config file vid/pid
4442 4442 * idlen - length of the vid/pid (bytes)
4443 4443 *
4444 4444 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
4445 4445 * SD_FAILURE - Indicates no match with the inquiry vid/pid
4446 4446 */
4447 4447
4448 4448 static int
4449 4449 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen)
4450 4450 {
4451 4451 struct scsi_inquiry *sd_inq;
4452 4452 int rval = SD_SUCCESS;
4453 4453
4454 4454 ASSERT(un != NULL);
4455 4455 sd_inq = un->un_sd->sd_inq;
4456 4456 ASSERT(id != NULL);
4457 4457
4458 4458 /*
4459 4459 * We use the inq_vid as a pointer to a buffer containing the
4460 4460 * vid and pid and use the entire vid/pid length of the table
4461 4461 * entry for the comparison. This works because the inq_pid
4462 4462 * data member follows inq_vid in the scsi_inquiry structure.
4463 4463 */
4464 4464 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) {
4465 4465 /*
4466 4466 * The user id string is compared to the inquiry vid/pid
4467 4467 * using a case insensitive comparison and ignoring
4468 4468 * multiple spaces.
4469 4469 */
4470 4470 rval = sd_blank_cmp(un, id, idlen);
4471 4471 if (rval != SD_SUCCESS) {
4472 4472 /*
4473 4473 * User id strings that start and end with a "*"
4474 4474 * are a special case. These do not have a
4475 4475 * specific vendor, and the product string can
4476 4476 * appear anywhere in the 16 byte PID portion of
4477 4477 * the inquiry data. This is a simple strstr()
4478 4478 * type search for the user id in the inquiry data.
4479 4479 */
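			/*
			 * Illustrative example: an entry of "*X3900*"
			 * (a hypothetical product string) matches any
			 * device whose 16-byte inq_pid contains the
			 * substring "X3900", regardless of the vendor.
			 */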
4480 4480 if ((id[0] == '*') && (id[idlen - 1] == '*')) {
4481 4481 char *pidptr = &id[1];
4482 4482 int i;
4483 4483 int j;
4484 4484 int pidstrlen = idlen - 2;
4485 4485 j = sizeof (SD_INQUIRY(un)->inq_pid) -
4486 4486 pidstrlen;
4487 4487
4488 4488 if (j < 0) {
4489 4489 return (SD_FAILURE);
4490 4490 }
4491 4491 for (i = 0; i < j; i++) {
4492 4492 if (bcmp(&SD_INQUIRY(un)->inq_pid[i],
4493 4493 pidptr, pidstrlen) == 0) {
4494 4494 rval = SD_SUCCESS;
4495 4495 break;
4496 4496 }
4497 4497 }
4498 4498 }
4499 4499 }
4500 4500 }
4501 4501 return (rval);
4502 4502 }
4503 4503
4504 4504
4505 4505 /*
4506 4506 * Function: sd_blank_cmp
4507 4507 *
4508 4508 * Description: If the id string starts and ends with a space, treat
4509 4509 * multiple consecutive spaces as equivalent to a single
4510 4510 * space. For example, this causes a sd_disk_table entry
4511 4511 * of " NEC CDROM " to match a device's id string of
4512 4512 * "NEC CDROM".
4513 4513 *
4514 4514  * Note:	This routine succeeds when the pointer into the table
4515 4515  *		entry reaches '\0' and the remaining inquiry length
4516 4516  *		count is zero. This will happen if the inquiry
4517 4517 * string returned by the device is padded with spaces to be
4518 4518 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The
4519 4519 * SCSI spec states that the inquiry string is to be padded with
4520 4520 * spaces.
4521 4521 *
4522 4522 * Arguments: un - driver soft state (unit) structure
4523 4523 * id - table or config file vid/pid
4524 4524 * idlen - length of the vid/pid (bytes)
4525 4525 *
4526 4526 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
4527 4527 * SD_FAILURE - Indicates no match with the inquiry vid/pid
4528 4528 */
4529 4529
4530 4530 static int
4531 4531 sd_blank_cmp(struct sd_lun *un, char *id, int idlen)
4532 4532 {
4533 4533 char *p1;
4534 4534 char *p2;
4535 4535 int cnt;
4536 4536 cnt = sizeof (SD_INQUIRY(un)->inq_vid) +
4537 4537 sizeof (SD_INQUIRY(un)->inq_pid);
4538 4538
4539 4539 ASSERT(un != NULL);
4540 4540 p2 = un->un_sd->sd_inq->inq_vid;
4541 4541 ASSERT(id != NULL);
4542 4542 p1 = id;
4543 4543
4544 4544 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) {
4545 4545 /*
4546 4546 * Note: string p1 is terminated by a NUL but string p2
4547 4547 * isn't. The end of p2 is determined by cnt.
4548 4548 */
4549 4549 for (;;) {
4550 4550 /* skip over any extra blanks in both strings */
4551 4551 while ((*p1 != '\0') && (*p1 == ' ')) {
4552 4552 p1++;
4553 4553 }
4554 4554 while ((cnt != 0) && (*p2 == ' ')) {
4555 4555 p2++;
4556 4556 cnt--;
4557 4557 }
4558 4558
4559 4559 /* compare the two strings */
4560 4560 if ((cnt == 0) ||
4561 4561 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) {
4562 4562 break;
4563 4563 }
4564 4564 while ((cnt > 0) &&
4565 4565 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) {
4566 4566 p1++;
4567 4567 p2++;
4568 4568 cnt--;
4569 4569 }
4570 4570 }
4571 4571 }
4572 4572
4573 4573 /* return SD_SUCCESS if both strings match */
4574 4574 return (((*p1 == '\0') && (cnt == 0)) ? SD_SUCCESS : SD_FAILURE);
4575 4575 }
4576 4576
4577 4577
4578 4578 /*
4579 4579 * Function: sd_chk_vers1_data
4580 4580 *
4581 4581 * Description: Verify the version 1 device properties provided by the
4582 4582 * user via the configuration file
4583 4583 *
4584 4584 * Arguments: un - driver soft state (unit) structure
4585 4585 * flags - integer mask indicating properties to be set
4586 4586 * prop_list - integer list of property values
4587 4587 * list_len - number of the elements
4588 4588 *
4589 4589 * Return Code: SD_SUCCESS - Indicates the user provided data is valid
4590 4590 * SD_FAILURE - Indicates the user provided data is invalid
4591 4591 */
4592 4592
4593 4593 static int
4594 4594 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
4595 4595 int list_len, char *dataname_ptr)
4596 4596 {
4597 4597 int i;
4598 4598 int mask = 1;
4599 4599 int index = 0;
4600 4600
4601 4601 ASSERT(un != NULL);
4602 4602
4603 4603 /* Check for a NULL property name and list */
4604 4604 if (dataname_ptr == NULL) {
4605 4605 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4606 4606 "sd_chk_vers1_data: NULL data property name.");
4607 4607 return (SD_FAILURE);
4608 4608 }
4609 4609 if (prop_list == NULL) {
4610 4610 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4611 4611 "sd_chk_vers1_data: %s NULL data property list.",
4612 4612 dataname_ptr);
4613 4613 return (SD_FAILURE);
4614 4614 }
4615 4615
4616 4616 /* Display a warning if undefined bits are set in the flags */
4617 4617 if (flags & ~SD_CONF_BIT_MASK) {
4618 4618 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4619 4619 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. "
4620 4620 "Properties not set.",
4621 4621 (flags & ~SD_CONF_BIT_MASK), dataname_ptr);
4622 4622 return (SD_FAILURE);
4623 4623 }
4624 4624
4625 4625 /*
4626 4626 * Verify the length of the list by identifying the highest bit set
4627 4627 * in the flags and validating that the property list has a length
4628 4628 * up to the index of this bit.
4629 4629 */
4630 4630 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
4631 4631 if (flags & mask) {
4632 4632 index++;
4633 4633 }
4634 4634 mask = 1 << i;
4635 4635 }
4636 4636 if (list_len < (index + 2)) {
4637 4637 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4638 4638 "sd_chk_vers1_data: "
4639 4639 "Data property list %s size is incorrect. "
4640 4640 "Properties not set.", dataname_ptr);
4641 4641 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: "
4642 4642 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS);
4643 4643 return (SD_FAILURE);
4644 4644 }
4645 4645 return (SD_SUCCESS);
4646 4646 }
4647 4647
4648 4648
4649 4649 /*
4650 4650 * Function: sd_set_vers1_properties
4651 4651 *
4652 4652 * Description: Set version 1 device properties based on a property list
4653 4653 * retrieved from the driver configuration file or static
4654 4654 * configuration table. Version 1 properties have the format:
4655 4655 *
4656 4656 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
4657 4657 *
4658 4658 * where the prop0 value will be used to set prop0 if bit0
4659 4659 * is set in the flags
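 *
 *		Illustrative example: a sd.conf pairing of
 *
 *		    sd-config-list = "SEAGATE ST31200N", "seagate-data";
 *		    seagate-data = 1,0x1,32;
 *
 *		carries version 1, flags 0x1 (SD_CONF_BSET_THROTTLE), and
 *		prop0 = 32, which would set un_throttle to 32 below.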
4660 4660 *
4661 4661 * Arguments: un - driver soft state (unit) structure
4662 4662 * flags - integer mask indicating properties to be set
4663 4663 * prop_list - integer list of property values
4664 4664 */
4665 4665
4666 4666 static void
4667 4667 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list)
4668 4668 {
4669 4669 ASSERT(un != NULL);
4670 4670
4671 4671 /*
4672 4672 * Set the flag to indicate cache is to be disabled. An attempt
4673 4673 * to disable the cache via sd_cache_control() will be made
4674 4674 * later during attach once the basic initialization is complete.
4675 4675 */
4676 4676 if (flags & SD_CONF_BSET_NOCACHE) {
4677 4677 un->un_f_opt_disable_cache = TRUE;
4678 4678 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4679 4679 "sd_set_vers1_properties: caching disabled flag set\n");
4680 4680 }
4681 4681
4682 4682 /* CD-specific configuration parameters */
4683 4683 if (flags & SD_CONF_BSET_PLAYMSF_BCD) {
4684 4684 un->un_f_cfg_playmsf_bcd = TRUE;
4685 4685 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4686 4686 "sd_set_vers1_properties: playmsf_bcd set\n");
4687 4687 }
4688 4688 if (flags & SD_CONF_BSET_READSUB_BCD) {
4689 4689 un->un_f_cfg_readsub_bcd = TRUE;
4690 4690 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4691 4691 "sd_set_vers1_properties: readsub_bcd set\n");
4692 4692 }
4693 4693 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) {
4694 4694 un->un_f_cfg_read_toc_trk_bcd = TRUE;
4695 4695 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4696 4696 "sd_set_vers1_properties: read_toc_trk_bcd set\n");
4697 4697 }
4698 4698 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) {
4699 4699 un->un_f_cfg_read_toc_addr_bcd = TRUE;
4700 4700 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4701 4701 "sd_set_vers1_properties: read_toc_addr_bcd set\n");
4702 4702 }
4703 4703 if (flags & SD_CONF_BSET_NO_READ_HEADER) {
4704 4704 un->un_f_cfg_no_read_header = TRUE;
4705 4705 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4706 4706 "sd_set_vers1_properties: no_read_header set\n");
4707 4707 }
4708 4708 if (flags & SD_CONF_BSET_READ_CD_XD4) {
4709 4709 un->un_f_cfg_read_cd_xd4 = TRUE;
4710 4710 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4711 4711 "sd_set_vers1_properties: read_cd_xd4 set\n");
4712 4712 }
4713 4713
4714 4714 /* Support for devices which do not have valid/unique serial numbers */
4715 4715 if (flags & SD_CONF_BSET_FAB_DEVID) {
4716 4716 un->un_f_opt_fab_devid = TRUE;
4717 4717 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4718 4718 "sd_set_vers1_properties: fab_devid bit set\n");
4719 4719 }
4720 4720
4721 4721 /* Support for user throttle configuration */
4722 4722 if (flags & SD_CONF_BSET_THROTTLE) {
4723 4723 ASSERT(prop_list != NULL);
4724 4724 un->un_saved_throttle = un->un_throttle =
4725 4725 prop_list->sdt_throttle;
4726 4726 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4727 4727 "sd_set_vers1_properties: throttle set to %d\n",
4728 4728 prop_list->sdt_throttle);
4729 4729 }
4730 4730
4731 4731 /* Set the per disk retry count according to the conf file or table. */
4732 4732 if (flags & SD_CONF_BSET_NRR_COUNT) {
4733 4733 ASSERT(prop_list != NULL);
4734 4734 if (prop_list->sdt_not_rdy_retries) {
4735 4735 un->un_notready_retry_count =
4736 4736 prop_list->sdt_not_rdy_retries;
4737 4737 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4738 4738 "sd_set_vers1_properties: not ready retry count"
4739 4739 " set to %d\n", un->un_notready_retry_count);
4740 4740 }
4741 4741 }
4742 4742
4743 4743 /* The controller type is reported for generic disk driver ioctls */
4744 4744 if (flags & SD_CONF_BSET_CTYPE) {
4745 4745 ASSERT(prop_list != NULL);
4746 4746 switch (prop_list->sdt_ctype) {
4747 4747 case CTYPE_CDROM:
4748 4748 un->un_ctype = prop_list->sdt_ctype;
4749 4749 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4750 4750 "sd_set_vers1_properties: ctype set to "
4751 4751 "CTYPE_CDROM\n");
4752 4752 break;
4753 4753 case CTYPE_CCS:
4754 4754 un->un_ctype = prop_list->sdt_ctype;
4755 4755 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4756 4756 "sd_set_vers1_properties: ctype set to "
4757 4757 "CTYPE_CCS\n");
4758 4758 break;
4759 4759 case CTYPE_ROD: /* RW optical */
4760 4760 un->un_ctype = prop_list->sdt_ctype;
4761 4761 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4762 4762 "sd_set_vers1_properties: ctype set to "
4763 4763 "CTYPE_ROD\n");
4764 4764 break;
4765 4765 default:
4766 4766 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4767 4767 "sd_set_vers1_properties: Could not set "
4768 4768 "invalid ctype value (%d)",
4769 4769 prop_list->sdt_ctype);
4770 4770 }
4771 4771 }
4772 4772
4773 4773 /* Purple failover timeout */
4774 4774 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) {
4775 4775 ASSERT(prop_list != NULL);
4776 4776 un->un_busy_retry_count =
4777 4777 prop_list->sdt_busy_retries;
4778 4778 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4779 4779 "sd_set_vers1_properties: "
4780 4780 "busy retry count set to %d\n",
4781 4781 un->un_busy_retry_count);
4782 4782 }
4783 4783
4784 4784 /* Purple reset retry count */
4785 4785 if (flags & SD_CONF_BSET_RST_RETRIES) {
4786 4786 ASSERT(prop_list != NULL);
4787 4787 un->un_reset_retry_count =
4788 4788 prop_list->sdt_reset_retries;
4789 4789 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4790 4790 "sd_set_vers1_properties: "
4791 4791 "reset retry count set to %d\n",
4792 4792 un->un_reset_retry_count);
4793 4793 }
4794 4794
4795 4795 /* Purple reservation release timeout */
4796 4796 if (flags & SD_CONF_BSET_RSV_REL_TIME) {
4797 4797 ASSERT(prop_list != NULL);
4798 4798 un->un_reserve_release_time =
4799 4799 prop_list->sdt_reserv_rel_time;
4800 4800 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4801 4801 "sd_set_vers1_properties: "
4802 4802 "reservation release timeout set to %d\n",
4803 4803 un->un_reserve_release_time);
4804 4804 }
4805 4805
4806 4806 /*
4807 4807 * Driver flag telling the driver to verify that no commands are pending
4808 4808 * for a device before issuing a Test Unit Ready. This is a workaround
4809 4809 * for a firmware bug in some Seagate eliteI drives.
4810 4810 */
4811 4811 if (flags & SD_CONF_BSET_TUR_CHECK) {
4812 4812 un->un_f_cfg_tur_check = TRUE;
4813 4813 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4814 4814 "sd_set_vers1_properties: tur queue check set\n");
4815 4815 }
4816 4816
4817 4817 if (flags & SD_CONF_BSET_MIN_THROTTLE) {
4818 4818 un->un_min_throttle = prop_list->sdt_min_throttle;
4819 4819 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4820 4820 "sd_set_vers1_properties: min throttle set to %d\n",
4821 4821 un->un_min_throttle);
4822 4822 }
4823 4823
4824 4824 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) {
4825 4825 un->un_f_disksort_disabled =
4826 4826 (prop_list->sdt_disk_sort_dis != 0) ?
4827 4827 TRUE : FALSE;
4828 4828 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4829 4829 "sd_set_vers1_properties: disksort disabled "
4830 4830 "flag set to %d\n",
4831 4831 prop_list->sdt_disk_sort_dis);
4832 4832 }
4833 4833
4834 4834 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) {
4835 4835 un->un_f_lun_reset_enabled =
4836 4836 (prop_list->sdt_lun_reset_enable != 0) ?
4837 4837 TRUE : FALSE;
4838 4838 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4839 4839 "sd_set_vers1_properties: lun reset enabled "
4840 4840 "flag set to %d\n",
4841 4841 prop_list->sdt_lun_reset_enable);
4842 4842 }
4843 4843
4844 4844 if (flags & SD_CONF_BSET_CACHE_IS_NV) {
4845 4845 un->un_f_suppress_cache_flush =
4846 4846 (prop_list->sdt_suppress_cache_flush != 0) ?
4847 4847 TRUE : FALSE;
4848 4848 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4849 4849 "sd_set_vers1_properties: suppress_cache_flush "
4850 4850 "flag set to %d\n",
4851 4851 prop_list->sdt_suppress_cache_flush);
4852 4852 }
4853 4853
4854 4854 if (flags & SD_CONF_BSET_PC_DISABLED) {
4855 4855 un->un_f_power_condition_disabled =
4856 4856 (prop_list->sdt_power_condition_dis != 0) ?
4857 4857 TRUE : FALSE;
4858 4858 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4859 4859 "sd_set_vers1_properties: power_condition_disabled "
4860 4860 "flag set to %d\n",
4861 4861 prop_list->sdt_power_condition_dis);
4862 4862 }
4863 4863
4864 4864 /*
4865 4865 * Validate the throttle values.
4866 4866 * If any of the numbers are invalid, set everything to defaults.
4867 4867 */
4868 4868 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
4869 4869 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
4870 4870 (un->un_min_throttle > un->un_throttle)) {
4871 4871 un->un_saved_throttle = un->un_throttle = sd_max_throttle;
4872 4872 un->un_min_throttle = sd_min_throttle;
4873 4873 }
4874 4874 }
4875 4875
4876 4876 /*
4877 4877 * Function: sd_is_lsi()
4878 4878 *
4879 4879 * Description: Check for lsi devices, step through the static device
4880 4880 * table to match vid/pid.
4881 4881 *
4882 4882 * Args: un - ptr to sd_lun
4883 4883 *
4884 4884 * Notes: When creating new LSI property, need to add the new LSI property
4885 4885 * to this function.
4886 4886 */
4887 4887 static void
4888 4888 sd_is_lsi(struct sd_lun *un)
4889 4889 {
4890 4890 char *id = NULL;
4891 4891 int table_index;
4892 4892 int idlen;
4893 4893 void *prop;
4894 4894
4895 4895 ASSERT(un != NULL);
4896 4896 for (table_index = 0; table_index < sd_disk_table_size;
4897 4897 table_index++) {
4898 4898 id = sd_disk_table[table_index].device_id;
4899 4899 idlen = strlen(id);
4900 4900 if (idlen == 0) {
4901 4901 continue;
4902 4902 }
4903 4903
4904 4904 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
4905 4905 prop = sd_disk_table[table_index].properties;
4906 4906 if (prop == &lsi_properties ||
4907 4907 prop == &lsi_oem_properties ||
4908 4908 prop == &lsi_properties_scsi ||
4909 4909 prop == &symbios_properties) {
4910 4910 un->un_f_cfg_is_lsi = TRUE;
4911 4911 }
4912 4912 break;
4913 4913 }
4914 4914 }
4915 4915 }
4916 4916
4917 4917 /*
4918 4918 * Function: sd_get_physical_geometry
4919 4919 *
4920 4920 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and
4921 4921 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the
4922 4922 * target, and use this information to initialize the physical
4923 4923 * geometry cache specified by pgeom_p.
4924 4924 *
4925 4925 * MODE SENSE is an optional command, so failure in this case
4926 4926 * does not necessarily denote an error. We want to use the
4927 4927 * MODE SENSE commands to derive the physical geometry of the
4928 4928 * device, but if either command fails, the logical geometry is
4929 4929 * used as the fallback for disk label geometry in cmlb.
4930 4930 *
4931 4931 * This requires that un->un_blockcount and un->un_tgt_blocksize
4932 4932 * have already been initialized for the current target and
4933 4933 * that the current values be passed as args so that we don't
4934 4934 * end up ever trying to use -1 as a valid value. This could
4935 4935 * happen if either value is reset while we're not holding
4936 4936 * the mutex.
4937 4937 *
4938 4938 * Arguments: un - driver soft state (unit) structure
4939 4939 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
4940 4940 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
4941 4941 * to use the USCSI "direct" chain and bypass the normal
4942 4942 * command waitq.
4943 4943 *
4944 4944 * Context: Kernel thread only (can sleep).
4945 4945 */
4946 4946
4947 4947 static int
4948 4948 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p,
4949 4949 diskaddr_t capacity, int lbasize, int path_flag)
4950 4950 {
4951 4951 struct mode_format *page3p;
4952 4952 struct mode_geometry *page4p;
4953 4953 struct mode_header *headerp;
4954 4954 int sector_size;
4955 4955 int nsect;
4956 4956 int nhead;
4957 4957 int ncyl;
4958 4958 int intrlv;
4959 4959 int spc;
4960 4960 diskaddr_t modesense_capacity;
4961 4961 int rpm;
4962 4962 int bd_len;
4963 4963 int mode_header_length;
4964 4964 uchar_t *p3bufp;
4965 4965 uchar_t *p4bufp;
4966 4966 int cdbsize;
4967 4967 int ret = EIO;
4968 4968 sd_ssc_t *ssc;
4969 4969 int status;
4970 4970
4971 4971 ASSERT(un != NULL);
4972 4972
4973 4973 if (lbasize == 0) {
4974 4974 if (ISCD(un)) {
4975 4975 lbasize = 2048;
4976 4976 } else {
4977 4977 lbasize = un->un_sys_blocksize;
4978 4978 }
4979 4979 }
4980 4980 pgeom_p->g_secsize = (unsigned short)lbasize;
4981 4981
4982 4982 /*
4983 4983 	 * If the unit is a CD/DVD drive, MODE SENSE page three
4984 4984 	 * and MODE SENSE page four are reserved (see the SBC and
4985 4985 	 * MMC specs). To prevent soft errors, just return using
4986 4986 	 * the default LBA size.
4987 4987 */
4988 4988 if (ISCD(un))
4989 4989 return (ret);
4990 4990
4991 4991 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0;
4992 4992
4993 4993 /*
4994 4994 * Retrieve MODE SENSE page 3 - Format Device Page
4995 4995 */
4996 4996 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP);
4997 4997 ssc = sd_ssc_init(un);
4998 4998 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp,
4999 4999 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag);
5000 5000 if (status != 0) {
5001 5001 SD_ERROR(SD_LOG_COMMON, un,
5002 5002 "sd_get_physical_geometry: mode sense page 3 failed\n");
5003 5003 goto page3_exit;
5004 5004 }
5005 5005
5006 5006 /*
5007 5007 * Determine size of Block Descriptors in order to locate the mode
5008 5008 * page data. ATAPI devices return 0, SCSI devices should return
5009 5009 * MODE_BLK_DESC_LENGTH.
5010 5010 */
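	/*
	 * Illustrative layout, assuming a non-ATAPI device that returns a
	 * full block descriptor: a 4-byte mode header (MODE_HEADER_LENGTH)
	 * followed by an 8-byte block descriptor (MODE_BLK_DESC_LENGTH)
	 * and then the page data, so page3p below points 12 bytes into
	 * the buffer.
	 */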
5011 5011 headerp = (struct mode_header *)p3bufp;
5012 5012 if (un->un_f_cfg_is_atapi == TRUE) {
5013 5013 struct mode_header_grp2 *mhp =
5014 5014 (struct mode_header_grp2 *)headerp;
5015 5015 mode_header_length = MODE_HEADER_LENGTH_GRP2;
5016 5016 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
5017 5017 } else {
5018 5018 mode_header_length = MODE_HEADER_LENGTH;
5019 5019 bd_len = ((struct mode_header *)headerp)->bdesc_length;
5020 5020 }
5021 5021
5022 5022 if (bd_len > MODE_BLK_DESC_LENGTH) {
5023 5023 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
5024 5024 "sd_get_physical_geometry: received unexpected bd_len "
5025 5025 "of %d, page3\n", bd_len);
5026 5026 status = EIO;
5027 5027 goto page3_exit;
5028 5028 }
5029 5029
5030 5030 page3p = (struct mode_format *)
5031 5031 ((caddr_t)headerp + mode_header_length + bd_len);
5032 5032
5033 5033 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) {
5034 5034 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
5035 5035 "sd_get_physical_geometry: mode sense pg3 code mismatch "
5036 5036 "%d\n", page3p->mode_page.code);
5037 5037 status = EIO;
5038 5038 goto page3_exit;
5039 5039 }
5040 5040
5041 5041 /*
5042 5042 * Use this physical geometry data only if BOTH MODE SENSE commands
5043 5043 * complete successfully; otherwise, revert to the logical geometry.
5044 5044 * So, we need to save everything in temporary variables.
5045 5045 */
5046 5046 sector_size = BE_16(page3p->data_bytes_sect);
5047 5047
5048 5048 /*
5049 5049 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size
5050 5050 */
5051 5051 if (sector_size == 0) {
5052 5052 sector_size = un->un_sys_blocksize;
5053 5053 } else {
5054 5054 sector_size &= ~(un->un_sys_blocksize - 1);
5055 5055 }
5056 5056
5057 5057 nsect = BE_16(page3p->sect_track);
5058 5058 intrlv = BE_16(page3p->interleave);
5059 5059
5060 5060 SD_INFO(SD_LOG_COMMON, un,
5061 5061 "sd_get_physical_geometry: Format Parameters (page 3)\n");
5062 5062 SD_INFO(SD_LOG_COMMON, un,
5063 5063 " mode page: %d; nsect: %d; sector size: %d;\n",
5064 5064 page3p->mode_page.code, nsect, sector_size);
5065 5065 SD_INFO(SD_LOG_COMMON, un,
5066 5066 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv,
5067 5067 BE_16(page3p->track_skew),
5068 5068 BE_16(page3p->cylinder_skew));
5069 5069
5070 5070 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
5071 5071
5072 5072 /*
5073 5073 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page
5074 5074 */
5075 5075 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP);
5076 5076 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp,
5077 5077 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag);
5078 5078 if (status != 0) {
5079 5079 SD_ERROR(SD_LOG_COMMON, un,
5080 5080 "sd_get_physical_geometry: mode sense page 4 failed\n");
5081 5081 goto page4_exit;
5082 5082 }
5083 5083
5084 5084 /*
5085 5085 * Determine size of Block Descriptors in order to locate the mode
5086 5086 * page data. ATAPI devices return 0, SCSI devices should return
5087 5087 * MODE_BLK_DESC_LENGTH.
5088 5088 */
5089 5089 headerp = (struct mode_header *)p4bufp;
5090 5090 if (un->un_f_cfg_is_atapi == TRUE) {
5091 5091 struct mode_header_grp2 *mhp =
5092 5092 (struct mode_header_grp2 *)headerp;
5093 5093 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
5094 5094 } else {
5095 5095 bd_len = ((struct mode_header *)headerp)->bdesc_length;
5096 5096 }
5097 5097
5098 5098 if (bd_len > MODE_BLK_DESC_LENGTH) {
5099 5099 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
5100 5100 "sd_get_physical_geometry: received unexpected bd_len of "
5101 5101 "%d, page4\n", bd_len);
5102 5102 status = EIO;
5103 5103 goto page4_exit;
5104 5104 }
5105 5105
5106 5106 page4p = (struct mode_geometry *)
5107 5107 ((caddr_t)headerp + mode_header_length + bd_len);
5108 5108
5109 5109 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) {
5110 5110 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
5111 5111 "sd_get_physical_geometry: mode sense pg4 code mismatch "
5112 5112 "%d\n", page4p->mode_page.code);
5113 5113 status = EIO;
5114 5114 goto page4_exit;
5115 5115 }
5116 5116
5117 5117 /*
5118 5118 * Stash the data now, after we know that both commands completed.
5119 5119 */
5120 5120 
5122 5122 nhead = (int)page4p->heads; /* uchar, so no conversion needed */
5123 5123 spc = nhead * nsect;
5124 5124 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb;
5125 5125 rpm = BE_16(page4p->rpm);
5126 5126
5127 5127 modesense_capacity = spc * ncyl;
5128 5128
5129 5129 SD_INFO(SD_LOG_COMMON, un,
5130 5130 "sd_get_physical_geometry: Geometry Parameters (page 4)\n");
5131 5131 SD_INFO(SD_LOG_COMMON, un,
5132 5132 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm);
5133 5133 SD_INFO(SD_LOG_COMMON, un,
5134 5134 " computed capacity(h*s*c): %d;\n", modesense_capacity);
5135 5135 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n",
5136 5136 (void *)pgeom_p, capacity);
5137 5137
5138 5138 /*
5139 5139 * Compensate if the drive's geometry is not rectangular, i.e.,
5140 5140 * the product of C * H * S returned by MODE SENSE >= that returned
5141 5141 * by read capacity. This is an idiosyncrasy of the original x86
5142 5142 * disk subsystem.
5143 5143 */
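	/*
	 * Worked example with illustrative numbers: if MODE SENSE yields
	 * C*H*S = 1,050,000 blocks with spc = 1,000 while READ CAPACITY
	 * reports 1,000,000, then g_acyl = (50,000 + 999) / 1,000 = 50
	 * alternate cylinders and g_ncyl is reduced by the same amount.
	 */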
5144 5144 if (modesense_capacity >= capacity) {
5145 5145 SD_INFO(SD_LOG_COMMON, un,
5146 5146 "sd_get_physical_geometry: adjusting acyl; "
5147 5147 "old: %d; new: %d\n", pgeom_p->g_acyl,
5148 5148 (modesense_capacity - capacity + spc - 1) / spc);
5149 5149 if (sector_size != 0) {
5150 5150 /* 1243403: NEC D38x7 drives don't support sec size */
5151 5151 pgeom_p->g_secsize = (unsigned short)sector_size;
5152 5152 }
5153 5153 pgeom_p->g_nsect = (unsigned short)nsect;
5154 5154 pgeom_p->g_nhead = (unsigned short)nhead;
5155 5155 pgeom_p->g_capacity = capacity;
5156 5156 pgeom_p->g_acyl =
5157 5157 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc;
5158 5158 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl;
5159 5159 }
5160 5160
5161 5161 pgeom_p->g_rpm = (unsigned short)rpm;
5162 5162 pgeom_p->g_intrlv = (unsigned short)intrlv;
5163 5163 ret = 0;
5164 5164
5165 5165 SD_INFO(SD_LOG_COMMON, un,
5166 5166 "sd_get_physical_geometry: mode sense geometry:\n");
5167 5167 SD_INFO(SD_LOG_COMMON, un,
5168 5168 " nsect: %d; sector size: %d; interlv: %d\n",
5169 5169 nsect, sector_size, intrlv);
5170 5170 SD_INFO(SD_LOG_COMMON, un,
5171 5171 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n",
5172 5172 nhead, ncyl, rpm, modesense_capacity);
5173 5173 SD_INFO(SD_LOG_COMMON, un,
5174 5174 "sd_get_physical_geometry: (cached)\n");
5175 5175 SD_INFO(SD_LOG_COMMON, un,
5176 5176 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n",
5177 5177 pgeom_p->g_ncyl, pgeom_p->g_acyl,
5178 5178 pgeom_p->g_nhead, pgeom_p->g_nsect);
5179 5179 SD_INFO(SD_LOG_COMMON, un,
5180 5180 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n",
5181 5181 pgeom_p->g_secsize, pgeom_p->g_capacity,
5182 5182 pgeom_p->g_intrlv, pgeom_p->g_rpm);
5183 5183 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
5184 5184
5185 5185 page4_exit:
5186 5186 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH);
5187 5187
5188 5188 page3_exit:
5189 5189 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH);
5190 5190
5191 5191 if (status != 0) {
5192 5192 if (status == EIO) {
5193 5193 /*
5194 5194 			 * Some disks do not support MODE SENSE(6); we
5195 5195 			 * should ignore this kind of error (sense key is
5196 5196 			 * 0x5 - illegal request).
5197 5197 */
5198 5198 uint8_t *sensep;
5199 5199 int senlen;
5200 5200
5201 5201 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
5202 5202 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
5203 5203 ssc->ssc_uscsi_cmd->uscsi_rqresid);
5204 5204
5205 5205 if (senlen > 0 &&
5206 5206 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
5207 5207 sd_ssc_assessment(ssc,
5208 5208 SD_FMT_IGNORE_COMPROMISE);
5209 5209 } else {
5210 5210 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
5211 5211 }
5212 5212 } else {
5213 5213 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5214 5214 }
5215 5215 }
5216 5216 sd_ssc_fini(ssc);
5217 5217 return (ret);
5218 5218 }
5219 5219
5220 5220 /*
5221 5221 * Function: sd_get_virtual_geometry
5222 5222 *
5223 5223 * Description: Ask the controller to tell us about the target device.
5224 5224 *
5225 5225 * Arguments: un - pointer to softstate
5226 5226 * capacity - disk capacity in #blocks
5227 5227 * lbasize - disk block size in bytes
5228 5228 *
5229 5229 * Context: Kernel thread only
5230 5230 */
5231 5231
5232 5232 static int
5233 5233 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p,
5234 5234 diskaddr_t capacity, int lbasize)
5235 5235 {
5236 5236 uint_t geombuf;
5237 5237 int spc;
5238 5238
5239 5239 ASSERT(un != NULL);
5240 5240
5241 5241 /* Set sector size, and total number of sectors */
5242 5242 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1);
5243 5243 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1);
5244 5244
5245 5245 /* Let the HBA tell us its geometry */
5246 5246 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1);
5247 5247
5248 5248 /* A value of -1 indicates an undefined "geometry" property */
5249 5249 if (geombuf == (-1)) {
5250 5250 return (EINVAL);
5251 5251 }
5252 5252
5253 5253 /* Initialize the logical geometry cache. */
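	/*
	 * The "geometry" capability packs nhead into the upper 16 bits
	 * and nsect into the lower 16 bits; e.g. (illustrative) a geombuf
	 * of 0x00ff003f decodes to 255 heads and 63 sectors per track.
	 */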
5254 5254 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff;
5255 5255 lgeom_p->g_nsect = geombuf & 0xffff;
5256 5256 lgeom_p->g_secsize = un->un_sys_blocksize;
5257 5257
5258 5258 spc = lgeom_p->g_nhead * lgeom_p->g_nsect;
5259 5259
5260 5260 /*
5261 5261 * Note: The driver originally converted the capacity value from
5262 5262 * target blocks to system blocks. However, the capacity value passed
5263 5263 * to this routine is already in terms of system blocks (this scaling
5264 5264 * is done when the READ CAPACITY command is issued and processed).
5265 5265 * This 'error' may have gone undetected because the usage of g_ncyl
5266 5266  * (which is based upon g_capacity) is very limited within the driver.
5267 5267 */
5268 5268 lgeom_p->g_capacity = capacity;
5269 5269
5270 5270 /*
5271 5271 * Set ncyl to zero if the hba returned a zero nhead or nsect value. The
5272 5272 * hba may return zero values if the device has been removed.
5273 5273 */
5274 5274 if (spc == 0) {
5275 5275 lgeom_p->g_ncyl = 0;
5276 5276 } else {
5277 5277 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc;
5278 5278 }
5279 5279 lgeom_p->g_acyl = 0;
5280 5280
5281 5281 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n");
5282 5282 return (0);
5283 5283 }
5284 5284 
5285 5285 /*
5286 5286 * Function: sd_update_block_info
5287 5287 *
5288 5288  * Description: Update the unit's target block size and capacity in
5289 5289  *		the soft state, and refresh the capacity errstat as needed.
5290 5290 *
5291 5291 * Arguments: un: unit struct.
5292 5292 * lbasize: new target sector size
5293 5293 * capacity: new target capacity, ie. block count
5294 5294 *
5295 5295 * Context: Kernel thread context
5296 5296 */
5297 5297
5298 5298 static void
5299 5299 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity)
5300 5300 {
5301 5301 if (lbasize != 0) {
5302 5302 un->un_tgt_blocksize = lbasize;
5303 5303 un->un_f_tgt_blocksize_is_valid = TRUE;
5304 5304 if (!un->un_f_has_removable_media) {
5305 5305 un->un_sys_blocksize = lbasize;
5306 5306 }
5307 5307 }
5308 5308
5309 5309 if (capacity != 0) {
5310 5310 un->un_blockcount = capacity;
5311 5311 un->un_f_blockcount_is_valid = TRUE;
5312 5312
5313 5313 /*
5314 5314 * The capacity has changed so update the errstats.
5315 5315 */
5316 5316 if (un->un_errstats != NULL) {
5317 5317 struct sd_errstats *stp;
5318 5318
5319 5319 capacity *= un->un_sys_blocksize;
5320 5320 stp = (struct sd_errstats *)un->un_errstats->ks_data;
5321 5321 if (stp->sd_capacity.value.ui64 < capacity)
5322 5322 stp->sd_capacity.value.ui64 = capacity;
5323 5323 }
5324 5324 }
5325 5325 }
5326 5326
5327 5327
5328 5328 /*
5329 5329 * Function: sd_register_devid
5330 5330 *
5331 5331 * Description: This routine will obtain the device id information from the
5332 5332 * target, obtain the serial number, and register the device
5333 5333 * id with the ddi framework.
5334 5334 *
5335 5335  * Arguments:	ssc - sd_ssc_t handle wrapping the driver soft state
5336 5336  *		devi - the system's dev_info_t for the device.
5337 5337 * reservation_flag - indicates if a reservation conflict
5338 5338 * occurred during attach
5339 5339 *
5340 5340 * Context: Kernel Thread
5341 5341 */
5342 5342 static void
5343 5343 sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag)
5344 5344 {
5345 5345 int rval = 0;
5346 5346 uchar_t *inq80 = NULL;
5347 5347 size_t inq80_len = MAX_INQUIRY_SIZE;
5348 5348 size_t inq80_resid = 0;
5349 5349 uchar_t *inq83 = NULL;
5350 5350 size_t inq83_len = MAX_INQUIRY_SIZE;
5351 5351 size_t inq83_resid = 0;
5352 5352 int dlen, len;
5353 5353 char *sn;
5354 5354 struct sd_lun *un;
5355 5355
5356 5356 ASSERT(ssc != NULL);
5357 5357 un = ssc->ssc_un;
5358 5358 ASSERT(un != NULL);
5359 5359 ASSERT(mutex_owned(SD_MUTEX(un)));
5360 5360 ASSERT((SD_DEVINFO(un)) == devi);
5361 5361 
5363 5363 /*
5364 5364 * We check the availability of the World Wide Name (0x83) and Unit
5365 5365 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using
5366 5366 * un_vpd_page_mask from them, we decide which way to get the WWN. If
5367 5367 * 0x83 is available, that is the best choice. Our next choice is
5368 5368 * 0x80. If neither are available, we munge the devid from the device
5369 5369 * vid/pid/serial # for Sun qualified disks, or use the ddi framework
5370 5370 * to fabricate a devid for non-Sun qualified disks.
5371 5371 */
5372 5372 if (sd_check_vpd_page_support(ssc) == 0) {
5373 5373 /* collect page 80 data if available */
5374 5374 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {
5375 5375
5376 5376 mutex_exit(SD_MUTEX(un));
5377 5377 inq80 = kmem_zalloc(inq80_len, KM_SLEEP);
5378 5378
5379 5379 rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len,
5380 5380 0x01, 0x80, &inq80_resid);
5381 5381
5382 5382 if (rval != 0) {
5383 5383 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5384 5384 kmem_free(inq80, inq80_len);
5385 5385 inq80 = NULL;
5386 5386 inq80_len = 0;
5387 5387 } else if (ddi_prop_exists(
5388 5388 DDI_DEV_T_NONE, SD_DEVINFO(un),
5389 5389 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
5390 5390 INQUIRY_SERIAL_NO) == 0) {
5391 5391 /*
5392 5392 * If we don't already have a serial number
5393 5393 * property, do quick verify of data returned
5394 5394 * and define property.
5395 5395 */
5396 5396 dlen = inq80_len - inq80_resid;
5397 5397 len = (size_t)inq80[3];
5398 5398 if ((dlen >= 4) && ((len + 4) <= dlen)) {
5399 5399 /*
5400 5400 * Ensure sn termination, skip leading
5401 5401 * blanks, and create property
5402 5402 * 'inquiry-serial-no'.
5403 5403 */
5404 5404 sn = (char *)&inq80[4];
5405 5405 sn[len] = 0;
5406 5406 while (*sn && (*sn == ' '))
5407 5407 sn++;
5408 5408 if (*sn) {
5409 5409 (void) ddi_prop_update_string(
5410 5410 DDI_DEV_T_NONE,
5411 5411 SD_DEVINFO(un),
5412 5412 INQUIRY_SERIAL_NO, sn);
5413 5413 }
5414 5414 }
5415 5415 }
5416 5416 mutex_enter(SD_MUTEX(un));
5417 5417 }
5418 5418
5419 5419 /* collect page 83 data if available */
5420 5420 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
5421 5421 mutex_exit(SD_MUTEX(un));
5422 5422 inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
5423 5423
5424 5424 rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len,
5425 5425 0x01, 0x83, &inq83_resid);
5426 5426
5427 5427 if (rval != 0) {
5428 5428 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5429 5429 kmem_free(inq83, inq83_len);
5430 5430 inq83 = NULL;
5431 5431 inq83_len = 0;
5432 5432 }
5433 5433 mutex_enter(SD_MUTEX(un));
5434 5434 }
5435 5435 }
5436 5436
5437 5437 /*
5438 5438 * If transport has already registered a devid for this target
5439 5439 * then that takes precedence over the driver's determination
5440 5440 * of the devid.
5441 5441 *
5442 5442 * NOTE: The reason this check is done here instead of at the beginning
5443 5443 * of the function is to allow the code above to create the
5444 5444 * 'inquiry-serial-no' property.
5445 5445 */
5446 5446 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) {
5447 5447 ASSERT(un->un_devid);
5448 5448 un->un_f_devid_transport_defined = TRUE;
5449 5449 goto cleanup; /* use devid registered by the transport */
5450 5450 }
5451 5451
5452 5452 /*
5453 5453 * This is the case of antiquated Sun disk drives that have the
5454 5454 * FAB_DEVID property set in the disk_table. These drives
5455 5455 * manage the devid's by storing them in last 2 available sectors
5456 5456 * on the drive and have them fabricated by the ddi layer by calling
5457 5457 * ddi_devid_init and passing the DEVID_FAB flag.
5458 5458 */
5459 5459 if (un->un_f_opt_fab_devid == TRUE) {
5460 5460 /*
5461 5461 * Depending on EINVAL isn't reliable, since a reserved disk
5462 5462 * may result in invalid geometry, so check to make sure a
5463 5463 * reservation conflict did not occur during attach.
5464 5464 */
5465 5465 if ((sd_get_devid(ssc) == EINVAL) &&
5466 5466 (reservation_flag != SD_TARGET_IS_RESERVED)) {
5467 5467 /*
5468 5468 * The devid is invalid AND there is no reservation
5469 5469 * conflict. Fabricate a new devid.
5470 5470 */
5471 5471 (void) sd_create_devid(ssc);
5472 5472 }
5473 5473
5474 5474 /* Register the devid if it exists */
5475 5475 if (un->un_devid != NULL) {
5476 5476 (void) ddi_devid_register(SD_DEVINFO(un),
5477 5477 un->un_devid);
5478 5478 SD_INFO(SD_LOG_ATTACH_DETACH, un,
5479 5479 "sd_register_devid: Devid Fabricated\n");
5480 5480 }
5481 5481 goto cleanup;
5482 5482 }
5483 5483
5484 5484 /* encode best devid possible based on data available */
5485 5485 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
5486 5486 (char *)ddi_driver_name(SD_DEVINFO(un)),
5487 5487 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
5488 5488 inq80, inq80_len - inq80_resid, inq83, inq83_len -
5489 5489 inq83_resid, &un->un_devid) == DDI_SUCCESS) {
5490 5490
5491 5491 /* devid successfully encoded, register devid */
5492 5492 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);
5493 5493
5494 5494 } else {
5495 5495 /*
5496 5496 * Unable to encode a devid based on data available.
5497 5497 * This is not a Sun qualified disk. Older Sun disk
5498 5498 * drives that have the SD_FAB_DEVID property
5499 5499 * set in the disk_table and non Sun qualified
5500 5500 * disks are treated in the same manner. These
5501 5501 * drives manage the devid's by storing them in
5502 5502 * last 2 available sectors on the drive and
5503 5503 * have them fabricated by the ddi layer by
5504 5504 * calling ddi_devid_init and passing the
5505 5505 * DEVID_FAB flag.
5506 5506 		 * Create a fabricated devid only if one does not
5507 5507 		 * already exist.
5508 5508 */
5509 5509 if (sd_get_devid(ssc) == EINVAL) {
5510 5510 (void) sd_create_devid(ssc);
5511 5511 }
5512 5512 un->un_f_opt_fab_devid = TRUE;
5513 5513
5514 5514 /* Register the devid if it exists */
5515 5515 if (un->un_devid != NULL) {
5516 5516 (void) ddi_devid_register(SD_DEVINFO(un),
5517 5517 un->un_devid);
5518 5518 SD_INFO(SD_LOG_ATTACH_DETACH, un,
5519 5519 "sd_register_devid: devid fabricated using "
5520 5520 "ddi framework\n");
5521 5521 }
5522 5522 }
5523 5523
5524 5524 cleanup:
5525 5525 /* clean up resources */
5526 5526 if (inq80 != NULL) {
5527 5527 kmem_free(inq80, inq80_len);
5528 5528 }
5529 5529 if (inq83 != NULL) {
5530 5530 kmem_free(inq83, inq83_len);
5531 5531 }
5532 5532 }
5533 5533
5534 5534
5535 5535
5536 5536 /*
5537 5537 * Function: sd_get_devid
5538 5538 *
5539 5539 * Description: This routine will return 0 if a valid device id has been
5540 5540 * obtained from the target and stored in the soft state. If a
5541 5541 * valid device id has not been previously read and stored, a
5542 5542 * read attempt will be made.
5543 5543 *
5544 5544  * Arguments:	ssc - sd_ssc_t handle wrapping the driver soft state
5545 5545 *
5546 5546 * Return Code: 0 if we successfully get the device id
5547 5547 *
5548 5548 * Context: Kernel Thread
5549 5549 */
5550 5550
5551 5551 static int
5552 5552 sd_get_devid(sd_ssc_t *ssc)
5553 5553 {
5554 5554 struct dk_devid *dkdevid;
5555 5555 ddi_devid_t tmpid;
5556 5556 uint_t *ip;
5557 5557 size_t sz;
5558 5558 diskaddr_t blk;
5559 5559 int status;
5560 5560 int chksum;
5561 5561 int i;
5562 5562 size_t buffer_size;
5563 5563 struct sd_lun *un;
5564 5564
5565 5565 ASSERT(ssc != NULL);
5566 5566 un = ssc->ssc_un;
5567 5567 ASSERT(un != NULL);
5568 5568 ASSERT(mutex_owned(SD_MUTEX(un)));
5569 5569
5570 5570 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n",
5571 5571 un);
5572 5572
5573 5573 if (un->un_devid != NULL) {
5574 5574 return (0);
5575 5575 }
5576 5576
5577 5577 mutex_exit(SD_MUTEX(un));
5578 5578 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
5579 5579 (void *)SD_PATH_DIRECT) != 0) {
5580 5580 mutex_enter(SD_MUTEX(un));
5581 5581 return (EINVAL);
5582 5582 }
5583 5583
5584 5584 /*
5585 5585 * Read and verify device id, stored in the reserved cylinders at the
5586 5586 * end of the disk. Backup label is on the odd sectors of the last
5587 5587 	 * track of the last cylinder. The device id is on a track of the
5588 5588 	 * next-to-last cylinder.
5589 5589 */
5590 5590 mutex_enter(SD_MUTEX(un));
5591 5591 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid));
5592 5592 mutex_exit(SD_MUTEX(un));
5593 5593 dkdevid = kmem_alloc(buffer_size, KM_SLEEP);
5594 5594 status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk,
5595 5595 SD_PATH_DIRECT);
5596 5596
5597 5597 if (status != 0) {
5598 5598 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5599 5599 goto error;
5600 5600 }
5601 5601
5602 5602 /* Validate the revision */
5603 5603 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
5604 5604 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
5605 5605 status = EINVAL;
5606 5606 goto error;
5607 5607 }
5608 5608
5609 5609 /* Calculate the checksum */
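	/*
	 * The sector is validated by XOR-ing every 32-bit word of the
	 * DEV_BSIZE block except the last one, which holds the checksum
	 * stored by sd_write_deviceid().
	 */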
5610 5610 chksum = 0;
5611 5611 ip = (uint_t *)dkdevid;
5612 5612 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
5613 5613 i++) {
5614 5614 chksum ^= ip[i];
5615 5615 }
5616 5616
5617 5617 /* Compare the checksums */
5618 5618 if (DKD_GETCHKSUM(dkdevid) != chksum) {
5619 5619 status = EINVAL;
5620 5620 goto error;
5621 5621 }
5622 5622
5623 5623 /* Validate the device id */
5624 5624 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
5625 5625 status = EINVAL;
5626 5626 goto error;
5627 5627 }
5628 5628
5629 5629 /*
5630 5630 * Store the device id in the driver soft state
5631 5631 */
5632 5632 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
5633 5633 tmpid = kmem_alloc(sz, KM_SLEEP);
5634 5634
5635 5635 mutex_enter(SD_MUTEX(un));
5636 5636
5637 5637 un->un_devid = tmpid;
5638 5638 bcopy(&dkdevid->dkd_devid, un->un_devid, sz);
5639 5639
5640 5640 kmem_free(dkdevid, buffer_size);
5641 5641
5642 5642 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un);
5643 5643
5644 5644 return (status);
5645 5645 error:
5646 5646 mutex_enter(SD_MUTEX(un));
5647 5647 kmem_free(dkdevid, buffer_size);
5648 5648 return (status);
5649 5649 }
5650 5650
5651 5651
5652 5652 /*
5653 5653 * Function: sd_create_devid
5654 5654 *
5655 5655 * Description: This routine will fabricate the device id and write it
5656 5656 * to the disk.
5657 5657 *
5658 5658  * Arguments:	ssc - sd_ssc_t handle wrapping the driver soft state
5659 5659 *
5660 5660  * Return Code:	the fabricated device id, or NULL on failure
5661 5661 *
5662 5662 * Context: Kernel Thread
5663 5663 */
5664 5664
5665 5665 static ddi_devid_t
5666 5666 sd_create_devid(sd_ssc_t *ssc)
5667 5667 {
5668 5668 struct sd_lun *un;
5669 5669
5670 5670 ASSERT(ssc != NULL);
5671 5671 un = ssc->ssc_un;
5672 5672 ASSERT(un != NULL);
5673 5673
5674 5674 /* Fabricate the devid */
5675 5675 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid)
5676 5676 == DDI_FAILURE) {
5677 5677 return (NULL);
5678 5678 }
5679 5679
5680 5680 /* Write the devid to disk */
5681 5681 if (sd_write_deviceid(ssc) != 0) {
5682 5682 ddi_devid_free(un->un_devid);
5683 5683 un->un_devid = NULL;
5684 5684 }
5685 5685
5686 5686 return (un->un_devid);
5687 5687 }
5688 5688
5689 5689
5690 5690 /*
5691 5691 * Function: sd_write_deviceid
5692 5692 *
5693 5693 * Description: This routine will write the device id to the disk
5694 5694 * reserved sector.
5695 5695 *
5696 5696  * Arguments:	ssc - sd_ssc_t handle wrapping the driver soft state
5697 5697 *
5698 5698  * Return Code:	-1 - the devid block could not be located
5699 5699  *		value returned by sd_send_scsi_WRITE otherwise
5700 5700 *
5701 5701 * Context: Kernel Thread
5702 5702 */
5703 5703
5704 5704 static int
5705 5705 sd_write_deviceid(sd_ssc_t *ssc)
5706 5706 {
5707 5707 struct dk_devid *dkdevid;
5708 5708 uchar_t *buf;
5709 5709 diskaddr_t blk;
5710 5710 uint_t *ip, chksum;
5711 5711 int status;
5712 5712 int i;
5713 5713 struct sd_lun *un;
5714 5714
5715 5715 ASSERT(ssc != NULL);
5716 5716 un = ssc->ssc_un;
5717 5717 ASSERT(un != NULL);
5718 5718 ASSERT(mutex_owned(SD_MUTEX(un)));
5719 5719
5720 5720 mutex_exit(SD_MUTEX(un));
5721 5721 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
5722 5722 (void *)SD_PATH_DIRECT) != 0) {
5723 5723 mutex_enter(SD_MUTEX(un));
5724 5724 return (-1);
5725 5725 }
5726 5726 
5728 5728 /* Allocate the buffer */
5729 5729 buf = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);
5730 5730 dkdevid = (struct dk_devid *)buf;
5731 5731
5732 5732 /* Fill in the revision */
5733 5733 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
5734 5734 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;
5735 5735
5736 5736 /* Copy in the device id */
5737 5737 mutex_enter(SD_MUTEX(un));
5738 5738 bcopy(un->un_devid, &dkdevid->dkd_devid,
5739 5739 ddi_devid_sizeof(un->un_devid));
5740 5740 mutex_exit(SD_MUTEX(un));
5741 5741
5742 5742 /* Calculate the checksum */
5743 5743 chksum = 0;
5744 5744 ip = (uint_t *)dkdevid;
5745 5745 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
5746 5746 i++) {
5747 5747 chksum ^= ip[i];
5748 5748 }
5749 5749
5750 5750 /* Fill-in checksum */
5751 5751 DKD_FORMCHKSUM(chksum, dkdevid);
5752 5752
5753 5753 /* Write the reserved sector */
5754 5754 status = sd_send_scsi_WRITE(ssc, buf, un->un_sys_blocksize, blk,
5755 5755 SD_PATH_DIRECT);
5756 5756 if (status != 0)
5757 5757 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5758 5758
5759 5759 kmem_free(buf, un->un_sys_blocksize);
5760 5760
5761 5761 mutex_enter(SD_MUTEX(un));
5762 5762 return (status);
5763 5763 }
5764 5764
5765 5765
5766 5766 /*
5767 5767 * Function: sd_check_vpd_page_support
5768 5768 *
5769 5769 * Description: This routine sends an inquiry command with the EVPD bit set and
5770 5770 * a page code of 0x00 to the device. It is used to determine which
5771 5771 * vital product pages are available to find the devid. We are
5772 5772  *		looking for pages 0x83, 0x80, or 0xB1. If this routine
5773 5773  *		returns -1, the device does not support the command.
5774 5774  *
5775 5775  * Arguments:	ssc - sd_ssc_t handle wrapping the driver soft state
5776 5776  *
5777 5777  * Return Code:	0 - success, supported pages in un_vpd_page_mask
5778 5778  *		-1 - the device does not support VPD pages
5779 5779 *
5780 5780 * Context: This routine can sleep.
5781 5781 */
5782 5782
5783 5783 static int
5784 5784 sd_check_vpd_page_support(sd_ssc_t *ssc)
5785 5785 {
5786 5786 uchar_t *page_list = NULL;
5787 5787 uchar_t page_length = 0xff; /* Use max possible length */
5788 5788 uchar_t evpd = 0x01; /* Set the EVPD bit */
5789 5789 uchar_t page_code = 0x00; /* Supported VPD Pages */
5790 5790 int rval = 0;
5791 5791 int counter;
5792 5792 struct sd_lun *un;
5793 5793
5794 5794 ASSERT(ssc != NULL);
5795 5795 un = ssc->ssc_un;
5796 5796 ASSERT(un != NULL);
5797 5797 ASSERT(mutex_owned(SD_MUTEX(un)));
5798 5798
5799 5799 mutex_exit(SD_MUTEX(un));
5800 5800
5801 5801 /*
5802 5802 * We'll set the page length to the maximum to save figuring it out
5803 5803 * with an additional call.
5804 5804 */
5805 5805 page_list = kmem_zalloc(page_length, KM_SLEEP);
5806 5806
5807 5807 rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd,
5808 5808 page_code, NULL);
5809 5809
5810 5810 if (rval != 0)
5811 5811 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5812 5812
5813 5813 mutex_enter(SD_MUTEX(un));
5814 5814
5815 5815 /*
5816 5816 * Now we must validate that the device accepted the command, as some
5817 5817 * drives do not support it. If the drive does support it, we will
5818 5818 * return 0, and the supported pages will be in un_vpd_page_mask. If
5819 5819 * not, we return -1.
5820 5820 */
5821 5821 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) {
5822 5822 		/* Loop to find the VPD pages we need */
5823 5823 counter = 4; /* Supported pages start at byte 4, with 0x00 */
5824 5824
5825 5825 /*
5826 5826 * Pages are returned in ascending order, and 0x83 is what we
5827 5827 * are hoping for.
5828 5828 */
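		/*
		 * Illustrative reply: a device supporting pages 0x00,
		 * 0x80, and 0x83 returns 00 00 00 03 00 80 83, so with a
		 * page length of 3 the walk below visits bytes 4 through
		 * 6 (assuming VPD_HEAD_OFFSET is 3).
		 */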
5829 5829 while ((page_list[counter] <= 0xB1) &&
5830 5830 (counter <= (page_list[VPD_PAGE_LENGTH] +
5831 5831 VPD_HEAD_OFFSET))) {
5832 5832 /*
5833 5833 			 * page_list[VPD_PAGE_LENGTH] holds the number of
5834 5834 			 * page codes that follow the 4-byte header.
5835 5835 */
5836 5836
5837 5837 switch (page_list[counter]) {
5838 5838 case 0x00:
5839 5839 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG;
5840 5840 break;
5841 5841 case 0x80:
5842 5842 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG;
5843 5843 break;
5844 5844 case 0x81:
5845 5845 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG;
5846 5846 break;
5847 5847 case 0x82:
5848 5848 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG;
5849 5849 break;
5850 5850 case 0x83:
5851 5851 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG;
5852 5852 break;
5853 5853 case 0x86:
5854 5854 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG;
5855 5855 break;
5856 5856 case 0xB1:
5857 5857 un->un_vpd_page_mask |= SD_VPD_DEV_CHARACTER_PG;
5858 5858 break;
5859 5859 }
5860 5860 counter++;
5861 5861 }
5862 5862
5863 5863 } else {
5864 5864 rval = -1;
5865 5865
5866 5866 SD_INFO(SD_LOG_ATTACH_DETACH, un,
5867 5867 "sd_check_vpd_page_support: This drive does not implement "
5868 5868 "VPD pages.\n");
5869 5869 }
5870 5870
5871 5871 kmem_free(page_list, page_length);
5872 5872
5873 5873 return (rval);
5874 5874 }
5875 5875
5876 5876
5877 5877 /*
5878 5878 * Function: sd_setup_pm
5879 5879 *
5880 5880 * Description: Initialize Power Management on the device
5881 5881 *
5882 5882 * Context: Kernel Thread
5883 5883 */
5884 5884
5885 5885 static void
5886 5886 sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi)
5887 5887 {
5888 5888 uint_t log_page_size;
5889 5889 uchar_t *log_page_data;
5890 5890 int rval = 0;
5891 5891 struct sd_lun *un;
5892 5892
5893 5893 ASSERT(ssc != NULL);
5894 5894 un = ssc->ssc_un;
5895 5895 ASSERT(un != NULL);
5896 5896
5897 5897 /*
5898 5898 * Since we are called from attach, holding a mutex for
5899 5899 * un is unnecessary. Because some of the routines called
5900 5900 * from here require SD_MUTEX to not be held, assert this
5901 5901 * right up front.
5902 5902 */
5903 5903 ASSERT(!mutex_owned(SD_MUTEX(un)));
5904 5904 /*
5905 5905 * Since the sd device does not have the 'reg' property,
5906 5906 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
5907 5907 * The following code is to tell cpr that this device
5908 5908 * DOES need to be suspended and resumed.
5909 5909 */
5910 5910 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
5911 5911 "pm-hardware-state", "needs-suspend-resume");
5912 5912
5913 5913 	/*
5914 5914 	 * This complies with the new power management framework
5915 5915 	 * for certain desktop machines. Create the pm-components
5916 5916 	 * property as a string array property.
5917 5917 	 * If un_f_pm_supported is TRUE, the HBA to which the disk
5918 5918 	 * is attached has set the "pm-capable" property and the
5919 5919 	 * value of that property is greater than 0.
5920 5920 	 */
5921 5921 if (un->un_f_pm_supported) {
5922 5922 		/*
5923 5923 		 * Not all devices have a motor, so try it first: the
5924 5924 		 * following START_STOP_UNIT is used to check whether
5925 5925 		 * the target device has one. Some devices may return
5926 5926 		 * ILLEGAL REQUEST, and some will hang when asked to
5927 5927 		 * stop or start the motor.
5928 5928 		 */
5929 5929 un->un_f_start_stop_supported = TRUE;
5930 5930
5931 5931 if (un->un_f_power_condition_supported) {
5932 5932 rval = sd_send_scsi_START_STOP_UNIT(ssc,
5933 5933 SD_POWER_CONDITION, SD_TARGET_ACTIVE,
5934 5934 SD_PATH_DIRECT);
5935 5935 if (rval != 0) {
5936 5936 un->un_f_power_condition_supported = FALSE;
5937 5937 }
5938 5938 }
5939 5939 if (!un->un_f_power_condition_supported) {
5940 5940 rval = sd_send_scsi_START_STOP_UNIT(ssc,
5941 5941 SD_START_STOP, SD_TARGET_START, SD_PATH_DIRECT);
5942 5942 }
5943 5943 if (rval != 0) {
5944 5944 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5945 5945 un->un_f_start_stop_supported = FALSE;
5946 5946 }
5947 5947
5948 5948 		/*
5949 5949 		 * Create the pm properties anyway; otherwise the parent
5950 5950 		 * can't go to sleep.
5951 5951 		 */
5952 5952 un->un_f_pm_is_enabled = TRUE;
5953 5953 (void) sd_create_pm_components(devi, un);
5954 5954
5955 5955 /*
5956 5956 * If it claims that log sense is supported, check it out.
5957 5957 */
5958 5958 if (un->un_f_log_sense_supported) {
5959 5959 rval = sd_log_page_supported(ssc,
5960 5960 START_STOP_CYCLE_PAGE);
5961 5961 if (rval == 1) {
5962 5962 /* Page found, use it. */
5963 5963 un->un_start_stop_cycle_page =
5964 5964 START_STOP_CYCLE_PAGE;
5965 5965 } else {
5966 5966 /*
5967 5967 * Page not found or log sense is not
5968 5968 * supported.
5969 5969 * Notice we do not check the old style
5970 5970 * START_STOP_CYCLE_VU_PAGE because this
5971 5971 * code path does not apply to old disks.
5972 5972 */
5973 5973 un->un_f_log_sense_supported = FALSE;
5974 5974 un->un_f_pm_log_sense_smart = FALSE;
5975 5975 }
5976 5976 }
5977 5977
5978 5978 return;
5979 5979 }
5980 5980
5981 5981 	/*
5982 5982 	 * For a disk whose HBA has not set the "pm-capable"
5983 5983 	 * property, check whether it supports power management.
5984 5984 	 */
5985 5985 if (!un->un_f_log_sense_supported) {
5986 5986 un->un_power_level = SD_SPINDLE_ON;
5987 5987 un->un_f_pm_is_enabled = FALSE;
5988 5988 return;
5989 5989 }
5990 5990
5991 5991 rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE);
5992 5992
5993 5993 #ifdef SDDEBUG
5994 5994 if (sd_force_pm_supported) {
5995 5995 /* Force a successful result */
5996 5996 rval = 1;
5997 5997 }
5998 5998 #endif
5999 5999
6000 6000 	/*
6001 6001 	 * If the start-stop cycle counter log page is not supported,
6002 6002 	 * or if the pm-capable property is set to false (0),
6003 6003 	 * then we should not create the pm-components property.
6004 6004 	 */
6005 6005 if (rval == -1) {
6006 6006 /*
6007 6007 * Error.
6008 6008 * Reading log sense failed, most likely this is
6009 6009 * an older drive that does not support log sense.
6010 6010 * If this fails auto-pm is not supported.
6011 6011 */
6012 6012 un->un_power_level = SD_SPINDLE_ON;
6013 6013 un->un_f_pm_is_enabled = FALSE;
6014 6014
6015 6015 } else if (rval == 0) {
6016 6016 /*
6017 6017 * Page not found.
6018 6018 		 * The start/stop cycle counter is implemented as page
6019 6019 		 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For
6020 6020 		 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE).
6021 6021 */
6022 6022 if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) {
6023 6023 /*
6024 6024 * Page found, use this one.
6025 6025 */
6026 6026 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
6027 6027 un->un_f_pm_is_enabled = TRUE;
6028 6028 } else {
6029 6029 /*
6030 6030 * Error or page not found.
6031 6031 * auto-pm is not supported for this device.
6032 6032 */
6033 6033 un->un_power_level = SD_SPINDLE_ON;
6034 6034 un->un_f_pm_is_enabled = FALSE;
6035 6035 }
6036 6036 } else {
6037 6037 /*
6038 6038 * Page found, use it.
6039 6039 */
6040 6040 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
6041 6041 un->un_f_pm_is_enabled = TRUE;
6042 6042 }
6043 6043
6044 6044
6045 6045 if (un->un_f_pm_is_enabled == TRUE) {
6046 6046 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
6047 6047 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
6048 6048
6049 6049 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
6050 6050 log_page_size, un->un_start_stop_cycle_page,
6051 6051 0x01, 0, SD_PATH_DIRECT);
6052 6052
6053 6053 if (rval != 0) {
6054 6054 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6055 6055 }
6056 6056
6057 6057 #ifdef SDDEBUG
6058 6058 if (sd_force_pm_supported) {
6059 6059 /* Force a successful result */
6060 6060 rval = 0;
6061 6061 }
6062 6062 #endif
6063 6063
6064 6064 		/*
6065 6065 		 * If the LOG SENSE for the start/stop cycle counter page
6066 6066 		 * succeeds, then power management is supported and we can
6067 6067 		 * enable auto-pm.
6068 6068 		 */
6069 6069 if (rval == 0) {
6070 6070 (void) sd_create_pm_components(devi, un);
6071 6071 } else {
6072 6072 un->un_power_level = SD_SPINDLE_ON;
6073 6073 un->un_f_pm_is_enabled = FALSE;
6074 6074 }
6075 6075
6076 6076 kmem_free(log_page_data, log_page_size);
6077 6077 }
6078 6078 }
6079 6079
6080 6080
6081 6081 /*
6082 6082 * Function: sd_create_pm_components
6083 6083 *
6084 6084 * Description: Initialize PM property.
6085 6085 *
6086 6086 * Context: Kernel thread context
6087 6087 */
6088 6088
6089 6089 static void
6090 6090 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un)
6091 6091 {
6092 6092 ASSERT(!mutex_owned(SD_MUTEX(un)));
6093 6093
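	/*
	 * Advertise the device's power levels to the PM framework via
	 * the pm-components(9P) string-array property. The arrays below
	 * hold the component name plus one entry per power level: four
	 * levels when the power conditions mechanism is supported, two
	 * for plain start/stop.
	 */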
6094 6094 if (un->un_f_power_condition_supported) {
6095 6095 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
6096 6096 "pm-components", sd_pwr_pc.pm_comp, 5)
6097 6097 != DDI_PROP_SUCCESS) {
6098 6098 un->un_power_level = SD_SPINDLE_ACTIVE;
6099 6099 un->un_f_pm_is_enabled = FALSE;
6100 6100 return;
6101 6101 }
6102 6102 } else {
6103 6103 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
6104 6104 "pm-components", sd_pwr_ss.pm_comp, 3)
6105 6105 != DDI_PROP_SUCCESS) {
6106 6106 un->un_power_level = SD_SPINDLE_ON;
6107 6107 un->un_f_pm_is_enabled = FALSE;
6108 6108 return;
6109 6109 }
6110 6110 }
6111 6111 	/*
6112 6112 	 * When components are initially created they are idle;
6113 6113 	 * power up any non-removable devices.
6114 6114 * Note: the return value of pm_raise_power can't be used
6115 6115 * for determining if PM should be enabled for this device.
6116 6116 * Even if you check the return values and remove this
6117 6117 * property created above, the PM framework will not honor the
6118 6118 * change after the first call to pm_raise_power. Hence,
6119 6119 * removal of that property does not help if pm_raise_power
6120 6120 * fails. In the case of removable media, the start/stop
6121 6121 * will fail if the media is not present.
6122 6122 */
6123 6123 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0,
6124 6124 SD_PM_STATE_ACTIVE(un)) == DDI_SUCCESS)) {
6125 6125 mutex_enter(SD_MUTEX(un));
6126 6126 un->un_power_level = SD_PM_STATE_ACTIVE(un);
6127 6127 mutex_enter(&un->un_pm_mutex);
6128 6128 /* Set to on and not busy. */
6129 6129 un->un_pm_count = 0;
6130 6130 } else {
6131 6131 mutex_enter(SD_MUTEX(un));
6132 6132 un->un_power_level = SD_PM_STATE_STOPPED(un);
6133 6133 mutex_enter(&un->un_pm_mutex);
6134 6134 /* Set to off. */
6135 6135 un->un_pm_count = -1;
6136 6136 }
6137 6137 mutex_exit(&un->un_pm_mutex);
6138 6138 mutex_exit(SD_MUTEX(un));
6139 6139 }
6140 6140
6141 6141
6142 6142 /*
6143 6143 * Function: sd_ddi_suspend
6144 6144 *
6145 6145 * Description: Performs system power-down operations. This includes
6146 6146  *		setting the drive state to indicate it is suspended so
6147 6147 * that no new commands will be accepted. Also, wait for
6148 6148 * all commands that are in transport or queued to a timer
6149 6149 * for retry to complete. All timeout threads are cancelled.
6150 6150 *
6151 6151 * Return Code: DDI_FAILURE or DDI_SUCCESS
6152 6152 *
6153 6153 * Context: Kernel thread context
6154 6154 */
6155 6155
6156 6156 static int
6157 6157 sd_ddi_suspend(dev_info_t *devi)
6158 6158 {
6159 6159 struct sd_lun *un;
6160 6160 clock_t wait_cmds_complete;
6161 6161
6162 6162 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
6163 6163 if (un == NULL) {
6164 6164 return (DDI_FAILURE);
6165 6165 }
6166 6166
6167 6167 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");
6168 6168
6169 6169 mutex_enter(SD_MUTEX(un));
6170 6170
6171 6171 /* Return success if the device is already suspended. */
6172 6172 if (un->un_state == SD_STATE_SUSPENDED) {
6173 6173 mutex_exit(SD_MUTEX(un));
6174 6174 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6175 6175 "device already suspended, exiting\n");
6176 6176 return (DDI_SUCCESS);
6177 6177 }
6178 6178
6179 6179 /* Return failure if the device is being used by HA */
6180 6180 if (un->un_resvd_status &
6181 6181 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
6182 6182 mutex_exit(SD_MUTEX(un));
6183 6183 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6184 6184 "device in use by HA, exiting\n");
6185 6185 return (DDI_FAILURE);
6186 6186 }
6187 6187
6188 6188 /*
6189 6189 * Return failure if the device is in a resource wait
6190 6190 * or power changing state.
6191 6191 */
6192 6192 if ((un->un_state == SD_STATE_RWAIT) ||
6193 6193 (un->un_state == SD_STATE_PM_CHANGING)) {
6194 6194 mutex_exit(SD_MUTEX(un));
6195 6195 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6196 6196 "device in resource wait state, exiting\n");
6197 6197 return (DDI_FAILURE);
6198 6198 }
6199 6199
6200 6200
6201 6201 un->un_save_state = un->un_last_state;
6202 6202 New_state(un, SD_STATE_SUSPENDED);
6203 6203
6204 6204 /*
6205 6205 * Wait for all commands that are in transport or queued to a timer
6206 6206 * for retry to complete.
6207 6207 *
6208 6208 * While waiting, no new commands will be accepted or sent because of
6209 6209 * the new state we set above.
6210 6210 *
6211 6211 * Wait till current operation has completed. If we are in the resource
6212 6212 * wait state (with an intr outstanding) then we need to wait till the
6213 6213 * intr completes and starts the next cmd. We want to wait for
6214 6214 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND.
6215 6215 */
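	/*
	 * wait_cmds_complete is an absolute deadline in lbolt ticks:
	 * sd_wait_cmds_complete seconds times drv_usectohz(1000000)
	 * ticks per second, added to the current lbolt value, for use
	 * with cv_timedwait() below.
	 */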
6216 6216 wait_cmds_complete = ddi_get_lbolt() +
6217 6217 (sd_wait_cmds_complete * drv_usectohz(1000000));
6218 6218
6219 6219 while (un->un_ncmds_in_transport != 0) {
6220 6220 /*
6221 6221 * Fail if commands do not finish in the specified time.
6222 6222 */
6223 6223 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
6224 6224 wait_cmds_complete) == -1) {
6225 6225 /*
6226 6226 * Undo the state changes made above. Everything
6227 6227 			 * must go back to its original value.
6228 6228 */
6229 6229 Restore_state(un);
6230 6230 un->un_last_state = un->un_save_state;
6231 6231 /* Wake up any threads that might be waiting. */
6232 6232 cv_broadcast(&un->un_suspend_cv);
6233 6233 mutex_exit(SD_MUTEX(un));
6234 6234 SD_ERROR(SD_LOG_IO_PM, un,
6235 6235 "sd_ddi_suspend: failed due to outstanding cmds\n");
6236 6236 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n");
6237 6237 return (DDI_FAILURE);
6238 6238 }
6239 6239 }
6240 6240
6241 6241 /*
6242 6242 * Cancel SCSI watch thread and timeouts, if any are active
6243 6243 */
6244 6244
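	/*
	 * The cancellations below share a pattern: clear the timeout id
	 * under SD_MUTEX so the handler can see it was cancelled, then
	 * drop the mutex around untimeout(9F), since the handler itself
	 * may be blocked waiting for SD_MUTEX.
	 */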
6245 6245 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) {
6246 6246 opaque_t temp_token = un->un_swr_token;
6247 6247 mutex_exit(SD_MUTEX(un));
6248 6248 scsi_watch_suspend(temp_token);
6249 6249 mutex_enter(SD_MUTEX(un));
6250 6250 }
6251 6251
6252 6252 if (un->un_reset_throttle_timeid != NULL) {
6253 6253 timeout_id_t temp_id = un->un_reset_throttle_timeid;
6254 6254 un->un_reset_throttle_timeid = NULL;
6255 6255 mutex_exit(SD_MUTEX(un));
6256 6256 (void) untimeout(temp_id);
6257 6257 mutex_enter(SD_MUTEX(un));
6258 6258 }
6259 6259
6260 6260 if (un->un_dcvb_timeid != NULL) {
6261 6261 timeout_id_t temp_id = un->un_dcvb_timeid;
6262 6262 un->un_dcvb_timeid = NULL;
6263 6263 mutex_exit(SD_MUTEX(un));
6264 6264 (void) untimeout(temp_id);
6265 6265 mutex_enter(SD_MUTEX(un));
6266 6266 }
6267 6267
6268 6268 mutex_enter(&un->un_pm_mutex);
6269 6269 if (un->un_pm_timeid != NULL) {
6270 6270 timeout_id_t temp_id = un->un_pm_timeid;
6271 6271 un->un_pm_timeid = NULL;
6272 6272 mutex_exit(&un->un_pm_mutex);
6273 6273 mutex_exit(SD_MUTEX(un));
6274 6274 (void) untimeout(temp_id);
6275 6275 mutex_enter(SD_MUTEX(un));
6276 6276 } else {
6277 6277 mutex_exit(&un->un_pm_mutex);
6278 6278 }
6279 6279
6280 6280 if (un->un_rmw_msg_timeid != NULL) {
6281 6281 timeout_id_t temp_id = un->un_rmw_msg_timeid;
6282 6282 un->un_rmw_msg_timeid = NULL;
6283 6283 mutex_exit(SD_MUTEX(un));
6284 6284 (void) untimeout(temp_id);
6285 6285 mutex_enter(SD_MUTEX(un));
6286 6286 }
6287 6287
6288 6288 if (un->un_retry_timeid != NULL) {
6289 6289 timeout_id_t temp_id = un->un_retry_timeid;
6290 6290 un->un_retry_timeid = NULL;
6291 6291 mutex_exit(SD_MUTEX(un));
6292 6292 (void) untimeout(temp_id);
6293 6293 mutex_enter(SD_MUTEX(un));
6294 6294
6295 6295 if (un->un_retry_bp != NULL) {
6296 6296 un->un_retry_bp->av_forw = un->un_waitq_headp;
6297 6297 un->un_waitq_headp = un->un_retry_bp;
6298 6298 if (un->un_waitq_tailp == NULL) {
6299 6299 un->un_waitq_tailp = un->un_retry_bp;
6300 6300 }
6301 6301 un->un_retry_bp = NULL;
6302 6302 un->un_retry_statp = NULL;
6303 6303 }
6304 6304 }
6305 6305
6306 6306 if (un->un_direct_priority_timeid != NULL) {
6307 6307 timeout_id_t temp_id = un->un_direct_priority_timeid;
6308 6308 un->un_direct_priority_timeid = NULL;
6309 6309 mutex_exit(SD_MUTEX(un));
6310 6310 (void) untimeout(temp_id);
6311 6311 mutex_enter(SD_MUTEX(un));
6312 6312 }
6313 6313
6314 6314 if (un->un_f_is_fibre == TRUE) {
6315 6315 /*
6316 6316 * Remove callbacks for insert and remove events
6317 6317 */
6318 6318 if (un->un_insert_event != NULL) {
6319 6319 mutex_exit(SD_MUTEX(un));
6320 6320 (void) ddi_remove_event_handler(un->un_insert_cb_id);
6321 6321 mutex_enter(SD_MUTEX(un));
6322 6322 un->un_insert_event = NULL;
6323 6323 }
6324 6324
6325 6325 if (un->un_remove_event != NULL) {
6326 6326 mutex_exit(SD_MUTEX(un));
6327 6327 (void) ddi_remove_event_handler(un->un_remove_cb_id);
6328 6328 mutex_enter(SD_MUTEX(un));
6329 6329 un->un_remove_event = NULL;
6330 6330 }
6331 6331 }
6332 6332
6333 6333 mutex_exit(SD_MUTEX(un));
6334 6334
6335 6335 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");
6336 6336
6337 6337 return (DDI_SUCCESS);
6338 6338 }
6339 6339
6340 6340
6341 6341 /*
6342 6342 * Function: sd_ddi_resume
6343 6343 *
6344 6344  * Description: Performs system power-up operations.
6345 6345 *
6346 6346 * Return Code: DDI_SUCCESS
6347 6347 * DDI_FAILURE
6348 6348 *
6349 6349 * Context: Kernel thread context
6350 6350 */
6351 6351
6352 6352 static int
6353 6353 sd_ddi_resume(dev_info_t *devi)
6354 6354 {
6355 6355 struct sd_lun *un;
6356 6356
6357 6357 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
6358 6358 if (un == NULL) {
6359 6359 return (DDI_FAILURE);
6360 6360 }
6361 6361
6362 6362 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");
6363 6363
6364 6364 mutex_enter(SD_MUTEX(un));
6365 6365 Restore_state(un);
6366 6366
6367 6367 	/*
6368 6368 	 * Restore the state which was saved, to give the
6369 6369 	 * right state in un_last_state.
6370 6370 	 */
6371 6371 un->un_last_state = un->un_save_state;
6372 6372 /*
6373 6373 * Note: throttle comes back at full.
6374 6374 * Also note: this MUST be done before calling pm_raise_power
6375 6375 * otherwise the system can get hung in biowait. The scenario where
6376 6376 * this'll happen is under cpr suspend. Writing of the system
6377 6377 * state goes through sddump, which writes 0 to un_throttle. If
6378 6378 	 * writing the system state then fails (for example, if the partition
6379 6379 	 * is too small), then cpr attempts a resume. If throttle isn't restored
6380 6380 * from the saved value until after calling pm_raise_power then
6381 6381 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs
6382 6382 * in biowait.
6383 6383 */
6384 6384 un->un_throttle = un->un_saved_throttle;
6385 6385
6386 6386 	/*
6387 6387 	 * The chance of failure is very rare, as the only command issued in
6388 6388 	 * the power entry point is START, when transitioning from 0->1 or
6389 6389 	 * unknown->1. Put the device into the SPINDLE ON state irrespective
6390 6390 	 * of the state in which it was suspended. Ignore the return value,
6391 6391 	 * as the resume should not fail. In the case of removable media the
6392 6392 	 * media need not be inserted, so there is a chance that raising
6393 6393 	 * power will fail with media not present.
6394 6394 	 */
6395 6395 if (un->un_f_attach_spinup) {
6396 6396 mutex_exit(SD_MUTEX(un));
6397 6397 (void) pm_raise_power(SD_DEVINFO(un), 0,
6398 6398 SD_PM_STATE_ACTIVE(un));
6399 6399 mutex_enter(SD_MUTEX(un));
6400 6400 }
6401 6401
6402 6402 /*
6403 6403 * Don't broadcast to the suspend cv and therefore possibly
6404 6404 * start I/O until after power has been restored.
6405 6405 */
6406 6406 cv_broadcast(&un->un_suspend_cv);
6407 6407 cv_broadcast(&un->un_state_cv);
6408 6408
6409 6409 /* restart thread */
6410 6410 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
6411 6411 scsi_watch_resume(un->un_swr_token);
6412 6412 }
6413 6413
6414 6414 #if (defined(__fibre))
6415 6415 if (un->un_f_is_fibre == TRUE) {
6416 6416 /*
6417 6417 * Add callbacks for insert and remove events
6418 6418 */
6419 6419 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
6420 6420 sd_init_event_callbacks(un);
6421 6421 }
6422 6422 }
6423 6423 #endif
6424 6424
6425 6425 /*
6426 6426 * Transport any pending commands to the target.
6427 6427 *
6428 6428 	 * If this is a low-activity device, commands in the queue will have to
6429 6429 	 * wait until new commands come in, which may take a while. Also, we
6430 6430 	 * specifically don't check un_ncmds_in_transport because we know that
6431 6431 	 * there really are no commands in progress after the unit was
6432 6432 	 * suspended and we could have reached the throttle level, been
6433 6433 	 * suspended, and have no new commands coming in for a while. Highly
6434 6434 * unlikely, but so is the low-activity disk scenario.
6435 6435 */
6436 6436 ddi_xbuf_dispatch(un->un_xbuf_attr);
6437 6437
6438 6438 sd_start_cmds(un, NULL);
6439 6439 mutex_exit(SD_MUTEX(un));
6440 6440
6441 6441 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");
6442 6442
6443 6443 return (DDI_SUCCESS);
6444 6444 }
6445 6445
6446 6446
6447 6447 /*
6448 6448 * Function: sd_pm_state_change
6449 6449 *
6450 6450 * Description: Change the driver power state.
6451 6451 * Someone else is required to actually change the driver
6452 6452 * power level.
6453 6453 *
6454 6454 * Arguments: un - driver soft state (unit) structure
6455 6455 * level - the power level that is changed to
6456 6456 * flag - to decide how to change the power state
6457 6457 *
6458 6458 * Return Code: DDI_SUCCESS
6459 6459 *
6460 6460 * Context: Kernel thread context
6461 6461 */
6462 6462 static int
6463 6463 sd_pm_state_change(struct sd_lun *un, int level, int flag)
6464 6464 {
6465 6465 ASSERT(un != NULL);
6466 6466 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: entry\n");
6467 6467
6468 6468 ASSERT(!mutex_owned(SD_MUTEX(un)));
6469 6469 mutex_enter(SD_MUTEX(un));
6470 6470
6471 6471 if (flag == SD_PM_STATE_ROLLBACK || SD_PM_IS_IO_CAPABLE(un, level)) {
6472 6472 un->un_power_level = level;
6473 6473 ASSERT(!mutex_owned(&un->un_pm_mutex));
6474 6474 mutex_enter(&un->un_pm_mutex);
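		/*
		 * un_pm_count is -1 while the device is in low power;
		 * the increment below returns it to 0 (powered up and
		 * not busy), which the ASSERT verifies.
		 */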
6475 6475 if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
6476 6476 un->un_pm_count++;
6477 6477 ASSERT(un->un_pm_count == 0);
6478 6478 }
6479 6479 mutex_exit(&un->un_pm_mutex);
6480 6480 } else {
6481 6481 /*
6482 6482 * Exit if power management is not enabled for this device,
6483 6483 * or if the device is being used by HA.
6484 6484 */
6485 6485 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
6486 6486 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
6487 6487 mutex_exit(SD_MUTEX(un));
6488 6488 SD_TRACE(SD_LOG_POWER, un,
6489 6489 "sd_pm_state_change: exiting\n");
6490 6490 return (DDI_FAILURE);
6491 6491 }
6492 6492
6493 6493 SD_INFO(SD_LOG_POWER, un, "sd_pm_state_change: "
6494 6494 "un_ncmds_in_driver=%ld\n", un->un_ncmds_in_driver);
6495 6495
6496 6496 /*
6497 6497 	 * See if the device is not busy, i.e.:
6498 6498 * - we have no commands in the driver for this device
6499 6499 * - not waiting for resources
6500 6500 */
6501 6501 if ((un->un_ncmds_in_driver == 0) &&
6502 6502 (un->un_state != SD_STATE_RWAIT)) {
6503 6503 /*
6504 6504 * The device is not busy, so it is OK to go to low
6505 6505 * power state. Indicate low power, but rely on someone
6506 6506 * else to actually change it.
6507 6507 */
6508 6508 mutex_enter(&un->un_pm_mutex);
6509 6509 un->un_pm_count = -1;
6510 6510 mutex_exit(&un->un_pm_mutex);
6511 6511 un->un_power_level = level;
6512 6512 }
6513 6513 }
6514 6514
6515 6515 mutex_exit(SD_MUTEX(un));
6516 6516
6517 6517 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: exit\n");
6518 6518
6519 6519 return (DDI_SUCCESS);
6520 6520 }
6521 6521
6522 6522
6523 6523 /*
6524 6524 * Function: sd_pm_idletimeout_handler
6525 6525 *
6526 6526 * Description: A timer routine that's active only while a device is busy.
6527 6527 * The purpose is to extend slightly the pm framework's busy
6528 6528 * view of the device to prevent busy/idle thrashing for
6529 6529  *		back-to-back commands. Do this by comparing the current time
6530 6530  *		to the time at which the last command completed; when the
6531 6531  *		difference is greater than sd_pm_idletime, call
6532 6532  *		pm_idle_component. In addition to indicating idle to the pm
6533 6533 * framework, update the chain type to again use the internal pm
6534 6534 * layers of the driver.
6535 6535 *
6536 6536 * Arguments: arg - driver soft state (unit) structure
6537 6537 *
6538 6538 * Context: Executes in a timeout(9F) thread context
6539 6539 */
6540 6540
6541 6541 static void
6542 6542 sd_pm_idletimeout_handler(void *arg)
6543 6543 {
6544 6544 const hrtime_t idletime = sd_pm_idletime * NANOSEC;
6545 6545 struct sd_lun *un = arg;
6546 6546
6547 6547 mutex_enter(&sd_detach_mutex);
6548 6548 if (un->un_detach_count != 0) {
6549 6549 /* Abort if the instance is detaching */
6550 6550 mutex_exit(&sd_detach_mutex);
6551 6551 return;
6552 6552 }
6553 6553 mutex_exit(&sd_detach_mutex);
6554 6554
6555 6555 /*
6556 6556 * Grab both mutexes, in the proper order, since we're accessing
6557 6557 * both PM and softstate variables.
6558 6558 */
6559 6559 mutex_enter(SD_MUTEX(un));
6560 6560 mutex_enter(&un->un_pm_mutex);
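	/*
	 * Idle the device only if at least sd_pm_idletime seconds have
	 * passed since the last command completed (un_pm_idle_time is a
	 * gethrtime() timestamp, hence the nanosecond comparison) and
	 * the device has no commands in the driver and is not PM busy.
	 */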
6561 6561 if (((gethrtime() - un->un_pm_idle_time) > idletime) &&
6562 6562 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
6563 6563 /*
6564 6564 * Update the chain types.
6565 6565 		 * This takes effect on the next new command received.
6566 6566 */
6567 6567 if (un->un_f_non_devbsize_supported) {
6568 6568 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
6569 6569 } else {
6570 6570 un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
6571 6571 }
6572 6572 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
6573 6573
6574 6574 SD_TRACE(SD_LOG_IO_PM, un,
6575 6575 "sd_pm_idletimeout_handler: idling device\n");
6576 6576 (void) pm_idle_component(SD_DEVINFO(un), 0);
6577 6577 un->un_pm_idle_timeid = NULL;
6578 6578 } else {
6579 6579 un->un_pm_idle_timeid =
6580 6580 timeout(sd_pm_idletimeout_handler, un,
6581 6581 (drv_usectohz((clock_t)300000))); /* 300 ms. */
6582 6582 }
6583 6583 mutex_exit(&un->un_pm_mutex);
6584 6584 mutex_exit(SD_MUTEX(un));
6585 6585 }
6586 6586
6587 6587
6588 6588 /*
6589 6589 * Function: sd_pm_timeout_handler
6590 6590 *
6591 6591  * Description: Callback to tell the framework we are idle.
6592 6592 *
6593 6593 * Context: timeout(9f) thread context.
6594 6594 */
6595 6595
6596 6596 static void
6597 6597 sd_pm_timeout_handler(void *arg)
6598 6598 {
6599 6599 struct sd_lun *un = arg;
6600 6600
6601 6601 (void) pm_idle_component(SD_DEVINFO(un), 0);
6602 6602 mutex_enter(&un->un_pm_mutex);
6603 6603 un->un_pm_timeid = NULL;
6604 6604 mutex_exit(&un->un_pm_mutex);
6605 6605 }
6606 6606
6607 6607
6608 6608 /*
6609 6609 * Function: sdpower
6610 6610 *
6611 6611 * Description: PM entry point.
6612 6612 *
6613 6613 * Return Code: DDI_SUCCESS
6614 6614 * DDI_FAILURE
6615 6615 *
6616 6616 * Context: Kernel thread context
6617 6617 */
6618 6618
6619 6619 static int
6620 6620 sdpower(dev_info_t *devi, int component, int level)
6621 6621 {
6622 6622 struct sd_lun *un;
6623 6623 int instance;
6624 6624 int rval = DDI_SUCCESS;
6625 6625 uint_t i, log_page_size, maxcycles, ncycles;
6626 6626 uchar_t *log_page_data;
6627 6627 int log_sense_page;
6628 6628 int medium_present;
6629 6629 time_t intvlp;
6630 6630 struct pm_trans_data sd_pm_tran_data;
6631 6631 uchar_t save_state;
6632 6632 int sval;
6633 6633 uchar_t state_before_pm;
6634 6634 int got_semaphore_here;
6635 6635 sd_ssc_t *ssc;
6636 6636 int last_power_level;
6637 6637
6638 6638 instance = ddi_get_instance(devi);
6639 6639
6640 6640 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
6641 6641 !SD_PM_IS_LEVEL_VALID(un, level) || component != 0) {
6642 6642 return (DDI_FAILURE);
6643 6643 }
6644 6644
6645 6645 ssc = sd_ssc_init(un);
6646 6646
6647 6647 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);
6648 6648
6649 6649 /*
6650 6650 * Must synchronize power down with close.
6651 6651 * Attempt to decrement/acquire the open/close semaphore,
6652 6652 * but do NOT wait on it. If it's not greater than zero,
6653 6653 	 * i.e. it can't be decremented without waiting, then
6654 6654 * someone else, either open or close, already has it
6655 6655 * and the try returns 0. Use that knowledge here to determine
6656 6656 * if it's OK to change the device power level.
6657 6657 * Also, only increment it on exit if it was decremented, ie. gotten,
6658 6658 * here.
6659 6659 */
6660 6660 got_semaphore_here = sema_tryp(&un->un_semoclose);
6661 6661
6662 6662 mutex_enter(SD_MUTEX(un));
6663 6663
6664 6664 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
6665 6665 un->un_ncmds_in_driver);
6666 6666
6667 6667 	/*
6668 6668 	 * If un_ncmds_in_driver is non-zero, it indicates commands are
6669 6669 	 * already being processed in the driver; if the semaphore was
6670 6670 	 * not obtained here, it indicates an open or close is being
6671 6671 	 * processed. In either case, a concurrent request to go to a
6672 6672 	 * lower power level that can't perform I/O cannot be honored,
6673 6673 	 * so we need to return failure.
6674 6674 	 */
6675 6675 if ((!SD_PM_IS_IO_CAPABLE(un, level)) &&
6676 6676 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
6677 6677 mutex_exit(SD_MUTEX(un));
6678 6678
6679 6679 if (got_semaphore_here != 0) {
6680 6680 sema_v(&un->un_semoclose);
6681 6681 }
6682 6682 SD_TRACE(SD_LOG_IO_PM, un,
6683 6683 "sdpower: exit, device has queued cmds.\n");
6684 6684
6685 6685 goto sdpower_failed;
6686 6686 }
6687 6687
6688 6688 	/*
6689 6689 	 * If the device is OFFLINE, the disk is completely dead. Since we
6690 6690 	 * change power by sending commands to the disk, any such command
6691 6691 	 * would fail anyway, so return here.
6692 6692 	 *
6693 6693 	 * Power changes to a device that's OFFLINE or SUSPENDED
6694 6694 	 * are not allowed.
6695 6695 	 */
6696 6696 if ((un->un_state == SD_STATE_OFFLINE) ||
6697 6697 (un->un_state == SD_STATE_SUSPENDED)) {
6698 6698 mutex_exit(SD_MUTEX(un));
6699 6699
6700 6700 if (got_semaphore_here != 0) {
6701 6701 sema_v(&un->un_semoclose);
6702 6702 }
6703 6703 SD_TRACE(SD_LOG_IO_PM, un,
6704 6704 "sdpower: exit, device is off-line.\n");
6705 6705
6706 6706 goto sdpower_failed;
6707 6707 }
6708 6708
6709 6709 /*
6710 6710 	 * Change the device's state to indicate its power level
6711 6711 * is being changed. Do this to prevent a power off in the
6712 6712 * middle of commands, which is especially bad on devices
6713 6713 * that are really powered off instead of just spun down.
6714 6714 */
6715 6715 state_before_pm = un->un_state;
6716 6716 un->un_state = SD_STATE_PM_CHANGING;
6717 6717
6718 6718 mutex_exit(SD_MUTEX(un));
6719 6719
6720 6720 	/*
6721 6721 	 * If the LOG SENSE command is not supported, bypass the
6722 6722 	 * following check; otherwise, check the log sense
6723 6723 	 * information for this device.
6724 6724 	 */
6725 6725 if (SD_PM_STOP_MOTOR_NEEDED(un, level) &&
6726 6726 un->un_f_log_sense_supported) {
6727 6727 		/*
6728 6728 		 * Get the log sense information to determine whether the
6729 6729 		 * power-cycle counts have gone beyond the threshold.
6730 6730 		 */
6731 6731 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
6732 6732 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
6733 6733
6734 6734 mutex_enter(SD_MUTEX(un));
6735 6735 log_sense_page = un->un_start_stop_cycle_page;
6736 6736 mutex_exit(SD_MUTEX(un));
6737 6737
6738 6738 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
6739 6739 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
6740 6740
6741 6741 if (rval != 0) {
6742 6742 if (rval == EIO)
6743 6743 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
6744 6744 else
6745 6745 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6746 6746 }
6747 6747
6748 6748 #ifdef SDDEBUG
6749 6749 if (sd_force_pm_supported) {
6750 6750 /* Force a successful result */
6751 6751 rval = 0;
6752 6752 }
6753 6753 #endif
6754 6754 if (rval != 0) {
6755 6755 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
6756 6756 "Log Sense Failed\n");
6757 6757
6758 6758 kmem_free(log_page_data, log_page_size);
6759 6759 /* Cannot support power management on those drives */
6760 6760
6761 6761 if (got_semaphore_here != 0) {
6762 6762 sema_v(&un->un_semoclose);
6763 6763 }
6764 6764 /*
6765 6765 			 * On exit put the state back to its original value
6766 6766 * and broadcast to anyone waiting for the power
6767 6767 * change completion.
6768 6768 */
6769 6769 mutex_enter(SD_MUTEX(un));
6770 6770 un->un_state = state_before_pm;
6771 6771 cv_broadcast(&un->un_suspend_cv);
6772 6772 mutex_exit(SD_MUTEX(un));
6773 6773 SD_TRACE(SD_LOG_IO_PM, un,
6774 6774 "sdpower: exit, Log Sense Failed.\n");
6775 6775
6776 6776 goto sdpower_failed;
6777 6777 }
6778 6778
6779 6779 /*
6780 6780 * From the page data - Convert the essential information to
6781 6781 * pm_trans_data
6782 6782 */
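		/*
		 * The cycle counts are stored as big-endian 32-bit
		 * values at fixed offsets in the start/stop cycle
		 * counter page: lifetime maximum cycles at bytes
		 * 0x1C-0x1F and accumulated cycles at bytes 0x24-0x27.
		 */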
6783 6783 maxcycles =
6784 6784 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
6785 6785 (log_page_data[0x1E] << 8) | log_page_data[0x1F];
6786 6786
6787 6787 ncycles =
6788 6788 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
6789 6789 (log_page_data[0x26] << 8) | log_page_data[0x27];
6790 6790
6791 6791 if (un->un_f_pm_log_sense_smart) {
6792 6792 sd_pm_tran_data.un.smart_count.allowed = maxcycles;
6793 6793 sd_pm_tran_data.un.smart_count.consumed = ncycles;
6794 6794 sd_pm_tran_data.un.smart_count.flag = 0;
6795 6795 sd_pm_tran_data.format = DC_SMART_FORMAT;
6796 6796 } else {
6797 6797 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;
6798 6798 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;
6799 6799 for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
6800 6800 sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
6801 6801 log_page_data[8+i];
6802 6802 }
6803 6803 sd_pm_tran_data.un.scsi_cycles.flag = 0;
6804 6804 sd_pm_tran_data.format = DC_SCSI_FORMAT;
6805 6805 }
6806 6806
6807 6807 kmem_free(log_page_data, log_page_size);
6808 6808
6809 6809 /*
6810 6810 * Call pm_trans_check routine to get the Ok from
6811 6811 * the global policy
6812 6812 */
6813 6813 rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
6814 6814 #ifdef SDDEBUG
6815 6815 if (sd_force_pm_supported) {
6816 6816 /* Force a successful result */
6817 6817 rval = 1;
6818 6818 }
6819 6819 #endif
6820 6820 switch (rval) {
6821 6821 case 0:
6822 6822 /*
6823 6823 			 * Not OK to power cycle, or an error in the parameters
6824 6824 			 * passed. pm_trans_check has given the advised time
6825 6825 			 * (intvlp) to wait before considering a power cycle;
6826 6826 			 * based on it we are supposed to pretend we are busy
6827 6827 			 * so that the pm framework will never call our power
6828 6828 			 * entry point. Because of that, install a timeout
6829 6829 			 * handler and wait for the recommended time to elapse
6830 6830 			 * so that power management can be effective again.
6831 6831 *
6832 6832 * To effect this behavior, call pm_busy_component to
6833 6833 * indicate to the framework this device is busy.
6834 6834 * By not adjusting un_pm_count the rest of PM in
6835 6835 * the driver will function normally, and independent
6836 6836 * of this but because the framework is told the device
6837 6837 * is busy it won't attempt powering down until it gets
6838 6838 * a matching idle. The timeout handler sends this.
6839 6839 * Note: sd_pm_entry can't be called here to do this
6840 6840 * because sdpower may have been called as a result
6841 6841 * of a call to pm_raise_power from within sd_pm_entry.
6842 6842 *
6843 6843 * If a timeout handler is already active then
6844 6844 * don't install another.
6845 6845 */
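			/*
			 * Per pm_trans_check(9F), intvlp is in seconds;
			 * drv_usectohz(1000000) converts one second to
			 * clock ticks for the timeout below.
			 */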
6846 6846 mutex_enter(&un->un_pm_mutex);
6847 6847 if (un->un_pm_timeid == NULL) {
6848 6848 un->un_pm_timeid =
6849 6849 timeout(sd_pm_timeout_handler,
6850 6850 un, intvlp * drv_usectohz(1000000));
6851 6851 mutex_exit(&un->un_pm_mutex);
6852 6852 (void) pm_busy_component(SD_DEVINFO(un), 0);
6853 6853 } else {
6854 6854 mutex_exit(&un->un_pm_mutex);
6855 6855 }
6856 6856 if (got_semaphore_here != 0) {
6857 6857 sema_v(&un->un_semoclose);
6858 6858 }
6859 6859 /*
6860 6860 			 * On exit put the state back to its original value
6861 6861 * and broadcast to anyone waiting for the power
6862 6862 * change completion.
6863 6863 */
6864 6864 mutex_enter(SD_MUTEX(un));
6865 6865 un->un_state = state_before_pm;
6866 6866 cv_broadcast(&un->un_suspend_cv);
6867 6867 mutex_exit(SD_MUTEX(un));
6868 6868
6869 6869 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
6870 6870 "trans check Failed, not ok to power cycle.\n");
6871 6871
6872 6872 goto sdpower_failed;
6873 6873 case -1:
6874 6874 if (got_semaphore_here != 0) {
6875 6875 sema_v(&un->un_semoclose);
6876 6876 }
6877 6877 /*
6878 6878 			 * On exit put the state back to its original value
6879 6879 * and broadcast to anyone waiting for the power
6880 6880 * change completion.
6881 6881 */
6882 6882 mutex_enter(SD_MUTEX(un));
6883 6883 un->un_state = state_before_pm;
6884 6884 cv_broadcast(&un->un_suspend_cv);
6885 6885 mutex_exit(SD_MUTEX(un));
6886 6886 SD_TRACE(SD_LOG_IO_PM, un,
6887 6887 "sdpower: exit, trans check command Failed.\n");
6888 6888
6889 6889 goto sdpower_failed;
6890 6890 }
6891 6891 }
6892 6892
6893 6893 if (!SD_PM_IS_IO_CAPABLE(un, level)) {
6894 6894 /*
6895 6895 		 * Save the last state; if the STOP fails we need it
6896 6896 		 * for restoring.
6897 6897 */
6898 6898 mutex_enter(SD_MUTEX(un));
6899 6899 save_state = un->un_last_state;
6900 6900 last_power_level = un->un_power_level;
6901 6901 /*
6902 6902 		 * There must not be any cmds getting processed
6903 6903 * in the driver when we get here. Power to the
6904 6904 * device is potentially going off.
6905 6905 */
6906 6906 ASSERT(un->un_ncmds_in_driver == 0);
6907 6907 mutex_exit(SD_MUTEX(un));
6908 6908
6909 6909 /*
6910 6910 * For now PM suspend the device completely before spindle is
6911 6911 * turned off
6912 6912 */
6913 6913 if ((rval = sd_pm_state_change(un, level, SD_PM_STATE_CHANGE))
6914 6914 == DDI_FAILURE) {
6915 6915 if (got_semaphore_here != 0) {
6916 6916 sema_v(&un->un_semoclose);
6917 6917 }
6918 6918 /*
6919 6919 			 * On exit put the state back to its original value
6920 6920 * and broadcast to anyone waiting for the power
6921 6921 * change completion.
6922 6922 */
6923 6923 mutex_enter(SD_MUTEX(un));
6924 6924 un->un_state = state_before_pm;
6925 6925 un->un_power_level = last_power_level;
6926 6926 cv_broadcast(&un->un_suspend_cv);
6927 6927 mutex_exit(SD_MUTEX(un));
6928 6928 SD_TRACE(SD_LOG_IO_PM, un,
6929 6929 "sdpower: exit, PM suspend Failed.\n");
6930 6930
6931 6931 goto sdpower_failed;
6932 6932 }
6933 6933 }
6934 6934
6935 6935 /*
6936 6936 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
6937 6937 	 * close, or strategy. Dump no longer uses this routine; it uses its
6938 6938 	 * own code so it can be done in polled mode.
6939 6939 */
6940 6940
6941 6941 medium_present = TRUE;
6942 6942
6943 6943 /*
6944 6944 * When powering up, issue a TUR in case the device is at unit
6945 6945 * attention. Don't do retries. Bypass the PM layer, otherwise
6946 6946 * a deadlock on un_pm_busy_cv will occur.
6947 6947 */
6948 6948 if (SD_PM_IS_IO_CAPABLE(un, level)) {
6949 6949 sval = sd_send_scsi_TEST_UNIT_READY(ssc,
6950 6950 SD_DONT_RETRY_TUR | SD_BYPASS_PM);
6951 6951 if (sval != 0)
6952 6952 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6953 6953 }
6954 6954
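	/*
	 * Issue the START STOP UNIT that actually changes the spindle
	 * state. If the target supports the power conditions field,
	 * request the SCSI power condition corresponding to the PM
	 * level (sd_pl2pc maps one to the other); otherwise fall back
	 * to a plain START or STOP.
	 */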
6955 6955 if (un->un_f_power_condition_supported) {
6956 6956 char *pm_condition_name[] = {"STOPPED", "STANDBY",
6957 6957 "IDLE", "ACTIVE"};
6958 6958 SD_TRACE(SD_LOG_IO_PM, un,
6959 6959 "sdpower: sending \'%s\' power condition",
6960 6960 pm_condition_name[level]);
6961 6961 sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION,
6962 6962 sd_pl2pc[level], SD_PATH_DIRECT);
6963 6963 } else {
6964 6964 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
6965 6965 ((level == SD_SPINDLE_ON) ? "START" : "STOP"));
6966 6966 sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
6967 6967 ((level == SD_SPINDLE_ON) ? SD_TARGET_START :
6968 6968 SD_TARGET_STOP), SD_PATH_DIRECT);
6969 6969 }
6970 6970 if (sval != 0) {
6971 6971 if (sval == EIO)
6972 6972 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
6973 6973 else
6974 6974 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6975 6975 }
6976 6976
6977 6977 /* Command failed, check for media present. */
6978 6978 if ((sval == ENXIO) && un->un_f_has_removable_media) {
6979 6979 medium_present = FALSE;
6980 6980 }
6981 6981
6982 6982 /*
6983 6983 * The conditions of interest here are:
6984 6984 * if a spindle off with media present fails,
6985 6985 * then restore the state and return an error.
6986 6986 * else if a spindle on fails,
6987 6987 * then return an error (there's no state to restore).
6988 6988 * In all other cases we setup for the new state
6989 6989 * and return success.
6990 6990 */
6991 6991 if (!SD_PM_IS_IO_CAPABLE(un, level)) {
6992 6992 if ((medium_present == TRUE) && (sval != 0)) {
6993 6993 /* The stop command from above failed */
6994 6994 rval = DDI_FAILURE;
6995 6995 /*
6996 6996 * The stop command failed, and we have media
6997 6997 			 * present. Put the level back by calling
6998 6998 			 * sd_pm_state_change() with SD_PM_STATE_ROLLBACK and
6999 6999 			 * set the state back to its previous value.
7000 7000 */
7001 7001 (void) sd_pm_state_change(un, last_power_level,
7002 7002 SD_PM_STATE_ROLLBACK);
7003 7003 mutex_enter(SD_MUTEX(un));
7004 7004 un->un_last_state = save_state;
7005 7005 mutex_exit(SD_MUTEX(un));
7006 7006 } else if (un->un_f_monitor_media_state) {
7007 7007 /*
7008 7008 * The stop command from above succeeded.
7009 7009 * Terminate watch thread in case of removable media
7010 7010 			 * devices going into the low power state. This is as per
7011 7011 			 * the requirements of the pm framework; otherwise commands
7012 7012 			 * would be generated for the device (through the watch
7013 7013 			 * thread) even when the device is in the low power state.
7014 7014 */
7015 7015 mutex_enter(SD_MUTEX(un));
7016 7016 un->un_f_watcht_stopped = FALSE;
7017 7017 if (un->un_swr_token != NULL) {
7018 7018 opaque_t temp_token = un->un_swr_token;
7019 7019 un->un_f_watcht_stopped = TRUE;
7020 7020 un->un_swr_token = NULL;
7021 7021 mutex_exit(SD_MUTEX(un));
7022 7022 (void) scsi_watch_request_terminate(temp_token,
7023 7023 SCSI_WATCH_TERMINATE_ALL_WAIT);
7024 7024 } else {
7025 7025 mutex_exit(SD_MUTEX(un));
7026 7026 }
7027 7027 }
7028 7028 } else {
7029 7029 /*
7030 7030 * The level requested is I/O capable.
7031 7031 * Legacy behavior: return success on a failed spinup
7032 7032 * if there is no media in the drive.
7033 7033 * Do this by looking at medium_present here.
7034 7034 */
7035 7035 if ((sval != 0) && medium_present) {
7036 7036 /* The start command from above failed */
7037 7037 rval = DDI_FAILURE;
7038 7038 } else {
7039 7039 /*
7040 7040 * The start command from above succeeded
7041 7041 * PM resume the devices now that we have
7042 7042 * started the disks
7043 7043 */
7044 7044 (void) sd_pm_state_change(un, level,
7045 7045 SD_PM_STATE_CHANGE);
7046 7046
7047 7047 /*
7048 7048 * Resume the watch thread since it was suspended
7049 7049 * when the device went into low power mode.
7050 7050 */
7051 7051 if (un->un_f_monitor_media_state) {
7052 7052 mutex_enter(SD_MUTEX(un));
7053 7053 if (un->un_f_watcht_stopped == TRUE) {
7054 7054 opaque_t temp_token;
7055 7055
7056 7056 un->un_f_watcht_stopped = FALSE;
7057 7057 mutex_exit(SD_MUTEX(un));
7058 7058 temp_token =
7059 7059 sd_watch_request_submit(un);
7060 7060 mutex_enter(SD_MUTEX(un));
7061 7061 un->un_swr_token = temp_token;
7062 7062 }
7063 7063 mutex_exit(SD_MUTEX(un));
7064 7064 }
7065 7065 }
7066 7066 }
7067 7067
7068 7068 if (got_semaphore_here != 0) {
7069 7069 sema_v(&un->un_semoclose);
7070 7070 }
7071 7071 /*
7072 7072 	 * On exit put the state back to its original value
7073 7073 * and broadcast to anyone waiting for the power
7074 7074 * change completion.
7075 7075 */
7076 7076 mutex_enter(SD_MUTEX(un));
7077 7077 un->un_state = state_before_pm;
7078 7078 cv_broadcast(&un->un_suspend_cv);
7079 7079 mutex_exit(SD_MUTEX(un));
7080 7080
7081 7081 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval);
7082 7082
7083 7083 sd_ssc_fini(ssc);
7084 7084 return (rval);
7085 7085
7086 7086 sdpower_failed:
7087 7087
7088 7088 sd_ssc_fini(ssc);
7089 7089 return (DDI_FAILURE);
7090 7090 }
7091 7091
7092 7092
7093 7093
7094 7094 /*
7095 7095 * Function: sdattach
7096 7096 *
7097 7097 * Description: Driver's attach(9e) entry point function.
7098 7098 *
7099 7099 * Arguments: devi - opaque device info handle
7100 7100 * cmd - attach type
7101 7101 *
7102 7102 * Return Code: DDI_SUCCESS
7103 7103 * DDI_FAILURE
7104 7104 *
7105 7105 * Context: Kernel thread context
7106 7106 */
7107 7107
7108 7108 static int
7109 7109 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
7110 7110 {
7111 7111 switch (cmd) {
7112 7112 case DDI_ATTACH:
7113 7113 return (sd_unit_attach(devi));
7114 7114 case DDI_RESUME:
7115 7115 return (sd_ddi_resume(devi));
7116 7116 default:
7117 7117 break;
7118 7118 }
7119 7119 return (DDI_FAILURE);
7120 7120 }
7121 7121
7122 7122
7123 7123 /*
7124 7124 * Function: sddetach
7125 7125 *
7126 7126 * Description: Driver's detach(9E) entry point function.
7127 7127 *
7128 7128 * Arguments: devi - opaque device info handle
7129 7129 * cmd - detach type
7130 7130 *
7131 7131 * Return Code: DDI_SUCCESS
7132 7132 * DDI_FAILURE
7133 7133 *
7134 7134 * Context: Kernel thread context
7135 7135 */
7136 7136
7137 7137 static int
7138 7138 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
7139 7139 {
7140 7140 switch (cmd) {
7141 7141 case DDI_DETACH:
7142 7142 return (sd_unit_detach(devi));
7143 7143 case DDI_SUSPEND:
7144 7144 return (sd_ddi_suspend(devi));
7145 7145 default:
7146 7146 break;
7147 7147 }
7148 7148 return (DDI_FAILURE);
7149 7149 }
7150 7150
7151 7151
7152 7152 /*
7153 7153 * Function: sd_sync_with_callback
7154 7154 *
7155 7155 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft
7156 7156 * state while the callback routine is active.
7157 7157 *
7158 7158 * Arguments: un: softstate structure for the instance
7159 7159 *
7160 7160 * Context: Kernel thread context
7161 7161 */
7162 7162
7163 7163 static void
7164 7164 sd_sync_with_callback(struct sd_lun *un)
7165 7165 {
7166 7166 ASSERT(un != NULL);
7167 7167
7168 7168 mutex_enter(SD_MUTEX(un));
7169 7169
7170 7170 ASSERT(un->un_in_callback >= 0);
7171 7171
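	/*
	 * Busy-wait until no callback is executing, dropping SD_MUTEX
	 * for two clock ticks per iteration so that the callback
	 * routine can acquire the mutex and finish.
	 */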
7172 7172 while (un->un_in_callback > 0) {
7173 7173 mutex_exit(SD_MUTEX(un));
7174 7174 delay(2);
7175 7175 mutex_enter(SD_MUTEX(un));
7176 7176 }
7177 7177
7178 7178 mutex_exit(SD_MUTEX(un));
7179 7179 }
7180 7180
7181 7181 /*
7182 7182 * Function: sd_unit_attach
7183 7183 *
7184 7184 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates
7185 7185 * the soft state structure for the device and performs
7186 7186 * all necessary structure and device initializations.
7187 7187 *
7188 7188 * Arguments: devi: the system's dev_info_t for the device.
7189 7189 *
7190 7190 * Return Code: DDI_SUCCESS if attach is successful.
7191 7191 * DDI_FAILURE if any part of the attach fails.
7192 7192 *
7193 7193 * Context: Called at attach(9e) time for the DDI_ATTACH flag.
7194 7194 * Kernel thread context only. Can sleep.
7195 7195 */
7196 7196
7197 7197 static int
7198 7198 sd_unit_attach(dev_info_t *devi)
7199 7199 {
7200 7200 struct scsi_device *devp;
7201 7201 struct sd_lun *un;
7202 7202 char *variantp;
7203 7203 char name_str[48];
7204 7204 int reservation_flag = SD_TARGET_IS_UNRESERVED;
7205 7205 int instance;
7206 7206 int rval;
7207 7207 int wc_enabled;
7208 7208 int tgt;
7209 7209 uint64_t capacity;
7210 7210 uint_t lbasize = 0;
7211 7211 dev_info_t *pdip = ddi_get_parent(devi);
7212 7212 int offbyone = 0;
7213 7213 int geom_label_valid = 0;
7214 7214 sd_ssc_t *ssc;
7215 7215 int status;
7216 7216 struct sd_fm_internal *sfip = NULL;
7217 7217 int max_xfer_size;
7218 7218
7219 7219 /*
7220 7220 * Retrieve the target driver's private data area. This was set
7221 7221 * up by the HBA.
7222 7222 */
7223 7223 devp = ddi_get_driver_private(devi);
7224 7224
7225 7225 /*
7226 7226 * Retrieve the target ID of the device.
7227 7227 */
7228 7228 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
7229 7229 SCSI_ADDR_PROP_TARGET, -1);
7230 7230
7231 7231 /*
7232 7232 * Since we have no idea what state things were left in by the last
7233 7233  * user of the device, set up some 'default' settings, i.e. turn 'em
7234 7234 * off. The scsi_ifsetcap calls force re-negotiations with the drive.
7235 7235 * Do this before the scsi_probe, which sends an inquiry.
7236 7236 * This is a fix for bug (4430280).
7237 7237 * Of special importance is wide-xfer. The drive could have been left
7238 7238 * in wide transfer mode by the last driver to communicate with it,
7239 7239 * this includes us. If that's the case, and if the following is not
7240 7240 * setup properly or we don't re-negotiate with the drive prior to
7241 7241 * transferring data to/from the drive, it causes bus parity errors,
7242 7242 * data overruns, and unexpected interrupts. This first occurred when
7243 7243 * the fix for bug (4378686) was made.
7244 7244 */
7245 7245 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1);
7246 7246 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1);
7247 7247 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1);
7248 7248
7249 7249 /*
7250 7250 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs
7251 7251 * on a target. Setting it per lun instance actually sets the
7252 7252 * capability of this target, which affects those luns already
7253 7253  * attached on the same target. So during attach, we can disable
7254 7254  * this capability only when no other lun has been attached on this
7255 7255 * target. By doing this, we assume a target has the same tagged-qing
7256 7256 * capability for every lun. The condition can be removed when HBA
7257 7257 * is changed to support per lun based tagged-qing capability.
7258 7258 */
7259 7259 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
7260 7260 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1);
7261 7261 }
7262 7262
7263 7263 /*
7264 7264 * Use scsi_probe() to issue an INQUIRY command to the device.
7265 7265 * This call will allocate and fill in the scsi_inquiry structure
7266 7266 * and point the sd_inq member of the scsi_device structure to it.
7267 7267 * If the attach succeeds, then this memory will not be de-allocated
7268 7268 * (via scsi_unprobe()) until the instance is detached.
7269 7269 */
7270 7270 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
7271 7271 goto probe_failed;
7272 7272 }
7273 7273
7274 7274 /*
7275 7275 * Check the device type as specified in the inquiry data and
7276 7276 * claim it if it is of a type that we support.
7277 7277 */
7278 7278 switch (devp->sd_inq->inq_dtype) {
7279 7279 case DTYPE_DIRECT:
7280 7280 break;
7281 7281 case DTYPE_RODIRECT:
7282 7282 break;
7283 7283 case DTYPE_OPTICAL:
7284 7284 break;
7285 7285 case DTYPE_NOTPRESENT:
7286 7286 default:
7287 7287 /* Unsupported device type; fail the attach. */
7288 7288 goto probe_failed;
7289 7289 }
7290 7290
7291 7291 /*
7292 7292 * Allocate the soft state structure for this unit.
7293 7293 *
7294 7294 * We rely upon this memory being set to all zeroes by
7295 7295 * ddi_soft_state_zalloc(). We assume that any member of the
7296 7296 * soft state structure that is not explicitly initialized by
7297 7297 * this routine will have a value of zero.
7298 7298 */
7299 7299 instance = ddi_get_instance(devp->sd_dev);
7300 7300 #ifndef XPV_HVM_DRIVER
7301 7301 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) {
7302 7302 goto probe_failed;
7303 7303 }
7304 7304 #endif /* !XPV_HVM_DRIVER */
7305 7305
7306 7306 /*
7307 7307 * Retrieve a pointer to the newly-allocated soft state.
7308 7308 *
7309 7309 * This should NEVER fail if the ddi_soft_state_zalloc() call above
7310 7310 * was successful, unless something has gone horribly wrong and the
7311 7311 * ddi's soft state internals are corrupt (in which case it is
7312 7312 * probably better to halt here than just fail the attach....)
7313 7313 */
7314 7314 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
7315 7315 panic("sd_unit_attach: NULL soft state on instance:0x%x",
7316 7316 instance);
7317 7317 /*NOTREACHED*/
7318 7318 }
7319 7319
7320 7320 /*
7321 7321 * Link the back ptr of the driver soft state to the scsi_device
7322 7322 * struct for this lun.
7323 7323 * Save a pointer to the softstate in the driver-private area of
7324 7324 * the scsi_device struct.
7325 7325 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until
7326 7326 * we first set un->un_sd below.
7327 7327 */
7328 7328 un->un_sd = devp;
7329 7329 devp->sd_private = (opaque_t)un;
7330 7330
7331 7331 /*
7332 7332 * The following must be after devp is stored in the soft state struct.
7333 7333 */
7334 7334 #ifdef SDDEBUG
7335 7335 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7336 7336 "%s_unit_attach: un:0x%p instance:%d\n",
7337 7337 ddi_driver_name(devi), un, instance);
7338 7338 #endif
7339 7339
7340 7340 /*
7341 7341 * Set up the device type and node type (for the minor nodes).
7342 7342 * By default we assume that the device can at least support the
7343 7343 * Common Command Set. Call it a CD-ROM if it reports itself
7344 7344 * as a RODIRECT device.
7345 7345 */
7346 7346 switch (devp->sd_inq->inq_dtype) {
7347 7347 case DTYPE_RODIRECT:
7348 7348 un->un_node_type = DDI_NT_CD_CHAN;
7349 7349 un->un_ctype = CTYPE_CDROM;
7350 7350 break;
7351 7351 case DTYPE_OPTICAL:
7352 7352 un->un_node_type = DDI_NT_BLOCK_CHAN;
7353 7353 un->un_ctype = CTYPE_ROD;
7354 7354 break;
7355 7355 default:
7356 7356 un->un_node_type = DDI_NT_BLOCK_CHAN;
7357 7357 un->un_ctype = CTYPE_CCS;
7358 7358 break;
7359 7359 }
7360 7360
7361 7361 /*
7362 7362 * Try to read the interconnect type from the HBA.
7363 7363 *
7364 7364 * Note: This driver is currently compiled as two binaries, a parallel
7365 7365 * scsi version (sd) and a fibre channel version (ssd). All functional
7366 7366 * differences are determined at compile time. In the future a single
7367 7367 * binary will be provided and the interconnect type will be used to
7368 7368 * differentiate between fibre and parallel scsi behaviors. At that time
7369 7369 * it will be necessary for all fibre channel HBAs to support this
7370 7370 * property.
7371 7371 *
7372 7372  * Set un_f_is_fibre to TRUE (default fibre).
7373 7373 */
7374 7374 un->un_f_is_fibre = TRUE;
7375 7375 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
7376 7376 case INTERCONNECT_SSA:
7377 7377 un->un_interconnect_type = SD_INTERCONNECT_SSA;
7378 7378 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7379 7379 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
7380 7380 break;
7381 7381 case INTERCONNECT_PARALLEL:
7382 7382 un->un_f_is_fibre = FALSE;
7383 7383 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
7384 7384 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7385 7385 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
7386 7386 break;
7387 7387 case INTERCONNECT_SAS:
7388 7388 un->un_f_is_fibre = FALSE;
7389 7389 un->un_interconnect_type = SD_INTERCONNECT_SAS;
7390 7390 un->un_node_type = DDI_NT_BLOCK_SAS;
7391 7391 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7392 7392 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SAS\n", un);
7393 7393 break;
7394 7394 case INTERCONNECT_SATA:
7395 7395 un->un_f_is_fibre = FALSE;
7396 7396 un->un_interconnect_type = SD_INTERCONNECT_SATA;
7397 7397 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7398 7398 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
7399 7399 break;
7400 7400 case INTERCONNECT_FIBRE:
7401 7401 un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
7402 7402 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7403 7403 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
7404 7404 break;
7405 7405 case INTERCONNECT_FABRIC:
7406 7406 un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
7407 7407 un->un_node_type = DDI_NT_BLOCK_FABRIC;
7408 7408 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7409 7409 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
7410 7410 break;
7411 7411 default:
7412 7412 #ifdef SD_DEFAULT_INTERCONNECT_TYPE
7413 7413 /*
7414 7414 * The HBA does not support the "interconnect-type" property
7415 7415 * (or did not provide a recognized type).
7416 7416 *
7417 7417 * Note: This will be obsoleted when a single fibre channel
7418 7418 * and parallel scsi driver is delivered. In the meantime the
7419 7419 		 * interconnect type will be set to the platform default. If that
7420 7420 * type is not parallel SCSI, it means that we should be
7421 7421 * assuming "ssd" semantics. However, here this also means that
7422 7422 * the FC HBA is not supporting the "interconnect-type" property
7423 7423 * like we expect it to, so log this occurrence.
7424 7424 */
7425 7425 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
7426 7426 if (!SD_IS_PARALLEL_SCSI(un)) {
7427 7427 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7428 7428 "sd_unit_attach: un:0x%p Assuming "
7429 7429 "INTERCONNECT_FIBRE\n", un);
7430 7430 } else {
7431 7431 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7432 7432 "sd_unit_attach: un:0x%p Assuming "
7433 7433 "INTERCONNECT_PARALLEL\n", un);
7434 7434 un->un_f_is_fibre = FALSE;
7435 7435 }
7436 7436 #else
7437 7437 /*
7438 7438	 * Note: This code will be updated when a single fibre
7439 7439 * channel and parallel scsi driver is delivered. The default
7440 7440 * will be to assume that if a device does not support the
7441 7441 * "interconnect-type" property it is a parallel SCSI HBA and
7442 7442 * we will set the interconnect type for parallel scsi.
7443 7443 */
7444 7444 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
7445 7445 un->un_f_is_fibre = FALSE;
7446 7446 #endif
7447 7447 break;
7448 7448 }
7449 7449
7450 7450 if (un->un_f_is_fibre == TRUE) {
7451 7451 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
7452 7452 SCSI_VERSION_3) {
7453 7453 switch (un->un_interconnect_type) {
7454 7454 case SD_INTERCONNECT_FIBRE:
7455 7455 case SD_INTERCONNECT_SSA:
7456 7456 un->un_node_type = DDI_NT_BLOCK_WWN;
7457 7457 break;
7458 7458 default:
7459 7459 break;
7460 7460 }
7461 7461 }
7462 7462 }
7463 7463
7464 7464 /*
7465 7465 * Initialize the Request Sense command for the target
7466 7466 */
7467 7467 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
7468 7468 goto alloc_rqs_failed;
7469 7469 }
7470 7470
7471 7471 /*
7472 7472	 * Set un_retry_count to SD_RETRY_COUNT; this is fine for Sparc,
7473 7473	 * which has separate binaries for sd and ssd.
7474 7474	 *
7475 7475	 * x86 has one binary, so un_retry_count is set based on the
7476 7476	 * connection type. These hardcoded values will go away when
7477 7477	 * Sparc uses one binary for sd and ssd, and they need to match
7478 7478	 * SD_RETRY_COUNT in sddef.h.
7479 7479	 * The value used is based on interconnect type:
7480 7480	 * fibre = 3, parallel = 5.
7481 7481 */
7482 7482 #if defined(__i386) || defined(__amd64)
7483 7483 un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
7484 7484 #else
7485 7485 un->un_retry_count = SD_RETRY_COUNT;
7486 7486 #endif
7487 7487
7488 7488 /*
7489 7489 * Set the per disk retry count to the default number of retries
7490 7490 * for disks and CDROMs. This value can be overridden by the
7491 7491 * disk property list or an entry in sd.conf.
7492 7492 */
7493 7493 un->un_notready_retry_count =
7494 7494 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
7495 7495 : DISK_NOT_READY_RETRY_COUNT(un);
7496 7496
7497 7497 /*
7498 7498 * Set the busy retry count to the default value of un_retry_count.
7499 7499 * This can be overridden by entries in sd.conf or the device
7500 7500 * config table.
7501 7501 */
7502 7502 un->un_busy_retry_count = un->un_retry_count;
7503 7503
7504 7504 /*
7505 7505 * Init the reset threshold for retries. This number determines
7506 7506 * how many retries must be performed before a reset can be issued
7507 7507 * (for certain error conditions). This can be overridden by entries
7508 7508 * in sd.conf or the device config table.
7509 7509 */
7510 7510 un->un_reset_retry_count = (un->un_retry_count / 2);
7511 7511
7512 7512 /*
7513 7513	 * Set the victim_retry_count to twice the default un_retry_count.
7514 7514 */
7515 7515 un->un_victim_retry_count = (2 * un->un_retry_count);
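	/*
	 * Editor's note (illustrative arithmetic, not part of this change):
	 * with the values above, a parallel SCSI disk on x86 gets
	 * un_retry_count = 5, un_reset_retry_count = 5 / 2 = 2, and
	 * un_victim_retry_count = 2 * 5 = 10; a fibre disk gets 3, 1,
	 * and 6 respectively.
	 */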
7516 7516
7517 7517 /*
7518 7518 * Set the reservation release timeout to the default value of
7519 7519 * 5 seconds. This can be overridden by entries in ssd.conf or the
7520 7520 * device config table.
7521 7521 */
7522 7522 un->un_reserve_release_time = 5;
7523 7523
7524 7524 /*
7525 7525 * Set up the default maximum transfer size. Note that this may
7526 7526 * get updated later in the attach, when setting up default wide
7527 7527 * operations for disks.
7528 7528 */
7529 7529 #if defined(__i386) || defined(__amd64)
7530 7530 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
7531 7531 un->un_partial_dma_supported = 1;
7532 7532 #else
7533 7533 un->un_max_xfer_size = (uint_t)maxphys;
7534 7534 #endif
7535 7535
7536 7536 /*
7537 7537 * Get "allow bus device reset" property (defaults to "enabled" if
7538 7538 * the property was not defined). This is to disable bus resets for
7539 7539 * certain kinds of error recovery. Note: In the future when a run-time
7540 7540 * fibre check is available the soft state flag should default to
7541 7541 * enabled.
7542 7542 */
7543 7543 if (un->un_f_is_fibre == TRUE) {
7544 7544 un->un_f_allow_bus_device_reset = TRUE;
7545 7545 } else {
7546 7546 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
7547 7547 "allow-bus-device-reset", 1) != 0) {
7548 7548 un->un_f_allow_bus_device_reset = TRUE;
7549 7549 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7550 7550 "sd_unit_attach: un:0x%p Bus device reset "
7551 7551 "enabled\n", un);
7552 7552 } else {
7553 7553 un->un_f_allow_bus_device_reset = FALSE;
7554 7554 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7555 7555 "sd_unit_attach: un:0x%p Bus device reset "
7556 7556 "disabled\n", un);
7557 7557 }
7558 7558 }
7559 7559
7560 7560 /*
7561 7561 * Check if this is an ATAPI device. ATAPI devices use Group 1
7562 7562 * Read/Write commands and Group 2 Mode Sense/Select commands.
7563 7563 *
7564 7564 * Note: The "obsolete" way of doing this is to check for the "atapi"
7565 7565 * property. The new "variant" property with a value of "atapi" has been
7566 7566 * introduced so that future 'variants' of standard SCSI behavior (like
7567 7567 * atapi) could be specified by the underlying HBA drivers by supplying
7568 7568 * a new value for the "variant" property, instead of having to define a
7569 7569 * new property.
7570 7570 */
7571 7571 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) {
7572 7572 un->un_f_cfg_is_atapi = TRUE;
7573 7573 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7574 7574 "sd_unit_attach: un:0x%p Atapi device\n", un);
7575 7575 }
7576 7576 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant",
7577 7577 &variantp) == DDI_PROP_SUCCESS) {
7578 7578 if (strcmp(variantp, "atapi") == 0) {
7579 7579 un->un_f_cfg_is_atapi = TRUE;
7580 7580 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7581 7581 "sd_unit_attach: un:0x%p Atapi device\n", un);
7582 7582 }
7583 7583 ddi_prop_free(variantp);
7584 7584 }
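	/*
	 * Editor's note: a minimal sketch, not part of this change, of how
	 * an HBA driver might publish the "variant" property consumed above
	 * ("dip" is a hypothetical devinfo node for the child device):
	 *
	 *	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
	 *	    "variant", "atapi");
	 */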
7585 7585
7586 7586 un->un_cmd_timeout = SD_IO_TIME;
7587 7587
7588 7588 un->un_busy_timeout = SD_BSY_TIMEOUT;
7589 7589
7590 7590 /* Info on current states, statuses, etc. (Updated frequently) */
7591 7591 un->un_state = SD_STATE_NORMAL;
7592 7592 un->un_last_state = SD_STATE_NORMAL;
7593 7593
7594 7594 /* Control & status info for command throttling */
7595 7595 un->un_throttle = sd_max_throttle;
7596 7596 un->un_saved_throttle = sd_max_throttle;
7597 7597 un->un_min_throttle = sd_min_throttle;
7598 7598
7599 7599 if (un->un_f_is_fibre == TRUE) {
7600 7600 un->un_f_use_adaptive_throttle = TRUE;
7601 7601 } else {
7602 7602 un->un_f_use_adaptive_throttle = FALSE;
7603 7603 }
7604 7604
7605 7605 /* Removable media support. */
7606 7606 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
7607 7607 un->un_mediastate = DKIO_NONE;
7608 7608 un->un_specified_mediastate = DKIO_NONE;
7609 7609
7610 7610 /* CVs for suspend/resume (PM or DR) */
7611 7611 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL);
7612 7612 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);
7613 7613
7614 7614 /* Power management support. */
7615 7615 un->un_power_level = SD_SPINDLE_UNINIT;
7616 7616
7617 7617 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL);
7618 7618 un->un_f_wcc_inprog = 0;
7619 7619
7620 7620 /*
7621 7621 * The open/close semaphore is used to serialize threads executing
7622 7622 * in the driver's open & close entry point routines for a given
7623 7623 * instance.
7624 7624 */
7625 7625 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);
7626 7626
7627 7627 /*
7628 7628	 * The conf file entry and softstate variable act as a forceful override,
7629 7629 * meaning a non-zero value must be entered to change the default.
7630 7630 */
7631 7631 un->un_f_disksort_disabled = FALSE;
7632 7632 un->un_f_rmw_type = SD_RMW_TYPE_DEFAULT;
7633 7633 un->un_f_enable_rmw = FALSE;
7634 7634
7635 7635 /*
7636 7636 * GET EVENT STATUS NOTIFICATION media polling enabled by default, but
7637 7637 * can be overridden via [s]sd-config-list "mmc-gesn-polling" property.
7638 7638 */
7639 7639 un->un_f_mmc_gesn_polling = TRUE;
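	/*
	 * Editor's note: a hypothetical sd.conf fragment showing how this
	 * tunable might be disabled through the sd-config-list mechanism
	 * (the vendor and product strings are placeholders, and the exact
	 * value syntax is assumed):
	 *
	 *	sd-config-list = "VENDOR  PRODUCT ", "mmc-gesn-polling:0";
	 */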
7640 7640
7641 7641 /*
7642 7642 * physical sector size defaults to DEV_BSIZE currently. We can
7643 7643 * override this value via the driver configuration file so we must
7644 7644 * set it before calling sd_read_unit_properties().
7645 7645 */
7646 7646 un->un_phy_blocksize = DEV_BSIZE;
7647 7647
7648 7648 /*
7649 7649 * Retrieve the properties from the static driver table or the driver
7650 7650 * configuration file (.conf) for this unit and update the soft state
7651 7651 * for the device as needed for the indicated properties.
7652 7652 * Note: the property configuration needs to occur here as some of the
7653 7653 * following routines may have dependencies on soft state flags set
7654 7654 * as part of the driver property configuration.
7655 7655 */
7656 7656 sd_read_unit_properties(un);
7657 7657 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7658 7658 "sd_unit_attach: un:0x%p property configuration complete.\n", un);
7659 7659
7660 7660 /*
7661 7661	 * A device is treated as hotpluggable only if it has the
7662 7662	 * "hotpluggable" property; otherwise it is regarded as
7663 7663	 * non-hotpluggable.
7664 7664 */
7665 7665 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
7666 7666 -1) != -1) {
7667 7667 un->un_f_is_hotpluggable = TRUE;
7668 7668 }
7669 7669
7670 7670 /*
7671 7671	 * Set the unit's attributes (flags) according to the "hotpluggable"
7672 7672	 * property and the RMB bit in the INQUIRY data.
7673 7673 */
7674 7674 sd_set_unit_attributes(un, devi);
7675 7675
7676 7676 /*
7677 7677 * By default, we mark the capacity, lbasize, and geometry
7678 7678 * as invalid. Only if we successfully read a valid capacity
7679 7679 * will we update the un_blockcount and un_tgt_blocksize with the
7680 7680 * valid values (the geometry will be validated later).
7681 7681 */
7682 7682 un->un_f_blockcount_is_valid = FALSE;
7683 7683 un->un_f_tgt_blocksize_is_valid = FALSE;
7684 7684
7685 7685 /*
7686 7686 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
7687 7687 * otherwise.
7688 7688 */
7689 7689 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE;
7690 7690 un->un_blockcount = 0;
7691 7691
7692 7692 /*
7693 7693 * Set up the per-instance info needed to determine the correct
7694 7694 * CDBs and other info for issuing commands to the target.
7695 7695 */
7696 7696 sd_init_cdb_limits(un);
7697 7697
7698 7698 /*
7699 7699 * Set up the IO chains to use, based upon the target type.
7700 7700 */
7701 7701 if (un->un_f_non_devbsize_supported) {
7702 7702 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
7703 7703 } else {
7704 7704 un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
7705 7705 }
7706 7706 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
7707 7707 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
7708 7708 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;
7709 7709
7710 7710 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
7711 7711 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
7712 7712 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
7713 7713 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);
7714 7714
7715 7715
7716 7716 if (ISCD(un)) {
7717 7717 un->un_additional_codes = sd_additional_codes;
7718 7718 } else {
7719 7719 un->un_additional_codes = NULL;
7720 7720 }
7721 7721
7722 7722 /*
7723 7723 * Create the kstats here so they can be available for attach-time
7724 7724 * routines that send commands to the unit (either polled or via
7725 7725 * sd_send_scsi_cmd).
7726 7726 *
7727 7727 * Note: This is a critical sequence that needs to be maintained:
7728 7728 * 1) Instantiate the kstats here, before any routines using the
7729 7729 * iopath (i.e. sd_send_scsi_cmd).
7730 7730 * 2) Instantiate and initialize the partition stats
7731 7731 * (sd_set_pstats).
7732 7732 * 3) Initialize the error stats (sd_set_errstats), following
7733 7733 * sd_validate_geometry(),sd_register_devid(),
7734 7734 * and sd_cache_control().
7735 7735 */
7736 7736
7737 7737 un->un_stats = kstat_create(sd_label, instance,
7738 7738 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
7739 7739 if (un->un_stats != NULL) {
7740 7740 un->un_stats->ks_lock = SD_MUTEX(un);
7741 7741 kstat_install(un->un_stats);
7742 7742 }
7743 7743 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7744 7744 "sd_unit_attach: un:0x%p un_stats created\n", un);
7745 7745
7746 7746 sd_create_errstats(un, instance);
7747 7747 if (un->un_errstats == NULL) {
7748 7748 goto create_errstats_failed;
7749 7749 }
7750 7750 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7751 7751 "sd_unit_attach: un:0x%p errstats created\n", un);
7752 7752
7753 7753 /*
7754 7754 * The following if/else code was relocated here from below as part
7755 7755 * of the fix for bug (4430280). However with the default setup added
7756 7756 * on entry to this routine, it's no longer absolutely necessary for
7757 7757 * this to be before the call to sd_spin_up_unit.
7758 7758 */
7759 7759 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) {
7760 7760 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) ||
7761 7761 (devp->sd_inq->inq_ansi == 5)) &&
7762 7762 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque;
7763 7763
7764 7764 /*
7765 7765 * If tagged queueing is supported by the target
7766 7766	 * and by the host adapter, then we will enable it.
7767 7767 */
7768 7768 un->un_tagflags = 0;
7769 7769 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag &&
7770 7770 (un->un_f_arq_enabled == TRUE)) {
7771 7771 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing",
7772 7772 1, 1) == 1) {
7773 7773 un->un_tagflags = FLAG_STAG;
7774 7774 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7775 7775 "sd_unit_attach: un:0x%p tag queueing "
7776 7776 "enabled\n", un);
7777 7777 } else if (scsi_ifgetcap(SD_ADDRESS(un),
7778 7778 "untagged-qing", 0) == 1) {
7779 7779 un->un_f_opt_queueing = TRUE;
7780 7780 un->un_saved_throttle = un->un_throttle =
7781 7781 min(un->un_throttle, 3);
7782 7782 } else {
7783 7783 un->un_f_opt_queueing = FALSE;
7784 7784 un->un_saved_throttle = un->un_throttle = 1;
7785 7785 }
7786 7786 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0)
7787 7787 == 1) && (un->un_f_arq_enabled == TRUE)) {
7788 7788 /* The Host Adapter supports internal queueing. */
7789 7789 un->un_f_opt_queueing = TRUE;
7790 7790 un->un_saved_throttle = un->un_throttle =
7791 7791 min(un->un_throttle, 3);
7792 7792 } else {
7793 7793 un->un_f_opt_queueing = FALSE;
7794 7794 un->un_saved_throttle = un->un_throttle = 1;
7795 7795 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7796 7796 "sd_unit_attach: un:0x%p no tag queueing\n", un);
7797 7797 }
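		/*
		 * Editor's note, summarizing the decision above: tagged
		 * queueing enabled => un_tagflags = FLAG_STAG with the full
		 * default throttle; only untagged host-adapter queueing =>
		 * throttle capped at 3; neither => throttle forced to 1
		 * (one command outstanding at a time).
		 */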
7798 7798
7799 7799 /*
7800 7800 * Enable large transfers for SATA/SAS drives
7801 7801 */
7802 7802 if (SD_IS_SERIAL(un)) {
7803 7803 un->un_max_xfer_size =
7804 7804 ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7805 7805 sd_max_xfer_size, SD_MAX_XFER_SIZE);
7806 7806 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7807 7807 "sd_unit_attach: un:0x%p max transfer "
7808 7808 "size=0x%x\n", un, un->un_max_xfer_size);
7809 7809
7810 7810 }
7811 7811
7812 7812 /* Setup or tear down default wide operations for disks */
7813 7813
7814 7814 /*
7815 7815 * Note: Legacy: it may be possible for both "sd_max_xfer_size"
7816 7816 * and "ssd_max_xfer_size" to exist simultaneously on the same
7817 7817 * system and be set to different values. In the future this
7818 7818 * code may need to be updated when the ssd module is
7819 7819 * obsoleted and removed from the system. (4299588)
7820 7820 */
7821 7821 if (SD_IS_PARALLEL_SCSI(un) &&
7822 7822 (devp->sd_inq->inq_rdf == RDF_SCSI2) &&
7823 7823 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) {
7824 7824 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
7825 7825 1, 1) == 1) {
7826 7826 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7827 7827 "sd_unit_attach: un:0x%p Wide Transfer "
7828 7828 "enabled\n", un);
7829 7829 }
7830 7830
7831 7831 /*
7832 7832 * If tagged queuing has also been enabled, then
7833 7833 * enable large xfers
7834 7834 */
7835 7835 if (un->un_saved_throttle == sd_max_throttle) {
7836 7836 un->un_max_xfer_size =
7837 7837 ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7838 7838 sd_max_xfer_size, SD_MAX_XFER_SIZE);
7839 7839 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7840 7840 "sd_unit_attach: un:0x%p max transfer "
7841 7841 "size=0x%x\n", un, un->un_max_xfer_size);
7842 7842 }
7843 7843 } else {
7844 7844 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
7845 7845 0, 1) == 1) {
7846 7846 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7847 7847 "sd_unit_attach: un:0x%p "
7848 7848 "Wide Transfer disabled\n", un);
7849 7849 }
7850 7850 }
7851 7851 } else {
7852 7852 un->un_tagflags = FLAG_STAG;
7853 7853 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY,
7854 7854 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE);
7855 7855 }
7856 7856
7857 7857 /*
7858 7858 * If this target supports LUN reset, try to enable it.
7859 7859 */
7860 7860 if (un->un_f_lun_reset_enabled) {
7861 7861 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) {
7862 7862 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7863 7863 "un:0x%p lun_reset capability set\n", un);
7864 7864 } else {
7865 7865 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7866 7866 "un:0x%p lun-reset capability not set\n", un);
7867 7867 }
7868 7868 }
7869 7869
7870 7870 /*
7871 7871 * Adjust the maximum transfer size. This is to fix
7872 7872	 * the problem of partial DMA support on SPARC. Some
7873 7873	 * HBA drivers, like aac, have a very small dma_attr_maxxfer
7874 7874	 * size, which requires partial DMA support on SPARC.
7875 7875 * In the future the SPARC pci nexus driver may solve
7876 7876 * the problem instead of this fix.
7877 7877 */
7878 7878 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1);
7879 7879 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) {
7880 7880 /* We need DMA partial even on sparc to ensure sddump() works */
7881 7881 un->un_max_xfer_size = max_xfer_size;
7882 7882 if (un->un_partial_dma_supported == 0)
7883 7883 un->un_partial_dma_supported = 1;
7884 7884 }
7885 7885 if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
7886 7886 DDI_PROP_DONTPASS, "buf_break", 0) == 1) {
7887 7887 if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr,
7888 7888 un->un_max_xfer_size) == 1) {
7889 7889 un->un_buf_breakup_supported = 1;
7890 7890 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7891 7891 "un:0x%p Buf breakup enabled\n", un);
7892 7892 }
7893 7893 }
7894 7894
7895 7895 /*
7896 7896 * Set PKT_DMA_PARTIAL flag.
7897 7897 */
7898 7898 if (un->un_partial_dma_supported == 1) {
7899 7899 un->un_pkt_flags = PKT_DMA_PARTIAL;
7900 7900 } else {
7901 7901 un->un_pkt_flags = 0;
7902 7902 }
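	/*
	 * Editor's note: PKT_DMA_PARTIAL tells scsi_init_pkt(9F) that this
	 * driver can cope with partial DMA resource allocation, i.e. that a
	 * request may be broken into multiple DMA windows when the HBA
	 * cannot map the entire transfer at once.
	 */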
7903 7903
7904 7904 /* Initialize sd_ssc_t for internal uscsi commands */
7905 7905 ssc = sd_ssc_init(un);
7906 7906 scsi_fm_init(devp);
7907 7907
7908 7908 /*
7909 7909	 * Allocate memory for the SCSI FMA structures.
7910 7910 */
7911 7911 un->un_fm_private =
7912 7912 kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP);
7913 7913 sfip = (struct sd_fm_internal *)un->un_fm_private;
7914 7914 sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd;
7915 7915 sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo;
7916 7916 sfip->fm_ssc.ssc_un = un;
7917 7917
7918 7918 if (ISCD(un) ||
7919 7919 un->un_f_has_removable_media ||
7920 7920 devp->sd_fm_capable == DDI_FM_NOT_CAPABLE) {
7921 7921 /*
7922 7922		 * We don't touch CDROMs or DDI_FM_NOT_CAPABLE devices;
7923 7923		 * their logging is unchanged.
7924 7924 */
7925 7925 sfip->fm_log_level = SD_FM_LOG_NSUP;
7926 7926 } else {
7927 7927 /*
7928 7928		 * If we get here, this is a non-CDROM, FM-capable
7929 7929		 * device that will not keep the old scsi_log output
7930 7930		 * in /var/adm/messages. Instead, the property
7931 7931		 * "fm-scsi-log" controls whether the FM telemetry is
7932 7932		 * logged in /var/adm/messages.
7933 7933 */
7934 7934 int fm_scsi_log;
7935 7935 fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
7936 7936 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0);
7937 7937
7938 7938 if (fm_scsi_log)
7939 7939 sfip->fm_log_level = SD_FM_LOG_EREPORT;
7940 7940 else
7941 7941 sfip->fm_log_level = SD_FM_LOG_SILENT;
7942 7942 }
7943 7943
7944 7944 /*
7945 7945 * At this point in the attach, we have enough info in the
7946 7946 * soft state to be able to issue commands to the target.
7947 7947 *
7948 7948 * All command paths used below MUST issue their commands as
7949 7949 * SD_PATH_DIRECT. This is important as intermediate layers
7950 7950 * are not all initialized yet (such as PM).
7951 7951 */
7952 7952
7953 7953 /*
7954 7954 * Send a TEST UNIT READY command to the device. This should clear
7955 7955 * any outstanding UNIT ATTENTION that may be present.
7956 7956 *
7957 7957	 * Note: Don't check for success; just track whether there is a
7958 7958	 * reservation. This is a throwaway command to clear any unit attentions.
7959 7959 *
7960 7960 * Note: This MUST be the first command issued to the target during
7961 7961 * attach to ensure power on UNIT ATTENTIONS are cleared.
7962 7962 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated
7963 7963 * with attempts at spinning up a device with no media.
7964 7964 */
7965 7965 status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
7966 7966 if (status != 0) {
7967 7967 if (status == EACCES)
7968 7968 reservation_flag = SD_TARGET_IS_RESERVED;
7969 7969 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
7970 7970 }
7971 7971
7972 7972 /*
7973 7973 * If the device is NOT a removable media device, attempt to spin
7974 7974 * it up (using the START_STOP_UNIT command) and read its capacity
7975 7975 * (using the READ CAPACITY command). Note, however, that either
7976 7976 * of these could fail and in some cases we would continue with
7977 7977 * the attach despite the failure (see below).
7978 7978 */
7979 7979 if (un->un_f_descr_format_supported) {
7980 7980
7981 7981 switch (sd_spin_up_unit(ssc)) {
7982 7982 case 0:
7983 7983 /*
7984 7984 * Spin-up was successful; now try to read the
7985 7985 * capacity. If successful then save the results
7986 7986 * and mark the capacity & lbasize as valid.
7987 7987 */
7988 7988 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7989 7989 "sd_unit_attach: un:0x%p spin-up successful\n", un);
7990 7990
7991 7991 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
7992 7992 &lbasize, SD_PATH_DIRECT);
7993 7993
7994 7994 switch (status) {
7995 7995 case 0: {
7996 7996 if (capacity > DK_MAX_BLOCKS) {
7997 7997 #ifdef _LP64
7998 7998 if ((capacity + 1) >
7999 7999 SD_GROUP1_MAX_ADDRESS) {
8000 8000 /*
8001 8001 * Enable descriptor format
8002 8002 * sense data so that we can
8003 8003 * get 64 bit sense data
8004 8004 * fields.
8005 8005 */
8006 8006 sd_enable_descr_sense(ssc);
8007 8007 }
8008 8008 #else
8009 8009 /* 32-bit kernels can't handle this */
8010 8010 scsi_log(SD_DEVINFO(un),
8011 8011 sd_label, CE_WARN,
8012 8012 "disk has %llu blocks, which "
8013 8013 "is too large for a 32-bit "
8014 8014 "kernel", capacity);
8015 8015
8016 8016 #if defined(__i386) || defined(__amd64)
8017 8017 /*
8018 8018				 * A 1TB disk was treated as (1T - 512)B
8019 8019				 * in the past, so it may have a valid
8020 8020				 * VTOC and Solaris partitions. We have
8021 8021				 * to allow such a disk to continue to
8022 8022				 * work.
8023 8023 */
8024 8024				if (capacity - 1 > DK_MAX_BLOCKS)
8025 8025 #endif
8026 8026 goto spinup_failed;
8027 8027 #endif
8028 8028 }
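				/*
				 * Editor's note: the descriptor-format sense
				 * data enabled above matters because the
				 * fixed-format sense INFORMATION field is
				 * only 32 bits wide, so it cannot report a
				 * failing LBA that does not fit in 32 bits.
				 */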
8029 8029
8030 8030 /*
8031 8031				 * It's not necessary to check here whether
8032 8032				 * the capacity of the device exceeds what
8033 8033				 * the maximum HBA CDB can support, because
8034 8034				 * sd_send_scsi_READ_CAPACITY retrieves the
8035 8035				 * capacity via a USCSI command, which is
8036 8036				 * itself constrained by the maximum HBA CDB.
8037 8037				 * sd_send_scsi_READ_CAPACITY returns EINVAL
8038 8038				 * when a larger CDB than the HBA supports
8039 8039				 * would be required; that case is handled
8040 8040				 * in "case EINVAL" below.
8041 8041 */
8042 8042
8043 8043 /*
8044 8044 * The following relies on
8045 8045 * sd_send_scsi_READ_CAPACITY never
8046 8046 * returning 0 for capacity and/or lbasize.
8047 8047 */
8048 8048 sd_update_block_info(un, lbasize, capacity);
8049 8049
8050 8050 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8051 8051 "sd_unit_attach: un:0x%p capacity = %ld "
8052 8052 "blocks; lbasize= %ld.\n", un,
8053 8053 un->un_blockcount, un->un_tgt_blocksize);
8054 8054
8055 8055 break;
8056 8056 }
8057 8057 case EINVAL:
8058 8058 /*
8059 8059 * In the case where the max-cdb-length property
8060 8060 * is smaller than the required CDB length for
8061 8061 * a SCSI device, a target driver can fail to
8062 8062 * attach to that device.
8063 8063 */
8064 8064 scsi_log(SD_DEVINFO(un),
8065 8065 sd_label, CE_WARN,
8066 8066 "disk capacity is too large "
8067 8067 "for current cdb length");
8068 8068 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8069 8069
8070 8070 goto spinup_failed;
8071 8071 case EACCES:
8072 8072 /*
8073 8073 * Should never get here if the spin-up
8074 8074 * succeeded, but code it in anyway.
8075 8075 * From here, just continue with the attach...
8076 8076 */
8077 8077 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8078 8078 "sd_unit_attach: un:0x%p "
8079 8079 "sd_send_scsi_READ_CAPACITY "
8080 8080 "returned reservation conflict\n", un);
8081 8081 reservation_flag = SD_TARGET_IS_RESERVED;
8082 8082 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8083 8083 break;
8084 8084 default:
8085 8085 /*
8086 8086 * Likewise, should never get here if the
8087 8087 * spin-up succeeded. Just continue with
8088 8088 * the attach...
8089 8089 */
8090 8090 if (status == EIO)
8091 8091 sd_ssc_assessment(ssc,
8092 8092 SD_FMT_STATUS_CHECK);
8093 8093 else
8094 8094 sd_ssc_assessment(ssc,
8095 8095 SD_FMT_IGNORE);
8096 8096 break;
8097 8097 }
8098 8098 break;
8099 8099 case EACCES:
8100 8100 /*
8101 8101 * Device is reserved by another host. In this case
8102 8102 * we could not spin it up or read the capacity, but
8103 8103 * we continue with the attach anyway.
8104 8104 */
8105 8105 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8106 8106 "sd_unit_attach: un:0x%p spin-up reservation "
8107 8107 "conflict.\n", un);
8108 8108 reservation_flag = SD_TARGET_IS_RESERVED;
8109 8109 break;
8110 8110 default:
8111 8111 /* Fail the attach if the spin-up failed. */
8112 8112 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8113 8113 "sd_unit_attach: un:0x%p spin-up failed.", un);
8114 8114 goto spinup_failed;
8115 8115 }
8116 8116
8117 8117 }
8118 8118
8119 8119 /*
8120 8120	 * Check to see if this is an MMC drive.
8121 8121 */
8122 8122 if (ISCD(un)) {
8123 8123 sd_set_mmc_caps(ssc);
8124 8124 }
8125 8125
8126 8126 /*
8127 8127 * Add a zero-length attribute to tell the world we support
8128 8128 * kernel ioctls (for layered drivers)
8129 8129 */
8130 8130 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
8131 8131 DDI_KERNEL_IOCTL, NULL, 0);
8132 8132
8133 8133 /*
8134 8134 * Add a boolean property to tell the world we support
8135 8135 * the B_FAILFAST flag (for layered drivers)
8136 8136 */
8137 8137 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
8138 8138 "ddi-failfast-supported", NULL, 0);
8139 8139
8140 8140 /*
8141 8141 * Initialize power management
8142 8142 */
8143 8143 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL);
8144 8144 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL);
8145 8145 sd_setup_pm(ssc, devi);
8146 8146 if (un->un_f_pm_is_enabled == FALSE) {
8147 8147 /*
8148 8148 * For performance, point to a jump table that does
8149 8149 * not include pm.
8150 8150 * The direct and priority chains don't change with PM.
8151 8151 *
8152 8152 * Note: this is currently done based on individual device
8153 8153 * capabilities. When an interface for determining system
8154 8154 * power enabled state becomes available, or when additional
8155 8155 * layers are added to the command chain, these values will
8156 8156 * have to be re-evaluated for correctness.
8157 8157 */
8158 8158 if (un->un_f_non_devbsize_supported) {
8159 8159 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM;
8160 8160 } else {
8161 8161 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM;
8162 8162 }
8163 8163 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
8164 8164 }
8165 8165
8166 8166 /*
8167 8167 * This property is set to 0 by HA software to avoid retries
8168 8168 * on a reserved disk. (The preferred property name is
8169 8169 * "retry-on-reservation-conflict") (1189689)
8170 8170 *
8171 8171 * Note: The use of a global here can have unintended consequences. A
8172 8172 * per instance variable is preferable to match the capabilities of
8173 8173	 * different underlying HBAs (4402600).
8174 8174 */
8175 8175 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi,
8176 8176 DDI_PROP_DONTPASS, "retry-on-reservation-conflict",
8177 8177 sd_retry_on_reservation_conflict);
8178 8178 if (sd_retry_on_reservation_conflict != 0) {
8179 8179 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY,
8180 8180 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name,
8181 8181 sd_retry_on_reservation_conflict);
8182 8182 }
8183 8183
8184 8184 /* Set up options for QFULL handling. */
8185 8185 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
8186 8186 "qfull-retries", -1)) != -1) {
8187 8187 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries",
8188 8188 rval, 1);
8189 8189 }
8190 8190 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
8191 8191 "qfull-retry-interval", -1)) != -1) {
8192 8192 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval",
8193 8193 rval, 1);
8194 8194 }
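	/*
	 * Editor's note: a hypothetical driver .conf fragment exercising
	 * the QFULL properties read above (the values shown are
	 * illustrative only):
	 *
	 *	qfull-retries=3;
	 *	qfull-retry-interval=100;
	 */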
8195 8195
8196 8196 /*
8197 8197 * This just prints a message that announces the existence of the
8198 8198 * device. The message is always printed in the system logfile, but
8199 8199 * only appears on the console if the system is booted with the
8200 8200 * -v (verbose) argument.
8201 8201 */
8202 8202 ddi_report_dev(devi);
8203 8203
8204 8204 un->un_mediastate = DKIO_NONE;
8205 8205
8206 8206 /*
8207 8207	 * Check if this is an SSD (Solid State Drive).
8208 8208 */
8209 8209 sd_check_solid_state(ssc);
8210 8210
8211 8211 /*
8212 8212 * Check whether the drive is in emulation mode.
8213 8213 */
8214 8214 sd_check_emulation_mode(ssc);
8215 8215
8216 8216 cmlb_alloc_handle(&un->un_cmlbhandle);
8217 8217
8218 8218 #if defined(__i386) || defined(__amd64)
8219 8219 /*
8220 8220 * On x86, compensate for off-by-1 legacy error
8221 8221 */
8222 8222 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable &&
8223 8223 (lbasize == un->un_sys_blocksize))
8224 8224 offbyone = CMLB_OFF_BY_ONE;
8225 8225 #endif
8226 8226
8227 8227 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype,
8228 8228 VOID2BOOLEAN(un->un_f_has_removable_media != 0),
8229 8229 VOID2BOOLEAN(un->un_f_is_hotpluggable != 0),
8230 8230 un->un_node_type, offbyone, un->un_cmlbhandle,
8231 8231 (void *)SD_PATH_DIRECT) != 0) {
8232 8232 goto cmlb_attach_failed;
8233 8233 }
8234 8234
8235 8235
8236 8236 /*
8237 8237	 * Read and validate the device's geometry (i.e., disk label).
8238 8238 * A new unformatted drive will not have a valid geometry, but
8239 8239 * the driver needs to successfully attach to this device so
8240 8240 * the drive can be formatted via ioctls.
8241 8241 */
8242 8242 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0,
8243 8243	    (void *)SD_PATH_DIRECT) == 0) ? 1 : 0;
8244 8244
8245 8245 mutex_enter(SD_MUTEX(un));
8246 8246
8247 8247 /*
8248 8248 * Read and initialize the devid for the unit.
8249 8249 */
8250 8250 if (un->un_f_devid_supported) {
8251 8251 sd_register_devid(ssc, devi, reservation_flag);
8252 8252 }
8253 8253 mutex_exit(SD_MUTEX(un));
8254 8254
8255 8255 #if (defined(__fibre))
8256 8256 /*
8257 8257 * Register callbacks for fibre only. You can't do this solely
8258 8258 * on the basis of the devid_type because this is hba specific.
8259 8259 * We need to query our hba capabilities to find out whether to
8260 8260 * register or not.
8261 8261 */
8262 8262 if (un->un_f_is_fibre) {
8263 8263 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
8264 8264 sd_init_event_callbacks(un);
8265 8265 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8266 8266 "sd_unit_attach: un:0x%p event callbacks inserted",
8267 8267 un);
8268 8268 }
8269 8269 }
8270 8270 #endif
8271 8271
8272 8272 if (un->un_f_opt_disable_cache == TRUE) {
8273 8273 /*
8274 8274 * Disable both read cache and write cache. This is
8275 8275 * the historic behavior of the keywords in the config file.
8276 8276 */
8277 8277 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) !=
8278 8278 0) {
8279 8279 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8280 8280 "sd_unit_attach: un:0x%p Could not disable "
8281 8281 "caching", un);
8282 8282 goto devid_failed;
8283 8283 }
8284 8284 }
8285 8285
8286 8286 /*
8287 8287 * Check the value of the WCE bit now and
8288 8288 * set un_f_write_cache_enabled accordingly.
8289 8289 */
8290 8290 (void) sd_get_write_cache_enabled(ssc, &wc_enabled);
8291 8291 mutex_enter(SD_MUTEX(un));
8292 8292 un->un_f_write_cache_enabled = (wc_enabled != 0);
8293 8293 mutex_exit(SD_MUTEX(un));
8294 8294
8295 8295 if ((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR &&
8296 8296 un->un_tgt_blocksize != DEV_BSIZE) ||
8297 8297 un->un_f_enable_rmw) {
8298 8298 if (!(un->un_wm_cache)) {
8299 8299 (void) snprintf(name_str, sizeof (name_str),
8300 8300 "%s%d_cache",
8301 8301 ddi_driver_name(SD_DEVINFO(un)),
8302 8302 ddi_get_instance(SD_DEVINFO(un)));
8303 8303 un->un_wm_cache = kmem_cache_create(
8304 8304 name_str, sizeof (struct sd_w_map),
8305 8305 8, sd_wm_cache_constructor,
8306 8306 sd_wm_cache_destructor, NULL,
8307 8307 (void *)un, NULL, 0);
8308 8308 if (!(un->un_wm_cache)) {
8309 8309 goto wm_cache_failed;
8310 8310 }
8311 8311 }
8312 8312 }
8313 8313
8314 8314 /*
8315 8315 * Check the value of the NV_SUP bit and set
8316 8316 * un_f_suppress_cache_flush accordingly.
8317 8317 */
8318 8318 sd_get_nv_sup(ssc);
8319 8319
8320 8320 /*
8321 8321 * Find out what type of reservation this disk supports.
8322 8322 */
8323 8323 status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL);
8324 8324
8325 8325 switch (status) {
8326 8326 case 0:
8327 8327 /*
8328 8328 * SCSI-3 reservations are supported.
8329 8329 */
8330 8330 un->un_reservation_type = SD_SCSI3_RESERVATION;
8331 8331 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8332 8332 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un);
8333 8333 break;
8334 8334 case ENOTSUP:
8335 8335 /*
8336 8336 * The PERSISTENT RESERVE IN command would not be recognized by
8337 8337 * a SCSI-2 device, so assume the reservation type is SCSI-2.
8338 8338 */
8339 8339 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8340 8340 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un);
8341 8341 un->un_reservation_type = SD_SCSI2_RESERVATION;
8342 8342
8343 8343 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8344 8344 break;
8345 8345 default:
8346 8346 /*
8347 8347 * default to SCSI-3 reservations
8348 8348 */
8349 8349 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8350 8350 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un);
8351 8351 un->un_reservation_type = SD_SCSI3_RESERVATION;
8352 8352
8353 8353 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8354 8354 break;
8355 8355 }
8356 8356
8357 8357 /*
8358 8358 * Set the pstat and error stat values here, so data obtained during the
8359 8359 * previous attach-time routines is available.
8360 8360 *
8361 8361 * Note: This is a critical sequence that needs to be maintained:
8362 8362 * 1) Instantiate the kstats before any routines using the iopath
8363 8363 * (i.e. sd_send_scsi_cmd).
8364 8364 * 2) Initialize the error stats (sd_set_errstats) and partition
8365 8365	 * stats (sd_set_pstats) here, following
8366 8366 * cmlb_validate_geometry(), sd_register_devid(), and
8367 8367 * sd_cache_control().
8368 8368 */
8369 8369
8370 8370 if (un->un_f_pkstats_enabled && geom_label_valid) {
8371 8371 sd_set_pstats(un);
8372 8372 SD_TRACE(SD_LOG_IO_PARTITION, un,
8373 8373 "sd_unit_attach: un:0x%p pstats created and set\n", un);
8374 8374 }
8375 8375
8376 8376 sd_set_errstats(un);
8377 8377 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8378 8378 "sd_unit_attach: un:0x%p errstats set\n", un);
8379 8379
8380 8380
8381 8381 /*
8382 8382 * After successfully attaching an instance, we record the information
8383 8383	 * of how many luns have been attached to the corresponding target
8384 8384	 * and controller for parallel SCSI. This information is used when
8385 8385	 * sd tries to set the tagged queuing capability in the HBA.
8386 8386 */
8387 8387 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) {
8388 8388 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH);
8389 8389 }
8390 8390
8391 8391 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8392 8392 "sd_unit_attach: un:0x%p exit success\n", un);
8393 8393
8394 8394 /* Uninitialize sd_ssc_t pointer */
8395 8395 sd_ssc_fini(ssc);
8396 8396
8397 8397 return (DDI_SUCCESS);
8398 8398
8399 8399 /*
8400 8400 * An error occurred during the attach; clean up & return failure.
8401 8401 */
8402 8402 wm_cache_failed:
8403 8403 devid_failed:
8404 8404
8405 8405 setup_pm_failed:
8406 8406 ddi_remove_minor_node(devi, NULL);
8407 8407
8408 8408 cmlb_attach_failed:
8409 8409 /*
8410 8410 * Cleanup from the scsi_ifsetcap() calls (437868)
8411 8411 */
8412 8412 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
8413 8413 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
8414 8414
8415 8415 /*
8416 8416	 * Refer to the comments on setting tagged-qing at the beginning of
8417 8417	 * sd_unit_attach. We can disable tagged queuing only when there is
8418 8418	 * no lun attached on the target.
8419 8419 */
8420 8420 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
8421 8421 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
8422 8422 }
8423 8423
8424 8424 if (un->un_f_is_fibre == FALSE) {
8425 8425 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
8426 8426 }
8427 8427
8428 8428 spinup_failed:
8429 8429
8430 8430 /* Uninitialize sd_ssc_t pointer */
8431 8431 sd_ssc_fini(ssc);
8432 8432
8433 8433 mutex_enter(SD_MUTEX(un));
8434 8434
8435 8435 /* Deallocate SCSI FMA memory spaces */
8436 8436 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));
8437 8437
8438 8438 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */
8439 8439 if (un->un_direct_priority_timeid != NULL) {
8440 8440 timeout_id_t temp_id = un->un_direct_priority_timeid;
8441 8441 un->un_direct_priority_timeid = NULL;
8442 8442 mutex_exit(SD_MUTEX(un));
8443 8443 (void) untimeout(temp_id);
8444 8444 mutex_enter(SD_MUTEX(un));
8445 8445 }
8446 8446
8447 8447 /* Cancel any pending start/stop timeouts */
8448 8448 if (un->un_startstop_timeid != NULL) {
8449 8449 timeout_id_t temp_id = un->un_startstop_timeid;
8450 8450 un->un_startstop_timeid = NULL;
8451 8451 mutex_exit(SD_MUTEX(un));
8452 8452 (void) untimeout(temp_id);
8453 8453 mutex_enter(SD_MUTEX(un));
8454 8454 }
8455 8455
8456 8456 /* Cancel any pending reset-throttle timeouts */
8457 8457 if (un->un_reset_throttle_timeid != NULL) {
8458 8458 timeout_id_t temp_id = un->un_reset_throttle_timeid;
8459 8459 un->un_reset_throttle_timeid = NULL;
8460 8460 mutex_exit(SD_MUTEX(un));
8461 8461 (void) untimeout(temp_id);
8462 8462 mutex_enter(SD_MUTEX(un));
8463 8463 }
8464 8464
8465 8465 /* Cancel rmw warning message timeouts */
8466 8466 if (un->un_rmw_msg_timeid != NULL) {
8467 8467 timeout_id_t temp_id = un->un_rmw_msg_timeid;
8468 8468 un->un_rmw_msg_timeid = NULL;
8469 8469 mutex_exit(SD_MUTEX(un));
8470 8470 (void) untimeout(temp_id);
8471 8471 mutex_enter(SD_MUTEX(un));
8472 8472 }
8473 8473
8474 8474 /* Cancel any pending retry timeouts */
8475 8475 if (un->un_retry_timeid != NULL) {
8476 8476 timeout_id_t temp_id = un->un_retry_timeid;
8477 8477 un->un_retry_timeid = NULL;
8478 8478 mutex_exit(SD_MUTEX(un));
8479 8479 (void) untimeout(temp_id);
8480 8480 mutex_enter(SD_MUTEX(un));
8481 8481 }
8482 8482
8483 8483 /* Cancel any pending delayed cv broadcast timeouts */
8484 8484 if (un->un_dcvb_timeid != NULL) {
8485 8485 timeout_id_t temp_id = un->un_dcvb_timeid;
8486 8486 un->un_dcvb_timeid = NULL;
8487 8487 mutex_exit(SD_MUTEX(un));
8488 8488 (void) untimeout(temp_id);
8489 8489 mutex_enter(SD_MUTEX(un));
8490 8490 }
8491 8491
8492 8492 mutex_exit(SD_MUTEX(un));
8493 8493
8494 8494	/* There should not be any in-progress I/O, so ASSERT that here */
8495 8495 ASSERT(un->un_ncmds_in_transport == 0);
8496 8496 ASSERT(un->un_ncmds_in_driver == 0);
8497 8497
8498 8498 /* Do not free the softstate if the callback routine is active */
8499 8499 sd_sync_with_callback(un);
8500 8500
8501 8501 /*
8502 8502 * Partition stats apparently are not used with removables. These would
8503 8503 * not have been created during attach, so no need to clean them up...
8504 8504 */
8505 8505 if (un->un_errstats != NULL) {
8506 8506 kstat_delete(un->un_errstats);
8507 8507 un->un_errstats = NULL;
8508 8508 }
8509 8509
8510 8510 create_errstats_failed:
8511 8511
8512 8512 if (un->un_stats != NULL) {
8513 8513 kstat_delete(un->un_stats);
8514 8514 un->un_stats = NULL;
8515 8515 }
8516 8516
8517 8517 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
8518 8518 ddi_xbuf_attr_destroy(un->un_xbuf_attr);
8519 8519
8520 8520 ddi_prop_remove_all(devi);
8521 8521 sema_destroy(&un->un_semoclose);
8522 8522 cv_destroy(&un->un_state_cv);
8523 8523
8524 8524 getrbuf_failed:
8525 8525
8526 8526 sd_free_rqs(un);
8527 8527
8528 8528 alloc_rqs_failed:
8529 8529
8530 8530 devp->sd_private = NULL;
8531 8531 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */
8532 8532
8533 8533 get_softstate_failed:
8534 8534 /*
8535 8535 * Note: the man pages are unclear as to whether or not doing a
8536 8536 * ddi_soft_state_free(sd_state, instance) is the right way to
8537 8537 * clean up after the ddi_soft_state_zalloc() if the subsequent
8538 8538 * ddi_get_soft_state() fails. The implication seems to be
8539 8539 * that the get_soft_state cannot fail if the zalloc succeeds.
8540 8540 */
8541 8541 #ifndef XPV_HVM_DRIVER
8542 8542 ddi_soft_state_free(sd_state, instance);
8543 8543 #endif /* !XPV_HVM_DRIVER */
8544 8544
8545 8545 probe_failed:
8546 8546 scsi_unprobe(devp);
8547 8547
8548 8548 return (DDI_FAILURE);
8549 8549 }
8550 8550
8551 8551
8552 8552 /*
8553 8553 * Function: sd_unit_detach
8554 8554 *
8555 8555 * Description: Performs DDI_DETACH processing for sddetach().
8556 8556 *
8557 8557 * Return Code: DDI_SUCCESS
8558 8558 * DDI_FAILURE
8559 8559 *
8560 8560 * Context: Kernel thread context
8561 8561 */
8562 8562
8563 8563 static int
8564 8564 sd_unit_detach(dev_info_t *devi)
8565 8565 {
8566 8566 struct scsi_device *devp;
8567 8567 struct sd_lun *un;
8568 8568 int i;
8569 8569 int tgt;
8570 8570 dev_t dev;
8571 8571 dev_info_t *pdip = ddi_get_parent(devi);
8572 8572 #ifndef XPV_HVM_DRIVER
8573 8573 int instance = ddi_get_instance(devi);
8574 8574 #endif /* !XPV_HVM_DRIVER */
8575 8575
8576 8576 mutex_enter(&sd_detach_mutex);
8577 8577
8578 8578 /*
8579 8579 * Fail the detach for any of the following:
8580 8580 * - Unable to get the sd_lun struct for the instance
8581 8581 * - A layered driver has an outstanding open on the instance
8582 8582 * - Another thread is already detaching this instance
8583 8583 * - Another thread is currently performing an open
8584 8584 */
8585 8585 devp = ddi_get_driver_private(devi);
8586 8586 if ((devp == NULL) ||
8587 8587 ((un = (struct sd_lun *)devp->sd_private) == NULL) ||
8588 8588 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) ||
8589 8589 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) {
8590 8590 mutex_exit(&sd_detach_mutex);
8591 8591 return (DDI_FAILURE);
8592 8592 }
8593 8593
8594 8594 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un);
8595 8595
8596 8596 /*
8597 8597 * Mark this instance as currently in a detach, to inhibit any
8598 8598 * opens from a layered driver.
8599 8599 */
8600 8600 un->un_detach_count++;
8601 8601 mutex_exit(&sd_detach_mutex);
8602 8602
8603 8603 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
8604 8604 SCSI_ADDR_PROP_TARGET, -1);
8605 8605
8606 8606 dev = sd_make_device(SD_DEVINFO(un));
8607 8607
8608 8608 #ifndef lint
8609 8609 _NOTE(COMPETING_THREADS_NOW);
8610 8610 #endif
8611 8611
8612 8612 mutex_enter(SD_MUTEX(un));
8613 8613
8614 8614 /*
8615 8615 * Fail the detach if there are any outstanding layered
8616 8616 * opens on this device.
8617 8617 */
8618 8618 for (i = 0; i < NDKMAP; i++) {
8619 8619 if (un->un_ocmap.lyropen[i] != 0) {
8620 8620 goto err_notclosed;
8621 8621 }
8622 8622 }
8623 8623
8624 8624 /*
8625 8625 * Verify there are NO outstanding commands issued to this device.
8626 8626	 * i.e., un_ncmds_in_transport == 0.
8627 8627 * It's possible to have outstanding commands through the physio
8628 8628 * code path, even though everything's closed.
8629 8629 */
8630 8630 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) ||
8631 8631 (un->un_direct_priority_timeid != NULL) ||
8632 8632 (un->un_state == SD_STATE_RWAIT)) {
8633 8633 mutex_exit(SD_MUTEX(un));
8634 8634 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8635 8635 "sd_dr_detach: Detach failure due to outstanding cmds\n");
8636 8636 goto err_stillbusy;
8637 8637 }
8638 8638
8639 8639 /*
8640 8640 * If we have the device reserved, release the reservation.
8641 8641 */
8642 8642 if ((un->un_resvd_status & SD_RESERVE) &&
8643 8643 !(un->un_resvd_status & SD_LOST_RESERVE)) {
8644 8644 mutex_exit(SD_MUTEX(un));
8645 8645 /*
8646 8646 * Note: sd_reserve_release sends a command to the device
8647 8647 * via the sd_ioctlcmd() path, and can sleep.
8648 8648 */
8649 8649 if (sd_reserve_release(dev, SD_RELEASE) != 0) {
8650 8650 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8651 8651 "sd_dr_detach: Cannot release reservation \n");
8652 8652 }
8653 8653 } else {
8654 8654 mutex_exit(SD_MUTEX(un));
8655 8655 }
8656 8656
8657 8657 /*
8658 8658	 * Untimeout any reserve recovery, throttle reset, restart unit
8659 8659 * and delayed broadcast timeout threads. Protect the timeout pointer
8660 8660 * from getting nulled by their callback functions.
8661 8661 */
8662 8662 mutex_enter(SD_MUTEX(un));
8663 8663 if (un->un_resvd_timeid != NULL) {
8664 8664 timeout_id_t temp_id = un->un_resvd_timeid;
8665 8665 un->un_resvd_timeid = NULL;
8666 8666 mutex_exit(SD_MUTEX(un));
8667 8667 (void) untimeout(temp_id);
8668 8668 mutex_enter(SD_MUTEX(un));
8669 8669 }
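	/*
	 * Editor's note: the drop-and-reacquire of SD_MUTEX around each
	 * untimeout() call in this sequence follows the usual DDI pattern:
	 * untimeout(9F) waits for a running callback to finish, and the
	 * callback may itself take SD_MUTEX, so holding the mutex across
	 * the call could deadlock.
	 */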
8670 8670
8671 8671 if (un->un_reset_throttle_timeid != NULL) {
8672 8672 timeout_id_t temp_id = un->un_reset_throttle_timeid;
8673 8673 un->un_reset_throttle_timeid = NULL;
8674 8674 mutex_exit(SD_MUTEX(un));
8675 8675 (void) untimeout(temp_id);
8676 8676 mutex_enter(SD_MUTEX(un));
8677 8677 }
8678 8678
8679 8679 if (un->un_startstop_timeid != NULL) {
8680 8680 timeout_id_t temp_id = un->un_startstop_timeid;
8681 8681 un->un_startstop_timeid = NULL;
8682 8682 mutex_exit(SD_MUTEX(un));
8683 8683 (void) untimeout(temp_id);
8684 8684 mutex_enter(SD_MUTEX(un));
8685 8685 }
8686 8686
8687 8687 if (un->un_rmw_msg_timeid != NULL) {
8688 8688 timeout_id_t temp_id = un->un_rmw_msg_timeid;
8689 8689 un->un_rmw_msg_timeid = NULL;
8690 8690 mutex_exit(SD_MUTEX(un));
8691 8691 (void) untimeout(temp_id);
8692 8692 mutex_enter(SD_MUTEX(un));
8693 8693 }
8694 8694
8695 8695 if (un->un_dcvb_timeid != NULL) {
8696 8696 timeout_id_t temp_id = un->un_dcvb_timeid;
8697 8697 un->un_dcvb_timeid = NULL;
8698 8698 mutex_exit(SD_MUTEX(un));
8699 8699 (void) untimeout(temp_id);
8700 8700 } else {
8701 8701 mutex_exit(SD_MUTEX(un));
8702 8702 }
8703 8703
8704 8704 /* Remove any pending reservation reclaim requests for this device */
8705 8705 sd_rmv_resv_reclaim_req(dev);
8706 8706
8707 8707 mutex_enter(SD_MUTEX(un));
8708 8708
8709 8709 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */
8710 8710 if (un->un_direct_priority_timeid != NULL) {
8711 8711 timeout_id_t temp_id = un->un_direct_priority_timeid;
8712 8712 un->un_direct_priority_timeid = NULL;
8713 8713 mutex_exit(SD_MUTEX(un));
8714 8714 (void) untimeout(temp_id);
8715 8715 mutex_enter(SD_MUTEX(un));
8716 8716 }
8717 8717
8718 8718 /* Cancel any active multi-host disk watch thread requests */
8719 8719 if (un->un_mhd_token != NULL) {
8720 8720 mutex_exit(SD_MUTEX(un));
8721 8721 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token));
8722 8722 if (scsi_watch_request_terminate(un->un_mhd_token,
8723 8723 SCSI_WATCH_TERMINATE_NOWAIT)) {
8724 8724 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8725 8725 "sd_dr_detach: Cannot cancel mhd watch request\n");
8726 8726 /*
8727 8727 * Note: We are returning here after having removed
8728 8728 * some driver timeouts above. This is consistent with
8729 8729 * the legacy implementation but perhaps the watch
8730 8730 * terminate call should be made with the wait flag set.
8731 8731 */
8732 8732 goto err_stillbusy;
8733 8733 }
8734 8734 mutex_enter(SD_MUTEX(un));
8735 8735 un->un_mhd_token = NULL;
8736 8736 }
8737 8737
8738 8738 if (un->un_swr_token != NULL) {
8739 8739 mutex_exit(SD_MUTEX(un));
8740 8740 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token));
8741 8741 if (scsi_watch_request_terminate(un->un_swr_token,
8742 8742 SCSI_WATCH_TERMINATE_NOWAIT)) {
8743 8743 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8744 8744 "sd_dr_detach: Cannot cancel swr watch request\n");
8745 8745 /*
8746 8746 * Note: We are returning here after having removed
8747 8747 * some driver timeouts above. This is consistent with
8748 8748 * the legacy implementation but perhaps the watch
8749 8749 * terminate call should be made with the wait flag set.
8750 8750 */
8751 8751 goto err_stillbusy;
8752 8752 }
8753 8753 mutex_enter(SD_MUTEX(un));
8754 8754 un->un_swr_token = NULL;
8755 8755 }
8756 8756
8757 8757 mutex_exit(SD_MUTEX(un));
8758 8758
8759 8759 /*
8760 8760	 * Clear any scsi_reset_notifies. The cancellation is issued
8761 8761	 * even if we never registered a notification.
8762 8762 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX!
8763 8763 */
8764 8764 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
8765 8765 sd_mhd_reset_notify_cb, (caddr_t)un);
8766 8766
8767 8767 /*
8768 8768 * protect the timeout pointers from getting nulled by
8769 8769 * their callback functions during the cancellation process.
8770 8770 * In such a scenario untimeout can be invoked with a null value.
8771 8771 */
8772 8772 _NOTE(NO_COMPETING_THREADS_NOW);
8773 8773
8774 8774 mutex_enter(&un->un_pm_mutex);
8775 8775 if (un->un_pm_idle_timeid != NULL) {
8776 8776 timeout_id_t temp_id = un->un_pm_idle_timeid;
8777 8777 un->un_pm_idle_timeid = NULL;
8778 8778 mutex_exit(&un->un_pm_mutex);
8779 8779
8780 8780 /*
8781 8781 * Timeout is active; cancel it.
8782 8782 * Note that it'll never be active on a device
8783 8783 * that does not support PM therefore we don't
8784 8784 * have to check before calling pm_idle_component.
8785 8785 */
8786 8786 (void) untimeout(temp_id);
8787 8787 (void) pm_idle_component(SD_DEVINFO(un), 0);
8788 8788 mutex_enter(&un->un_pm_mutex);
8789 8789 }
8790 8790
8791 8791 /*
8792 8792 * Check whether there is already a timeout scheduled for power
8793 8793	 * management. If so, don't lower the power here; that's
8794 8794	 * the timeout handler's job.
8795 8795 */
8796 8796 if (un->un_pm_timeid != NULL) {
8797 8797 timeout_id_t temp_id = un->un_pm_timeid;
8798 8798 un->un_pm_timeid = NULL;
8799 8799 mutex_exit(&un->un_pm_mutex);
8800 8800 /*
8801 8801 * Timeout is active; cancel it.
8802 8802 * Note that it'll never be active on a device
8803 8803 * that does not support PM therefore we don't
8804 8804 * have to check before calling pm_idle_component.
8805 8805 */
8806 8806 (void) untimeout(temp_id);
8807 8807 (void) pm_idle_component(SD_DEVINFO(un), 0);
8808 8808
8809 8809 } else {
8810 8810 mutex_exit(&un->un_pm_mutex);
8811 8811 if ((un->un_f_pm_is_enabled == TRUE) &&
8812 8812 (pm_lower_power(SD_DEVINFO(un), 0, SD_PM_STATE_STOPPED(un))
8813 8813 != DDI_SUCCESS)) {
8814 8814 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8815 8815 "sd_dr_detach: Lower power request failed, ignoring.\n");
8816 8816 /*
8817 8817 * Fix for bug: 4297749, item # 13
8818 8818 * The above test now includes a check to see if PM is
8819 8819			 * supported by this device before calling
8820 8820 * pm_lower_power().
8821 8821 * Note, the following is not dead code. The call to
8822 8822 * pm_lower_power above will generate a call back into
8823 8823 * our sdpower routine which might result in a timeout
8824 8824 * handler getting activated. Therefore the following
8825 8825 * code is valid and necessary.
8826 8826 */
8827 8827 mutex_enter(&un->un_pm_mutex);
8828 8828 if (un->un_pm_timeid != NULL) {
8829 8829 timeout_id_t temp_id = un->un_pm_timeid;
8830 8830 un->un_pm_timeid = NULL;
8831 8831 mutex_exit(&un->un_pm_mutex);
8832 8832 (void) untimeout(temp_id);
8833 8833 (void) pm_idle_component(SD_DEVINFO(un), 0);
8834 8834 } else {
8835 8835 mutex_exit(&un->un_pm_mutex);
8836 8836 }
8837 8837 }
8838 8838 }
8839 8839
8840 8840 /*
8841 8841 * Cleanup from the scsi_ifsetcap() calls (437868)
8842 8842 * Relocated here from above to be after the call to
8843 8843 * pm_lower_power, which was getting errors.
8844 8844 */
8845 8845 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
8846 8846 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
8847 8847
8848 8848 /*
8849 8849	 * Currently, tagged queuing is supported per target by the HBA.
8850 8850	 * Setting it on a per-lun instance actually sets the capability
8851 8851	 * for the whole target in the HBA, which affects the luns already
8852 8852	 * attached on the same target. So during detach, we can disable
8853 8853	 * this capability only when this is the only lun left on the
8854 8854	 * target. By doing this, we assume a target has the same tagged
8855 8855	 * queuing capability for every lun. The condition can be removed
8856 8856	 * when the HBA is changed to support per-lun tagged queuing.
8857 8857 */
8858 8858 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) {
8859 8859 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
8860 8860 }
8861 8861
8862 8862 if (un->un_f_is_fibre == FALSE) {
8863 8863 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
8864 8864 }
8865 8865
8866 8866 /*
8867 8867 * Remove any event callbacks, fibre only
8868 8868 */
8869 8869 if (un->un_f_is_fibre == TRUE) {
8870 8870 if ((un->un_insert_event != NULL) &&
8871 8871 (ddi_remove_event_handler(un->un_insert_cb_id) !=
8872 8872 DDI_SUCCESS)) {
8873 8873 /*
8874 8874 * Note: We are returning here after having done
8875 8875 * substantial cleanup above. This is consistent
8876 8876 * with the legacy implementation but this may not
8877 8877 * be the right thing to do.
8878 8878 */
8879 8879 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8880 8880 "sd_dr_detach: Cannot cancel insert event\n");
8881 8881 goto err_remove_event;
8882 8882 }
8883 8883 un->un_insert_event = NULL;
8884 8884
8885 8885 if ((un->un_remove_event != NULL) &&
8886 8886 (ddi_remove_event_handler(un->un_remove_cb_id) !=
8887 8887 DDI_SUCCESS)) {
8888 8888 /*
8889 8889 * Note: We are returning here after having done
8890 8890 * substantial cleanup above. This is consistent
8891 8891 * with the legacy implementation but this may not
8892 8892 * be the right thing to do.
8893 8893 */
8894 8894 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8895 8895 "sd_dr_detach: Cannot cancel remove event\n");
8896 8896 goto err_remove_event;
8897 8897 }
8898 8898 un->un_remove_event = NULL;
8899 8899 }
8900 8900
8901 8901 /* Do not free the softstate if the callback routine is active */
8902 8902 sd_sync_with_callback(un);
8903 8903
8904 8904 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
8905 8905 cmlb_free_handle(&un->un_cmlbhandle);
8906 8906
8907 8907 /*
8908 8908	 * Hold the detach mutex here, to make sure that no other thread can
8909 8909	 * ever access a (partially) freed soft state structure.
8910 8910 */
8911 8911 mutex_enter(&sd_detach_mutex);
8912 8912
8913 8913 /*
8914 8914 * Clean up the soft state struct.
8915 8915 * Cleanup is done in reverse order of allocs/inits.
8916 8916 * At this point there should be no competing threads anymore.
8917 8917 */
8918 8918
8919 8919 scsi_fm_fini(devp);
8920 8920
8921 8921 /*
8922 8922 * Deallocate memory for SCSI FMA.
8923 8923 */
8924 8924 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));
8925 8925
8926 8926 /*
8927 8927 * Unregister and free device id if it was not registered
8928 8928 * by the transport.
8929 8929 */
8930 8930 if (un->un_f_devid_transport_defined == FALSE)
8931 8931 ddi_devid_unregister(devi);
8932 8932
8933 8933 /*
8934 8934 * free the devid structure if allocated before (by ddi_devid_init()
8935 8935 * or ddi_devid_get()).
8936 8936 */
8937 8937 if (un->un_devid) {
8938 8938 ddi_devid_free(un->un_devid);
8939 8939 un->un_devid = NULL;
8940 8940 }
8941 8941
8942 8942 /*
8943 8943 * Destroy wmap cache if it exists.
8944 8944 */
8945 8945 if (un->un_wm_cache != NULL) {
8946 8946 kmem_cache_destroy(un->un_wm_cache);
8947 8947 un->un_wm_cache = NULL;
8948 8948 }
8949 8949
8950 8950 /*
8951 8951 * kstat cleanup is done in detach for all device types (4363169).
8952 8952 * We do not want to fail detach if the device kstats are not deleted
8953 8953 * since there is a confusion about the devo_refcnt for the device.
8954 8954 * We just delete the kstats and let detach complete successfully.
8955 8955 */
8956 8956 if (un->un_stats != NULL) {
8957 8957 kstat_delete(un->un_stats);
8958 8958 un->un_stats = NULL;
8959 8959 }
8960 8960 if (un->un_errstats != NULL) {
8961 8961 kstat_delete(un->un_errstats);
8962 8962 un->un_errstats = NULL;
8963 8963 }
8964 8964
8965 8965 /* Remove partition stats */
8966 8966 if (un->un_f_pkstats_enabled) {
8967 8967 for (i = 0; i < NSDMAP; i++) {
8968 8968 if (un->un_pstats[i] != NULL) {
8969 8969 kstat_delete(un->un_pstats[i]);
8970 8970 un->un_pstats[i] = NULL;
8971 8971 }
8972 8972 }
8973 8973 }
8974 8974
8975 8975 /* Remove xbuf registration */
8976 8976 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
8977 8977 ddi_xbuf_attr_destroy(un->un_xbuf_attr);
8978 8978
8979 8979 /* Remove driver properties */
8980 8980 ddi_prop_remove_all(devi);
8981 8981
8982 8982 mutex_destroy(&un->un_pm_mutex);
8983 8983 cv_destroy(&un->un_pm_busy_cv);
8984 8984
8985 8985 cv_destroy(&un->un_wcc_cv);
8986 8986
8987 8987 /* Open/close semaphore */
8988 8988 sema_destroy(&un->un_semoclose);
8989 8989
8990 8990 /* Removable media condvar. */
8991 8991 cv_destroy(&un->un_state_cv);
8992 8992
8993 8993 /* Suspend/resume condvar. */
8994 8994 cv_destroy(&un->un_suspend_cv);
8995 8995 cv_destroy(&un->un_disk_busy_cv);
8996 8996
8997 8997 sd_free_rqs(un);
8998 8998
8999 8999 /* Free up soft state */
9000 9000 devp->sd_private = NULL;
9001 9001
9002 9002 bzero(un, sizeof (struct sd_lun));
9003 9003 #ifndef XPV_HVM_DRIVER
9004 9004 ddi_soft_state_free(sd_state, instance);
9005 9005 #endif /* !XPV_HVM_DRIVER */
9006 9006
9007 9007 mutex_exit(&sd_detach_mutex);
9008 9008
9009 9009 /* This frees up the INQUIRY data associated with the device. */
9010 9010 scsi_unprobe(devp);
9011 9011
9012 9012 /*
9013 9013 	 * After successfully detaching an instance, we update the count of
9014 9014 	 * luns attached on the relevant target and controller for parallel
9015 9015 	 * SCSI. This information is used when sd tries to set the tagged
9016 9016 	 * queuing capability in the HBA.
9017 9017 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to
9018 9018 * check if the device is parallel SCSI. However, we don't need to
9019 9019 * check here because we've already checked during attach. No device
9020 9020 * that is not parallel SCSI is in the chain.
9021 9021 */
9022 9022 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) {
9023 9023 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH);
9024 9024 }
9025 9025
9026 9026 return (DDI_SUCCESS);
9027 9027
9028 9028 err_notclosed:
9029 9029 mutex_exit(SD_MUTEX(un));
9030 9030
9031 9031 err_stillbusy:
9032 9032 _NOTE(NO_COMPETING_THREADS_NOW);
9033 9033
9034 9034 err_remove_event:
9035 9035 mutex_enter(&sd_detach_mutex);
9036 9036 un->un_detach_count--;
9037 9037 mutex_exit(&sd_detach_mutex);
9038 9038
9039 9039 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n");
9040 9040 return (DDI_FAILURE);
9041 9041 }
9042 9042
9043 9043
9044 9044 /*
9045 9045 * Function: sd_create_errstats
9046 9046 *
9047 9047 * Description: This routine instantiates the device error stats.
9048 9048 *
9049 9049 * Note: During attach the stats are instantiated first so they are
9050 9050 * available for attach-time routines that utilize the driver
9051 9051 * iopath to send commands to the device. The stats are initialized
9052 9052 * separately so data obtained during some attach-time routines is
9053 9053 * available. (4362483)
9054 9054 *
9055 9055 * Arguments: un - driver soft state (unit) structure
9056 9056 * instance - driver instance
9057 9057 *
9058 9058 * Context: Kernel thread context
9059 9059 */
9060 9060
9061 9061 static void
9062 9062 sd_create_errstats(struct sd_lun *un, int instance)
9063 9063 {
9064 9064 struct sd_errstats *stp;
9065 9065 char kstatmodule_err[KSTAT_STRLEN];
9066 9066 char kstatname[KSTAT_STRLEN];
9067 9067 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t));
9068 9068
9069 9069 ASSERT(un != NULL);
9070 9070
9071 9071 if (un->un_errstats != NULL) {
9072 9072 return;
9073 9073 }
9074 9074
9075 9075 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err),
9076 9076 "%serr", sd_label);
9077 9077 (void) snprintf(kstatname, sizeof (kstatname),
9078 9078 "%s%d,err", sd_label, instance);
9079 9079
9080 9080 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname,
9081 9081 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT);
9082 9082
9083 9083 if (un->un_errstats == NULL) {
9084 9084 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
9085 9085 "sd_create_errstats: Failed kstat_create\n");
9086 9086 return;
9087 9087 }
9088 9088
9089 9089 stp = (struct sd_errstats *)un->un_errstats->ks_data;
9090 9090 kstat_named_init(&stp->sd_softerrs, "Soft Errors",
9091 9091 KSTAT_DATA_UINT32);
9092 9092 kstat_named_init(&stp->sd_harderrs, "Hard Errors",
9093 9093 KSTAT_DATA_UINT32);
9094 9094 kstat_named_init(&stp->sd_transerrs, "Transport Errors",
9095 9095 KSTAT_DATA_UINT32);
9096 9096 kstat_named_init(&stp->sd_vid, "Vendor",
9097 9097 KSTAT_DATA_CHAR);
9098 9098 kstat_named_init(&stp->sd_pid, "Product",
9099 9099 KSTAT_DATA_CHAR);
9100 9100 kstat_named_init(&stp->sd_revision, "Revision",
9101 9101 KSTAT_DATA_CHAR);
9102 9102 kstat_named_init(&stp->sd_serial, "Serial No",
9103 9103 KSTAT_DATA_CHAR);
9104 9104 kstat_named_init(&stp->sd_capacity, "Size",
9105 9105 KSTAT_DATA_ULONGLONG);
9106 9106 kstat_named_init(&stp->sd_rq_media_err, "Media Error",
9107 9107 KSTAT_DATA_UINT32);
9108 9108 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready",
9109 9109 KSTAT_DATA_UINT32);
9110 9110 kstat_named_init(&stp->sd_rq_nodev_err, "No Device",
9111 9111 KSTAT_DATA_UINT32);
9112 9112 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable",
9113 9113 KSTAT_DATA_UINT32);
9114 9114 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request",
9115 9115 KSTAT_DATA_UINT32);
9116 9116 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis",
9117 9117 KSTAT_DATA_UINT32);
9118 9118
9119 9119 un->un_errstats->ks_private = un;
9120 9120 un->un_errstats->ks_update = nulldev;
9121 9121
9122 9122 kstat_install(un->un_errstats);
9123 9123 }
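/*
 * Illustrative usage (not driver code): the kstats created above back
 * the "iostat -E" error display and can also be read directly with
 * kstat(1M). Assuming sd_label is "sd" (the non-fibre build), instance
 * 0 would be:
 *
 *	# kstat -m sderr -n sd0,err
 */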
9124 9124
9125 9125
9126 9126 /*
9127 9127 * Function: sd_set_errstats
9128 9128 *
9129 9129 * Description: This routine sets the value of the vendor id, product id,
9130 9130 * revision, serial number, and capacity device error stats.
9131 9131 *
9132 9132 * Note: During attach the stats are instantiated first so they are
9133 9133 * available for attach-time routines that utilize the driver
9134 9134 * iopath to send commands to the device. The stats are initialized
9135 9135 * separately so data obtained during some attach-time routines is
9136 9136 * available. (4362483)
9137 9137 *
9138 9138 * Arguments: un - driver soft state (unit) structure
9139 9139 *
9140 9140 * Context: Kernel thread context
9141 9141 */
9142 9142
9143 9143 static void
9144 9144 sd_set_errstats(struct sd_lun *un)
9145 9145 {
9146 9146 struct sd_errstats *stp;
9147 9147 char *sn;
9148 9148
9149 9149 ASSERT(un != NULL);
9150 9150 ASSERT(un->un_errstats != NULL);
9151 9151 stp = (struct sd_errstats *)un->un_errstats->ks_data;
9152 9152 ASSERT(stp != NULL);
9153 9153 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
9154 9154 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
9155 9155 (void) strncpy(stp->sd_revision.value.c,
9156 9156 un->un_sd->sd_inq->inq_revision, 4);
9157 9157
9158 9158 /*
9159 9159 * All the errstats are persistent across detach/attach,
9160 9160 * so reset all the errstats here in case of the hot
9161 9161 	 * replacement of disk drives, except for Sun qualified
9162 9162 	 * drives that have not changed.
9163 9163 */
9164 9164 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
9165 9165 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
9166 9166 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
9167 9167 stp->sd_softerrs.value.ui32 = 0;
9168 9168 stp->sd_harderrs.value.ui32 = 0;
9169 9169 stp->sd_transerrs.value.ui32 = 0;
9170 9170 stp->sd_rq_media_err.value.ui32 = 0;
9171 9171 stp->sd_rq_ntrdy_err.value.ui32 = 0;
9172 9172 stp->sd_rq_nodev_err.value.ui32 = 0;
9173 9173 stp->sd_rq_recov_err.value.ui32 = 0;
9174 9174 stp->sd_rq_illrq_err.value.ui32 = 0;
9175 9175 stp->sd_rq_pfa_err.value.ui32 = 0;
9176 9176 }
9177 9177
9178 9178 /*
9179 9179 * Set the "Serial No" kstat for Sun qualified drives (indicated by
9180 9180 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid)
9181 9181 * (4376302))
9182 9182 */
9183 9183 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
9184 9184 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
9185 9185 sizeof (SD_INQUIRY(un)->inq_serial));
9186 9186 } else {
9187 9187 /*
9188 9188 * Set the "Serial No" kstat for non-Sun qualified drives
9189 9189 */
9190 9190 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, SD_DEVINFO(un),
9191 9191 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9192 9192 INQUIRY_SERIAL_NO, &sn) == DDI_SUCCESS) {
9193 9193 (void) strlcpy(stp->sd_serial.value.c, sn,
9194 9194 sizeof (stp->sd_serial.value.c));
9195 9195 ddi_prop_free(sn);
9196 9196 }
9197 9197 }
9198 9198
9199 9199 if (un->un_f_blockcount_is_valid != TRUE) {
9200 9200 /*
9201 9201 * Set capacity error stat to 0 for no media. This ensures
9202 9202 * a valid capacity is displayed in response to 'iostat -E'
9203 9203 * when no media is present in the device.
9204 9204 */
9205 9205 stp->sd_capacity.value.ui64 = 0;
9206 9206 } else {
9207 9207 /*
9208 9208 * Multiply un_blockcount by un->un_sys_blocksize to get
9209 9209 * capacity.
9210 9210 *
9211 9211 * Note: for non-512 blocksize devices "un_blockcount" has been
9212 9212 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
9213 9213 * (un_tgt_blocksize / un->un_sys_blocksize).
9214 9214 */
9215 9215 stp->sd_capacity.value.ui64 = (uint64_t)
9216 9216 ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
9217 9217 }
9218 9218 }
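/*
 * Worked example of the capacity math above (hypothetical numbers):
 * a device with 2097152 target blocks of 4096 bytes has un_blockcount
 * scaled at READ CAPACITY time to 2097152 * (4096 / 512) = 16777216,
 * so with un_sys_blocksize == 512 the "Size" kstat reports
 * 16777216 * 512 = 8589934592 bytes (8 GB).
 */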
9219 9219
9220 9220
9221 9221 /*
9222 9222 * Function: sd_set_pstats
9223 9223 *
9224 9224 * Description: This routine instantiates and initializes the partition
9225 9225 * stats for each partition with more than zero blocks.
9226 9226 * (4363169)
9227 9227 *
9228 9228 * Arguments: un - driver soft state (unit) structure
9229 9229 *
9230 9230 * Context: Kernel thread context
9231 9231 */
9232 9232
9233 9233 static void
9234 9234 sd_set_pstats(struct sd_lun *un)
9235 9235 {
9236 9236 char kstatname[KSTAT_STRLEN];
9237 9237 int instance;
9238 9238 int i;
9239 9239 diskaddr_t nblks = 0;
9240 9240 char *partname = NULL;
9241 9241
9242 9242 ASSERT(un != NULL);
9243 9243
9244 9244 instance = ddi_get_instance(SD_DEVINFO(un));
9245 9245
9246 9246 /* Note:x86: is this a VTOC8/VTOC16 difference? */
9247 9247 for (i = 0; i < NSDMAP; i++) {
9248 9248
9249 9249 if (cmlb_partinfo(un->un_cmlbhandle, i,
9250 9250 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
9251 9251 continue;
9252 9252 mutex_enter(SD_MUTEX(un));
9253 9253
9254 9254 if ((un->un_pstats[i] == NULL) &&
9255 9255 (nblks != 0)) {
9256 9256
9257 9257 (void) snprintf(kstatname, sizeof (kstatname),
9258 9258 "%s%d,%s", sd_label, instance,
9259 9259 partname);
9260 9260
9261 9261 un->un_pstats[i] = kstat_create(sd_label,
9262 9262 instance, kstatname, "partition", KSTAT_TYPE_IO,
9263 9263 1, KSTAT_FLAG_PERSISTENT);
9264 9264 if (un->un_pstats[i] != NULL) {
9265 9265 un->un_pstats[i]->ks_lock = SD_MUTEX(un);
9266 9266 kstat_install(un->un_pstats[i]);
9267 9267 }
9268 9268 }
9269 9269 mutex_exit(SD_MUTEX(un));
9270 9270 }
9271 9271 }
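/*
 * Illustrative example: for instance 0 and a partition that cmlb names
 * "a", the snprintf() above yields the kstat name "sd0,a" (assuming
 * sd_label is "sd"), created in class "partition"; these are the
 * per-partition I/O statistics reported by iostat(1M).
 */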
9272 9272
9273 9273
9274 9274 #if (defined(__fibre))
9275 9275 /*
9276 9276 * Function: sd_init_event_callbacks
9277 9277 *
9278 9278 * Description: This routine initializes the insertion and removal event
9279 9279 * callbacks. (fibre only)
9280 9280 *
9281 9281 * Arguments: un - driver soft state (unit) structure
9282 9282 *
9283 9283 * Context: Kernel thread context
9284 9284 */
9285 9285
9286 9286 static void
9287 9287 sd_init_event_callbacks(struct sd_lun *un)
9288 9288 {
9289 9289 ASSERT(un != NULL);
9290 9290
9291 9291 if ((un->un_insert_event == NULL) &&
9292 9292 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
9293 9293 &un->un_insert_event) == DDI_SUCCESS)) {
9294 9294 /*
9295 9295 * Add the callback for an insertion event
9296 9296 */
9297 9297 (void) ddi_add_event_handler(SD_DEVINFO(un),
9298 9298 un->un_insert_event, sd_event_callback, (void *)un,
9299 9299 &(un->un_insert_cb_id));
9300 9300 }
9301 9301
9302 9302 if ((un->un_remove_event == NULL) &&
9303 9303 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
9304 9304 &un->un_remove_event) == DDI_SUCCESS)) {
9305 9305 /*
9306 9306 * Add the callback for a removal event
9307 9307 */
9308 9308 (void) ddi_add_event_handler(SD_DEVINFO(un),
9309 9309 un->un_remove_event, sd_event_callback, (void *)un,
9310 9310 &(un->un_remove_cb_id));
9311 9311 }
9312 9312 }
9313 9313
9314 9314
9315 9315 /*
9316 9316 * Function: sd_event_callback
9317 9317 *
9318 9318 * Description: This routine handles insert/remove events (photon). The
9319 9319 * state is changed to OFFLINE which can be used to supress
9320 9320  *		state is changed to OFFLINE, which can be used to suppress
9321 9321 *
9322 9322 * Arguments: un - driver soft state (unit) structure
9323 9323 *
9324 9324 * Context: Callout thread context
9325 9325 */
9326 9326 /* ARGSUSED */
9327 9327 static void
9328 9328 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
9329 9329 void *bus_impldata)
9330 9330 {
9331 9331 struct sd_lun *un = (struct sd_lun *)arg;
9332 9332
9333 9333 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
9334 9334 if (event == un->un_insert_event) {
9335 9335 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
9336 9336 mutex_enter(SD_MUTEX(un));
9337 9337 if (un->un_state == SD_STATE_OFFLINE) {
9338 9338 if (un->un_last_state != SD_STATE_SUSPENDED) {
9339 9339 un->un_state = un->un_last_state;
9340 9340 } else {
9341 9341 /*
9342 9342 * We have gone through SUSPEND/RESUME while
9343 9343 * we were offline. Restore the last state
9344 9344 */
9345 9345 un->un_state = un->un_save_state;
9346 9346 }
9347 9347 }
9348 9348 mutex_exit(SD_MUTEX(un));
9349 9349
9350 9350 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
9351 9351 } else if (event == un->un_remove_event) {
9352 9352 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
9353 9353 mutex_enter(SD_MUTEX(un));
9354 9354 /*
9355 9355 * We need to handle an event callback that occurs during
9356 9356 * the suspend operation, since we don't prevent it.
9357 9357 */
9358 9358 if (un->un_state != SD_STATE_OFFLINE) {
9359 9359 if (un->un_state != SD_STATE_SUSPENDED) {
9360 9360 New_state(un, SD_STATE_OFFLINE);
9361 9361 } else {
9362 9362 un->un_last_state = SD_STATE_OFFLINE;
9363 9363 }
9364 9364 }
9365 9365 mutex_exit(SD_MUTEX(un));
9366 9366 } else {
9367 9367 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
9368 9368 "!Unknown event\n");
9369 9369 }
9370 9370
9371 9371 }
9372 9372 #endif
9373 9373
9374 9374 /*
9375 9375 * Function: sd_cache_control()
9376 9376 *
9377 9377 * Description: This routine is the driver entry point for setting
9378 9378 * read and write caching by modifying the WCE (write cache
9379 9379 * enable) and RCD (read cache disable) bits of mode
9380 9380 * page 8 (MODEPAGE_CACHING).
9381 9381 *
9382 9382 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
9383 9383 * structure for this target.
9384 9384 * rcd_flag - flag for controlling the read cache
9385 9385 * wce_flag - flag for controlling the write cache
9386 9386 *
9387 9387 * Return Code: EIO
9388 9388 * code returned by sd_send_scsi_MODE_SENSE and
9389 9389 * sd_send_scsi_MODE_SELECT
9390 9390 *
9391 9391 * Context: Kernel Thread
9392 9392 */
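/*
 * For reference, the MODEPAGE_CACHING (page 8) bits manipulated below,
 * per the SBC specification (a sketch of struct mode_caching):
 *
 *	byte 2, bit 2:	WCE - write cache enable
 *	byte 2, bit 0:	RCD - read cache disable
 *
 * so "caching fully enabled" means WCE == 1 and RCD == 0.
 */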
9393 9393
9394 9394 static int
9395 9395 sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag)
9396 9396 {
9397 9397 struct mode_caching *mode_caching_page;
9398 9398 uchar_t *header;
9399 9399 size_t buflen;
9400 9400 int hdrlen;
9401 9401 int bd_len;
9402 9402 int rval = 0;
9403 9403 struct mode_header_grp2 *mhp;
9404 9404 struct sd_lun *un;
9405 9405 int status;
9406 9406
9407 9407 ASSERT(ssc != NULL);
9408 9408 un = ssc->ssc_un;
9409 9409 ASSERT(un != NULL);
9410 9410
9411 9411 /*
9412 9412 * Do a test unit ready, otherwise a mode sense may not work if this
9413 9413 * is the first command sent to the device after boot.
9414 9414 */
9415 9415 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
9416 9416 if (status != 0)
9417 9417 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9418 9418
9419 9419 if (un->un_f_cfg_is_atapi == TRUE) {
9420 9420 hdrlen = MODE_HEADER_LENGTH_GRP2;
9421 9421 } else {
9422 9422 hdrlen = MODE_HEADER_LENGTH;
9423 9423 }
9424 9424
9425 9425 /*
9426 9426 * Allocate memory for the retrieved mode page and its headers. Set
9427 9427 	 * a pointer to the page itself.  Use mode_cache_scsi3 to ensure
9428 9428 	 * we get all of the mode sense data; otherwise, the mode select
9429 9429 * will fail. mode_cache_scsi3 is a superset of mode_caching.
9430 9430 */
9431 9431 buflen = hdrlen + MODE_BLK_DESC_LENGTH +
9432 9432 sizeof (struct mode_cache_scsi3);
9433 9433
9434 9434 header = kmem_zalloc(buflen, KM_SLEEP);
9435 9435
9436 9436 /* Get the information from the device. */
9437 9437 if (un->un_f_cfg_is_atapi == TRUE) {
9438 9438 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen,
9439 9439 MODEPAGE_CACHING, SD_PATH_DIRECT);
9440 9440 } else {
9441 9441 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
9442 9442 MODEPAGE_CACHING, SD_PATH_DIRECT);
9443 9443 }
9444 9444
9445 9445 if (rval != 0) {
9446 9446 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
9447 9447 "sd_cache_control: Mode Sense Failed\n");
9448 9448 goto mode_sense_failed;
9449 9449 }
9450 9450
9451 9451 /*
9452 9452 * Determine size of Block Descriptors in order to locate
9453 9453 * the mode page data. ATAPI devices return 0, SCSI devices
9454 9454 * should return MODE_BLK_DESC_LENGTH.
9455 9455 */
9456 9456 if (un->un_f_cfg_is_atapi == TRUE) {
9457 9457 mhp = (struct mode_header_grp2 *)header;
9458 9458 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
9459 9459 } else {
9460 9460 bd_len = ((struct mode_header *)header)->bdesc_length;
9461 9461 }
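	/*
	 * At this point the returned data is laid out as follows
	 * (sketch); the page pointer computed below is
	 * header + hdrlen + bd_len:
	 *
	 *	+--------------------------+  <- header
	 *	| mode header (hdrlen)     |
	 *	+--------------------------+
	 *	| block descriptor(s)      |  bd_len bytes, 0 for ATAPI
	 *	+--------------------------+
	 *	| caching mode page        |  <- mode_caching_page
	 *	+--------------------------+
	 */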
9462 9462
9463 9463 if (bd_len > MODE_BLK_DESC_LENGTH) {
9464 9464 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0,
9465 9465 "sd_cache_control: Mode Sense returned invalid block "
9466 9466 "descriptor length\n");
9467 9467 rval = EIO;
9468 9468 goto mode_sense_failed;
9469 9469 }
9470 9470
9471 9471 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
9472 9472 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
9473 9473 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
9474 9474 "sd_cache_control: Mode Sense caching page code mismatch "
9475 9475 "%d\n", mode_caching_page->mode_page.code);
9476 9476 rval = EIO;
9477 9477 goto mode_sense_failed;
9478 9478 }
9479 9479
9480 9480 /* Check the relevant bits on successful mode sense. */
9481 9481 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) ||
9482 9482 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) ||
9483 9483 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) ||
9484 9484 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) {
9485 9485
9486 9486 size_t sbuflen;
9487 9487 uchar_t save_pg;
9488 9488
9489 9489 /*
9490 9490 * Construct select buffer length based on the
9491 9491 * length of the sense data returned.
9492 9492 */
9493 9493 sbuflen = hdrlen + bd_len +
9494 9494 sizeof (struct mode_page) +
9495 9495 (int)mode_caching_page->mode_page.length;
9496 9496
9497 9497 /*
9498 9498 * Set the caching bits as requested.
9499 9499 */
9500 9500 if (rcd_flag == SD_CACHE_ENABLE)
9501 9501 mode_caching_page->rcd = 0;
9502 9502 else if (rcd_flag == SD_CACHE_DISABLE)
9503 9503 mode_caching_page->rcd = 1;
9504 9504
9505 9505 if (wce_flag == SD_CACHE_ENABLE)
9506 9506 mode_caching_page->wce = 1;
9507 9507 else if (wce_flag == SD_CACHE_DISABLE)
9508 9508 mode_caching_page->wce = 0;
9509 9509
9510 9510 /*
9511 9511 * Save the page if the mode sense says the
9512 9512 * drive supports it.
9513 9513 */
9514 9514 save_pg = mode_caching_page->mode_page.ps ?
9515 9515 SD_SAVE_PAGE : SD_DONTSAVE_PAGE;
9516 9516
9517 9517 /* Clear reserved bits before mode select. */
9518 9518 mode_caching_page->mode_page.ps = 0;
9519 9519
9520 9520 /*
9521 9521 * Clear out mode header for mode select.
9522 9522 * The rest of the retrieved page will be reused.
9523 9523 */
9524 9524 bzero(header, hdrlen);
9525 9525
9526 9526 if (un->un_f_cfg_is_atapi == TRUE) {
9527 9527 mhp = (struct mode_header_grp2 *)header;
9528 9528 mhp->bdesc_length_hi = bd_len >> 8;
9529 9529 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff;
9530 9530 } else {
9531 9531 ((struct mode_header *)header)->bdesc_length = bd_len;
9532 9532 }
9533 9533
9534 9534 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9535 9535
9536 9536 /* Issue mode select to change the cache settings */
9537 9537 if (un->un_f_cfg_is_atapi == TRUE) {
9538 9538 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, header,
9539 9539 sbuflen, save_pg, SD_PATH_DIRECT);
9540 9540 } else {
9541 9541 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header,
9542 9542 sbuflen, save_pg, SD_PATH_DIRECT);
9543 9543 }
9544 9544
9545 9545 }
9546 9546
9547 9547
9548 9548 mode_sense_failed:
9549 9549
9550 9550 kmem_free(header, buflen);
9551 9551
9552 9552 if (rval != 0) {
9553 9553 if (rval == EIO)
9554 9554 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
9555 9555 else
9556 9556 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9557 9557 }
9558 9558 return (rval);
9559 9559 }
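/*
 * Illustrative call (hypothetical): a flag value that is neither
 * SD_CACHE_ENABLE nor SD_CACHE_DISABLE (e.g. SD_CACHE_NOACTION in
 * sddef.h) leaves that bit untouched, so enabling only the write
 * cache would look like:
 *
 *	if (sd_cache_control(ssc, SD_CACHE_NOACTION,
 *	    SD_CACHE_ENABLE) != 0)
 *		SD_ERROR(SD_LOG_COMMON, un,
 *		    "could not enable write cache\n");
 */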
9560 9560
9561 9561
9562 9562 /*
9563 9563 * Function: sd_get_write_cache_enabled()
9564 9564 *
9565 9565 * Description: This routine is the driver entry point for determining if
9566 9566 * write caching is enabled. It examines the WCE (write cache
9567 9567 * enable) bits of mode page 8 (MODEPAGE_CACHING).
9568 9568 *
9569 9569 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
9570 9570 * structure for this target.
9571 9571 * is_enabled - pointer to int where write cache enabled state
9572 9572 * is returned (non-zero -> write cache enabled)
9573 9573 *
9574 9574 *
9575 9575 * Return Code: EIO
9576 9576 * code returned by sd_send_scsi_MODE_SENSE
9577 9577 *
9578 9578 * Context: Kernel Thread
9579 9579 *
9580 9580  * NOTE: If an ioctl is added to disable the write cache, this sequence
9581 9581  *	should be followed so that no locking is required for accesses to
9582 9582 * un->un_f_write_cache_enabled:
9583 9583 * do mode select to clear wce
9584 9584 * do synchronize cache to flush cache
9585 9585 * set un->un_f_write_cache_enabled = FALSE
9586 9586 *
9587 9587 * Conversely, an ioctl to enable the write cache should be done
9588 9588 * in this order:
9589 9589 * set un->un_f_write_cache_enabled = TRUE
9590 9590 * do mode select to set wce
9591 9591 */
9592 9592
9593 9593 static int
9594 9594 sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled)
9595 9595 {
9596 9596 struct mode_caching *mode_caching_page;
9597 9597 uchar_t *header;
9598 9598 size_t buflen;
9599 9599 int hdrlen;
9600 9600 int bd_len;
9601 9601 int rval = 0;
9602 9602 struct sd_lun *un;
9603 9603 int status;
9604 9604
9605 9605 ASSERT(ssc != NULL);
9606 9606 un = ssc->ssc_un;
9607 9607 ASSERT(un != NULL);
9608 9608 ASSERT(is_enabled != NULL);
9609 9609
9610 9610 /* in case of error, flag as enabled */
9611 9611 *is_enabled = TRUE;
9612 9612
9613 9613 /*
9614 9614 * Do a test unit ready, otherwise a mode sense may not work if this
9615 9615 * is the first command sent to the device after boot.
9616 9616 */
9617 9617 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
9618 9618
9619 9619 if (status != 0)
9620 9620 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9621 9621
9622 9622 if (un->un_f_cfg_is_atapi == TRUE) {
9623 9623 hdrlen = MODE_HEADER_LENGTH_GRP2;
9624 9624 } else {
9625 9625 hdrlen = MODE_HEADER_LENGTH;
9626 9626 }
9627 9627
9628 9628 /*
9629 9629 * Allocate memory for the retrieved mode page and its headers. Set
9630 9630 * a pointer to the page itself.
9631 9631 */
9632 9632 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching);
9633 9633 header = kmem_zalloc(buflen, KM_SLEEP);
9634 9634
9635 9635 /* Get the information from the device. */
9636 9636 if (un->un_f_cfg_is_atapi == TRUE) {
9637 9637 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen,
9638 9638 MODEPAGE_CACHING, SD_PATH_DIRECT);
9639 9639 } else {
9640 9640 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
9641 9641 MODEPAGE_CACHING, SD_PATH_DIRECT);
9642 9642 }
9643 9643
9644 9644 if (rval != 0) {
9645 9645 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
9646 9646 "sd_get_write_cache_enabled: Mode Sense Failed\n");
9647 9647 goto mode_sense_failed;
9648 9648 }
9649 9649
9650 9650 /*
9651 9651 * Determine size of Block Descriptors in order to locate
9652 9652 * the mode page data. ATAPI devices return 0, SCSI devices
9653 9653 * should return MODE_BLK_DESC_LENGTH.
9654 9654 */
9655 9655 if (un->un_f_cfg_is_atapi == TRUE) {
9656 9656 struct mode_header_grp2 *mhp;
9657 9657 mhp = (struct mode_header_grp2 *)header;
9658 9658 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
9659 9659 } else {
9660 9660 bd_len = ((struct mode_header *)header)->bdesc_length;
9661 9661 }
9662 9662
9663 9663 if (bd_len > MODE_BLK_DESC_LENGTH) {
9664 9664 /* FMA should make upset complain here */
9665 9665 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0,
9666 9666 "sd_get_write_cache_enabled: Mode Sense returned invalid "
9667 9667 "block descriptor length\n");
9668 9668 rval = EIO;
9669 9669 goto mode_sense_failed;
9670 9670 }
9671 9671
9672 9672 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
9673 9673 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
9674 9674 /* FMA could make upset complain here */
9675 9675 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
9676 9676 "sd_get_write_cache_enabled: Mode Sense caching page "
9677 9677 "code mismatch %d\n", mode_caching_page->mode_page.code);
9678 9678 rval = EIO;
9679 9679 goto mode_sense_failed;
9680 9680 }
9681 9681 *is_enabled = mode_caching_page->wce;
9682 9682
9683 9683 mode_sense_failed:
9684 9684 if (rval == 0) {
9685 9685 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
9686 9686 } else if (rval == EIO) {
9687 9687 /*
9688 9688 		 * Some disks do not support MODE SENSE(6); we
9689 9689 		 * should ignore this kind of error (sense key is
9690 9690 * 0x5 - illegal request).
9691 9691 */
9692 9692 uint8_t *sensep;
9693 9693 int senlen;
9694 9694
9695 9695 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
9696 9696 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
9697 9697 ssc->ssc_uscsi_cmd->uscsi_rqresid);
9698 9698
9699 9699 if (senlen > 0 &&
9700 9700 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
9701 9701 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
9702 9702 } else {
9703 9703 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
9704 9704 }
9705 9705 } else {
9706 9706 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9707 9707 }
9708 9708 kmem_free(header, buflen);
9709 9709 return (rval);
9710 9710 }
9711 9711
9712 9712 /*
9713 9713 * Function: sd_get_nv_sup()
9714 9714 *
9715 9715 * Description: This routine is the driver entry point for
9716 9716 * determining whether non-volatile cache is supported. This
9717 9717 * determination process works as follows:
9718 9718 *
9719 9719  *		1. sd first queries sd.conf to see whether the
9720 9720  *		suppress_cache_flush bit is set for this device.
9721 9721  *
9722 9722  *		2. if not set there, sd then queries the internal disk table.
9723 9723  *
9724 9724  *		3. if either sd.conf or the internal disk table specifies
9725 9725  *		that cache flush be suppressed, we don't bother checking
9726 9726  *		the NV_SUP bit.
9727 9727 *
9728 9728 * If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries
9729 9729 * the optional INQUIRY VPD page 0x86. If the device
9730 9730 * supports VPD page 0x86, sd examines the NV_SUP
9731 9731 * (non-volatile cache support) bit in the INQUIRY VPD page
9732 9732 * 0x86:
9733 9733 * o If NV_SUP bit is set, sd assumes the device has a
9734 9734 * non-volatile cache and set the
9735 9735 * un_f_sync_nv_supported to TRUE.
9736 9736 * o Otherwise cache is not non-volatile,
9737 9737 * un_f_sync_nv_supported is set to FALSE.
9738 9738 *
9739 9739  * Arguments:	ssc - ssc contains pointer to driver soft state (unit)
9740 9740  *		      structure for this target.
9741 9741  *
9743 9743 * Context: Kernel Thread
9744 9744 */
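/*
 * For reference, the INQUIRY VPD page 0x86 (Extended INQUIRY Data)
 * bytes examined below, per the SPC specification (sketch):
 *
 *	byte 1:	page code (0x86)
 *	byte 3:	page length
 *	byte 6:	contains NV_SUP, selected by the SD_VPD_NV_SUP mask
 *
 * which is why at least 7 valid bytes are required
 * (inq86_len - inq86_resid > 6) before inq86[6] is tested.
 */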
9745 9745
9746 9746 static void
9747 9747 sd_get_nv_sup(sd_ssc_t *ssc)
9748 9748 {
9749 9749 int rval = 0;
9750 9750 uchar_t *inq86 = NULL;
9751 9751 size_t inq86_len = MAX_INQUIRY_SIZE;
9752 9752 size_t inq86_resid = 0;
9753 9753 struct dk_callback *dkc;
9754 9754 struct sd_lun *un;
9755 9755
9756 9756 ASSERT(ssc != NULL);
9757 9757 un = ssc->ssc_un;
9758 9758 ASSERT(un != NULL);
9759 9759
9760 9760 mutex_enter(SD_MUTEX(un));
9761 9761
9762 9762 /*
9763 9763 * Be conservative on the device's support of
9764 9764 * SYNC_NV bit: un_f_sync_nv_supported is
9765 9765 * initialized to be false.
9766 9766 */
9767 9767 un->un_f_sync_nv_supported = FALSE;
9768 9768
9769 9769 /*
9770 9770 * If either sd.conf or internal disk table
9771 9771 * specifies cache flush be suppressed, then
9772 9772 * we don't bother checking NV_SUP bit.
9773 9773 */
9774 9774 if (un->un_f_suppress_cache_flush == TRUE) {
9775 9775 mutex_exit(SD_MUTEX(un));
9776 9776 return;
9777 9777 }
9778 9778
9779 9779 if (sd_check_vpd_page_support(ssc) == 0 &&
9780 9780 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
9781 9781 mutex_exit(SD_MUTEX(un));
9782 9782 /* collect page 86 data if available */
9783 9783 inq86 = kmem_zalloc(inq86_len, KM_SLEEP);
9784 9784
9785 9785 rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len,
9786 9786 0x01, 0x86, &inq86_resid);
9787 9787
9788 9788 if (rval == 0 && (inq86_len - inq86_resid > 6)) {
9789 9789 SD_TRACE(SD_LOG_COMMON, un,
9790 9790 "sd_get_nv_sup: \
9791 9791 successfully get VPD page: %x \
9792 9792 PAGE LENGTH: %x BYTE 6: %x\n",
9793 9793 inq86[1], inq86[3], inq86[6]);
9794 9794
9795 9795 mutex_enter(SD_MUTEX(un));
9796 9796 /*
9797 9797 * check the value of NV_SUP bit: only if the device
9798 9798 * reports NV_SUP bit to be 1, the
9799 9799 * un_f_sync_nv_supported bit will be set to true.
9800 9800 */
9801 9801 if (inq86[6] & SD_VPD_NV_SUP) {
9802 9802 un->un_f_sync_nv_supported = TRUE;
9803 9803 }
9804 9804 mutex_exit(SD_MUTEX(un));
9805 9805 } else if (rval != 0) {
9806 9806 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9807 9807 }
9808 9808
9809 9809 kmem_free(inq86, inq86_len);
9810 9810 } else {
9811 9811 mutex_exit(SD_MUTEX(un));
9812 9812 }
9813 9813
9814 9814 /*
9815 9815 * Send a SYNC CACHE command to check whether
9816 9816 	 * SYNC_NV bit is supported. By this point
9817 9817 	 * un_f_sync_nv_supported should be set to the correct value.
9818 9818 */
9819 9819 mutex_enter(SD_MUTEX(un));
9820 9820 if (un->un_f_sync_nv_supported) {
9821 9821 mutex_exit(SD_MUTEX(un));
9822 9822 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
9823 9823 dkc->dkc_flag = FLUSH_VOLATILE;
9824 9824 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
9825 9825
9826 9826 /*
9827 9827 * Send a TEST UNIT READY command to the device. This should
9828 9828 * clear any outstanding UNIT ATTENTION that may be present.
9829 9829 */
9830 9830 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
9831 9831 if (rval != 0)
9832 9832 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9833 9833
9834 9834 kmem_free(dkc, sizeof (struct dk_callback));
9835 9835 } else {
9836 9836 mutex_exit(SD_MUTEX(un));
9837 9837 }
9838 9838
9839 9839 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: \
9840 9840 un_f_suppress_cache_flush is set to %d\n",
9841 9841 un->un_f_suppress_cache_flush);
9842 9842 }
9843 9843
9844 9844 /*
9845 9845 * Function: sd_make_device
9846 9846 *
9847 9847 * Description: Utility routine to return the Solaris device number from
9848 9848 * the data in the device's dev_info structure.
9849 9849 *
9850 9850 * Return Code: The Solaris device number
9851 9851 *
9852 9852 * Context: Any
9853 9853 */
9854 9854
9855 9855 static dev_t
9856 9856 sd_make_device(dev_info_t *devi)
9857 9857 {
9858 9858 return (makedevice(ddi_driver_major(devi),
9859 9859 ddi_get_instance(devi) << SDUNIT_SHIFT));
9860 9860 }
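/*
 * Illustrative example (shift value is an assumption, see sddef.h):
 * if SDUNIT_SHIFT were 6, instance 2 would own minor numbers 128..191,
 * and the SDUNIT() and SDPART() macros used by sdopen()/sdclose()
 * below recover the instance and partition from a given dev_t.
 */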
9861 9861
9862 9862
9863 9863 /*
9864 9864 * Function: sd_pm_entry
9865 9865 *
9866 9866 * Description: Called at the start of a new command to manage power
9867 9867 * and busy status of a device. This includes determining whether
9868 9868 * the current power state of the device is sufficient for
9869 9869 * performing the command or whether it must be changed.
9870 9870 * The PM framework is notified appropriately.
9871 9871 * Only with a return status of DDI_SUCCESS will the
9872 9872  *		component be marked busy to the framework.
9873 9873 *
9874 9874 * All callers of sd_pm_entry must check the return status
9875 9875  *		and only call sd_pm_exit if it was DDI_SUCCESS. A status
9876 9876 * of DDI_FAILURE indicates the device failed to power up.
9877 9877 * In this case un_pm_count has been adjusted so the result
9878 9878  *		on exit is still powered down, i.e. count is less than 0.
9879 9879 * Calling sd_pm_exit with this count value hits an ASSERT.
9880 9880 *
9881 9881 * Return Code: DDI_SUCCESS or DDI_FAILURE
9882 9882 *
9883 9883 * Context: Kernel thread context.
9884 9884 */
9885 9885
9886 9886 static int
9887 9887 sd_pm_entry(struct sd_lun *un)
9888 9888 {
9889 9889 int return_status = DDI_SUCCESS;
9890 9890
9891 9891 ASSERT(!mutex_owned(SD_MUTEX(un)));
9892 9892 ASSERT(!mutex_owned(&un->un_pm_mutex));
9893 9893
9894 9894 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");
9895 9895
9896 9896 if (un->un_f_pm_is_enabled == FALSE) {
9897 9897 SD_TRACE(SD_LOG_IO_PM, un,
9898 9898 "sd_pm_entry: exiting, PM not enabled\n");
9899 9899 return (return_status);
9900 9900 }
9901 9901
9902 9902 /*
9903 9903 * Just increment a counter if PM is enabled. On the transition from
9904 9904 * 0 ==> 1, mark the device as busy. The iodone side will decrement
9905 9905 * the count with each IO and mark the device as idle when the count
9906 9906 * hits 0.
9907 9907 *
9908 9908 * If the count is less than 0 the device is powered down. If a powered
9909 9909 * down device is successfully powered up then the count must be
9910 9910 * incremented to reflect the power up. Note that it'll get incremented
9911 9911 * a second time to become busy.
9912 9912 *
9913 9913 * Because the following has the potential to change the device state
9914 9914 * and must release the un_pm_mutex to do so, only one thread can be
9915 9915 * allowed through at a time.
9916 9916 */
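	/*
	 * Summary of the un_pm_count states described above (sketch):
	 *
	 *	un_pm_count < 0		device is powered down
	 *	un_pm_count == 0	powered up, idle to the framework
	 *	un_pm_count > 0		powered up, that many commands
	 *				in flight (component busy)
	 */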
9917 9917
9918 9918 mutex_enter(&un->un_pm_mutex);
9919 9919 while (un->un_pm_busy == TRUE) {
9920 9920 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex);
9921 9921 }
9922 9922 un->un_pm_busy = TRUE;
9923 9923
9924 9924 if (un->un_pm_count < 1) {
9925 9925
9926 9926 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n");
9927 9927
9928 9928 /*
9929 9929 * Indicate we are now busy so the framework won't attempt to
9930 9930 * power down the device. This call will only fail if either
9931 9931 * we passed a bad component number or the device has no
9932 9932 * components. Neither of these should ever happen.
9933 9933 */
9934 9934 mutex_exit(&un->un_pm_mutex);
9935 9935 return_status = pm_busy_component(SD_DEVINFO(un), 0);
9936 9936 ASSERT(return_status == DDI_SUCCESS);
9937 9937
9938 9938 mutex_enter(&un->un_pm_mutex);
9939 9939
9940 9940 if (un->un_pm_count < 0) {
9941 9941 mutex_exit(&un->un_pm_mutex);
9942 9942
9943 9943 SD_TRACE(SD_LOG_IO_PM, un,
9944 9944 "sd_pm_entry: power up component\n");
9945 9945
9946 9946 /*
9947 9947 * pm_raise_power will cause sdpower to be called
9948 9948 * which brings the device power level to the
9949 9949 			 * desired state. If successful, un_pm_count and
9950 9950 * un_power_level will be updated appropriately.
9951 9951 */
9952 9952 return_status = pm_raise_power(SD_DEVINFO(un), 0,
9953 9953 SD_PM_STATE_ACTIVE(un));
9954 9954
9955 9955 mutex_enter(&un->un_pm_mutex);
9956 9956
9957 9957 if (return_status != DDI_SUCCESS) {
9958 9958 /*
9959 9959 * Power up failed.
9960 9960 * Idle the device and adjust the count
9961 9961 * so the result on exit is that we're
9962 9962 			 * still powered down, i.e. count is less than 0.
9963 9963 */
9964 9964 SD_TRACE(SD_LOG_IO_PM, un,
9965 9965 "sd_pm_entry: power up failed,"
9966 9966 " idle the component\n");
9967 9967
9968 9968 (void) pm_idle_component(SD_DEVINFO(un), 0);
9969 9969 un->un_pm_count--;
9970 9970 } else {
9971 9971 /*
9972 9972 * Device is powered up, verify the
9973 9973 * count is non-negative.
9974 9974 * This is debug only.
9975 9975 */
9976 9976 ASSERT(un->un_pm_count == 0);
9977 9977 }
9978 9978 }
9979 9979
9980 9980 if (return_status == DDI_SUCCESS) {
9981 9981 /*
9982 9982 * For performance, now that the device has been tagged
9983 9983 * as busy, and it's known to be powered up, update the
9984 9984 * chain types to use jump tables that do not include
9985 9985 * pm. This significantly lowers the overhead and
9986 9986 * therefore improves performance.
9987 9987 */
9988 9988
9989 9989 mutex_exit(&un->un_pm_mutex);
9990 9990 mutex_enter(SD_MUTEX(un));
9991 9991 SD_TRACE(SD_LOG_IO_PM, un,
9992 9992 "sd_pm_entry: changing uscsi_chain_type from %d\n",
9993 9993 un->un_uscsi_chain_type);
9994 9994
9995 9995 if (un->un_f_non_devbsize_supported) {
9996 9996 un->un_buf_chain_type =
9997 9997 SD_CHAIN_INFO_RMMEDIA_NO_PM;
9998 9998 } else {
9999 9999 un->un_buf_chain_type =
10000 10000 SD_CHAIN_INFO_DISK_NO_PM;
10001 10001 }
10002 10002 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
10003 10003
10004 10004 SD_TRACE(SD_LOG_IO_PM, un,
10005 10005 " changed uscsi_chain_type to %d\n",
10006 10006 un->un_uscsi_chain_type);
10007 10007 mutex_exit(SD_MUTEX(un));
10008 10008 mutex_enter(&un->un_pm_mutex);
10009 10009
10010 10010 if (un->un_pm_idle_timeid == NULL) {
10011 10011 /* 300 ms. */
10012 10012 un->un_pm_idle_timeid =
10013 10013 timeout(sd_pm_idletimeout_handler, un,
10014 10014 (drv_usectohz((clock_t)300000)));
10015 10015 /*
10016 10016 * Include an extra call to busy which keeps the
10017 10017 			 * device busy with respect to the PM layer
10018 10018 * until the timer fires, at which time it'll
10019 10019 * get the extra idle call.
10020 10020 */
10021 10021 (void) pm_busy_component(SD_DEVINFO(un), 0);
10022 10022 }
10023 10023 }
10024 10024 }
10025 10025 un->un_pm_busy = FALSE;
10026 10026 /* Next... */
10027 10027 cv_signal(&un->un_pm_busy_cv);
10028 10028
10029 10029 un->un_pm_count++;
10030 10030
10031 10031 SD_TRACE(SD_LOG_IO_PM, un,
10032 10032 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count);
10033 10033
10034 10034 mutex_exit(&un->un_pm_mutex);
10035 10035
10036 10036 return (return_status);
10037 10037 }
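/*
 * Illustrative caller pattern (the same shape appears in sdclose()
 * below): sd_pm_exit() may be called only when sd_pm_entry() returned
 * DDI_SUCCESS.
 *
 *	if (sd_pm_entry(un) == DDI_SUCCESS) {
 *		rval = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL);
 *		sd_pm_exit(un);
 *	} else {
 *		rval = EIO;
 *	}
 */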
10038 10038
10039 10039
10040 10040 /*
10041 10041 * Function: sd_pm_exit
10042 10042 *
10043 10043 * Description: Called at the completion of a command to manage busy
10044 10044 * status for the device. If the device becomes idle the
10045 10045 * PM framework is notified.
10046 10046 *
10047 10047 * Context: Kernel thread context
10048 10048 */
10049 10049
10050 10050 static void
10051 10051 sd_pm_exit(struct sd_lun *un)
10052 10052 {
10053 10053 ASSERT(!mutex_owned(SD_MUTEX(un)));
10054 10054 ASSERT(!mutex_owned(&un->un_pm_mutex));
10055 10055
10056 10056 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n");
10057 10057
10058 10058 /*
10059 10059 * After attach the following flag is only read, so don't
10060 10060 * take the penalty of acquiring a mutex for it.
10061 10061 */
10062 10062 if (un->un_f_pm_is_enabled == TRUE) {
10063 10063
10064 10064 mutex_enter(&un->un_pm_mutex);
10065 10065 un->un_pm_count--;
10066 10066
10067 10067 SD_TRACE(SD_LOG_IO_PM, un,
10068 10068 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count);
10069 10069
10070 10070 ASSERT(un->un_pm_count >= 0);
10071 10071 if (un->un_pm_count == 0) {
10072 10072 mutex_exit(&un->un_pm_mutex);
10073 10073
10074 10074 SD_TRACE(SD_LOG_IO_PM, un,
10075 10075 "sd_pm_exit: idle component\n");
10076 10076
10077 10077 (void) pm_idle_component(SD_DEVINFO(un), 0);
10078 10078
10079 10079 } else {
10080 10080 mutex_exit(&un->un_pm_mutex);
10081 10081 }
10082 10082 }
10083 10083
10084 10084 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n");
10085 10085 }
10086 10086
10087 10087
10088 10088 /*
10089 10089 * Function: sdopen
10090 10090 *
10091 10091 * Description: Driver's open(9e) entry point function.
10092 10092 *
10093 10093  * Arguments:	dev_p - pointer to device number
10094 10094 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
10095 10095 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10096 10096 * cred_p - user credential pointer
10097 10097 *
10098 10098 * Return Code: EINVAL
10099 10099 * ENXIO
10100 10100 * EIO
10101 10101 * EROFS
10102 10102 * EBUSY
10103 10103 *
10104 10104 * Context: Kernel thread context
10105 10105 */
10106 10106 /* ARGSUSED */
10107 10107 static int
10108 10108 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
10109 10109 {
10110 10110 struct sd_lun *un;
10111 10111 int nodelay;
10112 10112 int part;
10113 10113 uint64_t partmask;
10114 10114 int instance;
10115 10115 dev_t dev;
10116 10116 int rval = EIO;
10117 10117 diskaddr_t nblks = 0;
10118 10118 diskaddr_t label_cap;
10119 10119
10120 10120 /* Validate the open type */
10121 10121 if (otyp >= OTYPCNT) {
10122 10122 return (EINVAL);
10123 10123 }
10124 10124
10125 10125 dev = *dev_p;
10126 10126 instance = SDUNIT(dev);
10127 10127 mutex_enter(&sd_detach_mutex);
10128 10128
10129 10129 /*
10130 10130 * Fail the open if there is no softstate for the instance, or
10131 10131 * if another thread somewhere is trying to detach the instance.
10132 10132 */
10133 10133 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
10134 10134 (un->un_detach_count != 0)) {
10135 10135 mutex_exit(&sd_detach_mutex);
10136 10136 /*
10137 10137 * The probe cache only needs to be cleared when open (9e) fails
10138 10138 * with ENXIO (4238046).
10139 10139 */
10140 10140 /*
10141 10141 		 * Unconditionally clearing the probe cache is OK with
10142 10142 		 * separate sd/ssd binaries; on the x86 platform it can be
10143 10143 		 * an issue with both parallel and fibre SCSI in one
10144 10144 		 * binary.
10145 10145 */
10146 10146 sd_scsi_clear_probe_cache();
10147 10147 return (ENXIO);
10148 10148 }
10149 10149
10150 10150 /*
10151 10151 * The un_layer_count is to prevent another thread in specfs from
10152 10152 * trying to detach the instance, which can happen when we are
10153 10153 * called from a higher-layer driver instead of thru specfs.
10154 10154 * This will not be needed when DDI provides a layered driver
10155 10155 * interface that allows specfs to know that an instance is in
10156 10156 * use by a layered driver & should not be detached.
10157 10157 *
10158 10158 * Note: the semantics for layered driver opens are exactly one
10159 10159 * close for every open.
10160 10160 */
10161 10161 if (otyp == OTYP_LYR) {
10162 10162 un->un_layer_count++;
10163 10163 }
10164 10164
10165 10165 /*
10166 10166 * Keep a count of the current # of opens in progress. This is because
10167 10167 * some layered drivers try to call us as a regular open. This can
10168 10168 	 * cause problems that we cannot prevent; however, by keeping this count
10169 10169 * we can at least keep our open and detach routines from racing against
10170 10170 * each other under such conditions.
10171 10171 */
10172 10172 un->un_opens_in_progress++;
10173 10173 mutex_exit(&sd_detach_mutex);
10174 10174
10175 10175 nodelay = (flag & (FNDELAY | FNONBLOCK));
10176 10176 part = SDPART(dev);
10177 10177 partmask = 1 << part;
10178 10178
10179 10179 /*
10180 10180 * We use a semaphore here in order to serialize
10181 10181 * open and close requests on the device.
10182 10182 */
10183 10183 sema_p(&un->un_semoclose);
10184 10184
10185 10185 mutex_enter(SD_MUTEX(un));
10186 10186
10187 10187 /*
10188 10188 * All device accesses go thru sdstrategy() where we check
10189 10189 	 * on suspend status, but there could be a scsi_poll command,
10190 10190 * which bypasses sdstrategy(), so we need to check pm
10191 10191 * status.
10192 10192 */
10193 10193
10194 10194 if (!nodelay) {
10195 10195 while ((un->un_state == SD_STATE_SUSPENDED) ||
10196 10196 (un->un_state == SD_STATE_PM_CHANGING)) {
10197 10197 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10198 10198 }
10199 10199
10200 10200 mutex_exit(SD_MUTEX(un));
10201 10201 if (sd_pm_entry(un) != DDI_SUCCESS) {
10202 10202 rval = EIO;
10203 10203 SD_ERROR(SD_LOG_OPEN_CLOSE, un,
10204 10204 "sdopen: sd_pm_entry failed\n");
10205 10205 goto open_failed_with_pm;
10206 10206 }
10207 10207 mutex_enter(SD_MUTEX(un));
10208 10208 }
10209 10209
10210 10210 /* check for previous exclusive open */
10211 10211 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
10212 10212 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10213 10213 "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
10214 10214 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
10215 10215
10216 10216 if (un->un_exclopen & (partmask)) {
10217 10217 goto excl_open_fail;
10218 10218 }
10219 10219
10220 10220 if (flag & FEXCL) {
10221 10221 int i;
10222 10222 if (un->un_ocmap.lyropen[part]) {
10223 10223 goto excl_open_fail;
10224 10224 }
10225 10225 for (i = 0; i < (OTYPCNT - 1); i++) {
10226 10226 if (un->un_ocmap.regopen[i] & (partmask)) {
10227 10227 goto excl_open_fail;
10228 10228 }
10229 10229 }
10230 10230 }
10231 10231
10232 10232 /*
10233 10233 * Check the write permission if this is a removable media device,
10234 10234 * NDELAY has not been set, and writable permission is requested.
10235 10235 *
10236 10236 * Note: If NDELAY was set and this is write-protected media the WRITE
10237 10237 * attempt will fail with EIO as part of the I/O processing. This is a
10238 10238 * more permissive implementation that allows the open to succeed and
10239 10239 * WRITE attempts to fail when appropriate.
10240 10240 */
10241 10241 if (un->un_f_chk_wp_open) {
10242 10242 if ((flag & FWRITE) && (!nodelay)) {
10243 10243 mutex_exit(SD_MUTEX(un));
10244 10244 /*
10245 10245 			 * Defer the write-permission check for a writable
10246 10246 			 * DVD drive until sdstrategy; do not fail the open
10247 10247 			 * even if FWRITE is set, as the device can be
10248 10248 			 * writable depending upon the media, and the media
10249 10249 			 * can change after the call to open().
10250 10250 */
10251 10251 if (un->un_f_dvdram_writable_device == FALSE) {
10252 10252 if (ISCD(un) || sr_check_wp(dev)) {
10253 10253 rval = EROFS;
10254 10254 mutex_enter(SD_MUTEX(un));
10255 10255 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10256 10256 "write to cd or write protected media\n");
10257 10257 goto open_fail;
10258 10258 }
10259 10259 }
10260 10260 mutex_enter(SD_MUTEX(un));
10261 10261 }
10262 10262 }
10263 10263
10264 10264 /*
10265 10265 * If opening in NDELAY/NONBLOCK mode, just return.
10266 10266 * Check if disk is ready and has a valid geometry later.
10267 10267 */
10268 10268 if (!nodelay) {
10269 10269 sd_ssc_t *ssc;
10270 10270
10271 10271 mutex_exit(SD_MUTEX(un));
10272 10272 ssc = sd_ssc_init(un);
10273 10273 rval = sd_ready_and_valid(ssc, part);
10274 10274 sd_ssc_fini(ssc);
10275 10275 mutex_enter(SD_MUTEX(un));
10276 10276 /*
10277 10277 * Fail if device is not ready or if the number of disk
10278 10278 * blocks is zero or negative for non CD devices.
10279 10279 	 * blocks is zero or negative for non-CD devices.
10280 10280
10281 10281 nblks = 0;
10282 10282
10283 10283 if (rval == SD_READY_VALID && (!ISCD(un))) {
10284 10284 /* if cmlb_partinfo fails, nblks remains 0 */
10285 10285 mutex_exit(SD_MUTEX(un));
10286 10286 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
10287 10287 NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
10288 10288 mutex_enter(SD_MUTEX(un));
10289 10289 }
10290 10290
10291 10291 if ((rval != SD_READY_VALID) ||
10292 10292 (!ISCD(un) && nblks <= 0)) {
10293 10293 rval = un->un_f_has_removable_media ? ENXIO : EIO;
10294 10294 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10295 10295 "device not ready or invalid disk block value\n");
10296 10296 goto open_fail;
10297 10297 }
10298 10298 #if defined(__i386) || defined(__amd64)
10299 10299 } else {
10300 10300 uchar_t *cp;
10301 10301 /*
10302 10302 * x86 requires special nodelay handling, so that p0 is
10303 10303 * always defined and accessible.
10304 10304 * Invalidate geometry only if device is not already open.
10305 10305 */
10306 10306 cp = &un->un_ocmap.chkd[0];
10307 10307 while (cp < &un->un_ocmap.chkd[OCSIZE]) {
10308 10308 if (*cp != (uchar_t)0) {
10309 10309 break;
10310 10310 }
10311 10311 cp++;
10312 10312 }
10313 10313 if (cp == &un->un_ocmap.chkd[OCSIZE]) {
10314 10314 mutex_exit(SD_MUTEX(un));
10315 10315 cmlb_invalidate(un->un_cmlbhandle,
10316 10316 (void *)SD_PATH_DIRECT);
10317 10317 mutex_enter(SD_MUTEX(un));
10318 10318 }
10319 10319
10320 10320 #endif
10321 10321 }
10322 10322
10323 10323 if (otyp == OTYP_LYR) {
10324 10324 un->un_ocmap.lyropen[part]++;
10325 10325 } else {
10326 10326 un->un_ocmap.regopen[otyp] |= partmask;
10327 10327 }
10328 10328
10329 10329 /* Set up open and exclusive open flags */
10330 10330 if (flag & FEXCL) {
10331 10331 un->un_exclopen |= (partmask);
10332 10332 }
10333 10333
10334 10334 /*
10335 10335 * If the lun is EFI labeled and lun capacity is greater than the
10336 10336 * capacity contained in the label, log a sys-event to notify the
10337 10337 * interested module.
10338 10338 * To avoid an infinite loop of logging sys-event, we only log the
10339 10339 * event when the lun is not opened in NDELAY mode. The event handler
10340 10340 * should open the lun in NDELAY mode.
10341 10341 */
10342 10342 if (!nodelay) {
10343 10343 mutex_exit(SD_MUTEX(un));
10344 10344 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
10345 10345 (void*)SD_PATH_DIRECT) == 0) {
10346 10346 mutex_enter(SD_MUTEX(un));
10347 10347 if (un->un_f_blockcount_is_valid &&
10348 10348 un->un_blockcount > label_cap &&
10349 10349 un->un_f_expnevent == B_FALSE) {
10350 10350 un->un_f_expnevent = B_TRUE;
10351 10351 mutex_exit(SD_MUTEX(un));
10352 10352 sd_log_lun_expansion_event(un,
10353 10353 (nodelay ? KM_NOSLEEP : KM_SLEEP));
10354 10354 mutex_enter(SD_MUTEX(un));
10355 10355 }
10356 10356 } else {
10357 10357 mutex_enter(SD_MUTEX(un));
10358 10358 }
10359 10359 }
10360 10360
10361 10361 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10362 10362 "open of part %d type %d\n", part, otyp);
10363 10363
10364 10364 mutex_exit(SD_MUTEX(un));
10365 10365 if (!nodelay) {
10366 10366 sd_pm_exit(un);
10367 10367 }
10368 10368
10369 10369 sema_v(&un->un_semoclose);
10370 10370
10371 10371 mutex_enter(&sd_detach_mutex);
10372 10372 un->un_opens_in_progress--;
10373 10373 mutex_exit(&sd_detach_mutex);
10374 10374
10375 10375 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n");
10376 10376 return (DDI_SUCCESS);
10377 10377
10378 10378 excl_open_fail:
10379 10379 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n");
10380 10380 rval = EBUSY;
10381 10381
10382 10382 open_fail:
10383 10383 mutex_exit(SD_MUTEX(un));
10384 10384
10385 10385 /*
10386 10386 * On a failed open we must exit the pm management.
10387 10387 */
10388 10388 if (!nodelay) {
10389 10389 sd_pm_exit(un);
10390 10390 }
10391 10391 open_failed_with_pm:
10392 10392 sema_v(&un->un_semoclose);
10393 10393
10394 10394 mutex_enter(&sd_detach_mutex);
10395 10395 un->un_opens_in_progress--;
10396 10396 if (otyp == OTYP_LYR) {
10397 10397 un->un_layer_count--;
10398 10398 }
10399 10399 mutex_exit(&sd_detach_mutex);
10400 10400
10401 10401 return (rval);
10402 10402 }
10403 10403
10404 10404
10405 10405 /*
10406 10406 * Function: sdclose
10407 10407 *
10408 10408 * Description: Driver's close(9e) entry point function.
10409 10409 *
10410 10410 * Arguments: dev - device number
10411 10411 * flag - file status flag, informational only
10412 10412 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10413 10413 * cred_p - user credential pointer
10414 10414 *
10415 10415 * Return Code: ENXIO
10416 10416 *
10417 10417 * Context: Kernel thread context
10418 10418 */
10419 10419 /* ARGSUSED */
10420 10420 static int
10421 10421 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
10422 10422 {
10423 10423 struct sd_lun *un;
10424 10424 uchar_t *cp;
10425 10425 int part;
10426 10426 int nodelay;
10427 10427 int rval = 0;
10428 10428
10429 10429 /* Validate the open type */
10430 10430 if (otyp >= OTYPCNT) {
10431 10431 return (ENXIO);
10432 10432 }
10433 10433
10434 10434 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10435 10435 return (ENXIO);
10436 10436 }
10437 10437
10438 10438 part = SDPART(dev);
10439 10439 nodelay = flag & (FNDELAY | FNONBLOCK);
10440 10440
10441 10441 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10442 10442 "sdclose: close of part %d type %d\n", part, otyp);
10443 10443
10444 10444 /*
10445 10445 * We use a semaphore here in order to serialize
10446 10446 * open and close requests on the device.
10447 10447 */
10448 10448 sema_p(&un->un_semoclose);
10449 10449
10450 10450 mutex_enter(SD_MUTEX(un));
10451 10451
10452 10452 /* Don't proceed if power is being changed. */
10453 10453 while (un->un_state == SD_STATE_PM_CHANGING) {
10454 10454 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10455 10455 }
10456 10456
10457 10457 if (un->un_exclopen & (1 << part)) {
10458 10458 un->un_exclopen &= ~(1 << part);
10459 10459 }
10460 10460
10461 10461 /* Update the open partition map */
10462 10462 if (otyp == OTYP_LYR) {
10463 10463 un->un_ocmap.lyropen[part] -= 1;
10464 10464 } else {
10465 10465 un->un_ocmap.regopen[otyp] &= ~(1 << part);
10466 10466 }
10467 10467
10468 10468 cp = &un->un_ocmap.chkd[0];
10469 10469 while (cp < &un->un_ocmap.chkd[OCSIZE]) {
10470 10470 		if (*cp != (uchar_t)0) {
10471 10471 break;
10472 10472 }
10473 10473 cp++;
10474 10474 }
10475 10475
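	/*
	 * un_ocmap.chkd[] overlays the lyropen[] and regopen[] arrays
	 * (un_ocmap is a union; see sddef.h), so finding no nonzero
	 * byte here means no open of any type remains on any partition.
	 */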
10476 10476 if (cp == &un->un_ocmap.chkd[OCSIZE]) {
10477 10477 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");
10478 10478
10479 10479 /*
10480 10480 		 * We avoid persistence upon the last close, and set
10481 10481 * the throttle back to the maximum.
10482 10482 */
10483 10483 un->un_throttle = un->un_saved_throttle;
10484 10484
10485 10485 if (un->un_state == SD_STATE_OFFLINE) {
10486 10486 if (un->un_f_is_fibre == FALSE) {
10487 10487 scsi_log(SD_DEVINFO(un), sd_label,
10488 10488 CE_WARN, "offline\n");
10489 10489 }
10490 10490 mutex_exit(SD_MUTEX(un));
10491 10491 cmlb_invalidate(un->un_cmlbhandle,
10492 10492 (void *)SD_PATH_DIRECT);
10493 10493 mutex_enter(SD_MUTEX(un));
10494 10494
10495 10495 } else {
10496 10496 /*
10497 10497 * Flush any outstanding writes in NVRAM cache.
10498 10498 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
10499 10499 			 * cmd; it may not work for non-Pluto devices.
10500 10500 * SYNCHRONIZE CACHE is not required for removables,
10501 10501 * except DVD-RAM drives.
10502 10502 *
10503 10503 * Also note: because SYNCHRONIZE CACHE is currently
10504 10504 * the only command issued here that requires the
10505 10505 * drive be powered up, only do the power up before
10506 10506 * sending the Sync Cache command. If additional
10507 10507 * commands are added which require a powered up
10508 10508 * drive, the following sequence may have to change.
10509 10509 *
10510 10510 * And finally, note that parallel SCSI on SPARC
10511 10511 * only issues a Sync Cache to DVD-RAM, a newly
10512 10512 * supported device.
10513 10513 */
10514 10514 #if defined(__i386) || defined(__amd64)
10515 10515 if ((un->un_f_sync_cache_supported &&
10516 10516 un->un_f_sync_cache_required) ||
10517 10517 un->un_f_dvdram_writable_device == TRUE) {
10518 10518 #else
10519 10519 if (un->un_f_dvdram_writable_device == TRUE) {
10520 10520 #endif
10521 10521 mutex_exit(SD_MUTEX(un));
10522 10522 if (sd_pm_entry(un) == DDI_SUCCESS) {
10523 10523 rval =
10524 10524 sd_send_scsi_SYNCHRONIZE_CACHE(un,
10525 10525 NULL);
10526 10526 /* ignore error if not supported */
10527 10527 if (rval == ENOTSUP) {
10528 10528 rval = 0;
10529 10529 } else if (rval != 0) {
10530 10530 rval = EIO;
10531 10531 }
10532 10532 sd_pm_exit(un);
10533 10533 } else {
10534 10534 rval = EIO;
10535 10535 }
10536 10536 mutex_enter(SD_MUTEX(un));
10537 10537 }
10538 10538
10539 10539 /*
10540 10540 			 * For devices which support DOOR_LOCK, send an ALLOW
10541 10541 			 * MEDIA REMOVAL command, but don't get upset if it
10542 10542 			 * fails. We need to raise the power of the drive before
10543 10543 			 * we can call sd_send_scsi_DOORLOCK().
10544 10544 */
10545 10545 if (un->un_f_doorlock_supported) {
10546 10546 mutex_exit(SD_MUTEX(un));
10547 10547 if (sd_pm_entry(un) == DDI_SUCCESS) {
10548 10548 sd_ssc_t *ssc;
10549 10549
10550 10550 ssc = sd_ssc_init(un);
10551 10551 rval = sd_send_scsi_DOORLOCK(ssc,
10552 10552 SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
10553 10553 if (rval != 0)
10554 10554 sd_ssc_assessment(ssc,
10555 10555 SD_FMT_IGNORE);
10556 10556 sd_ssc_fini(ssc);
10557 10557
10558 10558 sd_pm_exit(un);
10559 10559 if (ISCD(un) && (rval != 0) &&
10560 10560 (nodelay != 0)) {
10561 10561 rval = ENXIO;
10562 10562 }
10563 10563 } else {
10564 10564 rval = EIO;
10565 10565 }
10566 10566 mutex_enter(SD_MUTEX(un));
10567 10567 }
10568 10568
10569 10569 /*
10570 10570 * If a device has removable media, invalidate all
10571 10571 * parameters related to media, such as geometry,
10572 10572 * blocksize, and blockcount.
10573 10573 */
10574 10574 if (un->un_f_has_removable_media) {
10575 10575 sr_ejected(un);
10576 10576 }
10577 10577
10578 10578 /*
10579 10579 * Destroy the cache (if it exists) which was
10580 10580 * allocated for the write maps since this is
10581 10581 * the last close for this media.
10582 10582 */
10583 10583 if (un->un_wm_cache) {
10584 10584 /*
10585 10585 				 * Check if there are pending commands;
10586 10586 				 * if there are, give a warning and
10587 10587 				 * do not destroy the cache.
10588 10588 */
10589 10589 if (un->un_ncmds_in_driver > 0) {
10590 10590 scsi_log(SD_DEVINFO(un),
10591 10591 sd_label, CE_WARN,
10592 10592 "Unable to clean up memory "
10593 10593 "because of pending I/O\n");
10594 10594 } else {
10595 10595 kmem_cache_destroy(
10596 10596 un->un_wm_cache);
10597 10597 un->un_wm_cache = NULL;
10598 10598 }
10599 10599 }
10600 10600 }
10601 10601 }
10602 10602
10603 10603 mutex_exit(SD_MUTEX(un));
10604 10604 sema_v(&un->un_semoclose);
10605 10605
10606 10606 if (otyp == OTYP_LYR) {
10607 10607 mutex_enter(&sd_detach_mutex);
10608 10608 /*
10609 10609 * The detach routine may run when the layer count
10610 10610 * drops to zero.
10611 10611 */
10612 10612 un->un_layer_count--;
10613 10613 mutex_exit(&sd_detach_mutex);
10614 10614 }
10615 10615
10616 10616 return (rval);
10617 10617 }
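As an aside, the "last close" test in sdclose() above relies on the open-count map being a flat byte array: the layered-open counters and the regular-open bitmasks alias the same chkd[] storage, so scanning for any nonzero byte answers "is anything still open?" in one pass. A minimal standalone sketch of that idea follows; the array size here is an assumption for illustration, not the real sddef.h definition.

	#define	OCSIZE_EXAMPLE	20	/* assumed size of the check array */

	/*
	 * Return 1 when every per-partition open count/bitmask byte is
	 * zero, i.e. this close was the last close of the device.
	 */
	static int
	is_last_close(const unsigned char chkd[OCSIZE_EXAMPLE])
	{
		const unsigned char *cp = &chkd[0];

		while (cp < &chkd[OCSIZE_EXAMPLE]) {
			if (*cp != 0)
				return (0);	/* some open still active */
			cp++;
		}
		return (1);	/* last close: safe to quiesce the device */
	}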
10618 10618
10619 10619
10620 10620 /*
10621 10621 * Function: sd_ready_and_valid
10622 10622 *
10623 10623 * Description: Test if device is ready and has a valid geometry.
10624 10624 *
10625 10625 * Arguments: ssc - sd_ssc_t will contain un
10626 10626 * un - driver soft state (unit) structure
10627 10627 *
10628 10628 * Return Code: SD_READY_VALID ready and valid label
10629 10629 * SD_NOT_READY_VALID not ready, no label
10630 10630 * SD_RESERVED_BY_OTHERS reservation conflict
10631 10631 *
10632 10632 * Context: Never called at interrupt context.
10633 10633 */
10634 10634
10635 10635 static int
10636 10636 sd_ready_and_valid(sd_ssc_t *ssc, int part)
10637 10637 {
10638 10638 struct sd_errstats *stp;
10639 10639 uint64_t capacity;
10640 10640 uint_t lbasize;
10641 10641 int rval = SD_READY_VALID;
10642 10642 char name_str[48];
10643 10643 boolean_t is_valid;
10644 10644 struct sd_lun *un;
10645 10645 int status;
10646 10646
10647 10647 ASSERT(ssc != NULL);
10648 10648 un = ssc->ssc_un;
10649 10649 ASSERT(un != NULL);
10650 10650 ASSERT(!mutex_owned(SD_MUTEX(un)));
10651 10651
10652 10652 mutex_enter(SD_MUTEX(un));
10653 10653 /*
10654 10654 	 * If a device has removable media, we must also check that the
10655 10655 	 * media is ready when checking if this device is ready and valid.
10656 10656 */
10657 10657 if (un->un_f_has_removable_media) {
10658 10658 mutex_exit(SD_MUTEX(un));
10659 10659 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10660 10660
10661 10661 if (status != 0) {
10662 10662 rval = SD_NOT_READY_VALID;
10663 10663 mutex_enter(SD_MUTEX(un));
10664 10664
10665 10665 			/* Ignore all failed status for removable media */
10666 10666 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10667 10667
10668 10668 goto done;
10669 10669 }
10670 10670
10671 10671 is_valid = SD_IS_VALID_LABEL(un);
10672 10672 mutex_enter(SD_MUTEX(un));
10673 10673 if (!is_valid ||
10674 10674 (un->un_f_blockcount_is_valid == FALSE) ||
10675 10675 (un->un_f_tgt_blocksize_is_valid == FALSE)) {
10676 10676
10677 10677 			/* The capacity has to be read on every open. */
10678 10678 mutex_exit(SD_MUTEX(un));
10679 10679 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
10680 10680 &lbasize, SD_PATH_DIRECT);
10681 10681
10682 10682 if (status != 0) {
10683 10683 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10684 10684
10685 10685 cmlb_invalidate(un->un_cmlbhandle,
10686 10686 (void *)SD_PATH_DIRECT);
10687 10687 mutex_enter(SD_MUTEX(un));
10688 10688 rval = SD_NOT_READY_VALID;
10689 10689
10690 10690 goto done;
10691 10691 } else {
10692 10692 mutex_enter(SD_MUTEX(un));
10693 10693 sd_update_block_info(un, lbasize, capacity);
10694 10694 }
10695 10695 }
10696 10696
10697 10697 /*
10698 10698 * Check if the media in the device is writable or not.
10699 10699 */
10700 10700 if (!is_valid && ISCD(un)) {
10701 10701 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
10702 10702 }
10703 10703
10704 10704 } else {
10705 10705 /*
10706 10706 * Do a test unit ready to clear any unit attention from non-cd
10707 10707 * devices.
10708 10708 */
10709 10709 mutex_exit(SD_MUTEX(un));
10710 10710
10711 10711 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10712 10712 if (status != 0) {
10713 10713 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10714 10714 }
10715 10715
10716 10716 mutex_enter(SD_MUTEX(un));
10717 10717 }
10718 10718
10719 10719
10720 10720 /*
10721 10721 	 * If this is a non-512-byte-block device, allocate space for
10722 10722 	 * the wmap cache. This is done here because this routine is
10723 10723 	 * called every time the media is changed and the block size
10724 10724 	 * is a function of the media rather than the device.
10725 10725 */
10726 10726 if (((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR ||
10727 10727 un->un_f_non_devbsize_supported) &&
10728 10728 un->un_tgt_blocksize != DEV_BSIZE) ||
10729 10729 un->un_f_enable_rmw) {
10730 10730 if (!(un->un_wm_cache)) {
10731 10731 (void) snprintf(name_str, sizeof (name_str),
10732 10732 "%s%d_cache",
10733 10733 ddi_driver_name(SD_DEVINFO(un)),
10734 10734 ddi_get_instance(SD_DEVINFO(un)));
10735 10735 un->un_wm_cache = kmem_cache_create(
10736 10736 name_str, sizeof (struct sd_w_map),
10737 10737 8, sd_wm_cache_constructor,
10738 10738 sd_wm_cache_destructor, NULL,
10739 10739 (void *)un, NULL, 0);
10740 10740 if (!(un->un_wm_cache)) {
10741 10741 rval = ENOMEM;
10742 10742 goto done;
10743 10743 }
10744 10744 }
10745 10745 }
10746 10746
10747 10747 if (un->un_state == SD_STATE_NORMAL) {
10748 10748 /*
10749 10749 * If the target is not yet ready here (defined by a TUR
10750 10750 * failure), invalidate the geometry and print an 'offline'
10751 10751 * message. This is a legacy message, as the state of the
10752 10752 * target is not actually changed to SD_STATE_OFFLINE.
10753 10753 *
10754 10754 * If the TUR fails for EACCES (Reservation Conflict),
10755 10755 * SD_RESERVED_BY_OTHERS will be returned to indicate
10756 10756 * reservation conflict. If the TUR fails for other
10757 10757 * reasons, SD_NOT_READY_VALID will be returned.
10758 10758 */
10759 10759 int err;
10760 10760
10761 10761 mutex_exit(SD_MUTEX(un));
10762 10762 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10763 10763 mutex_enter(SD_MUTEX(un));
10764 10764
10765 10765 if (err != 0) {
10766 10766 mutex_exit(SD_MUTEX(un));
10767 10767 cmlb_invalidate(un->un_cmlbhandle,
10768 10768 (void *)SD_PATH_DIRECT);
10769 10769 mutex_enter(SD_MUTEX(un));
10770 10770 if (err == EACCES) {
10771 10771 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
10772 10772 "reservation conflict\n");
10773 10773 rval = SD_RESERVED_BY_OTHERS;
10774 10774 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10775 10775 } else {
10776 10776 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
10777 10777 "drive offline\n");
10778 10778 rval = SD_NOT_READY_VALID;
10779 10779 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
10780 10780 }
10781 10781 goto done;
10782 10782 }
10783 10783 }
10784 10784
10785 10785 if (un->un_f_format_in_progress == FALSE) {
10786 10786 mutex_exit(SD_MUTEX(un));
10787 10787
10788 10788 (void) cmlb_validate(un->un_cmlbhandle, 0,
10789 10789 (void *)SD_PATH_DIRECT);
10790 10790 if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL,
10791 10791 NULL, (void *) SD_PATH_DIRECT) != 0) {
10792 10792 rval = SD_NOT_READY_VALID;
10793 10793 mutex_enter(SD_MUTEX(un));
10794 10794
10795 10795 goto done;
10796 10796 }
10797 10797 if (un->un_f_pkstats_enabled) {
10798 10798 sd_set_pstats(un);
10799 10799 SD_TRACE(SD_LOG_IO_PARTITION, un,
10800 10800 "sd_ready_and_valid: un:0x%p pstats created and "
10801 10801 "set\n", un);
10802 10802 }
10803 10803 mutex_enter(SD_MUTEX(un));
10804 10804 }
10805 10805
10806 10806 /*
10807 10807 	 * If this device supports the DOOR_LOCK command, try to send
10808 10808 	 * it to PREVENT MEDIA REMOVAL, but don't get upset if it
10809 10809 	 * fails. For a CD, however, a failure is an error.
10810 10810 */
10811 10811 if (un->un_f_doorlock_supported) {
10812 10812 mutex_exit(SD_MUTEX(un));
10813 10813 status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
10814 10814 SD_PATH_DIRECT);
10815 10815
10816 10816 if ((status != 0) && ISCD(un)) {
10817 10817 rval = SD_NOT_READY_VALID;
10818 10818 mutex_enter(SD_MUTEX(un));
10819 10819
10820 10820 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10821 10821
10822 10822 goto done;
10823 10823 } else if (status != 0)
10824 10824 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10825 10825 mutex_enter(SD_MUTEX(un));
10826 10826 }
10827 10827
10828 10828 	/* The state has changed; inform the media watch routines */
10829 10829 un->un_mediastate = DKIO_INSERTED;
10830 10830 cv_broadcast(&un->un_state_cv);
10831 10831 rval = SD_READY_VALID;
10832 10832
10833 10833 done:
10834 10834
10835 10835 /*
10836 10836 	 * Initialize the capacity kstat value if there was no media
10837 10837 	 * previously (capacity kstat is 0) and media has now been
10838 10838 	 * inserted (un_blockcount > 0).
10839 10839 */
10840 10840 if (un->un_errstats != NULL) {
10841 10841 stp = (struct sd_errstats *)un->un_errstats->ks_data;
10842 10842 if ((stp->sd_capacity.value.ui64 == 0) &&
10843 10843 (un->un_f_blockcount_is_valid == TRUE)) {
10844 10844 stp->sd_capacity.value.ui64 =
10845 10845 (uint64_t)((uint64_t)un->un_blockcount *
10846 10846 un->un_sys_blocksize);
10847 10847 }
10848 10848 }
10849 10849
10850 10850 mutex_exit(SD_MUTEX(un));
10851 10851 return (rval);
10852 10852 }
10853 10853
10854 10854
10855 10855 /*
10856 10856 * Function: sdmin
10857 10857 *
10858 10858 * Description: Routine to limit the size of a data transfer. Used in
10859 10859 * conjunction with physio(9F).
10860 10860 *
10861 10861 * Arguments: bp - pointer to the indicated buf(9S) struct.
10862 10862 *
10863 10863 * Context: Kernel thread context.
10864 10864 */
10865 10865
10866 10866 static void
10867 10867 sdmin(struct buf *bp)
10868 10868 {
10869 10869 struct sd_lun *un;
10870 10870 int instance;
10871 10871
10872 10872 instance = SDUNIT(bp->b_edev);
10873 10873
10874 10874 un = ddi_get_soft_state(sd_state, instance);
10875 10875 ASSERT(un != NULL);
10876 10876
10877 10877 /*
10878 10878 * We depend on buf breakup to restrict
10879 10879 * IO size if it is enabled.
10880 10880 */
10881 10881 if (un->un_buf_breakup_supported) {
10882 10882 return;
10883 10883 }
10884 10884
10885 10885 if (bp->b_bcount > un->un_max_xfer_size) {
10886 10886 bp->b_bcount = un->un_max_xfer_size;
10887 10887 }
10888 10888 }
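For context, sdmin() follows the minphys(9F) contract: physio(9F) invokes it on each buf before issuing the transfer, and the routine may only shrink b_bcount. A hedged standalone sketch of such a clamp; MY_MAX_XFER is an assumed constant, not a value from this driver.

	#include <sys/types.h>
	#include <sys/buf.h>

	#define	MY_MAX_XFER	(1024 * 1024)	/* assumed 1 MB HBA limit */

	/*
	 * minphys(9F)-style routine: clamp the requested transfer size to
	 * what the hardware can handle; physio(9F) loops to move the rest.
	 */
	static void
	my_minphys(struct buf *bp)
	{
		if (bp->b_bcount > MY_MAX_XFER)
			bp->b_bcount = MY_MAX_XFER;
	}

	/*
	 * A read(9E) entry point would then pass it to physio(9F), e.g.:
	 *	return (physio(my_strategy, NULL, dev, B_READ,
	 *	    my_minphys, uio));
	 */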
10889 10889
10890 10890
10891 10891 /*
10892 10892 * Function: sdread
10893 10893 *
10894 10894 * Description: Driver's read(9e) entry point function.
10895 10895 *
10896 10896 * Arguments: dev - device number
10897 10897 * uio - structure pointer describing where data is to be stored
10898 10898 * in user's space
10899 10899 * cred_p - user credential pointer
10900 10900 *
10901 10901 * Return Code: ENXIO
10902 10902 * EIO
10903 10903 * EINVAL
10904 10904 * value returned by physio
10905 10905 *
10906 10906 * Context: Kernel thread context.
10907 10907 */
10908 10908 /* ARGSUSED */
10909 10909 static int
10910 10910 sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
10911 10911 {
10912 10912 struct sd_lun *un = NULL;
10913 10913 int secmask;
10914 10914 int err = 0;
10915 10915 sd_ssc_t *ssc;
10916 10916
10917 10917 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10918 10918 return (ENXIO);
10919 10919 }
10920 10920
10921 10921 ASSERT(!mutex_owned(SD_MUTEX(un)));
10922 10922
10923 10923
10924 10924 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10925 10925 mutex_enter(SD_MUTEX(un));
10926 10926 /*
10927 10927 * Because the call to sd_ready_and_valid will issue I/O we
10928 10928 * must wait here if either the device is suspended or
10929 10929 		 * if its power level is changing.
10930 10930 */
10931 10931 while ((un->un_state == SD_STATE_SUSPENDED) ||
10932 10932 (un->un_state == SD_STATE_PM_CHANGING)) {
10933 10933 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10934 10934 }
10935 10935 un->un_ncmds_in_driver++;
10936 10936 mutex_exit(SD_MUTEX(un));
10937 10937
10938 10938 /* Initialize sd_ssc_t for internal uscsi commands */
10939 10939 ssc = sd_ssc_init(un);
10940 10940 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10941 10941 err = EIO;
10942 10942 } else {
10943 10943 err = 0;
10944 10944 }
10945 10945 sd_ssc_fini(ssc);
10946 10946
10947 10947 mutex_enter(SD_MUTEX(un));
10948 10948 un->un_ncmds_in_driver--;
10949 10949 ASSERT(un->un_ncmds_in_driver >= 0);
10950 10950 mutex_exit(SD_MUTEX(un));
10951 10951 if (err != 0)
10952 10952 return (err);
10953 10953 }
10954 10954
10955 10955 /*
10956 10956 * Read requests are restricted to multiples of the system block size.
10957 10957 */
10958 10958 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
10959 10959 !un->un_f_enable_rmw)
10960 10960 secmask = un->un_tgt_blocksize - 1;
10961 10961 else
10962 10962 secmask = DEV_BSIZE - 1;
10963 10963
10964 10964 if (uio->uio_loffset & ((offset_t)(secmask))) {
10965 10965 SD_ERROR(SD_LOG_READ_WRITE, un,
10966 10966 "sdread: file offset not modulo %d\n",
10967 10967 secmask + 1);
10968 10968 err = EINVAL;
10969 10969 } else if (uio->uio_iov->iov_len & (secmask)) {
10970 10970 SD_ERROR(SD_LOG_READ_WRITE, un,
10971 10971 "sdread: transfer length not modulo %d\n",
10972 10972 secmask + 1);
10973 10973 err = EINVAL;
10974 10974 } else {
10975 10975 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
10976 10976 }
10977 10977
10978 10978 return (err);
10979 10979 }
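The secmask test above works because the block sizes involved are powers of two, so (x & (blksize - 1)) == 0 is equivalent to x % blksize == 0 without a division. A small userland illustration of the same masks; the values are illustrative only.

	#include <stdio.h>

	int
	main(void)
	{
		unsigned int blksize = 4096;		/* e.g. un_tgt_blocksize */
		unsigned int secmask = blksize - 1;	/* 0xFFF */

		/* 8192 is 2 * 4096: aligned, mask result is 0 -> allowed. */
		printf("%u\n", 8192 & secmask);		/* prints 0 */

		/* 4100 is not a multiple of 4096: EINVAL path above. */
		printf("%u\n", 4100 & secmask);		/* prints 4 */
		return (0);
	}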
10980 10980
10981 10981
10982 10982 /*
10983 10983 * Function: sdwrite
10984 10984 *
10985 10985 * Description: Driver's write(9e) entry point function.
10986 10986 *
10987 10987 * Arguments: dev - device number
10988 10988 * uio - structure pointer describing where data is stored in
10989 10989 * user's space
10990 10990 * cred_p - user credential pointer
10991 10991 *
10992 10992 * Return Code: ENXIO
10993 10993 * EIO
10994 10994 * EINVAL
10995 10995 * value returned by physio
10996 10996 *
10997 10997 * Context: Kernel thread context.
10998 10998 */
10999 10999 /* ARGSUSED */
11000 11000 static int
11001 11001 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
11002 11002 {
11003 11003 struct sd_lun *un = NULL;
11004 11004 int secmask;
11005 11005 int err = 0;
11006 11006 sd_ssc_t *ssc;
11007 11007
11008 11008 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
11009 11009 return (ENXIO);
11010 11010 }
11011 11011
11012 11012 ASSERT(!mutex_owned(SD_MUTEX(un)));
11013 11013
11014 11014 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11015 11015 mutex_enter(SD_MUTEX(un));
11016 11016 /*
11017 11017 * Because the call to sd_ready_and_valid will issue I/O we
11018 11018 * must wait here if either the device is suspended or
11019 11019 		 * if its power level is changing.
11020 11020 */
11021 11021 while ((un->un_state == SD_STATE_SUSPENDED) ||
11022 11022 (un->un_state == SD_STATE_PM_CHANGING)) {
11023 11023 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11024 11024 }
11025 11025 un->un_ncmds_in_driver++;
11026 11026 mutex_exit(SD_MUTEX(un));
11027 11027
11028 11028 /* Initialize sd_ssc_t for internal uscsi commands */
11029 11029 ssc = sd_ssc_init(un);
11030 11030 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11031 11031 err = EIO;
11032 11032 } else {
11033 11033 err = 0;
11034 11034 }
11035 11035 sd_ssc_fini(ssc);
11036 11036
11037 11037 mutex_enter(SD_MUTEX(un));
11038 11038 un->un_ncmds_in_driver--;
11039 11039 ASSERT(un->un_ncmds_in_driver >= 0);
11040 11040 mutex_exit(SD_MUTEX(un));
11041 11041 if (err != 0)
11042 11042 return (err);
11043 11043 }
11044 11044
11045 11045 /*
11046 11046 * Write requests are restricted to multiples of the system block size.
11047 11047 */
11048 11048 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11049 11049 !un->un_f_enable_rmw)
11050 11050 secmask = un->un_tgt_blocksize - 1;
11051 11051 else
11052 11052 secmask = DEV_BSIZE - 1;
11053 11053
11054 11054 if (uio->uio_loffset & ((offset_t)(secmask))) {
11055 11055 SD_ERROR(SD_LOG_READ_WRITE, un,
11056 11056 "sdwrite: file offset not modulo %d\n",
11057 11057 secmask + 1);
11058 11058 err = EINVAL;
11059 11059 } else if (uio->uio_iov->iov_len & (secmask)) {
11060 11060 SD_ERROR(SD_LOG_READ_WRITE, un,
11061 11061 "sdwrite: transfer length not modulo %d\n",
11062 11062 secmask + 1);
11063 11063 err = EINVAL;
11064 11064 } else {
11065 11065 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio);
11066 11066 }
11067 11067
11068 11068 return (err);
11069 11069 }
11070 11070
11071 11071
11072 11072 /*
11073 11073 * Function: sdaread
11074 11074 *
11075 11075 * Description: Driver's aread(9e) entry point function.
11076 11076 *
11077 11077 * Arguments: dev - device number
11078 11078 * aio - structure pointer describing where data is to be stored
11079 11079 * cred_p - user credential pointer
11080 11080 *
11081 11081 * Return Code: ENXIO
11082 11082 * EIO
11083 11083 * EINVAL
11084 11084 * value returned by aphysio
11085 11085 *
11086 11086 * Context: Kernel thread context.
11087 11087 */
11088 11088 /* ARGSUSED */
11089 11089 static int
11090 11090 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
11091 11091 {
11092 11092 struct sd_lun *un = NULL;
11093 11093 struct uio *uio = aio->aio_uio;
11094 11094 int secmask;
11095 11095 int err = 0;
11096 11096 sd_ssc_t *ssc;
11097 11097
11098 11098 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
11099 11099 return (ENXIO);
11100 11100 }
11101 11101
11102 11102 ASSERT(!mutex_owned(SD_MUTEX(un)));
11103 11103
11104 11104 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11105 11105 mutex_enter(SD_MUTEX(un));
11106 11106 /*
11107 11107 * Because the call to sd_ready_and_valid will issue I/O we
11108 11108 * must wait here if either the device is suspended or
11109 11109 		 * if its power level is changing.
11110 11110 */
11111 11111 while ((un->un_state == SD_STATE_SUSPENDED) ||
11112 11112 (un->un_state == SD_STATE_PM_CHANGING)) {
11113 11113 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11114 11114 }
11115 11115 un->un_ncmds_in_driver++;
11116 11116 mutex_exit(SD_MUTEX(un));
11117 11117
11118 11118 /* Initialize sd_ssc_t for internal uscsi commands */
11119 11119 ssc = sd_ssc_init(un);
11120 11120 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11121 11121 err = EIO;
11122 11122 } else {
11123 11123 err = 0;
11124 11124 }
11125 11125 sd_ssc_fini(ssc);
11126 11126
11127 11127 mutex_enter(SD_MUTEX(un));
11128 11128 un->un_ncmds_in_driver--;
11129 11129 ASSERT(un->un_ncmds_in_driver >= 0);
11130 11130 mutex_exit(SD_MUTEX(un));
11131 11131 if (err != 0)
11132 11132 return (err);
11133 11133 }
11134 11134
11135 11135 /*
11136 11136 * Read requests are restricted to multiples of the system block size.
11137 11137 */
11138 11138 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11139 11139 !un->un_f_enable_rmw)
11140 11140 secmask = un->un_tgt_blocksize - 1;
11141 11141 else
11142 11142 secmask = DEV_BSIZE - 1;
11143 11143
11144 11144 if (uio->uio_loffset & ((offset_t)(secmask))) {
11145 11145 SD_ERROR(SD_LOG_READ_WRITE, un,
11146 11146 "sdaread: file offset not modulo %d\n",
11147 11147 secmask + 1);
11148 11148 err = EINVAL;
11149 11149 } else if (uio->uio_iov->iov_len & (secmask)) {
11150 11150 SD_ERROR(SD_LOG_READ_WRITE, un,
11151 11151 "sdaread: transfer length not modulo %d\n",
11152 11152 secmask + 1);
11153 11153 err = EINVAL;
11154 11154 } else {
11155 11155 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio);
11156 11156 }
11157 11157
11158 11158 return (err);
11159 11159 }
11160 11160
11161 11161
11162 11162 /*
11163 11163 * Function: sdawrite
11164 11164 *
11165 11165 * Description: Driver's awrite(9e) entry point function.
11166 11166 *
11167 11167 * Arguments: dev - device number
11168 11168 * aio - structure pointer describing where data is stored
11169 11169 * cred_p - user credential pointer
11170 11170 *
11171 11171 * Return Code: ENXIO
11172 11172 * EIO
11173 11173 * EINVAL
11174 11174 * value returned by aphysio
11175 11175 *
11176 11176 * Context: Kernel thread context.
11177 11177 */
11178 11178 /* ARGSUSED */
11179 11179 static int
11180 11180 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
11181 11181 {
11182 11182 struct sd_lun *un = NULL;
11183 11183 struct uio *uio = aio->aio_uio;
11184 11184 int secmask;
11185 11185 int err = 0;
11186 11186 sd_ssc_t *ssc;
11187 11187
11188 11188 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
11189 11189 return (ENXIO);
11190 11190 }
11191 11191
11192 11192 ASSERT(!mutex_owned(SD_MUTEX(un)));
11193 11193
11194 11194 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11195 11195 mutex_enter(SD_MUTEX(un));
11196 11196 /*
11197 11197 * Because the call to sd_ready_and_valid will issue I/O we
11198 11198 * must wait here if either the device is suspended or
11199 11199 		 * if its power level is changing.
11200 11200 */
11201 11201 while ((un->un_state == SD_STATE_SUSPENDED) ||
11202 11202 (un->un_state == SD_STATE_PM_CHANGING)) {
11203 11203 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11204 11204 }
11205 11205 un->un_ncmds_in_driver++;
11206 11206 mutex_exit(SD_MUTEX(un));
11207 11207
11208 11208 /* Initialize sd_ssc_t for internal uscsi commands */
11209 11209 ssc = sd_ssc_init(un);
11210 11210 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11211 11211 err = EIO;
11212 11212 } else {
11213 11213 err = 0;
11214 11214 }
11215 11215 sd_ssc_fini(ssc);
11216 11216
11217 11217 mutex_enter(SD_MUTEX(un));
11218 11218 un->un_ncmds_in_driver--;
11219 11219 ASSERT(un->un_ncmds_in_driver >= 0);
11220 11220 mutex_exit(SD_MUTEX(un));
11221 11221 if (err != 0)
11222 11222 return (err);
11223 11223 }
11224 11224
11225 11225 /*
11226 11226 * Write requests are restricted to multiples of the system block size.
11227 11227 */
11228 11228 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11229 11229 !un->un_f_enable_rmw)
11230 11230 secmask = un->un_tgt_blocksize - 1;
11231 11231 else
11232 11232 secmask = DEV_BSIZE - 1;
11233 11233
11234 11234 if (uio->uio_loffset & ((offset_t)(secmask))) {
11235 11235 SD_ERROR(SD_LOG_READ_WRITE, un,
11236 11236 "sdawrite: file offset not modulo %d\n",
11237 11237 secmask + 1);
11238 11238 err = EINVAL;
11239 11239 } else if (uio->uio_iov->iov_len & (secmask)) {
11240 11240 SD_ERROR(SD_LOG_READ_WRITE, un,
11241 11241 "sdawrite: transfer length not modulo %d\n",
11242 11242 secmask + 1);
11243 11243 err = EINVAL;
11244 11244 } else {
11245 11245 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio);
11246 11246 }
11247 11247
11248 11248 return (err);
11249 11249 }
11250 11250
11251 11251
11252 11252
11253 11253
11254 11254
11255 11255 /*
11256 11256 * Driver IO processing follows the following sequence:
11257 11257 *
11258 11258 * sdioctl(9E) sdstrategy(9E) biodone(9F)
11259 11259 * | | ^
11260 11260 * v v |
11261 11261 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+
11262 11262 * | | | |
11263 11263 * v | | |
11264 11264 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone()
11265 11265 * | | ^ ^
11266 11266 * v v | |
11267 11267 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | |
11268 11268 * | | | |
11269 11269 * +---+ | +------------+ +-------+
11270 11270 * | | | |
11271 11271 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| |
11272 11272 * | v | |
11273 11273 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() |
11274 11274 * | | ^ |
11275 11275 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| |
11276 11276 * | v | |
11277 11277 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() |
11278 11278 * | | ^ |
11279 11279 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| |
11280 11280 * | v | |
11281 11281 * | sd_checksum_iostart() sd_checksum_iodone() |
11282 11282 * | | ^ |
11283 11283 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+
11284 11284 * | v | |
11285 11285 * | sd_pm_iostart() sd_pm_iodone() |
11286 11286 * | | ^ |
11287 11287 * | | | |
11288 11288 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+
11289 11289 * | ^
11290 11290 * v |
11291 11291 * sd_core_iostart() |
11292 11292 * | |
11293 11293 * | +------>(*destroypkt)()
11294 11294 * +-> sd_start_cmds() <-+ | |
11295 11295 * | | | v
11296 11296 * | | | scsi_destroy_pkt(9F)
11297 11297 * | | |
11298 11298 * +->(*initpkt)() +- sdintr()
11299 11299 * | | | |
11300 11300 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx()
11301 11301 * | +-> scsi_setup_cdb(9F) |
11302 11302 * | |
11303 11303 * +--> scsi_transport(9F) |
11304 11304 * | |
11305 11305 * +----> SCSA ---->+
11306 11306 *
11307 11307 *
11308 11308 * This code is based upon the following presumptions:
11309 11309 *
11310 11310 * - iostart and iodone functions operate on buf(9S) structures. These
11311 11311 * functions perform the necessary operations on the buf(9S) and pass
11312 11312 * them along to the next function in the chain by using the macros
11313 11313 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE()
11314 11314 * (for iodone side functions).
11315 11315 *
11316 11316 * - The iostart side functions may sleep. The iodone side functions
11317 11317 * are called under interrupt context and may NOT sleep. Therefore
11318 11318 * iodone side functions also may not call iostart side functions.
11319 11319 * (NOTE: iostart side functions should NOT sleep for memory, as
11320 11320 * this could result in deadlock.)
11321 11321 *
11322 11322 * - An iostart side function may call its corresponding iodone side
11323 11323 * function directly (if necessary).
11324 11324 *
11325 11325 * - In the event of an error, an iostart side function can return a buf(9S)
11326 11326 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and
11327 11327 * b_error in the usual way of course).
11328 11328 *
11329 11329 * - The taskq mechanism may be used by the iodone side functions to dispatch
11330 11330 * requests to the iostart side functions. The iostart side functions in
11331 11331 * this case would be called under the context of a taskq thread, so it's
11332 11332 * OK for them to block/sleep/spin in this case.
11333 11333 *
11334 11334 * - iostart side functions may allocate "shadow" buf(9S) structs and
11335 11335 * pass them along to the next function in the chain. The corresponding
11336 11336 * iodone side functions must coalesce the "shadow" bufs and return
11337 11337 * the "original" buf to the next higher layer.
11338 11338 *
11339 11339 * - The b_private field of the buf(9S) struct holds a pointer to
11340 11340 * an sd_xbuf struct, which contains information needed to
11341 11341 * construct the scsi_pkt for the command.
11342 11342 *
11343 11343 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
11344 11344 * layer must acquire & release the SD_MUTEX(un) as needed.
11345 11345 */
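To make the diagram and presumptions above concrete, here is a deliberately simplified model of the iostart chain traversal: each layer is an entry in a function-pointer array indexed by the xbuf's chain index, and an SD_NEXT_IOSTART()-style macro hands the buf to the following entry. This is an illustrative reduction, not the actual sd chain tables or macro definitions.

	struct buf;			/* opaque for this sketch */

	typedef void (*iostart_fn)(int index, struct buf *bp);

	static void layer_mapblockaddr(int, struct buf *);
	static void layer_pm(int, struct buf *);
	static void layer_core(int, struct buf *);

	/* One iostart chain: address mapping -> power mgmt -> core. */
	static iostart_fn example_iostart_chain[] = {
		layer_mapblockaddr,
		layer_pm,
		layer_core,
	};

	/* Equivalent in spirit to SD_NEXT_IOSTART(): call the next layer. */
	#define	EXAMPLE_NEXT_IOSTART(i, bp) \
		(*(example_iostart_chain[(i) + 1]))((i) + 1, (bp))

	static void
	layer_mapblockaddr(int index, struct buf *bp)
	{
		/* ...translate partition-relative blkno to absolute... */
		EXAMPLE_NEXT_IOSTART(index, bp);
	}

	static void
	layer_pm(int index, struct buf *bp)
	{
		/* ...raise device power if needed before proceeding... */
		EXAMPLE_NEXT_IOSTART(index, bp);
	}

	static void
	layer_core(int index, struct buf *bp)
	{
		/*
		 * ...build the scsi_pkt and call scsi_transport(9F); the
		 * iodone side later walks the chain back up in reverse.
		 */
		(void) index;
		(void) bp;
	}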
11346 11346
11347 11347
11348 11348 /*
11349 11349 * Create taskq for all targets in the system. This is created at
11350 11350 * _init(9E) and destroyed at _fini(9E).
11351 11351 *
11352 11352 * Note: here we set the minalloc to a reasonably high number to ensure that
11353 11353 * we will have an adequate supply of task entries available at interrupt time.
11354 11354 * This is used in conjunction with the TASKQ_PREPOPULATE flag in
11355 11355  * sd_taskq_create(). Since we do not want to sleep for allocations at
11356 11356 * interrupt time, set maxalloc equal to minalloc. That way we will just fail
11357 11357 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
11358 11358  * requests at any one instant in time.
11359 11359 */
11360 11360 #define SD_TASKQ_NUMTHREADS 8
11361 11361 #define SD_TASKQ_MINALLOC 256
11362 11362 #define SD_TASKQ_MAXALLOC 256
11363 11363
11364 11364 static taskq_t *sd_tq = NULL;
11365 11365 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))
11366 11366
11367 11367 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC;
11368 11368 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
11369 11369
11370 11370 /*
11371 11371  * The following task queue is created for the write part of
11372 11372  * read-modify-write on devices with a non-512-byte block size.
11373 11373  * Limit the number of threads to 1 for now; this number was chosen
11374 11374  * because the queue currently applies only to DVD-RAM and MO drives,
11375 11375  * for which performance is not the main criterion at this stage.
11376 11376  * Note: it remains to be explored whether a single taskq can be used.
11377 11377 */
11378 11378 #define SD_WMR_TASKQ_NUMTHREADS 1
11379 11379 static taskq_t *sd_wmr_tq = NULL;
11380 11380 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))
11381 11381
11382 11382 /*
11383 11383 * Function: sd_taskq_create
11384 11384 *
11385 11385 * Description: Create taskq thread(s) and preallocate task entries
11386 11386 *
11387 11387 * Return Code: Returns a pointer to the allocated taskq_t.
11388 11388 *
11389 11389 * Context: Can sleep. Requires blockable context.
11390 11390 *
11391 11391 * Notes: - The taskq() facility currently is NOT part of the DDI.
11392 11392  *		  (definitely NOT recommended for 3rd-party drivers!) :-)
11393 11393 * - taskq_create() will block for memory, also it will panic
11394 11394 * if it cannot create the requested number of threads.
11395 11395 * - Currently taskq_create() creates threads that cannot be
11396 11396 * swapped.
11397 11397 * - We use TASKQ_PREPOPULATE to ensure we have an adequate
11398 11398  *		  supply of taskq entries at interrupt time (i.e., so that
11399 11399  *		  we do not have to sleep for memory).
11400 11400 */
11401 11401
11402 11402 static void
11403 11403 sd_taskq_create(void)
11404 11404 {
11405 11405 char taskq_name[TASKQ_NAMELEN];
11406 11406
11407 11407 ASSERT(sd_tq == NULL);
11408 11408 ASSERT(sd_wmr_tq == NULL);
11409 11409
11410 11410 (void) snprintf(taskq_name, sizeof (taskq_name),
11411 11411 "%s_drv_taskq", sd_label);
11412 11412 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS,
11413 11413 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
11414 11414 TASKQ_PREPOPULATE));
11415 11415
11416 11416 (void) snprintf(taskq_name, sizeof (taskq_name),
11417 11417 "%s_rmw_taskq", sd_label);
11418 11418 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS,
11419 11419 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
11420 11420 TASKQ_PREPOPULATE));
11421 11421 }
11422 11422
11423 11423
11424 11424 /*
11425 11425 * Function: sd_taskq_delete
11426 11426 *
11427 11427 * Description: Complementary cleanup routine for sd_taskq_create().
11428 11428 *
11429 11429 * Context: Kernel thread context.
11430 11430 */
11431 11431
11432 11432 static void
11433 11433 sd_taskq_delete(void)
11434 11434 {
11435 11435 ASSERT(sd_tq != NULL);
11436 11436 ASSERT(sd_wmr_tq != NULL);
11437 11437 taskq_destroy(sd_tq);
11438 11438 taskq_destroy(sd_wmr_tq);
11439 11439 sd_tq = NULL;
11440 11440 sd_wmr_tq = NULL;
11441 11441 }
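As a usage note, the presumptions earlier mention iodone-side functions dispatching work back to the iostart side via these taskqs. A hedged sketch of that dispatch follows: the callback and errno choice are hypothetical, but taskq_dispatch(9F) with TQ_NOSLEEP returning 0 on failure is the standard no-sleep pattern that the minalloc/maxalloc prepopulation above is sized for.

	/*
	 * Hypothetical iodone-side dispatch: hand a buf to a taskq thread
	 * so the (blockable) iostart side can be re-entered safely.
	 */
	static void
	sd_example_resume_io(void *arg)
	{
		struct buf *bp = arg;

		/* ...re-enter the iostart chain in blockable context... */
		(void) bp;
	}

	static int
	sd_example_dispatch(struct buf *bp)
	{
		/*
		 * TQ_NOSLEEP: never block at interrupt time; rely on the
		 * SD_TASKQ_MINALLOC prepopulated entries instead.
		 */
		if (taskq_dispatch(sd_tq, sd_example_resume_io, bp,
		    TQ_NOSLEEP) == 0) {
			return (EIO);	/* all preallocated entries in use */
		}
		return (0);
	}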
11442 11442
11443 11443
11444 11444 /*
11445 11445 * Function: sdstrategy
11446 11446 *
11447 11447 * Description: Driver's strategy (9E) entry point function.
11448 11448 *
11449 11449 * Arguments: bp - pointer to buf(9S)
11450 11450 *
11451 11451 * Return Code: Always returns zero
11452 11452 *
11453 11453 * Context: Kernel thread context.
11454 11454 */
11455 11455
11456 11456 static int
11457 11457 sdstrategy(struct buf *bp)
11458 11458 {
11459 11459 struct sd_lun *un;
11460 11460
11461 11461 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
11462 11462 if (un == NULL) {
11463 11463 bioerror(bp, EIO);
11464 11464 bp->b_resid = bp->b_bcount;
11465 11465 biodone(bp);
11466 11466 return (0);
11467 11467 }
11468 11468
11469 11469 	/* As was done in the past, fail new cmds if the state is dumping. */
11470 11470 if (un->un_state == SD_STATE_DUMPING) {
11471 11471 bioerror(bp, ENXIO);
11472 11472 bp->b_resid = bp->b_bcount;
11473 11473 biodone(bp);
11474 11474 return (0);
11475 11475 }
11476 11476
11477 11477 ASSERT(!mutex_owned(SD_MUTEX(un)));
11478 11478
11479 11479 /*
11480 11480 	 * Commands may sneak in while we release the mutex in
11481 11481 	 * DDI_SUSPEND, so we should block new commands. However, old
11482 11482 * commands that are still in the driver at this point should
11483 11483 * still be allowed to drain.
11484 11484 */
11485 11485 mutex_enter(SD_MUTEX(un));
11486 11486 /*
11487 11487 * Must wait here if either the device is suspended or
11488 11488 	 * if its power level is changing.
11489 11489 */
11490 11490 while ((un->un_state == SD_STATE_SUSPENDED) ||
11491 11491 (un->un_state == SD_STATE_PM_CHANGING)) {
11492 11492 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11493 11493 }
11494 11494
11495 11495 un->un_ncmds_in_driver++;
11496 11496
11497 11497 /*
11498 11498 	 * atapi: Since we are currently running the CD in PIO mode, we need
11499 11499 	 * to call bp_mapin here to avoid having bp_mapin called in interrupt
11500 11500 	 * context under the HBA's init_pkt routine.
11501 11501 */
11502 11502 if (un->un_f_cfg_is_atapi == TRUE) {
11503 11503 mutex_exit(SD_MUTEX(un));
11504 11504 bp_mapin(bp);
11505 11505 mutex_enter(SD_MUTEX(un));
11506 11506 }
11507 11507 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n",
11508 11508 un->un_ncmds_in_driver);
11509 11509
11510 11510 if (bp->b_flags & B_WRITE)
11511 11511 un->un_f_sync_cache_required = TRUE;
11512 11512
11513 11513 mutex_exit(SD_MUTEX(un));
11514 11514
11515 11515 /*
11516 11516 * This will (eventually) allocate the sd_xbuf area and
11517 11517 * call sd_xbuf_strategy(). We just want to return the
11518 11518 	 * result of ddi_xbuf_qstrategy so that we have an
11519 11519 	 * optimized tail call which saves us a stack frame.
11520 11520 */
11521 11521 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr));
11522 11522 }
11523 11523
11524 11524
11525 11525 /*
11526 11526 * Function: sd_xbuf_strategy
11527 11527 *
11528 11528 * Description: Function for initiating IO operations via the
11529 11529 * ddi_xbuf_qstrategy() mechanism.
11530 11530 *
11531 11531 * Context: Kernel thread context.
11532 11532 */
11533 11533
11534 11534 static void
11535 11535 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg)
11536 11536 {
11537 11537 struct sd_lun *un = arg;
11538 11538
11539 11539 ASSERT(bp != NULL);
11540 11540 ASSERT(xp != NULL);
11541 11541 ASSERT(un != NULL);
11542 11542 ASSERT(!mutex_owned(SD_MUTEX(un)));
11543 11543
11544 11544 /*
11545 11545 * Initialize the fields in the xbuf and save a pointer to the
11546 11546 * xbuf in bp->b_private.
11547 11547 */
11548 11548 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL);
11549 11549
11550 11550 /* Send the buf down the iostart chain */
11551 11551 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp);
11552 11552 }
11553 11553
11554 11554
11555 11555 /*
11556 11556 * Function: sd_xbuf_init
11557 11557 *
11558 11558 * Description: Prepare the given sd_xbuf struct for use.
11559 11559 *
11560 11560 * Arguments: un - ptr to softstate
11561 11561 * bp - ptr to associated buf(9S)
11562 11562 * xp - ptr to associated sd_xbuf
11563 11563 * chain_type - IO chain type to use:
11564 11564 * SD_CHAIN_NULL
11565 11565 * SD_CHAIN_BUFIO
11566 11566 * SD_CHAIN_USCSI
11567 11567 * SD_CHAIN_DIRECT
11568 11568 * SD_CHAIN_DIRECT_PRIORITY
11569 11569 * pktinfop - ptr to private data struct for scsi_pkt(9S)
11570 11570 * initialization; may be NULL if none.
11571 11571 *
11572 11572 * Context: Kernel thread context
11573 11573 */
11574 11574
11575 11575 static void
11576 11576 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
11577 11577 uchar_t chain_type, void *pktinfop)
11578 11578 {
11579 11579 int index;
11580 11580
11581 11581 ASSERT(un != NULL);
11582 11582 ASSERT(bp != NULL);
11583 11583 ASSERT(xp != NULL);
11584 11584
11585 11585 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n",
11586 11586 bp, chain_type);
11587 11587
11588 11588 xp->xb_un = un;
11589 11589 xp->xb_pktp = NULL;
11590 11590 xp->xb_pktinfo = pktinfop;
11591 11591 xp->xb_private = bp->b_private;
11592 11592 xp->xb_blkno = (daddr_t)bp->b_blkno;
11593 11593
11594 11594 /*
11595 11595 * Set up the iostart and iodone chain indexes in the xbuf, based
11596 11596 * upon the specified chain type to use.
11597 11597 */
11598 11598 switch (chain_type) {
11599 11599 case SD_CHAIN_NULL:
11600 11600 /*
11601 11601 		 * Fall through to just use the values for the buf type, even
11602 11602 		 * though for the NULL chain these values will never be used.
11603 11603 */
11604 11604 /* FALLTHRU */
11605 11605 case SD_CHAIN_BUFIO:
11606 11606 index = un->un_buf_chain_type;
11607 11607 if ((!un->un_f_has_removable_media) &&
11608 11608 (un->un_tgt_blocksize != 0) &&
11609 11609 (un->un_tgt_blocksize != DEV_BSIZE ||
11610 11610 un->un_f_enable_rmw)) {
11611 11611 int secmask = 0, blknomask = 0;
11612 11612 if (un->un_f_enable_rmw) {
11613 11613 blknomask =
11614 11614 (un->un_phy_blocksize / DEV_BSIZE) - 1;
11615 11615 secmask = un->un_phy_blocksize - 1;
11616 11616 } else {
11617 11617 blknomask =
11618 11618 (un->un_tgt_blocksize / DEV_BSIZE) - 1;
11619 11619 secmask = un->un_tgt_blocksize - 1;
11620 11620 }
11621 11621
11622 11622 if ((bp->b_lblkno & (blknomask)) ||
11623 11623 (bp->b_bcount & (secmask))) {
11624 11624 if ((un->un_f_rmw_type !=
11625 11625 SD_RMW_TYPE_RETURN_ERROR) ||
11626 11626 un->un_f_enable_rmw) {
11627 11627 if (un->un_f_pm_is_enabled == FALSE)
11628 11628 index =
11629 11629 SD_CHAIN_INFO_MSS_DSK_NO_PM;
11630 11630 else
11631 11631 index =
11632 11632 SD_CHAIN_INFO_MSS_DISK;
11633 11633 }
11634 11634 }
11635 11635 }
11636 11636 break;
11637 11637 case SD_CHAIN_USCSI:
11638 11638 index = un->un_uscsi_chain_type;
11639 11639 break;
11640 11640 case SD_CHAIN_DIRECT:
11641 11641 index = un->un_direct_chain_type;
11642 11642 break;
11643 11643 case SD_CHAIN_DIRECT_PRIORITY:
11644 11644 index = un->un_priority_chain_type;
11645 11645 break;
11646 11646 default:
11647 11647 /* We're really broken if we ever get here... */
11648 11648 panic("sd_xbuf_init: illegal chain type!");
11649 11649 /*NOTREACHED*/
11650 11650 }
11651 11651
11652 11652 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index;
11653 11653 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index;
11654 11654
11655 11655 /*
11656 11656 * It might be a bit easier to simply bzero the entire xbuf above,
11657 11657 * but it turns out that since we init a fair number of members anyway,
11658 11658 	 * we save a fair number of cycles by doing explicit assignment of zero.
11659 11659 */
11660 11660 xp->xb_pkt_flags = 0;
11661 11661 xp->xb_dma_resid = 0;
11662 11662 xp->xb_retry_count = 0;
11663 11663 xp->xb_victim_retry_count = 0;
11664 11664 xp->xb_ua_retry_count = 0;
11665 11665 xp->xb_nr_retry_count = 0;
11666 11666 xp->xb_sense_bp = NULL;
11667 11667 xp->xb_sense_status = 0;
11668 11668 xp->xb_sense_state = 0;
11669 11669 xp->xb_sense_resid = 0;
11670 11670 xp->xb_ena = 0;
11671 11671
11672 11672 bp->b_private = xp;
11673 11673 bp->b_flags &= ~(B_DONE | B_ERROR);
11674 11674 bp->b_resid = 0;
11675 11675 bp->av_forw = NULL;
11676 11676 bp->av_back = NULL;
11677 11677 bioerror(bp, 0);
11678 11678
11679 11679 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n");
11680 11680 }
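The chain-selection logic in sd_xbuf_init() above routes a buf to the multi-sector (read-modify-write) chain when its starting block or byte count is misaligned with the target block size. A worked standalone sketch of those two mask tests, with illustrative values:

	/* Returns nonzero when the buf needs the read-modify-write chain. */
	static int
	needs_rmw(long lblkno, unsigned long bcount,
	    unsigned int tgt_blocksize, unsigned int dev_bsize)
	{
		unsigned long blknomask = (tgt_blocksize / dev_bsize) - 1;
		unsigned long secmask = tgt_blocksize - 1;

		return ((lblkno & blknomask) != 0 || (bcount & secmask) != 0);
	}

	/*
	 * For a 4096-byte target block on a 512-byte DEV_BSIZE system:
	 *	needs_rmw(8, 8192, 4096, 512) == 0  -> normal chain
	 *	needs_rmw(3, 8192, 4096, 512) != 0  -> RMW (start misaligned)
	 *	needs_rmw(8, 1024, 4096, 512) != 0  -> RMW (length misaligned)
	 */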
11681 11681
11682 11682
11683 11683 /*
11684 11684 * Function: sd_uscsi_strategy
11685 11685 *
11686 11686 * Description: Wrapper for calling into the USCSI chain via physio(9F)
11687 11687 *
11688 11688 * Arguments: bp - buf struct ptr
11689 11689 *
11690 11690 * Return Code: Always returns 0
11691 11691 *
11692 11692 * Context: Kernel thread context
11693 11693 */
11694 11694
11695 11695 static int
11696 11696 sd_uscsi_strategy(struct buf *bp)
11697 11697 {
11698 11698 struct sd_lun *un;
11699 11699 struct sd_uscsi_info *uip;
11700 11700 struct sd_xbuf *xp;
11701 11701 uchar_t chain_type;
11702 11702 uchar_t cmd;
11703 11703
11704 11704 ASSERT(bp != NULL);
11705 11705
11706 11706 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
11707 11707 if (un == NULL) {
11708 11708 bioerror(bp, EIO);
11709 11709 bp->b_resid = bp->b_bcount;
11710 11710 biodone(bp);
11711 11711 return (0);
11712 11712 }
11713 11713
11714 11714 ASSERT(!mutex_owned(SD_MUTEX(un)));
11715 11715
11716 11716 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp);
11717 11717
11718 11718 /*
11719 11719 * A pointer to a struct sd_uscsi_info is expected in bp->b_private
11720 11720 */
11721 11721 ASSERT(bp->b_private != NULL);
11722 11722 uip = (struct sd_uscsi_info *)bp->b_private;
11723 11723 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0];
11724 11724
11725 11725 mutex_enter(SD_MUTEX(un));
11726 11726 /*
11727 11727 	 * atapi: Since we are currently running the CD in PIO mode, we need
11728 11728 	 * to call bp_mapin here to avoid having bp_mapin called in interrupt
11729 11729 	 * context under the HBA's init_pkt routine.
11730 11730 */
11731 11731 if (un->un_f_cfg_is_atapi == TRUE) {
11732 11732 mutex_exit(SD_MUTEX(un));
11733 11733 bp_mapin(bp);
11734 11734 mutex_enter(SD_MUTEX(un));
11735 11735 }
11736 11736 un->un_ncmds_in_driver++;
11737 11737 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n",
11738 11738 un->un_ncmds_in_driver);
11739 11739
11740 11740 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) &&
11741 11741 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1))
11742 11742 un->un_f_sync_cache_required = TRUE;
11743 11743
11744 11744 mutex_exit(SD_MUTEX(un));
11745 11745
11746 11746 switch (uip->ui_flags) {
11747 11747 case SD_PATH_DIRECT:
11748 11748 chain_type = SD_CHAIN_DIRECT;
11749 11749 break;
11750 11750 case SD_PATH_DIRECT_PRIORITY:
11751 11751 chain_type = SD_CHAIN_DIRECT_PRIORITY;
11752 11752 break;
11753 11753 default:
11754 11754 chain_type = SD_CHAIN_USCSI;
11755 11755 break;
11756 11756 }
11757 11757
11758 11758 /*
11759 11759 	 * We may allocate an extra buf for external USCSI commands. If the
11760 11760 	 * application asks for more than 20 bytes of sense data via USCSI,
11761 11761 	 * the SCSA layer will allocate a 252-byte sense buf for that command.
11762 11762 */
11763 11763 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen >
11764 11764 SENSE_LENGTH) {
11765 11765 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH +
11766 11766 MAX_SENSE_LENGTH, KM_SLEEP);
11767 11767 } else {
11768 11768 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP);
11769 11769 }
11770 11770
11771 11771 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp);
11772 11772
11773 11773 /* Use the index obtained within xbuf_init */
11774 11774 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);
11775 11775
11776 11776 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp);
11777 11777
11778 11778 return (0);
11779 11779 }
11780 11780
11781 11781 /*
11782 11782 * Function: sd_send_scsi_cmd
11783 11783 *
11784 11784  * Description: Runs a USCSI command for a user (when called through
11785 11785  *		sdioctl), or for the driver.
11786 11786 *
11787 11787 * Arguments: dev - the dev_t for the device
11788 11788 * incmd - ptr to a valid uscsi_cmd struct
11789 11789 * flag - bit flag, indicating open settings, 32/64 bit type
11790 11790 * dataspace - UIO_USERSPACE or UIO_SYSSPACE
11791 11791 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11792 11792 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11793 11793 * to use the USCSI "direct" chain and bypass the normal
11794 11794 * command waitq.
11795 11795 *
11796 11796 * Return Code: 0 - successful completion of the given command
11797 11797 * EIO - scsi_uscsi_handle_command() failed
11798 11798 * ENXIO - soft state not found for specified dev
11799 11799 * EINVAL
11800 11800 * EFAULT - copyin/copyout error
11801 11801 * return code of scsi_uscsi_handle_command():
11802 11802 * EIO
11803 11803 * ENXIO
11804 11804 * EACCES
11805 11805 *
11806 11806 * Context: Waits for command to complete. Can sleep.
11807 11807 */
11808 11808
11809 11809 static int
11810 11810 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
11811 11811 enum uio_seg dataspace, int path_flag)
11812 11812 {
11813 11813 struct sd_lun *un;
11814 11814 sd_ssc_t *ssc;
11815 11815 int rval;
11816 11816
11817 11817 un = ddi_get_soft_state(sd_state, SDUNIT(dev));
11818 11818 if (un == NULL) {
11819 11819 return (ENXIO);
11820 11820 }
11821 11821
11822 11822 /*
11823 11823 	 * Use sd_ssc_send to handle the uscsi cmd.
11824 11824 */
11825 11825 ssc = sd_ssc_init(un);
11826 11826 rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag);
11827 11827 sd_ssc_fini(ssc);
11828 11828
11829 11829 return (rval);
11830 11830 }
11831 11831
11832 11832 /*
11833 11833 * Function: sd_ssc_init
11834 11834 *
11835 11835  * Description: Uscsi end-users call this function to initialize the
11836 11836  *              necessary fields, such as the uscsi_cmd and sd_uscsi_info
11837 11837  *              structs.
11838 11838  *
11839 11839  *              The return value of sd_send_scsi_cmd will be treated as a
11840 11840  *              fault in various conditions. Even when it is non-zero, some
11841 11841  *              callers may ignore it. That is to say, we cannot make an
11842 11842  *              accurate assessment in sdintr, since a command failing in
11843 11843  *              sdintr need not be treated as a real failure by its caller.
11844 11844 *
11845 11845  *              To avoid printing too many error logs for a failed uscsi
11846 11846  *              packet that the caller may not treat as a failure, sd
11847 11847  *              keeps silent while handling all uscsi commands.
11848 11848 *
11849 11849 * During detach->attach and attach-open, for some types of
11850 11850  *              problems, the driver should provide information about
11851 11851  *              the problem encountered. Device commands use USCSI_SILENT,
11852 11852 * suppresses all driver information. The result is that no
11853 11853 * information about the problem is available. Being
11854 11854 * completely silent during this time is inappropriate. The
11855 11855 * driver needs a more selective filter than USCSI_SILENT, so
11856 11856 * that information related to faults is provided.
11857 11857 *
11858 11858  *              To make an accurate assessment, the caller of
11859 11859  *              sd_send_scsi_USCSI_CMD should take ownership and
11860 11860  *              get the necessary information to print error messages.
11861 11861 *
11862 11862  *              If we want to print the necessary info for a uscsi command,
11863 11863  *              we need to keep the uscsi_cmd and sd_uscsi_info until we
11864 11864  *              can make the assessment. We use sd_ssc_init to allocate
11865 11865  *              the structs needed for sending a uscsi command, and we
11866 11866  *              are also responsible for freeing the memory by calling
11867 11867  *              sd_ssc_fini.
11868 11868 *
11869 11869  *              The calling sequence will look like this:
11870 11870 * sd_ssc_init->
11871 11871 *
11872 11872 * ...
11873 11873 *
11874 11874 * sd_send_scsi_USCSI_CMD->
11875 11875 * sd_ssc_send-> - - - sdintr
11876 11876 * ...
11877 11877 *
11878 11878  *              if we think the return value should be treated as a
11879 11879  *              failure, we make the assessment here and print out what is
11880 11880  *              necessary by retrieving the uscsi_cmd and sd_uscsi_info.
11881 11881 *
11882 11882 * ...
11883 11883 *
11884 11884 * sd_ssc_fini
11885 11885 *
11886 11886 *
11887 11887 * Arguments: un - pointer to driver soft state (unit) structure for this
11888 11888 * target.
11889 11889 *
11890 11890 * Return code: sd_ssc_t - pointer to allocated sd_ssc_t struct, it contains
11891 11891 * uscsi_cmd and sd_uscsi_info.
11892 11892  *              NULL - if memory for the sd_ssc_t struct cannot be allocated
11893 11893 *
11894 11894 * Context: Kernel Thread.
11895 11895 */
11896 11896 static sd_ssc_t *
11897 11897 sd_ssc_init(struct sd_lun *un)
11898 11898 {
11899 11899 sd_ssc_t *ssc;
11900 11900 struct uscsi_cmd *ucmdp;
11901 11901 struct sd_uscsi_info *uip;
11902 11902
11903 11903 ASSERT(un != NULL);
11904 11904 ASSERT(!mutex_owned(SD_MUTEX(un)));
11905 11905
11906 11906 /*
11907 11907 * Allocate sd_ssc_t structure
11908 11908 */
11909 11909 ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP);
11910 11910
11911 11911 /*
11912 11912 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine
11913 11913 */
11914 11914 ucmdp = scsi_uscsi_alloc();
11915 11915
11916 11916 /*
11917 11917 * Allocate sd_uscsi_info structure
11918 11918 */
11919 11919 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
11920 11920
11921 11921 ssc->ssc_uscsi_cmd = ucmdp;
11922 11922 ssc->ssc_uscsi_info = uip;
11923 11923 ssc->ssc_un = un;
11924 11924
11925 11925 return (ssc);
11926 11926 }
11927 11927
11928 11928 /*
11929 11929 * Function: sd_ssc_fini
11930 11930 *
11931 11931  * Description: Free the sd_ssc_t struct and everything hanging off it.
11932 11932 *
11933 11933 * Arguments: ssc - struct pointer of sd_ssc_t.
11934 11934 */
11935 11935 static void
11936 11936 sd_ssc_fini(sd_ssc_t *ssc)
11937 11937 {
11938 11938 scsi_uscsi_free(ssc->ssc_uscsi_cmd);
11939 11939
11940 11940 if (ssc->ssc_uscsi_info != NULL) {
11941 11941 kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info));
11942 11942 ssc->ssc_uscsi_info = NULL;
11943 11943 }
11944 11944
11945 11945 kmem_free(ssc, sizeof (sd_ssc_t));
11946 11946 ssc = NULL;
11947 11947 }
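Putting sd_ssc_init(), sd_ssc_send() (via a wrapper such as sd_send_scsi_TEST_UNIT_READY()), sd_ssc_assessment(), and sd_ssc_fini() together: a condensed sketch of the lifecycle the sd_ssc_init comment describes, modeled on the DOORLOCK sequence in sdclose() above. Error handling is simplified; real callers pick the assessment per failure mode.

	static int
	sd_example_uscsi(struct sd_lun *un)
	{
		sd_ssc_t	*ssc;
		int		rval;

		ssc = sd_ssc_init(un);		/* alloc ssc + uscsi_cmd */
		rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
		if (rval != 0) {
			/* Dispose of the FMA telemetry for this command. */
			sd_ssc_assessment(ssc, SD_FMT_IGNORE);
		}
		sd_ssc_fini(ssc);		/* free ssc and children */
		return (rval);
	}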
11948 11948
11949 11949 /*
11950 11950 * Function: sd_ssc_send
11951 11951 *
11952 11952 * Description: Runs a USCSI command for user when called through sdioctl,
11953 11953 * or for the driver.
11954 11954 *
11955 11955 * Arguments: ssc - the struct of sd_ssc_t will bring uscsi_cmd and
11956 11956 * sd_uscsi_info in.
11957 11957 * incmd - ptr to a valid uscsi_cmd struct
11958 11958 * flag - bit flag, indicating open settings, 32/64 bit type
11959 11959 * dataspace - UIO_USERSPACE or UIO_SYSSPACE
11960 11960 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11961 11961 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11962 11962 * to use the USCSI "direct" chain and bypass the normal
11963 11963 * command waitq.
11964 11964 *
11965 11965 * Return Code: 0 - successful completion of the given command
11966 11966 * EIO - scsi_uscsi_handle_command() failed
11967 11967 * ENXIO - soft state not found for specified dev
11968 11968 * ECANCELED - command cancelled due to low power
11969 11969 * EINVAL
11970 11970 * EFAULT - copyin/copyout error
11971 11971 * return code of scsi_uscsi_handle_command():
11972 11972 * EIO
11973 11973 * ENXIO
11974 11974 * EACCES
11975 11975 *
11976 11976 * Context: Kernel Thread;
11977 11977 * Waits for command to complete. Can sleep.
11978 11978 */
11979 11979 static int
11980 11980 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag,
11981 11981 enum uio_seg dataspace, int path_flag)
11982 11982 {
11983 11983 struct sd_uscsi_info *uip;
11984 11984 struct uscsi_cmd *uscmd;
11985 11985 struct sd_lun *un;
11986 11986 dev_t dev;
11987 11987
11988 11988 int format = 0;
11989 11989 int rval;
11990 11990
11991 11991 ASSERT(ssc != NULL);
11992 11992 un = ssc->ssc_un;
11993 11993 ASSERT(un != NULL);
11994 11994 uscmd = ssc->ssc_uscsi_cmd;
11995 11995 ASSERT(uscmd != NULL);
11996 11996 ASSERT(!mutex_owned(SD_MUTEX(un)));
11997 11997 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
11998 11998 /*
11999 11999 		 * If we get here, it indicates that the previous uscsi
12000 12000 		 * command has not been processed by sd_ssc_assessment.
12001 12001 		 * This violates our rules for FMA telemetry processing.
12002 12002 		 * We print this message along with the last undisposed
12003 12003 		 * uscsi command.
12004 12004 */
12005 12005 if (uscmd->uscsi_cdb != NULL) {
12006 12006 SD_INFO(SD_LOG_SDTEST, un,
12007 12007 "sd_ssc_send is missing the alternative "
12008 12008 "sd_ssc_assessment when running command 0x%x.\n",
12009 12009 uscmd->uscsi_cdb[0]);
12010 12010 }
12011 12011 /*
12012 12012 * Set the ssc_flags to SSC_FLAGS_UNKNOWN, which should be
12013 12013 * the initial status.
12014 12014 */
12015 12015 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12016 12016 }
12017 12017
12018 12018 /*
12019 12019 	 * We need to make sure sd_ssc_send is followed by sd_ssc_assessment
12020 12020 	 * to avoid missing FMA telemetry.
12021 12021 */
12022 12022 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT;
12023 12023
12024 12024 /*
12025 12025 	 * If USCSI_PMFAILFAST is set and un is in low power, fail the
12026 12026 	 * command immediately.
12027 12027 */
12028 12028 mutex_enter(SD_MUTEX(un));
12029 12029 mutex_enter(&un->un_pm_mutex);
12030 12030 if ((uscmd->uscsi_flags & USCSI_PMFAILFAST) &&
12031 12031 SD_DEVICE_IS_IN_LOW_POWER(un)) {
12032 12032 		SD_TRACE(SD_LOG_IO, un, "sd_ssc_send: "
12033 12033 		    "un:0x%p is in low power\n", un);
12034 12034 mutex_exit(&un->un_pm_mutex);
12035 12035 mutex_exit(SD_MUTEX(un));
12036 12036 return (ECANCELED);
12037 12037 }
12038 12038 mutex_exit(&un->un_pm_mutex);
12039 12039 mutex_exit(SD_MUTEX(un));
12040 12040
12041 12041 #ifdef SDDEBUG
12042 12042 switch (dataspace) {
12043 12043 case UIO_USERSPACE:
12044 12044 SD_TRACE(SD_LOG_IO, un,
12045 12045 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un);
12046 12046 break;
12047 12047 case UIO_SYSSPACE:
12048 12048 SD_TRACE(SD_LOG_IO, un,
12049 12049 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un);
12050 12050 break;
12051 12051 default:
12052 12052 SD_TRACE(SD_LOG_IO, un,
12053 12053 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un);
12054 12054 break;
12055 12055 }
12056 12056 #endif
12057 12057
12058 12058 rval = scsi_uscsi_copyin((intptr_t)incmd, flag,
12059 12059 SD_ADDRESS(un), &uscmd);
12060 12060 if (rval != 0) {
12061 12061 SD_TRACE(SD_LOG_IO, un, "sd_sense_scsi_cmd: "
12062 12062 "scsi_uscsi_alloc_and_copyin failed\n", un);
12063 12063 return (rval);
12064 12064 }
12065 12065
12066 12066 if ((uscmd->uscsi_cdb != NULL) &&
12067 12067 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) {
12068 12068 mutex_enter(SD_MUTEX(un));
12069 12069 un->un_f_format_in_progress = TRUE;
12070 12070 mutex_exit(SD_MUTEX(un));
12071 12071 format = 1;
12072 12072 }
12073 12073
12074 12074 /*
12075 12075 	 * Take the sd_uscsi_info struct allocated in sd_ssc_init and fill it with the info
12076 12076 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
12077 12077 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
12078 12078 * since we allocate the buf here in this function, we do not
12079 12079 * need to preserve the prior contents of b_private.
12080 12080 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
12081 12081 */
12082 12082 uip = ssc->ssc_uscsi_info;
12083 12083 uip->ui_flags = path_flag;
12084 12084 uip->ui_cmdp = uscmd;
12085 12085
12086 12086 /*
12087 12087 * Commands sent with priority are intended for error recovery
12088 12088 * situations, and do not have retries performed.
12089 12089 */
12090 12090 if (path_flag == SD_PATH_DIRECT_PRIORITY) {
12091 12091 uscmd->uscsi_flags |= USCSI_DIAGNOSE;
12092 12092 }
12093 12093 uscmd->uscsi_flags &= ~USCSI_NOINTR;
12094 12094
12095 12095 dev = SD_GET_DEV(un);
12096 12096 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd,
12097 12097 sd_uscsi_strategy, NULL, uip);
12098 12098
12099 12099 /*
12100 12100 	 * Mark ssc_flags right after handle_cmd to make sure
12101 12101 	 * the uscsi command has been sent.
12102 12102 */
12103 12103 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED;
12104 12104
12105 12105 #ifdef SDDEBUG
12106 12106 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: "
12107 12107 "uscsi_status: 0x%02x uscsi_resid:0x%x\n",
12108 12108 uscmd->uscsi_status, uscmd->uscsi_resid);
12109 12109 if (uscmd->uscsi_bufaddr != NULL) {
12110 12110 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: "
12111 12111 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n",
12112 12112 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen);
12113 12113 if (dataspace == UIO_SYSSPACE) {
12114 12114 SD_DUMP_MEMORY(un, SD_LOG_IO,
12115 12115 "data", (uchar_t *)uscmd->uscsi_bufaddr,
12116 12116 uscmd->uscsi_buflen, SD_LOG_HEX);
12117 12117 }
12118 12118 }
12119 12119 #endif
12120 12120
12121 12121 if (format == 1) {
12122 12122 mutex_enter(SD_MUTEX(un));
12123 12123 un->un_f_format_in_progress = FALSE;
12124 12124 mutex_exit(SD_MUTEX(un));
12125 12125 }
12126 12126
12127 12127 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd);
12128 12128
12129 12129 return (rval);
12130 12130 }
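
The pairing contract above (sd_ssc_send sets SSC_FLAGS_NEED_ASSESSMENT, and a matching sd_ssc_assessment must clear it) is easiest to see from the caller's side. Below is a minimal sketch of the intended calling sequence; the CDB setup is elided, and the FKIOCTL flag and the assessment choices are illustrative assumptions, not a copy of any existing caller.

/*
 * Sketch only: the sd_ssc_init/sd_ssc_send/sd_ssc_assessment/
 * sd_ssc_fini sequence required by the comments above.
 */
static int
sd_example_internal_cmd(struct sd_lun *un)
{
	sd_ssc_t	*ssc;
	int		rval;

	ssc = sd_ssc_init(un);			/* allocate ssc state */

	/* ... build a CDB in ssc->ssc_uscsi_cmd here ... */

	rval = sd_ssc_send(ssc, ssc->ssc_uscsi_cmd, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_DIRECT);

	/*
	 * Every sd_ssc_send() must be followed by sd_ssc_assessment(),
	 * even on failure; otherwise SSC_FLAGS_NEED_ASSESSMENT stays
	 * set and FMA telemetry is lost.
	 */
	if (rval == 0)
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
	else
		sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);

	sd_ssc_fini(ssc);			/* release ssc state */
	return (rval);
}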
12131 12131
12132 12132 /*
12133 12133 * Function: sd_ssc_print
12134 12134 *
12135 12135  * Description: Print error information to the console.
12136 12136 *
12137 12137  * Arguments: ssc - sd_ssc_t struct that carries the uscsi_cmd and
12138 12138  *            sd_uscsi_info.
12139 12139 * sd_severity - log level.
12140 12140 * Context: Kernel thread or interrupt context.
12141 12141 */
12142 12142 static void
12143 12143 sd_ssc_print(sd_ssc_t *ssc, int sd_severity)
12144 12144 {
12145 12145 struct uscsi_cmd *ucmdp;
12146 12146 struct scsi_device *devp;
12147 12147 dev_info_t *devinfo;
12148 12148 uchar_t *sensep;
12149 12149 int senlen;
12150 12150 union scsi_cdb *cdbp;
12151 12151 uchar_t com;
12152 12152 extern struct scsi_key_strings scsi_cmds[];
12153 12153
12154 12154 ASSERT(ssc != NULL);
12155 12155 ASSERT(ssc->ssc_un != NULL);
12156 12156
12157 12157 if (SD_FM_LOG(ssc->ssc_un) != SD_FM_LOG_EREPORT)
12158 12158 return;
12159 12159 ucmdp = ssc->ssc_uscsi_cmd;
12160 12160 devp = SD_SCSI_DEVP(ssc->ssc_un);
12161 12161 devinfo = SD_DEVINFO(ssc->ssc_un);
12162 12162 ASSERT(ucmdp != NULL);
12163 12163 ASSERT(devp != NULL);
12164 12164 ASSERT(devinfo != NULL);
12165 12165 sensep = (uint8_t *)ucmdp->uscsi_rqbuf;
12166 12166 senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid;
12167 12167 cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb;
12168 12168
12169 12169 	/* In certain cases (like DOORLOCK), the cdb could be NULL. */
12170 12170 if (cdbp == NULL)
12171 12171 return;
12172 12172 	/* We don't print a log message if no sense data is available. */
12173 12173 if (senlen == 0)
12174 12174 sensep = NULL;
12175 12175 com = cdbp->scc_cmd;
12176 12176 scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com,
12177 12177 scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL);
12178 12178 }
12179 12179
12180 12180 /*
12181 12181 * Function: sd_ssc_assessment
12182 12182 *
12183 12183 * Description: We use this function to make an assessment at the point
12184 12184  *              where the SD driver may encounter a potential error.
12185 12185 *
12186 12186  * Arguments: ssc - sd_ssc_t struct that carries the uscsi_cmd and
12187 12187  *            sd_uscsi_info.
12188 12188 * tp_assess - a hint of strategy for ereport posting.
12189 12189 * Possible values of tp_assess include:
12190 12190 * SD_FMT_IGNORE - we don't post any ereport because we're
12191 12191 * sure that it is ok to ignore the underlying problems.
12192 12192 * SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now
12193 12193  *                but it might not be correct to ignore the underlying hardware
12194 12194 * error.
12195 12195 * SD_FMT_STATUS_CHECK - we will post an ereport with the
12196 12196 * payload driver-assessment of value "fail" or
12197 12197  *                "fatal" (depending on what information we have here). This
12198 12198  *                assessment value is usually set when the SD driver thinks a
12199 12199  *                potential error has occurred (typically, when the return
12200 12200  *                value of the SCSI command is EIO).
12201 12201 * SD_FMT_STANDARD - we will post an ereport with the payload
12202 12202 * driver-assessment of value "info". This assessment value is
12203 12203 * set when the SCSI command returned successfully and with
12204 12204 * sense data sent back.
12205 12205 *
12206 12206 * Context: Kernel thread.
12207 12207 */
12208 12208 static void
12209 12209 sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess)
12210 12210 {
12211 12211 int senlen = 0;
12212 12212 struct uscsi_cmd *ucmdp = NULL;
12213 12213 struct sd_lun *un;
12214 12214
12215 12215 ASSERT(ssc != NULL);
12216 12216 un = ssc->ssc_un;
12217 12217 ASSERT(un != NULL);
12218 12218 ucmdp = ssc->ssc_uscsi_cmd;
12219 12219 ASSERT(ucmdp != NULL);
12220 12220
12221 12221 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
12222 12222 ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT;
12223 12223 } else {
12224 12224 /*
12225 12225 		 * If we enter here, it indicates an incorrect calling
12226 12226 		 * sequence of sd_ssc_send and sd_ssc_assessment, which
12227 12227 		 * should be called in pairs to avoid losing FMA
12228 12228 		 * telemetry.
12229 12229 */
12230 12230 if (ucmdp->uscsi_cdb != NULL) {
12231 12231 SD_INFO(SD_LOG_SDTEST, un,
12232 12232 "sd_ssc_assessment is missing the "
12233 12233 			    "matching sd_ssc_send when running 0x%x, "
12234 12234 			    "or there are superfluous sd_ssc_assessment "
12235 12235 			    "calls for the same sd_ssc_send.\n",
12236 12236 ucmdp->uscsi_cdb[0]);
12237 12237 }
12238 12238 /*
12239 12239 * Set the ssc_flags to the initial value to avoid passing
12240 12240 * down dirty flags to the following sd_ssc_send function.
12241 12241 */
12242 12242 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12243 12243 return;
12244 12244 }
12245 12245
12246 12246 /*
12247 12247 * Only handle an issued command which is waiting for assessment.
12248 12248 	 * A command which was not issued will not have
12249 12249 	 * SSC_FLAGS_INVALID_DATA set, so it's OK to just return here.
12250 12250 */
12251 12251 if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) {
12252 12252 sd_ssc_print(ssc, SCSI_ERR_INFO);
12253 12253 return;
12254 12254 } else {
12255 12255 /*
12256 12256 		 * For an issued command, we should clear this flag so
12257 12257 		 * that the sd_ssc_t structure can be reused across
12258 12258 		 * multiple uscsi commands.
12259 12259 */
12260 12260 ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED;
12261 12261 }
12262 12262
12263 12263 /*
12264 12264 	 * We will not deal with non-retryable (flag USCSI_DIAGNOSE set)
12265 12265 	 * commands here; clear the ssc_flags before returning.
12266 12266 */
12267 12267 if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) {
12268 12268 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12269 12269 return;
12270 12270 }
12271 12271
12272 12272 switch (tp_assess) {
12273 12273 case SD_FMT_IGNORE:
12274 12274 case SD_FMT_IGNORE_COMPROMISE:
12275 12275 break;
12276 12276 case SD_FMT_STATUS_CHECK:
12277 12277 /*
12278 12278 		 * For a failed command (including a succeeded command
12279 12279 		 * with invalid data sent back).
12280 12280 */
12281 12281 sd_ssc_post(ssc, SD_FM_DRV_FATAL);
12282 12282 break;
12283 12283 case SD_FMT_STANDARD:
12284 12284 /*
12285 12285 		 * Always for succeeded commands, possibly with sense
12286 12286 		 * data sent back.
12287 12287 * Limitation:
12288 12288 * We can only handle a succeeded command with sense
12289 12289 * data sent back when auto-request-sense is enabled.
12290 12290 */
12291 12291 senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen -
12292 12292 ssc->ssc_uscsi_cmd->uscsi_rqresid;
12293 12293 if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) &&
12294 12294 (un->un_f_arq_enabled == TRUE) &&
12295 12295 senlen > 0 &&
12296 12296 ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) {
12297 12297 sd_ssc_post(ssc, SD_FM_DRV_NOTICE);
12298 12298 }
12299 12299 break;
12300 12300 default:
12301 12301 /*
12302 12302 * Should not have other type of assessment.
12303 12303 */
12304 12304 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
12305 12305 "sd_ssc_assessment got wrong "
12306 12306 "sd_type_assessment %d.\n", tp_assess);
12307 12307 break;
12308 12308 }
12309 12309 /*
12310 12310 	 * Clear the ssc_flags before returning.
12311 12311 */
12312 12312 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12313 12313 }
12314 12314
12315 12315 /*
12316 12316 * Function: sd_ssc_post
12317 12317 *
12318 12318  * Description: 1. Read the driver property to get the fm-scsi-log flag.
12319 12319  *              2. Print a log message if fm_log_capable is non-zero.
12320 12320  *              3. Call sd_ssc_ereport_post to post an ereport if possible.
12321 12321 *
12322 12322 * Context: May be called from kernel thread or interrupt context.
12323 12323 */
12324 12324 static void
12325 12325 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess)
12326 12326 {
12327 12327 struct sd_lun *un;
12328 12328 int sd_severity;
12329 12329
12330 12330 ASSERT(ssc != NULL);
12331 12331 un = ssc->ssc_un;
12332 12332 ASSERT(un != NULL);
12333 12333
12334 12334 /*
12335 12335 	 * We may enter here from sd_ssc_assessment (for a USCSI command)
12336 12336 	 * or directly from sdintr context.
12337 12337 	 * We don't handle non-disk devices (CD-ROM, removable media).
12338 12338 	 * Clear the ssc_flags before returning in case we've set
12339 12339 	 * SSC_FLAGS_INVALID_XXX, which should be skipped for a non-disk
12340 12340 	 * device.
12341 12341 */
12342 12342 if (ISCD(un) || un->un_f_has_removable_media) {
12343 12343 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12344 12344 return;
12345 12345 }
12346 12346
12347 12347 switch (sd_assess) {
12348 12348 case SD_FM_DRV_FATAL:
12349 12349 sd_severity = SCSI_ERR_FATAL;
12350 12350 break;
12351 12351 case SD_FM_DRV_RECOVERY:
12352 12352 sd_severity = SCSI_ERR_RECOVERED;
12353 12353 break;
12354 12354 case SD_FM_DRV_RETRY:
12355 12355 sd_severity = SCSI_ERR_RETRYABLE;
12356 12356 break;
12357 12357 case SD_FM_DRV_NOTICE:
12358 12358 sd_severity = SCSI_ERR_INFO;
12359 12359 break;
12360 12360 default:
12361 12361 sd_severity = SCSI_ERR_UNKNOWN;
12362 12362 }
12363 12363 /* print log */
12364 12364 sd_ssc_print(ssc, sd_severity);
12365 12365
12366 12366 /* always post ereport */
12367 12367 sd_ssc_ereport_post(ssc, sd_assess);
12368 12368 }
12369 12369
12370 12370 /*
12371 12371 * Function: sd_ssc_set_info
12372 12372 *
12373 12373  * Description: Mark ssc_flags and set ssc_info, which will be the
12374 12374  *              payload of the uderr ereport. This function causes
12375 12375  *              sd_ssc_ereport_post to post a uderr ereport only.
12376 12376  *              In addition, when ssc_flags == SSC_FLAGS_INVALID_DATA (USCSI),
12377 12377  *              the function also calls SD_ERROR or scsi_log for a
12378 12378  *              CDROM/removable-media/DDI_FM_NOT_CAPABLE device.
12379 12379 *
12380 12380  * Arguments: ssc - sd_ssc_t struct that carries the uscsi_cmd and
12381 12381  *            sd_uscsi_info.
12382 12382 * ssc_flags - indicate the sub-category of a uderr.
12383 12383 * comp - this argument is meaningful only when
12384 12384 * ssc_flags == SSC_FLAGS_INVALID_DATA, and its possible
12385 12385 * values include:
12386 12386 * > 0, SD_ERROR is used with comp as the driver logging
12387 12387 * component;
12388 12388  *                = 0, scsi_log is used to log the error telemetry;
12389 12389  *                < 0, no logging for this telemetry.
12390 12390 *
12391 12391 * Context: Kernel thread or interrupt context
12392 12392 */
12393 12393 static void
12394 12394 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp, const char *fmt, ...)
12395 12395 {
12396 12396 va_list ap;
12397 12397
12398 12398 ASSERT(ssc != NULL);
12399 12399 ASSERT(ssc->ssc_un != NULL);
12400 12400
12401 12401 ssc->ssc_flags |= ssc_flags;
12402 12402 va_start(ap, fmt);
12403 12403 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap);
12404 12404 va_end(ap);
12405 12405
12406 12406 /*
12407 12407 * If SSC_FLAGS_INVALID_DATA is set, it should be a uscsi command
12408 12408 	 * with invalid data sent back. For non-uscsi commands, the
12409 12409 	 * following code is bypassed.
12410 12410 */
12411 12411 if (ssc_flags & SSC_FLAGS_INVALID_DATA) {
12412 12412 if (SD_FM_LOG(ssc->ssc_un) == SD_FM_LOG_NSUP) {
12413 12413 /*
12414 12414 			 * If the error belongs to a certain component and
12415 12415 			 * we do not want it to show up on the console, we
12416 12416 			 * use SD_ERROR; otherwise scsi_log is
12417 12417 			 * preferred.
12418 12418 */
12419 12419 if (comp > 0) {
12420 12420 SD_ERROR(comp, ssc->ssc_un, ssc->ssc_info);
12421 12421 } else if (comp == 0) {
12422 12422 scsi_log(SD_DEVINFO(ssc->ssc_un), sd_label,
12423 12423 CE_WARN, ssc->ssc_info);
12424 12424 }
12425 12425 }
12426 12426 }
12427 12427 }
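
A short usage sketch for the comp argument described above; the flag, format string, and arguments are placeholders rather than a quote of an existing call site. Passing comp == 0 routes the message through scsi_log, comp > 0 routes it through SD_ERROR under that logging component, and comp < 0 suppresses logging.

/*
 * Sketch only: marking invalid data returned by a uscsi command.
 * The message and its arguments are illustrative placeholders.
 */
static void
sd_example_flag_bad_data(sd_ssc_t *ssc, int page_code, int length)
{
	sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0,
	    "mode page 0x%x: reported length %d is invalid",
	    page_code, length);
}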
12428 12428
12429 12429 /*
12430 12430 * Function: sd_buf_iodone
12431 12431 *
12432 12432 * Description: Frees the sd_xbuf & returns the buf to its originator.
12433 12433 *
12434 12434 * Context: May be called from interrupt context.
12435 12435 */
12436 12436 /* ARGSUSED */
12437 12437 static void
12438 12438 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp)
12439 12439 {
12440 12440 struct sd_xbuf *xp;
12441 12441
12442 12442 ASSERT(un != NULL);
12443 12443 ASSERT(bp != NULL);
12444 12444 ASSERT(!mutex_owned(SD_MUTEX(un)));
12445 12445
12446 12446 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n");
12447 12447
12448 12448 xp = SD_GET_XBUF(bp);
12449 12449 ASSERT(xp != NULL);
12450 12450
12451 12451 /* xbuf is gone after this */
12452 12452 if (ddi_xbuf_done(bp, un->un_xbuf_attr)) {
12453 12453 mutex_enter(SD_MUTEX(un));
12454 12454
12455 12455 /*
12456 12456 		 * Record the time at which the command completed.
12457 12457 		 * This is used to determine whether the device has been
12458 12458 		 * idle long enough to be marked idle to the PM framework.
12459 12459 		 * This lowers overhead, and therefore improves
12460 12460 		 * performance per I/O operation.
12461 12461 */
12462 12462 un->un_pm_idle_time = gethrtime();
12463 12463
12464 12464 un->un_ncmds_in_driver--;
12465 12465 ASSERT(un->un_ncmds_in_driver >= 0);
12466 12466 SD_INFO(SD_LOG_IO, un,
12467 12467 "sd_buf_iodone: un_ncmds_in_driver = %ld\n",
12468 12468 un->un_ncmds_in_driver);
12469 12469
12470 12470 mutex_exit(SD_MUTEX(un));
12471 12471 }
12472 12472
12473 12473 biodone(bp); /* bp is gone after this */
12474 12474
12475 12475 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n");
12476 12476 }
12477 12477
12478 12478
12479 12479 /*
12480 12480 * Function: sd_uscsi_iodone
12481 12481 *
12482 12482 * Description: Frees the sd_xbuf & returns the buf to its originator.
12483 12483 *
12484 12484 * Context: May be called from interrupt context.
12485 12485 */
12486 12486 /* ARGSUSED */
12487 12487 static void
12488 12488 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
12489 12489 {
12490 12490 struct sd_xbuf *xp;
12491 12491
12492 12492 ASSERT(un != NULL);
12493 12493 ASSERT(bp != NULL);
12494 12494
12495 12495 xp = SD_GET_XBUF(bp);
12496 12496 ASSERT(xp != NULL);
12497 12497 ASSERT(!mutex_owned(SD_MUTEX(un)));
12498 12498
12499 12499 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n");
12500 12500
12501 12501 bp->b_private = xp->xb_private;
12502 12502
12503 12503 mutex_enter(SD_MUTEX(un));
12504 12504
12505 12505 /*
12506 12506 	 * Record the time at which the command completed.
12507 12507 	 * This is used to determine whether the device has been
12508 12508 	 * idle long enough to be marked idle to the PM framework.
12509 12509 	 * This lowers overhead, and therefore improves
12510 12510 	 * performance per I/O operation.
12511 12511 */
12512 12512 un->un_pm_idle_time = gethrtime();
12513 12513
12514 12514 un->un_ncmds_in_driver--;
12515 12515 ASSERT(un->un_ncmds_in_driver >= 0);
12516 12516 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
12517 12517 un->un_ncmds_in_driver);
12518 12518
12519 12519 mutex_exit(SD_MUTEX(un));
12520 12520
12521 12521 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen >
12522 12522 SENSE_LENGTH) {
12523 12523 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH +
12524 12524 MAX_SENSE_LENGTH);
12525 12525 } else {
12526 12526 kmem_free(xp, sizeof (struct sd_xbuf));
12527 12527 }
12528 12528
12529 12529 biodone(bp);
12530 12530
12531 12531 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n");
12532 12532 }
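
The size expression in the kmem_free() above mirrors the allocation-time sizing: the xbuf embeds a SENSE_LENGTH sense array which is extended to MAX_SENSE_LENGTH bytes when a command needs more sense space. A standalone sketch of the arithmetic follows; the struct layout and sizes below are stand-ins, not the real sd_xbuf or scsi(9S) values.

#include <stdio.h>

/* Stand-in sizes for illustration only. */
#define SENSE_LENGTH		20	/* embedded sense array */
#define MAX_SENSE_LENGTH	252	/* extended sense array */

struct xbuf_like {
	void		*xb_private;
	/* ... other bookkeeping fields ... */
	unsigned char	xb_sense_data[SENSE_LENGTH];
};

int
main(void)
{
	/*
	 * When uscsi_rqlen > SENSE_LENGTH, the xbuf was allocated with
	 * the embedded array extended to MAX_SENSE_LENGTH bytes, so it
	 * must be freed with the same size expression.
	 */
	size_t small = sizeof (struct xbuf_like);
	size_t large = small - SENSE_LENGTH + MAX_SENSE_LENGTH;

	printf("normal free: %zu bytes, extended free: %zu bytes\n",
	    small, large);
	return (0);
}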
12533 12533
12534 12534
12535 12535 /*
12536 12536 * Function: sd_mapblockaddr_iostart
12537 12537 *
12538 12538 * Description: Verify request lies within the partition limits for
12539 12539 * the indicated minor device. Issue "overrun" buf if
12540 12540 * request would exceed partition range. Converts
12541 12541 * partition-relative block address to absolute.
12542 12542 *
12543 12543 * Upon exit of this function:
12544 12544  *		1. I/O is aligned
12545 12545  *		   xp->xb_blkno represents the absolute sector address
12546 12546  *		2. I/O is misaligned
12547 12547  *		   xp->xb_blkno represents the absolute logical block address
12548 12548  *		   based on DEV_BSIZE. The logical block address will be
12549 12549  *		   converted to a physical sector address in
12550 12550  *		   sd_mapblocksize_iostart.
12551 12551  *		3. I/O is misaligned but is aligned in the "overrun" buf
12552 12552  *		   xp->xb_blkno represents the absolute logical block address
12553 12553  *		   based on DEV_BSIZE. The logical block address will be
12554 12554  *		   converted to a physical sector address in
12555 12555  *		   sd_mapblocksize_iostart. No RMW will be issued in this case.
12556 12556 *
12557 12557 * Context: Can sleep
12558 12558 *
12559 12559 * Issues: This follows what the old code did, in terms of accessing
12560 12560 * some of the partition info in the unit struct without holding
12561 12561  *		the mutex. This is a general issue: if the partition info
12562 12562 * can be altered while IO is in progress... as soon as we send
12563 12563 * a buf, its partitioning can be invalid before it gets to the
12564 12564 * device. Probably the right fix is to move partitioning out
12565 12565 * of the driver entirely.
12566 12566 */
12567 12567
12568 12568 static void
12569 12569 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
12570 12570 {
12571 12571 diskaddr_t nblocks; /* #blocks in the given partition */
12572 12572 daddr_t blocknum; /* Block number specified by the buf */
12573 12573 size_t requested_nblocks;
12574 12574 size_t available_nblocks;
12575 12575 int partition;
12576 12576 diskaddr_t partition_offset;
12577 12577 struct sd_xbuf *xp;
12578 12578 int secmask = 0, blknomask = 0;
12579 12579 ushort_t is_aligned = TRUE;
12580 12580
12581 12581 ASSERT(un != NULL);
12582 12582 ASSERT(bp != NULL);
12583 12583 ASSERT(!mutex_owned(SD_MUTEX(un)));
12584 12584
12585 12585 SD_TRACE(SD_LOG_IO_PARTITION, un,
12586 12586 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);
12587 12587
12588 12588 xp = SD_GET_XBUF(bp);
12589 12589 ASSERT(xp != NULL);
12590 12590
12591 12591 /*
12592 12592 * If the geometry is not indicated as valid, attempt to access
12593 12593 * the unit & verify the geometry/label. This can be the case for
12594 12594 	 * removable-media devices, or if the device was opened in
12595 12595 * NDELAY/NONBLOCK mode.
12596 12596 */
12597 12597 partition = SDPART(bp->b_edev);
12598 12598
12599 12599 if (!SD_IS_VALID_LABEL(un)) {
12600 12600 sd_ssc_t *ssc;
12601 12601 /*
12602 12602 		 * Initialize sd_ssc_t for internal uscsi commands.
12603 12603 		 * To avoid a potential performance issue, we
12604 12604 		 * allocate memory only if the label is invalid.
12605 12605 */
12606 12606 ssc = sd_ssc_init(un);
12607 12607
12608 12608 if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) {
12609 12609 /*
12610 12610 * For removable devices it is possible to start an
12611 12611 			 * I/O without media by opening the device in nodelay
12612 12612 * mode. Also for writable CDs there can be many
12613 12613 * scenarios where there is no geometry yet but volume
12614 12614 * manager is trying to issue a read() just because
12615 12615 * it can see TOC on the CD. So do not print a message
12616 12616 * for removables.
12617 12617 */
12618 12618 if (!un->un_f_has_removable_media) {
12619 12619 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
12620 12620 "i/o to invalid geometry\n");
12621 12621 }
12622 12622 bioerror(bp, EIO);
12623 12623 bp->b_resid = bp->b_bcount;
12624 12624 SD_BEGIN_IODONE(index, un, bp);
12625 12625
12626 12626 sd_ssc_fini(ssc);
12627 12627 return;
12628 12628 }
12629 12629 sd_ssc_fini(ssc);
12630 12630 }
12631 12631
12632 12632 nblocks = 0;
12633 12633 (void) cmlb_partinfo(un->un_cmlbhandle, partition,
12634 12634 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT);
12635 12635
12636 12636 if (un->un_f_enable_rmw) {
12637 12637 blknomask = (un->un_phy_blocksize / DEV_BSIZE) - 1;
12638 12638 secmask = un->un_phy_blocksize - 1;
12639 12639 } else {
12640 12640 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
12641 12641 secmask = un->un_tgt_blocksize - 1;
12642 12642 }
12643 12643
12644 12644 if ((bp->b_lblkno & (blknomask)) || (bp->b_bcount & (secmask))) {
12645 12645 is_aligned = FALSE;
12646 12646 }
12647 12647
12648 12648 if (!(NOT_DEVBSIZE(un)) || un->un_f_enable_rmw) {
12649 12649 /*
12650 12650 		 * If the I/O is aligned, there is no need to involve RMW
12651 12651 		 * (Read Modify Write). Convert the logical block number to
12652 12652 		 * the target's physical sector number.
12653 12653 */
12654 12654 if (is_aligned) {
12655 12655 xp->xb_blkno = SD_SYS2TGTBLOCK(un, xp->xb_blkno);
12656 12656 } else {
12657 12657 /*
12658 12658 * There is no RMW if we're just reading, so don't
12659 12659 * warn or error out because of it.
12660 12660 */
12661 12661 if (bp->b_flags & B_READ) {
12662 12662 /*EMPTY*/
12663 12663 } else if (!un->un_f_enable_rmw &&
12664 12664 un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) {
12665 12665 bp->b_flags |= B_ERROR;
12666 12666 goto error_exit;
12667 12667 } else if (un->un_f_rmw_type == SD_RMW_TYPE_DEFAULT) {
12668 12668 mutex_enter(SD_MUTEX(un));
12669 12669 if (!un->un_f_enable_rmw &&
12670 12670 un->un_rmw_msg_timeid == NULL) {
12671 12671 scsi_log(SD_DEVINFO(un), sd_label,
12672 12672 CE_WARN, "I/O request is not "
12673 12673 				    "aligned to the %d-byte disk sector "
12674 12674 				    "size. It is handled through Read-"
12675 12675 				    "Modify-Write, but performance will "
12676 12676 				    "be very low.\n",
12677 12677 un->un_tgt_blocksize);
12678 12678 un->un_rmw_msg_timeid =
12679 12679 timeout(sd_rmw_msg_print_handler,
12680 12680 un, SD_RMW_MSG_PRINT_TIMEOUT);
12681 12681 } else {
12682 12682 				un->un_rmw_incre_count++;
12683 12683 }
12684 12684 mutex_exit(SD_MUTEX(un));
12685 12685 }
12686 12686
12687 12687 nblocks = SD_TGT2SYSBLOCK(un, nblocks);
12688 12688 partition_offset = SD_TGT2SYSBLOCK(un,
12689 12689 partition_offset);
12690 12690 }
12691 12691 }
12692 12692
12693 12693 /*
12694 12694 * blocknum is the starting block number of the request. At this
12695 12695 * point it is still relative to the start of the minor device.
12696 12696 */
12697 12697 blocknum = xp->xb_blkno;
12698 12698
12699 12699 /*
12700 12700 * Legacy: If the starting block number is one past the last block
12701 12701 * in the partition, do not set B_ERROR in the buf.
12702 12702 */
12703 12703 if (blocknum == nblocks) {
12704 12704 goto error_exit;
12705 12705 }
12706 12706
12707 12707 /*
12708 12708 * Confirm that the first block of the request lies within the
12709 12709 * partition limits. Also the requested number of bytes must be
12710 12710 * a multiple of the system block size.
12711 12711 */
12712 12712 if ((blocknum < 0) || (blocknum >= nblocks) ||
12713 12713 ((bp->b_bcount & (DEV_BSIZE - 1)) != 0)) {
12714 12714 bp->b_flags |= B_ERROR;
12715 12715 goto error_exit;
12716 12716 }
12717 12717
12718 12718 /*
12719 12719 	 * If the requested # blocks exceeds the available # blocks, that
12720 12720 * is an overrun of the partition.
12721 12721 */
12722 12722 if ((!NOT_DEVBSIZE(un)) && is_aligned) {
12723 12723 requested_nblocks = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
12724 12724 } else {
12725 12725 requested_nblocks = SD_BYTES2SYSBLOCKS(bp->b_bcount);
12726 12726 }
12727 12727
12728 12728 available_nblocks = (size_t)(nblocks - blocknum);
12729 12729 ASSERT(nblocks >= blocknum);
12730 12730
12731 12731 if (requested_nblocks > available_nblocks) {
12732 12732 size_t resid;
12733 12733
12734 12734 /*
12735 12735 * Allocate an "overrun" buf to allow the request to proceed
12736 12736 * for the amount of space available in the partition. The
12737 12737 * amount not transferred will be added into the b_resid
12738 12738 * when the operation is complete. The overrun buf
12739 12739 * replaces the original buf here, and the original buf
12740 12740 * is saved inside the overrun buf, for later use.
12741 12741 */
12742 12742 if ((!NOT_DEVBSIZE(un)) && is_aligned) {
12743 12743 resid = SD_TGTBLOCKS2BYTES(un,
12744 12744 (offset_t)(requested_nblocks - available_nblocks));
12745 12745 } else {
12746 12746 resid = SD_SYSBLOCKS2BYTES(
12747 12747 (offset_t)(requested_nblocks - available_nblocks));
12748 12748 }
12749 12749
12750 12750 size_t count = bp->b_bcount - resid;
12751 12751 /*
12752 12752 		 * Note: count is an unsigned entity, thus it can NEVER
12753 12753 		 * be less than 0, so ASSERT that the original values
12754 12754 		 * are correct.
12755 12755 */
12756 12756 ASSERT(bp->b_bcount >= resid);
12757 12757
12758 12758 bp = sd_bioclone_alloc(bp, count, blocknum,
12759 12759 (int (*)(struct buf *)) sd_mapblockaddr_iodone);
12760 12760 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */
12761 12761 ASSERT(xp != NULL);
12762 12762 }
12763 12763
12764 12764 /* At this point there should be no residual for this buf. */
12765 12765 ASSERT(bp->b_resid == 0);
12766 12766
12767 12767 /* Convert the block number to an absolute address. */
12768 12768 xp->xb_blkno += partition_offset;
12769 12769
12770 12770 SD_NEXT_IOSTART(index, un, bp);
12771 12771
12772 12772 SD_TRACE(SD_LOG_IO_PARTITION, un,
12773 12773 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp);
12774 12774
12775 12775 return;
12776 12776
12777 12777 error_exit:
12778 12778 bp->b_resid = bp->b_bcount;
12779 12779 SD_BEGIN_IODONE(index, un, bp);
12780 12780 SD_TRACE(SD_LOG_IO_PARTITION, un,
12781 12781 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp);
12782 12782 }
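
The mask construction and overrun accounting in sd_mapblockaddr_iostart() are easier to verify with concrete numbers. A standalone userland sketch, assuming 4096-byte physical sectors exposed through 512-byte DEV_BSIZE blocks and a 100-block partition; all values are illustrative.

#include <stdio.h>

#define DEV_BSIZE	512
#define PHY_BSIZE	4096	/* assumed un_phy_blocksize */

int
main(void)
{
	/* Same mask construction as sd_mapblockaddr_iostart(). */
	unsigned int blknomask = (PHY_BSIZE / DEV_BSIZE) - 1;	/* 0x7 */
	unsigned int secmask = PHY_BSIZE - 1;			/* 0xfff */

	long long lblkno = 9;		/* not a multiple of 8 ... */
	long long bcount = 8192;	/* ... but whole physical sectors */

	/* Misaligned if either the start or the length is off. */
	int aligned = !((lblkno & blknomask) || (bcount & secmask));
	printf("aligned = %d\n", aligned);	/* 0: takes the RMW path */

	/* Partition overrun accounting, as in the function above. */
	long long nblocks = 100;	/* blocks in the partition */
	long long blocknum = 96;	/* request start block */
	long long requested = bcount / DEV_BSIZE;	/* 16 blocks */
	long long available = nblocks - blocknum;	/* 4 blocks */
	long long resid = (requested - available) * DEV_BSIZE;
	printf("resid = %lld bytes\n", resid);	/* 6144: overrun buf */
	return (0);
}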
12783 12783
12784 12784
12785 12785 /*
12786 12786 * Function: sd_mapblockaddr_iodone
12787 12787 *
12788 12788 * Description: Completion-side processing for partition management.
12789 12789 *
12790 12790 * Context: May be called under interrupt context
12791 12791 */
12792 12792
12793 12793 static void
12794 12794 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp)
12795 12795 {
12796 12796 /* int partition; */ /* Not used, see below. */
12797 12797 ASSERT(un != NULL);
12798 12798 ASSERT(bp != NULL);
12799 12799 ASSERT(!mutex_owned(SD_MUTEX(un)));
12800 12800
12801 12801 SD_TRACE(SD_LOG_IO_PARTITION, un,
12802 12802 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp);
12803 12803
12804 12804 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) {
12805 12805 /*
12806 12806 * We have an "overrun" buf to deal with...
12807 12807 */
12808 12808 struct sd_xbuf *xp;
12809 12809 struct buf *obp; /* ptr to the original buf */
12810 12810
12811 12811 xp = SD_GET_XBUF(bp);
12812 12812 ASSERT(xp != NULL);
12813 12813
12814 12814 /* Retrieve the pointer to the original buf */
12815 12815 obp = (struct buf *)xp->xb_private;
12816 12816 ASSERT(obp != NULL);
12817 12817
12818 12818 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid);
12819 12819 bioerror(obp, bp->b_error);
12820 12820
12821 12821 sd_bioclone_free(bp);
12822 12822
12823 12823 /*
12824 12824 * Get back the original buf.
12825 12825 * Note that since the restoration of xb_blkno below
12826 12826 * was removed, the sd_xbuf is not needed.
12827 12827 */
12828 12828 bp = obp;
12829 12829 /*
12830 12830 * xp = SD_GET_XBUF(bp);
12831 12831 * ASSERT(xp != NULL);
12832 12832 */
12833 12833 }
12834 12834
12835 12835 /*
12836 12836 	 * Convert xp->xb_blkno back to a minor-device relative value.
12837 12837 * Note: this has been commented out, as it is not needed in the
12838 12838 * current implementation of the driver (ie, since this function
12839 12839 * is at the top of the layering chains, so the info will be
12840 12840 * discarded) and it is in the "hot" IO path.
12841 12841 *
12842 12842 * partition = getminor(bp->b_edev) & SDPART_MASK;
12843 12843 * xp->xb_blkno -= un->un_offset[partition];
12844 12844 */
12845 12845
12846 12846 SD_NEXT_IODONE(index, un, bp);
12847 12847
12848 12848 SD_TRACE(SD_LOG_IO_PARTITION, un,
12849 12849 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp);
12850 12850 }
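
The residual propagation for an overrun buf above, obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid), rewards a worked example. A standalone sketch with assumed sizes:

#include <stdio.h>

int
main(void)
{
	/* Assumed values for illustration only. */
	long long obp_bcount = 8192;	/* original request size */
	long long bp_bcount = 2048;	/* clipped overrun buf size */
	long long bp_resid = 512;	/* overrun buf came up short */

	/*
	 * Same arithmetic as sd_mapblockaddr_iodone(): the original
	 * buf's residual is everything the overrun buf did not carry,
	 * plus whatever the overrun buf itself failed to transfer.
	 */
	long long obp_resid = obp_bcount - (bp_bcount - bp_resid);
	printf("obp->b_resid = %lld bytes\n", obp_resid);	/* 6656 */
	return (0);
}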
12851 12851
12852 12852
12853 12853 /*
12854 12854 * Function: sd_mapblocksize_iostart
12855 12855 *
12856 12856 * Description: Convert between system block size (un->un_sys_blocksize)
12857 12857 * and target block size (un->un_tgt_blocksize).
12858 12858 *
12859 12859 * Context: Can sleep to allocate resources.
12860 12860 *
12861 12861 * Assumptions: A higher layer has already performed any partition validation,
12862 12862 * and converted the xp->xb_blkno to an absolute value relative
12863 12863 * to the start of the device.
12864 12864 *
12865 12865 * It is also assumed that the higher layer has implemented
12866 12866 * an "overrun" mechanism for the case where the request would
12867 12867 * read/write beyond the end of a partition. In this case we
12868 12868 * assume (and ASSERT) that bp->b_resid == 0.
12869 12869 *
12870 12870 * Note: The implementation for this routine assumes the target
12871 12871 * block size remains constant between allocation and transport.
12872 12872 */
12873 12873
12874 12874 static void
12875 12875 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp)
12876 12876 {
12877 12877 struct sd_mapblocksize_info *bsp;
12878 12878 struct sd_xbuf *xp;
12879 12879 offset_t first_byte;
12880 12880 daddr_t start_block, end_block;
12881 12881 daddr_t request_bytes;
12882 12882 ushort_t is_aligned = FALSE;
12883 12883
12884 12884 ASSERT(un != NULL);
12885 12885 ASSERT(bp != NULL);
12886 12886 ASSERT(!mutex_owned(SD_MUTEX(un)));
12887 12887 ASSERT(bp->b_resid == 0);
12888 12888
12889 12889 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
12890 12890 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp);
12891 12891
12892 12892 /*
12893 12893 * For a non-writable CD, a write request is an error
12894 12894 */
12895 12895 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) &&
12896 12896 (un->un_f_mmc_writable_media == FALSE)) {
12897 12897 bioerror(bp, EIO);
12898 12898 bp->b_resid = bp->b_bcount;
12899 12899 SD_BEGIN_IODONE(index, un, bp);
12900 12900 return;
12901 12901 }
12902 12902
12903 12903 /*
12904 12904 * We do not need a shadow buf if the device is using
12905 12905 * un->un_sys_blocksize as its block size or if bcount == 0.
12906 12906 * In this case there is no layer-private data block allocated.
12907 12907 */
12908 12908 if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) ||
12909 12909 (bp->b_bcount == 0)) {
12910 12910 goto done;
12911 12911 }
12912 12912
12913 12913 #if defined(__i386) || defined(__amd64)
12914 12914 /* We do not support non-block-aligned transfers for ROD devices */
12915 12915 ASSERT(!ISROD(un));
12916 12916 #endif
12917 12917
12918 12918 xp = SD_GET_XBUF(bp);
12919 12919 ASSERT(xp != NULL);
12920 12920
12921 12921 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12922 12922 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n",
12923 12923 un->un_tgt_blocksize, DEV_BSIZE);
12924 12924 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12925 12925 "request start block:0x%x\n", xp->xb_blkno);
12926 12926 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12927 12927 "request len:0x%x\n", bp->b_bcount);
12928 12928
12929 12929 /*
12930 12930 * Allocate the layer-private data area for the mapblocksize layer.
12931 12931 * Layers are allowed to use the xp_private member of the sd_xbuf
12932 12932 * struct to store the pointer to their layer-private data block, but
12933 12933 * each layer also has the responsibility of restoring the prior
12934 12934 * contents of xb_private before returning the buf/xbuf to the
12935 12935 * higher layer that sent it.
12936 12936 *
12937 12937 * Here we save the prior contents of xp->xb_private into the
12938 12938 * bsp->mbs_oprivate field of our layer-private data area. This value
12939 12939 * is restored by sd_mapblocksize_iodone() just prior to freeing up
12940 12940 * the layer-private area and returning the buf/xbuf to the layer
12941 12941 * that sent it.
12942 12942 *
12943 12943 * Note that here we use kmem_zalloc for the allocation as there are
12944 12944 * parts of the mapblocksize code that expect certain fields to be
12945 12945 * zero unless explicitly set to a required value.
12946 12946 */
12947 12947 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
12948 12948 bsp->mbs_oprivate = xp->xb_private;
12949 12949 xp->xb_private = bsp;
12950 12950
12951 12951 /*
12952 12952 * This treats the data on the disk (target) as an array of bytes.
12953 12953 * first_byte is the byte offset, from the beginning of the device,
12954 12954 * to the location of the request. This is converted from a
12955 12955 * un->un_sys_blocksize block address to a byte offset, and then back
12956 12956 * to a block address based upon a un->un_tgt_blocksize block size.
12957 12957 *
12958 12958 * xp->xb_blkno should be absolute upon entry into this function,
12959 12959 	 * but it is based upon partitions that use the "system"
12960 12960 * block size. It must be adjusted to reflect the block size of
12961 12961 * the target.
12962 12962 *
12963 12963 * Note that end_block is actually the block that follows the last
12964 12964 * block of the request, but that's what is needed for the computation.
12965 12965 */
12966 12966 first_byte = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
12967 12967 if (un->un_f_enable_rmw) {
12968 12968 start_block = xp->xb_blkno =
12969 12969 (first_byte / un->un_phy_blocksize) *
12970 12970 (un->un_phy_blocksize / DEV_BSIZE);
12971 12971 end_block = ((first_byte + bp->b_bcount +
12972 12972 un->un_phy_blocksize - 1) / un->un_phy_blocksize) *
12973 12973 (un->un_phy_blocksize / DEV_BSIZE);
12974 12974 } else {
12975 12975 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize;
12976 12976 end_block = (first_byte + bp->b_bcount +
12977 12977 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;
12978 12978 }
12979 12979
12980 12980 /* request_bytes is rounded up to a multiple of the target block size */
12981 12981 request_bytes = (end_block - start_block) * un->un_tgt_blocksize;
12982 12982
12983 12983 /*
12984 12984 * See if the starting address of the request and the request
12985 12985 * length are aligned on a un->un_tgt_blocksize boundary. If aligned
12986 12986 * then we do not need to allocate a shadow buf to handle the request.
12987 12987 */
12988 12988 if (un->un_f_enable_rmw) {
12989 12989 if (((first_byte % un->un_phy_blocksize) == 0) &&
12990 12990 ((bp->b_bcount % un->un_phy_blocksize) == 0)) {
12991 12991 is_aligned = TRUE;
12992 12992 }
12993 12993 } else {
12994 12994 if (((first_byte % un->un_tgt_blocksize) == 0) &&
12995 12995 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) {
12996 12996 is_aligned = TRUE;
12997 12997 }
12998 12998 }
12999 12999
13000 13000 if ((bp->b_flags & B_READ) == 0) {
13001 13001 /*
13002 13002 * Lock the range for a write operation. An aligned request is
13003 13003 * considered a simple write; otherwise the request must be a
13004 13004 * read-modify-write.
13005 13005 */
13006 13006 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1,
13007 13007 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW);
13008 13008 }
13009 13009
13010 13010 /*
13011 13011 * Alloc a shadow buf if the request is not aligned. Also, this is
13012 13012 * where the READ command is generated for a read-modify-write. (The
13013 13013 * write phase is deferred until after the read completes.)
13014 13014 */
13015 13015 if (is_aligned == FALSE) {
13016 13016
13017 13017 struct sd_mapblocksize_info *shadow_bsp;
13018 13018 struct sd_xbuf *shadow_xp;
13019 13019 struct buf *shadow_bp;
13020 13020
13021 13021 /*
13022 13022 		 * Allocate the shadow buf and its associated xbuf. Note that
13023 13023 * after this call the xb_blkno value in both the original
13024 13024 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the
13025 13025 * same: absolute relative to the start of the device, and
13026 13026 * adjusted for the target block size. The b_blkno in the
13027 13027 * shadow buf will also be set to this value. We should never
13028 13028 * change b_blkno in the original bp however.
13029 13029 *
13030 13030 * Note also that the shadow buf will always need to be a
13031 13031 * READ command, regardless of whether the incoming command
13032 13032 * is a READ or a WRITE.
13033 13033 */
13034 13034 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ,
13035 13035 xp->xb_blkno,
13036 13036 (int (*)(struct buf *)) sd_mapblocksize_iodone);
13037 13037
13038 13038 shadow_xp = SD_GET_XBUF(shadow_bp);
13039 13039
13040 13040 /*
13041 13041 * Allocate the layer-private data for the shadow buf.
13042 13042 * (No need to preserve xb_private in the shadow xbuf.)
13043 13043 */
13044 13044 shadow_xp->xb_private = shadow_bsp =
13045 13045 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
13046 13046
13047 13047 /*
13048 13048 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone
13049 13049 * to figure out where the start of the user data is (based upon
13050 13050 * the system block size) in the data returned by the READ
13051 13051 * command (which will be based upon the target blocksize). Note
13052 13052 * that this is only really used if the request is unaligned.
13053 13053 */
13054 13054 if (un->un_f_enable_rmw) {
13055 13055 bsp->mbs_copy_offset = (ssize_t)(first_byte -
13056 13056 ((offset_t)xp->xb_blkno * un->un_sys_blocksize));
13057 13057 ASSERT((bsp->mbs_copy_offset >= 0) &&
13058 13058 (bsp->mbs_copy_offset < un->un_phy_blocksize));
13059 13059 } else {
13060 13060 bsp->mbs_copy_offset = (ssize_t)(first_byte -
13061 13061 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize));
13062 13062 ASSERT((bsp->mbs_copy_offset >= 0) &&
13063 13063 (bsp->mbs_copy_offset < un->un_tgt_blocksize));
13064 13064 }
13065 13065
13066 13066 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset;
13067 13067
13068 13068 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index;
13069 13069
13070 13070 /* Transfer the wmap (if any) to the shadow buf */
13071 13071 shadow_bsp->mbs_wmp = bsp->mbs_wmp;
13072 13072 bsp->mbs_wmp = NULL;
13073 13073
13074 13074 /*
13075 13075 * The shadow buf goes on from here in place of the
13076 13076 * original buf.
13077 13077 */
13078 13078 shadow_bsp->mbs_orig_bp = bp;
13079 13079 bp = shadow_bp;
13080 13080 }
13081 13081
13082 13082 SD_INFO(SD_LOG_IO_RMMEDIA, un,
13083 13083 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno);
13084 13084 SD_INFO(SD_LOG_IO_RMMEDIA, un,
13085 13085 "sd_mapblocksize_iostart: tgt request len:0x%x\n",
13086 13086 request_bytes);
13087 13087 SD_INFO(SD_LOG_IO_RMMEDIA, un,
13088 13088 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp);
13089 13089
13090 13090 done:
13091 13091 SD_NEXT_IOSTART(index, un, bp);
13092 13092
13093 13093 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
13094 13094 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp);
13095 13095 }
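
The byte-array arithmetic described in the comments of sd_mapblocksize_iostart() can be checked standalone. A sketch assuming a 2048-byte target block size (e.g. CD media) over 512-byte system blocks, non-RMW path; the request values are illustrative.

#include <stdio.h>

#define DEV_BSIZE	512
#define TGT_BSIZE	2048	/* assumed un_tgt_blocksize */

int
main(void)
{
	/* Assumed request: system block 5, 1024 bytes. */
	long long xb_blkno = 5;
	long long bcount = 1024;

	/* Same arithmetic as sd_mapblocksize_iostart() (non-RMW). */
	long long first_byte = xb_blkno * DEV_BSIZE;		/* 2560 */
	long long start_block = first_byte / TGT_BSIZE;		/* 1 */
	long long end_block =
	    (first_byte + bcount + TGT_BSIZE - 1) / TGT_BSIZE;	/* 2 */
	long long request_bytes = (end_block - start_block) * TGT_BSIZE;

	/* Offset of the user data within the shadow READ's data. */
	long long copy_offset = first_byte - start_block * TGT_BSIZE;

	printf("start %lld end %lld bytes %lld offset %lld\n",
	    start_block, end_block, request_bytes, copy_offset);
	/* start 1 end 2 bytes 2048 offset 512: unaligned, shadow buf */
	return (0);
}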
13096 13096
13097 13097
13098 13098 /*
13099 13099 * Function: sd_mapblocksize_iodone
13100 13100 *
13101 13101 * Description: Completion side processing for block-size mapping.
13102 13102 *
13103 13103 * Context: May be called under interrupt context
13104 13104 */
13105 13105
13106 13106 static void
13107 13107 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp)
13108 13108 {
13109 13109 struct sd_mapblocksize_info *bsp;
13110 13110 struct sd_xbuf *xp;
13111 13111 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */
13112 13112 struct buf *orig_bp; /* ptr to the original buf */
13113 13113 offset_t shadow_end;
13114 13114 offset_t request_end;
13115 13115 offset_t shadow_start;
13116 13116 ssize_t copy_offset;
13117 13117 size_t copy_length;
13118 13118 size_t shortfall;
13119 13119 uint_t is_write; /* TRUE if this bp is a WRITE */
13120 13120 	uint_t has_wmap; /* TRUE if this bp has a wmap */
13121 13121
13122 13122 ASSERT(un != NULL);
13123 13123 ASSERT(bp != NULL);
13124 13124
13125 13125 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
13126 13126 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp);
13127 13127
13128 13128 /*
13129 13129 * There is no shadow buf or layer-private data if the target is
13130 13130 * using un->un_sys_blocksize as its block size or if bcount == 0.
13131 13131 */
13132 13132 if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) ||
13133 13133 (bp->b_bcount == 0)) {
13134 13134 goto exit;
13135 13135 }
13136 13136
13137 13137 xp = SD_GET_XBUF(bp);
13138 13138 ASSERT(xp != NULL);
13139 13139
13140 13140 /* Retrieve the pointer to the layer-private data area from the xbuf. */
13141 13141 bsp = xp->xb_private;
13142 13142
13143 13143 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE;
13144 13144 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE;
13145 13145
13146 13146 if (is_write) {
13147 13147 /*
13148 13148 * For a WRITE request we must free up the block range that
13149 13149 * we have locked up. This holds regardless of whether this is
13150 13150 * an aligned write request or a read-modify-write request.
13151 13151 */
13152 13152 sd_range_unlock(un, bsp->mbs_wmp);
13153 13153 bsp->mbs_wmp = NULL;
13154 13154 }
13155 13155
13156 13156 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) {
13157 13157 /*
13158 13158 * An aligned read or write command will have no shadow buf;
13159 13159 * there is not much else to do with it.
13160 13160 */
13161 13161 goto done;
13162 13162 }
13163 13163
13164 13164 orig_bp = bsp->mbs_orig_bp;
13165 13165 ASSERT(orig_bp != NULL);
13166 13166 orig_xp = SD_GET_XBUF(orig_bp);
13167 13167 ASSERT(orig_xp != NULL);
13168 13168 ASSERT(!mutex_owned(SD_MUTEX(un)));
13169 13169
13170 13170 if (!is_write && has_wmap) {
13171 13171 /*
13172 13172 * A READ with a wmap means this is the READ phase of a
13173 13173 * read-modify-write. If an error occurred on the READ then
13174 13174 * we do not proceed with the WRITE phase or copy any data.
13175 13175 * Just release the write maps and return with an error.
13176 13176 */
13177 13177 if ((bp->b_resid != 0) || (bp->b_error != 0)) {
13178 13178 orig_bp->b_resid = orig_bp->b_bcount;
13179 13179 bioerror(orig_bp, bp->b_error);
13180 13180 sd_range_unlock(un, bsp->mbs_wmp);
13181 13181 goto freebuf_done;
13182 13182 }
13183 13183 }
13184 13184
13185 13185 /*
13186 13186 * Here is where we set up to copy the data from the shadow buf
13187 13187 * into the space associated with the original buf.
13188 13188 *
13189 13189 * To deal with the conversion between block sizes, these
13190 13190 * computations treat the data as an array of bytes, with the
13191 13191 * first byte (byte 0) corresponding to the first byte in the
13192 13192 * first block on the disk.
13193 13193 */
13194 13194
13195 13195 /*
13196 13196 * shadow_start and shadow_len indicate the location and size of
13197 13197 	 * shadow_start and shadow_end indicate the location and extent of
13198 13198 */
13199 13199 if (un->un_f_enable_rmw) {
13200 13200 shadow_start = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
13201 13201 } else {
13202 13202 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
13203 13203 }
13204 13204 shadow_end = shadow_start + bp->b_bcount - bp->b_resid;
13205 13205
13206 13206 /*
13207 13207 * copy_offset gives the offset (in bytes) from the start of the first
13208 13208 * block of the READ request to the beginning of the data. We retrieve
13209 13209 * this value from xb_pktp in the ORIGINAL xbuf, as it has been saved
13210 13210 	 * there by sd_mapblocksize_iostart(). copy_length gives the amount of
13211 13211 * data to be copied (in bytes).
13212 13212 */
13213 13213 copy_offset = bsp->mbs_copy_offset;
13214 13214 if (un->un_f_enable_rmw) {
13215 13215 ASSERT((copy_offset >= 0) &&
13216 13216 (copy_offset < un->un_phy_blocksize));
13217 13217 } else {
13218 13218 ASSERT((copy_offset >= 0) &&
13219 13219 (copy_offset < un->un_tgt_blocksize));
13220 13220 }
13221 13221
13222 13222 copy_length = orig_bp->b_bcount;
13223 13223 request_end = shadow_start + copy_offset + orig_bp->b_bcount;
13224 13224
13225 13225 /*
13226 13226 * Set up the resid and error fields of orig_bp as appropriate.
13227 13227 */
13228 13228 if (shadow_end >= request_end) {
13229 13229 /* We got all the requested data; set resid to zero */
13230 13230 orig_bp->b_resid = 0;
13231 13231 } else {
13232 13232 /*
13233 13233 * We failed to get enough data to fully satisfy the original
13234 13234 * request. Just copy back whatever data we got and set
13235 13235 * up the residual and error code as required.
13236 13236 *
13237 13237 * 'shortfall' is the amount by which the data received with the
13238 13238 * shadow buf has "fallen short" of the requested amount.
13239 13239 */
13240 13240 shortfall = (size_t)(request_end - shadow_end);
13241 13241
13242 13242 if (shortfall > orig_bp->b_bcount) {
13243 13243 /*
13244 13244 * We did not get enough data to even partially
13245 13245 * fulfill the original request. The residual is
13246 13246 * equal to the amount requested.
13247 13247 */
13248 13248 orig_bp->b_resid = orig_bp->b_bcount;
13249 13249 } else {
13250 13250 /*
13251 13251 * We did not get all the data that we requested
13252 13252 * from the device, but we will try to return what
13253 13253 * portion we did get.
13254 13254 */
13255 13255 orig_bp->b_resid = shortfall;
13256 13256 }
13257 13257 ASSERT(copy_length >= orig_bp->b_resid);
13258 13258 copy_length -= orig_bp->b_resid;
13259 13259 }
13260 13260
13261 13261 /* Propagate the error code from the shadow buf to the original buf */
13262 13262 bioerror(orig_bp, bp->b_error);
13263 13263
13264 13264 if (is_write) {
13265 13265 goto freebuf_done; /* No data copying for a WRITE */
13266 13266 }
13267 13267
13268 13268 if (has_wmap) {
13269 13269 /*
13270 13270 * This is a READ command from the READ phase of a
13271 13271 * read-modify-write request. We have to copy the data given
13272 13272 * by the user OVER the data returned by the READ command,
13273 13273 * then convert the command from a READ to a WRITE and send
13274 13274 * it back to the target.
13275 13275 */
13276 13276 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset,
13277 13277 copy_length);
13278 13278
13279 13279 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */
13280 13280
13281 13281 /*
13282 13282 * Dispatch the WRITE command to the taskq thread, which
13283 13283 * will in turn send the command to the target. When the
13284 13284 * WRITE command completes, we (sd_mapblocksize_iodone())
13285 13285 * will get called again as part of the iodone chain
13286 13286 * processing for it. Note that we will still be dealing
13287 13287 * with the shadow buf at that point.
13288 13288 */
13289 13289 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp,
13290 13290 KM_NOSLEEP) != 0) {
13291 13291 /*
13292 13292 * Dispatch was successful so we are done. Return
13293 13293 * without going any higher up the iodone chain. Do
13294 13294 * not free up any layer-private data until after the
13295 13295 * WRITE completes.
13296 13296 */
13297 13297 return;
13298 13298 }
13299 13299
13300 13300 /*
13301 13301 * Dispatch of the WRITE command failed; set up the error
13302 13302 * condition and send this IO back up the iodone chain.
13303 13303 */
13304 13304 bioerror(orig_bp, EIO);
13305 13305 orig_bp->b_resid = orig_bp->b_bcount;
13306 13306
13307 13307 } else {
13308 13308 /*
13309 13309 * This is a regular READ request (ie, not a RMW). Copy the
13310 13310 * data from the shadow buf into the original buf. The
13311 13311 * copy_offset compensates for any "misalignment" between the
13312 13312 * shadow buf (with its un->un_tgt_blocksize blocks) and the
13313 13313 * original buf (with its un->un_sys_blocksize blocks).
13314 13314 */
13315 13315 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr,
13316 13316 copy_length);
13317 13317 }
13318 13318
13319 13319 freebuf_done:
13320 13320
13321 13321 /*
13322 13322 * At this point we still have both the shadow buf AND the original
13323 13323 * buf to deal with, as well as the layer-private data area in each.
13324 13324 * Local variables are as follows:
13325 13325 *
13326 13326 * bp -- points to shadow buf
13327 13327 * xp -- points to xbuf of shadow buf
13328 13328 * bsp -- points to layer-private data area of shadow buf
13329 13329 * orig_bp -- points to original buf
13330 13330 *
13331 13331 * First free the shadow buf and its associated xbuf, then free the
13332 13332 * layer-private data area from the shadow buf. There is no need to
13333 13333 * restore xb_private in the shadow xbuf.
13334 13334 */
13335 13335 sd_shadow_buf_free(bp);
13336 13336 kmem_free(bsp, sizeof (struct sd_mapblocksize_info));
13337 13337
13338 13338 /*
13339 13339 * Now update the local variables to point to the original buf, xbuf,
13340 13340 * and layer-private area.
13341 13341 */
13342 13342 bp = orig_bp;
13343 13343 xp = SD_GET_XBUF(bp);
13344 13344 ASSERT(xp != NULL);
13345 13345 ASSERT(xp == orig_xp);
13346 13346 bsp = xp->xb_private;
13347 13347 ASSERT(bsp != NULL);
13348 13348
13349 13349 done:
13350 13350 /*
13351 13351 * Restore xb_private to whatever it was set to by the next higher
13352 13352 * layer in the chain, then free the layer-private data area.
13353 13353 */
13354 13354 xp->xb_private = bsp->mbs_oprivate;
13355 13355 kmem_free(bsp, sizeof (struct sd_mapblocksize_info));
13356 13356
13357 13357 exit:
13358 13358 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp),
13359 13359 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp);
13360 13360
13361 13361 SD_NEXT_IODONE(index, un, bp);
13362 13362 }
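
The shortfall and residual logic above is another place where concrete numbers help. A standalone sketch, assuming a 4096-byte shadow READ of which only the first 1024 bytes arrived; all values are illustrative.

#include <stdio.h>

int
main(void)
{
	/* Assumed values for illustration only. */
	long long shadow_start = 2048;	/* byte addr of shadow READ */
	long long bp_bcount = 4096;	/* shadow transfer size */
	long long bp_resid = 3072;	/* only 1024 bytes arrived */
	long long copy_offset = 512;	/* user data starts here */
	long long orig_bcount = 1024;	/* original request size */

	/* Same arithmetic as sd_mapblocksize_iodone(). */
	long long shadow_end = shadow_start + bp_bcount - bp_resid;
	long long request_end = shadow_start + copy_offset + orig_bcount;
	long long resid = 0;

	if (shadow_end < request_end) {
		long long shortfall = request_end - shadow_end;
		resid = (shortfall > orig_bcount) ? orig_bcount : shortfall;
	}
	printf("resid = %lld bytes\n", resid);	/* 512: partial copy */
	return (0);
}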
13363 13363
13364 13364
13365 13365 /*
13366 13366 * Function: sd_checksum_iostart
13367 13367 *
13368 13368 * Description: A stub function for a layer that's currently not used.
13369 13369 * For now just a placeholder.
13370 13370 *
13371 13371 * Context: Kernel thread context
13372 13372 */
13373 13373
13374 13374 static void
13375 13375 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp)
13376 13376 {
13377 13377 ASSERT(un != NULL);
13378 13378 ASSERT(bp != NULL);
13379 13379 ASSERT(!mutex_owned(SD_MUTEX(un)));
13380 13380 SD_NEXT_IOSTART(index, un, bp);
13381 13381 }
13382 13382
13383 13383
13384 13384 /*
13385 13385 * Function: sd_checksum_iodone
13386 13386 *
13387 13387 * Description: A stub function for a layer that's currently not used.
13388 13388 * For now just a placeholder.
13389 13389 *
13390 13390 * Context: May be called under interrupt context
13391 13391 */
13392 13392
13393 13393 static void
13394 13394 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp)
13395 13395 {
13396 13396 ASSERT(un != NULL);
13397 13397 ASSERT(bp != NULL);
13398 13398 ASSERT(!mutex_owned(SD_MUTEX(un)));
13399 13399 SD_NEXT_IODONE(index, un, bp);
13400 13400 }
13401 13401
13402 13402
13403 13403 /*
13404 13404 * Function: sd_checksum_uscsi_iostart
13405 13405 *
13406 13406 * Description: A stub function for a layer that's currently not used.
13407 13407 * For now just a placeholder.
13408 13408 *
13409 13409 * Context: Kernel thread context
13410 13410 */
13411 13411
13412 13412 static void
13413 13413 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp)
13414 13414 {
13415 13415 ASSERT(un != NULL);
13416 13416 ASSERT(bp != NULL);
13417 13417 ASSERT(!mutex_owned(SD_MUTEX(un)));
13418 13418 SD_NEXT_IOSTART(index, un, bp);
13419 13419 }
13420 13420
13421 13421
13422 13422 /*
13423 13423 * Function: sd_checksum_uscsi_iodone
13424 13424 *
13425 13425 * Description: A stub function for a layer that's currently not used.
13426 13426 * For now just a placeholder.
13427 13427 *
13428 13428 * Context: May be called under interrupt context
13429 13429 */
13430 13430
13431 13431 static void
13432 13432 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
13433 13433 {
13434 13434 ASSERT(un != NULL);
13435 13435 ASSERT(bp != NULL);
13436 13436 ASSERT(!mutex_owned(SD_MUTEX(un)));
13437 13437 SD_NEXT_IODONE(index, un, bp);
13438 13438 }
13439 13439
13440 13440
13441 13441 /*
13442 13442 * Function: sd_pm_iostart
13443 13443 *
13444 13444  * Description: iostart-side routine for power management.
13445 13445 *
13446 13446 * Context: Kernel thread context
13447 13447 */
13448 13448
13449 13449 static void
13450 13450 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp)
13451 13451 {
13452 13452 ASSERT(un != NULL);
13453 13453 ASSERT(bp != NULL);
13454 13454 ASSERT(!mutex_owned(SD_MUTEX(un)));
13455 13455 ASSERT(!mutex_owned(&un->un_pm_mutex));
13456 13456
13457 13457 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n");
13458 13458
13459 13459 if (sd_pm_entry(un) != DDI_SUCCESS) {
13460 13460 /*
13461 13461 * Set up to return the failed buf back up the 'iodone'
13462 13462 * side of the calling chain.
13463 13463 */
13464 13464 bioerror(bp, EIO);
13465 13465 bp->b_resid = bp->b_bcount;
13466 13466
13467 13467 SD_BEGIN_IODONE(index, un, bp);
13468 13468
13469 13469 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
13470 13470 return;
13471 13471 }
13472 13472
13473 13473 SD_NEXT_IOSTART(index, un, bp);
13474 13474
13475 13475 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
13476 13476 }
13477 13477
13478 13478
13479 13479 /*
13480 13480 * Function: sd_pm_iodone
13481 13481 *
13482 13482  * Description: iodone-side routine for power management.
13483 13483 *
13484 13484 * Context: may be called from interrupt context
13485 13485 */
13486 13486
13487 13487 static void
13488 13488 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
13489 13489 {
13490 13490 ASSERT(un != NULL);
13491 13491 ASSERT(bp != NULL);
13492 13492 ASSERT(!mutex_owned(&un->un_pm_mutex));
13493 13493
13494 13494 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");
13495 13495
13496 13496 /*
13497 13497 * After attach the following flag is only read, so don't
13498 13498 * take the penalty of acquiring a mutex for it.
13499 13499 */
13500 13500 if (un->un_f_pm_is_enabled == TRUE) {
13501 13501 sd_pm_exit(un);
13502 13502 }
13503 13503
13504 13504 SD_NEXT_IODONE(index, un, bp);
13505 13505
13506 13506 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n");
13507 13507 }
13508 13508
13509 13509
13510 13510 /*
13511 13511 * Function: sd_core_iostart
13512 13512 *
13513 13513 * Description: Primary driver function for enqueuing buf(9S) structs from
13514 13514 * the system and initiating IO to the target device
13515 13515 *
13516 13516 * Context: Kernel thread context. Can sleep.
13517 13517 *
13518 13518 * Assumptions: - The given xp->xb_blkno is absolute
13519 13519 * (ie, relative to the start of the device).
13520 13520 * - The IO is to be done using the native blocksize of
13521 13521 * the device, as specified in un->un_tgt_blocksize.
13522 13522 */
13523 13523 /* ARGSUSED */
13524 13524 static void
13525 13525 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp)
13526 13526 {
13527 13527 struct sd_xbuf *xp;
13528 13528
13529 13529 ASSERT(un != NULL);
13530 13530 ASSERT(bp != NULL);
13531 13531 ASSERT(!mutex_owned(SD_MUTEX(un)));
13532 13532 ASSERT(bp->b_resid == 0);
13533 13533
13534 13534 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp);
13535 13535
13536 13536 xp = SD_GET_XBUF(bp);
13537 13537 ASSERT(xp != NULL);
13538 13538
13539 13539 mutex_enter(SD_MUTEX(un));
13540 13540
13541 13541 /*
13542 13542 * If we are currently in the failfast state, fail any new IO
13543 13543 * that has B_FAILFAST set, then return.
13544 13544 */
13545 13545 if ((bp->b_flags & B_FAILFAST) &&
13546 13546 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) {
13547 13547 mutex_exit(SD_MUTEX(un));
13548 13548 bioerror(bp, EIO);
13549 13549 bp->b_resid = bp->b_bcount;
13550 13550 SD_BEGIN_IODONE(index, un, bp);
13551 13551 return;
13552 13552 }
13553 13553
13554 13554 if (SD_IS_DIRECT_PRIORITY(xp)) {
13555 13555 /*
13556 13556 * Priority command -- transport it immediately.
13557 13557 *
13558 13558 * Note: We may want to assert that USCSI_DIAGNOSE is set,
13559 13559 * because all direct priority commands should be associated
13560 13560 * with error recovery actions which we don't want to retry.
13561 13561 */
13562 13562 sd_start_cmds(un, bp);
13563 13563 } else {
13564 13564 /*
13565 13565 * Normal command -- add it to the wait queue, then start
13566 13566 * transporting commands from the wait queue.
13567 13567 */
13568 13568 sd_add_buf_to_waitq(un, bp);
13569 13569 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
13570 13570 sd_start_cmds(un, NULL);
13571 13571 }
13572 13572
13573 13573 mutex_exit(SD_MUTEX(un));
13574 13574
13575 13575 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp);
13576 13576 }
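
To make the dispatch policy above concrete, here is a minimal user-space
sketch of the same three-way decision. The flag names mirror the driver's,
but the B_FAILFAST/DIRECT_PRIORITY values and the harness itself are
hypothetical stand-ins (the real priority test is SD_IS_DIRECT_PRIORITY(xp)):

#include <stdio.h>

#define	B_FAILFAST		0x1
#define	SD_FAILFAST_ACTIVE	1
#define	DIRECT_PRIORITY		0x2	/* stand-in for SD_IS_DIRECT_PRIORITY() */

/*
 * Model of sd_core_iostart(): fail B_FAILFAST I/O while the failfast
 * state is active, transport priority commands immediately, and queue
 * everything else for sd_start_cmds().
 */
static const char *
dispatch(int bp_flags, int failfast_state)
{
	if ((bp_flags & B_FAILFAST) && failfast_state == SD_FAILFAST_ACTIVE)
		return ("fail with EIO, b_resid = b_bcount");
	if (bp_flags & DIRECT_PRIORITY)
		return ("transport immediately: sd_start_cmds(un, bp)");
	return ("enqueue on waitq, then sd_start_cmds(un, NULL)");
}

int
main(void)
{
	printf("%s\n", dispatch(B_FAILFAST, SD_FAILFAST_ACTIVE));
	printf("%s\n", dispatch(DIRECT_PRIORITY, 0));
	printf("%s\n", dispatch(0, 0));
	return (0);
}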
13577 13577
13578 13578
13579 13579 /*
13580 13580 * Function: sd_init_cdb_limits
13581 13581 *
13582 13582 * Description: This is to handle scsi_pkt initialization differences
13583 13583 * between the driver platforms.
13584 13584 *
13585 13585 * Legacy behaviors:
13586 13586 *
13587 13587 * If the block number or the sector count exceeds the
13588 13588 * capabilities of a Group 0 command, shift over to a
13589 13589 * Group 1 command. We don't blindly use Group 1
13590 13590 * commands because a) some drives (CDC Wren IVs) get a
13591 13591 * bit confused, and b) there is probably a fair amount
13592 13592 * of speed difference for a target to receive and decode
13593 13593 * a 10 byte command instead of a 6 byte command.
13594 13594 *
13595 13595 * The xfer time difference of 6 vs 10 byte CDBs is
13596 13596 * still significant so this code is still worthwhile.
13597 13597 * 10 byte CDBs are very inefficient with the fas HBA driver
13598 13598 * and older disks. Each CDB byte took 1 usec with some
13599 13599 * popular disks.
13600 13600 *
13601 13601 * Context: Must be called at attach time
13602 13602 */
13603 13603
13604 13604 static void
13605 13605 sd_init_cdb_limits(struct sd_lun *un)
13606 13606 {
13607 13607 int hba_cdb_limit;
13608 13608
13609 13609 /*
13610 13610 * Use CDB_GROUP1 commands for most devices except for
13611 13611 * parallel SCSI fixed drives in which case we get better
13612 13612 * performance using CDB_GROUP0 commands (where applicable).
13613 13613 */
13614 13614 un->un_mincdb = SD_CDB_GROUP1;
13615 13615 #if !defined(__fibre)
13616 13616 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) &&
13617 13617 !un->un_f_has_removable_media) {
13618 13618 un->un_mincdb = SD_CDB_GROUP0;
13619 13619 }
13620 13620 #endif
13621 13621
13622 13622 /*
13623 13623	 * Try to read the max-cdb-length supported by the HBA.
13624 13624 */
13625 13625 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1);
13626 13626 if (0 >= un->un_max_hba_cdb) {
13627 13627 un->un_max_hba_cdb = CDB_GROUP4;
13628 13628 hba_cdb_limit = SD_CDB_GROUP4;
13629 13629 } else if (0 < un->un_max_hba_cdb &&
13630 13630 un->un_max_hba_cdb < CDB_GROUP1) {
13631 13631 hba_cdb_limit = SD_CDB_GROUP0;
13632 13632 } else if (CDB_GROUP1 <= un->un_max_hba_cdb &&
13633 13633 un->un_max_hba_cdb < CDB_GROUP5) {
13634 13634 hba_cdb_limit = SD_CDB_GROUP1;
13635 13635 } else if (CDB_GROUP5 <= un->un_max_hba_cdb &&
13636 13636 un->un_max_hba_cdb < CDB_GROUP4) {
13637 13637 hba_cdb_limit = SD_CDB_GROUP5;
13638 13638 } else {
13639 13639 hba_cdb_limit = SD_CDB_GROUP4;
13640 13640 }
13641 13641
13642 13642 /*
13643 13643 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4
13644 13644 * commands for fixed disks unless we are building for a 32 bit
13645 13645 * kernel.
13646 13646 */
13647 13647 #ifdef _LP64
13648 13648 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
13649 13649 min(hba_cdb_limit, SD_CDB_GROUP4);
13650 13650 #else
13651 13651 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
13652 13652 min(hba_cdb_limit, SD_CDB_GROUP1);
13653 13653 #endif
13654 13654
13655 13655 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE)
13656 13656 ? sizeof (struct scsi_arq_status) : 1);
13657 13657 if (!ISCD(un))
13658 13658 un->un_cmd_timeout = (ushort_t)sd_io_time;
13659 13659 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout;
13660 13660 }
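
The cascade above maps the HBA's reported max-cdb-length onto the largest
usable CDB group. A standalone sketch of the same thresholds, using the
standard CDB sizes (6/10/12/16 bytes); note the driver itself stores
SD_CDB_GROUP* table indices rather than byte counts, and this harness is a
hypothetical illustration:

#include <stdio.h>

#define	CDB_GROUP0	 6	/* 6-byte CDBs */
#define	CDB_GROUP1	10	/* 10-byte CDBs */
#define	CDB_GROUP5	12	/* 12-byte CDBs */
#define	CDB_GROUP4	16	/* 16-byte CDBs */

/* Mirrors the hba_cdb_limit selection in sd_init_cdb_limits(). */
static int
hba_cdb_limit(int max_hba_cdb)
{
	if (max_hba_cdb <= 0)		/* cap not reported by the HBA */
		return (CDB_GROUP4);
	if (max_hba_cdb < CDB_GROUP1)	/* 1..9   -> 6-byte CDBs only */
		return (CDB_GROUP0);
	if (max_hba_cdb < CDB_GROUP5)	/* 10..11 -> 10-byte CDBs */
		return (CDB_GROUP1);
	if (max_hba_cdb < CDB_GROUP4)	/* 12..15 -> 12-byte CDBs */
		return (CDB_GROUP5);
	return (CDB_GROUP4);		/* 16+    -> 16-byte CDBs */
}

int
main(void)
{
	int caps[] = { -1, 6, 10, 12, 16, 32 };

	for (int i = 0; i < 6; i++)
		printf("max-cdb-length %3d -> %d-byte CDB limit\n",
		    caps[i], hba_cdb_limit(caps[i]));
	return (0);
}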
13661 13661
13662 13662
13663 13663 /*
13664 13664 * Function: sd_initpkt_for_buf
13665 13665 *
13666 13666 * Description: Allocate and initialize for transport a scsi_pkt struct,
13667 13667 * based upon the info specified in the given buf struct.
13668 13668 *
13669 13669	 *		  Assumes the xb_blkno in the request is absolute (i.e.,
13670 13670	 *		  relative to the start of the device, NOT the partition).
13671 13671 * Also assumes that the request is using the native block
13672 13672 * size of the device (as returned by the READ CAPACITY
13673 13673 * command).
13674 13674 *
13675 13675 * Return Code: SD_PKT_ALLOC_SUCCESS
13676 13676 * SD_PKT_ALLOC_FAILURE
13677 13677 * SD_PKT_ALLOC_FAILURE_NO_DMA
13678 13678 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13679 13679 *
13680 13680 * Context: Kernel thread and may be called from software interrupt context
13681 13681 * as part of a sdrunout callback. This function may not block or
13682 13682 * call routines that block
13683 13683 */
13684 13684
13685 13685 static int
13686 13686 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
13687 13687 {
13688 13688 struct sd_xbuf *xp;
13689 13689 struct scsi_pkt *pktp = NULL;
13690 13690 struct sd_lun *un;
13691 13691 size_t blockcount;
13692 13692 daddr_t startblock;
13693 13693 int rval;
13694 13694 int cmd_flags;
13695 13695
13696 13696 ASSERT(bp != NULL);
13697 13697 ASSERT(pktpp != NULL);
13698 13698 xp = SD_GET_XBUF(bp);
13699 13699 ASSERT(xp != NULL);
13700 13700 un = SD_GET_UN(bp);
13701 13701 ASSERT(un != NULL);
13702 13702 ASSERT(mutex_owned(SD_MUTEX(un)));
13703 13703 ASSERT(bp->b_resid == 0);
13704 13704
13705 13705 SD_TRACE(SD_LOG_IO_CORE, un,
13706 13706 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);
13707 13707
13708 13708 mutex_exit(SD_MUTEX(un));
13709 13709
13710 13710 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13711 13711 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
13712 13712 /*
13713 13713 * Already have a scsi_pkt -- just need DMA resources.
13714 13714 * We must recompute the CDB in case the mapping returns
13715 13715 * a nonzero pkt_resid.
13716 13716 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
13717 13717	 * that is being retried, the unmap/remap of the DMA resources
13718 13718 * will result in the entire transfer starting over again
13719 13719 * from the very first block.
13720 13720 */
13721 13721 ASSERT(xp->xb_pktp != NULL);
13722 13722 pktp = xp->xb_pktp;
13723 13723 } else {
13724 13724 pktp = NULL;
13725 13725 }
13726 13726 #endif /* __i386 || __amd64 */
13727 13727
13728 13728 startblock = xp->xb_blkno; /* Absolute block num. */
13729 13729 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
13730 13730
13731 13731 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);
13732 13732
13733 13733 /*
13734 13734 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
13735 13735 * call scsi_init_pkt, and build the CDB.
13736 13736 */
13737 13737 rval = sd_setup_rw_pkt(un, &pktp, bp,
13738 13738 cmd_flags, sdrunout, (caddr_t)un,
13739 13739 startblock, blockcount);
13740 13740
13741 13741 if (rval == 0) {
13742 13742 /*
13743 13743 * Success.
13744 13744 *
13745 13745	 * If partial DMA is being used and required for this transfer,
13746 13746 * set it up here.
13747 13747 */
13748 13748 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
13749 13749 (pktp->pkt_resid != 0)) {
13750 13750
13751 13751 /*
13752 13752	 * Save the pkt_resid for the
13753 13753	 * next xfer
13754 13754 */
13755 13755 xp->xb_dma_resid = pktp->pkt_resid;
13756 13756
13757 13757 /* rezero resid */
13758 13758 pktp->pkt_resid = 0;
13759 13759
13760 13760 } else {
13761 13761 xp->xb_dma_resid = 0;
13762 13762 }
13763 13763
13764 13764 pktp->pkt_flags = un->un_tagflags;
13765 13765 pktp->pkt_time = un->un_cmd_timeout;
13766 13766 pktp->pkt_comp = sdintr;
13767 13767
13768 13768 pktp->pkt_private = bp;
13769 13769 *pktpp = pktp;
13770 13770
13771 13771 SD_TRACE(SD_LOG_IO_CORE, un,
13772 13772 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);
13773 13773
13774 13774 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13775 13775 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
13776 13776 #endif
13777 13777
13778 13778 mutex_enter(SD_MUTEX(un));
13779 13779 return (SD_PKT_ALLOC_SUCCESS);
13780 13780
13781 13781 }
13782 13782
13783 13783 /*
13784 13784 * SD_PKT_ALLOC_FAILURE is the only expected failure code
13785 13785 * from sd_setup_rw_pkt.
13786 13786 */
13787 13787 ASSERT(rval == SD_PKT_ALLOC_FAILURE);
13788 13788
13789 13789 if (rval == SD_PKT_ALLOC_FAILURE) {
13790 13790 *pktpp = NULL;
13791 13791 /*
13792 13792 * Set the driver state to RWAIT to indicate the driver
13793 13793 * is waiting on resource allocations. The driver will not
13794 13794	 * suspend, pm_suspend, or detach while the state is RWAIT.
13795 13795 */
13796 13796 mutex_enter(SD_MUTEX(un));
13797 13797 New_state(un, SD_STATE_RWAIT);
13798 13798
13799 13799 SD_ERROR(SD_LOG_IO_CORE, un,
13800 13800 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp);
13801 13801
13802 13802 if ((bp->b_flags & B_ERROR) != 0) {
13803 13803 return (SD_PKT_ALLOC_FAILURE_NO_DMA);
13804 13804 }
13805 13805 return (SD_PKT_ALLOC_FAILURE);
13806 13806 } else {
13807 13807 /*
13808 13808	 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13809 13809 *
13810 13810 * This should never happen. Maybe someone messed with the
13811 13811 * kernel's minphys?
13812 13812 */
13813 13813 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13814 13814 "Request rejected: too large for CDB: "
13815 13815 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount);
13816 13816 SD_ERROR(SD_LOG_IO_CORE, un,
13817 13817 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
13818 13818 mutex_enter(SD_MUTEX(un));
13819 13819 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13820 13820
13821 13821 }
13822 13822 }
13823 13823
13824 13824
13825 13825 /*
13826 13826 * Function: sd_destroypkt_for_buf
13827 13827 *
13828 13828 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
13829 13829 *
13830 13830 * Context: Kernel thread or interrupt context
13831 13831 */
13832 13832
13833 13833 static void
13834 13834 sd_destroypkt_for_buf(struct buf *bp)
13835 13835 {
13836 13836 ASSERT(bp != NULL);
13837 13837 ASSERT(SD_GET_UN(bp) != NULL);
13838 13838
13839 13839 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13840 13840 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp);
13841 13841
13842 13842 ASSERT(SD_GET_PKTP(bp) != NULL);
13843 13843 scsi_destroy_pkt(SD_GET_PKTP(bp));
13844 13844
13845 13845 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13846 13846 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp);
13847 13847 }
13848 13848
13849 13849 /*
13850 13850 * Function: sd_setup_rw_pkt
13851 13851 *
13852 13852 * Description: Determines appropriate CDB group for the requested LBA
13853 13853 * and transfer length, calls scsi_init_pkt, and builds
13854 13854 * the CDB. Do not use for partial DMA transfers except
13855 13855 * for the initial transfer since the CDB size must
13856 13856 * remain constant.
13857 13857 *
13858 13858 * Context: Kernel thread and may be called from software interrupt
13859 13859 * context as part of a sdrunout callback. This function may not
13860 13860 * block or call routines that block
13861 13861 */
13862 13862
13863 13863
13864 13864 int
13865 13865 sd_setup_rw_pkt(struct sd_lun *un,
13866 13866 struct scsi_pkt **pktpp, struct buf *bp, int flags,
13867 13867 int (*callback)(caddr_t), caddr_t callback_arg,
13868 13868 diskaddr_t lba, uint32_t blockcount)
13869 13869 {
13870 13870 struct scsi_pkt *return_pktp;
13871 13871 union scsi_cdb *cdbp;
13872 13872 struct sd_cdbinfo *cp = NULL;
13873 13873 int i;
13874 13874
13875 13875 /*
13876 13876 * See which size CDB to use, based upon the request.
13877 13877 */
13878 13878 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {
13879 13879
13880 13880 /*
13881 13881 * Check lba and block count against sd_cdbtab limits.
13882 13882 * In the partial DMA case, we have to use the same size
13883 13883 * CDB for all the transfers. Check lba + blockcount
13884 13884 * against the max LBA so we know that segment of the
13885 13885 * transfer can use the CDB we select.
13886 13886 */
13887 13887 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) &&
13888 13888 (blockcount <= sd_cdbtab[i].sc_maxlen)) {
13889 13889
13890 13890 /*
13891 13891 * The command will fit into the CDB type
13892 13892 * specified by sd_cdbtab[i].
13893 13893 */
13894 13894 cp = sd_cdbtab + i;
13895 13895
13896 13896 /*
13897 13897 * Call scsi_init_pkt so we can fill in the
13898 13898 * CDB.
13899 13899 */
13900 13900 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp,
13901 13901 bp, cp->sc_grpcode, un->un_status_len, 0,
13902 13902 flags, callback, callback_arg);
13903 13903
13904 13904 if (return_pktp != NULL) {
13905 13905
13906 13906 /*
13907 13907 * Return new value of pkt
13908 13908 */
13909 13909 *pktpp = return_pktp;
13910 13910
13911 13911 /*
13912 13912	 * To be safe, zero the CDB ensuring there is
13913 13913 * no leftover data from a previous command.
13914 13914 */
13915 13915 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode);
13916 13916
13917 13917 /*
13918 13918 * Handle partial DMA mapping
13919 13919 */
13920 13920 if (return_pktp->pkt_resid != 0) {
13921 13921
13922 13922 /*
13923 13923 * Not going to xfer as many blocks as
13924 13924 * originally expected
13925 13925 */
13926 13926 blockcount -=
13927 13927 SD_BYTES2TGTBLOCKS(un,
13928 13928 return_pktp->pkt_resid);
13929 13929 }
13930 13930
13931 13931 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp;
13932 13932
13933 13933 /*
13934 13934 * Set command byte based on the CDB
13935 13935 * type we matched.
13936 13936 */
13937 13937 cdbp->scc_cmd = cp->sc_grpmask |
13938 13938 ((bp->b_flags & B_READ) ?
13939 13939 SCMD_READ : SCMD_WRITE);
13940 13940
13941 13941 SD_FILL_SCSI1_LUN(un, return_pktp);
13942 13942
13943 13943 /*
13944 13944 * Fill in LBA and length
13945 13945 */
13946 13946 ASSERT((cp->sc_grpcode == CDB_GROUP1) ||
13947 13947 (cp->sc_grpcode == CDB_GROUP4) ||
13948 13948 (cp->sc_grpcode == CDB_GROUP0) ||
13949 13949 (cp->sc_grpcode == CDB_GROUP5));
13950 13950
13951 13951 if (cp->sc_grpcode == CDB_GROUP1) {
13952 13952 FORMG1ADDR(cdbp, lba);
13953 13953 FORMG1COUNT(cdbp, blockcount);
13954 13954 return (0);
13955 13955 } else if (cp->sc_grpcode == CDB_GROUP4) {
13956 13956 FORMG4LONGADDR(cdbp, lba);
13957 13957 FORMG4COUNT(cdbp, blockcount);
13958 13958 return (0);
13959 13959 } else if (cp->sc_grpcode == CDB_GROUP0) {
13960 13960 FORMG0ADDR(cdbp, lba);
13961 13961 FORMG0COUNT(cdbp, blockcount);
13962 13962 return (0);
13963 13963 } else if (cp->sc_grpcode == CDB_GROUP5) {
13964 13964 FORMG5ADDR(cdbp, lba);
13965 13965 FORMG5COUNT(cdbp, blockcount);
13966 13966 return (0);
13967 13967 }
13968 13968
13969 13969 /*
13970 13970 * It should be impossible to not match one
13971 13971 * of the CDB types above, so we should never
13972 13972 * reach this point. Set the CDB command byte
13973 13973 * to test-unit-ready to avoid writing
13974 13974 * to somewhere we don't intend.
13975 13975 */
13976 13976 cdbp->scc_cmd = SCMD_TEST_UNIT_READY;
13977 13977 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13978 13978 } else {
13979 13979 /*
13980 13980 * Couldn't get scsi_pkt
13981 13981 */
13982 13982 return (SD_PKT_ALLOC_FAILURE);
13983 13983 }
13984 13984 }
13985 13985 }
13986 13986
13987 13987 /*
13988 13988 * None of the available CDB types were suitable. This really
13989 13989 * should never happen: on a 64 bit system we support
13990 13990 * READ16/WRITE16 which will hold an entire 64 bit disk address
13991 13991 * and on a 32 bit system we will refuse to bind to a device
13992 13992 * larger than 2TB so addresses will never be larger than 32 bits.
13993 13993 */
13994 13994 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13995 13995 }
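
The selection loop above is driven entirely by the per-group limits in
sd_cdbtab. A user-space sketch of the same scan, with a stand-in table built
from the standard READ/WRITE addressing limits (the driver's actual sd_cdbtab
entries, and the un_mincdb index where its scan starts, live in the sd
sources):

#include <stdio.h>
#include <stdint.h>

struct cdbinfo {
	int		grpcode;	/* CDB length in bytes */
	uint64_t	maxlba;		/* largest addressable LBA */
	uint64_t	maxlen;		/* largest transfer, in blocks */
};

static const struct cdbinfo cdbtab[] = {
	{  6, 0x1FFFFF, 0x100 },		/* Group 0: READ(6)  */
	{ 10, 0xFFFFFFFF, 0xFFFF },		/* Group 1: READ(10) */
	{ 12, 0xFFFFFFFF, 0xFFFFFFFF },		/* Group 5: READ(12) */
	{ 16, UINT64_MAX, 0xFFFFFFFF },		/* Group 4: READ(16) */
};

/* Mirrors the CDB-size scan at the top of sd_setup_rw_pkt(). */
static int
pick_cdb_group(uint64_t lba, uint64_t blockcount)
{
	for (int i = 0; i < 4; i++) {
		if ((lba + blockcount - 1 <= cdbtab[i].maxlba) &&
		    (blockcount <= cdbtab[i].maxlen))
			return (cdbtab[i].grpcode);
	}
	return (-1);	/* request cannot fit into any CDB */
}

int
main(void)
{
	printf("%d\n", pick_cdb_group(0, 1));			/* 6  */
	printf("%d\n", pick_cdb_group(0x200000, 1));		/* 10 */
	printf("%d\n", pick_cdb_group(0, 0x10000));		/* 12 */
	printf("%d\n", pick_cdb_group(0x100000000ULL, 8));	/* 16 */
	return (0);
}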
13996 13996
13997 13997 /*
13998 13998 * Function: sd_setup_next_rw_pkt
13999 13999 *
14000 14000 * Description: Setup packet for partial DMA transfers, except for the
14001 14001 * initial transfer. sd_setup_rw_pkt should be used for
14002 14002 * the initial transfer.
14003 14003 *
14004 14004 * Context: Kernel thread and may be called from interrupt context.
14005 14005 */
14006 14006
14007 14007 int
14008 14008 sd_setup_next_rw_pkt(struct sd_lun *un,
14009 14009 struct scsi_pkt *pktp, struct buf *bp,
14010 14010 diskaddr_t lba, uint32_t blockcount)
14011 14011 {
14012 14012 uchar_t com;
14013 14013 union scsi_cdb *cdbp;
14014 14014 uchar_t cdb_group_id;
14015 14015
14016 14016 ASSERT(pktp != NULL);
14017 14017 ASSERT(pktp->pkt_cdbp != NULL);
14018 14018
14019 14019 cdbp = (union scsi_cdb *)pktp->pkt_cdbp;
14020 14020 com = cdbp->scc_cmd;
14021 14021 cdb_group_id = CDB_GROUPID(com);
14022 14022
14023 14023 ASSERT((cdb_group_id == CDB_GROUPID_0) ||
14024 14024 (cdb_group_id == CDB_GROUPID_1) ||
14025 14025 (cdb_group_id == CDB_GROUPID_4) ||
14026 14026 (cdb_group_id == CDB_GROUPID_5));
14027 14027
14028 14028 /*
14029 14029 * Move pkt to the next portion of the xfer.
14030 14030 * func is NULL_FUNC so we do not have to release
14031 14031 * the disk mutex here.
14032 14032 */
14033 14033 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0,
14034 14034 NULL_FUNC, NULL) == pktp) {
14035 14035 /* Success. Handle partial DMA */
14036 14036 if (pktp->pkt_resid != 0) {
14037 14037 blockcount -=
14038 14038 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid);
14039 14039 }
14040 14040
14041 14041 cdbp->scc_cmd = com;
14042 14042 SD_FILL_SCSI1_LUN(un, pktp);
14043 14043 if (cdb_group_id == CDB_GROUPID_1) {
14044 14044 FORMG1ADDR(cdbp, lba);
14045 14045 FORMG1COUNT(cdbp, blockcount);
14046 14046 return (0);
14047 14047 } else if (cdb_group_id == CDB_GROUPID_4) {
14048 14048 FORMG4LONGADDR(cdbp, lba);
14049 14049 FORMG4COUNT(cdbp, blockcount);
14050 14050 return (0);
14051 14051 } else if (cdb_group_id == CDB_GROUPID_0) {
14052 14052 FORMG0ADDR(cdbp, lba);
14053 14053 FORMG0COUNT(cdbp, blockcount);
14054 14054 return (0);
14055 14055 } else if (cdb_group_id == CDB_GROUPID_5) {
14056 14056 FORMG5ADDR(cdbp, lba);
14057 14057 FORMG5COUNT(cdbp, blockcount);
14058 14058 return (0);
14059 14059 }
14060 14060
14061 14061 /* Unreachable */
14062 14062 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
14063 14063 }
14064 14064
14065 14065 /*
14066 14066 * Error setting up next portion of cmd transfer.
14067 14067 * Something is definitely very wrong and this
14068 14068 * should not happen.
14069 14069 */
14070 14070 return (SD_PKT_ALLOC_FAILURE);
14071 14071 }
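
Taken together, sd_setup_rw_pkt() and sd_setup_next_rw_pkt() walk a large
transfer through a series of DMA windows: every mapping that comes back with
a nonzero pkt_resid shrinks the block count for that segment, and the next
call resumes at the advanced LBA. A toy simulation of that progression
(dma_window is a made-up stand-in for whatever the HBA can map at once):

#include <stdio.h>
#include <stdint.h>

#define	BLKSIZE	512

int
main(void)
{
	uint64_t lba = 1000;		/* starting block of the I/O */
	uint64_t blocks = 96;		/* total transfer in blocks */
	uint64_t dma_window = 16384;	/* bytes mappable per segment */

	while (blocks > 0) {
		uint64_t bytes = blocks * BLKSIZE;
		/* resid models pkt_resid: bytes that did not get mapped */
		uint64_t resid = (bytes > dma_window) ?
		    bytes - dma_window : 0;
		uint64_t seg = blocks - resid / BLKSIZE;

		printf("segment: lba %llu, %llu blocks\n",
		    (unsigned long long)lba, (unsigned long long)seg);
		lba += seg;
		blocks -= seg;
	}
	return (0);
}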
14072 14072
14073 14073 /*
14074 14074 * Function: sd_initpkt_for_uscsi
14075 14075 *
14076 14076 * Description: Allocate and initialize for transport a scsi_pkt struct,
14077 14077 * based upon the info specified in the given uscsi_cmd struct.
14078 14078 *
14079 14079 * Return Code: SD_PKT_ALLOC_SUCCESS
14080 14080 * SD_PKT_ALLOC_FAILURE
14081 14081 * SD_PKT_ALLOC_FAILURE_NO_DMA
14082 14082 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
14083 14083 *
14084 14084 * Context: Kernel thread and may be called from software interrupt context
14085 14085 * as part of a sdrunout callback. This function may not block or
14086 14086 * call routines that block
14087 14087 */
14088 14088
14089 14089 static int
14090 14090 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
14091 14091 {
14092 14092 struct uscsi_cmd *uscmd;
14093 14093 struct sd_xbuf *xp;
14094 14094 struct scsi_pkt *pktp;
14095 14095 struct sd_lun *un;
14096 14096 uint32_t flags = 0;
14097 14097
14098 14098 ASSERT(bp != NULL);
14099 14099 ASSERT(pktpp != NULL);
14100 14100 xp = SD_GET_XBUF(bp);
14101 14101 ASSERT(xp != NULL);
14102 14102 un = SD_GET_UN(bp);
14103 14103 ASSERT(un != NULL);
14104 14104 ASSERT(mutex_owned(SD_MUTEX(un)));
14105 14105
14106 14106 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
14107 14107 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
14108 14108 ASSERT(uscmd != NULL);
14109 14109
14110 14110 SD_TRACE(SD_LOG_IO_CORE, un,
14111 14111 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);
14112 14112
14113 14113 /*
14114 14114 * Allocate the scsi_pkt for the command.
14115 14115 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
14116 14116 * during scsi_init_pkt time and will continue to use the
14117 14117 * same path as long as the same scsi_pkt is used without
14118 14118	 * intervening scsi_dma_free(). Since a uscsi command does
14119 14119	 * not call scsi_dmafree() before retrying a failed command,
14120 14120	 * it is necessary to make sure the PKT_DMA_PARTIAL flag is
14121 14121	 * NOT set, so that scsi_vhci can use another available path
14122 14122	 * for the retry. Besides, uscsi commands do not allow DMA
14123 14123	 * breakup, so there is no need to set PKT_DMA_PARTIAL.
14124 14124 */
14125 14125 if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
14126 14126 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
14127 14127 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
14128 14128 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
14129 14129 - sizeof (struct scsi_extended_sense)), 0,
14130 14130 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
14131 14131 sdrunout, (caddr_t)un);
14132 14132 } else {
14133 14133 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
14134 14134 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
14135 14135 sizeof (struct scsi_arq_status), 0,
14136 14136 (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
14137 14137 sdrunout, (caddr_t)un);
14138 14138 }
14139 14139
14140 14140 if (pktp == NULL) {
14141 14141 *pktpp = NULL;
14142 14142 /*
14143 14143 * Set the driver state to RWAIT to indicate the driver
14144 14144 * is waiting on resource allocations. The driver will not
14145 14145	 * suspend, pm_suspend, or detach while the state is RWAIT.
14146 14146 */
14147 14147 New_state(un, SD_STATE_RWAIT);
14148 14148
14149 14149 SD_ERROR(SD_LOG_IO_CORE, un,
14150 14150 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);
14151 14151
14152 14152 if ((bp->b_flags & B_ERROR) != 0) {
14153 14153 return (SD_PKT_ALLOC_FAILURE_NO_DMA);
14154 14154 }
14155 14155 return (SD_PKT_ALLOC_FAILURE);
14156 14156 }
14157 14157
14158 14158 /*
14159 14159 * We do not do DMA breakup for USCSI commands, so return failure
14160 14160 * here if all the needed DMA resources were not allocated.
14161 14161 */
14162 14162 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
14163 14163 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
14164 14164 scsi_destroy_pkt(pktp);
14165 14165 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
14166 14166 "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
14167 14167 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
14168 14168 }
14169 14169
14170 14170 /* Init the cdb from the given uscsi struct */
14171 14171 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
14172 14172 uscmd->uscsi_cdb[0], 0, 0, 0);
14173 14173
14174 14174 SD_FILL_SCSI1_LUN(un, pktp);
14175 14175
14176 14176 /*
14177 14177 * Set up the optional USCSI flags. See the uscsi (7I) man page
14178 14178	 * for a listing of the supported flags.
14179 14179 */
14180 14180
14181 14181 if (uscmd->uscsi_flags & USCSI_SILENT) {
14182 14182 flags |= FLAG_SILENT;
14183 14183 }
14184 14184
14185 14185 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
14186 14186 flags |= FLAG_DIAGNOSE;
14187 14187 }
14188 14188
14189 14189 if (uscmd->uscsi_flags & USCSI_ISOLATE) {
14190 14190 flags |= FLAG_ISOLATE;
14191 14191 }
14192 14192
14193 14193 if (un->un_f_is_fibre == FALSE) {
14194 14194 if (uscmd->uscsi_flags & USCSI_RENEGOT) {
14195 14195 flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
14196 14196 }
14197 14197 }
14198 14198
14199 14199 /*
14200 14200 * Set the pkt flags here so we save time later.
14201 14201 * Note: These flags are NOT in the uscsi man page!!!
14202 14202 */
14203 14203 if (uscmd->uscsi_flags & USCSI_HEAD) {
14204 14204 flags |= FLAG_HEAD;
14205 14205 }
14206 14206
14207 14207 if (uscmd->uscsi_flags & USCSI_NOINTR) {
14208 14208 flags |= FLAG_NOINTR;
14209 14209 }
14210 14210
14211 14211 /*
14212 14212 * For tagged queueing, things get a bit complicated.
14213 14213 * Check first for head of queue and last for ordered queue.
14214 14214	 * If neither head nor ordered is set, use the default driver tag flags.
14215 14215 */
14216 14216 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
14217 14217 if (uscmd->uscsi_flags & USCSI_HTAG) {
14218 14218 flags |= FLAG_HTAG;
14219 14219 } else if (uscmd->uscsi_flags & USCSI_OTAG) {
14220 14220 flags |= FLAG_OTAG;
14221 14221 } else {
14222 14222 flags |= un->un_tagflags & FLAG_TAGMASK;
14223 14223 }
14224 14224 }
14225 14225
14226 14226 if (uscmd->uscsi_flags & USCSI_NODISCON) {
14227 14227 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
14228 14228 }
14229 14229
14230 14230 pktp->pkt_flags = flags;
14231 14231
14232 14232 /* Transfer uscsi information to scsi_pkt */
14233 14233 (void) scsi_uscsi_pktinit(uscmd, pktp);
14234 14234
14235 14235 /* Copy the caller's CDB into the pkt... */
14236 14236 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);
14237 14237
14238 14238 if (uscmd->uscsi_timeout == 0) {
14239 14239 pktp->pkt_time = un->un_uscsi_timeout;
14240 14240 } else {
14241 14241 pktp->pkt_time = uscmd->uscsi_timeout;
14242 14242 }
14243 14243
14244 14244 /* need it later to identify USCSI request in sdintr */
14245 14245 xp->xb_pkt_flags |= SD_XB_USCSICMD;
14246 14246
14247 14247 xp->xb_sense_resid = uscmd->uscsi_rqresid;
14248 14248
14249 14249 pktp->pkt_private = bp;
14250 14250 pktp->pkt_comp = sdintr;
14251 14251 *pktpp = pktp;
14252 14252
14253 14253 SD_TRACE(SD_LOG_IO_CORE, un,
14254 14254 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);
14255 14255
14256 14256 return (SD_PKT_ALLOC_SUCCESS);
14257 14257 }
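
The flag plumbing above reduces to a pure function from uscsi_flags to
pkt_flags. A sketch of that mapping, with stand-in bit values (the real
constants come from the uscsi and scsi_pkt headers), makes the tag-selection
precedence explicit:

#include <stdio.h>

#define	USCSI_SILENT	0x01	/* stand-in values throughout */
#define	USCSI_HTAG	0x02
#define	USCSI_OTAG	0x04
#define	USCSI_NOTAG	0x08
#define	USCSI_NODISCON	0x10

#define	FLAG_SILENT	0x01
#define	FLAG_HTAG	0x02
#define	FLAG_OTAG	0x04
#define	FLAG_STAG	0x08	/* default simple tag */
#define	FLAG_TAGMASK	(FLAG_HTAG | FLAG_OTAG | FLAG_STAG)
#define	FLAG_NODISCON	0x10

/* Mirrors sd_initpkt_for_uscsi(): HTAG beats OTAG beats the default. */
static int
uscsi_to_pkt_flags(int uflags, int un_tagflags)
{
	int flags = 0;

	if (uflags & USCSI_SILENT)
		flags |= FLAG_SILENT;
	if ((uflags & USCSI_NOTAG) == 0) {
		if (uflags & USCSI_HTAG)
			flags |= FLAG_HTAG;
		else if (uflags & USCSI_OTAG)
			flags |= FLAG_OTAG;
		else
			flags |= un_tagflags & FLAG_TAGMASK;
	}
	if (uflags & USCSI_NODISCON)	/* NODISCON overrides tagging */
		flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
	return (flags);
}

int
main(void)
{
	printf("0x%x\n", uscsi_to_pkt_flags(USCSI_HTAG | USCSI_OTAG, FLAG_STAG));
	printf("0x%x\n", uscsi_to_pkt_flags(USCSI_NODISCON, FLAG_STAG));
	return (0);
}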
14258 14258
14259 14259
14260 14260 /*
14261 14261 * Function: sd_destroypkt_for_uscsi
14262 14262 *
14263 14263 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
14264 14264	 * IOs. Also saves relevant info into the associated uscsi_cmd
14265 14265 * struct.
14266 14266 *
14267 14267 * Context: May be called under interrupt context
14268 14268 */
14269 14269
14270 14270 static void
14271 14271 sd_destroypkt_for_uscsi(struct buf *bp)
14272 14272 {
14273 14273 struct uscsi_cmd *uscmd;
14274 14274 struct sd_xbuf *xp;
14275 14275 struct scsi_pkt *pktp;
14276 14276 struct sd_lun *un;
14277 14277 struct sd_uscsi_info *suip;
14278 14278
14279 14279 ASSERT(bp != NULL);
14280 14280 xp = SD_GET_XBUF(bp);
14281 14281 ASSERT(xp != NULL);
14282 14282 un = SD_GET_UN(bp);
14283 14283 ASSERT(un != NULL);
14284 14284 ASSERT(!mutex_owned(SD_MUTEX(un)));
14285 14285 pktp = SD_GET_PKTP(bp);
14286 14286 ASSERT(pktp != NULL);
14287 14287
14288 14288 SD_TRACE(SD_LOG_IO_CORE, un,
14289 14289 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);
14290 14290
14291 14291 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
14292 14292 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
14293 14293 ASSERT(uscmd != NULL);
14294 14294
14295 14295 /* Save the status and the residual into the uscsi_cmd struct */
14296 14296 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
14297 14297 uscmd->uscsi_resid = bp->b_resid;
14298 14298
14299 14299 /* Transfer scsi_pkt information to uscsi */
14300 14300 (void) scsi_uscsi_pktfini(pktp, uscmd);
14301 14301
14302 14302 /*
14303 14303 * If enabled, copy any saved sense data into the area specified
14304 14304 * by the uscsi command.
14305 14305 */
14306 14306 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
14307 14307 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
14308 14308 /*
14309 14309 * Note: uscmd->uscsi_rqbuf should always point to a buffer
14310 14310 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
14311 14311 */
14312 14312 uscmd->uscsi_rqstatus = xp->xb_sense_status;
14313 14313 uscmd->uscsi_rqresid = xp->xb_sense_resid;
14314 14314 if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
14315 14315 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
14316 14316 MAX_SENSE_LENGTH);
14317 14317 } else {
14318 14318 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
14319 14319 SENSE_LENGTH);
14320 14320 }
14321 14321 }
14322 14322 /*
14323 14323 * The following assignments are for SCSI FMA.
14324 14324 */
14325 14325 ASSERT(xp->xb_private != NULL);
14326 14326 suip = (struct sd_uscsi_info *)xp->xb_private;
14327 14327 suip->ui_pkt_reason = pktp->pkt_reason;
14328 14328 suip->ui_pkt_state = pktp->pkt_state;
14329 14329 suip->ui_pkt_statistics = pktp->pkt_statistics;
14330 14330 suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp);
14331 14331
14332 14332 /* We are done with the scsi_pkt; free it now */
14333 14333 ASSERT(SD_GET_PKTP(bp) != NULL);
14334 14334 scsi_destroy_pkt(SD_GET_PKTP(bp));
14335 14335
14336 14336 SD_TRACE(SD_LOG_IO_CORE, un,
14337 14337 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
14338 14338 }
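
The sense copy-out above picks its length from uscsi_rqlen: requests that
asked for more than the normal sense size get the extended (XARQ) amount.
A compact model of that choice, with stand-in values for the two length
constants:

#include <stdio.h>

#define	SENSE_LENGTH		20	/* stand-in for the normal size */
#define	MAX_SENSE_LENGTH	252	/* stand-in for the XARQ maximum */

/* Mirrors the bcopy length choice in sd_destroypkt_for_uscsi(). */
static int
sense_copy_len(int rqlen)
{
	return ((rqlen > SENSE_LENGTH) ? MAX_SENSE_LENGTH : SENSE_LENGTH);
}

int
main(void)
{
	printf("rqlen 18  -> copy %d bytes\n", sense_copy_len(18));
	printf("rqlen 255 -> copy %d bytes\n", sense_copy_len(255));
	return (0);
}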
14339 14339
14340 14340
14341 14341 /*
14342 14342 * Function: sd_bioclone_alloc
14343 14343 *
14344 14344 * Description: Allocate a buf(9S) and init it as per the given buf
14345 14345 * and the various arguments. The associated sd_xbuf
14346 14346 * struct is (nearly) duplicated. The struct buf *bp
14347 14347 * argument is saved in new_xp->xb_private.
14348 14348 *
14349 14349	 * Arguments:	bp - ptr to the buf(9S) to be "shadowed"
14350 14350 * datalen - size of data area for the shadow bp
14351 14351 * blkno - starting LBA
14352 14352 * func - function pointer for b_iodone in the shadow buf. (May
14353 14353 * be NULL if none.)
14354 14354 *
14355 14355	 * Return Code:	Pointer to the allocated buf(9S) struct
14356 14356 *
14357 14357 * Context: Can sleep.
14358 14358 */
14359 14359
14360 14360 static struct buf *
14361 14361 sd_bioclone_alloc(struct buf *bp, size_t datalen,
14362 14362 daddr_t blkno, int (*func)(struct buf *))
14363 14363 {
14364 14364 struct sd_lun *un;
14365 14365 struct sd_xbuf *xp;
14366 14366 struct sd_xbuf *new_xp;
14367 14367 struct buf *new_bp;
14368 14368
14369 14369 ASSERT(bp != NULL);
14370 14370 xp = SD_GET_XBUF(bp);
14371 14371 ASSERT(xp != NULL);
14372 14372 un = SD_GET_UN(bp);
14373 14373 ASSERT(un != NULL);
14374 14374 ASSERT(!mutex_owned(SD_MUTEX(un)));
14375 14375
14376 14376 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func,
14377 14377 NULL, KM_SLEEP);
14378 14378
14379 14379 new_bp->b_lblkno = blkno;
14380 14380
14381 14381 /*
14382 14382 * Allocate an xbuf for the shadow bp and copy the contents of the
14383 14383 * original xbuf into it.
14384 14384 */
14385 14385 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
14386 14386 bcopy(xp, new_xp, sizeof (struct sd_xbuf));
14387 14387
14388 14388 /*
14389 14389 * The given bp is automatically saved in the xb_private member
14390 14390 * of the new xbuf. Callers are allowed to depend on this.
14391 14391 */
14392 14392 new_xp->xb_private = bp;
14393 14393
14394 14394 new_bp->b_private = new_xp;
14395 14395
14396 14396 return (new_bp);
14397 14397 }
14398 14398
14399 14399 /*
14400 14400 * Function: sd_shadow_buf_alloc
14401 14401 *
14402 14402 * Description: Allocate a buf(9S) and init it as per the given buf
14403 14403 * and the various arguments. The associated sd_xbuf
14404 14404 * struct is (nearly) duplicated. The struct buf *bp
14405 14405 * argument is saved in new_xp->xb_private.
14406 14406 *
14407 14407	 * Arguments:	bp - ptr to the buf(9S) to be "shadowed"
14408 14408 * datalen - size of data area for the shadow bp
14409 14409 * bflags - B_READ or B_WRITE (pseudo flag)
14410 14410 * blkno - starting LBA
14411 14411 * func - function pointer for b_iodone in the shadow buf. (May
14412 14412 * be NULL if none.)
14413 14413 *
14414 14414	 * Return Code:	Pointer to the allocated buf(9S) struct
14415 14415 *
14416 14416 * Context: Can sleep.
14417 14417 */
14418 14418
14419 14419 static struct buf *
14420 14420 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags,
14421 14421 daddr_t blkno, int (*func)(struct buf *))
14422 14422 {
14423 14423 struct sd_lun *un;
14424 14424 struct sd_xbuf *xp;
14425 14425 struct sd_xbuf *new_xp;
14426 14426 struct buf *new_bp;
14427 14427
14428 14428 ASSERT(bp != NULL);
14429 14429 xp = SD_GET_XBUF(bp);
14430 14430 ASSERT(xp != NULL);
14431 14431 un = SD_GET_UN(bp);
14432 14432 ASSERT(un != NULL);
14433 14433 ASSERT(!mutex_owned(SD_MUTEX(un)));
14434 14434
14435 14435 if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
14436 14436 bp_mapin(bp);
14437 14437 }
14438 14438
14439 14439 bflags &= (B_READ | B_WRITE);
14440 14440 #if defined(__i386) || defined(__amd64)
14441 14441 new_bp = getrbuf(KM_SLEEP);
14442 14442 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP);
14443 14443 new_bp->b_bcount = datalen;
14444 14444 new_bp->b_flags = bflags |
14445 14445 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW));
14446 14446 #else
14447 14447 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL,
14448 14448 datalen, bflags, SLEEP_FUNC, NULL);
14449 14449 #endif
14450 14450 new_bp->av_forw = NULL;
14451 14451 new_bp->av_back = NULL;
14452 14452 new_bp->b_dev = bp->b_dev;
14453 14453 new_bp->b_blkno = blkno;
14454 14454 new_bp->b_iodone = func;
14455 14455 new_bp->b_edev = bp->b_edev;
14456 14456 new_bp->b_resid = 0;
14457 14457
14458 14458 /* We need to preserve the B_FAILFAST flag */
14459 14459 if (bp->b_flags & B_FAILFAST) {
14460 14460 new_bp->b_flags |= B_FAILFAST;
14461 14461 }
14462 14462
14463 14463 /*
14464 14464 * Allocate an xbuf for the shadow bp and copy the contents of the
14465 14465 * original xbuf into it.
14466 14466 */
14467 14467 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
14468 14468 bcopy(xp, new_xp, sizeof (struct sd_xbuf));
14469 14469
14470 14470	/* Needed later to copy data between the shadow buf & original buf! */
14471 14471 new_xp->xb_pkt_flags |= PKT_CONSISTENT;
14472 14472
14473 14473 /*
14474 14474 * The given bp is automatically saved in the xb_private member
14475 14475 * of the new xbuf. Callers are allowed to depend on this.
14476 14476 */
14477 14477 new_xp->xb_private = bp;
14478 14478
14479 14479 new_bp->b_private = new_xp;
14480 14480
14481 14481 return (new_bp);
14482 14482 }
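
Shadow bufs exist so that an I/O which is not expressed in the device's
native block size can still be serviced: the driver reads whole native
blocks into the shadow buffer and copies just the requested byte range to
or from the caller's buffer (the xb_private back-pointer set above is what
makes that copy possible). A minimal read-side model, with made-up sizes:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define	TGT_BLKSIZE	2048	/* device native block size (example) */

/* Stand-in for reading whole native blocks from the device. */
static void
device_read(char *dst, size_t blocks)
{
	memset(dst, 'D', blocks * TGT_BLKSIZE);
}

int
main(void)
{
	size_t offset = 1000, len = 3000;	/* unaligned request */
	size_t start_blk = offset / TGT_BLKSIZE;
	size_t end_blk = (offset + len + TGT_BLKSIZE - 1) / TGT_BLKSIZE;
	size_t shadow_len = (end_blk - start_blk) * TGT_BLKSIZE;

	char *shadow = malloc(shadow_len);	/* the "shadow buf" */
	char *user = malloc(len);		/* the original buf */

	/* Read rounded-out blocks, then copy back only what was asked. */
	device_read(shadow, end_blk - start_blk);
	memcpy(user, shadow + (offset - start_blk * TGT_BLKSIZE), len);

	printf("shadow covers %zu bytes for a %zu-byte request\n",
	    shadow_len, len);
	free(shadow);
	free(user);
	return (0);
}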
14483 14483
14484 14484 /*
14485 14485 * Function: sd_bioclone_free
14486 14486 *
14487 14487 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations
14488 14488	 *		in larger-than-partition operations.
14489 14489 *
14490 14490 * Context: May be called under interrupt context
14491 14491 */
14492 14492
14493 14493 static void
14494 14494 sd_bioclone_free(struct buf *bp)
14495 14495 {
14496 14496 struct sd_xbuf *xp;
14497 14497
14498 14498 ASSERT(bp != NULL);
14499 14499 xp = SD_GET_XBUF(bp);
14500 14500 ASSERT(xp != NULL);
14501 14501
14502 14502 /*
14503 14503 * Call bp_mapout() before freeing the buf, in case a lower
14504 14504	 * layer or HBA had done a bp_mapin(). We must do this here
14505 14505 * as we are the "originator" of the shadow buf.
14506 14506 */
14507 14507 bp_mapout(bp);
14508 14508
14509 14509 /*
14510 14510 * Null out b_iodone before freeing the bp, to ensure that the driver
14511 14511 * never gets confused by a stale value in this field. (Just a little
14512 14512 * extra defensiveness here.)
14513 14513 */
14514 14514 bp->b_iodone = NULL;
14515 14515
14516 14516 freerbuf(bp);
14517 14517
14518 14518 kmem_free(xp, sizeof (struct sd_xbuf));
14519 14519 }
14520 14520
14521 14521 /*
14522 14522 * Function: sd_shadow_buf_free
14523 14523 *
14524 14524 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations.
14525 14525 *
14526 14526 * Context: May be called under interrupt context
14527 14527 */
14528 14528
14529 14529 static void
14530 14530 sd_shadow_buf_free(struct buf *bp)
14531 14531 {
14532 14532 struct sd_xbuf *xp;
14533 14533
14534 14534 ASSERT(bp != NULL);
14535 14535 xp = SD_GET_XBUF(bp);
14536 14536 ASSERT(xp != NULL);
14537 14537
14538 14538 #if defined(__sparc)
14539 14539 /*
14540 14540 * Call bp_mapout() before freeing the buf, in case a lower
14541 14541	 * layer or HBA had done a bp_mapin(). We must do this here
14542 14542 * as we are the "originator" of the shadow buf.
14543 14543 */
14544 14544 bp_mapout(bp);
14545 14545 #endif
14546 14546
14547 14547 /*
14548 14548 * Null out b_iodone before freeing the bp, to ensure that the driver
14549 14549 * never gets confused by a stale value in this field. (Just a little
14550 14550 * extra defensiveness here.)
14551 14551 */
14552 14552 bp->b_iodone = NULL;
14553 14553
14554 14554 #if defined(__i386) || defined(__amd64)
14555 14555 kmem_free(bp->b_un.b_addr, bp->b_bcount);
14556 14556 freerbuf(bp);
14557 14557 #else
14558 14558 scsi_free_consistent_buf(bp);
14559 14559 #endif
14560 14560
14561 14561 kmem_free(xp, sizeof (struct sd_xbuf));
14562 14562 }
14563 14563
14564 14564
14565 14565 /*
14566 14566 * Function: sd_print_transport_rejected_message
14567 14567 *
14568 14568 * Description: This implements the ludicrously complex rules for printing
14569 14569 * a "transport rejected" message. This is to address the
14570 14570 * specific problem of having a flood of this error message
14571 14571 * produced when a failover occurs.
14572 14572 *
14573 14573 * Context: Any.
14574 14574 */
14575 14575
14576 14576 static void
14577 14577 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp,
14578 14578 int code)
14579 14579 {
14580 14580 ASSERT(un != NULL);
14581 14581 ASSERT(mutex_owned(SD_MUTEX(un)));
14582 14582 ASSERT(xp != NULL);
14583 14583
14584 14584 /*
14585 14585 * Print the "transport rejected" message under the following
14586 14586 * conditions:
14587 14587 *
14588 14588 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set
14589 14589 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR.
14590 14590 * - If the error code IS a TRAN_FATAL_ERROR, then the message is
14591 14591 * printed the FIRST time a TRAN_FATAL_ERROR is returned from
14592 14592 * scsi_transport(9F) (which indicates that the target might have
14593 14593 * gone off-line). This uses the un->un_tran_fatal_count
14594 14594 * count, which is incremented whenever a TRAN_FATAL_ERROR is
14595 14595	 *   received, and reset to zero whenever a TRAN_ACCEPT is returned
14596 14596 * from scsi_transport().
14597 14597 *
14598 14598 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
14599 14599	 * the preceding cases in order for the message to be printed.
14600 14600 */
14601 14601 if (((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) &&
14602 14602 (SD_FM_LOG(un) == SD_FM_LOG_NSUP)) {
14603 14603 if ((sd_level_mask & SD_LOGMASK_DIAG) ||
14604 14604 (code != TRAN_FATAL_ERROR) ||
14605 14605 (un->un_tran_fatal_count == 1)) {
14606 14606 switch (code) {
14607 14607 case TRAN_BADPKT:
14608 14608 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14609 14609 "transport rejected bad packet\n");
14610 14610 break;
14611 14611 case TRAN_FATAL_ERROR:
14612 14612 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14613 14613 "transport rejected fatal error\n");
14614 14614 break;
14615 14615 default:
14616 14616 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14617 14617 "transport rejected (%d)\n", code);
14618 14618 break;
14619 14619 }
14620 14620 }
14621 14621 }
14622 14622 }
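
Stripped of the logging calls, the rules above form a single predicate.
A sketch of it (flag values are stand-ins, and the SD_FM_LOG() check is
omitted for brevity):

#include <stdio.h>

#define	FLAG_SILENT		0x1
#define	SD_LOGMASK_DIAG		0x1
#define	TRAN_FATAL_ERROR	4	/* stand-in value */

/*
 * True when the "transport rejected" message should be printed:
 * FLAG_SILENT must be clear, and then either diagnostics are enabled,
 * the error is non-fatal, or this is the FIRST fatal error since the
 * last TRAN_ACCEPT (un_tran_fatal_count == 1).
 */
static int
should_print(int pkt_flags, int level_mask, int code, int fatal_count)
{
	if (pkt_flags & FLAG_SILENT)
		return (0);
	return ((level_mask & SD_LOGMASK_DIAG) ||
	    (code != TRAN_FATAL_ERROR) || (fatal_count == 1));
}

int
main(void)
{
	/* Flood suppression: only the first fatal error is reported. */
	for (int n = 1; n <= 3; n++)
		printf("fatal #%d -> %s\n", n,
		    should_print(0, 0, TRAN_FATAL_ERROR, n) ?
		    "print" : "quiet");
	return (0);
}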
14623 14623
14624 14624
14625 14625 /*
14626 14626 * Function: sd_add_buf_to_waitq
14627 14627 *
14628 14628 * Description: Add the given buf(9S) struct to the wait queue for the
14629 14629 * instance. If sorting is enabled, then the buf is added
14630 14630 * to the queue via an elevator sort algorithm (a la
14631 14631 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key.
14632 14632 * If sorting is not enabled, then the buf is just added
14633 14633 * to the end of the wait queue.
14634 14634 *
14635 14635 * Return Code: void
14636 14636 *
14637 14637 * Context: Does not sleep/block, therefore technically can be called
14638 14638 * from any context. However if sorting is enabled then the
14639 14639	 *		execution time is indeterminate, and may take a long time if
14640 14640 * the wait queue grows large.
14641 14641 */
14642 14642
14643 14643 static void
14644 14644 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp)
14645 14645 {
14646 14646 struct buf *ap;
14647 14647
14648 14648 ASSERT(bp != NULL);
14649 14649 ASSERT(un != NULL);
14650 14650 ASSERT(mutex_owned(SD_MUTEX(un)));
14651 14651
14652 14652 /* If the queue is empty, add the buf as the only entry & return. */
14653 14653 if (un->un_waitq_headp == NULL) {
14654 14654 ASSERT(un->un_waitq_tailp == NULL);
14655 14655 un->un_waitq_headp = un->un_waitq_tailp = bp;
14656 14656 bp->av_forw = NULL;
14657 14657 return;
14658 14658 }
14659 14659
14660 14660 ASSERT(un->un_waitq_tailp != NULL);
14661 14661
14662 14662 /*
14663 14663 * If sorting is disabled, just add the buf to the tail end of
14664 14664 * the wait queue and return.
14665 14665 */
14666 14666 if (un->un_f_disksort_disabled || un->un_f_enable_rmw) {
14667 14667 un->un_waitq_tailp->av_forw = bp;
14668 14668 un->un_waitq_tailp = bp;
14669 14669 bp->av_forw = NULL;
14670 14670 return;
14671 14671 }
14672 14672
14673 14673 /*
14674 14674	 * Sort through the list of requests currently on the wait queue
14675 14675 * and add the new buf request at the appropriate position.
14676 14676 *
14677 14677 * The un->un_waitq_headp is an activity chain pointer on which
14678 14678 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The
14679 14679 * first queue holds those requests which are positioned after
14680 14680 * the current SD_GET_BLKNO() (in the first request); the second holds
14681 14681 * requests which came in after their SD_GET_BLKNO() number was passed.
14682 14682 * Thus we implement a one way scan, retracting after reaching
14683 14683 * the end of the drive to the first request on the second
14684 14684 * queue, at which time it becomes the first queue.
14685 14685 * A one-way scan is natural because of the way UNIX read-ahead
14686 14686 * blocks are allocated.
14687 14687 *
14688 14688 * If we lie after the first request, then we must locate the
14689 14689 * second request list and add ourselves to it.
14690 14690 */
14691 14691 ap = un->un_waitq_headp;
14692 14692 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) {
14693 14693 while (ap->av_forw != NULL) {
14694 14694 /*
14695 14695 * Look for an "inversion" in the (normally
14696 14696 * ascending) block numbers. This indicates
14697 14697 * the start of the second request list.
14698 14698 */
14699 14699 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) {
14700 14700 /*
14701 14701 * Search the second request list for the
14702 14702 * first request at a larger block number.
14703 14703 * We go before that; however if there is
14704 14704 * no such request, we go at the end.
14705 14705 */
14706 14706 do {
14707 14707 if (SD_GET_BLKNO(bp) <
14708 14708 SD_GET_BLKNO(ap->av_forw)) {
14709 14709 goto insert;
14710 14710 }
14711 14711 ap = ap->av_forw;
14712 14712 } while (ap->av_forw != NULL);
14713 14713 goto insert; /* after last */
14714 14714 }
14715 14715 ap = ap->av_forw;
14716 14716 }
14717 14717
14718 14718 /*
14719 14719 * No inversions... we will go after the last, and
14720 14720 * be the first request in the second request list.
14721 14721 */
14722 14722 goto insert;
14723 14723 }
14724 14724
14725 14725 /*
14726 14726 * Request is at/after the current request...
14727 14727 * sort in the first request list.
14728 14728 */
14729 14729 while (ap->av_forw != NULL) {
14730 14730 /*
14731 14731 * We want to go after the current request (1) if
14732 14732 * there is an inversion after it (i.e. it is the end
14733 14733 * of the first request list), or (2) if the next
14734 14734 * request is a larger block no. than our request.
14735 14735 */
14736 14736 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) ||
14737 14737 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) {
14738 14738 goto insert;
14739 14739 }
14740 14740 ap = ap->av_forw;
14741 14741 }
14742 14742
14743 14743 /*
14744 14744 * Neither a second list nor a larger request, therefore
14745 14745 * we go at the end of the first list (which is the same
14746 14746	 * as the end of the whole shebang).
14747 14747 */
14748 14748 insert:
14749 14749 bp->av_forw = ap->av_forw;
14750 14750 ap->av_forw = bp;
14751 14751
14752 14752 /*
14753 14753 * If we inserted onto the tail end of the waitq, make sure the
14754 14754 * tail pointer is updated.
14755 14755 */
14756 14756 if (ap == un->un_waitq_tailp) {
14757 14757 un->un_waitq_tailp = bp;
14758 14758 }
14759 14759 }
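
The two-queue, one-way-scan insertion above is self-contained enough to
lift into a user-space model. The sketch below is a hypothetical harness
around the same av_forw walk; requests whose block number falls behind the
head of the queue collect into a second ascending run:

#include <stdio.h>
#include <stdlib.h>

struct buf {
	long		blkno;
	struct buf	*av_forw;
};

/* Same insertion logic as sd_add_buf_to_waitq(), minus lock and kstats. */
static void
waitq_add(struct buf **headp, struct buf *bp)
{
	struct buf *ap = *headp;

	if (ap == NULL) {
		*headp = bp;
		bp->av_forw = NULL;
		return;
	}
	if (bp->blkno < ap->blkno) {
		/* We belong in the second (wrapped-around) run. */
		while (ap->av_forw != NULL) {
			if (ap->av_forw->blkno < ap->blkno) {
				/* Found the inversion: scan run two. */
				do {
					if (bp->blkno < ap->av_forw->blkno)
						goto insert;
					ap = ap->av_forw;
				} while (ap->av_forw != NULL);
				goto insert;
			}
			ap = ap->av_forw;
		}
		goto insert;	/* becomes the first entry of run two */
	}
	/* Sort into the first (current) run. */
	while (ap->av_forw != NULL) {
		if ((ap->av_forw->blkno < ap->blkno) ||
		    (bp->blkno < ap->av_forw->blkno))
			goto insert;
		ap = ap->av_forw;
	}
insert:
	bp->av_forw = ap->av_forw;
	ap->av_forw = bp;
}

int
main(void)
{
	long blks[] = { 100, 300, 200, 50, 400, 10 };
	struct buf *head = NULL, *bp;

	for (int i = 0; i < 6; i++) {
		bp = calloc(1, sizeof (*bp));
		bp->blkno = blks[i];
		waitq_add(&head, bp);
	}
	for (bp = head; bp != NULL; bp = bp->av_forw)
		printf("%ld ", bp->blkno);	/* 100 200 300 400 10 50 */
	printf("\n");
	return (0);
}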
14760 14760
14761 14761
14762 14762 /*
14763 14763 * Function: sd_start_cmds
14764 14764 *
14765 14765 * Description: Remove and transport cmds from the driver queues.
14766 14766 *
14767 14767 * Arguments: un - pointer to the unit (soft state) struct for the target.
14768 14768 *
14769 14769 * immed_bp - ptr to a buf to be transported immediately. Only
14770 14770 * the immed_bp is transported; bufs on the waitq are not
14771 14771 * processed and the un_retry_bp is not checked. If immed_bp is
14772 14772 * NULL, then normal queue processing is performed.
14773 14773 *
14774 14774 * Context: May be called from kernel thread context, interrupt context,
14775 14775 * or runout callback context. This function may not block or
14776 14776 * call routines that block.
14777 14777 */
14778 14778
14779 14779 static void
14780 14780 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp)
14781 14781 {
14782 14782 struct sd_xbuf *xp;
14783 14783 struct buf *bp;
14784 14784 void (*statp)(kstat_io_t *);
14785 14785 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14786 14786 void (*saved_statp)(kstat_io_t *);
14787 14787 #endif
14788 14788 int rval;
14789 14789 struct sd_fm_internal *sfip = NULL;
14790 14790
14791 14791 ASSERT(un != NULL);
14792 14792 ASSERT(mutex_owned(SD_MUTEX(un)));
14793 14793 ASSERT(un->un_ncmds_in_transport >= 0);
14794 14794 ASSERT(un->un_throttle >= 0);
14795 14795
14796 14796 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n");
14797 14797
14798 14798 do {
14799 14799 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14800 14800 saved_statp = NULL;
14801 14801 #endif
14802 14802
14803 14803 /*
14804 14804 * If we are syncing or dumping, fail the command to
14805 14805 * avoid recursively calling back into scsi_transport().
14806 14806 * The dump I/O itself uses a separate code path so this
14807 14807 * only prevents non-dump I/O from being sent while dumping.
14808 14808 * File system sync takes place before dumping begins.
14809 14809 * During panic, filesystem I/O is allowed provided
14810 14810 * un_in_callback is <= 1. This is to prevent recursion
14811 14811 * such as sd_start_cmds -> scsi_transport -> sdintr ->
14812 14812 * sd_start_cmds and so on. See panic.c for more information
14813 14813 * about the states the system can be in during panic.
14814 14814 */
14815 14815 if ((un->un_state == SD_STATE_DUMPING) ||
14816 14816 (ddi_in_panic() && (un->un_in_callback > 1))) {
14817 14817 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14818 14818 "sd_start_cmds: panicking\n");
14819 14819 goto exit;
14820 14820 }
14821 14821
14822 14822 if ((bp = immed_bp) != NULL) {
14823 14823 /*
14824 14824 * We have a bp that must be transported immediately.
14825 14825 * It's OK to transport the immed_bp here without doing
14826 14826 * the throttle limit check because the immed_bp is
14827 14827 * always used in a retry/recovery case. This means
14828 14828 * that we know we are not at the throttle limit by
14829 14829 * virtue of the fact that to get here we must have
14830 14830 * already gotten a command back via sdintr(). This also
14831 14831 * relies on (1) the command on un_retry_bp preventing
14832 14832 * further commands from the waitq from being issued;
14833 14833 * and (2) the code in sd_retry_command checking the
14834 14834 * throttle limit before issuing a delayed or immediate
14835 14835 * retry. This holds even if the throttle limit is
14836 14836 * currently ratcheted down from its maximum value.
14837 14837 */
14838 14838 statp = kstat_runq_enter;
14839 14839 if (bp == un->un_retry_bp) {
14840 14840 ASSERT((un->un_retry_statp == NULL) ||
14841 14841 (un->un_retry_statp == kstat_waitq_enter) ||
14842 14842 (un->un_retry_statp ==
14843 14843 kstat_runq_back_to_waitq));
14844 14844 /*
14845 14845 * If the waitq kstat was incremented when
14846 14846 * sd_set_retry_bp() queued this bp for a retry,
14847 14847 * then we must set up statp so that the waitq
14848 14848 * count will get decremented correctly below.
14849 14849 * Also we must clear un->un_retry_statp to
14850 14850 * ensure that we do not act on a stale value
14851 14851 * in this field.
14852 14852 */
14853 14853 if ((un->un_retry_statp == kstat_waitq_enter) ||
14854 14854 (un->un_retry_statp ==
14855 14855 kstat_runq_back_to_waitq)) {
14856 14856 statp = kstat_waitq_to_runq;
14857 14857 }
14858 14858 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14859 14859 saved_statp = un->un_retry_statp;
14860 14860 #endif
14861 14861 un->un_retry_statp = NULL;
14862 14862
14863 14863 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14864 14864 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p "
14865 14865 "un_throttle:%d un_ncmds_in_transport:%d\n",
14866 14866 un, un->un_retry_bp, un->un_throttle,
14867 14867 un->un_ncmds_in_transport);
14868 14868 } else {
14869 14869 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: "
14870 14870 "processing priority bp:0x%p\n", bp);
14871 14871 }
14872 14872
14873 14873 } else if ((bp = un->un_waitq_headp) != NULL) {
14874 14874 /*
14875 14875 * A command on the waitq is ready to go, but do not
14876 14876 * send it if:
14877 14877 *
14878 14878 * (1) the throttle limit has been reached, or
14879 14879 * (2) a retry is pending, or
14880 14880	 * (3) a START_STOP_UNIT callback is pending, or
14881 14881 * (4) a callback for a SD_PATH_DIRECT_PRIORITY
14882 14882 * command is pending.
14883 14883 *
14884 14884 * For all of these conditions, IO processing will
14885 14885 * restart after the condition is cleared.
14886 14886 */
14887 14887 if (un->un_ncmds_in_transport >= un->un_throttle) {
14888 14888 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14889 14889 "sd_start_cmds: exiting, "
14890 14890 "throttle limit reached!\n");
14891 14891 goto exit;
14892 14892 }
14893 14893 if (un->un_retry_bp != NULL) {
14894 14894 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14895 14895 "sd_start_cmds: exiting, retry pending!\n");
14896 14896 goto exit;
14897 14897 }
14898 14898 if (un->un_startstop_timeid != NULL) {
14899 14899 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14900 14900 "sd_start_cmds: exiting, "
14901 14901 "START_STOP pending!\n");
14902 14902 goto exit;
14903 14903 }
14904 14904 if (un->un_direct_priority_timeid != NULL) {
14905 14905 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14906 14906 "sd_start_cmds: exiting, "
14907 14907 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n");
14908 14908 goto exit;
14909 14909 }
14910 14910
14911 14911 /* Dequeue the command */
14912 14912 un->un_waitq_headp = bp->av_forw;
14913 14913 if (un->un_waitq_headp == NULL) {
14914 14914 un->un_waitq_tailp = NULL;
14915 14915 }
14916 14916 bp->av_forw = NULL;
14917 14917 statp = kstat_waitq_to_runq;
14918 14918 SD_TRACE(SD_LOG_IO_CORE, un,
14919 14919 "sd_start_cmds: processing waitq bp:0x%p\n", bp);
14920 14920
14921 14921 } else {
14922 14922 /* No work to do so bail out now */
14923 14923 SD_TRACE(SD_LOG_IO_CORE, un,
14924 14924 "sd_start_cmds: no more work, exiting!\n");
14925 14925 goto exit;
14926 14926 }
14927 14927
14928 14928 /*
14929 14929 * Reset the state to normal. This is the mechanism by which
14930 14930 * the state transitions from either SD_STATE_RWAIT or
14931 14931 * SD_STATE_OFFLINE to SD_STATE_NORMAL.
14932 14932 * If state is SD_STATE_PM_CHANGING then this command is
14933 14933 * part of the device power control and the state must
14934 14934	 * not be put back to normal. Doing so would
14935 14935	 * allow new commands to proceed when they shouldn't;
14936 14936	 * the device may be powering off.
14937 14937 */
14938 14938 if ((un->un_state != SD_STATE_SUSPENDED) &&
14939 14939 (un->un_state != SD_STATE_PM_CHANGING)) {
14940 14940 New_state(un, SD_STATE_NORMAL);
14941 14941 }
14942 14942
14943 14943 xp = SD_GET_XBUF(bp);
14944 14944 ASSERT(xp != NULL);
14945 14945
14946 14946 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14947 14947 /*
14948 14948 * Allocate the scsi_pkt if we need one, or attach DMA
14949 14949 * resources if we have a scsi_pkt that needs them. The
14950 14950 * latter should only occur for commands that are being
14951 14951 * retried.
14952 14952 */
14953 14953 if ((xp->xb_pktp == NULL) ||
14954 14954 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) {
14955 14955 #else
14956 14956 if (xp->xb_pktp == NULL) {
14957 14957 #endif
14958 14958 /*
14959 14959 * There is no scsi_pkt allocated for this buf. Call
14960 14960 * the initpkt function to allocate & init one.
14961 14961 *
14962 14962 * The scsi_init_pkt runout callback functionality is
14963 14963 * implemented as follows:
14964 14964 *
14965 14965 * 1) The initpkt function always calls
14966 14966 * scsi_init_pkt(9F) with sdrunout specified as the
14967 14967 * callback routine.
14968 14968 * 2) A successful packet allocation is initialized and
14969 14969 * the I/O is transported.
14970 14970 * 3) The I/O associated with an allocation resource
14971 14971 * failure is left on its queue to be retried via
14972 14972 * runout or the next I/O.
14973 14973 * 4) The I/O associated with a DMA error is removed
14974 14974 * from the queue and failed with EIO. Processing of
14975 14975 * the transport queues is also halted to be
14976 14976 * restarted via runout or the next I/O.
14977 14977 * 5) The I/O associated with a CDB size or packet
14978 14978 * size error is removed from the queue and failed
14979 14979 * with EIO. Processing of the transport queues is
14980 14980 * continued.
14981 14981 *
14982 14982 * Note: there is no interface for canceling a runout
14983 14983 * callback. To prevent the driver from detaching or
14984 14984 * suspending while a runout is pending the driver
14985 14985 * state is set to SD_STATE_RWAIT
14986 14986 *
14987 14987 * Note: using the scsi_init_pkt callback facility can
14988 14988 * result in an I/O request persisting at the head of
14989 14989 * the list which cannot be satisfied even after
14990 14990 * multiple retries. In the future the driver may
14991 14991 * implement some kind of maximum runout count before
14992 14992 * failing an I/O.
14993 14993 *
14994 14994 * Note: the use of funcp below may seem superfluous,
14995 14995 * but it helps warlock figure out the correct
14996 14996 * initpkt function calls (see [s]sd.wlcmd).
14997 14997 */
14998 14998 struct scsi_pkt *pktp;
14999 14999 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp);
15000 15000
15001 15001 ASSERT(bp != un->un_rqs_bp);
15002 15002
15003 15003 funcp = sd_initpkt_map[xp->xb_chain_iostart];
15004 15004 switch ((*funcp)(bp, &pktp)) {
15005 15005 case SD_PKT_ALLOC_SUCCESS:
15006 15006 xp->xb_pktp = pktp;
15007 15007 SD_TRACE(SD_LOG_IO_CORE, un,
15008 15008 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n",
15009 15009 pktp);
15010 15010 goto got_pkt;
15011 15011
15012 15012 case SD_PKT_ALLOC_FAILURE:
15013 15013 /*
15014 15014 * Temporary (hopefully) resource depletion.
15015 15015 * Since retries and RQS commands always have a
15016 15016 * scsi_pkt allocated, these cases should never
15017 15017 * get here. So the only cases this needs to
15018 15018	 * handle are a bp from the waitq (which we put
15019 15019 * back onto the waitq for sdrunout), or a bp
15020 15020 * sent as an immed_bp (which we just fail).
15021 15021 */
15022 15022 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15023 15023 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n");
15024 15024
15025 15025 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
15026 15026
15027 15027 if (bp == immed_bp) {
15028 15028 /*
15029 15029 * If SD_XB_DMA_FREED is clear, then
15030 15030 * this is a failure to allocate a
15031 15031 * scsi_pkt, and we must fail the
15032 15032 * command.
15033 15033 */
15034 15034 if ((xp->xb_pkt_flags &
15035 15035 SD_XB_DMA_FREED) == 0) {
15036 15036 break;
15037 15037 }
15038 15038
15039 15039 /*
15040 15040 * If this immediate command is NOT our
15041 15041 * un_retry_bp, then we must fail it.
15042 15042 */
15043 15043 if (bp != un->un_retry_bp) {
15044 15044 break;
15045 15045 }
15046 15046
15047 15047 /*
15048 15048 * We get here if this cmd is our
15049 15049 * un_retry_bp that was DMAFREED, but
15050 15050 * scsi_init_pkt() failed to reallocate
15051 15051 * DMA resources when we attempted to
15052 15052 * retry it. This can happen when an
15053 15053 * mpxio failover is in progress, but
15054 15054 * we don't want to just fail the
15055 15055 * command in this case.
15056 15056 *
15057 15057 * Use timeout(9F) to restart it after
15058 15058 * a 100ms delay. We don't want to
15059 15059 * let sdrunout() restart it, because
15060 15060 * sdrunout() is just supposed to start
15061 15061 * commands that are sitting on the
15062 15062 * wait queue. The un_retry_bp stays
15063 15063 * set until the command completes, but
15064 15064 * sdrunout can be called many times
15065 15065 * before that happens. Since sdrunout
15066 15066 * cannot tell if the un_retry_bp is
15067 15067 * already in the transport, it could
15068 15068 * end up calling scsi_transport() for
15069 15069 * the un_retry_bp multiple times.
15070 15070 *
15071 15071 * Also: don't schedule the callback
15072 15072 * if some other callback is already
15073 15073 * pending.
15074 15074 */
15075 15075 if (un->un_retry_statp == NULL) {
15076 15076 /*
15077 15077 * restore the kstat pointer to
15078 15078 * keep kstat counts coherent
15079 15079 * when we do retry the command.
15080 15080 */
15081 15081 un->un_retry_statp =
15082 15082 saved_statp;
15083 15083 }
15084 15084
15085 15085 if ((un->un_startstop_timeid == NULL) &&
15086 15086 (un->un_retry_timeid == NULL) &&
15087 15087 (un->un_direct_priority_timeid ==
15088 15088 NULL)) {
15089 15089
15090 15090 un->un_retry_timeid =
15091 15091 timeout(
15092 15092 sd_start_retry_command,
15093 15093 un, SD_RESTART_TIMEOUT);
15094 15094 }
15095 15095 goto exit;
15096 15096 }
15097 15097
15098 15098 #else
15099 15099 if (bp == immed_bp) {
15100 15100 break; /* Just fail the command */
15101 15101 }
15102 15102 #endif
15103 15103
15104 15104 /* Add the buf back to the head of the waitq */
15105 15105 bp->av_forw = un->un_waitq_headp;
15106 15106 un->un_waitq_headp = bp;
15107 15107 if (un->un_waitq_tailp == NULL) {
15108 15108 un->un_waitq_tailp = bp;
15109 15109 }
15110 15110 goto exit;
15111 15111
15112 15112 case SD_PKT_ALLOC_FAILURE_NO_DMA:
15113 15113 /*
15114 15114 * HBA DMA resource failure. Fail the command
15115 15115 * and continue processing of the queues.
15116 15116 */
15117 15117 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15118 15118 "sd_start_cmds: "
15119 15119 "SD_PKT_ALLOC_FAILURE_NO_DMA\n");
15120 15120 break;
15121 15121
15122 15122 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL:
15123 15123 /*
15124 15124 * Note:x86: Partial DMA mapping not supported
15125 15125 			 * for USCSI commands, and not all of the needed
15126 15126 			 * DMA resources could be allocated.
15127 15127 */
15128 15128 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15129 15129 "sd_start_cmds: "
15130 15130 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n");
15131 15131 break;
15132 15132
15133 15133 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL:
15134 15134 /*
15135 15135 * Note:x86: Request cannot fit into CDB based
15136 15136 * on lba and len.
15137 15137 */
15138 15138 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15139 15139 "sd_start_cmds: "
15140 15140 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n");
15141 15141 break;
15142 15142
15143 15143 default:
15144 15144 /* Should NEVER get here! */
15145 15145 panic("scsi_initpkt error");
15146 15146 /*NOTREACHED*/
15147 15147 }
15148 15148
15149 15149 /*
15150 15150 * Fatal error in allocating a scsi_pkt for this buf.
15151 15151 * Update kstats & return the buf with an error code.
15152 15152 * We must use sd_return_failed_command_no_restart() to
15153 15153 * avoid a recursive call back into sd_start_cmds().
15154 15154 * However this also means that we must keep processing
15155 15155 * the waitq here in order to avoid stalling.
15156 15156 */
15157 15157 if (statp == kstat_waitq_to_runq) {
15158 15158 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
15159 15159 }
15160 15160 sd_return_failed_command_no_restart(un, bp, EIO);
15161 15161 if (bp == immed_bp) {
15162 15162 /* immed_bp is gone by now, so clear this */
15163 15163 immed_bp = NULL;
15164 15164 }
15165 15165 continue;
15166 15166 }
15167 15167 got_pkt:
15168 15168 if (bp == immed_bp) {
15169 15169 /* goto the head of the class.... */
15170 15170 xp->xb_pktp->pkt_flags |= FLAG_HEAD;
15171 15171 }
15172 15172
15173 15173 un->un_ncmds_in_transport++;
15174 15174 SD_UPDATE_KSTATS(un, statp, bp);
15175 15175
15176 15176 /*
15177 15177 * Call scsi_transport() to send the command to the target.
15178 15178 * According to SCSA architecture, we must drop the mutex here
15179 15179 * before calling scsi_transport() in order to avoid deadlock.
15180 15180 * Note that the scsi_pkt's completion routine can be executed
15181 15181 * (from interrupt context) even before the call to
15182 15182 * scsi_transport() returns.
15183 15183 */
15184 15184 SD_TRACE(SD_LOG_IO_CORE, un,
15185 15185 "sd_start_cmds: calling scsi_transport()\n");
15186 15186 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp);
15187 15187
15188 15188 mutex_exit(SD_MUTEX(un));
15189 15189 rval = scsi_transport(xp->xb_pktp);
15190 15190 mutex_enter(SD_MUTEX(un));
15191 15191
15192 15192 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15193 15193 "sd_start_cmds: scsi_transport() returned %d\n", rval);
15194 15194
15195 15195 switch (rval) {
15196 15196 case TRAN_ACCEPT:
15197 15197 /* Clear this with every pkt accepted by the HBA */
15198 15198 un->un_tran_fatal_count = 0;
15199 15199 break; /* Success; try the next cmd (if any) */
15200 15200
15201 15201 case TRAN_BUSY:
15202 15202 un->un_ncmds_in_transport--;
15203 15203 ASSERT(un->un_ncmds_in_transport >= 0);
15204 15204
15205 15205 /*
15206 15206 * Don't retry request sense, the sense data
15207 15207 * is lost when another request is sent.
15208 15208 * Free up the rqs buf and retry
15209 15209 * the original failed cmd. Update kstat.
15210 15210 */
15211 15211 if (bp == un->un_rqs_bp) {
15212 15212 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
15213 15213 bp = sd_mark_rqs_idle(un, xp);
15214 15214 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
15215 15215 NULL, NULL, EIO, un->un_busy_timeout / 500,
15216 15216 kstat_waitq_enter);
15217 15217 goto exit;
15218 15218 }
15219 15219
15220 15220 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
15221 15221 /*
15222 15222 * Free the DMA resources for the scsi_pkt. This will
15223 15223 * allow mpxio to select another path the next time
15224 15224 * we call scsi_transport() with this scsi_pkt.
15225 15225 		 * See sdintr() for the rationale behind this.
15226 15226 */
15227 15227 if ((un->un_f_is_fibre == TRUE) &&
15228 15228 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
15229 15229 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) {
15230 15230 scsi_dmafree(xp->xb_pktp);
15231 15231 xp->xb_pkt_flags |= SD_XB_DMA_FREED;
15232 15232 }
15233 15233 #endif
15234 15234
15235 15235 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) {
15236 15236 /*
15237 15237 * Commands that are SD_PATH_DIRECT_PRIORITY
15238 15238 * are for error recovery situations. These do
15239 15239 * not use the normal command waitq, so if they
15240 15240 * get a TRAN_BUSY we cannot put them back onto
15241 15241 * the waitq for later retry. One possible
15242 15242 * problem is that there could already be some
15243 15243 * other command on un_retry_bp that is waiting
15244 15244 * for this one to complete, so we would be
15245 15245 * deadlocked if we put this command back onto
15246 15246 * the waitq for later retry (since un_retry_bp
15247 15247 * must complete before the driver gets back to
15248 15248 * commands on the waitq).
15249 15249 *
15250 15250 * To avoid deadlock we must schedule a callback
15251 15251 * that will restart this command after a set
15252 15252 * interval. This should keep retrying for as
15253 15253 * long as the underlying transport keeps
15254 15254 * returning TRAN_BUSY (just like for other
15255 15255 * commands). Use the same timeout interval as
15256 15256 * for the ordinary TRAN_BUSY retry.
15257 15257 */
15258 15258 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15259 15259 "sd_start_cmds: scsi_transport() returned "
15260 15260 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n");
15261 15261
15262 15262 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
15263 15263 un->un_direct_priority_timeid =
15264 15264 timeout(sd_start_direct_priority_command,
15265 15265 bp, un->un_busy_timeout / 500);
15266 15266
15267 15267 goto exit;
15268 15268 }
15269 15269
15270 15270 /*
15271 15271 * For TRAN_BUSY, we want to reduce the throttle value,
15272 15272 * unless we are retrying a command.
15273 15273 */
15274 15274 if (bp != un->un_retry_bp) {
15275 15275 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY);
15276 15276 }
15277 15277
15278 15278 /*
15279 15279 * Set up the bp to be tried again 10 ms later.
15280 15280 * Note:x86: Is there a timeout value in the sd_lun
15281 15281 * for this condition?
15282 15282 */
15283 15283 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500,
15284 15284 kstat_runq_back_to_waitq);
15285 15285 goto exit;
15286 15286
15287 15287 case TRAN_FATAL_ERROR:
15288 15288 un->un_tran_fatal_count++;
15289 15289 /* FALLTHRU */
15290 15290
15291 15291 case TRAN_BADPKT:
15292 15292 default:
15293 15293 un->un_ncmds_in_transport--;
15294 15294 ASSERT(un->un_ncmds_in_transport >= 0);
15295 15295
15296 15296 /*
15297 15297 * If this is our REQUEST SENSE command with a
15298 15298 * transport error, we must get back the pointers
15299 15299 * to the original buf, and mark the REQUEST
15300 15300 * SENSE command as "available".
15301 15301 */
15302 15302 if (bp == un->un_rqs_bp) {
15303 15303 bp = sd_mark_rqs_idle(un, xp);
15304 15304 xp = SD_GET_XBUF(bp);
15305 15305 } else {
15306 15306 /*
15307 15307 * Legacy behavior: do not update transport
15308 15308 * error count for request sense commands.
15309 15309 */
15310 15310 SD_UPDATE_ERRSTATS(un, sd_transerrs);
15311 15311 }
15312 15312
15313 15313 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
15314 15314 sd_print_transport_rejected_message(un, xp, rval);
15315 15315
15316 15316 /*
15317 15317 			 * This command will be terminated by the SD driver due
15318 15318 * to a fatal transport error. We should post
15319 15319 * ereport.io.scsi.cmd.disk.tran with driver-assessment
15320 15320 * of "fail" for any command to indicate this
15321 15321 * situation.
15322 15322 */
15323 15323 if (xp->xb_ena > 0) {
15324 15324 ASSERT(un->un_fm_private != NULL);
15325 15325 sfip = un->un_fm_private;
15326 15326 sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT;
15327 15327 sd_ssc_extract_info(&sfip->fm_ssc, un,
15328 15328 xp->xb_pktp, bp, xp);
15329 15329 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
15330 15330 }
15331 15331
15332 15332 /*
15333 15333 * We must use sd_return_failed_command_no_restart() to
15334 15334 * avoid a recursive call back into sd_start_cmds().
15335 15335 * However this also means that we must keep processing
15336 15336 * the waitq here in order to avoid stalling.
15337 15337 */
15338 15338 sd_return_failed_command_no_restart(un, bp, EIO);
15339 15339
15340 15340 /*
15341 15341 * Notify any threads waiting in sd_ddi_suspend() that
15342 15342 * a command completion has occurred.
15343 15343 */
15344 15344 if (un->un_state == SD_STATE_SUSPENDED) {
15345 15345 cv_broadcast(&un->un_disk_busy_cv);
15346 15346 }
15347 15347
15348 15348 if (bp == immed_bp) {
15349 15349 /* immed_bp is gone by now, so clear this */
15350 15350 immed_bp = NULL;
15351 15351 }
15352 15352 break;
15353 15353 }
15354 15354
15355 15355 } while (immed_bp == NULL);
15356 15356
15357 15357 exit:
15358 15358 ASSERT(mutex_owned(SD_MUTEX(un)));
15359 15359 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n");
15360 15360 }
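
/*
 * Editor's note: the following is a minimal, hypothetical sketch (not
 * part of the original driver, and guarded out of compilation) of the
 * dispatch contract that sd_start_cmds() above implements: the softstate
 * mutex must be dropped around scsi_transport(9F), since the pkt's
 * completion routine may run from interrupt context before the call
 * returns, and a TRAN_BUSY return requeues the command with a back-off
 * delay instead of failing it. Kstat bookkeeping is omitted here.
 */
#ifdef SD_EDITOR_EXAMPLE
static void
toy_dispatch(struct sd_lun *un, struct buf *bp, struct scsi_pkt *pktp)
{
	int rval;

	ASSERT(mutex_owned(SD_MUTEX(un)));
	un->un_ncmds_in_transport++;

	mutex_exit(SD_MUTEX(un));
	rval = scsi_transport(pktp);
	mutex_enter(SD_MUTEX(un));

	if (rval == TRAN_BUSY) {
		un->un_ncmds_in_transport--;
		/* Throttle down, then retry after a short delay. */
		sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY);
		sd_set_retry_bp(un, bp, un->un_busy_timeout / 500, NULL);
	}
}
#endif	/* SD_EDITOR_EXAMPLE */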
15361 15361
15362 15362
15363 15363 /*
15364 15364 * Function: sd_return_command
15365 15365 *
15366 15366 * Description: Returns a command to its originator (with or without an
15367 15367 * error). Also starts commands waiting to be transported
15368 15368 * to the target.
15369 15369 *
15370 15370 * Context: May be called from interrupt, kernel, or timeout context
15371 15371 */
15372 15372
15373 15373 static void
15374 15374 sd_return_command(struct sd_lun *un, struct buf *bp)
15375 15375 {
15376 15376 struct sd_xbuf *xp;
15377 15377 struct scsi_pkt *pktp;
15378 15378 struct sd_fm_internal *sfip;
15379 15379
15380 15380 ASSERT(bp != NULL);
15381 15381 ASSERT(un != NULL);
15382 15382 ASSERT(mutex_owned(SD_MUTEX(un)));
15383 15383 ASSERT(bp != un->un_rqs_bp);
15384 15384 xp = SD_GET_XBUF(bp);
15385 15385 ASSERT(xp != NULL);
15386 15386
15387 15387 pktp = SD_GET_PKTP(bp);
15388 15388 sfip = (struct sd_fm_internal *)un->un_fm_private;
15389 15389 ASSERT(sfip != NULL);
15390 15390
15391 15391 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n");
15392 15392
15393 15393 /*
15394 15394 * Note: check for the "sdrestart failed" case.
15395 15395 */
15396 15396 if ((un->un_partial_dma_supported == 1) &&
15397 15397 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) &&
15398 15398 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) &&
15399 15399 (xp->xb_pktp->pkt_resid == 0)) {
15400 15400
15401 15401 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) {
15402 15402 /*
15403 15403 * Successfully set up next portion of cmd
15404 15404 * transfer, try sending it
15405 15405 */
15406 15406 sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
15407 15407 NULL, NULL, 0, (clock_t)0, NULL);
15408 15408 sd_start_cmds(un, NULL);
15409 15409 return; /* Note:x86: need a return here? */
15410 15410 }
15411 15411 }
15412 15412
15413 15413 /*
15414 15414 * If this is the failfast bp, clear it from un_failfast_bp. This
15415 15415 	 * can happen if, upon being retried, the failfast bp either
15416 15416 * succeeded or encountered another error (possibly even a different
15417 15417 * error than the one that precipitated the failfast state, but in
15418 15418 * that case it would have had to exhaust retries as well). Regardless,
15419 15419 * this should not occur whenever the instance is in the active
15420 15420 	 * this should never occur while the instance is in the active
15421 15421 */
15422 15422 if (bp == un->un_failfast_bp) {
15423 15423 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
15424 15424 un->un_failfast_bp = NULL;
15425 15425 }
15426 15426
15427 15427 /*
15428 15428 * Clear the failfast state upon successful completion of ANY cmd.
15429 15429 */
15430 15430 if (bp->b_error == 0) {
15431 15431 un->un_failfast_state = SD_FAILFAST_INACTIVE;
15432 15432 /*
15433 15433 * If this is a successful command, but used to be retried,
15434 15434 * we will take it as a recovered command and post an
15435 15435 * ereport with driver-assessment of "recovered".
15436 15436 */
15437 15437 if (xp->xb_ena > 0) {
15438 15438 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
15439 15439 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY);
15440 15440 }
15441 15441 } else {
15442 15442 /*
15443 15443 * If this is a failed non-USCSI command we will post an
15444 15444 * ereport with driver-assessment set accordingly("fail" or
15445 15445 * "fatal").
15446 15446 */
15447 15447 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
15448 15448 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
15449 15449 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
15450 15450 }
15451 15451 }
15452 15452
15453 15453 /*
15454 15454 * This is used if the command was retried one or more times. Show that
15455 15455 * we are done with it, and allow processing of the waitq to resume.
15456 15456 */
15457 15457 if (bp == un->un_retry_bp) {
15458 15458 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15459 15459 "sd_return_command: un:0x%p: "
15460 15460 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
15461 15461 un->un_retry_bp = NULL;
15462 15462 un->un_retry_statp = NULL;
15463 15463 }
15464 15464
15465 15465 SD_UPDATE_RDWR_STATS(un, bp);
15466 15466 SD_UPDATE_PARTITION_STATS(un, bp);
15467 15467
15468 15468 switch (un->un_state) {
15469 15469 case SD_STATE_SUSPENDED:
15470 15470 /*
15471 15471 * Notify any threads waiting in sd_ddi_suspend() that
15472 15472 * a command completion has occurred.
15473 15473 */
15474 15474 cv_broadcast(&un->un_disk_busy_cv);
15475 15475 break;
15476 15476 default:
15477 15477 sd_start_cmds(un, NULL);
15478 15478 break;
15479 15479 }
15480 15480
15481 15481 /* Return this command up the iodone chain to its originator. */
15482 15482 mutex_exit(SD_MUTEX(un));
15483 15483
15484 15484 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
15485 15485 xp->xb_pktp = NULL;
15486 15486
15487 15487 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
15488 15488
15489 15489 ASSERT(!mutex_owned(SD_MUTEX(un)));
15490 15490 mutex_enter(SD_MUTEX(un));
15491 15491
15492 15492 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
15493 15493 }
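
/*
 * Editor's note: an illustrative predicate (assumed, not in the original
 * source) that restates the partial-DMA continuation test at the top of
 * sd_return_command() above: a transfer is continued only if partial DMA
 * is supported, the command is not a USCSI command, no error has been
 * set on the buf, a DMA residual remains, and the pkt itself completed
 * with no residual.
 */
#ifdef SD_EDITOR_EXAMPLE
static int
toy_needs_next_xfer(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp)
{
	return ((un->un_partial_dma_supported == 1) &&
	    ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
	    (geterror(bp) == 0) && (xp->xb_dma_resid != 0) &&
	    (xp->xb_pktp->pkt_resid == 0));
}
#endif	/* SD_EDITOR_EXAMPLE */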
15494 15494
15495 15495
15496 15496 /*
15497 15497 * Function: sd_return_failed_command
15498 15498 *
15499 15499 * Description: Command completion when an error occurred.
15500 15500 *
15501 15501 * Context: May be called from interrupt context
15502 15502 */
15503 15503
15504 15504 static void
15505 15505 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
15506 15506 {
15507 15507 ASSERT(bp != NULL);
15508 15508 ASSERT(un != NULL);
15509 15509 ASSERT(mutex_owned(SD_MUTEX(un)));
15510 15510
15511 15511 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15512 15512 "sd_return_failed_command: entry\n");
15513 15513
15514 15514 /*
15515 15515 * b_resid could already be nonzero due to a partial data
15516 15516 * transfer, so do not change it here.
15517 15517 */
15518 15518 SD_BIOERROR(bp, errcode);
15519 15519
15520 15520 sd_return_command(un, bp);
15521 15521 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15522 15522 "sd_return_failed_command: exit\n");
15523 15523 }
15524 15524
15525 15525
15526 15526 /*
15527 15527 * Function: sd_return_failed_command_no_restart
15528 15528 *
15529 15529 * Description: Same as sd_return_failed_command, but ensures that no
15530 15530 * call back into sd_start_cmds will be issued.
15531 15531 *
15532 15532 * Context: May be called from interrupt context
15533 15533 */
15534 15534
15535 15535 static void
15536 15536 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
15537 15537 int errcode)
15538 15538 {
15539 15539 struct sd_xbuf *xp;
15540 15540
15541 15541 ASSERT(bp != NULL);
15542 15542 ASSERT(un != NULL);
15543 15543 ASSERT(mutex_owned(SD_MUTEX(un)));
15544 15544 xp = SD_GET_XBUF(bp);
15545 15545 ASSERT(xp != NULL);
15546 15546 ASSERT(errcode != 0);
15547 15547
15548 15548 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15549 15549 "sd_return_failed_command_no_restart: entry\n");
15550 15550
15551 15551 /*
15552 15552 * b_resid could already be nonzero due to a partial data
15553 15553 * transfer, so do not change it here.
15554 15554 */
15555 15555 SD_BIOERROR(bp, errcode);
15556 15556
15557 15557 /*
15558 15558 * If this is the failfast bp, clear it. This can happen if the
15559 15559 	 * failfast bp encountered a fatal error when we attempted to
15560 15560 * re-try it (such as a scsi_transport(9F) failure). However
15561 15561 * we should NOT be in an active failfast state if the failfast
15562 15562 * bp is not NULL.
15563 15563 */
15564 15564 if (bp == un->un_failfast_bp) {
15565 15565 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
15566 15566 un->un_failfast_bp = NULL;
15567 15567 }
15568 15568
15569 15569 if (bp == un->un_retry_bp) {
15570 15570 /*
15571 15571 * This command was retried one or more times. Show that we are
15572 15572 * done with it, and allow processing of the waitq to resume.
15573 15573 */
15574 15574 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15575 15575 "sd_return_failed_command_no_restart: "
15576 15576 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
15577 15577 un->un_retry_bp = NULL;
15578 15578 un->un_retry_statp = NULL;
15579 15579 }
15580 15580
15581 15581 SD_UPDATE_RDWR_STATS(un, bp);
15582 15582 SD_UPDATE_PARTITION_STATS(un, bp);
15583 15583
15584 15584 mutex_exit(SD_MUTEX(un));
15585 15585
15586 15586 if (xp->xb_pktp != NULL) {
15587 15587 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
15588 15588 xp->xb_pktp = NULL;
15589 15589 }
15590 15590
15591 15591 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
15592 15592
15593 15593 mutex_enter(SD_MUTEX(un));
15594 15594
15595 15595 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15596 15596 "sd_return_failed_command_no_restart: exit\n");
15597 15597 }
15598 15598
15599 15599
15600 15600 /*
15601 15601 * Function: sd_retry_command
15602 15602 *
15603 15603 * Description: queue up a command for retry, or (optionally) fail it
15604 15604 * if retry counts are exhausted.
15605 15605 *
15606 15606 * Arguments: un - Pointer to the sd_lun struct for the target.
15607 15607 *
15608 15608 * bp - Pointer to the buf for the command to be retried.
15609 15609 *
15610 15610 * retry_check_flag - Flag to see which (if any) of the retry
15611 15611 * counts should be decremented/checked. If the indicated
15612 15612 * retry count is exhausted, then the command will not be
15613 15613 * retried; it will be failed instead. This should use a
15614 15614 * value equal to one of the following:
15615 15615 *
15616 15616 * SD_RETRIES_NOCHECK
15617 15617 	 *		SD_RETRIES_STANDARD
15618 15618 * SD_RETRIES_VICTIM
15619 15619 *
15620 15620 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
15621 15621 	 *		if the check should be made to see if FLAG_ISOLATE is set
15622 15622 * in the pkt. If FLAG_ISOLATE is set, then the command is
15623 15623 * not retried, it is simply failed.
15624 15624 *
15625 15625 * user_funcp - Ptr to function to call before dispatching the
15626 15626 * command. May be NULL if no action needs to be performed.
15627 15627 * (Primarily intended for printing messages.)
15628 15628 *
15629 15629 * user_arg - Optional argument to be passed along to
15630 15630 * the user_funcp call.
15631 15631 *
15632 15632 * failure_code - errno return code to set in the bp if the
15633 15633 * command is going to be failed.
15634 15634 *
15635 15635 * retry_delay - Retry delay interval in (clock_t) units. May
15636 15636 	 *		be zero, which indicates that the command should be
15637 15637 	 *		retried immediately (i.e., without an intervening delay).
15638 15638 *
15639 15639 * statp - Ptr to kstat function to be updated if the command
15640 15640 * is queued for a delayed retry. May be NULL if no kstat
15641 15641 * update is desired.
15642 15642 *
15643 15643 * Context: May be called from interrupt context.
15644 15644 */
15645 15645
15646 15646 static void
15647 15647 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
15648 15648 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int
15649 15649 code), void *user_arg, int failure_code, clock_t retry_delay,
15650 15650 void (*statp)(kstat_io_t *))
15651 15651 {
15652 15652 struct sd_xbuf *xp;
15653 15653 struct scsi_pkt *pktp;
15654 15654 struct sd_fm_internal *sfip;
15655 15655
15656 15656 ASSERT(un != NULL);
15657 15657 ASSERT(mutex_owned(SD_MUTEX(un)));
15658 15658 ASSERT(bp != NULL);
15659 15659 xp = SD_GET_XBUF(bp);
15660 15660 ASSERT(xp != NULL);
15661 15661 pktp = SD_GET_PKTP(bp);
15662 15662 ASSERT(pktp != NULL);
15663 15663
15664 15664 sfip = (struct sd_fm_internal *)un->un_fm_private;
15665 15665 ASSERT(sfip != NULL);
15666 15666
15667 15667 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
15668 15668 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);
15669 15669
15670 15670 /*
15671 15671 * If we are syncing or dumping, fail the command to avoid
15672 15672 * recursively calling back into scsi_transport().
15673 15673 */
15674 15674 if (ddi_in_panic()) {
15675 15675 goto fail_command_no_log;
15676 15676 }
15677 15677
15678 15678 /*
15679 15679 	 * We should never be retrying a command with FLAG_DIAGNOSE set, so
15680 15680 * log an error and fail the command.
15681 15681 */
15682 15682 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
15683 15683 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
15684 15684 "ERROR, retrying FLAG_DIAGNOSE command.\n");
15685 15685 sd_dump_memory(un, SD_LOG_IO, "CDB",
15686 15686 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
15687 15687 sd_dump_memory(un, SD_LOG_IO, "Sense Data",
15688 15688 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
15689 15689 goto fail_command;
15690 15690 }
15691 15691
15692 15692 /*
15693 15693 	 * If we are suspended or dumping, put the command at the head of
15694 15694 	 * the wait queue, since we don't want to start any more commands,
15695 15695 	 * and clear un_retry_bp. When we are resumed, the command will be
15696 15696 	 * handled from the wait queue.
15697 15697 */
15698 15698 switch (un->un_state) {
15699 15699 case SD_STATE_SUSPENDED:
15700 15700 case SD_STATE_DUMPING:
15701 15701 bp->av_forw = un->un_waitq_headp;
15702 15702 un->un_waitq_headp = bp;
15703 15703 if (un->un_waitq_tailp == NULL) {
15704 15704 un->un_waitq_tailp = bp;
15705 15705 }
15706 15706 if (bp == un->un_retry_bp) {
15707 15707 un->un_retry_bp = NULL;
15708 15708 un->un_retry_statp = NULL;
15709 15709 }
15710 15710 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
15711 15711 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
15712 15712 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
15713 15713 return;
15714 15714 default:
15715 15715 break;
15716 15716 }
15717 15717
15718 15718 /*
15719 15719 * If the caller wants us to check FLAG_ISOLATE, then see if that
15720 15720 * is set; if it is then we do not want to retry the command.
15721 15721 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
15722 15722 */
15723 15723 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
15724 15724 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
15725 15725 goto fail_command;
15726 15726 }
15727 15727 }
15728 15728
15729 15729
15730 15730 /*
15731 15731 * If SD_RETRIES_FAILFAST is set, it indicates that either a
15732 15732 * command timeout or a selection timeout has occurred. This means
15733 15733 	 * that we were unable to establish any kind of communication with
15734 15734 * the target, and subsequent retries and/or commands are likely
15735 15735 * to encounter similar results and take a long time to complete.
15736 15736 *
15737 15737 * If this is a failfast error condition, we need to update the
15738 15738 * failfast state, even if this bp does not have B_FAILFAST set.
15739 15739 */
15740 15740 if (retry_check_flag & SD_RETRIES_FAILFAST) {
15741 15741 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
15742 15742 ASSERT(un->un_failfast_bp == NULL);
15743 15743 /*
15744 15744 * If we are already in the active failfast state, and
15745 15745 * another failfast error condition has been detected,
15746 15746 * then fail this command if it has B_FAILFAST set.
15747 15747 * If B_FAILFAST is clear, then maintain the legacy
15748 15748 			 * behavior of retrying heroically, even though this will
15749 15749 * take a lot more time to fail the command.
15750 15750 */
15751 15751 if (bp->b_flags & B_FAILFAST) {
15752 15752 goto fail_command;
15753 15753 }
15754 15754 } else {
15755 15755 /*
15756 15756 * We're not in the active failfast state, but we
15757 15757 * have a failfast error condition, so we must begin
15758 15758 * transition to the next state. We do this regardless
15759 15759 * of whether or not this bp has B_FAILFAST set.
15760 15760 */
15761 15761 if (un->un_failfast_bp == NULL) {
15762 15762 /*
15763 15763 * This is the first bp to meet a failfast
15764 15764 * condition so save it on un_failfast_bp &
15765 15765 * do normal retry processing. Do not enter
15766 15766 * active failfast state yet. This marks
15767 15767 * entry into the "failfast pending" state.
15768 15768 */
15769 15769 un->un_failfast_bp = bp;
15770 15770
15771 15771 } else if (un->un_failfast_bp == bp) {
15772 15772 /*
15773 15773 * This is the second time *this* bp has
15774 15774 * encountered a failfast error condition,
15775 15775 * so enter active failfast state & flush
15776 15776 * queues as appropriate.
15777 15777 */
15778 15778 un->un_failfast_state = SD_FAILFAST_ACTIVE;
15779 15779 un->un_failfast_bp = NULL;
15780 15780 sd_failfast_flushq(un);
15781 15781
15782 15782 /*
15783 15783 * Fail this bp now if B_FAILFAST set;
15784 15784 * otherwise continue with retries. (It would
15785 15785 * be pretty ironic if this bp succeeded on a
15786 15786 * subsequent retry after we just flushed all
15787 15787 * the queues).
15788 15788 */
15789 15789 if (bp->b_flags & B_FAILFAST) {
15790 15790 goto fail_command;
15791 15791 }
15792 15792
15793 15793 #if !defined(lint) && !defined(__lint)
15794 15794 } else {
15795 15795 /*
15796 15796 				 * If neither of the preceding conditionals
15797 15797 				 * was true, it means that there is some
15798 15798 				 * *other* bp that has met an initial failfast
15799 15799 * condition and is currently either being
15800 15800 * retried or is waiting to be retried. In
15801 15801 * that case we should perform normal retry
15802 15802 * processing on *this* bp, since there is a
15803 15803 * chance that the current failfast condition
15804 15804 * is transient and recoverable. If that does
15805 15805 * not turn out to be the case, then retries
15806 15806 * will be cleared when the wait queue is
15807 15807 * flushed anyway.
15808 15808 */
15809 15809 #endif
15810 15810 }
15811 15811 }
15812 15812 } else {
15813 15813 /*
15814 15814 * SD_RETRIES_FAILFAST is clear, which indicates that we
15815 15815 * likely were able to at least establish some level of
15816 15816 * communication with the target and subsequent commands
15817 15817 * and/or retries are likely to get through to the target,
15818 15818 		 * and/or retries are likely to get through to the target.
15819 15819 * the failfast state. Note that this does not affect
15820 15820 * the "failfast pending" condition.
15821 15821 */
15822 15822 un->un_failfast_state = SD_FAILFAST_INACTIVE;
15823 15823 }
15824 15824
15825 15825
15826 15826 /*
15827 15827 * Check the specified retry count to see if we can still do
15828 15828 * any retries with this pkt before we should fail it.
15829 15829 */
15830 15830 switch (retry_check_flag & SD_RETRIES_MASK) {
15831 15831 case SD_RETRIES_VICTIM:
15832 15832 /*
15833 15833 * Check the victim retry count. If exhausted, then fall
15834 15834 * thru & check against the standard retry count.
15835 15835 */
15836 15836 if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
15837 15837 /* Increment count & proceed with the retry */
15838 15838 xp->xb_victim_retry_count++;
15839 15839 break;
15840 15840 }
15841 15841 /* Victim retries exhausted, fall back to std. retries... */
15842 15842 /* FALLTHRU */
15843 15843
15844 15844 case SD_RETRIES_STANDARD:
15845 15845 if (xp->xb_retry_count >= un->un_retry_count) {
15846 15846 /* Retries exhausted, fail the command */
15847 15847 SD_TRACE(SD_LOG_IO_CORE, un,
15848 15848 "sd_retry_command: retries exhausted!\n");
15849 15849 /*
15850 15850 * update b_resid for failed SCMD_READ & SCMD_WRITE
15851 15851 * commands with nonzero pkt_resid.
15852 15852 */
15853 15853 if ((pktp->pkt_reason == CMD_CMPLT) &&
15854 15854 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
15855 15855 (pktp->pkt_resid != 0)) {
15856 15856 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
15857 15857 if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
15858 15858 SD_UPDATE_B_RESID(bp, pktp);
15859 15859 }
15860 15860 }
15861 15861 goto fail_command;
15862 15862 }
15863 15863 xp->xb_retry_count++;
15864 15864 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15865 15865 "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
15866 15866 break;
15867 15867
15868 15868 case SD_RETRIES_UA:
15869 15869 if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
15870 15870 /* Retries exhausted, fail the command */
15871 15871 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
15872 15872 "Unit Attention retries exhausted. "
15873 15873 "Check the target.\n");
15874 15874 goto fail_command;
15875 15875 }
15876 15876 xp->xb_ua_retry_count++;
15877 15877 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15878 15878 "sd_retry_command: retry count:%d\n",
15879 15879 xp->xb_ua_retry_count);
15880 15880 break;
15881 15881
15882 15882 case SD_RETRIES_BUSY:
15883 15883 if (xp->xb_retry_count >= un->un_busy_retry_count) {
15884 15884 /* Retries exhausted, fail the command */
15885 15885 SD_TRACE(SD_LOG_IO_CORE, un,
15886 15886 "sd_retry_command: retries exhausted!\n");
15887 15887 goto fail_command;
15888 15888 }
15889 15889 xp->xb_retry_count++;
15890 15890 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15891 15891 "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
15892 15892 break;
15893 15893
15894 15894 case SD_RETRIES_NOCHECK:
15895 15895 default:
15896 15896 /* No retry count to check. Just proceed with the retry */
15897 15897 break;
15898 15898 }
15899 15899
15900 15900 xp->xb_pktp->pkt_flags |= FLAG_HEAD;
15901 15901
15902 15902 /*
15903 15903 	 * If this is a non-USCSI command that failed during its last
15904 15904 	 * execution, we should post an ereport with a driver-assessment
15905 15905 	 * of the value "retry".
15906 15906 	 * For partial DMA, request sense, and STATUS_QFULL there is no
15907 15907 	 * hardware error, so we bypass ereport posting.
15908 15908 */
15909 15909 if (failure_code != 0) {
15910 15910 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
15911 15911 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
15912 15912 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY);
15913 15913 }
15914 15914 }
15915 15915
15916 15916 /*
15917 15917 * If we were given a zero timeout, we must attempt to retry the
15918 15918 * command immediately (ie, without a delay).
15919 15919 */
15920 15920 if (retry_delay == 0) {
15921 15921 /*
15922 15922 * Check some limiting conditions to see if we can actually
15923 15923 * do the immediate retry. If we cannot, then we must
15924 15924 * fall back to queueing up a delayed retry.
15925 15925 */
15926 15926 if (un->un_ncmds_in_transport >= un->un_throttle) {
15927 15927 /*
15928 15928 * We are at the throttle limit for the target,
15929 15929 * fall back to delayed retry.
15930 15930 */
15931 15931 retry_delay = un->un_busy_timeout;
15932 15932 statp = kstat_waitq_enter;
15933 15933 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15934 15934 "sd_retry_command: immed. retry hit "
15935 15935 "throttle!\n");
15936 15936 } else {
15937 15937 /*
15938 15938 * We're clear to proceed with the immediate retry.
15939 15939 * First call the user-provided function (if any)
15940 15940 */
15941 15941 if (user_funcp != NULL) {
15942 15942 (*user_funcp)(un, bp, user_arg,
15943 15943 SD_IMMEDIATE_RETRY_ISSUED);
15944 15944 #ifdef __lock_lint
15945 15945 sd_print_incomplete_msg(un, bp, user_arg,
15946 15946 SD_IMMEDIATE_RETRY_ISSUED);
15947 15947 sd_print_cmd_incomplete_msg(un, bp, user_arg,
15948 15948 SD_IMMEDIATE_RETRY_ISSUED);
15949 15949 sd_print_sense_failed_msg(un, bp, user_arg,
15950 15950 SD_IMMEDIATE_RETRY_ISSUED);
15951 15951 #endif
15952 15952 }
15953 15953
15954 15954 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15955 15955 "sd_retry_command: issuing immediate retry\n");
15956 15956
15957 15957 /*
15958 15958 * Call sd_start_cmds() to transport the command to
15959 15959 * the target.
15960 15960 */
15961 15961 sd_start_cmds(un, bp);
15962 15962
15963 15963 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15964 15964 "sd_retry_command exit\n");
15965 15965 return;
15966 15966 }
15967 15967 }
15968 15968
15969 15969 /*
15970 15970 * Set up to retry the command after a delay.
15971 15971 * First call the user-provided function (if any)
15972 15972 */
15973 15973 if (user_funcp != NULL) {
15974 15974 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED);
15975 15975 }
15976 15976
15977 15977 sd_set_retry_bp(un, bp, retry_delay, statp);
15978 15978
15979 15979 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
15980 15980 return;
15981 15981
15982 15982 fail_command:
15983 15983
15984 15984 if (user_funcp != NULL) {
15985 15985 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED);
15986 15986 }
15987 15987
15988 15988 fail_command_no_log:
15989 15989
15990 15990 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15991 15991 "sd_retry_command: returning failed command\n");
15992 15992
15993 15993 sd_return_failed_command(un, bp, failure_code);
15994 15994
15995 15995 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
15996 15996 }
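
/*
 * Editor's note: a condensed, hypothetical restatement (guarded out of
 * compilation) of the retry accounting in sd_retry_command() above. Each
 * SD_RETRIES_* class consumes its own counter in the xbuf, and victim
 * retries fall back to the standard count once exhausted.
 */
#ifdef SD_EDITOR_EXAMPLE
static int				/* nonzero if a retry is allowed */
toy_may_retry(struct sd_lun *un, struct sd_xbuf *xp, int retry_check_flag)
{
	switch (retry_check_flag & SD_RETRIES_MASK) {
	case SD_RETRIES_VICTIM:
		if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
			xp->xb_victim_retry_count++;
			return (1);
		}
		/* FALLTHRU: victim retries exhausted, use std. count */
	case SD_RETRIES_STANDARD:
		if (xp->xb_retry_count >= un->un_retry_count)
			return (0);
		xp->xb_retry_count++;
		return (1);
	case SD_RETRIES_UA:
		if (xp->xb_ua_retry_count >= sd_ua_retry_count)
			return (0);
		xp->xb_ua_retry_count++;
		return (1);
	case SD_RETRIES_BUSY:
		if (xp->xb_retry_count >= un->un_busy_retry_count)
			return (0);
		xp->xb_retry_count++;
		return (1);
	case SD_RETRIES_NOCHECK:
	default:
		return (1);
	}
}
#endif	/* SD_EDITOR_EXAMPLE */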
15997 15997
15998 15998
15999 15999 /*
16000 16000 * Function: sd_set_retry_bp
16001 16001 *
16002 16002 * Description: Set up the given bp for retry.
16003 16003 *
16004 16004 * Arguments: un - ptr to associated softstate
16005 16005 * bp - ptr to buf(9S) for the command
16006 16006 * retry_delay - time interval before issuing retry (may be 0)
16007 16007 * statp - optional pointer to kstat function
16008 16008 *
16009 16009 * Context: May be called under interrupt context
16010 16010 */
16011 16011
16012 16012 static void
16013 16013 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay,
16014 16014 void (*statp)(kstat_io_t *))
16015 16015 {
16016 16016 ASSERT(un != NULL);
16017 16017 ASSERT(mutex_owned(SD_MUTEX(un)));
16018 16018 ASSERT(bp != NULL);
16019 16019
16020 16020 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
16021 16021 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp);
16022 16022
16023 16023 /*
16024 16024 * Indicate that the command is being retried. This will not allow any
16025 16025 * other commands on the wait queue to be transported to the target
16026 16026 * until this command has been completed (success or failure). The
16027 16027 * "retry command" is not transported to the target until the given
16028 16028 * time delay expires, unless the user specified a 0 retry_delay.
16029 16029 *
16030 16030 * Note: the timeout(9F) callback routine is what actually calls
16031 16031 * sd_start_cmds() to transport the command, with the exception of a
16032 16032 * zero retry_delay. The only current implementor of a zero retry delay
16033 16033 * is the case where a START_STOP_UNIT is sent to spin-up a device.
16034 16034 */
16035 16035 if (un->un_retry_bp == NULL) {
16036 16036 ASSERT(un->un_retry_statp == NULL);
16037 16037 un->un_retry_bp = bp;
16038 16038
16039 16039 /*
16040 16040 		 * If the user has not specified a delay, the command should
16041 16041 * be queued and no timeout should be scheduled.
16042 16042 */
16043 16043 if (retry_delay == 0) {
16044 16044 /*
16045 16045 * Save the kstat pointer that will be used in the
16046 16046 * call to SD_UPDATE_KSTATS() below, so that
16047 16047 * sd_start_cmds() can correctly decrement the waitq
16048 16048 * count when it is time to transport this command.
16049 16049 */
16050 16050 un->un_retry_statp = statp;
16051 16051 goto done;
16052 16052 }
16053 16053 }
16054 16054
16055 16055 if (un->un_retry_bp == bp) {
16056 16056 /*
16057 16057 * Save the kstat pointer that will be used in the call to
16058 16058 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can
16059 16059 * correctly decrement the waitq count when it is time to
16060 16060 * transport this command.
16061 16061 */
16062 16062 un->un_retry_statp = statp;
16063 16063
16064 16064 /*
16065 16065 * Schedule a timeout if:
16066 16066 * 1) The user has specified a delay.
16067 16067 * 2) There is not a START_STOP_UNIT callback pending.
16068 16068 *
16069 16069 * If no delay has been specified, then it is up to the caller
16070 16070 * to ensure that IO processing continues without stalling.
16071 16071 * Effectively, this means that the caller will issue the
16072 16072 * required call to sd_start_cmds(). The START_STOP_UNIT
16073 16073 * callback does this after the START STOP UNIT command has
16074 16074 * completed. In either of these cases we should not schedule
16075 16075 * a timeout callback here. Also don't schedule the timeout if
16076 16076 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart.
16077 16077 */
16078 16078 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) &&
16079 16079 (un->un_direct_priority_timeid == NULL)) {
16080 16080 un->un_retry_timeid =
16081 16081 timeout(sd_start_retry_command, un, retry_delay);
16082 16082 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16083 16083 "sd_set_retry_bp: setting timeout: un: 0x%p"
16084 16084 " bp:0x%p un_retry_timeid:0x%p\n",
16085 16085 un, bp, un->un_retry_timeid);
16086 16086 }
16087 16087 } else {
16088 16088 /*
16089 16089 * We only get in here if there is already another command
16090 16090 * waiting to be retried. In this case, we just put the
16091 16091 * given command onto the wait queue, so it can be transported
16092 16092 * after the current retry command has completed.
16093 16093 *
16094 16094 * Also we have to make sure that if the command at the head
16095 16095 * of the wait queue is the un_failfast_bp, that we do not
16096 16096 * put ahead of it any other commands that are to be retried.
16097 16097 */
16098 16098 if ((un->un_failfast_bp != NULL) &&
16099 16099 (un->un_failfast_bp == un->un_waitq_headp)) {
16100 16100 /*
16101 16101 * Enqueue this command AFTER the first command on
16102 16102 * the wait queue (which is also un_failfast_bp).
16103 16103 */
16104 16104 bp->av_forw = un->un_waitq_headp->av_forw;
16105 16105 un->un_waitq_headp->av_forw = bp;
16106 16106 if (un->un_waitq_headp == un->un_waitq_tailp) {
16107 16107 un->un_waitq_tailp = bp;
16108 16108 }
16109 16109 } else {
16110 16110 /* Enqueue this command at the head of the waitq. */
16111 16111 bp->av_forw = un->un_waitq_headp;
16112 16112 un->un_waitq_headp = bp;
16113 16113 if (un->un_waitq_tailp == NULL) {
16114 16114 un->un_waitq_tailp = bp;
16115 16115 }
16116 16116 }
16117 16117
16118 16118 if (statp == NULL) {
16119 16119 statp = kstat_waitq_enter;
16120 16120 }
16121 16121 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16122 16122 "sd_set_retry_bp: un:0x%p already delayed retry\n", un);
16123 16123 }
16124 16124
16125 16125 done:
16126 16126 if (statp != NULL) {
16127 16127 SD_UPDATE_KSTATS(un, statp, bp);
16128 16128 }
16129 16129
16130 16130 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16131 16131 "sd_set_retry_bp: exit un:0x%p\n", un);
16132 16132 }
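
/*
 * Editor's note: the wait queue manipulated above is a singly linked
 * list threaded through buf.av_forw with explicit head and tail
 * pointers. A hypothetical helper (not in the original source) showing
 * the head-insertion idiom used throughout this file; the failfast
 * special case in sd_set_retry_bp() instead enqueues *after* the head.
 */
#ifdef SD_EDITOR_EXAMPLE
static void
toy_waitq_push_head(struct sd_lun *un, struct buf *bp)
{
	ASSERT(mutex_owned(SD_MUTEX(un)));
	bp->av_forw = un->un_waitq_headp;
	un->un_waitq_headp = bp;
	if (un->un_waitq_tailp == NULL) {
		un->un_waitq_tailp = bp;	/* queue was empty */
	}
}
#endif	/* SD_EDITOR_EXAMPLE */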
16133 16133
16134 16134
16135 16135 /*
16136 16136 * Function: sd_start_retry_command
16137 16137 *
16138 16138 * Description: Start the command that has been waiting on the target's
16139 16139 * retry queue. Called from timeout(9F) context after the
16140 16140 * retry delay interval has expired.
16141 16141 *
16142 16142 * Arguments: arg - pointer to associated softstate for the device.
16143 16143 *
16144 16144 * Context: timeout(9F) thread context. May not sleep.
16145 16145 */
16146 16146
16147 16147 static void
16148 16148 sd_start_retry_command(void *arg)
16149 16149 {
16150 16150 struct sd_lun *un = arg;
16151 16151
16152 16152 ASSERT(un != NULL);
16153 16153 ASSERT(!mutex_owned(SD_MUTEX(un)));
16154 16154
16155 16155 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16156 16156 "sd_start_retry_command: entry\n");
16157 16157
16158 16158 mutex_enter(SD_MUTEX(un));
16159 16159
16160 16160 un->un_retry_timeid = NULL;
16161 16161
16162 16162 if (un->un_retry_bp != NULL) {
16163 16163 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16164 16164 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n",
16165 16165 un, un->un_retry_bp);
16166 16166 sd_start_cmds(un, un->un_retry_bp);
16167 16167 }
16168 16168
16169 16169 mutex_exit(SD_MUTEX(un));
16170 16170
16171 16171 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16172 16172 "sd_start_retry_command: exit\n");
16173 16173 }
16174 16174
16175 16175 /*
16176 16176 * Function: sd_rmw_msg_print_handler
16177 16177 *
16178 16178  * Description: If RMW mode is enabled and the warning message has been
16179 16179  *		triggered, print the I/O count accumulated over a fixed interval.
16180 16180 *
16181 16181 * Arguments: arg - pointer to associated softstate for the device.
16182 16182 *
16183 16183 * Context: timeout(9F) thread context. May not sleep.
16184 16184 */
16185 16185 static void
16186 16186 sd_rmw_msg_print_handler(void *arg)
16187 16187 {
16188 16188 struct sd_lun *un = arg;
16189 16189
16190 16190 ASSERT(un != NULL);
16191 16191 ASSERT(!mutex_owned(SD_MUTEX(un)));
16192 16192
16193 16193 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16194 16194 "sd_rmw_msg_print_handler: entry\n");
16195 16195
16196 16196 mutex_enter(SD_MUTEX(un));
16197 16197
16198 16198 if (un->un_rmw_incre_count > 0) {
16199 16199 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16200 16200 "%"PRIu64" I/O requests are not aligned with %d disk "
16201 16201 "sector size in %ld seconds. They are handled through "
16202 16202 "Read Modify Write but the performance is very low!\n",
16203 16203 un->un_rmw_incre_count, un->un_tgt_blocksize,
16204 16204 drv_hztousec(SD_RMW_MSG_PRINT_TIMEOUT) / 1000000);
16205 16205 un->un_rmw_incre_count = 0;
16206 16206 un->un_rmw_msg_timeid = timeout(sd_rmw_msg_print_handler,
16207 16207 un, SD_RMW_MSG_PRINT_TIMEOUT);
16208 16208 } else {
16209 16209 un->un_rmw_msg_timeid = NULL;
16210 16210 }
16211 16211
16212 16212 mutex_exit(SD_MUTEX(un));
16213 16213
16214 16214 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16215 16215 "sd_rmw_msg_print_handler: exit\n");
16216 16216 }
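
/*
 * Editor's note: sd_rmw_msg_print_handler() above follows the common
 * self-rearming timeout(9F) pattern: under the softstate mutex the
 * handler either re-arms itself (work arrived during the interval) or
 * clears the timeout id and lets the cycle lapse. A hypothetical
 * skeleton of the pattern, guarded out of compilation:
 */
#ifdef SD_EDITOR_EXAMPLE
static void
toy_periodic_handler(void *arg)
{
	struct sd_lun *un = arg;

	mutex_enter(SD_MUTEX(un));
	if (un->un_rmw_incre_count > 0) {
		/* ... report and reset the count ... */
		un->un_rmw_incre_count = 0;
		un->un_rmw_msg_timeid = timeout(toy_periodic_handler,
		    un, SD_RMW_MSG_PRINT_TIMEOUT);
	} else {
		un->un_rmw_msg_timeid = NULL;
	}
	mutex_exit(SD_MUTEX(un));
}
#endif	/* SD_EDITOR_EXAMPLE */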
16217 16217
16218 16218 /*
16219 16219 * Function: sd_start_direct_priority_command
16220 16220 *
16221 16221 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had
16222 16222 * received TRAN_BUSY when we called scsi_transport() to send it
16223 16223 * to the underlying HBA. This function is called from timeout(9F)
16224 16224 * context after the delay interval has expired.
16225 16225 *
16226 16226 * Arguments: arg - pointer to associated buf(9S) to be restarted.
16227 16227 *
16228 16228 * Context: timeout(9F) thread context. May not sleep.
16229 16229 */
16230 16230
16231 16231 static void
16232 16232 sd_start_direct_priority_command(void *arg)
16233 16233 {
16234 16234 struct buf *priority_bp = arg;
16235 16235 struct sd_lun *un;
16236 16236
16237 16237 ASSERT(priority_bp != NULL);
16238 16238 un = SD_GET_UN(priority_bp);
16239 16239 ASSERT(un != NULL);
16240 16240 ASSERT(!mutex_owned(SD_MUTEX(un)));
16241 16241
16242 16242 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16243 16243 "sd_start_direct_priority_command: entry\n");
16244 16244
16245 16245 mutex_enter(SD_MUTEX(un));
16246 16246 un->un_direct_priority_timeid = NULL;
16247 16247 sd_start_cmds(un, priority_bp);
16248 16248 mutex_exit(SD_MUTEX(un));
16249 16249
16250 16250 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16251 16251 "sd_start_direct_priority_command: exit\n");
16252 16252 }
16253 16253
16254 16254
16255 16255 /*
16256 16256 * Function: sd_send_request_sense_command
16257 16257 *
16258 16258 * Description: Sends a REQUEST SENSE command to the target
16259 16259 *
16260 16260 * Context: May be called from interrupt context.
16261 16261 */
16262 16262
16263 16263 static void
16264 16264 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
16265 16265 struct scsi_pkt *pktp)
16266 16266 {
16267 16267 ASSERT(bp != NULL);
16268 16268 ASSERT(un != NULL);
16269 16269 ASSERT(mutex_owned(SD_MUTEX(un)));
16270 16270
16271 16271 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: "
16272 16272 "entry: buf:0x%p\n", bp);
16273 16273
16274 16274 /*
16275 16275 * If we are syncing or dumping, then fail the command to avoid a
16276 16276 * recursive callback into scsi_transport(). Also fail the command
16277 16277 * if we are suspended (legacy behavior).
16278 16278 */
16279 16279 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
16280 16280 (un->un_state == SD_STATE_DUMPING)) {
16281 16281 sd_return_failed_command(un, bp, EIO);
16282 16282 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16283 16283 "sd_send_request_sense_command: syncing/dumping, exit\n");
16284 16284 return;
16285 16285 }
16286 16286
16287 16287 /*
16288 16288 * Retry the failed command and don't issue the request sense if:
16289 16289 * 1) the sense buf is busy
16290 16290 * 2) we have 1 or more outstanding commands on the target
16291 16291 	 *    (the sense data will be cleared or invalidated anyway)
16292 16292 *
16293 16293 	 * Note: There could be an issue with not checking a retry limit here;
16294 16294 * the problem is determining which retry limit to check.
16295 16295 */
16296 16296 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) {
16297 16297 /* Don't retry if the command is flagged as non-retryable */
16298 16298 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
16299 16299 sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
16300 16300 NULL, NULL, 0, un->un_busy_timeout,
16301 16301 kstat_waitq_enter);
16302 16302 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16303 16303 "sd_send_request_sense_command: "
16304 16304 "at full throttle, retrying exit\n");
16305 16305 } else {
16306 16306 sd_return_failed_command(un, bp, EIO);
16307 16307 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16308 16308 "sd_send_request_sense_command: "
16309 16309 "at full throttle, non-retryable exit\n");
16310 16310 }
16311 16311 return;
16312 16312 }
16313 16313
16314 16314 sd_mark_rqs_busy(un, bp);
16315 16315 sd_start_cmds(un, un->un_rqs_bp);
16316 16316
16317 16317 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16318 16318 "sd_send_request_sense_command: exit\n");
16319 16319 }
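
/*
 * Editor's note: an illustrative predicate (assumed, not in the original
 * source) for the gating test above: the preallocated request-sense pkt
 * may be issued only when the sense buf is idle and no other commands
 * are outstanding, since any intervening command would clear or
 * invalidate the sense data held by the target.
 */
#ifdef SD_EDITOR_EXAMPLE
static int
toy_can_issue_rqs(struct sd_lun *un)
{
	ASSERT(mutex_owned(SD_MUTEX(un)));
	return ((un->un_sense_isbusy == 0) &&
	    (un->un_ncmds_in_transport == 0));
}
#endif	/* SD_EDITOR_EXAMPLE */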
16320 16320
16321 16321
16322 16322 /*
16323 16323 * Function: sd_mark_rqs_busy
16324 16324 *
16325 16325 * Description: Indicate that the request sense bp for this instance is
16326 16326 * in use.
16327 16327 *
16328 16328 * Context: May be called under interrupt context
16329 16329 */
16330 16330
16331 16331 static void
16332 16332 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp)
16333 16333 {
16334 16334 struct sd_xbuf *sense_xp;
16335 16335
16336 16336 ASSERT(un != NULL);
16337 16337 ASSERT(bp != NULL);
16338 16338 ASSERT(mutex_owned(SD_MUTEX(un)));
16339 16339 ASSERT(un->un_sense_isbusy == 0);
16340 16340
16341 16341 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: "
16342 16342 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un);
16343 16343
16344 16344 sense_xp = SD_GET_XBUF(un->un_rqs_bp);
16345 16345 ASSERT(sense_xp != NULL);
16346 16346
16347 16347 SD_INFO(SD_LOG_IO, un,
16348 16348 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp);
16349 16349
16350 16350 ASSERT(sense_xp->xb_pktp != NULL);
16351 16351 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD))
16352 16352 == (FLAG_SENSING | FLAG_HEAD));
16353 16353
16354 16354 un->un_sense_isbusy = 1;
16355 16355 un->un_rqs_bp->b_resid = 0;
16356 16356 sense_xp->xb_pktp->pkt_resid = 0;
16357 16357 sense_xp->xb_pktp->pkt_reason = 0;
16358 16358
16359 16359 /* So we can get back the bp at interrupt time! */
16360 16360 sense_xp->xb_sense_bp = bp;
16361 16361
16362 16362 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH);
16363 16363
16364 16364 /*
16365 16365 * Mark this buf as awaiting sense data. (This is already set in
16366 16366 * the pkt_flags for the RQS packet.)
16367 16367 */
16368 16368 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING;
16369 16369
16370 16370 /* Request sense down same path */
16371 16371 if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) &&
16372 16372 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance)
16373 16373 sense_xp->xb_pktp->pkt_path_instance =
16374 16374 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance;
16375 16375
16376 16376 sense_xp->xb_retry_count = 0;
16377 16377 sense_xp->xb_victim_retry_count = 0;
16378 16378 sense_xp->xb_ua_retry_count = 0;
16379 16379 sense_xp->xb_nr_retry_count = 0;
16380 16380 sense_xp->xb_dma_resid = 0;
16381 16381
16382 16382 /* Clean up the fields for auto-request sense */
16383 16383 sense_xp->xb_sense_status = 0;
16384 16384 sense_xp->xb_sense_state = 0;
16385 16385 sense_xp->xb_sense_resid = 0;
16386 16386 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data));
16387 16387
16388 16388 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n");
16389 16389 }
16390 16390
16391 16391
16392 16392 /*
16393 16393 * Function: sd_mark_rqs_idle
16394 16394 *
16395 16395 * Description: SD_MUTEX must be held continuously through this routine
16396 16396 * to prevent reuse of the rqs struct before the caller can
16397 16397  *		complete its processing.
16398 16398 *
16399 16399 * Return Code: Pointer to the RQS buf
16400 16400 *
16401 16401 * Context: May be called under interrupt context
16402 16402 */
16403 16403
16404 16404 static struct buf *
16405 16405 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp)
16406 16406 {
16407 16407 struct buf *bp;
16408 16408 ASSERT(un != NULL);
16409 16409 ASSERT(sense_xp != NULL);
16410 16410 ASSERT(mutex_owned(SD_MUTEX(un)));
16411 16411 ASSERT(un->un_sense_isbusy != 0);
16412 16412
16413 16413 un->un_sense_isbusy = 0;
16414 16414 bp = sense_xp->xb_sense_bp;
16415 16415 sense_xp->xb_sense_bp = NULL;
16416 16416
16417 16417 /* This pkt is no longer interested in getting sense data */
16418 16418 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING;
16419 16419
16420 16420 return (bp);
16421 16421 }
16422 16422
16423 16423
16424 16424
16425 16425 /*
16426 16426 * Function: sd_alloc_rqs
16427 16427 *
16428 16428 * Description: Set up the unit to receive auto request sense data
16429 16429 *
16430 16430 * Return Code: DDI_SUCCESS or DDI_FAILURE
16431 16431 *
16432 16432 * Context: Called under attach(9E) context
16433 16433 */
16434 16434
16435 16435 static int
16436 16436 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un)
16437 16437 {
16438 16438 struct sd_xbuf *xp;
16439 16439
16440 16440 ASSERT(un != NULL);
16441 16441 ASSERT(!mutex_owned(SD_MUTEX(un)));
16442 16442 ASSERT(un->un_rqs_bp == NULL);
16443 16443 ASSERT(un->un_rqs_pktp == NULL);
16444 16444
16445 16445 /*
16446 16446 * First allocate the required buf and scsi_pkt structs, then set up
16447 16447 * the CDB in the scsi_pkt for a REQUEST SENSE command.
16448 16448 */
16449 16449 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
16450 16450 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
16451 16451 if (un->un_rqs_bp == NULL) {
16452 16452 return (DDI_FAILURE);
16453 16453 }
16454 16454
16455 16455 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp,
16456 16456 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);
16457 16457
16458 16458 if (un->un_rqs_pktp == NULL) {
16459 16459 sd_free_rqs(un);
16460 16460 return (DDI_FAILURE);
16461 16461 }
16462 16462
16463 16463 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */
16464 16464 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp,
16465 16465 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0);
16466 16466
16467 16467 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp);
16468 16468
16469 16469 /* Set up the other needed members in the ARQ scsi_pkt. */
16470 16470 un->un_rqs_pktp->pkt_comp = sdintr;
16471 16471 un->un_rqs_pktp->pkt_time = sd_io_time;
16472 16472 un->un_rqs_pktp->pkt_flags |=
16473 16473 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */
16474 16474
16475 16475 /*
16476 16476 * Allocate & init the sd_xbuf struct for the RQS command. Do not
16477 16477 	 * provide any initpkt, destroypkt routines as we take care of
16478 16478 * scsi_pkt allocation/freeing here and in sd_free_rqs().
16479 16479 */
16480 16480 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
16481 16481 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
16482 16482 xp->xb_pktp = un->un_rqs_pktp;
16483 16483 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16484 16484 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
16485 16485 un, xp, un->un_rqs_pktp, un->un_rqs_bp);
16486 16486
16487 16487 /*
16488 16488 * Save the pointer to the request sense private bp so it can
16489 16489 * be retrieved in sdintr.
16490 16490 */
16491 16491 un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
16492 16492 ASSERT(un->un_rqs_bp->b_private == xp);
16493 16493
16494 16494 /*
16495 16495 * See if the HBA supports auto-request sense for the specified
16496 16496 * target/lun. If it does, then try to enable it (if not already
16497 16497 * enabled).
16498 16498 *
16499 16499 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
16500 16500 * failure, while for other HBAs (pln) scsi_ifsetcap will always
16501 16501 * return success. However, in both of these cases ARQ is always
16502 16502 * enabled and scsi_ifgetcap will always return true. The best approach
16503 16503 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap().
16504 16504 *
16505 16505 	 * The third case is an HBA (adp) that always reports ARQ as enabled
16506 16506 	 * via scsi_ifgetcap even when it is not enabled; the best approach
16507 16507 	 * there is to issue a scsi_ifsetcap followed by a scsi_ifgetcap.
16508 16508 * Note: this case is to circumvent the Adaptec bug. (x86 only)
16509 16509 */
16510 16510
16511 16511 if (un->un_f_is_fibre == TRUE) {
16512 16512 un->un_f_arq_enabled = TRUE;
16513 16513 } else {
16514 16514 #if defined(__i386) || defined(__amd64)
16515 16515 /*
16516 16516 * Circumvent the Adaptec bug, remove this code when
16517 16517 * the bug is fixed
16518 16518 */
16519 16519 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
16520 16520 #endif
16521 16521 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
16522 16522 case 0:
16523 16523 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16524 16524 "sd_alloc_rqs: HBA supports ARQ\n");
16525 16525 /*
16526 16526 * ARQ is supported by this HBA but currently is not
16527 16527 * enabled. Attempt to enable it and if successful then
16528 16528 * mark this instance as ARQ enabled.
16529 16529 */
16530 16530 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
16531 16531 == 1) {
16532 16532 /* Successfully enabled ARQ in the HBA */
16533 16533 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16534 16534 "sd_alloc_rqs: ARQ enabled\n");
16535 16535 un->un_f_arq_enabled = TRUE;
16536 16536 } else {
16537 16537 /* Could not enable ARQ in the HBA */
16538 16538 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16539 16539 "sd_alloc_rqs: failed ARQ enable\n");
16540 16540 un->un_f_arq_enabled = FALSE;
16541 16541 }
16542 16542 break;
16543 16543 case 1:
16544 16544 /*
16545 16545 * ARQ is supported by this HBA and is already enabled.
16546 16546 * Just mark ARQ as enabled for this instance.
16547 16547 */
16548 16548 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16549 16549 "sd_alloc_rqs: ARQ already enabled\n");
16550 16550 un->un_f_arq_enabled = TRUE;
16551 16551 break;
16552 16552 default:
16553 16553 /*
16554 16554 * ARQ is not supported by this HBA; disable it for this
16555 16555 * instance.
16556 16556 */
16557 16557 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16558 16558 "sd_alloc_rqs: HBA does not support ARQ\n");
16559 16559 un->un_f_arq_enabled = FALSE;
16560 16560 break;
16561 16561 }
16562 16562 }
16563 16563
16564 16564 return (DDI_SUCCESS);
16565 16565 }
16566 16566
16567 16567
16568 16568 /*
16569 16569 * Function: sd_free_rqs
16570 16570 *
16571 16571 * Description: Cleanup for the pre-instance RQS command.
16572 16572 *
16573 16573 * Context: Kernel thread context
16574 16574 */
16575 16575
16576 16576 static void
16577 16577 sd_free_rqs(struct sd_lun *un)
16578 16578 {
16579 16579 ASSERT(un != NULL);
16580 16580
16581 16581 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");
16582 16582
16583 16583 /*
16584 16584 * If consistent memory is bound to a scsi_pkt, the pkt
16585 16585 * has to be destroyed *before* freeing the consistent memory.
16586 16586	 * Don't change the sequence of these operations.
16587 16587	 * scsi_destroy_pkt() might access memory that is no longer
16588 16588	 * valid once it has been freed by scsi_free_consistent_buf().
16589 16589 */
16590 16590 if (un->un_rqs_pktp != NULL) {
16591 16591 scsi_destroy_pkt(un->un_rqs_pktp);
16592 16592 un->un_rqs_pktp = NULL;
16593 16593 }
16594 16594
16595 16595 if (un->un_rqs_bp != NULL) {
16596 16596 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp);
16597 16597 if (xp != NULL) {
16598 16598 kmem_free(xp, sizeof (struct sd_xbuf));
16599 16599 }
16600 16600 scsi_free_consistent_buf(un->un_rqs_bp);
16601 16601 un->un_rqs_bp = NULL;
16602 16602 }
16603 16603 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
16604 16604 }
16605 16605
16606 16606
16607 16607
16608 16608 /*
16609 16609 * Function: sd_reduce_throttle
16610 16610 *
16611 16611 * Description: Reduces the maximum # of outstanding commands on a
16612 16612 * target to the current number of outstanding commands.
16613 16613	 *		Queues a timeout(9F) callback to restore the limit
16614 16614 * after a specified interval has elapsed.
16615 16615 * Typically used when we get a TRAN_BUSY return code
16616 16616 * back from scsi_transport().
16617 16617 *
16618 16618 * Arguments: un - ptr to the sd_lun softstate struct
16619 16619 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
16620 16620 *
16621 16621 * Context: May be called from interrupt context
16622 16622 */
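/*
 * Call-site sketch (illustrative only; this mirrors how the driver uses
 * scsi_transport(9F) from sd_start_cmds()):
 *
 *	mutex_exit(SD_MUTEX(un));
 *	rval = scsi_transport(pktp);
 *	mutex_enter(SD_MUTEX(un));
 *	if (rval == TRAN_BUSY)
 *		sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY);
 *
 * Note that SD_MUTEX(un) must be held when calling this routine, as
 * asserted below.
 */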
16623 16623
16624 16624 static void
16625 16625 sd_reduce_throttle(struct sd_lun *un, int throttle_type)
16626 16626 {
16627 16627 ASSERT(un != NULL);
16628 16628 ASSERT(mutex_owned(SD_MUTEX(un)));
16629 16629 ASSERT(un->un_ncmds_in_transport >= 0);
16630 16630
16631 16631 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
16632 16632 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
16633 16633 un, un->un_throttle, un->un_ncmds_in_transport);
16634 16634
16635 16635 if (un->un_throttle > 1) {
16636 16636 if (un->un_f_use_adaptive_throttle == TRUE) {
16637 16637 switch (throttle_type) {
16638 16638 case SD_THROTTLE_TRAN_BUSY:
16639 16639 if (un->un_busy_throttle == 0) {
16640 16640 un->un_busy_throttle = un->un_throttle;
16641 16641 }
16642 16642 break;
16643 16643 case SD_THROTTLE_QFULL:
16644 16644 un->un_busy_throttle = 0;
16645 16645 break;
16646 16646 default:
16647 16647 ASSERT(FALSE);
16648 16648 }
16649 16649
16650 16650 if (un->un_ncmds_in_transport > 0) {
16651 16651 un->un_throttle = un->un_ncmds_in_transport;
16652 16652 }
16653 16653
16654 16654 } else {
16655 16655 if (un->un_ncmds_in_transport == 0) {
16656 16656 un->un_throttle = 1;
16657 16657 } else {
16658 16658 un->un_throttle = un->un_ncmds_in_transport;
16659 16659 }
16660 16660 }
16661 16661 }
16662 16662
16663 16663 /* Reschedule the timeout if none is currently active */
16664 16664 if (un->un_reset_throttle_timeid == NULL) {
16665 16665 un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
16666 16666 un, SD_THROTTLE_RESET_INTERVAL);
16667 16667 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16668 16668 "sd_reduce_throttle: timeout scheduled!\n");
16669 16669 }
16670 16670
16671 16671 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
16672 16672 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
16673 16673 }
16674 16674
16675 16675
16676 16676
16677 16677 /*
16678 16678 * Function: sd_restore_throttle
16679 16679 *
16680 16680 * Description: Callback function for timeout(9F). Resets the current
16681 16681 * value of un->un_throttle to its default.
16682 16682 *
16683 16683 * Arguments: arg - pointer to associated softstate for the device.
16684 16684 *
16685 16685 * Context: May be called from interrupt context
16686 16686 */
16687 16687
16688 16688 static void
16689 16689 sd_restore_throttle(void *arg)
16690 16690 {
16691 16691 struct sd_lun *un = arg;
16692 16692
16693 16693 ASSERT(un != NULL);
16694 16694 ASSERT(!mutex_owned(SD_MUTEX(un)));
16695 16695
16696 16696 mutex_enter(SD_MUTEX(un));
16697 16697
16698 16698 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
16699 16699 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);
16700 16700
16701 16701 un->un_reset_throttle_timeid = NULL;
16702 16702
16703 16703 if (un->un_f_use_adaptive_throttle == TRUE) {
16704 16704 /*
16705 16705 * If un_busy_throttle is nonzero, then it contains the
16706 16706 * value that un_throttle was when we got a TRAN_BUSY back
16707 16707 * from scsi_transport(). We want to revert back to this
16708 16708 * value.
16709 16709 *
16710 16710 * In the QFULL case, the throttle limit will incrementally
16711 16711 * increase until it reaches max throttle.
16712 16712 */
16713 16713 if (un->un_busy_throttle > 0) {
16714 16714 un->un_throttle = un->un_busy_throttle;
16715 16715 un->un_busy_throttle = 0;
16716 16716 } else {
16717 16717 /*
16718 16718	 * Increase the throttle by 10% to open the gate
16719 16719	 * slowly; schedule another restore if the saved
16720 16720	 * throttle has not yet been reached.
16721 16721 */
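			/*
			 * Worked example (illustrative): with
			 * un_throttle == 8 and un_saved_throttle == 64,
			 * the ramp is 8 -> 9 -> 10 -> 11 -> ..., since
			 * each restore adds max(un_throttle / 10, 1).
			 */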
16722 16722 short throttle;
16723 16723 if (sd_qfull_throttle_enable) {
16724 16724 throttle = un->un_throttle +
16725 16725 max((un->un_throttle / 10), 1);
16726 16726 un->un_throttle =
16727 16727 (throttle < un->un_saved_throttle) ?
16728 16728 throttle : un->un_saved_throttle;
16729 16729 if (un->un_throttle < un->un_saved_throttle) {
16730 16730 un->un_reset_throttle_timeid =
16731 16731 timeout(sd_restore_throttle,
16732 16732 un,
16733 16733 SD_QFULL_THROTTLE_RESET_INTERVAL);
16734 16734 }
16735 16735 }
16736 16736 }
16737 16737
16738 16738 /*
16739 16739 * If un_throttle has fallen below the low-water mark, we
16740 16740 * restore the maximum value here (and allow it to ratchet
16741 16741 * down again if necessary).
16742 16742 */
16743 16743 if (un->un_throttle < un->un_min_throttle) {
16744 16744 un->un_throttle = un->un_saved_throttle;
16745 16745 }
16746 16746 } else {
16747 16747 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
16748 16748 "restoring limit from 0x%x to 0x%x\n",
16749 16749 un->un_throttle, un->un_saved_throttle);
16750 16750 un->un_throttle = un->un_saved_throttle;
16751 16751 }
16752 16752
16753 16753 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
16754 16754 "sd_restore_throttle: calling sd_start_cmds!\n");
16755 16755
16756 16756 sd_start_cmds(un, NULL);
16757 16757
16758 16758 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
16759 16759 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
16760 16760 un, un->un_throttle);
16761 16761
16762 16762 mutex_exit(SD_MUTEX(un));
16763 16763
16764 16764 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
16765 16765 }
16766 16766
16767 16767 /*
16768 16768 * Function: sdrunout
16769 16769 *
16770 16770 * Description: Callback routine for scsi_init_pkt when a resource allocation
16771 16771 * fails.
16772 16772 *
16773 16773 * Arguments: arg - a pointer to the sd_lun unit struct for the particular
16774 16774 * soft state instance.
16775 16775 *
16776 16776	 * Return Code: The scsi_init_pkt routine allows its callback function to
16777 16777	 *		return 0, indicating that the callback should be rescheduled,
16778 16778	 *		or 1, indicating that it should not be. This routine always returns 1
16779 16779 * because the driver always provides a callback function to
16780 16780 * scsi_init_pkt. This results in a callback always being scheduled
16781 16781 * (via the scsi_init_pkt callback implementation) if a resource
16782 16782 * failure occurs.
16783 16783 *
16784 16784 * Context: This callback function may not block or call routines that block
16785 16785 *
16786 16786 * Note: Using the scsi_init_pkt callback facility can result in an I/O
16787 16787 * request persisting at the head of the list which cannot be
16788 16788 * satisfied even after multiple retries. In the future the driver
16789 16789	 *		may implement some type of maximum runout count before failing
16790 16790 * an I/O.
16791 16791 */
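/*
 * Registration sketch (illustrative; the actual call is made while
 * building packets under sd_start_cmds()):
 *
 *	pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, bp, cmdlen,
 *	    statuslen, privatelen, flags, sdrunout, (caddr_t)un);
 *
 * Passing sdrunout rather than NULL_FUNC or SLEEP_FUNC is what makes
 * the framework invoke this callback when resource allocation fails.
 */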
16792 16792
16793 16793 static int
16794 16794 sdrunout(caddr_t arg)
16795 16795 {
16796 16796 struct sd_lun *un = (struct sd_lun *)arg;
16797 16797
16798 16798 ASSERT(un != NULL);
16799 16799 ASSERT(!mutex_owned(SD_MUTEX(un)));
16800 16800
16801 16801 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");
16802 16802
16803 16803 mutex_enter(SD_MUTEX(un));
16804 16804 sd_start_cmds(un, NULL);
16805 16805 mutex_exit(SD_MUTEX(un));
16806 16806 /*
16807 16807 * This callback routine always returns 1 (i.e. do not reschedule)
16808 16808 * because we always specify sdrunout as the callback handler for
16809 16809 * scsi_init_pkt inside the call to sd_start_cmds.
16810 16810 */
16811 16811 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
16812 16812 return (1);
16813 16813 }
16814 16814
16815 16815
16816 16816 /*
16817 16817 * Function: sdintr
16818 16818 *
16819 16819 * Description: Completion callback routine for scsi_pkt(9S) structs
16820 16820 * sent to the HBA driver via scsi_transport(9F).
16821 16821 *
16822 16822 * Context: Interrupt context
16823 16823 */
16824 16824
16825 16825 static void
16826 16826 sdintr(struct scsi_pkt *pktp)
16827 16827 {
16828 16828 struct buf *bp;
16829 16829 struct sd_xbuf *xp;
16830 16830 struct sd_lun *un;
16831 16831 size_t actual_len;
16832 16832 sd_ssc_t *sscp;
16833 16833
16834 16834 ASSERT(pktp != NULL);
16835 16835 bp = (struct buf *)pktp->pkt_private;
16836 16836 ASSERT(bp != NULL);
16837 16837 xp = SD_GET_XBUF(bp);
16838 16838 ASSERT(xp != NULL);
16839 16839 ASSERT(xp->xb_pktp != NULL);
16840 16840 un = SD_GET_UN(bp);
16841 16841 ASSERT(un != NULL);
16842 16842 ASSERT(!mutex_owned(SD_MUTEX(un)));
16843 16843
16844 16844 #ifdef SD_FAULT_INJECTION
16845 16845
16846 16846 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n");
16847 16847 /* SD FaultInjection */
16848 16848 sd_faultinjection(pktp);
16849 16849
16850 16850 #endif /* SD_FAULT_INJECTION */
16851 16851
16852 16852 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p,"
16853 16853 " xp:0x%p, un:0x%p\n", bp, xp, un);
16854 16854
16855 16855 mutex_enter(SD_MUTEX(un));
16856 16856
16857 16857 ASSERT(un->un_fm_private != NULL);
16858 16858 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
16859 16859 ASSERT(sscp != NULL);
16860 16860
16861 16861 /* Reduce the count of the #commands currently in transport */
16862 16862 un->un_ncmds_in_transport--;
16863 16863 ASSERT(un->un_ncmds_in_transport >= 0);
16864 16864
16865 16865 /* Increment counter to indicate that the callback routine is active */
16866 16866 un->un_in_callback++;
16867 16867
16868 16868 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
16869 16869
16870 16870 #ifdef SDDEBUG
16871 16871 if (bp == un->un_retry_bp) {
16872 16872 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: "
16873 16873 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n",
16874 16874 un, un->un_retry_bp, un->un_ncmds_in_transport);
16875 16875 }
16876 16876 #endif
16877 16877
16878 16878 /*
16879 16879 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media
16880 16880 * state if needed.
16881 16881 */
16882 16882 if (pktp->pkt_reason == CMD_DEV_GONE) {
16883 16883 /* Prevent multiple console messages for the same failure. */
16884 16884 if (un->un_last_pkt_reason != CMD_DEV_GONE) {
16885 16885 un->un_last_pkt_reason = CMD_DEV_GONE;
16886 16886 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16887 16887 "Command failed to complete...Device is gone\n");
16888 16888 }
16889 16889 if (un->un_mediastate != DKIO_DEV_GONE) {
16890 16890 un->un_mediastate = DKIO_DEV_GONE;
16891 16891 cv_broadcast(&un->un_state_cv);
16892 16892 }
16893 16893 /*
16894 16894 * If the command happens to be the REQUEST SENSE command,
16895 16895 * free up the rqs buf and fail the original command.
16896 16896 */
16897 16897 if (bp == un->un_rqs_bp) {
16898 16898 bp = sd_mark_rqs_idle(un, xp);
16899 16899 }
16900 16900 sd_return_failed_command(un, bp, EIO);
16901 16901 goto exit;
16902 16902 }
16903 16903
16904 16904 if (pktp->pkt_state & STATE_XARQ_DONE) {
16905 16905 SD_TRACE(SD_LOG_COMMON, un,
16906 16906 "sdintr: extra sense data received. pkt=%p\n", pktp);
16907 16907 }
16908 16908
16909 16909 /*
16910 16910 * First see if the pkt has auto-request sense data with it....
16911 16911 * Look at the packet state first so we don't take a performance
16912 16912 * hit looking at the arq enabled flag unless absolutely necessary.
16913 16913 */
16914 16914 if ((pktp->pkt_state & STATE_ARQ_DONE) &&
16915 16915 (un->un_f_arq_enabled == TRUE)) {
16916 16916 /*
16917 16917 * The HBA did an auto request sense for this command so check
16918 16918 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
16919 16919 * driver command that should not be retried.
16920 16920 */
16921 16921 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
16922 16922 /*
16923 16923 * Save the relevant sense info into the xp for the
16924 16924 * original cmd.
16925 16925 */
16926 16926 struct scsi_arq_status *asp;
16927 16927 asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
16928 16928 xp->xb_sense_status =
16929 16929 *((uchar_t *)(&(asp->sts_rqpkt_status)));
16930 16930 xp->xb_sense_state = asp->sts_rqpkt_state;
16931 16931 xp->xb_sense_resid = asp->sts_rqpkt_resid;
16932 16932 if (pktp->pkt_state & STATE_XARQ_DONE) {
16933 16933 actual_len = MAX_SENSE_LENGTH -
16934 16934 xp->xb_sense_resid;
16935 16935 bcopy(&asp->sts_sensedata, xp->xb_sense_data,
16936 16936 MAX_SENSE_LENGTH);
16937 16937 } else {
16938 16938 if (xp->xb_sense_resid > SENSE_LENGTH) {
16939 16939 actual_len = MAX_SENSE_LENGTH -
16940 16940 xp->xb_sense_resid;
16941 16941 } else {
16942 16942 actual_len = SENSE_LENGTH -
16943 16943 xp->xb_sense_resid;
16944 16944 }
16945 16945 if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
16946 16946 if ((((struct uscsi_cmd *)
16947 16947 (xp->xb_pktinfo))->uscsi_rqlen) >
16948 16948 actual_len) {
16949 16949 xp->xb_sense_resid =
16950 16950 (((struct uscsi_cmd *)
16951 16951 (xp->xb_pktinfo))->
16952 16952 uscsi_rqlen) - actual_len;
16953 16953 } else {
16954 16954 xp->xb_sense_resid = 0;
16955 16955 }
16956 16956 }
16957 16957 bcopy(&asp->sts_sensedata, xp->xb_sense_data,
16958 16958 SENSE_LENGTH);
16959 16959 }
16960 16960
16961 16961 /* fail the command */
16962 16962 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16963 16963 "sdintr: arq done and FLAG_DIAGNOSE set\n");
16964 16964 sd_return_failed_command(un, bp, EIO);
16965 16965 goto exit;
16966 16966 }
16967 16967
16968 16968 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */
16969 16969 /*
16970 16970 * We want to either retry or fail this command, so free
16971 16971 * the DMA resources here. If we retry the command then
16972 16972 * the DMA resources will be reallocated in sd_start_cmds().
16973 16973 * Note that when PKT_DMA_PARTIAL is used, this reallocation
16974 16974 * causes the *entire* transfer to start over again from the
16975 16975 * beginning of the request, even for PARTIAL chunks that
16976 16976 * have already transferred successfully.
16977 16977 */
16978 16978 if ((un->un_f_is_fibre == TRUE) &&
16979 16979 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
16980 16980 ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
16981 16981 scsi_dmafree(pktp);
16982 16982 xp->xb_pkt_flags |= SD_XB_DMA_FREED;
16983 16983 }
16984 16984 #endif
16985 16985
16986 16986 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16987 16987 "sdintr: arq done, sd_handle_auto_request_sense\n");
16988 16988
16989 16989 sd_handle_auto_request_sense(un, bp, xp, pktp);
16990 16990 goto exit;
16991 16991 }
16992 16992
16993 16993 /* Next see if this is the REQUEST SENSE pkt for the instance */
16994 16994 if (pktp->pkt_flags & FLAG_SENSING) {
16995 16995 /* This pktp is from the unit's REQUEST_SENSE command */
16996 16996 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16997 16997 "sdintr: sd_handle_request_sense\n");
16998 16998 sd_handle_request_sense(un, bp, xp, pktp);
16999 16999 goto exit;
17000 17000 }
17001 17001
17002 17002 /*
17003 17003 * Check to see if the command successfully completed as requested;
17004 17004 * this is the most common case (and also the hot performance path).
17005 17005 *
17006 17006 * Requirements for successful completion are:
17007 17007	 *	  pkt_reason is CMD_CMPLT and the packet status is STATUS_GOOD.
17008 17008 * In addition:
17009 17009 * - A residual of zero indicates successful completion no matter what
17010 17010 * the command is.
17011 17011	 *	  - If the residual is not zero and the command is not a read
17012 17012	 *	    or write, then it is still defined as successful
17013 17013	 *	    completion.
17014 17014	 *	  - If the residual is not zero and the command is a read or
17015 17015	 *	    write, it is successful only if it is a USCSICMD; for
17016 17016	 *	    normal buf I/O, a read or write must have a zero residual
17017 17017	 *	    to be considered successful.
17018 17018 */
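	/*
	 * Decision summary of the rules above (illustrative):
	 *
	 *	resid == 0				-> success
	 *	resid != 0, not a read/write		-> success
	 *	resid != 0, read/write, USCSICMD	-> success
	 *	resid != 0, read/write, buf I/O		-> not_successful
	 */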
17019 17019 if ((pktp->pkt_reason == CMD_CMPLT) &&
17020 17020 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) {
17021 17021
17022 17022 /*
17023 17023 * Since this command is returned with a good status, we
17024 17024 * can reset the count for Sonoma failover.
17025 17025 */
17026 17026 un->un_sonoma_failure_count = 0;
17027 17027
17028 17028 /*
17029 17029 * Return all USCSI commands on good status
17030 17030 */
17031 17031 if (pktp->pkt_resid == 0) {
17032 17032 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17033 17033 "sdintr: returning command for resid == 0\n");
17034 17034 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) &&
17035 17035 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) {
17036 17036 SD_UPDATE_B_RESID(bp, pktp);
17037 17037 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17038 17038 "sdintr: returning command for resid != 0\n");
17039 17039 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
17040 17040 SD_UPDATE_B_RESID(bp, pktp);
17041 17041 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17042 17042 "sdintr: returning uscsi command\n");
17043 17043 } else {
17044 17044 goto not_successful;
17045 17045 }
17046 17046 sd_return_command(un, bp);
17047 17047
17048 17048 /*
17049 17049 * Decrement counter to indicate that the callback routine
17050 17050 * is done.
17051 17051 */
17052 17052 un->un_in_callback--;
17053 17053 ASSERT(un->un_in_callback >= 0);
17054 17054 mutex_exit(SD_MUTEX(un));
17055 17055
17056 17056 return;
17057 17057 }
17058 17058
17059 17059 not_successful:
17060 17060
17061 17061 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */
17062 17062 /*
17063 17063 * The following is based upon knowledge of the underlying transport
17064 17064 * and its use of DMA resources. This code should be removed when
17065 17065 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor
17066 17066 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf()
17067 17067 * and sd_start_cmds().
17068 17068 *
17069 17069 * Free any DMA resources associated with this command if there
17070 17070 * is a chance it could be retried or enqueued for later retry.
17071 17071 * If we keep the DMA binding then mpxio cannot reissue the
17072 17072 * command on another path whenever a path failure occurs.
17073 17073 *
17074 17074 * Note that when PKT_DMA_PARTIAL is used, free/reallocation
17075 17075 * causes the *entire* transfer to start over again from the
17076 17076 * beginning of the request, even for PARTIAL chunks that
17077 17077 * have already transferred successfully.
17078 17078 *
17079 17079 * This is only done for non-uscsi commands (and also skipped for the
17080 17080 * driver's internal RQS command). Also just do this for Fibre Channel
17081 17081 * devices as these are the only ones that support mpxio.
17082 17082 */
17083 17083 if ((un->un_f_is_fibre == TRUE) &&
17084 17084 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
17085 17085 ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
17086 17086 scsi_dmafree(pktp);
17087 17087 xp->xb_pkt_flags |= SD_XB_DMA_FREED;
17088 17088 }
17089 17089 #endif
17090 17090
17091 17091 /*
17092 17092 * The command did not successfully complete as requested so check
17093 17093 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
17094 17094 * driver command that should not be retried so just return. If
17095 17095 * FLAG_DIAGNOSE is not set the error will be processed below.
17096 17096 */
17097 17097 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
17098 17098 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17099 17099 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n");
17100 17100 /*
17101 17101 * Issue a request sense if a check condition caused the error
17102 17102 * (we handle the auto request sense case above), otherwise
17103 17103 * just fail the command.
17104 17104 */
17105 17105 if ((pktp->pkt_reason == CMD_CMPLT) &&
17106 17106 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) {
17107 17107 sd_send_request_sense_command(un, bp, pktp);
17108 17108 } else {
17109 17109 sd_return_failed_command(un, bp, EIO);
17110 17110 }
17111 17111 goto exit;
17112 17112 }
17113 17113
17114 17114 /*
17115 17115 * The command did not successfully complete as requested so process
17116 17116 * the error, retry, and/or attempt recovery.
17117 17117 */
17118 17118 switch (pktp->pkt_reason) {
17119 17119 case CMD_CMPLT:
17120 17120 switch (SD_GET_PKT_STATUS(pktp)) {
17121 17121 case STATUS_GOOD:
17122 17122 /*
17123 17123 * The command completed successfully with a non-zero
17124 17124 * residual
17125 17125 */
17126 17126 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17127 17127 "sdintr: STATUS_GOOD \n");
17128 17128 sd_pkt_status_good(un, bp, xp, pktp);
17129 17129 break;
17130 17130
17131 17131 case STATUS_CHECK:
17132 17132 case STATUS_TERMINATED:
17133 17133 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17134 17134 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n");
17135 17135 sd_pkt_status_check_condition(un, bp, xp, pktp);
17136 17136 break;
17137 17137
17138 17138 case STATUS_BUSY:
17139 17139 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17140 17140 "sdintr: STATUS_BUSY\n");
17141 17141 sd_pkt_status_busy(un, bp, xp, pktp);
17142 17142 break;
17143 17143
17144 17144 case STATUS_RESERVATION_CONFLICT:
17145 17145 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17146 17146 "sdintr: STATUS_RESERVATION_CONFLICT\n");
17147 17147 sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
17148 17148 break;
17149 17149
17150 17150 case STATUS_QFULL:
17151 17151 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17152 17152 "sdintr: STATUS_QFULL\n");
17153 17153 sd_pkt_status_qfull(un, bp, xp, pktp);
17154 17154 break;
17155 17155
17156 17156 case STATUS_MET:
17157 17157 case STATUS_INTERMEDIATE:
17158 17158 case STATUS_SCSI2:
17159 17159 case STATUS_INTERMEDIATE_MET:
17160 17160 case STATUS_ACA_ACTIVE:
17161 17161 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17162 17162 "Unexpected SCSI status received: 0x%x\n",
17163 17163 SD_GET_PKT_STATUS(pktp));
17164 17164 /*
17165 17165			 * Mark the ssc_flags when an invalid status code
17166 17166			 * is detected for a non-USCSI command.
17167 17167 */
17168 17168 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17169 17169 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
17170 17170 0, "stat-code");
17171 17171 }
17172 17172 sd_return_failed_command(un, bp, EIO);
17173 17173 break;
17174 17174
17175 17175 default:
17176 17176 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17177 17177 "Invalid SCSI status received: 0x%x\n",
17178 17178 SD_GET_PKT_STATUS(pktp));
17179 17179 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17180 17180 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
17181 17181 0, "stat-code");
17182 17182 }
17183 17183 sd_return_failed_command(un, bp, EIO);
17184 17184 break;
17185 17185
17186 17186 }
17187 17187 break;
17188 17188
17189 17189 case CMD_INCOMPLETE:
17190 17190 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17191 17191 "sdintr: CMD_INCOMPLETE\n");
17192 17192 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp);
17193 17193 break;
17194 17194 case CMD_TRAN_ERR:
17195 17195 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17196 17196 "sdintr: CMD_TRAN_ERR\n");
17197 17197 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp);
17198 17198 break;
17199 17199 case CMD_RESET:
17200 17200 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17201 17201 "sdintr: CMD_RESET \n");
17202 17202 sd_pkt_reason_cmd_reset(un, bp, xp, pktp);
17203 17203 break;
17204 17204 case CMD_ABORTED:
17205 17205 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17206 17206 "sdintr: CMD_ABORTED \n");
17207 17207 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp);
17208 17208 break;
17209 17209 case CMD_TIMEOUT:
17210 17210 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17211 17211 "sdintr: CMD_TIMEOUT\n");
17212 17212 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp);
17213 17213 break;
17214 17214 case CMD_UNX_BUS_FREE:
17215 17215 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17216 17216 "sdintr: CMD_UNX_BUS_FREE \n");
17217 17217 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp);
17218 17218 break;
17219 17219 case CMD_TAG_REJECT:
17220 17220 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17221 17221 "sdintr: CMD_TAG_REJECT\n");
17222 17222 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp);
17223 17223 break;
17224 17224 default:
17225 17225 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17226 17226 "sdintr: default\n");
17227 17227 /*
17228 17228		 * Mark the ssc_flags for detecting an invalid pkt_reason.
17229 17229 */
17230 17230 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17231 17231 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON,
17232 17232 0, "pkt-reason");
17233 17233 }
17234 17234 sd_pkt_reason_default(un, bp, xp, pktp);
17235 17235 break;
17236 17236 }
17237 17237
17238 17238 exit:
17239 17239 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n");
17240 17240
17241 17241 /* Decrement counter to indicate that the callback routine is done. */
17242 17242 un->un_in_callback--;
17243 17243 ASSERT(un->un_in_callback >= 0);
17244 17244
17245 17245 /*
17246 17246 * At this point, the pkt has been dispatched, ie, it is either
17247 17247 * being re-tried or has been returned to its caller and should
17248 17248 * not be referenced.
17249 17249 */
17250 17250
17251 17251 mutex_exit(SD_MUTEX(un));
17252 17252 }
17253 17253
17254 17254
17255 17255 /*
17256 17256 * Function: sd_print_incomplete_msg
17257 17257 *
17258 17258 * Description: Prints the error message for a CMD_INCOMPLETE error.
17259 17259 *
17260 17260 * Arguments: un - ptr to associated softstate for the device.
17261 17261 * bp - ptr to the buf(9S) for the command.
17262 17262 * arg - message string ptr
17263 17263 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
17264 17264 * or SD_NO_RETRY_ISSUED.
17265 17265 *
17266 17266 * Context: May be called under interrupt context
17267 17267 */
17268 17268
17269 17269 static void
17270 17270 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
17271 17271 {
17272 17272 struct scsi_pkt *pktp;
17273 17273 char *msgp;
17274 17274 char *cmdp = arg;
17275 17275
17276 17276 ASSERT(un != NULL);
17277 17277 ASSERT(mutex_owned(SD_MUTEX(un)));
17278 17278 ASSERT(bp != NULL);
17279 17279 ASSERT(arg != NULL);
17280 17280 pktp = SD_GET_PKTP(bp);
17281 17281 ASSERT(pktp != NULL);
17282 17282
17283 17283 switch (code) {
17284 17284 case SD_DELAYED_RETRY_ISSUED:
17285 17285 case SD_IMMEDIATE_RETRY_ISSUED:
17286 17286 msgp = "retrying";
17287 17287 break;
17288 17288 case SD_NO_RETRY_ISSUED:
17289 17289 default:
17290 17290 msgp = "giving up";
17291 17291 break;
17292 17292 }
17293 17293
17294 17294 if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
17295 17295 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17296 17296 "incomplete %s- %s\n", cmdp, msgp);
17297 17297 }
17298 17298 }
17299 17299
17300 17300
17301 17301
17302 17302 /*
17303 17303 * Function: sd_pkt_status_good
17304 17304 *
17305 17305 * Description: Processing for a STATUS_GOOD code in pkt_status.
17306 17306 *
17307 17307 * Context: May be called under interrupt context
17308 17308 */
17309 17309
17310 17310 static void
17311 17311 sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
17312 17312 struct sd_xbuf *xp, struct scsi_pkt *pktp)
17313 17313 {
17314 17314 char *cmdp;
17315 17315
17316 17316 ASSERT(un != NULL);
17317 17317 ASSERT(mutex_owned(SD_MUTEX(un)));
17318 17318 ASSERT(bp != NULL);
17319 17319 ASSERT(xp != NULL);
17320 17320 ASSERT(pktp != NULL);
17321 17321 ASSERT(pktp->pkt_reason == CMD_CMPLT);
17322 17322 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
17323 17323 ASSERT(pktp->pkt_resid != 0);
17324 17324
17325 17325 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");
17326 17326
17327 17327 SD_UPDATE_ERRSTATS(un, sd_harderrs);
17328 17328 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
17329 17329 case SCMD_READ:
17330 17330 cmdp = "read";
17331 17331 break;
17332 17332 case SCMD_WRITE:
17333 17333 cmdp = "write";
17334 17334 break;
17335 17335 default:
17336 17336 SD_UPDATE_B_RESID(bp, pktp);
17337 17337 sd_return_command(un, bp);
17338 17338 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
17339 17339 return;
17340 17340 }
17341 17341
17342 17342 /*
17343 17343	 * See if we can retry the read/write, preferably immediately.
17344 17344	 * If retries are exhausted, then sd_retry_command() will update
17345 17345 * the b_resid count.
17346 17346 */
17347 17347 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
17348 17348 cmdp, EIO, (clock_t)0, NULL);
17349 17349
17350 17350 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
17351 17351 }
17352 17352
17353 17353
17354 17354
17355 17355
17356 17356
17357 17357 /*
17358 17358 * Function: sd_handle_request_sense
17359 17359 *
17360 17360 * Description: Processing for non-auto Request Sense command.
17361 17361 *
17362 17362 * Arguments: un - ptr to associated softstate
17363 17363 * sense_bp - ptr to buf(9S) for the RQS command
17364 17364 * sense_xp - ptr to the sd_xbuf for the RQS command
17365 17365 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command
17366 17366 *
17367 17367 * Context: May be called under interrupt context
17368 17368 */
17369 17369
17370 17370 static void
17371 17371 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp,
17372 17372 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp)
17373 17373 {
17374 17374 struct buf *cmd_bp; /* buf for the original command */
17375 17375 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */
17376 17376 struct scsi_pkt *cmd_pktp; /* pkt for the original command */
17377 17377 size_t actual_len; /* actual sense data length */
17378 17378
17379 17379 ASSERT(un != NULL);
17380 17380 ASSERT(mutex_owned(SD_MUTEX(un)));
17381 17381 ASSERT(sense_bp != NULL);
17382 17382 ASSERT(sense_xp != NULL);
17383 17383 ASSERT(sense_pktp != NULL);
17384 17384
17385 17385 /*
17386 17386 * Note the sense_bp, sense_xp, and sense_pktp here are for the
17387 17387 * RQS command and not the original command.
17388 17388 */
17389 17389 ASSERT(sense_pktp == un->un_rqs_pktp);
17390 17390 ASSERT(sense_bp == un->un_rqs_bp);
17391 17391 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) ==
17392 17392 (FLAG_SENSING | FLAG_HEAD));
17393 17393 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) &
17394 17394 FLAG_SENSING) == FLAG_SENSING);
17395 17395
17396 17396 /* These are the bp, xp, and pktp for the original command */
17397 17397 cmd_bp = sense_xp->xb_sense_bp;
17398 17398 cmd_xp = SD_GET_XBUF(cmd_bp);
17399 17399 cmd_pktp = SD_GET_PKTP(cmd_bp);
17400 17400
17401 17401 if (sense_pktp->pkt_reason != CMD_CMPLT) {
17402 17402 /*
17403 17403 * The REQUEST SENSE command failed. Release the REQUEST
17404 17404 * SENSE command for re-use, get back the bp for the original
17405 17405 * command, and attempt to re-try the original command if
17406 17406 * FLAG_DIAGNOSE is not set in the original packet.
17407 17407 */
17408 17408 SD_UPDATE_ERRSTATS(un, sd_harderrs);
17409 17409 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
17410 17410 cmd_bp = sd_mark_rqs_idle(un, sense_xp);
17411 17411 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD,
17412 17412 NULL, NULL, EIO, (clock_t)0, NULL);
17413 17413 return;
17414 17414 }
17415 17415 }
17416 17416
17417 17417 /*
17418 17418 * Save the relevant sense info into the xp for the original cmd.
17419 17419 *
17420 17420	 * Note: if the request sense failed, the state info will be zero,
17421 17421	 * as set in sd_mark_rqs_busy().
17422 17422 */
17423 17423 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp);
17424 17424 cmd_xp->xb_sense_state = sense_pktp->pkt_state;
17425 17425 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid;
17426 17426 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) &&
17427 17427 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen >
17428 17428 SENSE_LENGTH)) {
17429 17429 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
17430 17430 MAX_SENSE_LENGTH);
17431 17431 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid;
17432 17432 } else {
17433 17433 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
17434 17434 SENSE_LENGTH);
17435 17435 if (actual_len < SENSE_LENGTH) {
17436 17436 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len;
17437 17437 } else {
17438 17438 cmd_xp->xb_sense_resid = 0;
17439 17439 }
17440 17440 }
17441 17441
17442 17442 /*
17443 17443 * Free up the RQS command....
17444 17444 * NOTE:
17445 17445 * Must do this BEFORE calling sd_validate_sense_data!
17446 17446 * sd_validate_sense_data may return the original command in
17447 17447 * which case the pkt will be freed and the flags can no
17448 17448 * longer be touched.
17449 17449 * SD_MUTEX is held through this process until the command
17450 17450 * is dispatched based upon the sense data, so there are
17451 17451 * no race conditions.
17452 17452 */
17453 17453 (void) sd_mark_rqs_idle(un, sense_xp);
17454 17454
17455 17455 /*
17456 17456 * For a retryable command see if we have valid sense data, if so then
17457 17457 * turn it over to sd_decode_sense() to figure out the right course of
17458 17458 * action. Just fail a non-retryable command.
17459 17459 */
17460 17460 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
17461 17461 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) ==
17462 17462 SD_SENSE_DATA_IS_VALID) {
17463 17463 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp);
17464 17464 }
17465 17465 } else {
17466 17466 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB",
17467 17467 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
17468 17468 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data",
17469 17469 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
17470 17470 sd_return_failed_command(un, cmd_bp, EIO);
17471 17471 }
17472 17472 }
17473 17473
17474 17474
17475 17475
17476 17476
17477 17477 /*
17478 17478 * Function: sd_handle_auto_request_sense
17479 17479 *
17480 17480 * Description: Processing for auto-request sense information.
17481 17481 *
17482 17482 * Arguments: un - ptr to associated softstate
17483 17483 * bp - ptr to buf(9S) for the command
17484 17484 * xp - ptr to the sd_xbuf for the command
17485 17485 * pktp - ptr to the scsi_pkt(9S) for the command
17486 17486 *
17487 17487 * Context: May be called under interrupt context
17488 17488 */
17489 17489
17490 17490 static void
17491 17491 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
17492 17492 struct sd_xbuf *xp, struct scsi_pkt *pktp)
17493 17493 {
17494 17494 struct scsi_arq_status *asp;
17495 17495 size_t actual_len;
17496 17496
17497 17497 ASSERT(un != NULL);
17498 17498 ASSERT(mutex_owned(SD_MUTEX(un)));
17499 17499 ASSERT(bp != NULL);
17500 17500 ASSERT(xp != NULL);
17501 17501 ASSERT(pktp != NULL);
17502 17502 ASSERT(pktp != un->un_rqs_pktp);
17503 17503 ASSERT(bp != un->un_rqs_bp);
17504 17504
17505 17505 /*
17506 17506 * For auto-request sense, we get a scsi_arq_status back from
17507 17507 * the HBA, with the sense data in the sts_sensedata member.
17508 17508 * The pkt_scbp of the packet points to this scsi_arq_status.
17509 17509 */
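	/*
	 * Members of scsi_arq_status used below (a sketch; see the SCSA
	 * headers for the authoritative definition):
	 *
	 *	sts_rqpkt_reason - pkt_reason of the internal REQUEST SENSE
	 *	sts_rqpkt_status - SCSI status of the REQUEST SENSE
	 *	sts_rqpkt_state  - pkt_state of the REQUEST SENSE
	 *	sts_rqpkt_resid  - residual count for the REQUEST SENSE
	 *	sts_sensedata    - the returned sense bytes
	 */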
17510 17510 asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
17511 17511
17512 17512 if (asp->sts_rqpkt_reason != CMD_CMPLT) {
17513 17513 /*
17514 17514 * The auto REQUEST SENSE failed; see if we can re-try
17515 17515 * the original command.
17516 17516 */
17517 17517 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17518 17518 "auto request sense failed (reason=%s)\n",
17519 17519 scsi_rname(asp->sts_rqpkt_reason));
17520 17520
17521 17521 sd_reset_target(un, pktp);
17522 17522
17523 17523 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17524 17524 NULL, NULL, EIO, (clock_t)0, NULL);
17525 17525 return;
17526 17526 }
17527 17527
17528 17528 /* Save the relevant sense info into the xp for the original cmd. */
17529 17529 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status)));
17530 17530 xp->xb_sense_state = asp->sts_rqpkt_state;
17531 17531 xp->xb_sense_resid = asp->sts_rqpkt_resid;
17532 17532 if (xp->xb_sense_state & STATE_XARQ_DONE) {
17533 17533 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
17534 17534 bcopy(&asp->sts_sensedata, xp->xb_sense_data,
17535 17535 MAX_SENSE_LENGTH);
17536 17536 } else {
17537 17537 if (xp->xb_sense_resid > SENSE_LENGTH) {
17538 17538 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
17539 17539 } else {
17540 17540 actual_len = SENSE_LENGTH - xp->xb_sense_resid;
17541 17541 }
17542 17542 if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
17543 17543 if ((((struct uscsi_cmd *)
17544 17544 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) {
17545 17545 xp->xb_sense_resid = (((struct uscsi_cmd *)
17546 17546 (xp->xb_pktinfo))->uscsi_rqlen) -
17547 17547 actual_len;
17548 17548 } else {
17549 17549 xp->xb_sense_resid = 0;
17550 17550 }
17551 17551 }
17552 17552 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH);
17553 17553 }
17554 17554
17555 17555 /*
17556 17556 * See if we have valid sense data, if so then turn it over to
17557 17557 * sd_decode_sense() to figure out the right course of action.
17558 17558 */
17559 17559 if (sd_validate_sense_data(un, bp, xp, actual_len) ==
17560 17560 SD_SENSE_DATA_IS_VALID) {
17561 17561 sd_decode_sense(un, bp, xp, pktp);
17562 17562 }
17563 17563 }
17564 17564
17565 17565
17566 17566 /*
17567 17567 * Function: sd_print_sense_failed_msg
17568 17568 *
17569 17569	 * Description: Print a log message when the RQS command has failed.
17570 17570 *
17571 17571 * Arguments: un - ptr to associated softstate
17572 17572 * bp - ptr to buf(9S) for the command
17573 17573 * arg - generic message string ptr
17574 17574 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
17575 17575 * or SD_NO_RETRY_ISSUED
17576 17576 *
17577 17577 * Context: May be called from interrupt context
17578 17578 */
17579 17579
17580 17580 static void
17581 17581 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg,
17582 17582 int code)
17583 17583 {
17584 17584 char *msgp = arg;
17585 17585
17586 17586 ASSERT(un != NULL);
17587 17587 ASSERT(mutex_owned(SD_MUTEX(un)));
17588 17588 ASSERT(bp != NULL);
17589 17589
17590 17590 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) {
17591 17591 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp);
17592 17592 }
17593 17593 }
17594 17594
17595 17595
17596 17596 /*
17597 17597 * Function: sd_validate_sense_data
17598 17598 *
17599 17599 * Description: Check the given sense data for validity.
17600 17600 * If the sense data is not valid, the command will
17601 17601 * be either failed or retried!
17602 17602 *
17603 17603 * Return Code: SD_SENSE_DATA_IS_INVALID
17604 17604 * SD_SENSE_DATA_IS_VALID
17605 17605 *
17606 17606 * Context: May be called from interrupt context
17607 17607 */
17608 17608
17609 17609 static int
17610 17610 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
17611 17611 size_t actual_len)
17612 17612 {
17613 17613 struct scsi_extended_sense *esp;
17614 17614 struct scsi_pkt *pktp;
17615 17615 char *msgp = NULL;
17616 17616 sd_ssc_t *sscp;
17617 17617
17618 17618 ASSERT(un != NULL);
17619 17619 ASSERT(mutex_owned(SD_MUTEX(un)));
17620 17620 ASSERT(bp != NULL);
17621 17621 ASSERT(bp != un->un_rqs_bp);
17622 17622 ASSERT(xp != NULL);
17623 17623 ASSERT(un->un_fm_private != NULL);
17624 17624
17625 17625 pktp = SD_GET_PKTP(bp);
17626 17626 ASSERT(pktp != NULL);
17627 17627
17628 17628 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
17629 17629 ASSERT(sscp != NULL);
17630 17630
17631 17631 /*
17632 17632 * Check the status of the RQS command (auto or manual).
17633 17633 */
17634 17634 switch (xp->xb_sense_status & STATUS_MASK) {
17635 17635 case STATUS_GOOD:
17636 17636 break;
17637 17637
17638 17638 case STATUS_RESERVATION_CONFLICT:
17639 17639 sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
17640 17640 return (SD_SENSE_DATA_IS_INVALID);
17641 17641
17642 17642 case STATUS_BUSY:
17643 17643 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17644 17644 "Busy Status on REQUEST SENSE\n");
17645 17645 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL,
17646 17646 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter);
17647 17647 return (SD_SENSE_DATA_IS_INVALID);
17648 17648
17649 17649 case STATUS_QFULL:
17650 17650 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17651 17651 "QFULL Status on REQUEST SENSE\n");
17652 17652 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL,
17653 17653 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter);
17654 17654 return (SD_SENSE_DATA_IS_INVALID);
17655 17655
17656 17656 case STATUS_CHECK:
17657 17657 case STATUS_TERMINATED:
17658 17658 msgp = "Check Condition on REQUEST SENSE\n";
17659 17659 goto sense_failed;
17660 17660
17661 17661 default:
17662 17662 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n";
17663 17663 goto sense_failed;
17664 17664 }
17665 17665
17666 17666 /*
17667 17667 * See if we got the minimum required amount of sense data.
17668 17668 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes
17669 17669 * or less.
17670 17670 */
17671 17671 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) ||
17672 17672 (actual_len == 0)) {
17673 17673 msgp = "Request Sense couldn't get sense data\n";
17674 17674 goto sense_failed;
17675 17675 }
17676 17676
17677 17677 if (actual_len < SUN_MIN_SENSE_LENGTH) {
17678 17678 msgp = "Not enough sense information\n";
17679 17679 /* Mark the ssc_flags for detecting invalid sense data */
17680 17680 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17681 17681 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17682 17682 "sense-data");
17683 17683 }
17684 17684 goto sense_failed;
17685 17685 }
17686 17686
17687 17687 /*
17688 17688 * We require the extended sense data
17689 17689 */
17690 17690 esp = (struct scsi_extended_sense *)xp->xb_sense_data;
17691 17691 if (esp->es_class != CLASS_EXTENDED_SENSE) {
17692 17692 if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
17693 17693 static char tmp[8];
17694 17694 static char buf[148];
17695 17695 char *p = (char *)(xp->xb_sense_data);
17696 17696 int i;
17697 17697
17698 17698 mutex_enter(&sd_sense_mutex);
17699 17699 (void) strcpy(buf, "undecodable sense information:");
17700 17700 for (i = 0; i < actual_len; i++) {
17701 17701 (void) sprintf(tmp, " 0x%x", *(p++)&0xff);
17702 17702 (void) strcpy(&buf[strlen(buf)], tmp);
17703 17703 }
17704 17704 i = strlen(buf);
17705 17705 (void) strcpy(&buf[i], "-(assumed fatal)\n");
17706 17706
17707 17707 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
17708 17708 scsi_log(SD_DEVINFO(un), sd_label,
17709 17709 CE_WARN, buf);
17710 17710 }
17711 17711 mutex_exit(&sd_sense_mutex);
17712 17712 }
17713 17713
17714 17714 /* Mark the ssc_flags for detecting invalid sense data */
17715 17715 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17716 17716 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17717 17717 "sense-data");
17718 17718 }
17719 17719
17720 17720 /* Note: Legacy behavior, fail the command with no retry */
17721 17721 sd_return_failed_command(un, bp, EIO);
17722 17722 return (SD_SENSE_DATA_IS_INVALID);
17723 17723 }
17724 17724
17725 17725 /*
17726 17726 * Check that es_code is valid (es_class concatenated with es_code
17727 17727	 * make up the "response code" field).  es_class will always be 7, so
17728 17728 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code will indicate the
17729 17729 * format.
17730 17730 */
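	/*
	 * For reference (per the SCSI spec): the full response-code byte
	 * is (es_class << 4) | es_code, i.e. 0x70 fixed/current,
	 * 0x71 fixed/deferred, 0x72 descriptor/current,
	 * 0x73 descriptor/deferred, and 0x7f vendor specific.
	 */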
17731 17731 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) &&
17732 17732 (esp->es_code != CODE_FMT_FIXED_DEFERRED) &&
17733 17733 (esp->es_code != CODE_FMT_DESCR_CURRENT) &&
17734 17734 (esp->es_code != CODE_FMT_DESCR_DEFERRED) &&
17735 17735 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) {
17736 17736 /* Mark the ssc_flags for detecting invalid sense data */
17737 17737 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17738 17738 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17739 17739 "sense-data");
17740 17740 }
17741 17741 goto sense_failed;
17742 17742 }
17743 17743
17744 17744 return (SD_SENSE_DATA_IS_VALID);
17745 17745
17746 17746 sense_failed:
17747 17747 /*
17748 17748 * If the request sense failed (for whatever reason), attempt
17749 17749 * to retry the original command.
17750 17750 */
17751 17751 #if defined(__i386) || defined(__amd64)
17752 17752 /*
17753 17753	 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in
17754 17754	 * sddef.h for the SPARC platform, while x86 uses a single
17755 17755	 * binary for both SCSI and FC.
17756 17756	 * The SD_RETRY_DELAY value used here needs to be adjusted
17757 17757	 * whenever SD_RETRY_DELAY changes in sddef.h.
17758 17758 */
17759 17759 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17760 17760 sd_print_sense_failed_msg, msgp, EIO,
17761 17761	    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, NULL);
17762 17762 #else
17763 17763 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17764 17764 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL);
17765 17765 #endif
17766 17766
17767 17767 return (SD_SENSE_DATA_IS_INVALID);
17768 17768 }
17769 17769
17770 17770 /*
17771 17771 * Function: sd_decode_sense
17772 17772 *
17773 17773 * Description: Take recovery action(s) when SCSI Sense Data is received.
17774 17774 *
17775 17775 * Context: Interrupt context.
17776 17776 */
17777 17777
17778 17778 static void
17779 17779 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
17780 17780 struct scsi_pkt *pktp)
17781 17781 {
17782 17782 uint8_t sense_key;
17783 17783
17784 17784 ASSERT(un != NULL);
17785 17785 ASSERT(mutex_owned(SD_MUTEX(un)));
17786 17786 ASSERT(bp != NULL);
17787 17787 ASSERT(bp != un->un_rqs_bp);
17788 17788 ASSERT(xp != NULL);
17789 17789 ASSERT(pktp != NULL);
17790 17790
17791 17791 sense_key = scsi_sense_key(xp->xb_sense_data);
17792 17792
17793 17793 switch (sense_key) {
17794 17794 case KEY_NO_SENSE:
17795 17795 sd_sense_key_no_sense(un, bp, xp, pktp);
17796 17796 break;
17797 17797 case KEY_RECOVERABLE_ERROR:
17798 17798 sd_sense_key_recoverable_error(un, xp->xb_sense_data,
17799 17799 bp, xp, pktp);
17800 17800 break;
17801 17801 case KEY_NOT_READY:
17802 17802 sd_sense_key_not_ready(un, xp->xb_sense_data,
17803 17803 bp, xp, pktp);
17804 17804 break;
17805 17805 case KEY_MEDIUM_ERROR:
17806 17806 case KEY_HARDWARE_ERROR:
17807 17807 sd_sense_key_medium_or_hardware_error(un,
17808 17808 xp->xb_sense_data, bp, xp, pktp);
17809 17809 break;
17810 17810 case KEY_ILLEGAL_REQUEST:
17811 17811 sd_sense_key_illegal_request(un, bp, xp, pktp);
17812 17812 break;
17813 17813 case KEY_UNIT_ATTENTION:
17814 17814 sd_sense_key_unit_attention(un, xp->xb_sense_data,
17815 17815 bp, xp, pktp);
17816 17816 break;
17817 17817 case KEY_WRITE_PROTECT:
17818 17818 case KEY_VOLUME_OVERFLOW:
17819 17819 case KEY_MISCOMPARE:
17820 17820 sd_sense_key_fail_command(un, bp, xp, pktp);
17821 17821 break;
17822 17822 case KEY_BLANK_CHECK:
17823 17823 sd_sense_key_blank_check(un, bp, xp, pktp);
17824 17824 break;
17825 17825 case KEY_ABORTED_COMMAND:
17826 17826 sd_sense_key_aborted_command(un, bp, xp, pktp);
17827 17827 break;
17828 17828 case KEY_VENDOR_UNIQUE:
17829 17829 case KEY_COPY_ABORTED:
17830 17830 case KEY_EQUAL:
17831 17831 case KEY_RESERVED:
17832 17832 default:
17833 17833 sd_sense_key_default(un, xp->xb_sense_data,
17834 17834 bp, xp, pktp);
17835 17835 break;
17836 17836 }
17837 17837 }
17838 17838
17839 17839
17840 17840 /*
17841 17841 * Function: sd_dump_memory
17842 17842 *
17843 17843	 * Description: Debug logging routine to print the contents of a user-provided
17844 17844	 *		buffer. The output of the buffer is broken up into 256-byte
17845 17845	 *		segments due to a size constraint of the scsi_log
17846 17846	 *		implementation.
17847 17847 *
17848 17848 * Arguments: un - ptr to softstate
17849 17849 * comp - component mask
17850 17850	 *		title - "title" string to precede data when printed
17851 17851 * data - ptr to data block to be printed
17852 17852 * len - size of data block to be printed
17853 17853 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
17854 17854 *
17855 17855 * Context: May be called from interrupt context
17856 17856 */
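/*
 * Typical invocation, as used later in this file when dumping a failed
 * command's sense bytes:
 *
 *	sd_dump_memory(un, SD_LOG_IO, "Sense Data",
 *	    (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX);
 */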
17857 17857
17858 17858 #define SD_DUMP_MEMORY_BUF_SIZE 256
17859 17859
17860 17860 static char *sd_dump_format_string[] = {
17861 17861 " 0x%02x",
17862 17862 " %c"
17863 17863 };
17864 17864
17865 17865 static void
17866 17866 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
17867 17867 int len, int fmt)
17868 17868 {
17869 17869 int i, j;
17870 17870 int avail_count;
17871 17871 int start_offset;
17872 17872 int end_offset;
17873 17873 size_t entry_len;
17874 17874 char *bufp;
17875 17875 char *local_buf;
17876 17876 char *format_string;
17877 17877
17878 17878 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));
17879 17879
17880 17880 /*
17881 17881 * In the debug version of the driver, this function is called from a
17882 17882 * number of places which are NOPs in the release driver.
17883 17883 * The debug driver therefore has additional methods of filtering
17884 17884 * debug output.
17885 17885 */
17886 17886 #ifdef SDDEBUG
17887 17887 /*
17888 17888 * In the debug version of the driver we can reduce the amount of debug
17889 17889 * messages by setting sd_error_level to something other than
17890 17890 * SCSI_ERR_ALL and clearing bits in sd_level_mask and
17891 17891 * sd_component_mask.
17892 17892 */
17893 17893 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
17894 17894 (sd_error_level != SCSI_ERR_ALL)) {
17895 17895 return;
17896 17896 }
17897 17897 if (((sd_component_mask & comp) == 0) ||
17898 17898 (sd_error_level != SCSI_ERR_ALL)) {
17899 17899 return;
17900 17900 }
17901 17901 #else
17902 17902 if (sd_error_level != SCSI_ERR_ALL) {
17903 17903 return;
17904 17904 }
17905 17905 #endif
17906 17906
17907 17907 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
17908 17908 bufp = local_buf;
17909 17909 /*
17910 17910 * Available length is the length of local_buf[], minus the
17911 17911 * length of the title string, minus one for the ":", minus
17912 17912 * one for the newline, minus one for the NULL terminator.
17913 17913 * This gives the #bytes available for holding the printed
17914 17914 * values from the given data buffer.
17915 17915 */
17916 17916 if (fmt == SD_LOG_HEX) {
17917 17917 format_string = sd_dump_format_string[0];
17918 17918 } else /* SD_LOG_CHAR */ {
17919 17919 format_string = sd_dump_format_string[1];
17920 17920 }
17921 17921 /*
17922 17922 * Available count is the number of elements from the given
17923 17923 * data buffer that we can fit into the available length.
17924 17924 * This is based upon the size of the format string used.
17925 17925	 * Make one entry and find its size.
17926 17926 */
17927 17927 (void) sprintf(bufp, format_string, data[0]);
17928 17928 entry_len = strlen(bufp);
17929 17929 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;
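	/*
	 * Worked example (illustrative): for title "Sense Data"
	 * (strlen == 10) and the hex format " 0x%02x" (entry_len == 5),
	 * avail_count is (256 - 10 - 3) / 5 == 48 entries per line.
	 */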
17930 17930
17931 17931 j = 0;
17932 17932 while (j < len) {
17933 17933 bufp = local_buf;
17934 17934 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
17935 17935 start_offset = j;
17936 17936
17937 17937 end_offset = start_offset + avail_count;
17938 17938
17939 17939 (void) sprintf(bufp, "%s:", title);
17940 17940 bufp += strlen(bufp);
17941 17941 for (i = start_offset; ((i < end_offset) && (j < len));
17942 17942 i++, j++) {
17943 17943 (void) sprintf(bufp, format_string, data[i]);
17944 17944 bufp += entry_len;
17945 17945 }
17946 17946 (void) sprintf(bufp, "\n");
17947 17947
17948 17948 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
17949 17949 }
17950 17950 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
17951 17951 }
17952 17952
17953 17953 /*
17954 17954 * Function: sd_print_sense_msg
17955 17955 *
17956 17956 * Description: Log a message based upon the given sense data.
17957 17957 *
17958 17958 * Arguments: un - ptr to associated softstate
17959 17959 * bp - ptr to buf(9S) for the command
17960 17960 * arg - ptr to associate sd_sense_info struct
17961 17961 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
17962 17962 * or SD_NO_RETRY_ISSUED
17963 17963 *
17964 17964 * Context: May be called from interrupt context
17965 17965 */
17966 17966
17967 17967 static void
17968 17968 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
17969 17969 {
17970 17970 struct sd_xbuf *xp;
17971 17971 struct scsi_pkt *pktp;
17972 17972 uint8_t *sensep;
17973 17973 daddr_t request_blkno;
17974 17974 diskaddr_t err_blkno;
17975 17975 int severity;
17976 17976 int pfa_flag;
17977 17977 extern struct scsi_key_strings scsi_cmds[];
17978 17978
17979 17979 ASSERT(un != NULL);
17980 17980 ASSERT(mutex_owned(SD_MUTEX(un)));
17981 17981 ASSERT(bp != NULL);
17982 17982 xp = SD_GET_XBUF(bp);
17983 17983 ASSERT(xp != NULL);
17984 17984 pktp = SD_GET_PKTP(bp);
17985 17985 ASSERT(pktp != NULL);
17986 17986 ASSERT(arg != NULL);
17987 17987
17988 17988 severity = ((struct sd_sense_info *)(arg))->ssi_severity;
17989 17989 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;
17990 17990
17991 17991 if ((code == SD_DELAYED_RETRY_ISSUED) ||
17992 17992 (code == SD_IMMEDIATE_RETRY_ISSUED)) {
17993 17993 severity = SCSI_ERR_RETRYABLE;
17994 17994 }
17995 17995
17996 17996 /* Use absolute block number for the request block number */
17997 17997 request_blkno = xp->xb_blkno;
17998 17998
17999 17999 /*
18000 18000 * Now try to get the error block number from the sense data
18001 18001 */
18002 18002 sensep = xp->xb_sense_data;
18003 18003
18004 18004 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
18005 18005 (uint64_t *)&err_blkno)) {
18006 18006 /*
18007 18007 * We retrieved the error block number from the information
18008 18008 * portion of the sense data.
18009 18009 *
18010 18010 * For USCSI commands we are better off using the error
18011 18011 * block no. as the requested block no. (This is the best
18012 18012 * we can estimate.)
18013 18013 */
18014 18014 if ((SD_IS_BUFIO(xp) == FALSE) &&
18015 18015 ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
18016 18016 request_blkno = err_blkno;
18017 18017 }
18018 18018 } else {
18019 18019 /*
18020 18020 * Without the es_valid bit set (for fixed format) or an
18021 18021 * information descriptor (for descriptor format) we cannot
18022 18022 * be certain of the error blkno, so just use the
18023 18023 * request_blkno.
18024 18024 */
18025 18025 err_blkno = (diskaddr_t)request_blkno;
18026 18026 }
18027 18027
18028 18028 /*
18029 18029 * The following will log the buffer contents for the release driver
18030 18030 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
18031 18031 * level is set to verbose.
18032 18032 */
18033 18033 sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
18034 18034 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
18035 18035 sd_dump_memory(un, SD_LOG_IO, "Sense Data",
18036 18036 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX);
18037 18037
18038 18038 if (pfa_flag == FALSE) {
18039 18039 /* This is normally only set for USCSI */
18040 18040 if ((pktp->pkt_flags & FLAG_SILENT) != 0) {
18041 18041 return;
18042 18042 }
18043 18043
18044 18044 if ((SD_IS_BUFIO(xp) == TRUE) &&
18045 18045 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) &&
18046 18046 (severity < sd_error_level))) {
18047 18047 return;
18048 18048 }
18049 18049 }
18050 18050 /*
18051 18051 	 * Check for Sonoma failover and keep a count of how many I/Os have failed
18052 18052 */
18053 18053 if ((SD_IS_LSI(un)) &&
18054 18054 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) &&
18055 18055 (scsi_sense_asc(sensep) == 0x94) &&
18056 18056 (scsi_sense_ascq(sensep) == 0x01)) {
18057 18057 un->un_sonoma_failure_count++;
18058 18058 if (un->un_sonoma_failure_count > 1) {
18059 18059 return;
18060 18060 }
18061 18061 }
18062 18062
18063 18063 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP ||
18064 18064 ((scsi_sense_key(sensep) == KEY_RECOVERABLE_ERROR) &&
18065 18065 (pktp->pkt_resid == 0))) {
18066 18066 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity,
18067 18067 request_blkno, err_blkno, scsi_cmds,
18068 18068 (struct scsi_extended_sense *)sensep,
18069 18069 un->un_additional_codes, NULL);
18070 18070 }
18071 18071 }
18072 18072
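/*
 * The handlers below all branch on the sense key, ASC, and ASCQ that
 * scsi_sense_key(9F), scsi_sense_asc(9F), and scsi_sense_ascq(9F)
 * extract. As a self-contained illustration, this user-space sketch
 * decodes those fields from a fixed-format (0x70) sense buffer; the
 * 9F routines are preferred in the driver because they also handle
 * descriptor-format sense data.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* Fixed-format sense: NOT READY (0x02), MEDIUM NOT PRESENT */
	uint8_t sb[18] = { 0x70 };

	sb[2] = 0x02;		/* byte 2, low nibble: sense key */
	sb[12] = 0x3a;		/* byte 12: additional sense code */
	sb[13] = 0x00;		/* byte 13: additional sense code qualifier */

	(void) printf("key=0x%02x asc=0x%02x ascq=0x%02x\n",
	    sb[2] & 0x0f, sb[12], sb[13]);
	return (0);
}
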
18073 18073 /*
18074 18074 * Function: sd_sense_key_no_sense
18075 18075 *
18076 18076 * Description: Recovery action when sense data was not received.
18077 18077 *
18078 18078 * Context: May be called from interrupt context
18079 18079 */
18080 18080
18081 18081 static void
18082 18082 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
18083 18083 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18084 18084 {
18085 18085 struct sd_sense_info si;
18086 18086
18087 18087 ASSERT(un != NULL);
18088 18088 ASSERT(mutex_owned(SD_MUTEX(un)));
18089 18089 ASSERT(bp != NULL);
18090 18090 ASSERT(xp != NULL);
18091 18091 ASSERT(pktp != NULL);
18092 18092
18093 18093 si.ssi_severity = SCSI_ERR_FATAL;
18094 18094 si.ssi_pfa_flag = FALSE;
18095 18095
18096 18096 SD_UPDATE_ERRSTATS(un, sd_softerrs);
18097 18097
18098 18098 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18099 18099 &si, EIO, (clock_t)0, NULL);
18100 18100 }
18101 18101
18102 18102
18103 18103 /*
18104 18104 * Function: sd_sense_key_recoverable_error
18105 18105 *
18106 18106 * Description: Recovery actions for a SCSI "Recovered Error" sense key.
18107 18107 *
18108 18108 * Context: May be called from interrupt context
18109 18109 */
18110 18110
18111 18111 static void
18112 18112 sd_sense_key_recoverable_error(struct sd_lun *un,
18113 18113 uint8_t *sense_datap,
18114 18114 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18115 18115 {
18116 18116 struct sd_sense_info si;
18117 18117 uint8_t asc = scsi_sense_asc(sense_datap);
18118 18118 uint8_t ascq = scsi_sense_ascq(sense_datap);
18119 18119
18120 18120 ASSERT(un != NULL);
18121 18121 ASSERT(mutex_owned(SD_MUTEX(un)));
18122 18122 ASSERT(bp != NULL);
18123 18123 ASSERT(xp != NULL);
18124 18124 ASSERT(pktp != NULL);
18125 18125
18126 18126 /*
18127 18127 * 0x00, 0x1D: ATA PASSTHROUGH INFORMATION AVAILABLE
18128 18128 */
18129 18129 if (asc == 0x00 && ascq == 0x1D) {
18130 18130 sd_return_command(un, bp);
18131 18131 return;
18132 18132 }
18133 18133
18134 18134 /*
18135 18135 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED
18136 18136 */
18137 18137 if ((asc == 0x5D) && (sd_report_pfa != 0)) {
18138 18138 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
18139 18139 si.ssi_severity = SCSI_ERR_INFO;
18140 18140 si.ssi_pfa_flag = TRUE;
18141 18141 } else {
18142 18142 SD_UPDATE_ERRSTATS(un, sd_softerrs);
18143 18143 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err);
18144 18144 si.ssi_severity = SCSI_ERR_RECOVERED;
18145 18145 si.ssi_pfa_flag = FALSE;
18146 18146 }
18147 18147
18148 18148 if (pktp->pkt_resid == 0) {
18149 18149 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18150 18150 sd_return_command(un, bp);
18151 18151 return;
18152 18152 }
18153 18153
18154 18154 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18155 18155 &si, EIO, (clock_t)0, NULL);
18156 18156 }
18157 18157
18158 18158
18159 18159
18160 18160
18161 18161 /*
18162 18162 * Function: sd_sense_key_not_ready
18163 18163 *
18164 18164 * Description: Recovery actions for a SCSI "Not Ready" sense key.
18165 18165 *
18166 18166 * Context: May be called from interrupt context
18167 18167 */
18168 18168
18169 18169 static void
18170 18170 sd_sense_key_not_ready(struct sd_lun *un,
18171 18171 uint8_t *sense_datap,
18172 18172 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18173 18173 {
18174 18174 struct sd_sense_info si;
18175 18175 uint8_t asc = scsi_sense_asc(sense_datap);
18176 18176 uint8_t ascq = scsi_sense_ascq(sense_datap);
18177 18177
18178 18178 ASSERT(un != NULL);
18179 18179 ASSERT(mutex_owned(SD_MUTEX(un)));
18180 18180 ASSERT(bp != NULL);
18181 18181 ASSERT(xp != NULL);
18182 18182 ASSERT(pktp != NULL);
18183 18183
18184 18184 si.ssi_severity = SCSI_ERR_FATAL;
18185 18185 si.ssi_pfa_flag = FALSE;
18186 18186
18187 18187 /*
18188 18188 * Update error stats after first NOT READY error. Disks may have
18189 18189 * been powered down and may need to be restarted. For CDROMs,
18190 18190 * report NOT READY errors only if media is present.
18191 18191 */
18192 18192 if ((ISCD(un) && (asc == 0x3A)) ||
18193 18193 (xp->xb_nr_retry_count > 0)) {
18194 18194 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18195 18195 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
18196 18196 }
18197 18197
18198 18198 /*
18199 18199 * Just fail if the "not ready" retry limit has been reached.
18200 18200 */
18201 18201 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) {
18202 18202 /* Special check for error message printing for removables. */
18203 18203 if (un->un_f_has_removable_media && (asc == 0x04) &&
18204 18204 (ascq >= 0x04)) {
18205 18205 si.ssi_severity = SCSI_ERR_ALL;
18206 18206 }
18207 18207 goto fail_command;
18208 18208 }
18209 18209
18210 18210 /*
18211 18211 * Check the ASC and ASCQ in the sense data as needed, to determine
18212 18212 * what to do.
18213 18213 */
18214 18214 switch (asc) {
18215 18215 case 0x04: /* LOGICAL UNIT NOT READY */
18216 18216 /*
18217 18217 		 * Disk drives that don't spin up result in a very long delay
18218 18218 * in format without warning messages. We will log a message
18219 18219 * if the error level is set to verbose.
18220 18220 */
18221 18221 if (sd_error_level < SCSI_ERR_RETRYABLE) {
18222 18222 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18223 18223 "logical unit not ready, resetting disk\n");
18224 18224 }
18225 18225
18226 18226 /*
18227 18227 * There are different requirements for CDROMs and disks for
18228 18228 		 * the number of retries. If a CD-ROM is reporting this, it is
18229 18229 		 * probably reading the TOC and is in the process of getting
18230 18230 		 * ready, so we should keep on trying for a long time to make
18231 18231 		 * sure that all types of media are taken into account (for
18232 18232 		 * some media the drive takes a long time to read the TOC). For
18233 18233 * disks we do not want to retry this too many times as this
18234 18234 * can cause a long hang in format when the drive refuses to
18235 18235 * spin up (a very common failure).
18236 18236 */
18237 18237 switch (ascq) {
18238 18238 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */
18239 18239 /*
18240 18240 * Disk drives frequently refuse to spin up which
18241 18241 * results in a very long hang in format without
18242 18242 * warning messages.
18243 18243 *
18244 18244 * Note: This code preserves the legacy behavior of
18245 18245 * comparing xb_nr_retry_count against zero for fibre
18246 18246 * channel targets instead of comparing against the
18247 18247 * un_reset_retry_count value. The reason for this
18248 18248 * discrepancy has been so utterly lost beneath the
18249 18249 * Sands of Time that even Indiana Jones could not
18250 18250 * find it.
18251 18251 */
18252 18252 if (un->un_f_is_fibre == TRUE) {
18253 18253 if (((sd_level_mask & SD_LOGMASK_DIAG) ||
18254 18254 (xp->xb_nr_retry_count > 0)) &&
18255 18255 (un->un_startstop_timeid == NULL)) {
18256 18256 scsi_log(SD_DEVINFO(un), sd_label,
18257 18257 CE_WARN, "logical unit not ready, "
18258 18258 "resetting disk\n");
18259 18259 sd_reset_target(un, pktp);
18260 18260 }
18261 18261 } else {
18262 18262 if (((sd_level_mask & SD_LOGMASK_DIAG) ||
18263 18263 (xp->xb_nr_retry_count >
18264 18264 un->un_reset_retry_count)) &&
18265 18265 (un->un_startstop_timeid == NULL)) {
18266 18266 scsi_log(SD_DEVINFO(un), sd_label,
18267 18267 CE_WARN, "logical unit not ready, "
18268 18268 "resetting disk\n");
18269 18269 sd_reset_target(un, pktp);
18270 18270 }
18271 18271 }
18272 18272 break;
18273 18273
18274 18274 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */
18275 18275 /*
18276 18276 * If the target is in the process of becoming
18277 18277 * ready, just proceed with the retry. This can
18278 18278 * happen with CD-ROMs that take a long time to
18279 18279 * read TOC after a power cycle or reset.
18280 18280 */
18281 18281 goto do_retry;
18282 18282
18283 18283 		case 0x02:  /* LUN NOT READY, INITIALIZING CMD REQUIRED */
18284 18284 break;
18285 18285
18286 18286 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
18287 18287 /*
18288 18288 * Retries cannot help here so just fail right away.
18289 18289 */
18290 18290 goto fail_command;
18291 18291
18292 18292 case 0x88:
18293 18293 /*
18294 18294 * Vendor-unique code for T3/T4: it indicates a
18295 18295 			 * path problem in a multipathed config, but as far as
18296 18296 			 * the target driver is concerned it equates to a fatal
18297 18297 			 * error, so we should just fail the command right away
18298 18298 			 * (without printing anything to the console). If this
18299 18299 			 * is not a T3/T4, fall thru to the default recovery
18300 18300 			 * action.
18301 18301 			 * T3/T4 is FC only, so there is no need to check is_fibre.
18302 18302 */
18303 18303 if (SD_IS_T3(un) || SD_IS_T4(un)) {
18304 18304 sd_return_failed_command(un, bp, EIO);
18305 18305 return;
18306 18306 }
18307 18307 /* FALLTHRU */
18308 18308
18309 18309 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */
18310 18310 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */
18311 18311 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */
18312 18312 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */
18313 18313 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */
18314 18314 default: /* Possible future codes in SCSI spec? */
18315 18315 /*
18316 18316 * For removable-media devices, do not retry if
18317 18317 * ASCQ > 2 as these result mostly from USCSI commands
18318 18318 * on MMC devices issued to check status of an
18319 18319 * operation initiated in immediate mode. Also for
18320 18320 * ASCQ >= 4 do not print console messages as these
18321 18321 * mainly represent a user-initiated operation
18322 18322 * instead of a system failure.
18323 18323 */
18324 18324 if (un->un_f_has_removable_media) {
18325 18325 si.ssi_severity = SCSI_ERR_ALL;
18326 18326 goto fail_command;
18327 18327 }
18328 18328 break;
18329 18329 }
18330 18330
18331 18331 /*
18332 18332 * As part of our recovery attempt for the NOT READY
18333 18333 * condition, we issue a START STOP UNIT command. However
18334 18334 * we want to wait for a short delay before attempting this
18335 18335 * as there may still be more commands coming back from the
18336 18336 * target with the check condition. To do this we use
18337 18337 * timeout(9F) to call sd_start_stop_unit_callback() after
18338 18338 * the delay interval expires. (sd_start_stop_unit_callback()
18339 18339 * dispatches sd_start_stop_unit_task(), which will issue
18340 18340 		 * the actual START STOP UNIT command.) The delay interval
18341 18341 * is one-half of the delay that we will use to retry the
18342 18342 * command that generated the NOT READY condition.
18343 18343 *
18344 18344 * Note that we could just dispatch sd_start_stop_unit_task()
18345 18345 * from here and allow it to sleep for the delay interval,
18346 18346 * but then we would be tying up the taskq thread
18347 18347 		 * unnecessarily for the duration of the delay.
18348 18348 *
18349 18349 * Do not issue the START STOP UNIT if the current command
18350 18350 * is already a START STOP UNIT.
18351 18351 */
18352 18352 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
18353 18353 break;
18354 18354 }
18355 18355
18356 18356 /*
18357 18357 * Do not schedule the timeout if one is already pending.
18358 18358 */
18359 18359 if (un->un_startstop_timeid != NULL) {
18360 18360 SD_INFO(SD_LOG_ERROR, un,
18361 18361 "sd_sense_key_not_ready: restart already issued to"
18362 18362 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
18363 18363 ddi_get_instance(SD_DEVINFO(un)));
18364 18364 break;
18365 18365 }
18366 18366
18367 18367 /*
18368 18368 * Schedule the START STOP UNIT command, then queue the command
18369 18369 * for a retry.
18370 18370 *
18371 18371 * Note: A timeout is not scheduled for this retry because we
18372 18372 * want the retry to be serial with the START_STOP_UNIT. The
18373 18373 * retry will be started when the START_STOP_UNIT is completed
18374 18374 * in sd_start_stop_unit_task.
18375 18375 */
18376 18376 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
18377 18377 un, un->un_busy_timeout / 2);
18378 18378 xp->xb_nr_retry_count++;
18379 18379 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
18380 18380 return;
18381 18381
18382 18382 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
18383 18383 if (sd_error_level < SCSI_ERR_RETRYABLE) {
18384 18384 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18385 18385 "unit does not respond to selection\n");
18386 18386 }
18387 18387 break;
18388 18388
18389 18389 case 0x3A: /* MEDIUM NOT PRESENT */
18390 18390 if (sd_error_level >= SCSI_ERR_FATAL) {
18391 18391 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18392 18392 "Caddy not inserted in drive\n");
18393 18393 }
18394 18394
18395 18395 sr_ejected(un);
18396 18396 un->un_mediastate = DKIO_EJECTED;
18397 18397 /* The state has changed, inform the media watch routines */
18398 18398 cv_broadcast(&un->un_state_cv);
18399 18399 /* Just fail if no media is present in the drive. */
18400 18400 goto fail_command;
18401 18401
18402 18402 default:
18403 18403 if (sd_error_level < SCSI_ERR_RETRYABLE) {
18404 18404 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
18405 18405 "Unit not Ready. Additional sense code 0x%x\n",
18406 18406 asc);
18407 18407 }
18408 18408 break;
18409 18409 }
18410 18410
18411 18411 do_retry:
18412 18412
18413 18413 /*
18414 18414 * Retry the command, as some targets may report NOT READY for
18415 18415 * several seconds after being reset.
18416 18416 */
18417 18417 xp->xb_nr_retry_count++;
18418 18418 si.ssi_severity = SCSI_ERR_RETRYABLE;
18419 18419 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
18420 18420 &si, EIO, un->un_busy_timeout, NULL);
18421 18421
18422 18422 return;
18423 18423
18424 18424 fail_command:
18425 18425 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18426 18426 sd_return_failed_command(un, bp, EIO);
18427 18427 }
18428 18428
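/*
 * A condensed sketch of the deferred-restart pattern used above, with
 * hypothetical names (my_state, my_callback, my_schedule): guard the
 * timeout id under the softstate mutex so the callback is never
 * double-scheduled, and clear the id when the callback fires. This is
 * an illustration of the timeout(9F) idiom, not driver code.
 */
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/ksynch.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

struct my_state {
	kmutex_t	ms_mutex;
	timeout_id_t	ms_tid;		/* non-NULL while scheduled */
};

static void
my_callback(void *arg)
{
	struct my_state *sp = arg;

	mutex_enter(&sp->ms_mutex);
	sp->ms_tid = NULL;		/* mark the timeout as fired */
	/* ... dispatch the real (possibly blocking) work to a taskq ... */
	mutex_exit(&sp->ms_mutex);
}

static void
my_schedule(struct my_state *sp, clock_t delay_ticks)
{
	mutex_enter(&sp->ms_mutex);
	if (sp->ms_tid == NULL)		/* never schedule twice */
		sp->ms_tid = timeout(my_callback, sp, delay_ticks);
	mutex_exit(&sp->ms_mutex);
}
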
18429 18429
18430 18430
18431 18431 /*
18432 18432 * Function: sd_sense_key_medium_or_hardware_error
18433 18433 *
18434 18434 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
18435 18435 * sense key.
18436 18436 *
18437 18437 * Context: May be called from interrupt context
18438 18438 */
18439 18439
18440 18440 static void
18441 18441 sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
18442 18442 uint8_t *sense_datap,
18443 18443 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18444 18444 {
18445 18445 struct sd_sense_info si;
18446 18446 uint8_t sense_key = scsi_sense_key(sense_datap);
18447 18447 uint8_t asc = scsi_sense_asc(sense_datap);
18448 18448
18449 18449 ASSERT(un != NULL);
18450 18450 ASSERT(mutex_owned(SD_MUTEX(un)));
18451 18451 ASSERT(bp != NULL);
18452 18452 ASSERT(xp != NULL);
18453 18453 ASSERT(pktp != NULL);
18454 18454
18455 18455 si.ssi_severity = SCSI_ERR_FATAL;
18456 18456 si.ssi_pfa_flag = FALSE;
18457 18457
18458 18458 if (sense_key == KEY_MEDIUM_ERROR) {
18459 18459 SD_UPDATE_ERRSTATS(un, sd_rq_media_err);
18460 18460 }
18461 18461
18462 18462 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18463 18463
18464 18464 if ((un->un_reset_retry_count != 0) &&
18465 18465 (xp->xb_retry_count == un->un_reset_retry_count)) {
18466 18466 mutex_exit(SD_MUTEX(un));
18467 18467 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */
18468 18468 if (un->un_f_allow_bus_device_reset == TRUE) {
18469 18469
18470 18470 boolean_t try_resetting_target = B_TRUE;
18471 18471
18472 18472 /*
18473 18473 			 * We need to be able to handle specific ASC values when we
18474 18474 			 * are handling a KEY_HARDWARE_ERROR. In particular,
18475 18475 			 * taking the default action of resetting the target may
18476 18476 			 * not be the appropriate way to attempt recovery.
18477 18477 			 * Resetting a target because of a single LUN failure
18478 18478 			 * victimizes all LUNs on that target.
18479 18479 			 *
18480 18480 			 * This is true for LSI arrays: if an LSI array
18481 18481 			 * controller returns an ASC of 0x84 (LUN Dead), we
18482 18482 			 * should trust it.
18483 18483 */
18484 18484
18485 18485 if (sense_key == KEY_HARDWARE_ERROR) {
18486 18486 switch (asc) {
18487 18487 case 0x84:
18488 18488 if (SD_IS_LSI(un)) {
18489 18489 try_resetting_target = B_FALSE;
18490 18490 }
18491 18491 break;
18492 18492 default:
18493 18493 break;
18494 18494 }
18495 18495 }
18496 18496
18497 18497 if (try_resetting_target == B_TRUE) {
18498 18498 int reset_retval = 0;
18499 18499 if (un->un_f_lun_reset_enabled == TRUE) {
18500 18500 SD_TRACE(SD_LOG_IO_CORE, un,
18501 18501 "sd_sense_key_medium_or_hardware_"
18502 18502 "error: issuing RESET_LUN\n");
18503 18503 reset_retval =
18504 18504 scsi_reset(SD_ADDRESS(un),
18505 18505 RESET_LUN);
18506 18506 }
18507 18507 if (reset_retval == 0) {
18508 18508 SD_TRACE(SD_LOG_IO_CORE, un,
18509 18509 "sd_sense_key_medium_or_hardware_"
18510 18510 "error: issuing RESET_TARGET\n");
18511 18511 (void) scsi_reset(SD_ADDRESS(un),
18512 18512 RESET_TARGET);
18513 18513 }
18514 18514 }
18515 18515 }
18516 18516 mutex_enter(SD_MUTEX(un));
18517 18517 }
18518 18518
18519 18519 /*
18520 18520 * This really ought to be a fatal error, but we will retry anyway
18521 18521 * as some drives report this as a spurious error.
18522 18522 */
18523 18523 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18524 18524 &si, EIO, (clock_t)0, NULL);
18525 18525 }
18526 18526
18527 18527
18528 18528
18529 18529 /*
18530 18530 * Function: sd_sense_key_illegal_request
18531 18531 *
18532 18532 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
18533 18533 *
18534 18534 * Context: May be called from interrupt context
18535 18535 */
18536 18536
18537 18537 static void
18538 18538 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
18539 18539 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18540 18540 {
18541 18541 struct sd_sense_info si;
18542 18542
18543 18543 ASSERT(un != NULL);
18544 18544 ASSERT(mutex_owned(SD_MUTEX(un)));
18545 18545 ASSERT(bp != NULL);
18546 18546 ASSERT(xp != NULL);
18547 18547 ASSERT(pktp != NULL);
18548 18548
18549 18549 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err);
18550 18550
18551 18551 si.ssi_severity = SCSI_ERR_INFO;
18552 18552 si.ssi_pfa_flag = FALSE;
18553 18553
18554 18554 /* Pointless to retry if the target thinks it's an illegal request */
18555 18555 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18556 18556 sd_return_failed_command(un, bp, EIO);
18557 18557 }
18558 18558
18559 18559
18560 18560
18561 18561
18562 18562 /*
18563 18563 * Function: sd_sense_key_unit_attention
18564 18564 *
18565 18565 * Description: Recovery actions for a SCSI "Unit Attention" sense key.
18566 18566 *
18567 18567 * Context: May be called from interrupt context
18568 18568 */
18569 18569
18570 18570 static void
18571 18571 sd_sense_key_unit_attention(struct sd_lun *un,
18572 18572 uint8_t *sense_datap,
18573 18573 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18574 18574 {
18575 18575 /*
18576 18576 * For UNIT ATTENTION we allow retries for one minute. Devices
18577 18577 * like Sonoma can return UNIT ATTENTION close to a minute
18578 18578 * under certain conditions.
18579 18579 */
18580 18580 int retry_check_flag = SD_RETRIES_UA;
18581 18581 boolean_t kstat_updated = B_FALSE;
18582 18582 struct sd_sense_info si;
18583 18583 uint8_t asc = scsi_sense_asc(sense_datap);
18584 18584 uint8_t ascq = scsi_sense_ascq(sense_datap);
18585 18585
18586 18586 ASSERT(un != NULL);
18587 18587 ASSERT(mutex_owned(SD_MUTEX(un)));
18588 18588 ASSERT(bp != NULL);
18589 18589 ASSERT(xp != NULL);
18590 18590 ASSERT(pktp != NULL);
18591 18591
18592 18592 si.ssi_severity = SCSI_ERR_INFO;
18593 18593 si.ssi_pfa_flag = FALSE;
18594 18594
18595 18595
18596 18596 switch (asc) {
18597 18597 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */
18598 18598 if (sd_report_pfa != 0) {
18599 18599 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
18600 18600 si.ssi_pfa_flag = TRUE;
18601 18601 retry_check_flag = SD_RETRIES_STANDARD;
18602 18602 goto do_retry;
18603 18603 }
18604 18604
18605 18605 break;
18606 18606
18607 18607 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
18608 18608 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
18609 18609 un->un_resvd_status |=
18610 18610 (SD_LOST_RESERVE | SD_WANT_RESERVE);
18611 18611 }
18612 18612 #ifdef _LP64
18613 18613 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) {
18614 18614 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task,
18615 18615 un, KM_NOSLEEP) == 0) {
18616 18616 /*
18617 18617 * If we can't dispatch the task we'll just
18618 18618 * live without descriptor sense. We can
18619 18619 				 * try again on the next "unit attention".
18620 18620 */
18621 18621 SD_ERROR(SD_LOG_ERROR, un,
18622 18622 "sd_sense_key_unit_attention: "
18623 18623 "Could not dispatch "
18624 18624 "sd_reenable_dsense_task\n");
18625 18625 }
18626 18626 }
18627 18627 #endif /* _LP64 */
18628 18628 /* FALLTHRU */
18629 18629
18630 18630 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */
18631 18631 if (!un->un_f_has_removable_media) {
18632 18632 break;
18633 18633 }
18634 18634
18635 18635 /*
18636 18636 * When we get a unit attention from a removable-media device,
18637 18637 * it may be in a state that will take a long time to recover
18638 18638 * (e.g., from a reset). Since we are executing in interrupt
18639 18639 * context here, we cannot wait around for the device to come
18640 18640 * back. So hand this command off to sd_media_change_task()
18641 18641 * for deferred processing under taskq thread context. (Note
18642 18642 * that the command still may be failed if a problem is
18643 18643 * encountered at a later time.)
18644 18644 */
18645 18645 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
18646 18646 KM_NOSLEEP) == 0) {
18647 18647 /*
18648 18648 * Cannot dispatch the request so fail the command.
18649 18649 */
18650 18650 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18651 18651 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
18652 18652 si.ssi_severity = SCSI_ERR_FATAL;
18653 18653 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18654 18654 sd_return_failed_command(un, bp, EIO);
18655 18655 }
18656 18656
18657 18657 /*
18658 18658 		 * If we failed to dispatch sd_media_change_task(), the kstat
18659 18659 		 * was already updated above. If the dispatch succeeded, the
18660 18660 		 * kstat will be updated later if an error is encountered.
18661 18661 		 * Either way, set the kstat_updated flag here.
18662 18662 */
18663 18663 kstat_updated = B_TRUE;
18664 18664
18665 18665 /*
18666 18666 * Either the command has been successfully dispatched to a
18667 18667 * task Q for retrying, or the dispatch failed. In either case
18668 18668 		 * do NOT retry again by calling sd_retry_command. That would
18669 18669 		 * set up two retries of the same command; when one completes
18670 18670 		 * and frees its resources, the other would access freed
18671 18671 		 * memory, a bad thing.
18672 18672 */
18673 18673 return;
18674 18674
18675 18675 default:
18676 18676 break;
18677 18677 }
18678 18678
18679 18679 /*
18680 18680 * ASC ASCQ
18681 18681 * 2A 09 Capacity data has changed
18682 18682 * 2A 01 Mode parameters changed
18683 18683 * 3F 0E Reported luns data has changed
18684 18684 * Arrays that support logical unit expansion should report
18685 18685 	 * capacity changes (2Ah/09). "Mode parameters changed" and
18686 18686 	 * "Reported LUNs data has changed" are approximations of it.
18687 18687 */
18688 18688 if (((asc == 0x2a) && (ascq == 0x09)) ||
18689 18689 ((asc == 0x2a) && (ascq == 0x01)) ||
18690 18690 ((asc == 0x3f) && (ascq == 0x0e))) {
18691 18691 if (taskq_dispatch(sd_tq, sd_target_change_task, un,
18692 18692 KM_NOSLEEP) == 0) {
18693 18693 SD_ERROR(SD_LOG_ERROR, un,
18694 18694 "sd_sense_key_unit_attention: "
18695 18695 "Could not dispatch sd_target_change_task\n");
18696 18696 }
18697 18697 }
18698 18698
18699 18699 /*
18700 18700 * Update kstat if we haven't done that.
18701 18701 */
18702 18702 if (!kstat_updated) {
18703 18703 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18704 18704 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
18705 18705 }
18706 18706
18707 18707 do_retry:
18708 18708 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
18709 18709 EIO, SD_UA_RETRY_DELAY, NULL);
18710 18710 }
18711 18711
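/*
 * The unit-attention paths above hand slow work to a taskq rather than
 * blocking in interrupt context. A minimal sketch of that dispatch
 * pattern follows, with hypothetical names (my_tq, my_worker, my_defer):
 * because KM_NOSLEEP is used, the dispatch can fail, and the caller
 * must have a fallback (here, as in the driver, failing the request
 * immediately).
 */
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/taskq.h>

static taskq_t *my_tq;			/* created at attach time */

static void
my_worker(void *arg)
{
	/* ... slow recovery work runs in kernel thread context ... */
}

static int
my_defer(void *arg)
{
	if (taskq_dispatch(my_tq, my_worker, arg, KM_NOSLEEP) == 0) {
		/* No memory for the task entry: fail the request now. */
		return (EIO);
	}
	return (0);
}
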
18712 18712
18713 18713
18714 18714 /*
18715 18715 * Function: sd_sense_key_fail_command
18716 18716 *
18717 18717  * Description: Used to fail a command when we don't like the sense key that
18718 18718 * was returned.
18719 18719 *
18720 18720 * Context: May be called from interrupt context
18721 18721 */
18722 18722
18723 18723 static void
18724 18724 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
18725 18725 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18726 18726 {
18727 18727 struct sd_sense_info si;
18728 18728
18729 18729 ASSERT(un != NULL);
18730 18730 ASSERT(mutex_owned(SD_MUTEX(un)));
18731 18731 ASSERT(bp != NULL);
18732 18732 ASSERT(xp != NULL);
18733 18733 ASSERT(pktp != NULL);
18734 18734
18735 18735 si.ssi_severity = SCSI_ERR_FATAL;
18736 18736 si.ssi_pfa_flag = FALSE;
18737 18737
18738 18738 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18739 18739 sd_return_failed_command(un, bp, EIO);
18740 18740 }
18741 18741
18742 18742
18743 18743
18744 18744 /*
18745 18745 * Function: sd_sense_key_blank_check
18746 18746 *
18747 18747 * Description: Recovery actions for a SCSI "Blank Check" sense key.
18748 18748 * Has no monetary connotation.
18749 18749 *
18750 18750 * Context: May be called from interrupt context
18751 18751 */
18752 18752
18753 18753 static void
18754 18754 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
18755 18755 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18756 18756 {
18757 18757 struct sd_sense_info si;
18758 18758
18759 18759 ASSERT(un != NULL);
18760 18760 ASSERT(mutex_owned(SD_MUTEX(un)));
18761 18761 ASSERT(bp != NULL);
18762 18762 ASSERT(xp != NULL);
18763 18763 ASSERT(pktp != NULL);
18764 18764
18765 18765 /*
18766 18766 * Blank check is not fatal for removable devices, therefore
18767 18767 * it does not require a console message.
18768 18768 */
18769 18769 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL :
18770 18770 SCSI_ERR_FATAL;
18771 18771 si.ssi_pfa_flag = FALSE;
18772 18772
18773 18773 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18774 18774 sd_return_failed_command(un, bp, EIO);
18775 18775 }
18776 18776
18777 18777
18778 18778
18779 18779
18780 18780 /*
18781 18781 * Function: sd_sense_key_aborted_command
18782 18782 *
18783 18783 * Description: Recovery actions for a SCSI "Aborted Command" sense key.
18784 18784 *
18785 18785 * Context: May be called from interrupt context
18786 18786 */
18787 18787
18788 18788 static void
18789 18789 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
18790 18790 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18791 18791 {
18792 18792 struct sd_sense_info si;
18793 18793
18794 18794 ASSERT(un != NULL);
18795 18795 ASSERT(mutex_owned(SD_MUTEX(un)));
18796 18796 ASSERT(bp != NULL);
18797 18797 ASSERT(xp != NULL);
18798 18798 ASSERT(pktp != NULL);
18799 18799
18800 18800 si.ssi_severity = SCSI_ERR_FATAL;
18801 18801 si.ssi_pfa_flag = FALSE;
18802 18802
18803 18803 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18804 18804
18805 18805 /*
18806 18806 * This really ought to be a fatal error, but we will retry anyway
18807 18807 * as some drives report this as a spurious error.
18808 18808 */
18809 18809 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18810 18810 &si, EIO, drv_usectohz(100000), NULL);
18811 18811 }
18812 18812
18813 18813
18814 18814
18815 18815 /*
18816 18816 * Function: sd_sense_key_default
18817 18817 *
18818 18818 * Description: Default recovery action for several SCSI sense keys (basically
18819 18819 * attempts a retry).
18820 18820 *
18821 18821 * Context: May be called from interrupt context
18822 18822 */
18823 18823
18824 18824 static void
18825 18825 sd_sense_key_default(struct sd_lun *un,
18826 18826 uint8_t *sense_datap,
18827 18827 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18828 18828 {
18829 18829 struct sd_sense_info si;
18830 18830 uint8_t sense_key = scsi_sense_key(sense_datap);
18831 18831
18832 18832 ASSERT(un != NULL);
18833 18833 ASSERT(mutex_owned(SD_MUTEX(un)));
18834 18834 ASSERT(bp != NULL);
18835 18835 ASSERT(xp != NULL);
18836 18836 ASSERT(pktp != NULL);
18837 18837
18838 18838 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18839 18839
18840 18840 /*
18841 18841 * Undecoded sense key. Attempt retries and hope that will fix
18842 18842 * the problem. Otherwise, we're dead.
18843 18843 */
18844 18844 if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
18845 18845 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18846 18846 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]);
18847 18847 }
18848 18848
18849 18849 si.ssi_severity = SCSI_ERR_FATAL;
18850 18850 si.ssi_pfa_flag = FALSE;
18851 18851
18852 18852 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18853 18853 &si, EIO, (clock_t)0, NULL);
18854 18854 }
18855 18855
18856 18856
18857 18857
18858 18858 /*
18859 18859 * Function: sd_print_retry_msg
18860 18860 *
18861 18861 * Description: Print a message indicating the retry action being taken.
18862 18862 *
18863 18863 * Arguments: un - ptr to associated softstate
18864 18864 * bp - ptr to buf(9S) for the command
18865 18865 * arg - not used.
18866 18866 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
18867 18867 * or SD_NO_RETRY_ISSUED
18868 18868 *
18869 18869 * Context: May be called from interrupt context
18870 18870 */
18871 18871 /* ARGSUSED */
18872 18872 static void
18873 18873 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag)
18874 18874 {
18875 18875 struct sd_xbuf *xp;
18876 18876 struct scsi_pkt *pktp;
18877 18877 char *reasonp;
18878 18878 char *msgp;
18879 18879
18880 18880 ASSERT(un != NULL);
18881 18881 ASSERT(mutex_owned(SD_MUTEX(un)));
18882 18882 ASSERT(bp != NULL);
18883 18883 pktp = SD_GET_PKTP(bp);
18884 18884 ASSERT(pktp != NULL);
18885 18885 xp = SD_GET_XBUF(bp);
18886 18886 ASSERT(xp != NULL);
18887 18887
18888 18888 ASSERT(!mutex_owned(&un->un_pm_mutex));
18889 18889 mutex_enter(&un->un_pm_mutex);
18890 18890 if ((un->un_state == SD_STATE_SUSPENDED) ||
18891 18891 (SD_DEVICE_IS_IN_LOW_POWER(un)) ||
18892 18892 (pktp->pkt_flags & FLAG_SILENT)) {
18893 18893 mutex_exit(&un->un_pm_mutex);
18894 18894 goto update_pkt_reason;
18895 18895 }
18896 18896 mutex_exit(&un->un_pm_mutex);
18897 18897
18898 18898 /*
18899 18899 * Suppress messages if they are all the same pkt_reason; with
18900 18900 * TQ, many (up to 256) are returned with the same pkt_reason.
18901 18901 * If we are in panic, then suppress the retry messages.
18902 18902 */
18903 18903 switch (flag) {
18904 18904 case SD_NO_RETRY_ISSUED:
18905 18905 msgp = "giving up";
18906 18906 break;
18907 18907 case SD_IMMEDIATE_RETRY_ISSUED:
18908 18908 case SD_DELAYED_RETRY_ISSUED:
18909 18909 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) ||
18910 18910 ((pktp->pkt_reason == un->un_last_pkt_reason) &&
18911 18911 (sd_error_level != SCSI_ERR_ALL))) {
18912 18912 return;
18913 18913 }
18914 18914 msgp = "retrying command";
18915 18915 break;
18916 18916 default:
18917 18917 goto update_pkt_reason;
18918 18918 }
18919 18919
18920 18920 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" :
18921 18921 scsi_rname(pktp->pkt_reason));
18922 18922
18923 18923 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
18924 18924 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18925 18925 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp);
18926 18926 }
18927 18927
18928 18928 update_pkt_reason:
18929 18929 /*
18930 18930 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason.
18931 18931 * This is to prevent multiple console messages for the same failure
18932 18932 	 * condition. Note that un->un_last_pkt_reason is NOT restored if and
18933 18933 * when the command is retried successfully because there still may be
18934 18934 * more commands coming back with the same value of pktp->pkt_reason.
18935 18935 */
18936 18936 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) {
18937 18937 un->un_last_pkt_reason = pktp->pkt_reason;
18938 18938 }
18939 18939 }
18940 18940
18941 18941
18942 18942 /*
18943 18943 * Function: sd_print_cmd_incomplete_msg
18944 18944 *
18945 18945 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason.
18946 18946 *
18947 18947 * Arguments: un - ptr to associated softstate
18948 18948 * bp - ptr to buf(9S) for the command
18949 18949 * arg - passed to sd_print_retry_msg()
18950 18950 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
18951 18951 * or SD_NO_RETRY_ISSUED
18952 18952 *
18953 18953 * Context: May be called from interrupt context
18954 18954 */
18955 18955
18956 18956 static void
18957 18957 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg,
18958 18958 int code)
18959 18959 {
18960 18960 dev_info_t *dip;
18961 18961
18962 18962 ASSERT(un != NULL);
18963 18963 ASSERT(mutex_owned(SD_MUTEX(un)));
18964 18964 ASSERT(bp != NULL);
18965 18965
18966 18966 switch (code) {
18967 18967 case SD_NO_RETRY_ISSUED:
18968 18968 		/* The command was failed. Did someone turn off this target? */
18969 18969 if (un->un_state != SD_STATE_OFFLINE) {
18970 18970 /*
18971 18971 * Suppress message if we are detaching and
18972 18972 * device has been disconnected
18973 18973 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation
18974 18974 * private interface and not part of the DDI
18975 18975 */
18976 18976 dip = un->un_sd->sd_dev;
18977 18977 if (!(DEVI_IS_DETACHING(dip) &&
18978 18978 DEVI_IS_DEVICE_REMOVED(dip))) {
18979 18979 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18980 18980 "disk not responding to selection\n");
18981 18981 }
18982 18982 New_state(un, SD_STATE_OFFLINE);
18983 18983 }
18984 18984 break;
18985 18985
18986 18986 case SD_DELAYED_RETRY_ISSUED:
18987 18987 case SD_IMMEDIATE_RETRY_ISSUED:
18988 18988 default:
18989 18989 /* Command was successfully queued for retry */
18990 18990 sd_print_retry_msg(un, bp, arg, code);
18991 18991 break;
18992 18992 }
18993 18993 }
18994 18994
18995 18995
18996 18996 /*
18997 18997 * Function: sd_pkt_reason_cmd_incomplete
18998 18998 *
18999 18999 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason.
19000 19000 *
19001 19001 * Context: May be called from interrupt context
19002 19002 */
19003 19003
19004 19004 static void
19005 19005 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
19006 19006 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19007 19007 {
19008 19008 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE;
19009 19009
19010 19010 ASSERT(un != NULL);
19011 19011 ASSERT(mutex_owned(SD_MUTEX(un)));
19012 19012 ASSERT(bp != NULL);
19013 19013 ASSERT(xp != NULL);
19014 19014 ASSERT(pktp != NULL);
19015 19015
19016 19016 /* Do not do a reset if selection did not complete */
19017 19017 /* Note: Should this not just check the bit? */
19018 19018 if (pktp->pkt_state != STATE_GOT_BUS) {
19019 19019 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19020 19020 sd_reset_target(un, pktp);
19021 19021 }
19022 19022
19023 19023 /*
19024 19024 * If the target was not successfully selected, then set
19025 19025 * SD_RETRIES_FAILFAST to indicate that we lost communication
19026 19026 * with the target, and further retries and/or commands are
19027 19027 * likely to take a long time.
19028 19028 */
19029 19029 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) {
19030 19030 flag |= SD_RETRIES_FAILFAST;
19031 19031 }
19032 19032
19033 19033 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19034 19034
19035 19035 sd_retry_command(un, bp, flag,
19036 19036 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19037 19037 }
19038 19038
19039 19039
19040 19040
19041 19041 /*
19042 19042 * Function: sd_pkt_reason_cmd_tran_err
19043 19043 *
19044 19044 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason.
19045 19045 *
19046 19046 * Context: May be called from interrupt context
19047 19047 */
19048 19048
19049 19049 static void
19050 19050 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
19051 19051 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19052 19052 {
19053 19053 ASSERT(un != NULL);
19054 19054 ASSERT(mutex_owned(SD_MUTEX(un)));
19055 19055 ASSERT(bp != NULL);
19056 19056 ASSERT(xp != NULL);
19057 19057 ASSERT(pktp != NULL);
19058 19058
19059 19059 /*
19060 19060 * Do not reset if we got a parity error, or if
19061 19061 * selection did not complete.
19062 19062 */
19063 19063 SD_UPDATE_ERRSTATS(un, sd_harderrs);
19064 19064 /* Note: Should this not just check the bit for pkt_state? */
19065 19065 if (((pktp->pkt_statistics & STAT_PERR) == 0) &&
19066 19066 (pktp->pkt_state != STATE_GOT_BUS)) {
19067 19067 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19068 19068 sd_reset_target(un, pktp);
19069 19069 }
19070 19070
19071 19071 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19072 19072
19073 19073 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
19074 19074 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19075 19075 }
19076 19076
19077 19077
19078 19078
19079 19079 /*
19080 19080 * Function: sd_pkt_reason_cmd_reset
19081 19081 *
19082 19082 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason.
19083 19083 *
19084 19084 * Context: May be called from interrupt context
19085 19085 */
19086 19086
19087 19087 static void
19088 19088 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
19089 19089 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19090 19090 {
19091 19091 ASSERT(un != NULL);
19092 19092 ASSERT(mutex_owned(SD_MUTEX(un)));
19093 19093 ASSERT(bp != NULL);
19094 19094 ASSERT(xp != NULL);
19095 19095 ASSERT(pktp != NULL);
19096 19096
19097 19097 /* The target may still be running the command, so try to reset. */
19098 19098 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19099 19099 sd_reset_target(un, pktp);
19100 19100
19101 19101 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19102 19102
19103 19103 /*
19104 19104 * If pkt_reason is CMD_RESET chances are that this pkt got
19105 19105 * reset because another target on this bus caused it. The target
19106 19106 * that caused it should get CMD_TIMEOUT with pkt_statistics
19107 19107 * of STAT_TIMEOUT/STAT_DEV_RESET.
19108 19108 */
19109 19109
19110 19110 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
19111 19111 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19112 19112 }
19113 19113
19114 19114
19115 19115
19116 19116
19117 19117 /*
19118 19118 * Function: sd_pkt_reason_cmd_aborted
19119 19119 *
19120 19120 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason.
19121 19121 *
19122 19122 * Context: May be called from interrupt context
19123 19123 */
19124 19124
19125 19125 static void
19126 19126 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
19127 19127 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19128 19128 {
19129 19129 ASSERT(un != NULL);
19130 19130 ASSERT(mutex_owned(SD_MUTEX(un)));
19131 19131 ASSERT(bp != NULL);
19132 19132 ASSERT(xp != NULL);
19133 19133 ASSERT(pktp != NULL);
19134 19134
19135 19135 /* The target may still be running the command, so try to reset. */
19136 19136 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19137 19137 sd_reset_target(un, pktp);
19138 19138
19139 19139 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19140 19140
19141 19141 /*
19142 19142 * If pkt_reason is CMD_ABORTED chances are that this pkt got
19143 19143 * aborted because another target on this bus caused it. The target
19144 19144 * that caused it should get CMD_TIMEOUT with pkt_statistics
19145 19145 * of STAT_TIMEOUT/STAT_DEV_RESET.
19146 19146 */
19147 19147
19148 19148 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
19149 19149 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19150 19150 }
19151 19151
19152 19152
19153 19153
19154 19154 /*
19155 19155 * Function: sd_pkt_reason_cmd_timeout
19156 19156 *
19157 19157 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason.
19158 19158 *
19159 19159 * Context: May be called from interrupt context
19160 19160 */
19161 19161
19162 19162 static void
19163 19163 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
19164 19164 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19165 19165 {
19166 19166 ASSERT(un != NULL);
19167 19167 ASSERT(mutex_owned(SD_MUTEX(un)));
19168 19168 ASSERT(bp != NULL);
19169 19169 ASSERT(xp != NULL);
19170 19170 ASSERT(pktp != NULL);
19171 19171
19172 19172
19173 19173 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19174 19174 sd_reset_target(un, pktp);
19175 19175
19176 19176 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19177 19177
19178 19178 /*
19179 19179 * A command timeout indicates that we could not establish
19180 19180 * communication with the target, so set SD_RETRIES_FAILFAST
19181 19181 * as further retries/commands are likely to take a long time.
19182 19182 */
19183 19183 sd_retry_command(un, bp,
19184 19184 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST),
19185 19185 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19186 19186 }
19187 19187
19188 19188
19189 19189
19190 19190 /*
19191 19191 * Function: sd_pkt_reason_cmd_unx_bus_free
19192 19192 *
19193 19193 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason.
19194 19194 *
19195 19195 * Context: May be called from interrupt context
19196 19196 */
19197 19197
19198 19198 static void
19199 19199 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
19200 19200 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19201 19201 {
19202 19202 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code);
19203 19203
19204 19204 ASSERT(un != NULL);
19205 19205 ASSERT(mutex_owned(SD_MUTEX(un)));
19206 19206 ASSERT(bp != NULL);
19207 19207 ASSERT(xp != NULL);
19208 19208 ASSERT(pktp != NULL);
19209 19209
19210 19210 SD_UPDATE_ERRSTATS(un, sd_harderrs);
19211 19211 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19212 19212
19213 19213 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ?
19214 19214 sd_print_retry_msg : NULL;
19215 19215
19216 19216 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
19217 19217 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19218 19218 }
19219 19219
19220 19220
19221 19221 /*
19222 19222 * Function: sd_pkt_reason_cmd_tag_reject
19223 19223 *
19224 19224 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason.
19225 19225 *
19226 19226 * Context: May be called from interrupt context
19227 19227 */
19228 19228
19229 19229 static void
19230 19230 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
19231 19231 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19232 19232 {
19233 19233 ASSERT(un != NULL);
19234 19234 ASSERT(mutex_owned(SD_MUTEX(un)));
19235 19235 ASSERT(bp != NULL);
19236 19236 ASSERT(xp != NULL);
19237 19237 ASSERT(pktp != NULL);
19238 19238
19239 19239 SD_UPDATE_ERRSTATS(un, sd_harderrs);
19240 19240 pktp->pkt_flags = 0;
19241 19241 un->un_tagflags = 0;
19242 19242 if (un->un_f_opt_queueing == TRUE) {
19243 19243 un->un_throttle = min(un->un_throttle, 3);
19244 19244 } else {
19245 19245 un->un_throttle = 1;
19246 19246 }
19247 19247 mutex_exit(SD_MUTEX(un));
19248 19248 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
19249 19249 mutex_enter(SD_MUTEX(un));
19250 19250
19251 19251 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19252 19252
19253 19253 /* Legacy behavior not to check retry counts here. */
19254 19254 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE),
19255 19255 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19256 19256 }
19257 19257
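/*
 * A short sketch of the capability call used above: scsi_ifsetcap(9F)
 * with the "tagged-qing" capability turns tagged queuing off for one
 * target (whom == 1). The softstate mutex is dropped around the call
 * because capability calls may block; the scsi_address "ap" and the
 * lock parameter are assumptions standing in for the driver's own
 * SD_ADDRESS(un) and SD_MUTEX(un).
 */
#include <sys/ksynch.h>
#include <sys/scsi/scsi.h>

static void
disable_tagged_queuing(struct scsi_address *ap, kmutex_t *lock)
{
	mutex_exit(lock);
	/* value 0 disables the capability; whom 1 = this target only */
	(void) scsi_ifsetcap(ap, "tagged-qing", 0, 1);
	mutex_enter(lock);
}
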
19258 19258
19259 19259 /*
19260 19260 * Function: sd_pkt_reason_default
19261 19261 *
19262 19262 * Description: Default recovery actions for SCSA pkt_reason values that
19263 19263 * do not have more explicit recovery actions.
19264 19264 *
19265 19265 * Context: May be called from interrupt context
19266 19266 */
19267 19267
19268 19268 static void
19269 19269 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
19270 19270 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19271 19271 {
19272 19272 ASSERT(un != NULL);
19273 19273 ASSERT(mutex_owned(SD_MUTEX(un)));
19274 19274 ASSERT(bp != NULL);
19275 19275 ASSERT(xp != NULL);
19276 19276 ASSERT(pktp != NULL);
19277 19277
19278 19278 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19279 19279 sd_reset_target(un, pktp);
19280 19280
19281 19281 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19282 19282
19283 19283 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
19284 19284 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19285 19285 }
19286 19286
19287 19287
19288 19288
19289 19289 /*
19290 19290 * Function: sd_pkt_status_check_condition
19291 19291 *
19292 19292 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status.
19293 19293 *
19294 19294 * Context: May be called from interrupt context
19295 19295 */
19296 19296
19297 19297 static void
19298 19298 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
19299 19299 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19300 19300 {
19301 19301 ASSERT(un != NULL);
19302 19302 ASSERT(mutex_owned(SD_MUTEX(un)));
19303 19303 ASSERT(bp != NULL);
19304 19304 ASSERT(xp != NULL);
19305 19305 ASSERT(pktp != NULL);
19306 19306
19307 19307 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
19308 19308 "entry: buf:0x%p xp:0x%p\n", bp, xp);
19309 19309
19310 19310 /*
19311 19311 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
19312 19312 * command will be retried after the request sense). Otherwise, retry
19313 19313 * the command. Note: we are issuing the request sense even though the
19314 19314 * retry limit may have been reached for the failed command.
19315 19315 */
19316 19316 if (un->un_f_arq_enabled == FALSE) {
19317 19317 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
19318 19318 "no ARQ, sending request sense command\n");
19319 19319 sd_send_request_sense_command(un, bp, pktp);
19320 19320 } else {
19321 19321 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
19322 19322 		    "ARQ, retrying request sense command\n");
19323 19323 #if defined(__i386) || defined(__amd64)
19324 19324 /*
19325 19325 		 * The SD_RETRY_DELAY value needs to be adjusted here
19326 19326 		 * when SD_RETRY_DELAY changes in sddef.h.
19327 19327 */
19328 19328 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
19329 19329 		    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
19330 19330 NULL);
19331 19331 #else
19332 19332 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
19333 19333 EIO, SD_RETRY_DELAY, NULL);
19334 19334 #endif
19335 19335 }
19336 19336
19337 19337 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
19338 19338 }
19339 19339
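/*
 * For the non-ARQ path above, the driver must fetch the sense data
 * itself with a REQUEST SENSE command. As a reference, this sketch
 * builds the 6-byte CDB involved (opcode 0x03; byte 4 is the
 * allocation length). The 0x20 length used here is an assumption
 * standing in for the driver's SENSE_LENGTH.
 */
#include <stdint.h>

static void
build_request_sense_cdb(uint8_t cdb[6])
{
	cdb[0] = 0x03;		/* SCMD_REQUEST_SENSE */
	cdb[1] = 0;
	cdb[2] = 0;
	cdb[3] = 0;
	cdb[4] = 0x20;		/* allocation length (SENSE_LENGTH) */
	cdb[5] = 0;		/* control byte */
}
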
19340 19340
19341 19341 /*
19342 19342 * Function: sd_pkt_status_busy
19343 19343 *
19344 19344 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
19345 19345 *
19346 19346 * Context: May be called from interrupt context
19347 19347 */
19348 19348
19349 19349 static void
19350 19350 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
19351 19351 struct scsi_pkt *pktp)
19352 19352 {
19353 19353 ASSERT(un != NULL);
19354 19354 ASSERT(mutex_owned(SD_MUTEX(un)));
19355 19355 ASSERT(bp != NULL);
19356 19356 ASSERT(xp != NULL);
19357 19357 ASSERT(pktp != NULL);
19358 19358
19359 19359 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19360 19360 "sd_pkt_status_busy: entry\n");
19361 19361
19362 19362 /* If retries are exhausted, just fail the command. */
19363 19363 if (xp->xb_retry_count >= un->un_busy_retry_count) {
19364 19364 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
19365 19365 "device busy too long\n");
19366 19366 sd_return_failed_command(un, bp, EIO);
19367 19367 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19368 19368 "sd_pkt_status_busy: exit\n");
19369 19369 return;
19370 19370 }
19371 19371 xp->xb_retry_count++;
19372 19372
19373 19373 /*
19374 19374 * Try to reset the target. However, we do not want to perform
19375 19375 * more than one reset if the device continues to fail. The reset
19376 19376 * will be performed when the retry count reaches the reset
19377 19377 * threshold. This threshold should be set such that at least
19378 19378 * one retry is issued before the reset is performed.
19379 19379 */
19380 19380 if (xp->xb_retry_count ==
19381 19381 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
19382 19382 int rval = 0;
19383 19383 mutex_exit(SD_MUTEX(un));
19384 19384 if (un->un_f_allow_bus_device_reset == TRUE) {
19385 19385 /*
19386 19386 * First try to reset the LUN; if we cannot then
19387 19387 * try to reset the target.
19388 19388 */
19389 19389 if (un->un_f_lun_reset_enabled == TRUE) {
19390 19390 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19391 19391 "sd_pkt_status_busy: RESET_LUN\n");
19392 19392 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
19393 19393 }
19394 19394 if (rval == 0) {
19395 19395 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19396 19396 "sd_pkt_status_busy: RESET_TARGET\n");
19397 19397 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
19398 19398 }
19399 19399 }
19400 19400 if (rval == 0) {
19401 19401 /*
19402 19402 * If the RESET_LUN and/or RESET_TARGET failed,
19403 19403 * try RESET_ALL
19404 19404 */
19405 19405 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19406 19406 "sd_pkt_status_busy: RESET_ALL\n");
19407 19407 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);
19408 19408 }
19409 19409 mutex_enter(SD_MUTEX(un));
19410 19410 if (rval == 0) {
19411 19411 /*
19412 19412 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed.
19413 19413 * At this point we give up & fail the command.
19414 19414 */
19415 19415 sd_return_failed_command(un, bp, EIO);
19416 19416 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19417 19417 "sd_pkt_status_busy: exit (failed cmd)\n");
19418 19418 return;
19419 19419 }
19420 19420 }
19421 19421
19422 19422 /*
19423 19423 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as
19424 19424 * we have already checked the retry counts above.
19425 19425 */
19426 19426 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL,
19427 19427 EIO, un->un_busy_timeout, NULL);
19428 19428
19429 19429 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19430 19430 "sd_pkt_status_busy: exit\n");
19431 19431 }
19432 19432
19433 19433
19434 19434 /*
19435 19435 * Function: sd_pkt_status_reservation_conflict
19436 19436 *
19437 19437 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI
19438 19438 * command status.
19439 19439 *
19440 19440 * Context: May be called from interrupt context
19441 19441 */
19442 19442
19443 19443 static void
19444 19444 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp,
19445 19445 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19446 19446 {
19447 19447 ASSERT(un != NULL);
19448 19448 ASSERT(mutex_owned(SD_MUTEX(un)));
19449 19449 ASSERT(bp != NULL);
19450 19450 ASSERT(xp != NULL);
19451 19451 ASSERT(pktp != NULL);
19452 19452
19453 19453 /*
19454 19454 * If the command was PERSISTENT_RESERVATION_[IN|OUT] then reservation
19455 19455 * conflict could be due to various reasons like incorrect keys, not
19456 19456 * registered or not reserved etc. So, we return EACCES to the caller.
19457 19457 */
19458 19458 if (un->un_reservation_type == SD_SCSI3_RESERVATION) {
19459 19459 int cmd = SD_GET_PKT_OPCODE(pktp);
19460 19460 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) ||
19461 19461 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) {
19462 19462 sd_return_failed_command(un, bp, EACCES);
19463 19463 return;
19464 19464 }
19465 19465 }
19466 19466
19467 19467 un->un_resvd_status |= SD_RESERVATION_CONFLICT;
19468 19468
19469 19469 if ((un->un_resvd_status & SD_FAILFAST) != 0) {
19470 19470 if (sd_failfast_enable != 0) {
19471 19471 /* By definition, we must panic here.... */
19472 19472 sd_panic_for_res_conflict(un);
19473 19473 /*NOTREACHED*/
19474 19474 }
19475 19475 SD_ERROR(SD_LOG_IO, un,
19476 19476 "sd_handle_resv_conflict: Disk Reserved\n");
19477 19477 sd_return_failed_command(un, bp, EACCES);
19478 19478 return;
19479 19479 }
19480 19480
19481 19481 /*
19482 19482 * 1147670: retry only if sd_retry_on_reservation_conflict
19483 19483 * property is set (default is 1). Retries will not succeed
19484 19484 * on a disk reserved by another initiator. HA systems
19485 19485 * may reset this via sd.conf to avoid these retries.
19486 19486 *
19487 19487 * Note: The legacy return code for this failure is EIO, however EACCES
19488 19488 * seems more appropriate for a reservation conflict.
19489 19489 */
19490 19490 if (sd_retry_on_reservation_conflict == 0) {
19491 19491 SD_ERROR(SD_LOG_IO, un,
19492 19492 "sd_handle_resv_conflict: Device Reserved\n");
19493 19493 sd_return_failed_command(un, bp, EIO);
19494 19494 return;
19495 19495 }
19496 19496
19497 19497 /*
19498 19498 * Retry the command if we can.
19499 19499 *
19500 19500 * Note: The legacy return code for this failure is EIO, however EACCES
19501 19501 * seems more appropriate for a reservation conflict.
19502 19502 */
19503 19503 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
19504 19504 (clock_t)2, NULL);
19505 19505 }
19506 19506
19507 19507
19508 19508
19509 19509 /*
19510 19510 * Function: sd_pkt_status_qfull
19511 19511 *
19512 19512 * Description: Handle a QUEUE FULL condition from the target. This can
19513 19513 * occur if the HBA does not handle the queue full condition.
19514 19514  *		(Basically this means third-party HBAs, as Sun HBAs will
19515 19515  *		handle the queue full condition.)  Note that if there are
19516 19516  *		some commands already in the transport, then the queue full
19517 19517  *		has occurred because the queue for this nexus is actually
19518 19518  *		full. If there are no commands in the transport, then the
19519 19519  *		queue full results from some other initiator or LUN
19520 19520  *		consuming all the resources at the target.
19521 19521 *
19522 19522 * Context: May be called from interrupt context
19523 19523 */
19524 19524
19525 19525 static void
19526 19526 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
19527 19527 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19528 19528 {
19529 19529 ASSERT(un != NULL);
19530 19530 ASSERT(mutex_owned(SD_MUTEX(un)));
19531 19531 ASSERT(bp != NULL);
19532 19532 ASSERT(xp != NULL);
19533 19533 ASSERT(pktp != NULL);
19534 19534
19535 19535 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19536 19536 "sd_pkt_status_qfull: entry\n");
19537 19537
19538 19538 /*
19539 19539 * Just lower the QFULL throttle and retry the command. Note that
19540 19540 * we do not limit the number of retries here.
19541 19541 */
19542 19542 sd_reduce_throttle(un, SD_THROTTLE_QFULL);
19543 19543 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0,
19544 19544 SD_RESTART_TIMEOUT, NULL);
19545 19545
19546 19546 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19547 19547 "sd_pkt_status_qfull: exit\n");
19548 19548 }
19549 19549
19550 19550
19551 19551 /*
19552 19552 * Function: sd_reset_target
19553 19553 *
19554 19554 * Description: Issue a scsi_reset(9F), with either RESET_LUN,
19555 19555 * RESET_TARGET, or RESET_ALL.
19556 19556 *
19557 19557 * Context: May be called under interrupt context.
19558 19558 */
19559 19559
19560 19560 static void
19561 19561 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp)
19562 19562 {
19563 19563 int rval = 0;
19564 19564
19565 19565 ASSERT(un != NULL);
19566 19566 ASSERT(mutex_owned(SD_MUTEX(un)));
19567 19567 ASSERT(pktp != NULL);
19568 19568
19569 19569 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n");
19570 19570
19571 19571 /*
19572 19572 * No need to reset if the transport layer has already done so.
19573 19573 */
19574 19574 if ((pktp->pkt_statistics &
19575 19575 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) {
19576 19576 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19577 19577 "sd_reset_target: no reset\n");
19578 19578 return;
19579 19579 }
19580 19580
19581 19581 mutex_exit(SD_MUTEX(un));
19582 19582
19583 19583 if (un->un_f_allow_bus_device_reset == TRUE) {
19584 19584 if (un->un_f_lun_reset_enabled == TRUE) {
19585 19585 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19586 19586 "sd_reset_target: RESET_LUN\n");
19587 19587 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
19588 19588 }
19589 19589 if (rval == 0) {
19590 19590 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19591 19591 "sd_reset_target: RESET_TARGET\n");
19592 19592 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
19593 19593 }
19594 19594 }
19595 19595
19596 19596 if (rval == 0) {
19597 19597 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19598 19598 "sd_reset_target: RESET_ALL\n");
19599 19599 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
19600 19600 }
19601 19601
19602 19602 mutex_enter(SD_MUTEX(un));
19603 19603
19604 19604 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n");
19605 19605 }
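
The escalation order (LUN reset, then target reset, then a full bus reset) is easier to see with scsi_reset(9F) stubbed out. A sketch, illustration only; the integer levels stand in for the RESET_* constants:

	static int
	reset_escalate(int allow_bus_device_reset, int lun_reset_enabled,
	    int (*do_reset)(int level))	/* stand-in for scsi_reset(9F) */
	{
		int rval = 0;

		if (allow_bus_device_reset) {
			if (lun_reset_enabled)
				rval = do_reset(0);	/* RESET_LUN */
			if (rval == 0)
				rval = do_reset(1);	/* RESET_TARGET */
		}
		if (rval == 0)
			rval = do_reset(2);		/* RESET_ALL */
		return (rval);
	}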
19606 19606
19607 19607 /*
19608 19608 * Function: sd_target_change_task
19609 19609 *
19610 19610 * Description: Handle dynamic target change
19611 19611 *
19612 19612 * Context: Executes in a taskq() thread context
19613 19613 */
19614 19614 static void
19615 19615 sd_target_change_task(void *arg)
19616 19616 {
19617 19617 struct sd_lun *un = arg;
19618 19618 uint64_t capacity;
19619 19619 diskaddr_t label_cap;
19620 19620 uint_t lbasize;
19621 19621 sd_ssc_t *ssc;
19622 19622
19623 19623 ASSERT(un != NULL);
19624 19624 ASSERT(!mutex_owned(SD_MUTEX(un)));
19625 19625
19626 19626 if ((un->un_f_blockcount_is_valid == FALSE) ||
19627 19627 (un->un_f_tgt_blocksize_is_valid == FALSE)) {
19628 19628 return;
19629 19629 }
19630 19630
19631 19631 ssc = sd_ssc_init(un);
19632 19632
19633 19633 if (sd_send_scsi_READ_CAPACITY(ssc, &capacity,
19634 19634 &lbasize, SD_PATH_DIRECT) != 0) {
19635 19635 SD_ERROR(SD_LOG_ERROR, un,
19636 19636 "sd_target_change_task: fail to read capacity\n");
19637 19637 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19638 19638 goto task_exit;
19639 19639 }
19640 19640
19641 19641 mutex_enter(SD_MUTEX(un));
19642 19642 if (capacity <= un->un_blockcount) {
19643 19643 mutex_exit(SD_MUTEX(un));
19644 19644 goto task_exit;
19645 19645 }
19646 19646
19647 19647 sd_update_block_info(un, lbasize, capacity);
19648 19648 mutex_exit(SD_MUTEX(un));
19649 19649
19650 19650 /*
19651 19651 * If lun is EFI labeled and lun capacity is greater than the
19652 19652 	 * capacity contained in the label, log a sysevent.
19653 19653 */
19654 19654 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
19655 19655 (void*)SD_PATH_DIRECT) == 0) {
19656 19656 mutex_enter(SD_MUTEX(un));
19657 19657 if (un->un_f_blockcount_is_valid &&
19658 19658 un->un_blockcount > label_cap) {
19659 19659 mutex_exit(SD_MUTEX(un));
19660 19660 sd_log_lun_expansion_event(un, KM_SLEEP);
19661 19661 } else {
19662 19662 mutex_exit(SD_MUTEX(un));
19663 19663 }
19664 19664 }
19665 19665
19666 19666 task_exit:
19667 19667 sd_ssc_fini(ssc);
19668 19668 }
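
Stripped of locking, the task's decision is: adopt the new capacity only when the device reports more blocks than we know about, and raise a dynamic-lun-expansion (DLE) event only when the device has outgrown its EFI label. A sketch, illustration only:

	#include <stdint.h>

	/* Returns 1 if a DLE sysevent should be logged. */
	static int
	capacity_changed(uint64_t dev_capacity, uint64_t cur_blockcount,
	    uint64_t label_capacity)
	{
		if (dev_capacity <= cur_blockcount)
			return (0);	/* no growth: nothing to do */
		/* the driver updates its block info here */
		return (dev_capacity > label_capacity);
	}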
19669 19669
19670 19670
19671 19671 /*
19672 19672 * Function: sd_log_dev_status_event
19673 19673 *
19674 19674 * Description: Log EC_dev_status sysevent
19675 19675 *
19676 19676 * Context: Never called from interrupt context
19677 19677 */
19678 19678 static void
19679 19679 sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag)
19680 19680 {
19681 19681 int err;
19682 19682 char *path;
19683 19683 nvlist_t *attr_list;
19684 19684
19685 19685 /* Allocate and build sysevent attribute list */
19686 19686 err = nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, km_flag);
19687 19687 if (err != 0) {
19688 19688 SD_ERROR(SD_LOG_ERROR, un,
19689 19689 "sd_log_dev_status_event: fail to allocate space\n");
19690 19690 return;
19691 19691 }
19692 19692
19693 19693 path = kmem_alloc(MAXPATHLEN, km_flag);
19694 19694 if (path == NULL) {
19695 19695 nvlist_free(attr_list);
19696 19696 SD_ERROR(SD_LOG_ERROR, un,
19697 19697 "sd_log_dev_status_event: fail to allocate space\n");
19698 19698 return;
19699 19699 }
19700 19700 /*
19701 19701 	 * Add a path attribute to identify the lun.
19702 19702 	 * The path of minor node 'a' is used as the sysevent attribute.
19703 19703 */
19704 19704 (void) snprintf(path, MAXPATHLEN, "/devices");
19705 19705 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path));
19706 19706 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path),
19707 19707 ":a");
19708 19708
19709 19709 err = nvlist_add_string(attr_list, DEV_PHYS_PATH, path);
19710 19710 if (err != 0) {
19711 19711 nvlist_free(attr_list);
19712 19712 kmem_free(path, MAXPATHLEN);
19713 19713 SD_ERROR(SD_LOG_ERROR, un,
19714 19714 "sd_log_dev_status_event: fail to add attribute\n");
19715 19715 return;
19716 19716 }
19717 19717
19718 19718 /* Log dynamic lun expansion sysevent */
19719 19719 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS,
19720 19720 esc, attr_list, NULL, km_flag);
19721 19721 if (err != DDI_SUCCESS) {
19722 19722 SD_ERROR(SD_LOG_ERROR, un,
19723 19723 "sd_log_dev_status_event: fail to log sysevent\n");
19724 19724 }
19725 19725
19726 19726 nvlist_free(attr_list);
19727 19727 kmem_free(path, MAXPATHLEN);
19728 19728 }
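
The DEV_PHYS_PATH attribute is simply the /devices form of the device path with the ':a' minor suffix appended. A self-contained user-space analog (the device path shown is a made-up example; the driver obtains the real one from ddi_pathname()):

	#include <stdio.h>

	int
	main(void)
	{
		char path[1024];
		const char *devi_path = "/pci@0,0/scsi@10/sd@1,0"; /* example */

		(void) snprintf(path, sizeof (path), "/devices%s:a", devi_path);
		(void) printf("%s\n", path);	/* value stored as DEV_PHYS_PATH */
		return (0);
	}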
19729 19729
19730 19730
19731 19731 /*
19732 19732 * Function: sd_log_lun_expansion_event
19733 19733 *
19734 19734  * Description: Log a lun expansion sysevent
19735 19735 *
19736 19736 * Context: Never called from interrupt context
19737 19737 */
19738 19738 static void
19739 19739 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag)
19740 19740 {
19741 19741 sd_log_dev_status_event(un, ESC_DEV_DLE, km_flag);
19742 19742 }
19743 19743
19744 19744
19745 19745 /*
19746 19746 * Function: sd_log_eject_request_event
19747 19747 *
19748 19748 * Description: Log eject request sysevent
19749 19749 *
19750 19750 * Context: Never called from interrupt context
19751 19751 */
19752 19752 static void
19753 19753 sd_log_eject_request_event(struct sd_lun *un, int km_flag)
19754 19754 {
19755 19755 sd_log_dev_status_event(un, ESC_DEV_EJECT_REQUEST, km_flag);
19756 19756 }
19757 19757
19758 19758
19759 19759 /*
19760 19760 * Function: sd_media_change_task
19761 19761 *
19762 19762  * Description: Recovery action to bring a CDROM back to an available state.
19763 19763 *
19764 19764 * Context: Executes in a taskq() thread context
19765 19765 */
19766 19766
19767 19767 static void
19768 19768 sd_media_change_task(void *arg)
19769 19769 {
19770 19770 struct scsi_pkt *pktp = arg;
19771 19771 struct sd_lun *un;
19772 19772 struct buf *bp;
19773 19773 struct sd_xbuf *xp;
19774 19774 int err = 0;
19775 19775 int retry_count = 0;
19776 19776 int retry_limit = SD_UNIT_ATTENTION_RETRY/10;
19777 19777 struct sd_sense_info si;
19778 19778
19779 19779 ASSERT(pktp != NULL);
19780 19780 bp = (struct buf *)pktp->pkt_private;
19781 19781 ASSERT(bp != NULL);
19782 19782 xp = SD_GET_XBUF(bp);
19783 19783 ASSERT(xp != NULL);
19784 19784 un = SD_GET_UN(bp);
19785 19785 ASSERT(un != NULL);
19786 19786 ASSERT(!mutex_owned(SD_MUTEX(un)));
19787 19787 ASSERT(un->un_f_monitor_media_state);
19788 19788
19789 19789 si.ssi_severity = SCSI_ERR_INFO;
19790 19790 si.ssi_pfa_flag = FALSE;
19791 19791
19792 19792 	/*
19793 19793 	 * When a reset is issued on a CDROM, it takes a long time to
19794 19794 	 * recover. The first few attempts to read capacity and other
19795 19795 	 * unit-attention handling fail (with an ASC of 0x4 and an ASCQ
19796 19796 	 * of 0x1). In that case we want to retry long enough, while
19797 19797 	 * limiting the retries for genuine failures such as no media
19798 19798 	 * in the drive.
19799 19799 	 */
19800 19800 while (retry_count++ < retry_limit) {
19801 19801 if ((err = sd_handle_mchange(un)) == 0) {
19802 19802 break;
19803 19803 }
19804 19804 if (err == EAGAIN) {
19805 19805 retry_limit = SD_UNIT_ATTENTION_RETRY;
19806 19806 }
19807 19807 /* Sleep for 0.5 sec. & try again */
19808 19808 delay(drv_usectohz(500000));
19809 19809 }
19810 19810
19811 19811 /*
19812 19812 * Dispatch (retry or fail) the original command here,
19813 19813 * along with appropriate console messages....
19814 19814 *
19815 19815 * Must grab the mutex before calling sd_retry_command,
19816 19816 * sd_print_sense_msg and sd_return_failed_command.
19817 19817 */
19818 19818 mutex_enter(SD_MUTEX(un));
19819 19819 if (err != SD_CMD_SUCCESS) {
19820 19820 SD_UPDATE_ERRSTATS(un, sd_harderrs);
19821 19821 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
19822 19822 si.ssi_severity = SCSI_ERR_FATAL;
19823 19823 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
19824 19824 sd_return_failed_command(un, bp, EIO);
19825 19825 } else {
19826 19826 sd_retry_command(un, bp, SD_RETRIES_UA, sd_print_sense_msg,
19827 19827 &si, EIO, (clock_t)0, NULL);
19828 19828 }
19829 19829 mutex_exit(SD_MUTEX(un));
19830 19830 }
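
The retry loop starts with a short limit and widens it only when the device reports it is becoming ready (EAGAIN). A condensed sketch with the handler stubbed; the value 40 for SD_UNIT_ATTENTION_RETRY is an assumption here:

	#include <errno.h>

	#define	UA_RETRY	40	/* assumed SD_UNIT_ATTENTION_RETRY */

	static int
	media_change_retry(int (*handle_mchange)(void))
	{
		int err = -1;
		int retry_count = 0;
		int retry_limit = UA_RETRY / 10;	/* short limit by default */

		while (retry_count++ < retry_limit) {
			if ((err = handle_mchange()) == 0)
				break;			/* recovered */
			if (err == EAGAIN)		/* becoming ready */
				retry_limit = UA_RETRY;	/* allow more retries */
			/* the driver sleeps 0.5 s here between attempts */
		}
		return (err);
	}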
19831 19831
19832 19832
19833 19833
19834 19834 /*
19835 19835 * Function: sd_handle_mchange
19836 19836 *
19837 19837 * Description: Perform geometry validation & other recovery when CDROM
19838 19838 * has been removed from drive.
19839 19839 *
19840 19840 * Return Code: 0 for success
19841 19841 * errno-type return code of either sd_send_scsi_DOORLOCK() or
19842 19842 * sd_send_scsi_READ_CAPACITY()
19843 19843 *
19844 19844 * Context: Executes in a taskq() thread context
19845 19845 */
19846 19846
19847 19847 static int
19848 19848 sd_handle_mchange(struct sd_lun *un)
19849 19849 {
19850 19850 uint64_t capacity;
19851 19851 uint32_t lbasize;
19852 19852 int rval;
19853 19853 sd_ssc_t *ssc;
19854 19854
19855 19855 ASSERT(!mutex_owned(SD_MUTEX(un)));
19856 19856 ASSERT(un->un_f_monitor_media_state);
19857 19857
19858 19858 ssc = sd_ssc_init(un);
19859 19859 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
19860 19860 SD_PATH_DIRECT_PRIORITY);
19861 19861
19862 19862 if (rval != 0)
19863 19863 goto failed;
19864 19864
19865 19865 mutex_enter(SD_MUTEX(un));
19866 19866 sd_update_block_info(un, lbasize, capacity);
19867 19867
19868 19868 if (un->un_errstats != NULL) {
19869 19869 struct sd_errstats *stp =
19870 19870 (struct sd_errstats *)un->un_errstats->ks_data;
19871 19871 stp->sd_capacity.value.ui64 = (uint64_t)
19872 19872 ((uint64_t)un->un_blockcount *
19873 19873 (uint64_t)un->un_tgt_blocksize);
19874 19874 }
19875 19875
19876 19876 /*
19877 19877 * Check if the media in the device is writable or not
19878 19878 */
19879 19879 if (ISCD(un)) {
19880 19880 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY);
19881 19881 }
19882 19882
19883 19883 /*
19884 19884 * Note: Maybe let the strategy/partitioning chain worry about getting
19885 19885 * valid geometry.
19886 19886 */
19887 19887 mutex_exit(SD_MUTEX(un));
19888 19888 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
19889 19889
19890 19890
19891 19891 if (cmlb_validate(un->un_cmlbhandle, 0,
19892 19892 (void *)SD_PATH_DIRECT_PRIORITY) != 0) {
19893 19893 sd_ssc_fini(ssc);
19894 19894 return (EIO);
19895 19895 } else {
19896 19896 if (un->un_f_pkstats_enabled) {
19897 19897 sd_set_pstats(un);
19898 19898 SD_TRACE(SD_LOG_IO_PARTITION, un,
19899 19899 "sd_handle_mchange: un:0x%p pstats created and "
19900 19900 "set\n", un);
19901 19901 }
19902 19902 }
19903 19903
19904 19904 /*
19905 19905 * Try to lock the door
19906 19906 */
19907 19907 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
19908 19908 SD_PATH_DIRECT_PRIORITY);
19909 19909 failed:
19910 19910 if (rval != 0)
19911 19911 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19912 19912 sd_ssc_fini(ssc);
19913 19913 return (rval);
19914 19914 }
19915 19915
19916 19916
19917 19917 /*
19918 19918 * Function: sd_send_scsi_DOORLOCK
19919 19919 *
19920 19920 * Description: Issue the scsi DOOR LOCK command
19921 19921 *
19922 19922 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
19923 19923 * structure for this target.
19924 19924 * flag - SD_REMOVAL_ALLOW
19925 19925 * SD_REMOVAL_PREVENT
19926 19926 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19927 19927 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19928 19928 * to use the USCSI "direct" chain and bypass the normal
19929 19929 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
19930 19930 * command is issued as part of an error recovery action.
19931 19931 *
19932 19932 * Return Code: 0 - Success
19933 19933 * errno return code from sd_ssc_send()
19934 19934 *
19935 19935 * Context: Can sleep.
19936 19936 */
19937 19937
19938 19938 static int
19939 19939 sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag)
19940 19940 {
19941 19941 struct scsi_extended_sense sense_buf;
19942 19942 union scsi_cdb cdb;
19943 19943 struct uscsi_cmd ucmd_buf;
19944 19944 int status;
19945 19945 struct sd_lun *un;
19946 19946
19947 19947 ASSERT(ssc != NULL);
19948 19948 un = ssc->ssc_un;
19949 19949 ASSERT(un != NULL);
19950 19950 ASSERT(!mutex_owned(SD_MUTEX(un)));
19951 19951
19952 19952 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un);
19953 19953
19954 19954 /* already determined doorlock is not supported, fake success */
19955 19955 if (un->un_f_doorlock_supported == FALSE) {
19956 19956 return (0);
19957 19957 }
19958 19958
19959 19959 /*
19960 19960 * If we are ejecting and see an SD_REMOVAL_PREVENT
19961 19961 * ignore the command so we can complete the eject
19962 19962 * operation.
19963 19963 */
19964 19964 if (flag == SD_REMOVAL_PREVENT) {
19965 19965 mutex_enter(SD_MUTEX(un));
19966 19966 if (un->un_f_ejecting == TRUE) {
19967 19967 mutex_exit(SD_MUTEX(un));
19968 19968 return (EAGAIN);
19969 19969 }
19970 19970 mutex_exit(SD_MUTEX(un));
19971 19971 }
19972 19972
19973 19973 bzero(&cdb, sizeof (cdb));
19974 19974 bzero(&ucmd_buf, sizeof (ucmd_buf));
19975 19975
19976 19976 cdb.scc_cmd = SCMD_DOORLOCK;
19977 19977 cdb.cdb_opaque[4] = (uchar_t)flag;
19978 19978
19979 19979 ucmd_buf.uscsi_cdb = (char *)&cdb;
19980 19980 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
19981 19981 ucmd_buf.uscsi_bufaddr = NULL;
19982 19982 ucmd_buf.uscsi_buflen = 0;
19983 19983 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19984 19984 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
19985 19985 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
19986 19986 ucmd_buf.uscsi_timeout = 15;
19987 19987
19988 19988 SD_TRACE(SD_LOG_IO, un,
19989 19989 "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n");
19990 19990
19991 19991 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19992 19992 UIO_SYSSPACE, path_flag);
19993 19993
19994 19994 if (status == 0)
19995 19995 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
19996 19996
19997 19997 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) &&
19998 19998 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19999 19999 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) {
20000 20000 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
20001 20001
20002 20002 /* fake success and skip subsequent doorlock commands */
20003 20003 un->un_f_doorlock_supported = FALSE;
20004 20004 return (0);
20005 20005 }
20006 20006
20007 20007 return (status);
20008 20008 }
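
The CDB built above is the 6-byte PREVENT ALLOW MEDIUM REMOVAL command, with the prevent/allow flag carried in byte 4. A sketch of the same layout:

	#include <stdint.h>

	static void
	build_doorlock_cdb(uint8_t cdb[6], int prevent)
	{
		cdb[0] = 0x1e;			/* PREVENT ALLOW MEDIUM REMOVAL */
		cdb[1] = cdb[2] = cdb[3] = 0;
		cdb[4] = prevent ? 1 : 0;	/* SD_REMOVAL_PREVENT / _ALLOW */
		cdb[5] = 0;			/* control byte */
	}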
20009 20009
20010 20010 /*
20011 20011 * Function: sd_send_scsi_READ_CAPACITY
20012 20012 *
20013 20013 * Description: This routine uses the scsi READ CAPACITY command to determine
20014 20014 * the device capacity in number of blocks and the device native
20015 20015 * block size. If this function returns a failure, then the
20016 20016 * values in *capp and *lbap are undefined. If the capacity
20017 20017 * returned is 0xffffffff then the lun is too large for a
20018 20018 * normal READ CAPACITY command and the results of a
20019 20019 * READ CAPACITY 16 will be used instead.
20020 20020 *
20021 20021 * Arguments: ssc - ssc contains ptr to soft state struct for the target
20022 20022 * capp - ptr to unsigned 64-bit variable to receive the
20023 20023 * capacity value from the command.
20024 20024  *			lbap - ptr to unsigned 32-bit variable to receive the
20025 20025 * block size value from the command
20026 20026 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20027 20027 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20028 20028 * to use the USCSI "direct" chain and bypass the normal
20029 20029 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
20030 20030 * command is issued as part of an error recovery action.
20031 20031 *
20032 20032 * Return Code: 0 - Success
20033 20033 * EIO - IO error
20034 20034 * EACCES - Reservation conflict detected
20035 20035 * EAGAIN - Device is becoming ready
20036 20036 * errno return code from sd_ssc_send()
20037 20037 *
20038 20038 * Context: Can sleep. Blocks until command completes.
20039 20039 */
20040 20040
20041 20041 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity)
20042 20042
20043 20043 static int
20044 20044 sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap,
20045 20045 int path_flag)
20046 20046 {
20047 20047 struct scsi_extended_sense sense_buf;
20048 20048 struct uscsi_cmd ucmd_buf;
20049 20049 union scsi_cdb cdb;
20050 20050 uint32_t *capacity_buf;
20051 20051 uint64_t capacity;
20052 20052 uint32_t lbasize;
20053 20053 uint32_t pbsize;
20054 20054 int status;
20055 20055 struct sd_lun *un;
20056 20056
20057 20057 ASSERT(ssc != NULL);
20058 20058
20059 20059 un = ssc->ssc_un;
20060 20060 ASSERT(un != NULL);
20061 20061 ASSERT(!mutex_owned(SD_MUTEX(un)));
20062 20062 ASSERT(capp != NULL);
20063 20063 ASSERT(lbap != NULL);
20064 20064
20065 20065 SD_TRACE(SD_LOG_IO, un,
20066 20066 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
20067 20067
20068 20068 /*
20069 20069 * First send a READ_CAPACITY command to the target.
20070 20070 * (This command is mandatory under SCSI-2.)
20071 20071 *
20072 20072 * Set up the CDB for the READ_CAPACITY command. The Partial
20073 20073 * Medium Indicator bit is cleared. The address field must be
20074 20074 * zero if the PMI bit is zero.
20075 20075 */
20076 20076 bzero(&cdb, sizeof (cdb));
20077 20077 bzero(&ucmd_buf, sizeof (ucmd_buf));
20078 20078
20079 20079 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP);
20080 20080
20081 20081 cdb.scc_cmd = SCMD_READ_CAPACITY;
20082 20082
20083 20083 ucmd_buf.uscsi_cdb = (char *)&cdb;
20084 20084 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
20085 20085 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf;
20086 20086 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE;
20087 20087 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20088 20088 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
20089 20089 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20090 20090 ucmd_buf.uscsi_timeout = 60;
20091 20091
20092 20092 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20093 20093 UIO_SYSSPACE, path_flag);
20094 20094
20095 20095 switch (status) {
20096 20096 case 0:
20097 20097 /* Return failure if we did not get valid capacity data. */
20098 20098 if (ucmd_buf.uscsi_resid != 0) {
20099 20099 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20100 20100 "sd_send_scsi_READ_CAPACITY received invalid "
20101 20101 "capacity data");
20102 20102 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
20103 20103 return (EIO);
20104 20104 }
20105 20105 /*
20106 20106 * Read capacity and block size from the READ CAPACITY 10 data.
20107 20107 * This data may be adjusted later due to device specific
20108 20108 * issues.
20109 20109 *
20110 20110 * According to the SCSI spec, the READ CAPACITY 10
20111 20111 * command returns the following:
20112 20112 *
20113 20113 * bytes 0-3: Maximum logical block address available.
20114 20114 * (MSB in byte:0 & LSB in byte:3)
20115 20115 *
20116 20116 * bytes 4-7: Block length in bytes
20117 20117 * (MSB in byte:4 & LSB in byte:7)
20118 20118 *
20119 20119 */
20120 20120 capacity = BE_32(capacity_buf[0]);
20121 20121 lbasize = BE_32(capacity_buf[1]);
20122 20122
20123 20123 /*
20124 20124 * Done with capacity_buf
20125 20125 */
20126 20126 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
20127 20127
20128 20128 /*
20129 20129 * if the reported capacity is set to all 0xf's, then
20130 20130 * this disk is too large and requires SBC-2 commands.
20131 20131 * Reissue the request using READ CAPACITY 16.
20132 20132 */
20133 20133 if (capacity == 0xffffffff) {
20134 20134 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
20135 20135 status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity,
20136 20136 &lbasize, &pbsize, path_flag);
20137 20137 if (status != 0) {
20138 20138 return (status);
20139 20139 } else {
20140 20140 goto rc16_done;
20141 20141 }
20142 20142 }
20143 20143 break; /* Success! */
20144 20144 case EIO:
20145 20145 switch (ucmd_buf.uscsi_status) {
20146 20146 case STATUS_RESERVATION_CONFLICT:
20147 20147 status = EACCES;
20148 20148 break;
20149 20149 case STATUS_CHECK:
20150 20150 /*
20151 20151 * Check condition; look for ASC/ASCQ of 0x04/0x01
20152 20152 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
20153 20153 */
20154 20154 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20155 20155 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
20156 20156 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
20157 20157 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
20158 20158 return (EAGAIN);
20159 20159 }
20160 20160 break;
20161 20161 default:
20162 20162 break;
20163 20163 }
20164 20164 /* FALLTHRU */
20165 20165 default:
20166 20166 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
20167 20167 return (status);
20168 20168 }
20169 20169
20170 20170 /*
20171 20171 * Some ATAPI CD-ROM drives report inaccurate LBA size values
20172 20172 * (2352 and 0 are common) so for these devices always force the value
20173 20173 * to 2048 as required by the ATAPI specs.
20174 20174 */
20175 20175 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
20176 20176 lbasize = 2048;
20177 20177 }
20178 20178
20179 20179 /*
20180 20180 * Get the maximum LBA value from the READ CAPACITY data.
20181 20181 * Here we assume that the Partial Medium Indicator (PMI) bit
20182 20182 * was cleared when issuing the command. This means that the LBA
20183 20183 * returned from the device is the LBA of the last logical block
20184 20184 * on the logical unit. The actual logical block count will be
20185 20185 * this value plus one.
20186 20186 */
20187 20187 capacity += 1;
20188 20188
20189 20189 /*
20190 20190 * Currently, for removable media, the capacity is saved in terms
20191 20191 * of un->un_sys_blocksize, so scale the capacity value to reflect this.
20192 20192 */
20193 20193 if (un->un_f_has_removable_media)
20194 20194 capacity *= (lbasize / un->un_sys_blocksize);
20195 20195
20196 20196 rc16_done:
20197 20197
20198 20198 /*
20199 20199 * Copy the values from the READ CAPACITY command into the space
20200 20200 * provided by the caller.
20201 20201 */
20202 20202 *capp = capacity;
20203 20203 *lbap = lbasize;
20204 20204
20205 20205 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
20206 20206 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
20207 20207
20208 20208 /*
20209 20209 * Both the lbasize and capacity from the device must be nonzero,
20210 20210 * otherwise we assume that the values are not valid and return
20211 20211 * failure to the caller. (4203735)
20212 20212 */
20213 20213 if ((capacity == 0) || (lbasize == 0)) {
20214 20214 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20215 20215 "sd_send_scsi_READ_CAPACITY received invalid value "
20216 20216 "capacity %llu lbasize %d", capacity, lbasize);
20217 20217 return (EIO);
20218 20218 }
20219 20219 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20220 20220 return (0);
20221 20221 }
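
The byte layout described in the comments above decodes as follows. A user-space sketch of READ CAPACITY(10) data handling, including the 0xffffffff escape to READ CAPACITY(16):

	#include <stdint.h>
	#include <stdio.h>

	static void
	decode_read_capacity10(const uint8_t d[8])
	{
		/* bytes 0-3: last LBA (big-endian); bytes 4-7: block length */
		uint32_t last_lba = ((uint32_t)d[0] << 24) | ((uint32_t)d[1] << 16) |
		    ((uint32_t)d[2] << 8) | d[3];
		uint32_t lbasize = ((uint32_t)d[4] << 24) | ((uint32_t)d[5] << 16) |
		    ((uint32_t)d[6] << 8) | d[7];

		if (last_lba == 0xffffffff) {
			(void) printf("capacity needs READ CAPACITY(16)\n");
			return;
		}
		/* PMI=0: the device reports the last LBA, so capacity is +1 */
		(void) printf("%llu blocks of %u bytes\n",
		    (unsigned long long)last_lba + 1, lbasize);
	}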
20222 20222
20223 20223 /*
20224 20224 * Function: sd_send_scsi_READ_CAPACITY_16
20225 20225 *
20226 20226 * Description: This routine uses the scsi READ CAPACITY 16 command to
20227 20227 * determine the device capacity in number of blocks and the
20228 20228 * device native block size. If this function returns a failure,
20229 20229 * then the values in *capp and *lbap are undefined.
20230 20230 * This routine should be called by sd_send_scsi_READ_CAPACITY
20231 20231 * which will apply any device specific adjustments to capacity
20232 20232 * and lbasize. One exception is it is also called by
20233 20233 * sd_get_media_info_ext. In that function, there is no need to
20234 20234 * adjust the capacity and lbasize.
20235 20235 *
20236 20236 * Arguments: ssc - ssc contains ptr to soft state struct for the target
20237 20237 * capp - ptr to unsigned 64-bit variable to receive the
20238 20238 * capacity value from the command.
20239 20239  *			lbap - ptr to unsigned 32-bit variable to receive the
20240 20240 * block size value from the command
20241 20241 * psp - ptr to unsigned 32-bit variable to receive the
20242 20242 * physical block size value from the command
20243 20243 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20244 20244 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20245 20245 * to use the USCSI "direct" chain and bypass the normal
20246 20246 * command waitq. SD_PATH_DIRECT_PRIORITY is used when
20247 20247 * this command is issued as part of an error recovery
20248 20248 * action.
20249 20249 *
20250 20250 * Return Code: 0 - Success
20251 20251 * EIO - IO error
20252 20252 * EACCES - Reservation conflict detected
20253 20253 * EAGAIN - Device is becoming ready
20254 20254 * errno return code from sd_ssc_send()
20255 20255 *
20256 20256 * Context: Can sleep. Blocks until command completes.
20257 20257 */
20258 20258
20259 20259 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16)
20260 20260
20261 20261 static int
20262 20262 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
20263 20263 uint32_t *lbap, uint32_t *psp, int path_flag)
20264 20264 {
20265 20265 struct scsi_extended_sense sense_buf;
20266 20266 struct uscsi_cmd ucmd_buf;
20267 20267 union scsi_cdb cdb;
20268 20268 uint64_t *capacity16_buf;
20269 20269 uint64_t capacity;
20270 20270 uint32_t lbasize;
20271 20271 uint32_t pbsize;
20272 20272 uint32_t lbpb_exp;
20273 20273 int status;
20274 20274 struct sd_lun *un;
20275 20275
20276 20276 ASSERT(ssc != NULL);
20277 20277
20278 20278 un = ssc->ssc_un;
20279 20279 ASSERT(un != NULL);
20280 20280 ASSERT(!mutex_owned(SD_MUTEX(un)));
20281 20281 ASSERT(capp != NULL);
20282 20282 ASSERT(lbap != NULL);
20283 20283
20284 20284 SD_TRACE(SD_LOG_IO, un,
20285 20285 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
20286 20286
20287 20287 /*
20288 20288 * First send a READ_CAPACITY_16 command to the target.
20289 20289 *
20290 20290 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
20291 20291 * Medium Indicator bit is cleared. The address field must be
20292 20292 * zero if the PMI bit is zero.
20293 20293 */
20294 20294 bzero(&cdb, sizeof (cdb));
20295 20295 bzero(&ucmd_buf, sizeof (ucmd_buf));
20296 20296
20297 20297 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
20298 20298
20299 20299 ucmd_buf.uscsi_cdb = (char *)&cdb;
20300 20300 ucmd_buf.uscsi_cdblen = CDB_GROUP4;
20301 20301 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
20302 20302 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
20303 20303 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20304 20304 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
20305 20305 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20306 20306 ucmd_buf.uscsi_timeout = 60;
20307 20307
20308 20308 /*
20309 20309 * Read Capacity (16) is a Service Action In command. One
20310 20310 * command byte (0x9E) is overloaded for multiple operations,
20311 20311 * with the second CDB byte specifying the desired operation
20312 20312 	 * with the second CDB byte specifying the desired operation.
20313 20313 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
20314 20314 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
20315 20315
20316 20316 /*
20317 20317 * Fill in allocation length field
20318 20318 */
20319 20319 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
20320 20320
20321 20321 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20322 20322 UIO_SYSSPACE, path_flag);
20323 20323
20324 20324 switch (status) {
20325 20325 case 0:
20326 20326 /* Return failure if we did not get valid capacity data. */
20327 20327 if (ucmd_buf.uscsi_resid > 20) {
20328 20328 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20329 20329 "sd_send_scsi_READ_CAPACITY_16 received invalid "
20330 20330 "capacity data");
20331 20331 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20332 20332 return (EIO);
20333 20333 }
20334 20334
20335 20335 /*
20336 20336 * Read capacity and block size from the READ CAPACITY 16 data.
20337 20337 * This data may be adjusted later due to device specific
20338 20338 * issues.
20339 20339 *
20340 20340 * According to the SCSI spec, the READ CAPACITY 16
20341 20341 * command returns the following:
20342 20342 *
20343 20343 * bytes 0-7: Maximum logical block address available.
20344 20344 * (MSB in byte:0 & LSB in byte:7)
20345 20345 *
20346 20346 * bytes 8-11: Block length in bytes
20347 20347 * (MSB in byte:8 & LSB in byte:11)
20348 20348 *
20349 20349 * byte 13: LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT
20350 20350 */
20351 20351 capacity = BE_64(capacity16_buf[0]);
20352 20352 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
20353 20353 lbpb_exp = (BE_64(capacity16_buf[1]) >> 16) & 0x0f;
20354 20354
20355 20355 pbsize = lbasize << lbpb_exp;
20356 20356
20357 20357 /*
20358 20358 * Done with capacity16_buf
20359 20359 */
20360 20360 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20361 20361
20362 20362 /*
20363 20363 * if the reported capacity is set to all 0xf's, then
20364 20364 * this disk is too large. This could only happen with
20365 20365 * a device that supports LBAs larger than 64 bits which
20366 20366 * are not defined by any current T10 standards.
20367 20367 */
20368 20368 if (capacity == 0xffffffffffffffff) {
20369 20369 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20370 20370 "disk is too large");
20371 20371 return (EIO);
20372 20372 }
20373 20373 break; /* Success! */
20374 20374 case EIO:
20375 20375 switch (ucmd_buf.uscsi_status) {
20376 20376 case STATUS_RESERVATION_CONFLICT:
20377 20377 status = EACCES;
20378 20378 break;
20379 20379 case STATUS_CHECK:
20380 20380 /*
20381 20381 * Check condition; look for ASC/ASCQ of 0x04/0x01
20382 20382 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
20383 20383 */
20384 20384 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20385 20385 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
20386 20386 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
20387 20387 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20388 20388 return (EAGAIN);
20389 20389 }
20390 20390 break;
20391 20391 default:
20392 20392 break;
20393 20393 }
20394 20394 /* FALLTHRU */
20395 20395 default:
20396 20396 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20397 20397 return (status);
20398 20398 }
20399 20399
20400 20400 /*
20401 20401 * Some ATAPI CD-ROM drives report inaccurate LBA size values
20402 20402 * (2352 and 0 are common) so for these devices always force the value
20403 20403 * to 2048 as required by the ATAPI specs.
20404 20404 */
20405 20405 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
20406 20406 lbasize = 2048;
20407 20407 }
20408 20408
20409 20409 /*
20410 20410 * Get the maximum LBA value from the READ CAPACITY 16 data.
20411 20411 * Here we assume that the Partial Medium Indicator (PMI) bit
20412 20412 * was cleared when issuing the command. This means that the LBA
20413 20413 * returned from the device is the LBA of the last logical block
20414 20414 * on the logical unit. The actual logical block count will be
20415 20415 * this value plus one.
20416 20416 */
20417 20417 capacity += 1;
20418 20418
20419 20419 /*
20420 20420 * Currently, for removable media, the capacity is saved in terms
20421 20421 * of un->un_sys_blocksize, so scale the capacity value to reflect this.
20422 20422 */
20423 20423 if (un->un_f_has_removable_media)
20424 20424 capacity *= (lbasize / un->un_sys_blocksize);
20425 20425
20426 20426 *capp = capacity;
20427 20427 *lbap = lbasize;
20428 20428 *psp = pbsize;
20429 20429
20430 20430 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
20431 20431 "capacity:0x%llx lbasize:0x%x, pbsize: 0x%x\n",
20432 20432 capacity, lbasize, pbsize);
20433 20433
20434 20434 if ((capacity == 0) || (lbasize == 0) || (pbsize == 0)) {
20435 20435 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20436 20436 "sd_send_scsi_READ_CAPACITY_16 received invalid value "
20437 20437 		    "capacity %llu lbasize %d pbsize %d", capacity, lbasize, pbsize);
20438 20438 return (EIO);
20439 20439 }
20440 20440
20441 20441 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20442 20442 return (0);
20443 20443 }
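
A sketch of the READ CAPACITY(16) decode, including the physical-block-size derivation from the exponent in the low nibble of byte 13, which mirrors the lbpb_exp computation above:

	#include <stdint.h>

	static void
	decode_read_capacity16(const uint8_t d[32], uint64_t *capp,
	    uint32_t *lbap, uint32_t *psp)
	{
		uint64_t last_lba = 0;
		uint32_t lbasize = 0;
		int i;

		for (i = 0; i < 8; i++)		/* bytes 0-7: last LBA, big-endian */
			last_lba = (last_lba << 8) | d[i];
		for (i = 8; i < 12; i++)	/* bytes 8-11: logical block length */
			lbasize = (lbasize << 8) | d[i];

		*capp = last_lba + 1;		/* PMI=0: last LBA + 1 */
		*lbap = lbasize;
		/* byte 13, low nibble: logical blocks per physical block exponent */
		*psp = lbasize << (d[13] & 0x0f);
	}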
20444 20444
20445 20445
20446 20446 /*
20447 20447 * Function: sd_send_scsi_START_STOP_UNIT
20448 20448 *
20449 20449 * Description: Issue a scsi START STOP UNIT command to the target.
20450 20450 *
20451 20451  * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20452 20452 * structure for this target.
20453 20453 * pc_flag - SD_POWER_CONDITION
20454 20454 * SD_START_STOP
20455 20455 * flag - SD_TARGET_START
20456 20456 * SD_TARGET_STOP
20457 20457 * SD_TARGET_EJECT
20458 20458 * SD_TARGET_CLOSE
20459 20459 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20460 20460 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20461 20461 * to use the USCSI "direct" chain and bypass the normal
20462 20462 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
20463 20463 * command is issued as part of an error recovery action.
20464 20464 *
20465 20465 * Return Code: 0 - Success
20466 20466 * EIO - IO error
20467 20467 * EACCES - Reservation conflict detected
20468 20468 * ENXIO - Not Ready, medium not present
20469 20469 * errno return code from sd_ssc_send()
20470 20470 *
20471 20471 * Context: Can sleep.
20472 20472 */
20473 20473
20474 20474 static int
20475 20475 sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, int flag,
20476 20476 int path_flag)
20477 20477 {
20478 20478 struct scsi_extended_sense sense_buf;
20479 20479 union scsi_cdb cdb;
20480 20480 struct uscsi_cmd ucmd_buf;
20481 20481 int status;
20482 20482 struct sd_lun *un;
20483 20483
20484 20484 ASSERT(ssc != NULL);
20485 20485 un = ssc->ssc_un;
20486 20486 ASSERT(un != NULL);
20487 20487 ASSERT(!mutex_owned(SD_MUTEX(un)));
20488 20488
20489 20489 SD_TRACE(SD_LOG_IO, un,
20490 20490 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un);
20491 20491
20492 20492 if (un->un_f_check_start_stop &&
20493 20493 (pc_flag == SD_START_STOP) &&
20494 20494 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) &&
20495 20495 (un->un_f_start_stop_supported != TRUE)) {
20496 20496 return (0);
20497 20497 }
20498 20498
20499 20499 /*
20500 20500 * If we are performing an eject operation and
20501 20501 * we receive any command other than SD_TARGET_EJECT
20502 20502 * we should immediately return.
20503 20503 */
20504 20504 if (flag != SD_TARGET_EJECT) {
20505 20505 mutex_enter(SD_MUTEX(un));
20506 20506 if (un->un_f_ejecting == TRUE) {
20507 20507 mutex_exit(SD_MUTEX(un));
20508 20508 return (EAGAIN);
20509 20509 }
20510 20510 mutex_exit(SD_MUTEX(un));
20511 20511 }
20512 20512
20513 20513 bzero(&cdb, sizeof (cdb));
20514 20514 bzero(&ucmd_buf, sizeof (ucmd_buf));
20515 20515 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20516 20516
20517 20517 cdb.scc_cmd = SCMD_START_STOP;
20518 20518 cdb.cdb_opaque[4] = (pc_flag == SD_POWER_CONDITION) ?
20519 20519 (uchar_t)(flag << 4) : (uchar_t)flag;
20520 20520
20521 20521 ucmd_buf.uscsi_cdb = (char *)&cdb;
20522 20522 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
20523 20523 ucmd_buf.uscsi_bufaddr = NULL;
20524 20524 ucmd_buf.uscsi_buflen = 0;
20525 20525 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20526 20526 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20527 20527 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
20528 20528 ucmd_buf.uscsi_timeout = 200;
20529 20529
20530 20530 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20531 20531 UIO_SYSSPACE, path_flag);
20532 20532
20533 20533 switch (status) {
20534 20534 case 0:
20535 20535 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20536 20536 break; /* Success! */
20537 20537 case EIO:
20538 20538 switch (ucmd_buf.uscsi_status) {
20539 20539 case STATUS_RESERVATION_CONFLICT:
20540 20540 status = EACCES;
20541 20541 break;
20542 20542 case STATUS_CHECK:
20543 20543 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) {
20544 20544 switch (scsi_sense_key(
20545 20545 (uint8_t *)&sense_buf)) {
20546 20546 case KEY_ILLEGAL_REQUEST:
20547 20547 status = ENOTSUP;
20548 20548 break;
20549 20549 case KEY_NOT_READY:
20550 20550 if (scsi_sense_asc(
20551 20551 (uint8_t *)&sense_buf)
20552 20552 == 0x3A) {
20553 20553 status = ENXIO;
20554 20554 }
20555 20555 break;
20556 20556 default:
20557 20557 break;
20558 20558 }
20559 20559 }
20560 20560 break;
20561 20561 default:
20562 20562 break;
20563 20563 }
20564 20564 break;
20565 20565 default:
20566 20566 break;
20567 20567 }
20568 20568
20569 20569 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n");
20570 20570
20571 20571 return (status);
20572 20572 }
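
Byte 4 of the START STOP UNIT CDB carries either the START/LOEJ bits or, when power conditions are used, the power condition code shifted into the upper nibble; that is what the (flag << 4) above expresses. A sketch:

	#include <stdint.h>

	static uint8_t
	start_stop_byte4(int use_power_condition, uint8_t flag)
	{
		/*
		 * With SD_POWER_CONDITION the condition code (e.g. ACTIVE,
		 * IDLE, STANDBY) occupies the upper nibble of byte 4;
		 * otherwise the START/LOEJ flags stay in the low bits.
		 */
		return (use_power_condition ? (uint8_t)(flag << 4) : flag);
	}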
20573 20573
20574 20574
20575 20575 /*
20576 20576 * Function: sd_start_stop_unit_callback
20577 20577 *
20578 20578 * Description: timeout(9F) callback to begin recovery process for a
20579 20579 * device that has spun down.
20580 20580 *
20581 20581 * Arguments: arg - pointer to associated softstate struct.
20582 20582 *
20583 20583 * Context: Executes in a timeout(9F) thread context
20584 20584 */
20585 20585
20586 20586 static void
20587 20587 sd_start_stop_unit_callback(void *arg)
20588 20588 {
20589 20589 struct sd_lun *un = arg;
20590 20590 ASSERT(un != NULL);
20591 20591 ASSERT(!mutex_owned(SD_MUTEX(un)));
20592 20592
20593 20593 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n");
20594 20594
20595 20595 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP);
20596 20596 }
20597 20597
20598 20598
20599 20599 /*
20600 20600 * Function: sd_start_stop_unit_task
20601 20601 *
20602 20602 * Description: Recovery procedure when a drive is spun down.
20603 20603 *
20604 20604 * Arguments: arg - pointer to associated softstate struct.
20605 20605 *
20606 20606 * Context: Executes in a taskq() thread context
20607 20607 */
20608 20608
20609 20609 static void
20610 20610 sd_start_stop_unit_task(void *arg)
20611 20611 {
20612 20612 struct sd_lun *un = arg;
20613 20613 sd_ssc_t *ssc;
20614 20614 int power_level;
20615 20615 int rval;
20616 20616
20617 20617 ASSERT(un != NULL);
20618 20618 ASSERT(!mutex_owned(SD_MUTEX(un)));
20619 20619
20620 20620 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n");
20621 20621
20622 20622 /*
20623 20623 	 * Some unformatted drives report a not-ready error; there is no
20624 20624 	 * need to restart if a format has been initiated.
20625 20625 */
20626 20626 mutex_enter(SD_MUTEX(un));
20627 20627 if (un->un_f_format_in_progress == TRUE) {
20628 20628 mutex_exit(SD_MUTEX(un));
20629 20629 return;
20630 20630 }
20631 20631 mutex_exit(SD_MUTEX(un));
20632 20632
20633 20633 ssc = sd_ssc_init(un);
20634 20634 /*
20635 20635 * When a START STOP command is issued from here, it is part of a
20636 20636 * failure recovery operation and must be issued before any other
20637 20637 * commands, including any pending retries. Thus it must be sent
20638 20638 	 * using SD_PATH_DIRECT_PRIORITY. Whether or not the spin up
20639 20639 	 * succeeds, we will start I/O after the attempt.
20640 20640 * If power condition is supported and the current power level
20641 20641 * is capable of performing I/O, we should set the power condition
20642 20642 * to that level. Otherwise, set the power condition to ACTIVE.
20643 20643 */
20644 20644 if (un->un_f_power_condition_supported) {
20645 20645 mutex_enter(SD_MUTEX(un));
20646 20646 ASSERT(SD_PM_IS_LEVEL_VALID(un, un->un_power_level));
20647 20647 power_level = sd_pwr_pc.ran_perf[un->un_power_level]
20648 20648 > 0 ? un->un_power_level : SD_SPINDLE_ACTIVE;
20649 20649 mutex_exit(SD_MUTEX(un));
20650 20650 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION,
20651 20651 sd_pl2pc[power_level], SD_PATH_DIRECT_PRIORITY);
20652 20652 } else {
20653 20653 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
20654 20654 SD_TARGET_START, SD_PATH_DIRECT_PRIORITY);
20655 20655 }
20656 20656
20657 20657 if (rval != 0)
20658 20658 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
20659 20659 sd_ssc_fini(ssc);
20660 20660 /*
20661 20661 * The above call blocks until the START_STOP_UNIT command completes.
20662 20662 * Now that it has completed, we must re-try the original IO that
20663 20663 * received the NOT READY condition in the first place. There are
20664 20664 * three possible conditions here:
20665 20665 *
20666 20666 * (1) The original IO is on un_retry_bp.
20667 20667 * (2) The original IO is on the regular wait queue, and un_retry_bp
20668 20668 * is NULL.
20669 20669 * (3) The original IO is on the regular wait queue, and un_retry_bp
20670 20670 * points to some other, unrelated bp.
20671 20671 *
20672 20672 * For each case, we must call sd_start_cmds() with un_retry_bp
20673 20673 * as the argument. If un_retry_bp is NULL, this will initiate
20674 20674 * processing of the regular wait queue. If un_retry_bp is not NULL,
20675 20675 * then this will process the bp on un_retry_bp. That may or may not
20676 20676 * be the original IO, but that does not matter: the important thing
20677 20677 * is to keep the IO processing going at this point.
20678 20678 *
20679 20679 * Note: This is a very specific error recovery sequence associated
20680 20680 * with a drive that is not spun up. We attempt a START_STOP_UNIT and
20681 20681 * serialize the I/O with completion of the spin-up.
20682 20682 */
20683 20683 mutex_enter(SD_MUTEX(un));
20684 20684 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
20685 20685 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n",
20686 20686 un, un->un_retry_bp);
20687 20687 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */
20688 20688 sd_start_cmds(un, un->un_retry_bp);
20689 20689 mutex_exit(SD_MUTEX(un));
20690 20690
20691 20691 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n");
20692 20692 }
20693 20693
20694 20694
20695 20695 /*
20696 20696 * Function: sd_send_scsi_INQUIRY
20697 20697 *
20698 20698 * Description: Issue the scsi INQUIRY command.
20699 20699 *
20700 20700 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20701 20701 * structure for this target.
20702 20702  *		bufaddr - buffer to receive the INQUIRY data
20703 20703  *		buflen - size of the buffer (allocation length)
20704 20704  *		evpd - EVPD bit: request a vital product data page
20705 20705  *		page_code - VPD page to request when evpd is set
20706 20706  *		residp - optional ptr to receive the command residual
20707 20707 *
20708 20708 * Return Code: 0 - Success
20709 20709 * errno return code from sd_ssc_send()
20710 20710 *
20711 20711 * Context: Can sleep. Does not return until command is completed.
20712 20712 */
20713 20713
20714 20714 static int
20715 20715 sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen,
20716 20716 uchar_t evpd, uchar_t page_code, size_t *residp)
20717 20717 {
20718 20718 union scsi_cdb cdb;
20719 20719 struct uscsi_cmd ucmd_buf;
20720 20720 int status;
20721 20721 struct sd_lun *un;
20722 20722
20723 20723 ASSERT(ssc != NULL);
20724 20724 un = ssc->ssc_un;
20725 20725 ASSERT(un != NULL);
20726 20726 ASSERT(!mutex_owned(SD_MUTEX(un)));
20727 20727 ASSERT(bufaddr != NULL);
20728 20728
20729 20729 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);
20730 20730
20731 20731 bzero(&cdb, sizeof (cdb));
20732 20732 bzero(&ucmd_buf, sizeof (ucmd_buf));
20733 20733 bzero(bufaddr, buflen);
20734 20734
20735 20735 cdb.scc_cmd = SCMD_INQUIRY;
20736 20736 cdb.cdb_opaque[1] = evpd;
20737 20737 cdb.cdb_opaque[2] = page_code;
20738 20738 FORMG0COUNT(&cdb, buflen);
20739 20739
20740 20740 ucmd_buf.uscsi_cdb = (char *)&cdb;
20741 20741 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
20742 20742 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
20743 20743 ucmd_buf.uscsi_buflen = buflen;
20744 20744 ucmd_buf.uscsi_rqbuf = NULL;
20745 20745 ucmd_buf.uscsi_rqlen = 0;
20746 20746 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
20747 20747 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */
20748 20748
20749 20749 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20750 20750 UIO_SYSSPACE, SD_PATH_DIRECT);
20751 20751
20752 20752 /*
20753 20753 	 * Only assess status == 0 here; the upper-level caller
20754 20754 	 * will make its own assessment based on the context.
20755 20755 */
20756 20756 if (status == 0)
20757 20757 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20758 20758
20759 20759 if ((status == 0) && (residp != NULL)) {
20760 20760 *residp = ucmd_buf.uscsi_resid;
20761 20761 }
20762 20762
20763 20763 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");
20764 20764
20765 20765 return (status);
20766 20766 }
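
A sketch of the 6-byte INQUIRY CDB the routine builds, showing the EVPD and page-code fields; FORMG0COUNT simply stores the one-byte allocation length in byte 4:

	#include <stdint.h>

	static void
	build_inquiry_cdb(uint8_t cdb[6], int evpd, uint8_t page_code,
	    uint8_t alloc_len)
	{
		cdb[0] = 0x12;			/* INQUIRY */
		cdb[1] = evpd ? 0x01 : 0x00;	/* EVPD: request a VPD page */
		cdb[2] = evpd ? page_code : 0;	/* page code, valid with EVPD */
		cdb[3] = 0;
		cdb[4] = alloc_len;		/* allocation length (FORMG0COUNT) */
		cdb[5] = 0;			/* control byte */
	}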
20767 20767
20768 20768
20769 20769 /*
20770 20770 * Function: sd_send_scsi_TEST_UNIT_READY
20771 20771 *
20772 20772 * Description: Issue the scsi TEST UNIT READY command.
20773 20773 * This routine can be told to set the flag USCSI_DIAGNOSE to
20774 20774 * prevent retrying failed commands. Use this when the intent
20775 20775 * is either to check for device readiness, to clear a Unit
20776 20776 * Attention, or to clear any outstanding sense data.
20777 20777 * However under specific conditions the expected behavior
20778 20778 * is for retries to bring a device ready, so use the flag
20779 20779 * with caution.
20780 20780 *
20781 20781 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20782 20782 * structure for this target.
20783 20783 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present
20784 20784 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
20785 20785  *			0: don't check for media present; do retries on cmd.
20786 20786 *
20787 20787 * Return Code: 0 - Success
20788 20788 * EIO - IO error
20789 20789 * EACCES - Reservation conflict detected
20790 20790 * ENXIO - Not Ready, medium not present
20791 20791 * errno return code from sd_ssc_send()
20792 20792 *
20793 20793 * Context: Can sleep. Does not return until command is completed.
20794 20794 */
20795 20795
20796 20796 static int
20797 20797 sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag)
20798 20798 {
20799 20799 struct scsi_extended_sense sense_buf;
20800 20800 union scsi_cdb cdb;
20801 20801 struct uscsi_cmd ucmd_buf;
20802 20802 int status;
20803 20803 struct sd_lun *un;
20804 20804
20805 20805 ASSERT(ssc != NULL);
20806 20806 un = ssc->ssc_un;
20807 20807 ASSERT(un != NULL);
20808 20808 ASSERT(!mutex_owned(SD_MUTEX(un)));
20809 20809
20810 20810 SD_TRACE(SD_LOG_IO, un,
20811 20811 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);
20812 20812
20813 20813 /*
20814 20814 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
20815 20815 * timeouts when they receive a TUR and the queue is not empty. Check
20816 20816 * the configuration flag set during attach (indicating the drive has
20817 20817 * this firmware bug) and un_ncmds_in_transport before issuing the
20818 20818 	 * TUR. If there are pending commands, return success. This is a
20819 20819 	 * bit arbitrary, but it is ok
20820 20820 * for non-removables (i.e. the eliteI disks) and non-clustering
20821 20821 * configurations.
20822 20822 */
20823 20823 if (un->un_f_cfg_tur_check == TRUE) {
20824 20824 mutex_enter(SD_MUTEX(un));
20825 20825 if (un->un_ncmds_in_transport != 0) {
20826 20826 mutex_exit(SD_MUTEX(un));
20827 20827 return (0);
20828 20828 }
20829 20829 mutex_exit(SD_MUTEX(un));
20830 20830 }
20831 20831
20832 20832 bzero(&cdb, sizeof (cdb));
20833 20833 bzero(&ucmd_buf, sizeof (ucmd_buf));
20834 20834 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20835 20835
20836 20836 cdb.scc_cmd = SCMD_TEST_UNIT_READY;
20837 20837
20838 20838 ucmd_buf.uscsi_cdb = (char *)&cdb;
20839 20839 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
20840 20840 ucmd_buf.uscsi_bufaddr = NULL;
20841 20841 ucmd_buf.uscsi_buflen = 0;
20842 20842 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20843 20843 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20844 20844 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
20845 20845
20846 20846 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */
20847 20847 if ((flag & SD_DONT_RETRY_TUR) != 0) {
20848 20848 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE;
20849 20849 }
20850 20850 ucmd_buf.uscsi_timeout = 60;
20851 20851
20852 20852 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20853 20853 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT :
20854 20854 SD_PATH_STANDARD));
20855 20855
20856 20856 switch (status) {
20857 20857 case 0:
20858 20858 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20859 20859 break; /* Success! */
20860 20860 case EIO:
20861 20861 switch (ucmd_buf.uscsi_status) {
20862 20862 case STATUS_RESERVATION_CONFLICT:
20863 20863 status = EACCES;
20864 20864 break;
20865 20865 case STATUS_CHECK:
20866 20866 if ((flag & SD_CHECK_FOR_MEDIA) == 0) {
20867 20867 break;
20868 20868 }
20869 20869 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20870 20870 (scsi_sense_key((uint8_t *)&sense_buf) ==
20871 20871 KEY_NOT_READY) &&
20872 20872 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) {
20873 20873 status = ENXIO;
20874 20874 }
20875 20875 break;
20876 20876 default:
20877 20877 break;
20878 20878 }
20879 20879 break;
20880 20880 default:
20881 20881 break;
20882 20882 }
20883 20883
20884 20884 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n");
20885 20885
20886 20886 return (status);
20887 20887 }
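
The SD_CHECK_FOR_MEDIA test above looks for sense key NOT READY (0x2) with ASC 0x3A (medium not present). Against fixed-format sense data that check is two byte extractions; a sketch, assuming fixed-format (not descriptor) sense:

	#include <stdint.h>

	static int
	medium_not_present(const uint8_t *sense)
	{
		uint8_t key = sense[2] & 0x0f;	/* fixed format: key in byte 2 */
		uint8_t asc = sense[12];	/* additional sense code */

		return (key == 0x02 && asc == 0x3a);	/* NOT READY, no medium */
	}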
20888 20888
20889 20889 /*
20890 20890 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN
20891 20891 *
20892 20892 * Description: Issue the scsi PERSISTENT RESERVE IN command.
20893 20893 *
20894 20894 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20895 20895 * structure for this target.
20896 20896 *
20897 20897 * Return Code: 0 - Success
20898 20898 * EACCES
20899 20899 * ENOTSUP
20900 20900 * errno return code from sd_ssc_send()
20901 20901 *
20902 20902 * Context: Can sleep. Does not return until command is completed.
20903 20903 */
20904 20904
20905 20905 static int
20906 20906 sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd,
20907 20907 uint16_t data_len, uchar_t *data_bufp)
20908 20908 {
20909 20909 struct scsi_extended_sense sense_buf;
20910 20910 union scsi_cdb cdb;
20911 20911 struct uscsi_cmd ucmd_buf;
20912 20912 int status;
20913 20913 int no_caller_buf = FALSE;
20914 20914 struct sd_lun *un;
20915 20915
20916 20916 ASSERT(ssc != NULL);
20917 20917 un = ssc->ssc_un;
20918 20918 ASSERT(un != NULL);
20919 20919 ASSERT(!mutex_owned(SD_MUTEX(un)));
20920 20920 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV));
20921 20921
20922 20922 SD_TRACE(SD_LOG_IO, un,
20923 20923 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un);
20924 20924
20925 20925 bzero(&cdb, sizeof (cdb));
20926 20926 bzero(&ucmd_buf, sizeof (ucmd_buf));
20927 20927 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20928 20928 if (data_bufp == NULL) {
20929 20929 /* Allocate a default buf if the caller did not give one */
20930 20930 ASSERT(data_len == 0);
20931 20931 data_len = MHIOC_RESV_KEY_SIZE;
20932 20932 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP);
20933 20933 no_caller_buf = TRUE;
20934 20934 }
20935 20935
20936 20936 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN;
20937 20937 cdb.cdb_opaque[1] = usr_cmd;
20938 20938 FORMG1COUNT(&cdb, data_len);
20939 20939
20940 20940 ucmd_buf.uscsi_cdb = (char *)&cdb;
20941 20941 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
20942 20942 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp;
20943 20943 ucmd_buf.uscsi_buflen = data_len;
20944 20944 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20945 20945 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20946 20946 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20947 20947 ucmd_buf.uscsi_timeout = 60;
20948 20948
20949 20949 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20950 20950 UIO_SYSSPACE, SD_PATH_STANDARD);
20951 20951
20952 20952 switch (status) {
20953 20953 case 0:
20954 20954 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20955 20955
20956 20956 break; /* Success! */
20957 20957 case EIO:
20958 20958 switch (ucmd_buf.uscsi_status) {
20959 20959 case STATUS_RESERVATION_CONFLICT:
20960 20960 status = EACCES;
20961 20961 break;
20962 20962 case STATUS_CHECK:
20963 20963 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20964 20964 (scsi_sense_key((uint8_t *)&sense_buf) ==
20965 20965 KEY_ILLEGAL_REQUEST)) {
20966 20966 status = ENOTSUP;
20967 20967 }
20968 20968 break;
20969 20969 default:
20970 20970 break;
20971 20971 }
20972 20972 break;
20973 20973 default:
20974 20974 break;
20975 20975 }
20976 20976
20977 20977 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");
20978 20978
20979 20979 if (no_caller_buf == TRUE) {
20980 20980 kmem_free(data_bufp, data_len);
20981 20981 }
20982 20982
20983 20983 return (status);
20984 20984 }
20985 20985
20986 20986
20987 20987 /*
20988 20988 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT
20989 20989 *
20990 20990  * Description: This routine is the driver entry point for handling
20991 20991  *		multi-host persistent reservation requests (MHIOCGRP_RESERVE,
20992 20992  *		MHIOCGRP_PREEMPTANDABORT, etc.) by sending the SCSI-3 PROUT
20993 20993  *		commands to the device.
20994 20994 *
20995 20995 * Arguments: ssc - ssc contains un - pointer to soft state struct
20996 20996 * for the target.
20997 20997 * usr_cmd SCSI-3 reservation facility command (one of
20998 20998 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE,
20999 20999  *			SD_SCSI3_PREEMPTANDABORT, SD_SCSI3_CLEAR, SD_SCSI3_REGISTERANDIGNOREKEY)
21000 21000 * usr_bufp - user provided pointer register, reserve descriptor or
21001 21001 * preempt and abort structure (mhioc_register_t,
21002 21002 * mhioc_resv_desc_t, mhioc_preemptandabort_t)
21003 21003 *
21004 21004 * Return Code: 0 - Success
21005 21005 * EACCES
21006 21006 * ENOTSUP
21007 21007 * errno return code from sd_ssc_send()
21008 21008 *
21009 21009 * Context: Can sleep. Does not return until command is completed.
21010 21010 */
21011 21011
21012 21012 static int
21013 21013 sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd,
21014 21014 uchar_t *usr_bufp)
21015 21015 {
21016 21016 struct scsi_extended_sense sense_buf;
21017 21017 union scsi_cdb cdb;
21018 21018 struct uscsi_cmd ucmd_buf;
21019 21019 int status;
21020 21020 uchar_t data_len = sizeof (sd_prout_t);
21021 21021 sd_prout_t *prp;
21022 21022 struct sd_lun *un;
21023 21023
21024 21024 ASSERT(ssc != NULL);
21025 21025 un = ssc->ssc_un;
21026 21026 ASSERT(un != NULL);
21027 21027 ASSERT(!mutex_owned(SD_MUTEX(un)));
21028 21028 ASSERT(data_len == 24); /* required by scsi spec */
21029 21029
21030 21030 SD_TRACE(SD_LOG_IO, un,
21031 21031 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un);
21032 21032
21033 21033 if (usr_bufp == NULL) {
21034 21034 return (EINVAL);
21035 21035 }
21036 21036
21037 21037 bzero(&cdb, sizeof (cdb));
21038 21038 bzero(&ucmd_buf, sizeof (ucmd_buf));
21039 21039 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21040 21040 prp = kmem_zalloc(data_len, KM_SLEEP);
21041 21041
21042 21042 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT;
21043 21043 cdb.cdb_opaque[1] = usr_cmd;
21044 21044 FORMG1COUNT(&cdb, data_len);
21045 21045
21046 21046 ucmd_buf.uscsi_cdb = (char *)&cdb;
21047 21047 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
21048 21048 ucmd_buf.uscsi_bufaddr = (caddr_t)prp;
21049 21049 ucmd_buf.uscsi_buflen = data_len;
21050 21050 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21051 21051 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21052 21052 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
21053 21053 ucmd_buf.uscsi_timeout = 60;
21054 21054
21055 21055 switch (usr_cmd) {
21056 21056 case SD_SCSI3_REGISTER: {
21057 21057 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp;
21058 21058
21059 21059 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
21060 21060 bcopy(ptr->newkey.key, prp->service_key,
21061 21061 MHIOC_RESV_KEY_SIZE);
21062 21062 prp->aptpl = ptr->aptpl;
21063 21063 break;
21064 21064 }
21065 21065 case SD_SCSI3_CLEAR: {
21066 21066 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp;
21067 21067
21068 21068 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
21069 21069 break;
21070 21070 }
21071 21071 case SD_SCSI3_RESERVE:
21072 21072 case SD_SCSI3_RELEASE: {
21073 21073 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp;
21074 21074
21075 21075 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
21076 21076 prp->scope_address = BE_32(ptr->scope_specific_addr);
21077 21077 cdb.cdb_opaque[2] = ptr->type;
21078 21078 break;
21079 21079 }
21080 21080 case SD_SCSI3_PREEMPTANDABORT: {
21081 21081 mhioc_preemptandabort_t *ptr =
21082 21082 (mhioc_preemptandabort_t *)usr_bufp;
21083 21083
21084 21084 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
21085 21085 bcopy(ptr->victim_key.key, prp->service_key,
21086 21086 MHIOC_RESV_KEY_SIZE);
21087 21087 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr);
21088 21088 cdb.cdb_opaque[2] = ptr->resvdesc.type;
21089 21089 ucmd_buf.uscsi_flags |= USCSI_HEAD;
21090 21090 break;
21091 21091 }
21092 21092 case SD_SCSI3_REGISTERANDIGNOREKEY:
21093 21093 {
21094 21094 mhioc_registerandignorekey_t *ptr;
21095 21095 ptr = (mhioc_registerandignorekey_t *)usr_bufp;
21096 21096 bcopy(ptr->newkey.key,
21097 21097 prp->service_key, MHIOC_RESV_KEY_SIZE);
21098 21098 prp->aptpl = ptr->aptpl;
21099 21099 break;
21100 21100 }
21101 21101 default:
21102 21102 ASSERT(FALSE);
21103 21103 break;
21104 21104 }
21105 21105
21106 21106 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21107 21107 UIO_SYSSPACE, SD_PATH_STANDARD);
21108 21108
21109 21109 switch (status) {
21110 21110 case 0:
21111 21111 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21112 21112 break; /* Success! */
21113 21113 case EIO:
21114 21114 switch (ucmd_buf.uscsi_status) {
21115 21115 case STATUS_RESERVATION_CONFLICT:
21116 21116 status = EACCES;
21117 21117 break;
21118 21118 case STATUS_CHECK:
21119 21119 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
21120 21120 (scsi_sense_key((uint8_t *)&sense_buf) ==
21121 21121 KEY_ILLEGAL_REQUEST)) {
21122 21122 status = ENOTSUP;
21123 21123 }
21124 21124 break;
21125 21125 default:
21126 21126 break;
21127 21127 }
21128 21128 break;
21129 21129 default:
21130 21130 break;
21131 21131 }
21132 21132
21133 21133 kmem_free(prp, data_len);
21134 21134 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n");
21135 21135 return (status);
21136 21136 }
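/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * servicing MHIOCGRP_REGISTER fills in an mhioc_register_t and hands it
 * to this routine, which packs the old/new keys into the PROUT
 * parameter list above. sdioctl() below performs this copyin/dispatch
 * for each of the MHIOCGRP_* service actions:
 *
 *	mhioc_register_t reg;
 *
 *	bzero(&reg, sizeof (reg));
 *	bcopy(my_key, reg.newkey.key, MHIOC_RESV_KEY_SIZE);
 *	err = sd_send_scsi_PERSISTENT_RESERVE_OUT(ssc, SD_SCSI3_REGISTER,
 *	    (uchar_t *)&reg);
 *
 * Here "my_key" is a hypothetical 8-byte caller-supplied key.
 */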
21137 21137
21138 21138
21139 21139 /*
21140 21140 * Function: sd_send_scsi_SYNCHRONIZE_CACHE
21141 21141 *
21142 21142 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target
21143 21143 *
21144 21144 * Arguments: un - pointer to the target's soft state struct
21145 21145 * dkc - pointer to the callback structure
21146 21146 *
21147 21147 * Return Code: 0 - success
21148 21148 * errno-type error code
21149 21149 *
21150 21150 * Context: kernel thread context only.
21151 21151 *
21152 21152 * _______________________________________________________________
21153 21153 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE |
21154 21154 * |FLUSH_VOLATILE| | operation |
21155 21155 * |______________|______________|_________________________________|
21156 21156 * | 0 | NULL | Synchronous flush on both |
21157 21157 * | | | volatile and non-volatile cache |
21158 21158 * |______________|______________|_________________________________|
21159 21159 * | 1 | NULL | Synchronous flush on volatile |
21160 21160 * | | | cache; disk drivers may suppress|
21161 21161 * | | | flush if disk table indicates |
21162 21162 * | | | non-volatile cache |
21163 21163 * |______________|______________|_________________________________|
21164 21164 * | 0 | !NULL | Asynchronous flush on both |
21165 21165 * | | | volatile and non-volatile cache;|
21166 21166 * |______________|______________|_________________________________|
21167 21167 * | 1 | !NULL | Asynchronous flush on volatile |
21168 21168 * | | | cache; disk drivers may suppress|
21169 21169 * | | | flush if disk table indicates |
21170 21170 * | | | non-volatile cache |
21171 21171 * |______________|______________|_________________________________|
21172 21172 *
21173 21173 */
21174 21174
21175 21175 static int
21176 21176 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc)
21177 21177 {
21178 21178 struct sd_uscsi_info *uip;
21179 21179 struct uscsi_cmd *uscmd;
21180 21180 union scsi_cdb *cdb;
21181 21181 struct buf *bp;
21182 21182 int rval = 0;
21183 21183 int is_async;
21184 21184
21185 21185 SD_TRACE(SD_LOG_IO, un,
21186 21186 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un);
21187 21187
21188 21188 ASSERT(un != NULL);
21189 21189 ASSERT(!mutex_owned(SD_MUTEX(un)));
21190 21190
21191 21191 if (dkc == NULL || dkc->dkc_callback == NULL) {
21192 21192 is_async = FALSE;
21193 21193 } else {
21194 21194 is_async = TRUE;
21195 21195 }
21196 21196
21197 21197 mutex_enter(SD_MUTEX(un));
21198 21198 /* check whether cache flush should be suppressed */
21199 21199 if (un->un_f_suppress_cache_flush == TRUE) {
21200 21200 mutex_exit(SD_MUTEX(un));
21201 21201 /*
21202 21202 * suppress the cache flush if the device is told to do
21203 21203 * so by sd.conf or disk table
21204 21204 */
21205 21205 		SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: "
21206 21206 		    "skip the cache flush since suppress_cache_flush is %d!\n",
21207 21207 		    un->un_f_suppress_cache_flush);
21208 21208
21209 21209 if (is_async == TRUE) {
21210 21210 /* invoke callback for asynchronous flush */
21211 21211 (*dkc->dkc_callback)(dkc->dkc_cookie, 0);
21212 21212 }
21213 21213 return (rval);
21214 21214 }
21215 21215 mutex_exit(SD_MUTEX(un));
21216 21216
21217 21217 /*
21218 21218 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be
21219 21219 * set properly
21220 21220 */
21221 21221 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP);
21222 21222 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE;
21223 21223
21224 21224 mutex_enter(SD_MUTEX(un));
21225 21225 if (dkc != NULL && un->un_f_sync_nv_supported &&
21226 21226 (dkc->dkc_flag & FLUSH_VOLATILE)) {
21227 21227 /*
21228 21228 * if the device supports SYNC_NV bit, turn on
21229 21229 * the SYNC_NV bit to only flush volatile cache
21230 21230 */
21231 21231 cdb->cdb_un.tag |= SD_SYNC_NV_BIT;
21232 21232 }
21233 21233 mutex_exit(SD_MUTEX(un));
21234 21234
21235 21235 /*
21236 21236 * First get some memory for the uscsi_cmd struct and cdb
21237 21237 * and initialize for SYNCHRONIZE_CACHE cmd.
21238 21238 */
21239 21239 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP);
21240 21240 uscmd->uscsi_cdblen = CDB_GROUP1;
21241 21241 uscmd->uscsi_cdb = (caddr_t)cdb;
21242 21242 uscmd->uscsi_bufaddr = NULL;
21243 21243 uscmd->uscsi_buflen = 0;
21244 21244 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
21245 21245 uscmd->uscsi_rqlen = SENSE_LENGTH;
21246 21246 uscmd->uscsi_rqresid = SENSE_LENGTH;
21247 21247 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
21248 21248 uscmd->uscsi_timeout = sd_io_time;
21249 21249
21250 21250 /*
21251 21251 * Allocate an sd_uscsi_info struct and fill it with the info
21252 21252 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
21253 21253 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
21254 21254 * since we allocate the buf here in this function, we do not
21255 21255 * need to preserve the prior contents of b_private.
21256 21256 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
21257 21257 */
21258 21258 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
21259 21259 uip->ui_flags = SD_PATH_DIRECT;
21260 21260 uip->ui_cmdp = uscmd;
21261 21261
21262 21262 bp = getrbuf(KM_SLEEP);
21263 21263 bp->b_private = uip;
21264 21264
21265 21265 /*
21266 21266 * Setup buffer to carry uscsi request.
21267 21267 */
21268 21268 bp->b_flags = B_BUSY;
21269 21269 bp->b_bcount = 0;
21270 21270 bp->b_blkno = 0;
21271 21271
21272 21272 if (is_async == TRUE) {
21273 21273 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone;
21274 21274 uip->ui_dkc = *dkc;
21275 21275 }
21276 21276
21277 21277 bp->b_edev = SD_GET_DEV(un);
21278 21278 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */
21279 21279
21280 21280 /*
21281 21281 * Unset un_f_sync_cache_required flag
21282 21282 */
21283 21283 mutex_enter(SD_MUTEX(un));
21284 21284 un->un_f_sync_cache_required = FALSE;
21285 21285 mutex_exit(SD_MUTEX(un));
21286 21286
21287 21287 (void) sd_uscsi_strategy(bp);
21288 21288
21289 21289 /*
21290 21290 * If synchronous request, wait for completion
21291 21291 * If async just return and let b_iodone callback
21292 21292 * cleanup.
21293 21293 * NOTE: On return, u_ncmds_in_driver will be decremented,
21294 21294 * but it was also incremented in sd_uscsi_strategy(), so
21295 21295 * we should be ok.
21296 21296 */
21297 21297 if (is_async == FALSE) {
21298 21298 (void) biowait(bp);
21299 21299 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp);
21300 21300 }
21301 21301
21302 21302 return (rval);
21303 21303 }
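/*
 * Usage sketch (illustrative only): an asynchronous, volatile-only flush
 * is requested by passing a dk_callback with FLUSH_VOLATILE set; the
 * callback then fires from sd_send_scsi_SYNCHRONIZE_CACHE_biodone():
 *
 *	struct dk_callback dkc;
 *
 *	dkc.dkc_callback = my_done;	(hypothetical completion routine)
 *	dkc.dkc_cookie = my_arg;	(hypothetical callback argument)
 *	dkc.dkc_flag = FLUSH_VOLATILE;
 *	(void) sd_send_scsi_SYNCHRONIZE_CACHE(un, &dkc);
 *
 * Passing dkc == NULL (or a NULL dkc_callback) instead gives the
 * synchronous behavior described in the table above.
 */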
21304 21304
21305 21305
21306 21306 static int
21307 21307 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp)
21308 21308 {
21309 21309 struct sd_uscsi_info *uip;
21310 21310 struct uscsi_cmd *uscmd;
21311 21311 uint8_t *sense_buf;
21312 21312 struct sd_lun *un;
21313 21313 int status;
21314 21314 union scsi_cdb *cdb;
21315 21315
21316 21316 uip = (struct sd_uscsi_info *)(bp->b_private);
21317 21317 ASSERT(uip != NULL);
21318 21318
21319 21319 uscmd = uip->ui_cmdp;
21320 21320 ASSERT(uscmd != NULL);
21321 21321
21322 21322 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf;
21323 21323 ASSERT(sense_buf != NULL);
21324 21324
21325 21325 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
21326 21326 ASSERT(un != NULL);
21327 21327
21328 21328 cdb = (union scsi_cdb *)uscmd->uscsi_cdb;
21329 21329
21330 21330 status = geterror(bp);
21331 21331 switch (status) {
21332 21332 case 0:
21333 21333 break; /* Success! */
21334 21334 case EIO:
21335 21335 switch (uscmd->uscsi_status) {
21336 21336 case STATUS_RESERVATION_CONFLICT:
21337 21337 /* Ignore reservation conflict */
21338 21338 status = 0;
21339 21339 goto done;
21340 21340
21341 21341 case STATUS_CHECK:
21342 21342 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
21343 21343 (scsi_sense_key(sense_buf) ==
21344 21344 KEY_ILLEGAL_REQUEST)) {
21345 21345 /* Ignore Illegal Request error */
21346 21346 				if (cdb->cdb_un.tag & SD_SYNC_NV_BIT) {
21347 21347 mutex_enter(SD_MUTEX(un));
21348 21348 un->un_f_sync_nv_supported = FALSE;
21349 21349 mutex_exit(SD_MUTEX(un));
21350 21350 status = 0;
21351 21351 SD_TRACE(SD_LOG_IO, un,
21352 21352 					    "un_f_sync_nv_supported "
21353 21353 					    "is set to false.\n");
21354 21354 goto done;
21355 21355 }
21356 21356
21357 21357 mutex_enter(SD_MUTEX(un));
21358 21358 un->un_f_sync_cache_supported = FALSE;
21359 21359 mutex_exit(SD_MUTEX(un));
21360 21360 SD_TRACE(SD_LOG_IO, un,
21361 21361 			    "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: "
21362 21362 			    "un_f_sync_cache_supported set to false "
21363 21363 			    "with asc = %x, ascq = %x\n",
21364 21364 scsi_sense_asc(sense_buf),
21365 21365 scsi_sense_ascq(sense_buf));
21366 21366 status = ENOTSUP;
21367 21367 goto done;
21368 21368 }
21369 21369 break;
21370 21370 default:
21371 21371 break;
21372 21372 }
21373 21373 /* FALLTHRU */
21374 21374 default:
21375 21375 /*
21376 21376 * Turn on the un_f_sync_cache_required flag
21377 21377 * since the SYNC CACHE command failed
21378 21378 */
21379 21379 mutex_enter(SD_MUTEX(un));
21380 21380 un->un_f_sync_cache_required = TRUE;
21381 21381 mutex_exit(SD_MUTEX(un));
21382 21382
21383 21383 /*
21384 21384 * Don't log an error message if this device
21385 21385 * has removable media.
21386 21386 */
21387 21387 if (!un->un_f_has_removable_media) {
21388 21388 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
21389 21389 "SYNCHRONIZE CACHE command failed (%d)\n", status);
21390 21390 }
21391 21391 break;
21392 21392 }
21393 21393
21394 21394 done:
21395 21395 if (uip->ui_dkc.dkc_callback != NULL) {
21396 21396 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
21397 21397 }
21398 21398
21399 21399 ASSERT((bp->b_flags & B_REMAPPED) == 0);
21400 21400 freerbuf(bp);
21401 21401 kmem_free(uip, sizeof (struct sd_uscsi_info));
21402 21402 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
21403 21403 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
21404 21404 kmem_free(uscmd, sizeof (struct uscsi_cmd));
21405 21405
21406 21406 return (status);
21407 21407 }
21408 21408
21409 21409
21410 21410 /*
21411 21411 * Function: sd_send_scsi_GET_CONFIGURATION
21412 21412 *
21413 21413 * Description: Issues the get configuration command to the device.
21414 21414  *		Called from sd_check_for_writable_cd & sd_get_media_info;
21415 21415  *		the caller must ensure that buflen = SD_PROFILE_HEADER_LEN.
21416 21416 * Arguments: ssc
21417 21417 * ucmdbuf
21418 21418 * rqbuf
21419 21419 * rqbuflen
21420 21420 * bufaddr
21421 21421 * buflen
21422 21422 * path_flag
21423 21423 *
21424 21424 * Return Code: 0 - Success
21425 21425 * errno return code from sd_ssc_send()
21426 21426 *
21427 21427 * Context: Can sleep. Does not return until command is completed.
21428 21428 *
21429 21429 */
21430 21430
21431 21431 static int
21432 21432 sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf,
21433 21433 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
21434 21434 int path_flag)
21435 21435 {
21436 21436 char cdb[CDB_GROUP1];
21437 21437 int status;
21438 21438 struct sd_lun *un;
21439 21439
21440 21440 ASSERT(ssc != NULL);
21441 21441 un = ssc->ssc_un;
21442 21442 ASSERT(un != NULL);
21443 21443 ASSERT(!mutex_owned(SD_MUTEX(un)));
21444 21444 ASSERT(bufaddr != NULL);
21445 21445 ASSERT(ucmdbuf != NULL);
21446 21446 ASSERT(rqbuf != NULL);
21447 21447
21448 21448 SD_TRACE(SD_LOG_IO, un,
21449 21449 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);
21450 21450
21451 21451 bzero(cdb, sizeof (cdb));
21452 21452 bzero(ucmdbuf, sizeof (struct uscsi_cmd));
21453 21453 bzero(rqbuf, rqbuflen);
21454 21454 bzero(bufaddr, buflen);
21455 21455
21456 21456 /*
21457 21457 * Set up cdb field for the get configuration command.
21458 21458 */
21459 21459 cdb[0] = SCMD_GET_CONFIGURATION;
21460 21460 cdb[1] = 0x02; /* Requested Type */
21461 21461 cdb[8] = SD_PROFILE_HEADER_LEN;
21462 21462 ucmdbuf->uscsi_cdb = cdb;
21463 21463 ucmdbuf->uscsi_cdblen = CDB_GROUP1;
21464 21464 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
21465 21465 ucmdbuf->uscsi_buflen = buflen;
21466 21466 ucmdbuf->uscsi_timeout = sd_io_time;
21467 21467 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
21468 21468 ucmdbuf->uscsi_rqlen = rqbuflen;
21469 21469 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;
21470 21470
21471 21471 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
21472 21472 UIO_SYSSPACE, path_flag);
21473 21473
21474 21474 switch (status) {
21475 21475 case 0:
21476 21476 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21477 21477 break; /* Success! */
21478 21478 case EIO:
21479 21479 switch (ucmdbuf->uscsi_status) {
21480 21480 case STATUS_RESERVATION_CONFLICT:
21481 21481 status = EACCES;
21482 21482 break;
21483 21483 default:
21484 21484 break;
21485 21485 }
21486 21486 break;
21487 21487 default:
21488 21488 break;
21489 21489 }
21490 21490
21491 21491 if (status == 0) {
21492 21492 SD_DUMP_MEMORY(un, SD_LOG_IO,
21493 21493 "sd_send_scsi_GET_CONFIGURATION: data",
21494 21494 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
21495 21495 }
21496 21496
21497 21497 SD_TRACE(SD_LOG_IO, un,
21498 21498 "sd_send_scsi_GET_CONFIGURATION: exit\n");
21499 21499
21500 21500 return (status);
21501 21501 }
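/*
 * Illustrative note: with the Requested Type field set to 0x02 in
 * cdb[1], the device returns the feature header plus (at most) the
 * single feature named in the starting-feature field, so the
 * SD_PROFILE_HEADER_LEN allocation length in cdb[8] is enough for the
 * header that the callers of this routine parse.
 */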
21502 21502
21503 21503 /*
21504 21504 * Function: sd_send_scsi_feature_GET_CONFIGURATION
21505 21505 *
21506 21506 * Description: Issues the get configuration command to the device to
21507 21507 * retrieve a specific feature. Called from
21508 21508 * sd_check_for_writable_cd & sd_set_mmc_caps.
21509 21509 * Arguments: ssc
21510 21510 * ucmdbuf
21511 21511 * rqbuf
21512 21512 * rqbuflen
21513 21513 * bufaddr
21514 21514 * buflen
21515 21515 * feature
21516 21516 *
21517 21517 * Return Code: 0 - Success
21518 21518 * errno return code from sd_ssc_send()
21519 21519 *
21520 21520 * Context: Can sleep. Does not return until command is completed.
21521 21521 *
21522 21522 */
21523 21523 static int
21524 21524 sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc,
21525 21525 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
21526 21526 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag)
21527 21527 {
21528 21528 char cdb[CDB_GROUP1];
21529 21529 int status;
21530 21530 struct sd_lun *un;
21531 21531
21532 21532 ASSERT(ssc != NULL);
21533 21533 un = ssc->ssc_un;
21534 21534 ASSERT(un != NULL);
21535 21535 ASSERT(!mutex_owned(SD_MUTEX(un)));
21536 21536 ASSERT(bufaddr != NULL);
21537 21537 ASSERT(ucmdbuf != NULL);
21538 21538 ASSERT(rqbuf != NULL);
21539 21539
21540 21540 SD_TRACE(SD_LOG_IO, un,
21541 21541 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un);
21542 21542
21543 21543 bzero(cdb, sizeof (cdb));
21544 21544 bzero(ucmdbuf, sizeof (struct uscsi_cmd));
21545 21545 bzero(rqbuf, rqbuflen);
21546 21546 bzero(bufaddr, buflen);
21547 21547
21548 21548 /*
21549 21549 * Set up cdb field for the get configuration command.
21550 21550 */
21551 21551 cdb[0] = SCMD_GET_CONFIGURATION;
21552 21552 cdb[1] = 0x02; /* Requested Type */
21553 21553 cdb[3] = feature;
21554 21554 cdb[8] = buflen;
21555 21555 ucmdbuf->uscsi_cdb = cdb;
21556 21556 ucmdbuf->uscsi_cdblen = CDB_GROUP1;
21557 21557 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
21558 21558 ucmdbuf->uscsi_buflen = buflen;
21559 21559 ucmdbuf->uscsi_timeout = sd_io_time;
21560 21560 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
21561 21561 ucmdbuf->uscsi_rqlen = rqbuflen;
21562 21562 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;
21563 21563
21564 21564 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
21565 21565 UIO_SYSSPACE, path_flag);
21566 21566
21567 21567 switch (status) {
21568 21568 case 0:
21569 21569
21570 21570 break; /* Success! */
21571 21571 case EIO:
21572 21572 switch (ucmdbuf->uscsi_status) {
21573 21573 case STATUS_RESERVATION_CONFLICT:
21574 21574 status = EACCES;
21575 21575 break;
21576 21576 default:
21577 21577 break;
21578 21578 }
21579 21579 break;
21580 21580 default:
21581 21581 break;
21582 21582 }
21583 21583
21584 21584 if (status == 0) {
21585 21585 SD_DUMP_MEMORY(un, SD_LOG_IO,
21586 21586 "sd_send_scsi_feature_GET_CONFIGURATION: data",
21587 21587 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
21588 21588 }
21589 21589
21590 21590 SD_TRACE(SD_LOG_IO, un,
21591 21591 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n");
21592 21592
21593 21593 return (status);
21594 21594 }
21595 21595
21596 21596
21597 21597 /*
21598 21598 * Function: sd_send_scsi_MODE_SENSE
21599 21599 *
21600 21600 * Description: Utility function for issuing a scsi MODE SENSE command.
21601 21601 * Note: This routine uses a consistent implementation for Group0,
21602 21602 * Group1, and Group2 commands across all platforms. ATAPI devices
21603 21603  *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
21604 21604 *
21605 21605 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21606 21606 * structure for this target.
21607 21607 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or
21608 21608 * CDB_GROUP[1|2] (10 byte).
21609 21609 * bufaddr - buffer for page data retrieved from the target.
21610 21610 * buflen - size of page to be retrieved.
21611 21611 * page_code - page code of data to be retrieved from the target.
21612 21612 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21613 21613 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21614 21614 * to use the USCSI "direct" chain and bypass the normal
21615 21615 * command waitq.
21616 21616 *
21617 21617 * Return Code: 0 - Success
21618 21618 * errno return code from sd_ssc_send()
21619 21619 *
21620 21620 * Context: Can sleep. Does not return until command is completed.
21621 21621 */
21622 21622
21623 21623 static int
21624 21624 sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
21625 21625 size_t buflen, uchar_t page_code, int path_flag)
21626 21626 {
21627 21627 struct scsi_extended_sense sense_buf;
21628 21628 union scsi_cdb cdb;
21629 21629 struct uscsi_cmd ucmd_buf;
21630 21630 int status;
21631 21631 int headlen;
21632 21632 struct sd_lun *un;
21633 21633
21634 21634 ASSERT(ssc != NULL);
21635 21635 un = ssc->ssc_un;
21636 21636 ASSERT(un != NULL);
21637 21637 ASSERT(!mutex_owned(SD_MUTEX(un)));
21638 21638 ASSERT(bufaddr != NULL);
21639 21639 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
21640 21640 (cdbsize == CDB_GROUP2));
21641 21641
21642 21642 SD_TRACE(SD_LOG_IO, un,
21643 21643 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);
21644 21644
21645 21645 bzero(&cdb, sizeof (cdb));
21646 21646 bzero(&ucmd_buf, sizeof (ucmd_buf));
21647 21647 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21648 21648 bzero(bufaddr, buflen);
21649 21649
21650 21650 if (cdbsize == CDB_GROUP0) {
21651 21651 cdb.scc_cmd = SCMD_MODE_SENSE;
21652 21652 cdb.cdb_opaque[2] = page_code;
21653 21653 FORMG0COUNT(&cdb, buflen);
21654 21654 headlen = MODE_HEADER_LENGTH;
21655 21655 } else {
21656 21656 cdb.scc_cmd = SCMD_MODE_SENSE_G1;
21657 21657 cdb.cdb_opaque[2] = page_code;
21658 21658 FORMG1COUNT(&cdb, buflen);
21659 21659 headlen = MODE_HEADER_LENGTH_GRP2;
21660 21660 }
21661 21661
21662 21662 ASSERT(headlen <= buflen);
21663 21663 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
21664 21664
21665 21665 ucmd_buf.uscsi_cdb = (char *)&cdb;
21666 21666 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
21667 21667 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
21668 21668 ucmd_buf.uscsi_buflen = buflen;
21669 21669 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21670 21670 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21671 21671 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
21672 21672 ucmd_buf.uscsi_timeout = 60;
21673 21673
21674 21674 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21675 21675 UIO_SYSSPACE, path_flag);
21676 21676
21677 21677 switch (status) {
21678 21678 case 0:
21679 21679 /*
21680 21680 		 * sr_check_wp() uses page code 0x3f and checks the mode page
21681 21681 		 * header to determine whether the target device is
21682 21682 		 * write-protected. However, some USB devices return 0 bytes
21683 21683 		 * for page code 0x3f. For this case, make sure that at least
21684 21684 		 * the mode page header is returned.
21685 21685 */
21686 21686 if (buflen - ucmd_buf.uscsi_resid < headlen) {
21687 21687 status = EIO;
21688 21688 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
21689 21689 "mode page header is not returned");
21690 21690 }
21691 21691 break; /* Success! */
21692 21692 case EIO:
21693 21693 switch (ucmd_buf.uscsi_status) {
21694 21694 case STATUS_RESERVATION_CONFLICT:
21695 21695 status = EACCES;
21696 21696 break;
21697 21697 default:
21698 21698 break;
21699 21699 }
21700 21700 break;
21701 21701 default:
21702 21702 break;
21703 21703 }
21704 21704
21705 21705 if (status == 0) {
21706 21706 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
21707 21707 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
21708 21708 }
21709 21709 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");
21710 21710
21711 21711 return (status);
21712 21712 }
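/*
 * Usage sketch (illustrative only): fetching the caching mode page with
 * a 10-byte CDB; the headlen check above guarantees at least the
 * group-2 mode header is present before the caller parses the page:
 *
 *	uchar_t buf[MODE_HEADER_LENGTH_GRP2 + MODE_BLK_DESC_LENGTH +
 *	    sizeof (struct mode_caching)];
 *
 *	status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, buf,
 *	    sizeof (buf), MODEPAGE_CACHING, SD_PATH_DIRECT);
 */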
21713 21713
21714 21714
21715 21715 /*
21716 21716 * Function: sd_send_scsi_MODE_SELECT
21717 21717 *
21718 21718 * Description: Utility function for issuing a scsi MODE SELECT command.
21719 21719 * Note: This routine uses a consistent implementation for Group0,
21720 21720 * Group1, and Group2 commands across all platforms. ATAPI devices
21721 21721  *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
21722 21722 *
21723 21723 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21724 21724 * structure for this target.
21725 21725 * cdbsize - size CDB to be used (CDB_GROUP0 (6 byte), or
21726 21726 * CDB_GROUP[1|2] (10 byte).
21727 21727 * bufaddr - buffer for page data retrieved from the target.
21728 21728 * buflen - size of page to be retrieved.
21729 21729  *		save_page - boolean to determine if SP bit should be set.
21730 21730 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21731 21731 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21732 21732 * to use the USCSI "direct" chain and bypass the normal
21733 21733 * command waitq.
21734 21734 *
21735 21735 * Return Code: 0 - Success
21736 21736 * errno return code from sd_ssc_send()
21737 21737 *
21738 21738 * Context: Can sleep. Does not return until command is completed.
21739 21739 */
21740 21740
21741 21741 static int
21742 21742 sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
21743 21743 size_t buflen, uchar_t save_page, int path_flag)
21744 21744 {
21745 21745 struct scsi_extended_sense sense_buf;
21746 21746 union scsi_cdb cdb;
21747 21747 struct uscsi_cmd ucmd_buf;
21748 21748 int status;
21749 21749 struct sd_lun *un;
21750 21750
21751 21751 ASSERT(ssc != NULL);
21752 21752 un = ssc->ssc_un;
21753 21753 ASSERT(un != NULL);
21754 21754 ASSERT(!mutex_owned(SD_MUTEX(un)));
21755 21755 ASSERT(bufaddr != NULL);
21756 21756 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
21757 21757 (cdbsize == CDB_GROUP2));
21758 21758
21759 21759 SD_TRACE(SD_LOG_IO, un,
21760 21760 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);
21761 21761
21762 21762 bzero(&cdb, sizeof (cdb));
21763 21763 bzero(&ucmd_buf, sizeof (ucmd_buf));
21764 21764 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21765 21765
21766 21766 /* Set the PF bit for many third party drives */
21767 21767 cdb.cdb_opaque[1] = 0x10;
21768 21768
21769 21769 /* Set the savepage(SP) bit if given */
21770 21770 if (save_page == SD_SAVE_PAGE) {
21771 21771 cdb.cdb_opaque[1] |= 0x01;
21772 21772 }
21773 21773
21774 21774 if (cdbsize == CDB_GROUP0) {
21775 21775 cdb.scc_cmd = SCMD_MODE_SELECT;
21776 21776 FORMG0COUNT(&cdb, buflen);
21777 21777 } else {
21778 21778 cdb.scc_cmd = SCMD_MODE_SELECT_G1;
21779 21779 FORMG1COUNT(&cdb, buflen);
21780 21780 }
21781 21781
21782 21782 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
21783 21783
21784 21784 ucmd_buf.uscsi_cdb = (char *)&cdb;
21785 21785 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
21786 21786 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
21787 21787 ucmd_buf.uscsi_buflen = buflen;
21788 21788 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21789 21789 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21790 21790 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
21791 21791 ucmd_buf.uscsi_timeout = 60;
21792 21792
21793 21793 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21794 21794 UIO_SYSSPACE, path_flag);
21795 21795
21796 21796 switch (status) {
21797 21797 case 0:
21798 21798 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21799 21799 break; /* Success! */
21800 21800 case EIO:
21801 21801 switch (ucmd_buf.uscsi_status) {
21802 21802 case STATUS_RESERVATION_CONFLICT:
21803 21803 status = EACCES;
21804 21804 break;
21805 21805 default:
21806 21806 break;
21807 21807 }
21808 21808 break;
21809 21809 default:
21810 21810 break;
21811 21811 }
21812 21812
21813 21813 if (status == 0) {
21814 21814 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
21815 21815 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
21816 21816 }
21817 21817 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");
21818 21818
21819 21819 return (status);
21820 21820 }
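/*
 * Illustrative note: callers typically pair this routine with
 * sd_send_scsi_MODE_SENSE() in a read-modify-write sequence: fetch the
 * current page, flip only the bits of interest, then write the page
 * back, passing SD_SAVE_PAGE when the change should persist across
 * power cycles. The PF bit set above tells the target that the data
 * conforms to the standard page format.
 */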
21821 21821
21822 21822
21823 21823 /*
21824 21824 * Function: sd_send_scsi_RDWR
21825 21825 *
21826 21826 * Description: Issue a scsi READ or WRITE command with the given parameters.
21827 21827 *
21828 21828 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21829 21829 * structure for this target.
21830 21830 * cmd: SCMD_READ or SCMD_WRITE
21831 21831 * bufaddr: Address of caller's buffer to receive the RDWR data
21832 21832  *		buflen:  Length of caller's buffer to receive the RDWR data.
21833 21833 * start_block: Block number for the start of the RDWR operation.
21834 21834 * (Assumes target-native block size.)
21835 21835  *		residp:  Pointer to variable to receive the residual of the
21836 21836  *			RDWR operation (may be NULL if no residual is requested).
21837 21837 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21838 21838 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21839 21839 * to use the USCSI "direct" chain and bypass the normal
21840 21840 * command waitq.
21841 21841 *
21842 21842 * Return Code: 0 - Success
21843 21843 * errno return code from sd_ssc_send()
21844 21844 *
21845 21845 * Context: Can sleep. Does not return until command is completed.
21846 21846 */
21847 21847
21848 21848 static int
21849 21849 sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
21850 21850 size_t buflen, daddr_t start_block, int path_flag)
21851 21851 {
21852 21852 struct scsi_extended_sense sense_buf;
21853 21853 union scsi_cdb cdb;
21854 21854 struct uscsi_cmd ucmd_buf;
21855 21855 uint32_t block_count;
21856 21856 int status;
21857 21857 int cdbsize;
21858 21858 uchar_t flag;
21859 21859 struct sd_lun *un;
21860 21860
21861 21861 ASSERT(ssc != NULL);
21862 21862 un = ssc->ssc_un;
21863 21863 ASSERT(un != NULL);
21864 21864 ASSERT(!mutex_owned(SD_MUTEX(un)));
21865 21865 ASSERT(bufaddr != NULL);
21866 21866 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE));
21867 21867
21868 21868 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un);
21869 21869
21870 21870 if (un->un_f_tgt_blocksize_is_valid != TRUE) {
21871 21871 return (EINVAL);
21872 21872 }
21873 21873
21874 21874 mutex_enter(SD_MUTEX(un));
21875 21875 block_count = SD_BYTES2TGTBLOCKS(un, buflen);
21876 21876 mutex_exit(SD_MUTEX(un));
21877 21877
21878 21878 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE;
21879 21879
21880 21880 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: "
21881 21881 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n",
21882 21882 bufaddr, buflen, start_block, block_count);
21883 21883
21884 21884 bzero(&cdb, sizeof (cdb));
21885 21885 bzero(&ucmd_buf, sizeof (ucmd_buf));
21886 21886 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21887 21887
21888 21888 /* Compute CDB size to use */
21889 21889 if (start_block > 0xffffffff)
21890 21890 cdbsize = CDB_GROUP4;
21891 21891 else if ((start_block & 0xFFE00000) ||
21892 21892 (un->un_f_cfg_is_atapi == TRUE))
21893 21893 cdbsize = CDB_GROUP1;
21894 21894 else
21895 21895 cdbsize = CDB_GROUP0;
21896 21896
21897 21897 switch (cdbsize) {
21898 21898 case CDB_GROUP0: /* 6-byte CDBs */
21899 21899 cdb.scc_cmd = cmd;
21900 21900 FORMG0ADDR(&cdb, start_block);
21901 21901 FORMG0COUNT(&cdb, block_count);
21902 21902 break;
21903 21903 case CDB_GROUP1: /* 10-byte CDBs */
21904 21904 cdb.scc_cmd = cmd | SCMD_GROUP1;
21905 21905 FORMG1ADDR(&cdb, start_block);
21906 21906 FORMG1COUNT(&cdb, block_count);
21907 21907 break;
21908 21908 case CDB_GROUP4: /* 16-byte CDBs */
21909 21909 cdb.scc_cmd = cmd | SCMD_GROUP4;
21910 21910 FORMG4LONGADDR(&cdb, (uint64_t)start_block);
21911 21911 FORMG4COUNT(&cdb, block_count);
21912 21912 break;
21913 21913 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */
21914 21914 default:
21915 21915 /* All others reserved */
21916 21916 return (EINVAL);
21917 21917 }
21918 21918
21919 21919 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */
21920 21920 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
21921 21921
21922 21922 ucmd_buf.uscsi_cdb = (char *)&cdb;
21923 21923 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
21924 21924 ucmd_buf.uscsi_bufaddr = bufaddr;
21925 21925 ucmd_buf.uscsi_buflen = buflen;
21926 21926 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21927 21927 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21928 21928 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT;
21929 21929 ucmd_buf.uscsi_timeout = 60;
21930 21930 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21931 21931 UIO_SYSSPACE, path_flag);
21932 21932
21933 21933 switch (status) {
21934 21934 case 0:
21935 21935 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21936 21936 break; /* Success! */
21937 21937 case EIO:
21938 21938 switch (ucmd_buf.uscsi_status) {
21939 21939 case STATUS_RESERVATION_CONFLICT:
21940 21940 status = EACCES;
21941 21941 break;
21942 21942 default:
21943 21943 break;
21944 21944 }
21945 21945 break;
21946 21946 default:
21947 21947 break;
21948 21948 }
21949 21949
21950 21950 if (status == 0) {
21951 21951 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data",
21952 21952 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
21953 21953 }
21954 21954
21955 21955 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n");
21956 21956
21957 21957 return (status);
21958 21958 }
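/*
 * Illustrative note on the CDB-size selection above: a 6-byte (Group 0)
 * READ/WRITE CDB carries only a 21-bit LBA, hence the 0xFFE00000 mask;
 * e.g. block 0x200000 no longer fits and forces a 10-byte (Group 1)
 * CDB, while any LBA above 0xFFFFFFFF requires the 16-byte (Group 4)
 * form. ATAPI devices are steered to Group 1 unconditionally because
 * they do not implement the 6-byte commands.
 */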
21959 21959
21960 21960
21961 21961 /*
21962 21962 * Function: sd_send_scsi_LOG_SENSE
21963 21963 *
21964 21964 * Description: Issue a scsi LOG_SENSE command with the given parameters.
21965 21965 *
21966 21966 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21967 21967 * structure for this target.
21968 21968 *
21969 21969 * Return Code: 0 - Success
21970 21970 * errno return code from sd_ssc_send()
21971 21971 *
21972 21972 * Context: Can sleep. Does not return until command is completed.
21973 21973 */
21974 21974
21975 21975 static int
21976 21976 sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen,
21977 21977 uchar_t page_code, uchar_t page_control, uint16_t param_ptr,
21978 21978 int path_flag)
21979 21979
21980 21980 {
21981 21981 struct scsi_extended_sense sense_buf;
21982 21982 union scsi_cdb cdb;
21983 21983 struct uscsi_cmd ucmd_buf;
21984 21984 int status;
21985 21985 struct sd_lun *un;
21986 21986
21987 21987 ASSERT(ssc != NULL);
21988 21988 un = ssc->ssc_un;
21989 21989 ASSERT(un != NULL);
21990 21990 ASSERT(!mutex_owned(SD_MUTEX(un)));
21991 21991
21992 21992 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un);
21993 21993
21994 21994 bzero(&cdb, sizeof (cdb));
21995 21995 bzero(&ucmd_buf, sizeof (ucmd_buf));
21996 21996 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21997 21997
21998 21998 cdb.scc_cmd = SCMD_LOG_SENSE_G1;
21999 21999 cdb.cdb_opaque[2] = (page_control << 6) | page_code;
22000 22000 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8);
22001 22001 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF);
22002 22002 FORMG1COUNT(&cdb, buflen);
22003 22003
22004 22004 ucmd_buf.uscsi_cdb = (char *)&cdb;
22005 22005 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
22006 22006 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
22007 22007 ucmd_buf.uscsi_buflen = buflen;
22008 22008 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
22009 22009 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
22010 22010 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
22011 22011 ucmd_buf.uscsi_timeout = 60;
22012 22012
22013 22013 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
22014 22014 UIO_SYSSPACE, path_flag);
22015 22015
22016 22016 switch (status) {
22017 22017 case 0:
22018 22018 break;
22019 22019 case EIO:
22020 22020 switch (ucmd_buf.uscsi_status) {
22021 22021 case STATUS_RESERVATION_CONFLICT:
22022 22022 status = EACCES;
22023 22023 break;
22024 22024 case STATUS_CHECK:
22025 22025 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
22026 22026 (scsi_sense_key((uint8_t *)&sense_buf) ==
22027 22027 KEY_ILLEGAL_REQUEST) &&
22028 22028 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) {
22029 22029 /*
22030 22030 * ASC 0x24: INVALID FIELD IN CDB
22031 22031 */
22032 22032 switch (page_code) {
22033 22033 case START_STOP_CYCLE_PAGE:
22034 22034 /*
22035 22035 * The start stop cycle counter is
22036 22036 * implemented as page 0x31 in earlier
22037 22037 * generation disks. In new generation
22038 22038 * disks the start stop cycle counter is
22039 22039 * implemented as page 0xE. To properly
22040 22040 * handle this case if an attempt for
22041 22041 * log page 0xE is made and fails we
22042 22042 * will try again using page 0x31.
22043 22043 *
22044 22044 * Network storage BU committed to
22045 22045 * maintain the page 0x31 for this
22046 22046 * purpose and will not have any other
22047 22047 * page implemented with page code 0x31
22048 22048 * until all disks transition to the
22049 22049 * standard page.
22050 22050 */
22051 22051 mutex_enter(SD_MUTEX(un));
22052 22052 un->un_start_stop_cycle_page =
22053 22053 START_STOP_CYCLE_VU_PAGE;
22054 22054 cdb.cdb_opaque[2] =
22055 22055 (char)(page_control << 6) |
22056 22056 un->un_start_stop_cycle_page;
22057 22057 mutex_exit(SD_MUTEX(un));
22058 22058 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
22059 22059 status = sd_ssc_send(
22060 22060 ssc, &ucmd_buf, FKIOCTL,
22061 22061 UIO_SYSSPACE, path_flag);
22062 22062
22063 22063 break;
22064 22064 case TEMPERATURE_PAGE:
22065 22065 status = ENOTTY;
22066 22066 break;
22067 22067 default:
22068 22068 break;
22069 22069 }
22070 22070 }
22071 22071 break;
22072 22072 default:
22073 22073 break;
22074 22074 }
22075 22075 break;
22076 22076 default:
22077 22077 break;
22078 22078 }
22079 22079
22080 22080 if (status == 0) {
22081 22081 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
22082 22082 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data",
22083 22083 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
22084 22084 }
22085 22085
22086 22086 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n");
22087 22087
22088 22088 return (status);
22089 22089 }
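/*
 * Usage sketch (illustrative only): reading the temperature log page
 * with cumulative values (page_control 1, starting at parameter 0):
 *
 *	uchar_t logbuf[256];	(hypothetical caller-sized buffer)
 *
 *	status = sd_send_scsi_LOG_SENSE(ssc, logbuf, sizeof (logbuf),
 *	    TEMPERATURE_PAGE, 1, 0, SD_PATH_DIRECT);
 *
 * For START_STOP_CYCLE_PAGE requests, the CHECK CONDITION handling
 * above retries transparently with the vendor-unique page 0x31 on
 * older disks.
 */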
22090 22090
22091 22091
22092 22092 /*
22093 22093 * Function: sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
22094 22094 *
22095 22095 * Description: Issue the scsi GET EVENT STATUS NOTIFICATION command.
22096 22096 *
22097 22097 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
22098 22098 * structure for this target.
22099 22099 * bufaddr
22100 22100 * buflen
22101 22101 * class_req
22102 22102 *
22103 22103 * Return Code: 0 - Success
22104 22104 * errno return code from sd_ssc_send()
22105 22105 *
22106 22106 * Context: Can sleep. Does not return until command is completed.
22107 22107 */
22108 22108
22109 22109 static int
22110 22110 sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc, uchar_t *bufaddr,
22111 22111 size_t buflen, uchar_t class_req)
22112 22112 {
22113 22113 union scsi_cdb cdb;
22114 22114 struct uscsi_cmd ucmd_buf;
22115 22115 int status;
22116 22116 struct sd_lun *un;
22117 22117
22118 22118 ASSERT(ssc != NULL);
22119 22119 un = ssc->ssc_un;
22120 22120 ASSERT(un != NULL);
22121 22121 ASSERT(!mutex_owned(SD_MUTEX(un)));
22122 22122 ASSERT(bufaddr != NULL);
22123 22123
22124 22124 SD_TRACE(SD_LOG_IO, un,
22125 22125 "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: entry: un:0x%p\n", un);
22126 22126
22127 22127 bzero(&cdb, sizeof (cdb));
22128 22128 bzero(&ucmd_buf, sizeof (ucmd_buf));
22129 22129 bzero(bufaddr, buflen);
22130 22130
22131 22131 cdb.scc_cmd = SCMD_GET_EVENT_STATUS_NOTIFICATION;
22132 22132 cdb.cdb_opaque[1] = 1; /* polled */
22133 22133 cdb.cdb_opaque[4] = class_req;
22134 22134 FORMG1COUNT(&cdb, buflen);
22135 22135
22136 22136 ucmd_buf.uscsi_cdb = (char *)&cdb;
22137 22137 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
22138 22138 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
22139 22139 ucmd_buf.uscsi_buflen = buflen;
22140 22140 ucmd_buf.uscsi_rqbuf = NULL;
22141 22141 ucmd_buf.uscsi_rqlen = 0;
22142 22142 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
22143 22143 ucmd_buf.uscsi_timeout = 60;
22144 22144
22145 22145 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
22146 22146 UIO_SYSSPACE, SD_PATH_DIRECT);
22147 22147
22148 22148 /*
22149 22149 	 * Only handle status == 0; the upper-level caller
22150 22150 	 * will make a different assessment based on the context.
22151 22151 */
22152 22152 if (status == 0) {
22153 22153 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
22154 22154
22155 22155 if (ucmd_buf.uscsi_resid != 0) {
22156 22156 status = EIO;
22157 22157 }
22158 22158 }
22159 22159
22160 22160 SD_TRACE(SD_LOG_IO, un,
22161 22161 "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: exit\n");
22162 22162
22163 22163 return (status);
22164 22164 }
22165 22165
22166 22166
22167 22167 static boolean_t
22168 22168 sd_gesn_media_data_valid(uchar_t *data)
22169 22169 {
22170 22170 uint16_t len;
22171 22171
22172 22172 	len = (data[0] << 8) | data[1];	/* length field is big-endian */
22173 22173 return ((len >= 6) &&
22174 22174 ((data[2] & SD_GESN_HEADER_NEA) == 0) &&
22175 22175 ((data[2] & SD_GESN_HEADER_CLASS) == SD_GESN_MEDIA_CLASS) &&
22176 22176 ((data[3] & (1 << SD_GESN_MEDIA_CLASS)) != 0));
22177 22177 }
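/*
 * Illustrative note: the checks above follow the MMC event-header
 * layout: bytes 0-1 hold the big-endian event data length (counting the
 * bytes that follow the length field, so len >= 6 means at least one
 * 4-byte media event descriptor is present), byte 2 carries the NEA
 * flag and the notification class, and byte 3 the supported-class mask.
 */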
22178 22178
22179 22179
22180 22180 /*
22181 22181 * Function: sdioctl
22182 22182 *
22183 22183 * Description: Driver's ioctl(9e) entry point function.
22184 22184 *
22185 22185 * Arguments: dev - device number
22186 22186 * cmd - ioctl operation to be performed
22187 22187 * arg - user argument, contains data to be set or reference
22188 22188 * parameter for get
22189 22189 * flag - bit flag, indicating open settings, 32/64 bit type
22190 22190 * cred_p - user credential pointer
22191 22191 * rval_p - calling process return value (OPT)
22192 22192 *
22193 22193 * Return Code: EINVAL
22194 22194 * ENOTTY
22195 22195 * ENXIO
22196 22196 * EIO
22197 22197 * EFAULT
22198 22198 * ENOTSUP
22199 22199 * EPERM
22200 22200 *
22201 22201 * Context: Called from the device switch at normal priority.
22202 22202 */
22203 22203
22204 22204 static int
22205 22205 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p)
22206 22206 {
22207 22207 struct sd_lun *un = NULL;
22208 22208 int err = 0;
22209 22209 int i = 0;
22210 22210 cred_t *cr;
22211 22211 int tmprval = EINVAL;
22212 22212 boolean_t is_valid;
22213 22213 sd_ssc_t *ssc;
22214 22214
22215 22215 /*
22216 22216 * All device accesses go thru sdstrategy where we check on suspend
22217 22217 * status
22218 22218 */
22219 22219 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
22220 22220 return (ENXIO);
22221 22221 }
22222 22222
22223 22223 ASSERT(!mutex_owned(SD_MUTEX(un)));
22224 22224
22225 22225 /* Initialize sd_ssc_t for internal uscsi commands */
22226 22226 ssc = sd_ssc_init(un);
22227 22227
22228 22228 is_valid = SD_IS_VALID_LABEL(un);
22229 22229
22230 22230 /*
22231 22231 * Moved this wait from sd_uscsi_strategy to here for
22232 22232 * reasons of deadlock prevention. Internal driver commands,
22233 22233 	 * specifically those to change a device's power level, result
22234 22234 * in a call to sd_uscsi_strategy.
22235 22235 */
22236 22236 mutex_enter(SD_MUTEX(un));
22237 22237 while ((un->un_state == SD_STATE_SUSPENDED) ||
22238 22238 (un->un_state == SD_STATE_PM_CHANGING)) {
22239 22239 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
22240 22240 }
22241 22241 /*
22242 22242 * Twiddling the counter here protects commands from now
22243 22243 * through to the top of sd_uscsi_strategy. Without the
22244 22244 	 * counter increment, a power down, for example, could get in
22245 22245 * after the above check for state is made and before
22246 22246 * execution gets to the top of sd_uscsi_strategy.
22247 22247 * That would cause problems.
22248 22248 */
22249 22249 un->un_ncmds_in_driver++;
22250 22250
22251 22251 if (!is_valid &&
22252 22252 (flag & (FNDELAY | FNONBLOCK))) {
22253 22253 switch (cmd) {
22254 22254 case DKIOCGGEOM: /* SD_PATH_DIRECT */
22255 22255 case DKIOCGVTOC:
22256 22256 case DKIOCGEXTVTOC:
22257 22257 case DKIOCGAPART:
22258 22258 case DKIOCPARTINFO:
22259 22259 case DKIOCEXTPARTINFO:
22260 22260 case DKIOCSGEOM:
22261 22261 case DKIOCSAPART:
22262 22262 case DKIOCGETEFI:
22263 22263 case DKIOCPARTITION:
22264 22264 case DKIOCSVTOC:
22265 22265 case DKIOCSEXTVTOC:
22266 22266 case DKIOCSETEFI:
22267 22267 case DKIOCGMBOOT:
22268 22268 case DKIOCSMBOOT:
22269 22269 case DKIOCG_PHYGEOM:
22270 22270 case DKIOCG_VIRTGEOM:
22271 22271 #if defined(__i386) || defined(__amd64)
22272 22272 case DKIOCSETEXTPART:
22273 22273 #endif
22274 22274 /* let cmlb handle it */
22275 22275 goto skip_ready_valid;
22276 22276
22277 22277 case CDROMPAUSE:
22278 22278 case CDROMRESUME:
22279 22279 case CDROMPLAYMSF:
22280 22280 case CDROMPLAYTRKIND:
22281 22281 case CDROMREADTOCHDR:
22282 22282 case CDROMREADTOCENTRY:
22283 22283 case CDROMSTOP:
22284 22284 case CDROMSTART:
22285 22285 case CDROMVOLCTRL:
22286 22286 case CDROMSUBCHNL:
22287 22287 case CDROMREADMODE2:
22288 22288 case CDROMREADMODE1:
22289 22289 case CDROMREADOFFSET:
22290 22290 case CDROMSBLKMODE:
22291 22291 case CDROMGBLKMODE:
22292 22292 case CDROMGDRVSPEED:
22293 22293 case CDROMSDRVSPEED:
22294 22294 case CDROMCDDA:
22295 22295 case CDROMCDXA:
22296 22296 case CDROMSUBCODE:
22297 22297 if (!ISCD(un)) {
22298 22298 un->un_ncmds_in_driver--;
22299 22299 ASSERT(un->un_ncmds_in_driver >= 0);
22300 22300 mutex_exit(SD_MUTEX(un));
22301 22301 err = ENOTTY;
22302 22302 goto done_without_assess;
22303 22303 }
22304 22304 break;
22305 22305 case FDEJECT:
22306 22306 case DKIOCEJECT:
22307 22307 case CDROMEJECT:
22308 22308 if (!un->un_f_eject_media_supported) {
22309 22309 un->un_ncmds_in_driver--;
22310 22310 ASSERT(un->un_ncmds_in_driver >= 0);
22311 22311 mutex_exit(SD_MUTEX(un));
22312 22312 err = ENOTTY;
22313 22313 goto done_without_assess;
22314 22314 }
22315 22315 break;
22316 22316 case DKIOCFLUSHWRITECACHE:
22317 22317 mutex_exit(SD_MUTEX(un));
22318 22318 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
22319 22319 if (err != 0) {
22320 22320 mutex_enter(SD_MUTEX(un));
22321 22321 un->un_ncmds_in_driver--;
22322 22322 ASSERT(un->un_ncmds_in_driver >= 0);
22323 22323 mutex_exit(SD_MUTEX(un));
22324 22324 err = EIO;
22325 22325 goto done_quick_assess;
22326 22326 }
22327 22327 mutex_enter(SD_MUTEX(un));
22328 22328 /* FALLTHROUGH */
22329 22329 case DKIOCREMOVABLE:
22330 22330 case DKIOCHOTPLUGGABLE:
22331 22331 case DKIOCINFO:
22332 22332 case DKIOCGMEDIAINFO:
22333 22333 case DKIOCGMEDIAINFOEXT:
22334 22334 case DKIOCSOLIDSTATE:
22335 22335 case MHIOCENFAILFAST:
22336 22336 case MHIOCSTATUS:
22337 22337 case MHIOCTKOWN:
22338 22338 case MHIOCRELEASE:
22339 22339 case MHIOCGRP_INKEYS:
22340 22340 case MHIOCGRP_INRESV:
22341 22341 case MHIOCGRP_REGISTER:
22342 22342 case MHIOCGRP_CLEAR:
22343 22343 case MHIOCGRP_RESERVE:
22344 22344 case MHIOCGRP_PREEMPTANDABORT:
22345 22345 case MHIOCGRP_REGISTERANDIGNOREKEY:
22346 22346 case CDROMCLOSETRAY:
22347 22347 case USCSICMD:
22348 22348 goto skip_ready_valid;
22349 22349 default:
22350 22350 break;
22351 22351 }
22352 22352
22353 22353 mutex_exit(SD_MUTEX(un));
22354 22354 err = sd_ready_and_valid(ssc, SDPART(dev));
22355 22355 mutex_enter(SD_MUTEX(un));
22356 22356
22357 22357 if (err != SD_READY_VALID) {
22358 22358 switch (cmd) {
22359 22359 case DKIOCSTATE:
22360 22360 case CDROMGDRVSPEED:
22361 22361 case CDROMSDRVSPEED:
22362 22362 case FDEJECT: /* for eject command */
22363 22363 case DKIOCEJECT:
22364 22364 case CDROMEJECT:
22365 22365 case DKIOCREMOVABLE:
22366 22366 case DKIOCHOTPLUGGABLE:
22367 22367 break;
22368 22368 default:
22369 22369 if (un->un_f_has_removable_media) {
22370 22370 err = ENXIO;
22371 22371 } else {
22372 22372 /* Do not map SD_RESERVED_BY_OTHERS to EIO */
22373 22373 if (err == SD_RESERVED_BY_OTHERS) {
22374 22374 err = EACCES;
22375 22375 } else {
22376 22376 err = EIO;
22377 22377 }
22378 22378 }
22379 22379 un->un_ncmds_in_driver--;
22380 22380 ASSERT(un->un_ncmds_in_driver >= 0);
22381 22381 mutex_exit(SD_MUTEX(un));
22382 22382
22383 22383 goto done_without_assess;
22384 22384 }
22385 22385 }
22386 22386 }
22387 22387
22388 22388 skip_ready_valid:
22389 22389 mutex_exit(SD_MUTEX(un));
22390 22390
22391 22391 switch (cmd) {
22392 22392 case DKIOCINFO:
22393 22393 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n");
22394 22394 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag);
22395 22395 break;
22396 22396
22397 22397 case DKIOCGMEDIAINFO:
22398 22398 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n");
22399 22399 err = sd_get_media_info(dev, (caddr_t)arg, flag);
22400 22400 break;
22401 22401
22402 22402 case DKIOCGMEDIAINFOEXT:
22403 22403 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFOEXT\n");
22404 22404 err = sd_get_media_info_ext(dev, (caddr_t)arg, flag);
22405 22405 break;
22406 22406
22407 22407 case DKIOCGGEOM:
22408 22408 case DKIOCGVTOC:
22409 22409 case DKIOCGEXTVTOC:
22410 22410 case DKIOCGAPART:
22411 22411 case DKIOCPARTINFO:
22412 22412 case DKIOCEXTPARTINFO:
22413 22413 case DKIOCSGEOM:
22414 22414 case DKIOCSAPART:
22415 22415 case DKIOCGETEFI:
22416 22416 case DKIOCPARTITION:
22417 22417 case DKIOCSVTOC:
22418 22418 case DKIOCSEXTVTOC:
22419 22419 case DKIOCSETEFI:
22420 22420 case DKIOCGMBOOT:
22421 22421 case DKIOCSMBOOT:
22422 22422 case DKIOCG_PHYGEOM:
22423 22423 case DKIOCG_VIRTGEOM:
22424 22424 #if defined(__i386) || defined(__amd64)
22425 22425 case DKIOCSETEXTPART:
22426 22426 #endif
22427 22427 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd);
22428 22428
22429 22429 /* TUR should spin up */
22430 22430
22431 22431 if (un->un_f_has_removable_media)
22432 22432 err = sd_send_scsi_TEST_UNIT_READY(ssc,
22433 22433 SD_CHECK_FOR_MEDIA);
22434 22434
22435 22435 else
22436 22436 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
22437 22437
22438 22438 if (err != 0)
22439 22439 goto done_with_assess;
22440 22440
22441 22441 err = cmlb_ioctl(un->un_cmlbhandle, dev,
22442 22442 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT);
22443 22443
22444 22444 if ((err == 0) &&
22445 22445 ((cmd == DKIOCSETEFI) ||
22446 22446 	    (un->un_f_pkstats_enabled &&
22447 22447 	    (cmd == DKIOCSAPART || cmd == DKIOCSVTOC ||
22448 22448 	    cmd == DKIOCSEXTVTOC)))) {
22449 22449
22450 22450 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT,
22451 22451 (void *)SD_PATH_DIRECT);
22452 22452 if ((tmprval == 0) && un->un_f_pkstats_enabled) {
22453 22453 sd_set_pstats(un);
22454 22454 SD_TRACE(SD_LOG_IO_PARTITION, un,
22455 22455 "sd_ioctl: un:0x%p pstats created and "
22456 22456 "set\n", un);
22457 22457 }
22458 22458 }
22459 22459
22460 22460 if ((cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) ||
22461 22461 ((cmd == DKIOCSETEFI) && (tmprval == 0))) {
22462 22462
22463 22463 mutex_enter(SD_MUTEX(un));
22464 22464 if (un->un_f_devid_supported &&
22465 22465 (un->un_f_opt_fab_devid == TRUE)) {
22466 22466 if (un->un_devid == NULL) {
22467 22467 sd_register_devid(ssc, SD_DEVINFO(un),
22468 22468 SD_TARGET_IS_UNRESERVED);
22469 22469 } else {
22470 22470 /*
22471 22471 * The device id for this disk
22472 22472 * has been fabricated. The
22473 22473 * device id must be preserved
22474 22474 * by writing it back out to
22475 22475 * disk.
22476 22476 */
22477 22477 if (sd_write_deviceid(ssc) != 0) {
22478 22478 ddi_devid_free(un->un_devid);
22479 22479 un->un_devid = NULL;
22480 22480 }
22481 22481 }
22482 22482 }
22483 22483 mutex_exit(SD_MUTEX(un));
22484 22484 }
22485 22485
22486 22486 break;
22487 22487
22488 22488 case DKIOCLOCK:
22489 22489 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n");
22490 22490 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
22491 22491 SD_PATH_STANDARD);
22492 22492 goto done_with_assess;
22493 22493
22494 22494 case DKIOCUNLOCK:
22495 22495 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n");
22496 22496 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
22497 22497 SD_PATH_STANDARD);
22498 22498 goto done_with_assess;
22499 22499
22500 22500 case DKIOCSTATE: {
22501 22501 enum dkio_state state;
22502 22502 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n");
22503 22503
22504 22504 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) {
22505 22505 err = EFAULT;
22506 22506 } else {
22507 22507 err = sd_check_media(dev, state);
22508 22508 if (err == 0) {
22509 22509 if (ddi_copyout(&un->un_mediastate, (void *)arg,
22510 22510 sizeof (int), flag) != 0)
22511 22511 err = EFAULT;
22512 22512 }
22513 22513 }
22514 22514 break;
22515 22515 }
22516 22516
22517 22517 case DKIOCREMOVABLE:
22518 22518 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n");
22519 22519 i = un->un_f_has_removable_media ? 1 : 0;
22520 22520 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
22521 22521 err = EFAULT;
22522 22522 } else {
22523 22523 err = 0;
22524 22524 }
22525 22525 break;
22526 22526
22527 22527 case DKIOCSOLIDSTATE:
22528 22528 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSOLIDSTATE\n");
22529 22529 i = un->un_f_is_solid_state ? 1 : 0;
22530 22530 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
22531 22531 err = EFAULT;
22532 22532 } else {
22533 22533 err = 0;
22534 22534 }
22535 22535 break;
22536 22536
22537 22537 case DKIOCHOTPLUGGABLE:
22538 22538 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n");
22539 22539 i = un->un_f_is_hotpluggable ? 1 : 0;
22540 22540 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
22541 22541 err = EFAULT;
22542 22542 } else {
22543 22543 err = 0;
22544 22544 }
22545 22545 break;
22546 22546
22547 22547 case DKIOCREADONLY:
22548 22548 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREADONLY\n");
22549 22549 i = 0;
22550 22550 if ((ISCD(un) && !un->un_f_mmc_writable_media) ||
22551 22551 (sr_check_wp(dev) != 0)) {
22552 22552 i = 1;
22553 22553 }
22554 22554 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
22555 22555 err = EFAULT;
22556 22556 } else {
22557 22557 err = 0;
22558 22558 }
22559 22559 break;
22560 22560
22561 22561 case DKIOCGTEMPERATURE:
22562 22562 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n");
22563 22563 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag);
22564 22564 break;
22565 22565
22566 22566 case MHIOCENFAILFAST:
22567 22567 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n");
22568 22568 if ((err = drv_priv(cred_p)) == 0) {
22569 22569 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag);
22570 22570 }
22571 22571 break;
22572 22572
22573 22573 case MHIOCTKOWN:
22574 22574 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n");
22575 22575 if ((err = drv_priv(cred_p)) == 0) {
22576 22576 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag);
22577 22577 }
22578 22578 break;
22579 22579
22580 22580 case MHIOCRELEASE:
22581 22581 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n");
22582 22582 if ((err = drv_priv(cred_p)) == 0) {
22583 22583 err = sd_mhdioc_release(dev);
22584 22584 }
22585 22585 break;
22586 22586
22587 22587 case MHIOCSTATUS:
22588 22588 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n");
22589 22589 if ((err = drv_priv(cred_p)) == 0) {
22590 22590 switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) {
22591 22591 case 0:
22592 22592 err = 0;
22593 22593 break;
22594 22594 case EACCES:
22595 22595 *rval_p = 1;
22596 22596 err = 0;
22597 22597 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
22598 22598 break;
22599 22599 default:
22600 22600 err = EIO;
22601 22601 goto done_with_assess;
22602 22602 }
22603 22603 }
22604 22604 break;
22605 22605
22606 22606 case MHIOCQRESERVE:
22607 22607 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n");
22608 22608 if ((err = drv_priv(cred_p)) == 0) {
22609 22609 err = sd_reserve_release(dev, SD_RESERVE);
22610 22610 }
22611 22611 break;
22612 22612
22613 22613 case MHIOCREREGISTERDEVID:
22614 22614 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n");
22615 22615 if (drv_priv(cred_p) == EPERM) {
22616 22616 err = EPERM;
22617 22617 } else if (!un->un_f_devid_supported) {
22618 22618 err = ENOTTY;
22619 22619 } else {
22620 22620 err = sd_mhdioc_register_devid(dev);
22621 22621 }
22622 22622 break;
22623 22623
22624 22624 case MHIOCGRP_INKEYS:
22625 22625 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n");
22626 22626 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
22627 22627 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22628 22628 err = ENOTSUP;
22629 22629 } else {
22630 22630 err = sd_mhdioc_inkeys(dev, (caddr_t)arg,
22631 22631 flag);
22632 22632 }
22633 22633 }
22634 22634 break;
22635 22635
22636 22636 case MHIOCGRP_INRESV:
22637 22637 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n");
22638 22638 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
22639 22639 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22640 22640 err = ENOTSUP;
22641 22641 } else {
22642 22642 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag);
22643 22643 }
22644 22644 }
22645 22645 break;
22646 22646
22647 22647 case MHIOCGRP_REGISTER:
22648 22648 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n");
22649 22649 if ((err = drv_priv(cred_p)) != EPERM) {
22650 22650 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22651 22651 err = ENOTSUP;
22652 22652 } else if (arg != NULL) {
22653 22653 mhioc_register_t reg;
22654 22654 				if (ddi_copyin((void *)arg, &reg,
22655 22655 sizeof (mhioc_register_t), flag) != 0) {
22656 22656 err = EFAULT;
22657 22657 } else {
22658 22658 err =
22659 22659 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22660 22660 ssc, SD_SCSI3_REGISTER,
22661 22661 					    (uchar_t *)&reg);
22662 22662 if (err != 0)
22663 22663 goto done_with_assess;
22664 22664 }
22665 22665 }
22666 22666 }
22667 22667 break;
22668 22668
22669 22669 case MHIOCGRP_CLEAR:
22670 22670 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_CLEAR\n");
22671 22671 if ((err = drv_priv(cred_p)) != EPERM) {
22672 22672 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22673 22673 err = ENOTSUP;
22674 22674 } else if (arg != NULL) {
22675 22675 mhioc_register_t reg;
22676 22676 				if (ddi_copyin((void *)arg, &reg,
22677 22677 sizeof (mhioc_register_t), flag) != 0) {
22678 22678 err = EFAULT;
22679 22679 } else {
22680 22680 err =
22681 22681 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22682 22682 ssc, SD_SCSI3_CLEAR,
22683 22683 					    (uchar_t *)&reg);
22684 22684 if (err != 0)
22685 22685 goto done_with_assess;
22686 22686 }
22687 22687 }
22688 22688 }
22689 22689 break;
22690 22690
22691 22691 case MHIOCGRP_RESERVE:
22692 22692 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n");
22693 22693 if ((err = drv_priv(cred_p)) != EPERM) {
22694 22694 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22695 22695 err = ENOTSUP;
22696 22696 } else if (arg != NULL) {
22697 22697 mhioc_resv_desc_t resv_desc;
22698 22698 if (ddi_copyin((void *)arg, &resv_desc,
22699 22699 sizeof (mhioc_resv_desc_t), flag) != 0) {
22700 22700 err = EFAULT;
22701 22701 } else {
22702 22702 err =
22703 22703 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22704 22704 ssc, SD_SCSI3_RESERVE,
22705 22705 (uchar_t *)&resv_desc);
22706 22706 if (err != 0)
22707 22707 goto done_with_assess;
22708 22708 }
22709 22709 }
22710 22710 }
22711 22711 break;
22712 22712
22713 22713 case MHIOCGRP_PREEMPTANDABORT:
22714 22714 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n");
22715 22715 if ((err = drv_priv(cred_p)) != EPERM) {
22716 22716 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22717 22717 err = ENOTSUP;
22718 22718 } else if (arg != NULL) {
22719 22719 mhioc_preemptandabort_t preempt_abort;
22720 22720 if (ddi_copyin((void *)arg, &preempt_abort,
22721 22721 sizeof (mhioc_preemptandabort_t),
22722 22722 flag) != 0) {
22723 22723 err = EFAULT;
22724 22724 } else {
22725 22725 err =
22726 22726 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22727 22727 ssc, SD_SCSI3_PREEMPTANDABORT,
22728 22728 (uchar_t *)&preempt_abort);
22729 22729 if (err != 0)
22730 22730 goto done_with_assess;
22731 22731 }
22732 22732 }
22733 22733 }
22734 22734 break;
22735 22735
22736 22736 case MHIOCGRP_REGISTERANDIGNOREKEY:
22737 22737 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n");
22738 22738 if ((err = drv_priv(cred_p)) != EPERM) {
22739 22739 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22740 22740 err = ENOTSUP;
22741 22741 } else if (arg != NULL) {
22742 22742 mhioc_registerandignorekey_t r_and_i;
22743 22743 if (ddi_copyin((void *)arg, (void *)&r_and_i,
22744 22744 sizeof (mhioc_registerandignorekey_t),
22745 22745 flag) != 0) {
22746 22746 err = EFAULT;
22747 22747 } else {
22748 22748 err =
22749 22749 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22750 22750 ssc, SD_SCSI3_REGISTERANDIGNOREKEY,
22751 22751 (uchar_t *)&r_and_i);
22752 22752 if (err != 0)
22753 22753 goto done_with_assess;
22754 22754 }
22755 22755 }
22756 22756 }
22757 22757 break;
22758 22758
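The MHIOCGRP_* cases above all follow the same pattern: privilege check, rejection with ENOTSUP when SCSI-2 reservations are in effect, copyin of the request structure, then a PERSISTENT RESERVE OUT. As a hedged illustration only (not part of this change; the helper name is invented), a userland caller might register a key like this:

	#include <sys/types.h>
	#include <sys/mhd.h>
	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	/*
	 * Illustrative sketch: register a SCSI-3 persistent reservation
	 * key via MHIOCGRP_REGISTER. Expect EPERM without privilege and
	 * ENOTSUP when the target is using SCSI-2 reservations.
	 */
	int
	register_pr_key(const char *devpath,
	    const uchar_t newkey[MHIOC_RESV_KEY_SIZE])
	{
		mhioc_register_t reg;
		int fd, rv;

		if ((fd = open(devpath, O_RDWR)) < 0)
			return (-1);

		(void) memset(&reg, 0, sizeof (reg));	/* old key: zeros */
		(void) memcpy(reg.newkey.key, newkey, MHIOC_RESV_KEY_SIZE);
		reg.aptpl = B_FALSE;	/* not persistent across power loss */

		rv = ioctl(fd, MHIOCGRP_REGISTER, &reg);
		(void) close(fd);
		return (rv);
	}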
22759 22759 case USCSICMD:
22760 22760 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n");
22761 22761 cr = ddi_get_cred();
22762 22762 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) {
22763 22763 err = EPERM;
22764 22764 } else {
22765 22765 enum uio_seg uioseg;
22766 22766
22767 22767 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE :
22768 22768 UIO_USERSPACE;
22769 22769 if (un->un_f_format_in_progress == TRUE) {
22770 22770 err = EAGAIN;
22771 22771 break;
22772 22772 }
22773 22773
22774 22774 err = sd_ssc_send(ssc,
22775 22775 (struct uscsi_cmd *)arg,
22776 22776 flag, uioseg, SD_PATH_STANDARD);
22777 22777 if (err != 0)
22778 22778 goto done_with_assess;
22779 22779 else
22780 22780 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
22781 22781 }
22782 22782 break;
22783 22783
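USCSICMD passes a caller-built CDB straight through to sd_ssc_send(). A hedged userland sketch (function name invented for illustration) of issuing a TEST UNIT READY this way:

	#include <sys/types.h>
	#include <sys/scsi/impl/uscsi.h>
	#include <string.h>
	#include <unistd.h>

	/*
	 * Illustrative sketch: TEST UNIT READY through USCSICMD.
	 * Requires privilege, and the driver returns EAGAIN while a
	 * format is in progress.
	 */
	int
	tur(int fd)
	{
		struct uscsi_cmd ucmd;
		uchar_t cdb[6] = { 0 };	/* TEST UNIT READY: all-zero CDB */
		char rqbuf[255];

		(void) memset(&ucmd, 0, sizeof (ucmd));
		ucmd.uscsi_cdb = (caddr_t)cdb;
		ucmd.uscsi_cdblen = sizeof (cdb);
		ucmd.uscsi_rqbuf = rqbuf;
		ucmd.uscsi_rqlen = sizeof (rqbuf);
		ucmd.uscsi_flags = USCSI_SILENT | USCSI_RQENABLE;
		ucmd.uscsi_timeout = 30;

		return (ioctl(fd, USCSICMD, &ucmd));
	}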
22784 22784 case CDROMPAUSE:
22785 22785 case CDROMRESUME:
22786 22786 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n");
22787 22787 if (!ISCD(un)) {
22788 22788 err = ENOTTY;
22789 22789 } else {
22790 22790 err = sr_pause_resume(dev, cmd);
22791 22791 }
22792 22792 break;
22793 22793
22794 22794 case CDROMPLAYMSF:
22795 22795 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n");
22796 22796 if (!ISCD(un)) {
22797 22797 err = ENOTTY;
22798 22798 } else {
22799 22799 err = sr_play_msf(dev, (caddr_t)arg, flag);
22800 22800 }
22801 22801 break;
22802 22802
22803 22803 case CDROMPLAYTRKIND:
22804 22804 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n");
22805 22805 #if defined(__i386) || defined(__amd64)
22806 22806 /*
22807 22807 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead
22808 22808 */
22809 22809 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
22810 22810 #else
22811 22811 if (!ISCD(un)) {
22812 22812 #endif
22813 22813 err = ENOTTY;
22814 22814 } else {
22815 22815 err = sr_play_trkind(dev, (caddr_t)arg, flag);
22816 22816 }
22817 22817 break;
22818 22818
22819 22819 case CDROMREADTOCHDR:
22820 22820 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n");
22821 22821 if (!ISCD(un)) {
22822 22822 err = ENOTTY;
22823 22823 } else {
22824 22824 err = sr_read_tochdr(dev, (caddr_t)arg, flag);
22825 22825 }
22826 22826 break;
22827 22827
22828 22828 case CDROMREADTOCENTRY:
22829 22829 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n");
22830 22830 if (!ISCD(un)) {
22831 22831 err = ENOTTY;
22832 22832 } else {
22833 22833 err = sr_read_tocentry(dev, (caddr_t)arg, flag);
22834 22834 }
22835 22835 break;
22836 22836
22837 22837 case CDROMSTOP:
22838 22838 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n");
22839 22839 if (!ISCD(un)) {
22840 22840 err = ENOTTY;
22841 22841 } else {
22842 22842 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
22843 22843 SD_TARGET_STOP, SD_PATH_STANDARD);
22844 22844 goto done_with_assess;
22845 22845 }
22846 22846 break;
22847 22847
22848 22848 case CDROMSTART:
22849 22849 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n");
22850 22850 if (!ISCD(un)) {
22851 22851 err = ENOTTY;
22852 22852 } else {
22853 22853 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
22854 22854 SD_TARGET_START, SD_PATH_STANDARD);
22855 22855 goto done_with_assess;
22856 22856 }
22857 22857 break;
22858 22858
22859 22859 case CDROMCLOSETRAY:
22860 22860 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n");
22861 22861 if (!ISCD(un)) {
22862 22862 err = ENOTTY;
22863 22863 } else {
22864 22864 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
22865 22865 SD_TARGET_CLOSE, SD_PATH_STANDARD);
22866 22866 goto done_with_assess;
22867 22867 }
22868 22868 break;
22869 22869
22870 22870 case FDEJECT: /* for eject command */
22871 22871 case DKIOCEJECT:
22872 22872 case CDROMEJECT:
22873 22873 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n");
22874 22874 if (!un->un_f_eject_media_supported) {
22875 22875 err = ENOTTY;
22876 22876 } else {
22877 22877 err = sr_eject(dev);
22878 22878 }
22879 22879 break;
22880 22880
22881 22881 case CDROMVOLCTRL:
22882 22882 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n");
22883 22883 if (!ISCD(un)) {
22884 22884 err = ENOTTY;
22885 22885 } else {
22886 22886 err = sr_volume_ctrl(dev, (caddr_t)arg, flag);
22887 22887 }
22888 22888 break;
22889 22889
22890 22890 case CDROMSUBCHNL:
22891 22891 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n");
22892 22892 if (!ISCD(un)) {
22893 22893 err = ENOTTY;
22894 22894 } else {
22895 22895 err = sr_read_subchannel(dev, (caddr_t)arg, flag);
22896 22896 }
22897 22897 break;
22898 22898
22899 22899 case CDROMREADMODE2:
22900 22900 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n");
22901 22901 if (!ISCD(un)) {
22902 22902 err = ENOTTY;
22903 22903 } else if (un->un_f_cfg_is_atapi == TRUE) {
22904 22904 /*
22905 22905 * If the drive supports READ CD, use that instead of
22906 22906 * switching the LBA size via a MODE SELECT
22907 22907 * Block Descriptor
22908 22908 */
22909 22909 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
22910 22910 } else {
22911 22911 err = sr_read_mode2(dev, (caddr_t)arg, flag);
22912 22912 }
22913 22913 break;
22914 22914
22915 22915 case CDROMREADMODE1:
22916 22916 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
22917 22917 if (!ISCD(un)) {
22918 22918 err = ENOTTY;
22919 22919 } else {
22920 22920 err = sr_read_mode1(dev, (caddr_t)arg, flag);
22921 22921 }
22922 22922 break;
22923 22923
22924 22924 case CDROMREADOFFSET:
22925 22925 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
22926 22926 if (!ISCD(un)) {
22927 22927 err = ENOTTY;
22928 22928 } else {
22929 22929 err = sr_read_sony_session_offset(dev, (caddr_t)arg,
22930 22930 flag);
22931 22931 }
22932 22932 break;
22933 22933
22934 22934 case CDROMSBLKMODE:
22935 22935 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
22936 22936 /*
22937 22937 		 * There is no means of changing the block size on ATAPI
22938 22938 		 * drives, thus return ENOTTY if the drive type is ATAPI
22939 22939 */
22940 22940 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
22941 22941 err = ENOTTY;
22942 22942 } else if (un->un_f_mmc_cap == TRUE) {
22943 22943
22944 22944 /*
22945 22945 * MMC Devices do not support changing the
22946 22946 * logical block size
22947 22947 *
22948 22948 * Note: EINVAL is being returned instead of ENOTTY to
22949 22949 			 * maintain consistency with the original mmc
22950 22950 * driver update.
22951 22951 */
22952 22952 err = EINVAL;
22953 22953 } else {
22954 22954 mutex_enter(SD_MUTEX(un));
22955 22955 if ((!(un->un_exclopen & (1<<SDPART(dev)))) ||
22956 22956 (un->un_ncmds_in_transport > 0)) {
22957 22957 mutex_exit(SD_MUTEX(un));
22958 22958 err = EINVAL;
22959 22959 } else {
22960 22960 mutex_exit(SD_MUTEX(un));
22961 22961 err = sr_change_blkmode(dev, cmd, arg, flag);
22962 22962 }
22963 22963 }
22964 22964 break;
22965 22965
22966 22966 case CDROMGBLKMODE:
22967 22967 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
22968 22968 if (!ISCD(un)) {
22969 22969 err = ENOTTY;
22970 22970 } else if ((un->un_f_cfg_is_atapi != FALSE) &&
22971 22971 (un->un_f_blockcount_is_valid != FALSE)) {
22972 22972 /*
22973 22973 * Drive is an ATAPI drive so return target block
22974 22974 * size for ATAPI drives since we cannot change the
22975 22975 * blocksize on ATAPI drives. Used primarily to detect
22976 22976 * if an ATAPI cdrom is present.
22977 22977 */
22978 22978 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
22979 22979 sizeof (int), flag) != 0) {
22980 22980 err = EFAULT;
22981 22981 } else {
22982 22982 err = 0;
22983 22983 }
22984 22984
22985 22985 } else {
22986 22986 /*
22987 22987 * Drive supports changing block sizes via a Mode
22988 22988 * Select.
22989 22989 */
22990 22990 err = sr_change_blkmode(dev, cmd, arg, flag);
22991 22991 }
22992 22992 break;
22993 22993
22994 22994 case CDROMGDRVSPEED:
22995 22995 case CDROMSDRVSPEED:
22996 22996 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
22997 22997 if (!ISCD(un)) {
22998 22998 err = ENOTTY;
22999 22999 } else if (un->un_f_mmc_cap == TRUE) {
23000 23000 /*
23001 23001 * Note: In the future the driver implementation
23002 23002 * for getting and
23003 23003 * setting cd speed should entail:
23004 23004 * 1) If non-mmc try the Toshiba mode page
23005 23005 * (sr_change_speed)
23006 23006 * 2) If mmc but no support for Real Time Streaming try
23007 23007 * the SET CD SPEED (0xBB) command
23008 23008 * (sr_atapi_change_speed)
23009 23009 * 3) If mmc and support for Real Time Streaming
23010 23010 * try the GET PERFORMANCE and SET STREAMING
23011 23011 * commands (not yet implemented, 4380808)
23012 23012 */
23013 23013 /*
23014 23014 * As per recent MMC spec, CD-ROM speed is variable
23015 23015 * and changes with LBA. Since there is no such
23016 23016 			 * thing as drive speed now, fail this ioctl.
23017 23017 *
23018 23018 			 * Note: EINVAL is returned for consistency with the
23019 23019 			 * original implementation, which included support for getting
23020 23020 * the drive speed of mmc devices but not setting
23021 23021 * the drive speed. Thus EINVAL would be returned
23022 23022 * if a set request was made for an mmc device.
23023 23023 * We no longer support get or set speed for
23024 23024 * mmc but need to remain consistent with regard
23025 23025 * to the error code returned.
23026 23026 */
23027 23027 err = EINVAL;
23028 23028 } else if (un->un_f_cfg_is_atapi == TRUE) {
23029 23029 err = sr_atapi_change_speed(dev, cmd, arg, flag);
23030 23030 } else {
23031 23031 err = sr_change_speed(dev, cmd, arg, flag);
23032 23032 }
23033 23033 break;
23034 23034
23035 23035 case CDROMCDDA:
23036 23036 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n");
23037 23037 if (!ISCD(un)) {
23038 23038 err = ENOTTY;
23039 23039 } else {
23040 23040 err = sr_read_cdda(dev, (void *)arg, flag);
23041 23041 }
23042 23042 break;
23043 23043
23044 23044 case CDROMCDXA:
23045 23045 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n");
23046 23046 if (!ISCD(un)) {
23047 23047 err = ENOTTY;
23048 23048 } else {
23049 23049 err = sr_read_cdxa(dev, (caddr_t)arg, flag);
23050 23050 }
23051 23051 break;
23052 23052
23053 23053 case CDROMSUBCODE:
23054 23054 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n");
23055 23055 if (!ISCD(un)) {
23056 23056 err = ENOTTY;
23057 23057 } else {
23058 23058 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag);
23059 23059 }
23060 23060 break;
23061 23061
23062 23062
23063 23063 #ifdef SDDEBUG
23064 23064 /* RESET/ABORTS testing ioctls */
23065 23065 case DKIOCRESET: {
23066 23066 int reset_level;
23067 23067
23068 23068 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) {
23069 23069 err = EFAULT;
23070 23070 } else {
23071 23071 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: "
23072 23072 			    "reset_level = 0x%x\n", reset_level);
23073 23073 if (scsi_reset(SD_ADDRESS(un), reset_level)) {
23074 23074 err = 0;
23075 23075 } else {
23076 23076 err = EIO;
23077 23077 }
23078 23078 }
23079 23079 break;
23080 23080 }
23081 23081
23082 23082 case DKIOCABORT:
23083 23083 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n");
23084 23084 if (scsi_abort(SD_ADDRESS(un), NULL)) {
23085 23085 err = 0;
23086 23086 } else {
23087 23087 err = EIO;
23088 23088 }
23089 23089 break;
23090 23090 #endif
23091 23091
23092 23092 #ifdef SD_FAULT_INJECTION
23093 23093 /* SDIOC FaultInjection testing ioctls */
23094 23094 case SDIOCSTART:
23095 23095 case SDIOCSTOP:
23096 23096 case SDIOCINSERTPKT:
23097 23097 case SDIOCINSERTXB:
23098 23098 case SDIOCINSERTUN:
23099 23099 case SDIOCINSERTARQ:
23100 23100 case SDIOCPUSH:
23101 23101 case SDIOCRETRIEVE:
23102 23102 case SDIOCRUN:
23103 23103 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:"
23104 23104 "SDIOC detected cmd:0x%X:\n", cmd);
23105 23105 /* call error generator */
23106 23106 sd_faultinjection_ioctl(cmd, arg, un);
23107 23107 err = 0;
23108 23108 break;
23109 23109
23110 23110 #endif /* SD_FAULT_INJECTION */
23111 23111
23112 23112 case DKIOCFLUSHWRITECACHE:
23113 23113 {
23114 23114 struct dk_callback *dkc = (struct dk_callback *)arg;
23115 23115
23116 23116 mutex_enter(SD_MUTEX(un));
23117 23117 if (!un->un_f_sync_cache_supported ||
23118 23118 !un->un_f_write_cache_enabled) {
23119 23119 err = un->un_f_sync_cache_supported ?
23120 23120 0 : ENOTSUP;
23121 23121 mutex_exit(SD_MUTEX(un));
23122 23122 if ((flag & FKIOCTL) && dkc != NULL &&
23123 23123 dkc->dkc_callback != NULL) {
23124 23124 (*dkc->dkc_callback)(dkc->dkc_cookie,
23125 23125 err);
23126 23126 /*
23127 23127 * Did callback and reported error.
23128 23128 * Since we did a callback, ioctl
23129 23129 * should return 0.
23130 23130 */
23131 23131 err = 0;
23132 23132 }
23133 23133 break;
23134 23134 }
23135 23135 mutex_exit(SD_MUTEX(un));
23136 23136
23137 23137 if ((flag & FKIOCTL) && dkc != NULL &&
23138 23138 dkc->dkc_callback != NULL) {
23139 23139 /* async SYNC CACHE request */
23140 23140 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
23141 23141 } else {
23142 23142 /* synchronous SYNC CACHE request */
23143 23143 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL);
23144 23144 }
23145 23145 }
23146 23146 break;
23147 23147
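Note that the dk_callback path above is honored only for in-kernel (FKIOCTL) callers; a userland caller always gets the synchronous SYNCHRONIZE CACHE behavior. A hedged sketch (helper name invented, not part of this change):

	#include <sys/dkio.h>
	#include <unistd.h>

	/*
	 * Illustrative sketch: synchronous write-cache flush from
	 * userland. Fails with ENOTSUP if the device cannot sync its
	 * cache; returns 0 immediately if the write cache is disabled.
	 */
	int
	flush_write_cache(int fd)
	{
		return (ioctl(fd, DKIOCFLUSHWRITECACHE, NULL));
	}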
23148 23148 case DKIOCGETWCE: {
23149 23149
23150 23150 int wce;
23151 23151
23152 23152 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) {
23153 23153 break;
23154 23154 }
23155 23155
23156 23156 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) {
23157 23157 err = EFAULT;
23158 23158 }
23159 23159 break;
23160 23160 }
23161 23161
23162 23162 case DKIOCSETWCE: {
23163 23163
23164 23164 int wce, sync_supported;
23165 23165 int cur_wce = 0;
23166 23166
23167 23167 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) {
23168 23168 err = EFAULT;
23169 23169 break;
23170 23170 }
23171 23171
23172 23172 /*
23173 23173 * Synchronize multiple threads trying to enable
23174 23174 		 * or disable the cache via the un_wcc_cv
23175 23175 * condition variable.
23176 23176 */
23177 23177 mutex_enter(SD_MUTEX(un));
23178 23178
23179 23179 /*
23180 23180 * Don't allow the cache to be enabled if the
23181 23181 * config file has it disabled.
23182 23182 */
23183 23183 if (un->un_f_opt_disable_cache && wce) {
23184 23184 mutex_exit(SD_MUTEX(un));
23185 23185 err = EINVAL;
23186 23186 break;
23187 23187 }
23188 23188
23189 23189 /*
23190 23190 * Wait for write cache change in progress
23191 23191 * bit to be clear before proceeding.
23192 23192 */
23193 23193 while (un->un_f_wcc_inprog)
23194 23194 cv_wait(&un->un_wcc_cv, SD_MUTEX(un));
23195 23195
23196 23196 un->un_f_wcc_inprog = 1;
23197 23197
23198 23198 mutex_exit(SD_MUTEX(un));
23199 23199
23200 23200 /*
23201 23201 * Get the current write cache state
23202 23202 */
23203 23203 if ((err = sd_get_write_cache_enabled(ssc, &cur_wce)) != 0) {
23204 23204 mutex_enter(SD_MUTEX(un));
23205 23205 un->un_f_wcc_inprog = 0;
23206 23206 cv_broadcast(&un->un_wcc_cv);
23207 23207 mutex_exit(SD_MUTEX(un));
23208 23208 break;
23209 23209 }
23210 23210
23211 23211 mutex_enter(SD_MUTEX(un));
23212 23212 un->un_f_write_cache_enabled = (cur_wce != 0);
23213 23213
23214 23214 if (un->un_f_write_cache_enabled && wce == 0) {
23215 23215 /*
23216 23216 * Disable the write cache. Don't clear
23217 23217 * un_f_write_cache_enabled until after
23218 23218 * the mode select and flush are complete.
23219 23219 */
23220 23220 sync_supported = un->un_f_sync_cache_supported;
23221 23221
23222 23222 /*
23223 23223 * If cache flush is suppressed, we assume that the
23224 23224 * controller firmware will take care of managing the
23225 23225 * write cache for us: no need to explicitly
23226 23226 * disable it.
23227 23227 */
23228 23228 if (!un->un_f_suppress_cache_flush) {
23229 23229 mutex_exit(SD_MUTEX(un));
23230 23230 if ((err = sd_cache_control(ssc,
23231 23231 SD_CACHE_NOCHANGE,
23232 23232 SD_CACHE_DISABLE)) == 0 &&
23233 23233 sync_supported) {
23234 23234 err = sd_send_scsi_SYNCHRONIZE_CACHE(un,
23235 23235 NULL);
23236 23236 }
23237 23237 } else {
23238 23238 mutex_exit(SD_MUTEX(un));
23239 23239 }
23240 23240
23241 23241 mutex_enter(SD_MUTEX(un));
23242 23242 if (err == 0) {
23243 23243 un->un_f_write_cache_enabled = 0;
23244 23244 }
23245 23245
23246 23246 } else if (!un->un_f_write_cache_enabled && wce != 0) {
23247 23247 /*
23248 23248 * Set un_f_write_cache_enabled first, so there is
23249 23249 * no window where the cache is enabled, but the
23250 23250 * bit says it isn't.
23251 23251 */
23252 23252 un->un_f_write_cache_enabled = 1;
23253 23253
23254 23254 /*
23255 23255 * If cache flush is suppressed, we assume that the
23256 23256 * controller firmware will take care of managing the
23257 23257 * write cache for us: no need to explicitly
23258 23258 * enable it.
23259 23259 */
23260 23260 if (!un->un_f_suppress_cache_flush) {
23261 23261 mutex_exit(SD_MUTEX(un));
23262 23262 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE,
23263 23263 SD_CACHE_ENABLE);
23264 23264 } else {
23265 23265 mutex_exit(SD_MUTEX(un));
23266 23266 }
23267 23267
23268 23268 mutex_enter(SD_MUTEX(un));
23269 23269
23270 23270 if (err) {
23271 23271 un->un_f_write_cache_enabled = 0;
23272 23272 }
23273 23273 }
23274 23274
23275 23275 un->un_f_wcc_inprog = 0;
23276 23276 cv_broadcast(&un->un_wcc_cv);
23277 23277 mutex_exit(SD_MUTEX(un));
23278 23278 break;
23279 23279 }
23280 23280
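DKIOCSETWCE serializes concurrent changes with the un_f_wcc_inprog flag and un_wcc_cv, then reconciles un_f_write_cache_enabled with the mode-page state. From userland the ioctl pair is simple; a hedged sketch (helper name invented):

	#include <sys/dkio.h>
	#include <stdio.h>
	#include <unistd.h>

	/*
	 * Illustrative sketch: query then set the write cache enable
	 * bit. DKIOCSETWCE fails with EINVAL if the config file has
	 * the cache pinned off (see above).
	 */
	int
	set_write_cache(int fd, int enable)
	{
		int wce;

		if (ioctl(fd, DKIOCGETWCE, &wce) != 0)
			return (-1);
		(void) printf("write cache currently %s\n",
		    wce ? "on" : "off");

		return (ioctl(fd, DKIOCSETWCE, &enable));
	}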
23281 23281 default:
23282 23282 err = ENOTTY;
23283 23283 break;
23284 23284 }
23285 23285 mutex_enter(SD_MUTEX(un));
23286 23286 un->un_ncmds_in_driver--;
23287 23287 ASSERT(un->un_ncmds_in_driver >= 0);
23288 23288 mutex_exit(SD_MUTEX(un));
23289 23289
23290 23290
23291 23291 done_without_assess:
23292 23292 sd_ssc_fini(ssc);
23293 23293
23294 23294 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err);
23295 23295 return (err);
23296 23296
23297 23297 done_with_assess:
23298 23298 mutex_enter(SD_MUTEX(un));
23299 23299 un->un_ncmds_in_driver--;
23300 23300 ASSERT(un->un_ncmds_in_driver >= 0);
23301 23301 mutex_exit(SD_MUTEX(un));
23302 23302
23303 23303 done_quick_assess:
23304 23304 if (err != 0)
23305 23305 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23306 23306 /* Uninitialize sd_ssc_t pointer */
23307 23307 sd_ssc_fini(ssc);
23308 23308
23309 23309 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err);
23310 23310 return (err);
23311 23311 }
23312 23312
23313 23313
23314 23314 /*
23315 23315 * Function: sd_dkio_ctrl_info
23316 23316 *
23317 23317 * Description: This routine is the driver entry point for handling controller
23318 23318 * information ioctl requests (DKIOCINFO).
23319 23319 *
23320 23320 * Arguments: dev - the device number
23321 23321 * arg - pointer to user provided dk_cinfo structure
23322 23322 * specifying the controller type and attributes.
23323 23323 * flag - this argument is a pass through to ddi_copyxxx()
23324 23324 * directly from the mode argument of ioctl().
23325 23325 *
23326 23326 * Return Code: 0
23327 23327 * EFAULT
23328 23328 * ENXIO
23329 23329 */
23330 23330
23331 23331 static int
23332 23332 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag)
23333 23333 {
23334 23334 struct sd_lun *un = NULL;
23335 23335 struct dk_cinfo *info;
23336 23336 dev_info_t *pdip;
23337 23337 int lun, tgt;
23338 23338
23339 23339 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23340 23340 return (ENXIO);
23341 23341 }
23342 23342
23343 23343 info = (struct dk_cinfo *)
23344 23344 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP);
23345 23345
23346 23346 switch (un->un_ctype) {
23347 23347 case CTYPE_CDROM:
23348 23348 info->dki_ctype = DKC_CDROM;
23349 23349 break;
23350 23350 default:
23351 23351 info->dki_ctype = DKC_SCSI_CCS;
23352 23352 break;
23353 23353 }
23354 23354 pdip = ddi_get_parent(SD_DEVINFO(un));
23355 23355 info->dki_cnum = ddi_get_instance(pdip);
23356 23356 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) {
23357 23357 (void) strcpy(info->dki_cname, ddi_get_name(pdip));
23358 23358 } else {
23359 23359 (void) strncpy(info->dki_cname, ddi_node_name(pdip),
23360 23360 DK_DEVLEN - 1);
23361 23361 }
23362 23362
23363 23363 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
23364 23364 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0);
23365 23365 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
23366 23366 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0);
23367 23367
23368 23368 /* Unit Information */
23369 23369 info->dki_unit = ddi_get_instance(SD_DEVINFO(un));
23370 23370 info->dki_slave = ((tgt << 3) | lun);
23371 23371 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)),
23372 23372 DK_DEVLEN - 1);
23373 23373 info->dki_flags = DKI_FMTVOL;
23374 23374 info->dki_partition = SDPART(dev);
23375 23375
23376 23376 /* Max Transfer size of this device in blocks */
23377 23377 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize;
23378 23378 info->dki_addr = 0;
23379 23379 info->dki_space = 0;
23380 23380 info->dki_prio = 0;
23381 23381 info->dki_vec = 0;
23382 23382
23383 23383 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) {
23384 23384 kmem_free(info, sizeof (struct dk_cinfo));
23385 23385 return (EFAULT);
23386 23386 } else {
23387 23387 kmem_free(info, sizeof (struct dk_cinfo));
23388 23388 return (0);
23389 23389 }
23390 23390 }
23391 23391
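A hedged userland sketch of the matching DKIOCINFO consumer (helper name invented, not part of this change):

	#include <sys/dkio.h>
	#include <stdio.h>
	#include <unistd.h>

	/*
	 * Illustrative sketch: print the controller/unit data that
	 * sd_dkio_ctrl_info() fills in.
	 */
	int
	print_cinfo(int fd)
	{
		struct dk_cinfo ci;

		if (ioctl(fd, DKIOCINFO, &ci) != 0)
			return (-1);
		(void) printf("ctlr %s%d, drive %s, unit %u, "
		    "partition %d, maxxfer %d blocks\n",
		    ci.dki_cname, (int)ci.dki_cnum, ci.dki_dname,
		    ci.dki_unit, (int)ci.dki_partition,
		    (int)ci.dki_maxtransfer);
		return (0);
	}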
23392 23392 /*
23393 23393 * Function: sd_get_media_info_com
23394 23394 *
23395 23395 * Description: This routine returns the information required to populate
23396 23396 * the fields for the dk_minfo/dk_minfo_ext structures.
23397 23397 *
23398 23398 * Arguments: dev - the device number
23399 23399 * dki_media_type - media_type
23400 23400 * dki_lbsize - logical block size
23401 23401 * dki_capacity - capacity in blocks
23402 23402 * dki_pbsize - physical block size (if requested)
23403 23403 *
23404 23404 * Return Code: 0
23405 23405  *		EACCES
23406 23406 * EFAULT
23407 23407 * ENXIO
23408 23408 * EIO
23409 23409 */
23410 23410 static int
23411 23411 sd_get_media_info_com(dev_t dev, uint_t *dki_media_type, uint_t *dki_lbsize,
23412 23412 diskaddr_t *dki_capacity, uint_t *dki_pbsize)
23413 23413 {
23414 23414 struct sd_lun *un = NULL;
23415 23415 struct uscsi_cmd com;
23416 23416 struct scsi_inquiry *sinq;
23417 23417 u_longlong_t media_capacity;
23418 23418 uint64_t capacity;
23419 23419 uint_t lbasize;
23420 23420 uint_t pbsize;
23421 23421 uchar_t *out_data;
23422 23422 uchar_t *rqbuf;
23423 23423 int rval = 0;
23424 23424 int rtn;
23425 23425 sd_ssc_t *ssc;
23426 23426
23427 23427 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
23428 23428 (un->un_state == SD_STATE_OFFLINE)) {
23429 23429 return (ENXIO);
23430 23430 }
23431 23431
23432 23432 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info_com: entry\n");
23433 23433
23434 23434 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
23435 23435 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
23436 23436 ssc = sd_ssc_init(un);
23437 23437
23438 23438 /* Issue a TUR to determine if the drive is ready with media present */
23439 23439 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA);
23440 23440 if (rval == ENXIO) {
23441 23441 goto done;
23442 23442 } else if (rval != 0) {
23443 23443 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23444 23444 }
23445 23445
23446 23446 /* Now get configuration data */
23447 23447 if (ISCD(un)) {
23448 23448 *dki_media_type = DK_CDROM;
23449 23449
23450 23450 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */
23451 23451 if (un->un_f_mmc_cap == TRUE) {
23452 23452 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf,
23453 23453 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN,
23454 23454 SD_PATH_STANDARD);
23455 23455
23456 23456 if (rtn) {
23457 23457 /*
23458 23458 			 * We ignore all failures for CDs and need to
23459 23459 			 * make the assessment before the processing code
23460 23460 			 * to avoid missing the assessment for FMA.
23461 23461 */
23462 23462 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23463 23463 /*
23464 23464 * Failed for other than an illegal request
23465 23465 * or command not supported
23466 23466 */
23467 23467 if ((com.uscsi_status == STATUS_CHECK) &&
23468 23468 (com.uscsi_rqstatus == STATUS_GOOD)) {
23469 23469 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) ||
23470 23470 (rqbuf[12] != 0x20)) {
23471 23471 rval = EIO;
23472 23472 goto no_assessment;
23473 23473 }
23474 23474 }
23475 23475 } else {
23476 23476 /*
23477 23477 * The GET CONFIGURATION command succeeded
23478 23478 * so set the media type according to the
23479 23479 * returned data
23480 23480 */
23481 23481 *dki_media_type = out_data[6];
23482 23482 *dki_media_type <<= 8;
23483 23483 *dki_media_type |= out_data[7];
23484 23484 }
23485 23485 }
23486 23486 } else {
23487 23487 /*
23488 23488 * The profile list is not available, so we attempt to identify
23489 23489 * the media type based on the inquiry data
23490 23490 */
23491 23491 sinq = un->un_sd->sd_inq;
23492 23492 if ((sinq->inq_dtype == DTYPE_DIRECT) ||
23493 23493 (sinq->inq_dtype == DTYPE_OPTICAL)) {
23494 23494 /* This is a direct access device or optical disk */
23495 23495 *dki_media_type = DK_FIXED_DISK;
23496 23496
23497 23497 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) ||
23498 23498 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) {
23499 23499 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) {
23500 23500 *dki_media_type = DK_ZIP;
23501 23501 } else if (
23502 23502 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) {
23503 23503 *dki_media_type = DK_JAZ;
23504 23504 }
23505 23505 }
23506 23506 } else {
23507 23507 /*
23508 23508 * Not a CD, direct access or optical disk so return
23509 23509 * unknown media
23510 23510 */
23511 23511 *dki_media_type = DK_UNKNOWN;
23512 23512 }
23513 23513 }
23514 23514
23515 23515 /*
23516 23516 * Now read the capacity so we can provide the lbasize,
23517 23517 * pbsize and capacity.
23518 23518 */
23519 23519 if (dki_pbsize && un->un_f_descr_format_supported) {
23520 23520 rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize,
23521 23521 &pbsize, SD_PATH_DIRECT);
23522 23522
23523 23523 /*
23524 23524 * Override the physical blocksize if the instance already
23525 23525 * has a larger value.
23526 23526 */
23527 23527 pbsize = MAX(pbsize, un->un_phy_blocksize);
23528 23528 }
23529 23529
23530 23530 if (dki_pbsize == NULL || rval != 0 ||
23531 23531 !un->un_f_descr_format_supported) {
23532 23532 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
23533 23533 SD_PATH_DIRECT);
23534 23534
23535 23535 switch (rval) {
23536 23536 case 0:
23537 23537 if (un->un_f_enable_rmw &&
23538 23538 un->un_phy_blocksize != 0) {
23539 23539 pbsize = un->un_phy_blocksize;
23540 23540 } else {
23541 23541 pbsize = lbasize;
23542 23542 }
23543 23543 media_capacity = capacity;
23544 23544
23545 23545 /*
23546 23546 * sd_send_scsi_READ_CAPACITY() reports capacity in
23547 23547 * un->un_sys_blocksize chunks. So we need to convert
23548 23548 * it into cap.lbsize chunks.
23549 23549 */
23550 23550 if (un->un_f_has_removable_media) {
23551 23551 media_capacity *= un->un_sys_blocksize;
23552 23552 media_capacity /= lbasize;
23553 23553 }
23554 23554 break;
23555 23555 case EACCES:
23556 23556 rval = EACCES;
23557 23557 goto done;
23558 23558 default:
23559 23559 rval = EIO;
23560 23560 goto done;
23561 23561 }
23562 23562 } else {
23563 23563 if (un->un_f_enable_rmw &&
23564 23564 !ISP2(pbsize % DEV_BSIZE)) {
23565 23565 pbsize = SSD_SECSIZE;
23566 23566 } else if (!ISP2(lbasize % DEV_BSIZE) ||
23567 23567 !ISP2(pbsize % DEV_BSIZE)) {
23568 23568 pbsize = lbasize = DEV_BSIZE;
23569 23569 }
23570 23570 media_capacity = capacity;
23571 23571 }
23572 23572
23573 23573 /*
23574 23574 * If lun is expanded dynamically, update the un structure.
23575 23575 */
23576 23576 mutex_enter(SD_MUTEX(un));
23577 23577 if ((un->un_f_blockcount_is_valid == TRUE) &&
23578 23578 (un->un_f_tgt_blocksize_is_valid == TRUE) &&
23579 23579 (capacity > un->un_blockcount)) {
23580 23580 un->un_f_expnevent = B_FALSE;
23581 23581 sd_update_block_info(un, lbasize, capacity);
23582 23582 }
23583 23583 mutex_exit(SD_MUTEX(un));
23584 23584
23585 23585 *dki_lbsize = lbasize;
23586 23586 *dki_capacity = media_capacity;
23587 23587 if (dki_pbsize)
23588 23588 *dki_pbsize = pbsize;
23589 23589
23590 23590 done:
23591 23591 if (rval != 0) {
23592 23592 if (rval == EIO)
23593 23593 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
23594 23594 else
23595 23595 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23596 23596 }
23597 23597 no_assessment:
23598 23598 sd_ssc_fini(ssc);
23599 23599 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
23600 23600 kmem_free(rqbuf, SENSE_LENGTH);
23601 23601 return (rval);
23602 23602 }
23603 23603
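To make the removable-media capacity conversion above concrete: assuming un_sys_blocksize is 512 and a CD reports an lbasize of 2048, a READ CAPACITY result of 1332000 system blocks becomes media_capacity = 1332000 * 512 / 2048 = 333000 logical blocks, which is the value handed back in dki_capacity.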
23604 23604 /*
23605 23605 * Function: sd_get_media_info
23606 23606 *
23607 23607 * Description: This routine is the driver entry point for handling ioctl
23608 23608 * requests for the media type or command set profile used by the
23609 23609 * drive to operate on the media (DKIOCGMEDIAINFO).
23610 23610 *
23611 23611 * Arguments: dev - the device number
23612 23612 * arg - pointer to user provided dk_minfo structure
23613 23613 * specifying the media type, logical block size and
23614 23614 * drive capacity.
23615 23615 * flag - this argument is a pass through to ddi_copyxxx()
23616 23616 * directly from the mode argument of ioctl().
23617 23617 *
23618 23618 * Return Code: returns the value from sd_get_media_info_com
23619 23619 */
23620 23620 static int
23621 23621 sd_get_media_info(dev_t dev, caddr_t arg, int flag)
23622 23622 {
23623 23623 struct dk_minfo mi;
23624 23624 int rval;
23625 23625
23626 23626 rval = sd_get_media_info_com(dev, &mi.dki_media_type,
23627 23627 &mi.dki_lbsize, &mi.dki_capacity, NULL);
23628 23628
23629 23629 if (rval)
23630 23630 return (rval);
23631 23631 if (ddi_copyout(&mi, arg, sizeof (struct dk_minfo), flag))
23632 23632 rval = EFAULT;
23633 23633 return (rval);
23634 23634 }
23635 23635
23636 23636 /*
23637 23637 * Function: sd_get_media_info_ext
23638 23638 *
23639 23639 * Description: This routine is the driver entry point for handling ioctl
23640 23640 * requests for the media type or command set profile used by the
23641 23641 * drive to operate on the media (DKIOCGMEDIAINFOEXT). The
23642 23642  *		difference between this ioctl and DKIOCGMEDIAINFO is that the
23643 23643  *		return value of this ioctl contains both the logical and the
23644 23644  *		physical block size.
23645 23645 *
23646 23646 *
23647 23647 * Arguments: dev - the device number
23648 23648 * arg - pointer to user provided dk_minfo_ext structure
23649 23649 * specifying the media type, logical block size,
23650 23650 * physical block size and disk capacity.
23651 23651 * flag - this argument is a pass through to ddi_copyxxx()
23652 23652 * directly from the mode argument of ioctl().
23653 23653 *
23654 23654 * Return Code: returns the value from sd_get_media_info_com
23655 23655 */
23656 23656 static int
23657 23657 sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag)
23658 23658 {
23659 23659 struct dk_minfo_ext mie;
23660 23660 int rval = 0;
23661 23661
23662 23662 rval = sd_get_media_info_com(dev, &mie.dki_media_type,
23663 23663 &mie.dki_lbsize, &mie.dki_capacity, &mie.dki_pbsize);
23664 23664
23665 23665 if (rval)
23666 23666 return (rval);
23667 23667 if (ddi_copyout(&mie, arg, sizeof (struct dk_minfo_ext), flag))
23668 23668 rval = EFAULT;
23669 23669 return (rval);
23670 23670
23671 23671 }
23672 23672
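A hedged userland sketch of the DKIOCGMEDIAINFOEXT consumer (helper name invented, not part of this change):

	#include <sys/dkio.h>
	#include <stdio.h>
	#include <unistd.h>

	/*
	 * Illustrative sketch: fetch media type plus logical and
	 * physical block sizes, e.g. to detect 512e/4Kn disks.
	 */
	int
	print_minfo_ext(int fd)
	{
		struct dk_minfo_ext mie;

		if (ioctl(fd, DKIOCGMEDIAINFOEXT, &mie) != 0)
			return (-1);
		(void) printf("media 0x%x, lbsize %u, pbsize %u, "
		    "capacity %llu\n", mie.dki_media_type,
		    mie.dki_lbsize, mie.dki_pbsize,
		    (unsigned long long)mie.dki_capacity);
		return (0);
	}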
23673 23673 /*
23674 23674 * Function: sd_watch_request_submit
23675 23675 *
23676 23676 * Description: Call scsi_watch_request_submit or scsi_mmc_watch_request_submit
23677 23677  *		depending on which is supported by the device.
23678 23678 */
23679 23679 static opaque_t
23680 23680 sd_watch_request_submit(struct sd_lun *un)
23681 23681 {
23682 23682 dev_t dev;
23683 23683
23684 23684 	/* All submissions are unified to use the same device number */
23685 23685 dev = sd_make_device(SD_DEVINFO(un));
23686 23686
23687 23687 if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) {
23688 23688 return (scsi_mmc_watch_request_submit(SD_SCSI_DEVP(un),
23689 23689 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
23690 23690 (caddr_t)dev));
23691 23691 } else {
23692 23692 return (scsi_watch_request_submit(SD_SCSI_DEVP(un),
23693 23693 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
23694 23694 (caddr_t)dev));
23695 23695 }
23696 23696 }
23697 23697
23698 23698
23699 23699 /*
23700 23700 * Function: sd_check_media
23701 23701 *
23702 23702 * Description: This utility routine implements the functionality for the
23703 23703 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the
23704 23704 * driver state changes from that specified by the user
23705 23705 * (inserted or ejected). For example, if the user specifies
23706 23706 * DKIO_EJECTED and the current media state is inserted this
23707 23707 * routine will immediately return DKIO_INSERTED. However, if the
23708 23708 * current media state is not inserted the user thread will be
23709 23709 * blocked until the drive state changes. If DKIO_NONE is specified
23710 23710 * the user thread will block until a drive state change occurs.
23711 23711 *
23712 23712 * Arguments: dev - the device number
23713 23713 * state - user pointer to a dkio_state, updated with the current
23714 23714 * drive state at return.
23715 23715 *
23716 23716 * Return Code: ENXIO
23717 23717 * EIO
23718 23718 * EAGAIN
23719 23719 * EINTR
23720 23720 */
23721 23721
23722 23722 static int
23723 23723 sd_check_media(dev_t dev, enum dkio_state state)
23724 23724 {
23725 23725 struct sd_lun *un = NULL;
23726 23726 enum dkio_state prev_state;
23727 23727 opaque_t token = NULL;
23728 23728 int rval = 0;
23729 23729 sd_ssc_t *ssc;
23730 23730
23731 23731 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23732 23732 return (ENXIO);
23733 23733 }
23734 23734
23735 23735 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n");
23736 23736
23737 23737 ssc = sd_ssc_init(un);
23738 23738
23739 23739 mutex_enter(SD_MUTEX(un));
23740 23740
23741 23741 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: "
23742 23742 "state=%x, mediastate=%x\n", state, un->un_mediastate);
23743 23743
23744 23744 prev_state = un->un_mediastate;
23745 23745
23746 23746 /* is there anything to do? */
23747 23747 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) {
23748 23748 /*
23749 23749 * submit the request to the scsi_watch service;
23750 23750 * scsi_media_watch_cb() does the real work
23751 23751 */
23752 23752 mutex_exit(SD_MUTEX(un));
23753 23753
23754 23754 /*
23755 23755 * This change handles the case where a scsi watch request is
23756 23756 * added to a device that is powered down. To accomplish this
23757 23757 * we power up the device before adding the scsi watch request,
23758 23758 * since the scsi watch sends a TUR directly to the device
23759 23759 * which the device cannot handle if it is powered down.
23760 23760 */
23761 23761 if (sd_pm_entry(un) != DDI_SUCCESS) {
23762 23762 mutex_enter(SD_MUTEX(un));
23763 23763 goto done;
23764 23764 }
23765 23765
23766 23766 token = sd_watch_request_submit(un);
23767 23767
23768 23768 sd_pm_exit(un);
23769 23769
23770 23770 mutex_enter(SD_MUTEX(un));
23771 23771 if (token == NULL) {
23772 23772 rval = EAGAIN;
23773 23773 goto done;
23774 23774 }
23775 23775
23776 23776 /*
23777 23777 * This is a special case IOCTL that doesn't return
23778 23778 * until the media state changes. Routine sdpower
23779 23779 * knows about and handles this so don't count it
23780 23780 * as an active cmd in the driver, which would
23781 23781 * keep the device busy to the pm framework.
23782 23782 * If the count isn't decremented the device can't
23783 23783 * be powered down.
23784 23784 */
23785 23785 un->un_ncmds_in_driver--;
23786 23786 ASSERT(un->un_ncmds_in_driver >= 0);
23787 23787
23788 23788 /*
23789 23789 * if a prior request had been made, this will be the same
23790 23790 * token, as scsi_watch was designed that way.
23791 23791 */
23792 23792 un->un_swr_token = token;
23793 23793 un->un_specified_mediastate = state;
23794 23794
23795 23795 /*
23796 23796 * now wait for media change
23797 23797 		 * we will not be signalled until mediastate != state, but it is
23798 23798 * still better to test for this condition, since there is a
23799 23799 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED
23800 23800 */
23801 23801 SD_TRACE(SD_LOG_COMMON, un,
23802 23802 "sd_check_media: waiting for media state change\n");
23803 23803 while (un->un_mediastate == state) {
23804 23804 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) {
23805 23805 SD_TRACE(SD_LOG_COMMON, un,
23806 23806 "sd_check_media: waiting for media state "
23807 23807 "was interrupted\n");
23808 23808 un->un_ncmds_in_driver++;
23809 23809 rval = EINTR;
23810 23810 goto done;
23811 23811 }
23812 23812 SD_TRACE(SD_LOG_COMMON, un,
23813 23813 "sd_check_media: received signal, state=%x\n",
23814 23814 un->un_mediastate);
23815 23815 }
23816 23816 /*
23817 23817 * Inc the counter to indicate the device once again
23818 23818 * has an active outstanding cmd.
23819 23819 */
23820 23820 un->un_ncmds_in_driver++;
23821 23821 }
23822 23822
23823 23823 /* invalidate geometry */
23824 23824 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) {
23825 23825 sr_ejected(un);
23826 23826 }
23827 23827
23828 23828 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) {
23829 23829 uint64_t capacity;
23830 23830 uint_t lbasize;
23831 23831
23832 23832 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n");
23833 23833 mutex_exit(SD_MUTEX(un));
23834 23834 /*
23835 23835 * Since the following routines use SD_PATH_DIRECT, we must
23836 23836 * call PM directly before the upcoming disk accesses. This
23837 23837 		 * may cause the disk to be powered up and spun up.
23838 23838 */
23839 23839
23840 23840 if (sd_pm_entry(un) == DDI_SUCCESS) {
23841 23841 rval = sd_send_scsi_READ_CAPACITY(ssc,
23842 23842 &capacity, &lbasize, SD_PATH_DIRECT);
23843 23843 if (rval != 0) {
23844 23844 sd_pm_exit(un);
23845 23845 if (rval == EIO)
23846 23846 sd_ssc_assessment(ssc,
23847 23847 SD_FMT_STATUS_CHECK);
23848 23848 else
23849 23849 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23850 23850 mutex_enter(SD_MUTEX(un));
23851 23851 goto done;
23852 23852 }
23853 23853 } else {
23854 23854 rval = EIO;
23855 23855 mutex_enter(SD_MUTEX(un));
23856 23856 goto done;
23857 23857 }
23858 23858 mutex_enter(SD_MUTEX(un));
23859 23859
23860 23860 sd_update_block_info(un, lbasize, capacity);
23861 23861
23862 23862 /*
23863 23863 * Check if the media in the device is writable or not
23864 23864 */
23865 23865 if (ISCD(un)) {
23866 23866 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
23867 23867 }
23868 23868
23869 23869 mutex_exit(SD_MUTEX(un));
23870 23870 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
23871 23871 if ((cmlb_validate(un->un_cmlbhandle, 0,
23872 23872 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) {
23873 23873 sd_set_pstats(un);
23874 23874 SD_TRACE(SD_LOG_IO_PARTITION, un,
23875 23875 "sd_check_media: un:0x%p pstats created and "
23876 23876 "set\n", un);
23877 23877 }
23878 23878
23879 23879 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
23880 23880 SD_PATH_DIRECT);
23881 23881
23882 23882 sd_pm_exit(un);
23883 23883
23884 23884 if (rval != 0) {
23885 23885 if (rval == EIO)
23886 23886 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
23887 23887 else
23888 23888 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23889 23889 }
23890 23890
23891 23891 mutex_enter(SD_MUTEX(un));
23892 23892 }
23893 23893 done:
23894 23894 sd_ssc_fini(ssc);
23895 23895 un->un_f_watcht_stopped = FALSE;
23896 23896 if (token != NULL && un->un_swr_token != NULL) {
23897 23897 /*
23898 23898 * Use of this local token and the mutex ensures that we avoid
23899 23899 * some race conditions associated with terminating the
23900 23900 * scsi watch.
23901 23901 */
23902 23902 token = un->un_swr_token;
23903 23903 mutex_exit(SD_MUTEX(un));
23904 23904 (void) scsi_watch_request_terminate(token,
23905 23905 SCSI_WATCH_TERMINATE_WAIT);
23906 23906 if (scsi_watch_get_ref_count(token) == 0) {
23907 23907 mutex_enter(SD_MUTEX(un));
23908 23908 un->un_swr_token = (opaque_t)NULL;
23909 23909 } else {
23910 23910 mutex_enter(SD_MUTEX(un));
23911 23911 }
23912 23912 }
23913 23913
23914 23914 /*
23915 23915 	 * Update the capacity kstat value, if there was no media previously
23916 23916 	 * (capacity kstat is 0) and media has been inserted
23917 23917 * (un_f_blockcount_is_valid == TRUE)
23918 23918 */
23919 23919 if (un->un_errstats) {
23920 23920 struct sd_errstats *stp = NULL;
23921 23921
23922 23922 stp = (struct sd_errstats *)un->un_errstats->ks_data;
23923 23923 if ((stp->sd_capacity.value.ui64 == 0) &&
23924 23924 (un->un_f_blockcount_is_valid == TRUE)) {
23925 23925 stp->sd_capacity.value.ui64 =
23926 23926 (uint64_t)((uint64_t)un->un_blockcount *
23927 23927 un->un_sys_blocksize);
23928 23928 }
23929 23929 }
23930 23930 mutex_exit(SD_MUTEX(un));
23931 23931 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n");
23932 23932 return (rval);
23933 23933 }
23934 23934
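sd_check_media() is the kernel half of the classic media-watch loop. A hedged userland sketch of the other half (helper name invented), in the style of volcheck(1):

	#include <sys/dkio.h>
	#include <unistd.h>

	/*
	 * Illustrative sketch: block until media is inserted. Each
	 * ioctl sleeps until the drive state differs from the state
	 * passed in, then copies the new state back out.
	 */
	int
	wait_for_media(int fd)
	{
		enum dkio_state state = DKIO_NONE;

		do {
			if (ioctl(fd, DKIOCSTATE, &state) != 0)
				return (-1);
		} while (state != DKIO_INSERTED);
		return (0);
	}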
23935 23935
23936 23936 /*
23937 23937 * Function: sd_delayed_cv_broadcast
23938 23938 *
23939 23939 * Description: Delayed cv_broadcast to allow for target to recover from media
23940 23940 * insertion.
23941 23941 *
23942 23942 * Arguments: arg - driver soft state (unit) structure
23943 23943 */
23944 23944
23945 23945 static void
23946 23946 sd_delayed_cv_broadcast(void *arg)
23947 23947 {
23948 23948 struct sd_lun *un = arg;
23949 23949
23950 23950 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n");
23951 23951
23952 23952 mutex_enter(SD_MUTEX(un));
23953 23953 un->un_dcvb_timeid = NULL;
23954 23954 cv_broadcast(&un->un_state_cv);
23955 23955 mutex_exit(SD_MUTEX(un));
23956 23956 }
23957 23957
23958 23958
23959 23959 /*
23960 23960 * Function: sd_media_watch_cb
23961 23961 *
23962 23962 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This
23963 23963 * routine processes the TUR sense data and updates the driver
23964 23964 * state if a transition has occurred. The user thread
23965 23965 * (sd_check_media) is then signalled.
23966 23966 *
23967 23967 * Arguments: arg - the device 'dev_t' is used for context to discriminate
23968 23968 * among multiple watches that share this callback function
23969 23969 * resultp - scsi watch facility result packet containing scsi
23970 23970 * packet, status byte and sense data
23971 23971 *
23972 23972 * Return Code: 0 for success, -1 for failure
23973 23973 */
23974 23974
23975 23975 static int
23976 23976 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
23977 23977 {
23978 23978 struct sd_lun *un;
23979 23979 struct scsi_status *statusp = resultp->statusp;
23980 23980 uint8_t *sensep = (uint8_t *)resultp->sensep;
23981 23981 enum dkio_state state = DKIO_NONE;
23982 23982 dev_t dev = (dev_t)arg;
23983 23983 uchar_t actual_sense_length;
23984 23984 uint8_t skey, asc, ascq;
23985 23985
23986 23986 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23987 23987 return (-1);
23988 23988 }
23989 23989 actual_sense_length = resultp->actual_sense_length;
23990 23990
23991 23991 mutex_enter(SD_MUTEX(un));
23992 23992 SD_TRACE(SD_LOG_COMMON, un,
23993 23993 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
23994 23994 *((char *)statusp), (void *)sensep, actual_sense_length);
23995 23995
23996 23996 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
23997 23997 un->un_mediastate = DKIO_DEV_GONE;
23998 23998 cv_broadcast(&un->un_state_cv);
23999 23999 mutex_exit(SD_MUTEX(un));
24000 24000
24001 24001 return (0);
24002 24002 }
24003 24003
24004 24004 if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) {
24005 24005 if (sd_gesn_media_data_valid(resultp->mmc_data)) {
24006 24006 if ((resultp->mmc_data[5] &
24007 24007 SD_GESN_MEDIA_EVENT_STATUS_PRESENT) != 0) {
24008 24008 state = DKIO_INSERTED;
24009 24009 } else {
24010 24010 state = DKIO_EJECTED;
24011 24011 }
24012 24012 if ((resultp->mmc_data[4] & SD_GESN_MEDIA_EVENT_CODE) ==
24013 24013 SD_GESN_MEDIA_EVENT_EJECTREQUEST) {
24014 24014 sd_log_eject_request_event(un, KM_NOSLEEP);
24015 24015 }
24016 24016 }
24017 24017 } else if (sensep != NULL) {
24018 24018 /*
24019 24019 * If there was a check condition then sensep points to valid
24020 24020 * sense data. If status was not a check condition but a
24021 24021 * reservation or busy status then the new state is DKIO_NONE.
24022 24022 */
24023 24023 skey = scsi_sense_key(sensep);
24024 24024 asc = scsi_sense_asc(sensep);
24025 24025 ascq = scsi_sense_ascq(sensep);
24026 24026
24027 24027 SD_INFO(SD_LOG_COMMON, un,
24028 24028 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
24029 24029 skey, asc, ascq);
24030 24030 /* This routine only uses up to 13 bytes of sense data. */
24031 24031 if (actual_sense_length >= 13) {
24032 24032 if (skey == KEY_UNIT_ATTENTION) {
24033 24033 if (asc == 0x28) {
24034 24034 state = DKIO_INSERTED;
24035 24035 }
24036 24036 } else if (skey == KEY_NOT_READY) {
24037 24037 /*
24038 24038 * Sense data of 02/06/00 means that the
24039 24039 * drive could not read the media (No
24040 24040 * reference position found). In this case
24041 24041 * to prevent a hang on the DKIOCSTATE IOCTL
24042 24042 * we set the media state to DKIO_INSERTED.
24043 24043 */
24044 24044 if (asc == 0x06 && ascq == 0x00)
24045 24045 state = DKIO_INSERTED;
24046 24046
24047 24047 /*
24048 24048 				 * Sense data of 02/04/02 means that the
24049 24049 				 * host should send a start command.
24050 24050 				 * Explicitly leave the media state as is
24051 24051 				 * (inserted), since the media is present
24052 24052 				 * and the host has stopped the device
24053 24053 				 * for PM reasons. The next true
24054 24054 				 * read/write to this media will bring
24055 24055 				 * the device to the right state for
24056 24056 				 * media access.
24057 24057 */
24058 24058 if (asc == 0x3a) {
24059 24059 state = DKIO_EJECTED;
24060 24060 } else {
24061 24061 /*
24062 24062 * If the drive is busy with an
24063 24063 * operation or long write, keep the
24064 24064 * media in an inserted state.
24065 24065 */
24066 24066
24067 24067 if ((asc == 0x04) &&
24068 24068 ((ascq == 0x02) ||
24069 24069 (ascq == 0x07) ||
24070 24070 (ascq == 0x08))) {
24071 24071 state = DKIO_INSERTED;
24072 24072 }
24073 24073 }
24074 24074 } else if (skey == KEY_NO_SENSE) {
24075 24075 if ((asc == 0x00) && (ascq == 0x00)) {
24076 24076 /*
24077 24077 * Sense Data 00/00/00 does not provide
24078 24078 * any information about the state of
24079 24079 * the media. Ignore it.
24080 24080 */
24081 24081 mutex_exit(SD_MUTEX(un));
24082 24082 return (0);
24083 24083 }
24084 24084 }
24085 24085 }
24086 24086 } else if ((*((char *)statusp) == STATUS_GOOD) &&
24087 24087 (resultp->pkt->pkt_reason == CMD_CMPLT)) {
24088 24088 state = DKIO_INSERTED;
24089 24089 }
24090 24090
24091 24091 SD_TRACE(SD_LOG_COMMON, un,
24092 24092 "sd_media_watch_cb: state=%x, specified=%x\n",
24093 24093 state, un->un_specified_mediastate);
24094 24094
24095 24095 /*
24096 24096 * now signal the waiting thread if this is *not* the specified state;
24097 24097 * delay the signal if the state is DKIO_INSERTED to allow the target
24098 24098 * to recover
24099 24099 */
24100 24100 if (state != un->un_specified_mediastate) {
24101 24101 un->un_mediastate = state;
24102 24102 if (state == DKIO_INSERTED) {
24103 24103 /*
24104 24104 * delay the signal to give the drive a chance
24105 24105 * to do what it apparently needs to do
24106 24106 */
24107 24107 SD_TRACE(SD_LOG_COMMON, un,
24108 24108 "sd_media_watch_cb: delayed cv_broadcast\n");
24109 24109 if (un->un_dcvb_timeid == NULL) {
24110 24110 un->un_dcvb_timeid =
24111 24111 timeout(sd_delayed_cv_broadcast, un,
24112 24112 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
24113 24113 }
24114 24114 } else {
24115 24115 SD_TRACE(SD_LOG_COMMON, un,
24116 24116 "sd_media_watch_cb: immediate cv_broadcast\n");
24117 24117 cv_broadcast(&un->un_state_cv);
24118 24118 }
24119 24119 }
24120 24120 mutex_exit(SD_MUTEX(un));
24121 24121 return (0);
24122 24122 }
24123 24123
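For reference, the sense-data to dkio_state mapping implemented above, summarized (an editorial gloss of the code above, not new behavior):

	/*
	 * pkt_reason CMD_DEV_GONE                    -> DKIO_DEV_GONE
	 * KEY_UNIT_ATTENTION, ASC 0x28               -> DKIO_INSERTED
	 * KEY_NOT_READY, ASC/ASCQ 0x06/0x00          -> DKIO_INSERTED
	 * KEY_NOT_READY, ASC 0x3a                    -> DKIO_EJECTED
	 * KEY_NOT_READY, ASC 0x04, ASCQ 02/07/08     -> DKIO_INSERTED
	 * KEY_NO_SENSE, ASC/ASCQ 0x00/0x00           -> ignored
	 * STATUS_GOOD with CMD_CMPLT (no sense data) -> DKIO_INSERTED
	 */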
24124 24124
24125 24125 /*
24126 24126 * Function: sd_dkio_get_temp
24127 24127 *
24128 24128 * Description: This routine is the driver entry point for handling ioctl
24129 24129 * requests to get the disk temperature.
24130 24130 *
24131 24131 * Arguments: dev - the device number
24132 24132 * arg - pointer to user provided dk_temperature structure.
24133 24133 * flag - this argument is a pass through to ddi_copyxxx()
24134 24134 * directly from the mode argument of ioctl().
24135 24135 *
24136 24136 * Return Code: 0
24137 24137 * EFAULT
24138 24138 * ENXIO
24139 24139 * EAGAIN
24140 24140 */
24141 24141
24142 24142 static int
24143 24143 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag)
24144 24144 {
24145 24145 struct sd_lun *un = NULL;
24146 24146 struct dk_temperature *dktemp = NULL;
24147 24147 uchar_t *temperature_page;
24148 24148 int rval = 0;
24149 24149 int path_flag = SD_PATH_STANDARD;
24150 24150 sd_ssc_t *ssc;
24151 24151
24152 24152 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24153 24153 return (ENXIO);
24154 24154 }
24155 24155
24156 24156 ssc = sd_ssc_init(un);
24157 24157 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP);
24158 24158
24159 24159 /* copyin the disk temp argument to get the user flags */
24160 24160 if (ddi_copyin((void *)arg, dktemp,
24161 24161 sizeof (struct dk_temperature), flag) != 0) {
24162 24162 rval = EFAULT;
24163 24163 goto done;
24164 24164 }
24165 24165
24166 24166 /* Initialize the temperature to invalid. */
24167 24167 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
24168 24168 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
24169 24169
24170 24170 /*
24171 24171 * Note: Investigate removing the "bypass pm" semantic.
24172 24172 * Can we just bypass PM always?
24173 24173 */
24174 24174 if (dktemp->dkt_flags & DKT_BYPASS_PM) {
24175 24175 path_flag = SD_PATH_DIRECT;
24176 24176 ASSERT(!mutex_owned(&un->un_pm_mutex));
24177 24177 mutex_enter(&un->un_pm_mutex);
24178 24178 if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
24179 24179 /*
24180 24180 * If DKT_BYPASS_PM is set, and the drive happens to be
24181 24181 			 * in low power mode, we cannot wake it up; we need to
24182 24182 			 * return EAGAIN.
24183 24183 */
24184 24184 mutex_exit(&un->un_pm_mutex);
24185 24185 rval = EAGAIN;
24186 24186 goto done;
24187 24187 } else {
24188 24188 /*
24189 24189 * Indicate to PM the device is busy. This is required
24190 24190 * to avoid a race - i.e. the ioctl is issuing a
24191 24191 * command and the pm framework brings down the device
24192 24192 * to low power mode (possible power cut-off on some
24193 24193 * platforms).
24194 24194 */
24195 24195 mutex_exit(&un->un_pm_mutex);
24196 24196 if (sd_pm_entry(un) != DDI_SUCCESS) {
24197 24197 rval = EAGAIN;
24198 24198 goto done;
24199 24199 }
24200 24200 }
24201 24201 }
24202 24202
24203 24203 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP);
24204 24204
24205 24205 rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page,
24206 24206 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag);
24207 24207 if (rval != 0)
24208 24208 goto done2;
24209 24209
24210 24210 /*
24211 24211 * For the current temperature verify that the parameter length is 0x02
24212 24212 * and the parameter code is 0x00
24213 24213 */
24214 24214 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) &&
24215 24215 (temperature_page[5] == 0x00)) {
24216 24216 if (temperature_page[9] == 0xFF) {
24217 24217 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
24218 24218 } else {
24219 24219 dktemp->dkt_cur_temp = (short)(temperature_page[9]);
24220 24220 }
24221 24221 }
24222 24222
24223 24223 /*
24224 24224 * For the reference temperature verify that the parameter
24225 24225 * length is 0x02 and the parameter code is 0x01
24226 24226 */
24227 24227 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) &&
24228 24228 (temperature_page[11] == 0x01)) {
24229 24229 if (temperature_page[15] == 0xFF) {
24230 24230 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
24231 24231 } else {
24232 24232 dktemp->dkt_ref_temp = (short)(temperature_page[15]);
24233 24233 }
24234 24234 }
24235 24235
24236 24236 	/* Do the copyout regardless of the temperature command's status. */
24237 24237 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature),
24238 24238 flag) != 0) {
24239 24239 rval = EFAULT;
24240 24240 goto done1;
24241 24241 }
24242 24242
24243 24243 done2:
24244 24244 if (rval != 0) {
24245 24245 if (rval == EIO)
24246 24246 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
24247 24247 else
24248 24248 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
24249 24249 }
24250 24250 done1:
24251 24251 if (path_flag == SD_PATH_DIRECT) {
24252 24252 sd_pm_exit(un);
24253 24253 }
24254 24254
24255 24255 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE);
24256 24256 done:
24257 24257 sd_ssc_fini(ssc);
24258 24258 if (dktemp != NULL) {
24259 24259 kmem_free(dktemp, sizeof (struct dk_temperature));
24260 24260 }
24261 24261
24262 24262 return (rval);
24263 24263 }
24264 24264
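A hedged userland sketch of the DKIOCGTEMPERATURE consumer (helper name invented); note the cast when testing against DKT_INVALID_TEMP, since the dkt_*_temp fields are shorts:

	#include <sys/dkio.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	/*
	 * Illustrative sketch: read current and reference temperatures.
	 * DKT_BYPASS_PM avoids spinning up a powered-down drive; the
	 * ioctl then fails with EAGAIN if the drive is in low power.
	 */
	int
	print_temp(int fd)
	{
		struct dk_temperature dkt;

		(void) memset(&dkt, 0, sizeof (dkt));
		dkt.dkt_flags = DKT_BYPASS_PM;

		if (ioctl(fd, DKIOCGTEMPERATURE, &dkt) != 0)
			return (-1);
		if (dkt.dkt_cur_temp != (short)DKT_INVALID_TEMP)
			(void) printf("current: %d C\n", dkt.dkt_cur_temp);
		if (dkt.dkt_ref_temp != (short)DKT_INVALID_TEMP)
			(void) printf("reference: %d C\n", dkt.dkt_ref_temp);
		return (0);
	}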
24265 24265
24266 24266 /*
24267 24267 * Function: sd_log_page_supported
24268 24268 *
24269 24269 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of
24270 24270 * supported log pages.
24271 24271 *
24272 24272 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
24273 24273 * structure for this target.
24274 24274  *		log_page - the log page number whose support is being checked.
24275 24275 *
24276 24276 * Return Code: -1 - on error (log sense is optional and may not be supported).
24277 24277 * 0 - log page not found.
24278 24278 * 1 - log page found.
24279 24279 */
24280 24280
24281 24281 static int
24282 24282 sd_log_page_supported(sd_ssc_t *ssc, int log_page)
24283 24283 {
24284 24284 uchar_t *log_page_data;
24285 24285 int i;
24286 24286 int match = 0;
24287 24287 int log_size;
24288 24288 int status = 0;
24289 24289 struct sd_lun *un;
24290 24290
24291 24291 ASSERT(ssc != NULL);
24292 24292 un = ssc->ssc_un;
24293 24293 ASSERT(un != NULL);
24294 24294
24295 24295 log_page_data = kmem_zalloc(0xFF, KM_SLEEP);
24296 24296
24297 24297 status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0,
24298 24298 SD_PATH_DIRECT);
24299 24299
24300 24300 if (status != 0) {
24301 24301 if (status == EIO) {
24302 24302 /*
24303 24303 			 * Some disks do not support LOG SENSE; we
24304 24304 			 * should ignore this kind of error (sense key
24305 24305 			 * 0x5, ILLEGAL REQUEST).
24306 24306 */
24307 24307 uint8_t *sensep;
24308 24308 int senlen;
24309 24309
24310 24310 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
24311 24311 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
24312 24312 ssc->ssc_uscsi_cmd->uscsi_rqresid);
24313 24313
24314 24314 if (senlen > 0 &&
24315 24315 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
24316 24316 sd_ssc_assessment(ssc,
24317 24317 SD_FMT_IGNORE_COMPROMISE);
24318 24318 } else {
24319 24319 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
24320 24320 }
24321 24321 } else {
24322 24322 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
24323 24323 }
24324 24324
24325 24325 SD_ERROR(SD_LOG_COMMON, un,
24326 24326 "sd_log_page_supported: failed log page retrieval\n");
24327 24327 kmem_free(log_page_data, 0xFF);
24328 24328 return (-1);
24329 24329 }
24330 24330
24331 24331 log_size = log_page_data[3];
24332 24332
24333 24333 /*
24334 24334 	 * The list of supported log pages starts at the fourth byte. Check
24335 24335 * until we run out of log pages or a match is found.
24336 24336 */
24337 24337 for (i = 4; (i < (log_size + 4)) && !match; i++) {
24338 24338 if (log_page_data[i] == log_page) {
24339 24339 match++;
24340 24340 }
24341 24341 }
24342 24342 kmem_free(log_page_data, 0xFF);
24343 24343 return (match);
24344 24344 }
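
For illustration, a hedged sketch (the contents are made up) of the "supported pages" page that this routine scans; byte 3 carries the list length and one supported page code follows per byte:

#include <stdint.h>

/* Hypothetical LOG SENSE page 0x00 response. */
static const uint8_t supported_pages[] = {
	0x00, 0x00,		/* page code 0x00, reserved byte */
	0x00, 0x03,		/* page length: three codes follow */
	0x00, 0x0d, 0x2f	/* supported pages: 0x00, 0x0d, 0x2f */
};

Scanning bytes 4 through 4 + supported_pages[3] - 1, as the loop above does, would report page 0x0d as supported (return 1) and any absent code as unsupported (return 0).
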
24345 24345
24346 24346
24347 24347 /*
24348 24348 * Function: sd_mhdioc_failfast
24349 24349 *
24350 24350 * Description: This routine is the driver entry point for handling ioctl
24351 24351 * requests to enable/disable the multihost failfast option.
24352 24352 * (MHIOCENFAILFAST)
24353 24353 *
24354 24354 * Arguments: dev - the device number
24355 24355 * arg - user specified probing interval.
24356 24356 * flag - this argument is a pass through to ddi_copyxxx()
24357 24357 * directly from the mode argument of ioctl().
24358 24358 *
24359 24359 * Return Code: 0
24360 24360 * EFAULT
24361 24361 * ENXIO
24362 24362 */
24363 24363
24364 24364 static int
24365 24365 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag)
24366 24366 {
24367 24367 struct sd_lun *un = NULL;
24368 24368 int mh_time;
24369 24369 int rval = 0;
24370 24370
24371 24371 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24372 24372 return (ENXIO);
24373 24373 }
24374 24374
24375 24375 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag))
24376 24376 return (EFAULT);
24377 24377
24378 24378 if (mh_time) {
24379 24379 mutex_enter(SD_MUTEX(un));
24380 24380 un->un_resvd_status |= SD_FAILFAST;
24381 24381 mutex_exit(SD_MUTEX(un));
24382 24382 /*
24383 24383 * If mh_time is INT_MAX, then this ioctl is being used for
24384 24384 * SCSI-3 PGR purposes, and we don't need to spawn watch thread.
24385 24385 */
24386 24386 if (mh_time != INT_MAX) {
24387 24387 rval = sd_check_mhd(dev, mh_time);
24388 24388 }
24389 24389 } else {
24390 24390 (void) sd_check_mhd(dev, 0);
24391 24391 mutex_enter(SD_MUTEX(un));
24392 24392 un->un_resvd_status &= ~SD_FAILFAST;
24393 24393 mutex_exit(SD_MUTEX(un));
24394 24394 }
24395 24395 return (rval);
24396 24396 }
24397 24397
24398 24398
24399 24399 /*
24400 24400 * Function: sd_mhdioc_takeown
24401 24401 *
24402 24402 * Description: This routine is the driver entry point for handling ioctl
24403 24403 * requests to forcefully acquire exclusive access rights to the
24404 24404 * multihost disk (MHIOCTKOWN).
24405 24405 *
24406 24406 * Arguments: dev - the device number
24407 24407 * arg - user provided structure specifying the delay
24408 24408 * parameters in milliseconds
24409 24409 * flag - this argument is a pass through to ddi_copyxxx()
24410 24410 * directly from the mode argument of ioctl().
24411 24411 *
24412 24412 * Return Code: 0
24413 24413 * EFAULT
24414 24414 * ENXIO
24415 24415 */
24416 24416
24417 24417 static int
24418 24418 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag)
24419 24419 {
24420 24420 struct sd_lun *un = NULL;
24421 24421 struct mhioctkown *tkown = NULL;
24422 24422 int rval = 0;
24423 24423
24424 24424 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24425 24425 return (ENXIO);
24426 24426 }
24427 24427
24428 24428 if (arg != NULL) {
24429 24429 tkown = (struct mhioctkown *)
24430 24430 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP);
24431 24431 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag);
24432 24432 if (rval != 0) {
24433 24433 rval = EFAULT;
24434 24434 goto error;
24435 24435 }
24436 24436 }
24437 24437
24438 24438 rval = sd_take_ownership(dev, tkown);
24439 24439 mutex_enter(SD_MUTEX(un));
24440 24440 if (rval == 0) {
24441 24441 un->un_resvd_status |= SD_RESERVE;
24442 24442 if (tkown != NULL && tkown->reinstate_resv_delay != 0) {
24443 24443 sd_reinstate_resv_delay =
24444 24444 tkown->reinstate_resv_delay * 1000;
24445 24445 } else {
24446 24446 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
24447 24447 }
24448 24448 /*
24449 24449 		 * Give precedence here to the scsi_watch interval set
24450 24450 		 * by the MHIOCENFAILFAST ioctl.
24451 24451 */
24452 24452 if ((un->un_resvd_status & SD_FAILFAST) == 0) {
24453 24453 mutex_exit(SD_MUTEX(un));
24454 24454 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000);
24455 24455 SD_TRACE(SD_LOG_IOCTL_MHD, un,
24456 24456 "sd_mhdioc_takeown : %d\n",
24457 24457 sd_reinstate_resv_delay);
24458 24458 } else {
24459 24459 mutex_exit(SD_MUTEX(un));
24460 24460 }
24461 24461 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
24462 24462 sd_mhd_reset_notify_cb, (caddr_t)un);
24463 24463 } else {
24464 24464 un->un_resvd_status &= ~SD_RESERVE;
24465 24465 mutex_exit(SD_MUTEX(un));
24466 24466 }
24467 24467
24468 24468 error:
24469 24469 if (tkown != NULL) {
24470 24470 kmem_free(tkown, sizeof (struct mhioctkown));
24471 24471 }
24472 24472 return (rval);
24473 24473 }
24474 24474
24475 24475
24476 24476 /*
24477 24477 * Function: sd_mhdioc_release
24478 24478 *
24479 24479 * Description: This routine is the driver entry point for handling ioctl
24480 24480 * requests to release exclusive access rights to the multihost
24481 24481 * disk (MHIOCRELEASE).
24482 24482 *
24483 24483 * Arguments: dev - the device number
24484 24484 *
24485 24485 * Return Code: 0
24486 24486 * ENXIO
24487 24487 */
24488 24488
24489 24489 static int
24490 24490 sd_mhdioc_release(dev_t dev)
24491 24491 {
24492 24492 struct sd_lun *un = NULL;
24493 24493 timeout_id_t resvd_timeid_save;
24494 24494 int resvd_status_save;
24495 24495 int rval = 0;
24496 24496
24497 24497 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24498 24498 return (ENXIO);
24499 24499 }
24500 24500
24501 24501 mutex_enter(SD_MUTEX(un));
24502 24502 resvd_status_save = un->un_resvd_status;
24503 24503 un->un_resvd_status &=
24504 24504 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE);
24505 24505 if (un->un_resvd_timeid) {
24506 24506 resvd_timeid_save = un->un_resvd_timeid;
24507 24507 un->un_resvd_timeid = NULL;
24508 24508 mutex_exit(SD_MUTEX(un));
24509 24509 (void) untimeout(resvd_timeid_save);
24510 24510 } else {
24511 24511 mutex_exit(SD_MUTEX(un));
24512 24512 }
24513 24513
24514 24514 /*
24515 24515 * destroy any pending timeout thread that may be attempting to
24516 24516 * reinstate reservation on this device.
24517 24517 */
24518 24518 sd_rmv_resv_reclaim_req(dev);
24519 24519
24520 24520 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) {
24521 24521 mutex_enter(SD_MUTEX(un));
24522 24522 if ((un->un_mhd_token) &&
24523 24523 ((un->un_resvd_status & SD_FAILFAST) == 0)) {
24524 24524 mutex_exit(SD_MUTEX(un));
24525 24525 (void) sd_check_mhd(dev, 0);
24526 24526 } else {
24527 24527 mutex_exit(SD_MUTEX(un));
24528 24528 }
24529 24529 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
24530 24530 sd_mhd_reset_notify_cb, (caddr_t)un);
24531 24531 } else {
24532 24532 /*
24533 24533 * sd_mhd_watch_cb will restart the resvd recover timeout thread
24534 24534 */
24535 24535 mutex_enter(SD_MUTEX(un));
24536 24536 un->un_resvd_status = resvd_status_save;
24537 24537 mutex_exit(SD_MUTEX(un));
24538 24538 }
24539 24539 return (rval);
24540 24540 }
24541 24541
24542 24542
24543 24543 /*
24544 24544 * Function: sd_mhdioc_register_devid
24545 24545 *
24546 24546 * Description: This routine is the driver entry point for handling ioctl
24547 24547 * requests to register the device id (MHIOCREREGISTERDEVID).
24548 24548 *
24549 24549 * Note: The implementation for this ioctl has been updated to
24550 24550 * be consistent with the original PSARC case (1999/357)
24551 24551 * (4375899, 4241671, 4220005)
24552 24552 *
24553 24553 * Arguments: dev - the device number
24554 24554 *
24555 24555 * Return Code: 0
24556 24556 * ENXIO
24557 24557 */
24558 24558
24559 24559 static int
24560 24560 sd_mhdioc_register_devid(dev_t dev)
24561 24561 {
24562 24562 struct sd_lun *un = NULL;
24563 24563 int rval = 0;
24564 24564 sd_ssc_t *ssc;
24565 24565
24566 24566 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24567 24567 return (ENXIO);
24568 24568 }
24569 24569
24570 24570 ASSERT(!mutex_owned(SD_MUTEX(un)));
24571 24571
24572 24572 mutex_enter(SD_MUTEX(un));
24573 24573
24574 24574 /* If a devid already exists, de-register it */
24575 24575 if (un->un_devid != NULL) {
24576 24576 ddi_devid_unregister(SD_DEVINFO(un));
24577 24577 /*
24578 24578 		 * After unregistering the devid, we need to free the devid memory.
24579 24579 */
24580 24580 ddi_devid_free(un->un_devid);
24581 24581 un->un_devid = NULL;
24582 24582 }
24583 24583
24584 24584 /* Check for reservation conflict */
24585 24585 mutex_exit(SD_MUTEX(un));
24586 24586 ssc = sd_ssc_init(un);
24587 24587 rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
24588 24588 mutex_enter(SD_MUTEX(un));
24589 24589
24590 24590 switch (rval) {
24591 24591 case 0:
24592 24592 sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED);
24593 24593 break;
24594 24594 case EACCES:
24595 24595 break;
24596 24596 default:
24597 24597 rval = EIO;
24598 24598 }
24599 24599
24600 24600 mutex_exit(SD_MUTEX(un));
24601 24601 if (rval != 0) {
24602 24602 if (rval == EIO)
24603 24603 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
24604 24604 else
24605 24605 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
24606 24606 }
24607 24607 sd_ssc_fini(ssc);
24608 24608 return (rval);
24609 24609 }
24610 24610
24611 24611
24612 24612 /*
24613 24613 * Function: sd_mhdioc_inkeys
24614 24614 *
24615 24615 * Description: This routine is the driver entry point for handling ioctl
24616 24616 * requests to issue the SCSI-3 Persistent In Read Keys command
24617 24617 * to the device (MHIOCGRP_INKEYS).
24618 24618 *
24619 24619 * Arguments: dev - the device number
24620 24620 * arg - user provided in_keys structure
24621 24621 * flag - this argument is a pass through to ddi_copyxxx()
24622 24622 * directly from the mode argument of ioctl().
24623 24623 *
24624 24624 * Return Code: code returned by sd_persistent_reservation_in_read_keys()
24625 24625 * ENXIO
24626 24626 * EFAULT
24627 24627 */
24628 24628
24629 24629 static int
24630 24630 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
24631 24631 {
24632 24632 struct sd_lun *un;
24633 24633 mhioc_inkeys_t inkeys;
24634 24634 int rval = 0;
24635 24635
24636 24636 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24637 24637 return (ENXIO);
24638 24638 }
24639 24639
24640 24640 #ifdef _MULTI_DATAMODEL
24641 24641 switch (ddi_model_convert_from(flag & FMODELS)) {
24642 24642 case DDI_MODEL_ILP32: {
24643 24643 struct mhioc_inkeys32 inkeys32;
24644 24644
24645 24645 if (ddi_copyin(arg, &inkeys32,
24646 24646 sizeof (struct mhioc_inkeys32), flag) != 0) {
24647 24647 return (EFAULT);
24648 24648 }
24649 24649 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
24650 24650 if ((rval = sd_persistent_reservation_in_read_keys(un,
24651 24651 &inkeys, flag)) != 0) {
24652 24652 return (rval);
24653 24653 }
24654 24654 inkeys32.generation = inkeys.generation;
24655 24655 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
24656 24656 flag) != 0) {
24657 24657 return (EFAULT);
24658 24658 }
24659 24659 break;
24660 24660 }
24661 24661 case DDI_MODEL_NONE:
24662 24662 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
24663 24663 flag) != 0) {
24664 24664 return (EFAULT);
24665 24665 }
24666 24666 if ((rval = sd_persistent_reservation_in_read_keys(un,
24667 24667 &inkeys, flag)) != 0) {
24668 24668 return (rval);
24669 24669 }
24670 24670 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
24671 24671 flag) != 0) {
24672 24672 return (EFAULT);
24673 24673 }
24674 24674 break;
24675 24675 }
24676 24676
24677 24677 #else /* ! _MULTI_DATAMODEL */
24678 24678
24679 24679 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
24680 24680 return (EFAULT);
24681 24681 }
24682 24682 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
24683 24683 if (rval != 0) {
24684 24684 return (rval);
24685 24685 }
24686 24686 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
24687 24687 return (EFAULT);
24688 24688 }
24689 24689
24690 24690 #endif /* _MULTI_DATAMODEL */
24691 24691
24692 24692 return (rval);
24693 24693 }
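
The ILP32 branch above is the usual _MULTI_DATAMODEL shape: copy in the 32-bit form of the struct, widen its embedded user pointer through uintptr_t (avoiding sign extension), run the work on the native form, then copy the updated fields back out in the 32-bit form. A minimal sketch of just the widening step, with hypothetical struct names:

#include <stdint.h>

/* Hypothetical 32-bit and native forms of an ioctl argument. */
struct foo32 {
	uint32_t	generation;
	uint32_t	li;		/* 32-bit user pointer */
};
struct foo {
	uint32_t	generation;
	void		*li;		/* native pointer */
};

static void
foo_from_ilp32(const struct foo32 *f32, struct foo *f)
{
	f->generation = f32->generation;
	/* uintptr_t zero-extends the 32-bit user address on LP64. */
	f->li = (void *)(uintptr_t)f32->li;
}
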
24694 24694
24695 24695
24696 24696 /*
24697 24697 * Function: sd_mhdioc_inresv
24698 24698 *
24699 24699 * Description: This routine is the driver entry point for handling ioctl
24700 24700 * requests to issue the SCSI-3 Persistent In Read Reservations
24701 24701  *		command to the device (MHIOCGRP_INRESV).
24702 24702 *
24703 24703 * Arguments: dev - the device number
24704 24704 * arg - user provided in_resv structure
24705 24705 * flag - this argument is a pass through to ddi_copyxxx()
24706 24706 * directly from the mode argument of ioctl().
24707 24707 *
24708 24708 * Return Code: code returned by sd_persistent_reservation_in_read_resv()
24709 24709 * ENXIO
24710 24710 * EFAULT
24711 24711 */
24712 24712
24713 24713 static int
24714 24714 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag)
24715 24715 {
24716 24716 struct sd_lun *un;
24717 24717 mhioc_inresvs_t inresvs;
24718 24718 int rval = 0;
24719 24719
24720 24720 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24721 24721 return (ENXIO);
24722 24722 }
24723 24723
24724 24724 #ifdef _MULTI_DATAMODEL
24725 24725
24726 24726 switch (ddi_model_convert_from(flag & FMODELS)) {
24727 24727 case DDI_MODEL_ILP32: {
24728 24728 struct mhioc_inresvs32 inresvs32;
24729 24729
24730 24730 if (ddi_copyin(arg, &inresvs32,
24731 24731 sizeof (struct mhioc_inresvs32), flag) != 0) {
24732 24732 return (EFAULT);
24733 24733 }
24734 24734 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li;
24735 24735 if ((rval = sd_persistent_reservation_in_read_resv(un,
24736 24736 &inresvs, flag)) != 0) {
24737 24737 return (rval);
24738 24738 }
24739 24739 inresvs32.generation = inresvs.generation;
24740 24740 if (ddi_copyout(&inresvs32, arg,
24741 24741 sizeof (struct mhioc_inresvs32), flag) != 0) {
24742 24742 return (EFAULT);
24743 24743 }
24744 24744 break;
24745 24745 }
24746 24746 case DDI_MODEL_NONE:
24747 24747 if (ddi_copyin(arg, &inresvs,
24748 24748 sizeof (mhioc_inresvs_t), flag) != 0) {
24749 24749 return (EFAULT);
24750 24750 }
24751 24751 if ((rval = sd_persistent_reservation_in_read_resv(un,
24752 24752 &inresvs, flag)) != 0) {
24753 24753 return (rval);
24754 24754 }
24755 24755 if (ddi_copyout(&inresvs, arg,
24756 24756 sizeof (mhioc_inresvs_t), flag) != 0) {
24757 24757 return (EFAULT);
24758 24758 }
24759 24759 break;
24760 24760 }
24761 24761
24762 24762 #else /* ! _MULTI_DATAMODEL */
24763 24763
24764 24764 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) {
24765 24765 return (EFAULT);
24766 24766 }
24767 24767 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag);
24768 24768 if (rval != 0) {
24769 24769 return (rval);
24770 24770 }
24771 24771 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) {
24772 24772 return (EFAULT);
24773 24773 }
24774 24774
24775 24775 #endif /* ! _MULTI_DATAMODEL */
24776 24776
24777 24777 return (rval);
24778 24778 }
24779 24779
24780 24780
24781 24781 /*
24782 24782 * The following routines support the clustering functionality described below
24783 24783 * and implement lost reservation reclaim functionality.
24784 24784 *
24785 24785 * Clustering
24786 24786 * ----------
24787 24787 * The clustering code uses two different, independent forms of SCSI
24788 24788  * reservation: traditional SCSI-2 Reserve/Release and the newer SCSI-3
24789 24789 * Persistent Group Reservations. For any particular disk, it will use either
24790 24790 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk.
24791 24791 *
24792 24792 * SCSI-2
24793 24793 * The cluster software takes ownership of a multi-hosted disk by issuing the
24794 24794 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the
24795 24795  * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl: a
24796 24796  * cluster node, just after taking ownership of the disk with the MHIOCTKOWN
24797 24797  * ioctl, then issues the MHIOCENFAILFAST ioctl. This "enables failfast" in the
24798 24798 * driver. The meaning of failfast is that if the driver (on this host) ever
24799 24799 * encounters the scsi error return code RESERVATION_CONFLICT from the device,
24800 24800 * it should immediately panic the host. The motivation for this ioctl is that
24801 24801 * if this host does encounter reservation conflict, the underlying cause is
24802 24802 * that some other host of the cluster has decided that this host is no longer
24803 24803 * in the cluster and has seized control of the disks for itself. Since this
24804 24804 * host is no longer in the cluster, it ought to panic itself. The
24805 24805 * MHIOCENFAILFAST ioctl does two things:
24806 24806 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT
24807 24807 * error to panic the host
24808 24808 * (b) it sets up a periodic timer to test whether this host still has
24809 24809 * "access" (in that no other host has reserved the device): if the
24810 24810 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The
24811 24811 * purpose of that periodic timer is to handle scenarios where the host is
24812 24812  *	otherwise temporarily quiescent, doing no real i/o.
24813 24813 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host,
24814 24814 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for
24815 24815 * the device itself.
24816 24816 *
24817 24817 * SCSI-3 PGR
24818 24818 * A direct semantic implementation of the SCSI-3 Persistent Reservation
24819 24819 * facility is supported through the shared multihost disk ioctls
24820 24820 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
24821 24821  * MHIOCGRP_PREEMPTANDABORT, MHIOCGRP_CLEAR).
24822 24822 *
24823 24823 * Reservation Reclaim:
24824 24824 * --------------------
24825 24825  * To support lost reservation reclaim operations, this driver creates a
24826 24826  * single thread that handles reinstating reservations on all devices that
24827 24827  * have lost them. sd_resv_reclaim_requests are logged for all devices that
24828 24828  * have LOST RESERVATIONS when the scsi watch facility calls back
24829 24829  * sd_mhd_watch_cb, and the reservation reclaim thread loops through the
24830 24830  * requests to regain the lost reservations.
24831 24831 */
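
A hedged userland sketch of the SCSI-2 sequence described above, as a cluster agent might issue it; error handling is minimal and the two-second failfast interval is only an example:

#include <sys/types.h>
#include <sys/mhd.h>
#include <fcntl.h>
#include <stropts.h>
#include <unistd.h>

static int
take_disk(const char *path)
{
	int fd;
	int interval = 2000;		/* failfast probe period, ms */

	if ((fd = open(path, O_RDWR)) < 0)
		return (-1);
	/* Break any foreign reservation and reserve the disk. */
	if (ioctl(fd, MHIOCTKOWN, NULL) < 0) {
		(void) close(fd);
		return (-1);
	}
	/* Panic this host on any future RESERVATION_CONFLICT. */
	(void) ioctl(fd, MHIOCENFAILFAST, &interval);
	return (fd);
}

static void
drop_disk(int fd)
{
	int off = 0;

	(void) ioctl(fd, MHIOCENFAILFAST, &off);	/* disable failfast */
	(void) ioctl(fd, MHIOCRELEASE, 0);		/* drop the reserve */
	(void) close(fd);
}
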
24832 24832
24833 24833 /*
24834 24834 * Function: sd_check_mhd()
24835 24835 *
24836 24836 * Description: This function sets up and submits a scsi watch request or
24837 24837 * terminates an existing watch request. This routine is used in
24838 24838 * support of reservation reclaim.
24839 24839 *
24840 24840 * Arguments: dev - the device 'dev_t' is used for context to discriminate
24841 24841 * among multiple watches that share the callback function
24842 24842  *		interval - the number of milliseconds specifying the watch
24843 24843 * interval for issuing TEST UNIT READY commands. If
24844 24844 * set to 0 the watch should be terminated. If the
24845 24845 * interval is set to 0 and if the device is required
24846 24846 * to hold reservation while disabling failfast, the
24847 24847 * watch is restarted with an interval of
24848 24848 * reinstate_resv_delay.
24849 24849 *
24850 24850 * Return Code: 0 - Successful submit/terminate of scsi watch request
24851 24851 * ENXIO - Indicates an invalid device was specified
24852 24852 * EAGAIN - Unable to submit the scsi watch request
24853 24853 */
24854 24854
24855 24855 static int
24856 24856 sd_check_mhd(dev_t dev, int interval)
24857 24857 {
24858 24858 struct sd_lun *un;
24859 24859 opaque_t token;
24860 24860
24861 24861 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24862 24862 return (ENXIO);
24863 24863 }
24864 24864
24865 24865 /* is this a watch termination request? */
24866 24866 if (interval == 0) {
24867 24867 mutex_enter(SD_MUTEX(un));
24868 24868 /* if there is an existing watch task then terminate it */
24869 24869 if (un->un_mhd_token) {
24870 24870 token = un->un_mhd_token;
24871 24871 un->un_mhd_token = NULL;
24872 24872 mutex_exit(SD_MUTEX(un));
24873 24873 (void) scsi_watch_request_terminate(token,
24874 24874 SCSI_WATCH_TERMINATE_ALL_WAIT);
24875 24875 mutex_enter(SD_MUTEX(un));
24876 24876 } else {
24877 24877 mutex_exit(SD_MUTEX(un));
24878 24878 /*
24879 24879 * Note: If we return here we don't check for the
24880 24880 * failfast case. This is the original legacy
24881 24881 * implementation but perhaps we should be checking
24882 24882 * the failfast case.
24883 24883 */
24884 24884 return (0);
24885 24885 }
24886 24886 /*
24887 24887 * If the device is required to hold reservation while
24888 24888 * disabling failfast, we need to restart the scsi_watch
24889 24889 * routine with an interval of reinstate_resv_delay.
24890 24890 */
24891 24891 if (un->un_resvd_status & SD_RESERVE) {
24892 24892 interval = sd_reinstate_resv_delay/1000;
24893 24893 } else {
24894 24894 /* no failfast so bail */
24895 24895 mutex_exit(SD_MUTEX(un));
24896 24896 return (0);
24897 24897 }
24898 24898 mutex_exit(SD_MUTEX(un));
24899 24899 }
24900 24900
24901 24901 /*
24902 24902 * adjust minimum time interval to 1 second,
24903 24903 * and convert from msecs to usecs
24904 24904 */
24905 24905 if (interval > 0 && interval < 1000) {
24906 24906 interval = 1000;
24907 24907 }
24908 24908 interval *= 1000;
24909 24909
24910 24910 /*
24911 24911 * submit the request to the scsi_watch service
24912 24912 */
24913 24913 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval,
24914 24914 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev);
24915 24915 if (token == NULL) {
24916 24916 return (EAGAIN);
24917 24917 }
24918 24918
24919 24919 /*
24920 24920 * save token for termination later on
24921 24921 */
24922 24922 mutex_enter(SD_MUTEX(un));
24923 24923 un->un_mhd_token = token;
24924 24924 mutex_exit(SD_MUTEX(un));
24925 24925 return (0);
24926 24926 }
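
The unit handling above is easy to misread: the caller passes milliseconds, the clamp enforces a one-second floor, and scsi_watch_request_submit() is then given microseconds. Restated on its own (assuming, as the code above does, that the product fits in an int):

/* Convert a caller-supplied watch interval (ms) to usec, >= 1 second. */
static int
mhd_watch_interval_usec(int interval_ms)
{
	if (interval_ms > 0 && interval_ms < 1000)
		interval_ms = 1000;	/* clamp to the one-second floor */
	return (interval_ms * 1000);	/* msec -> usec */
}
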
24927 24927
24928 24928
24929 24929 /*
24930 24930 * Function: sd_mhd_watch_cb()
24931 24931 *
24932 24932 * Description: This function is the call back function used by the scsi watch
24933 24933 * facility. The scsi watch facility sends the "Test Unit Ready"
24934 24934 * and processes the status. If applicable (i.e. a "Unit Attention"
24935 24935 * status and automatic "Request Sense" not used) the scsi watch
24936 24936 * facility will send a "Request Sense" and retrieve the sense data
24937 24937  *		to be passed to this callback function. In either case,
24938 24938  *		whether the "Request Sense" was automatic or submitted by the
24939 24939  *		facility, this callback is passed the status and sense data.
24940 24940 *
24941 24941 * Arguments: arg - the device 'dev_t' is used for context to discriminate
24942 24942 * among multiple watches that share this callback function
24943 24943 * resultp - scsi watch facility result packet containing scsi
24944 24944 * packet, status byte and sense data
24945 24945 *
24946 24946 * Return Code: 0 - continue the watch task
24947 24947 * non-zero - terminate the watch task
24948 24948 */
24949 24949
24950 24950 static int
24951 24951 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
24952 24952 {
24953 24953 struct sd_lun *un;
24954 24954 struct scsi_status *statusp;
24955 24955 uint8_t *sensep;
24956 24956 struct scsi_pkt *pkt;
24957 24957 uchar_t actual_sense_length;
24958 24958 dev_t dev = (dev_t)arg;
24959 24959
24960 24960 ASSERT(resultp != NULL);
24961 24961 statusp = resultp->statusp;
24962 24962 sensep = (uint8_t *)resultp->sensep;
24963 24963 pkt = resultp->pkt;
24964 24964 actual_sense_length = resultp->actual_sense_length;
24965 24965
24966 24966 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24967 24967 return (ENXIO);
24968 24968 }
24969 24969
24970 24970 SD_TRACE(SD_LOG_IOCTL_MHD, un,
24971 24971 "sd_mhd_watch_cb: reason '%s', status '%s'\n",
24972 24972 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp)));
24973 24973
24974 24974 /* Begin processing of the status and/or sense data */
24975 24975 if (pkt->pkt_reason != CMD_CMPLT) {
24976 24976 /* Handle the incomplete packet */
24977 24977 sd_mhd_watch_incomplete(un, pkt);
24978 24978 return (0);
24979 24979 } else if (*((unsigned char *)statusp) != STATUS_GOOD) {
24980 24980 if (*((unsigned char *)statusp)
24981 24981 == STATUS_RESERVATION_CONFLICT) {
24982 24982 /*
24983 24983 * Handle a reservation conflict by panicking if
24984 24984 * configured for failfast or by logging the conflict
24985 24985 * and updating the reservation status
24986 24986 */
24987 24987 mutex_enter(SD_MUTEX(un));
24988 24988 if ((un->un_resvd_status & SD_FAILFAST) &&
24989 24989 (sd_failfast_enable)) {
24990 24990 sd_panic_for_res_conflict(un);
24991 24991 /*NOTREACHED*/
24992 24992 }
24993 24993 SD_INFO(SD_LOG_IOCTL_MHD, un,
24994 24994 "sd_mhd_watch_cb: Reservation Conflict\n");
24995 24995 un->un_resvd_status |= SD_RESERVATION_CONFLICT;
24996 24996 mutex_exit(SD_MUTEX(un));
24997 24997 }
24998 24998 }
24999 24999
25000 25000 if (sensep != NULL) {
25001 25001 if (actual_sense_length >= (SENSE_LENGTH - 2)) {
25002 25002 mutex_enter(SD_MUTEX(un));
25003 25003 if ((scsi_sense_asc(sensep) ==
25004 25004 SD_SCSI_RESET_SENSE_CODE) &&
25005 25005 (un->un_resvd_status & SD_RESERVE)) {
25006 25006 /*
25007 25007 * The additional sense code indicates a power
25008 25008 * on or bus device reset has occurred; update
25009 25009 * the reservation status.
25010 25010 */
25011 25011 un->un_resvd_status |=
25012 25012 (SD_LOST_RESERVE | SD_WANT_RESERVE);
25013 25013 SD_INFO(SD_LOG_IOCTL_MHD, un,
25014 25014 "sd_mhd_watch_cb: Lost Reservation\n");
25015 25015 }
25016 25016 } else {
25017 25017 return (0);
25018 25018 }
25019 25019 } else {
25020 25020 mutex_enter(SD_MUTEX(un));
25021 25021 }
25022 25022
25023 25023 if ((un->un_resvd_status & SD_RESERVE) &&
25024 25024 (un->un_resvd_status & SD_LOST_RESERVE)) {
25025 25025 if (un->un_resvd_status & SD_WANT_RESERVE) {
25026 25026 /*
25027 25027 * A reset occurred in between the last probe and this
25028 25028 * one so if a timeout is pending cancel it.
25029 25029 */
25030 25030 if (un->un_resvd_timeid) {
25031 25031 timeout_id_t temp_id = un->un_resvd_timeid;
25032 25032 un->un_resvd_timeid = NULL;
25033 25033 mutex_exit(SD_MUTEX(un));
25034 25034 (void) untimeout(temp_id);
25035 25035 mutex_enter(SD_MUTEX(un));
25036 25036 }
25037 25037 un->un_resvd_status &= ~SD_WANT_RESERVE;
25038 25038 }
25039 25039 if (un->un_resvd_timeid == 0) {
25040 25040 /* Schedule a timeout to handle the lost reservation */
25041 25041 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover,
25042 25042 (void *)dev,
25043 25043 drv_usectohz(sd_reinstate_resv_delay));
25044 25044 }
25045 25045 }
25046 25046 mutex_exit(SD_MUTEX(un));
25047 25047 return (0);
25048 25048 }
25049 25049
25050 25050
25051 25051 /*
25052 25052 * Function: sd_mhd_watch_incomplete()
25053 25053 *
25054 25054 * Description: This function is used to find out why a scsi pkt sent by the
25055 25055  *		scsi watch facility was not completed. In some scenarios this
25056 25056  *		routine simply returns; otherwise it resets the lun, target, or
25057 25057  *		bus to see if the drive is still online.
25058 25058 *
25059 25059 * Arguments: un - driver soft state (unit) structure
25060 25060 * pkt - incomplete scsi pkt
25061 25061 */
25062 25062
25063 25063 static void
25064 25064 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt)
25065 25065 {
25066 25066 int be_chatty;
25067 25067 int perr;
25068 25068
25069 25069 ASSERT(pkt != NULL);
25070 25070 ASSERT(un != NULL);
25071 25071 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT));
25072 25072 perr = (pkt->pkt_statistics & STAT_PERR);
25073 25073
25074 25074 mutex_enter(SD_MUTEX(un));
25075 25075 if (un->un_state == SD_STATE_DUMPING) {
25076 25076 mutex_exit(SD_MUTEX(un));
25077 25077 return;
25078 25078 }
25079 25079
25080 25080 switch (pkt->pkt_reason) {
25081 25081 case CMD_UNX_BUS_FREE:
25082 25082 /*
25083 25083 * If we had a parity error that caused the target to drop BSY*,
25084 25084 * don't be chatty about it.
25085 25085 */
25086 25086 if (perr && be_chatty) {
25087 25087 be_chatty = 0;
25088 25088 }
25089 25089 break;
25090 25090 case CMD_TAG_REJECT:
25091 25091 /*
25092 25092 * The SCSI-2 spec states that a tag reject will be sent by the
25093 25093 * target if tagged queuing is not supported. A tag reject may
25094 25094 * also be sent during certain initialization periods or to
25095 25095 * control internal resources. For the latter case the target
25096 25096 * may also return Queue Full.
25097 25097 *
25098 25098 * If this driver receives a tag reject from a target that is
25099 25099 * going through an init period or controlling internal
25100 25100 * resources tagged queuing will be disabled. This is a less
25101 25101 * than optimal behavior but the driver is unable to determine
25102 25102 	 * the target state and assumes tagged queueing is not supported.
25103 25103 */
25104 25104 pkt->pkt_flags = 0;
25105 25105 un->un_tagflags = 0;
25106 25106
25107 25107 if (un->un_f_opt_queueing == TRUE) {
25108 25108 un->un_throttle = min(un->un_throttle, 3);
25109 25109 } else {
25110 25110 un->un_throttle = 1;
25111 25111 }
25112 25112 mutex_exit(SD_MUTEX(un));
25113 25113 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
25114 25114 mutex_enter(SD_MUTEX(un));
25115 25115 break;
25116 25116 case CMD_INCOMPLETE:
25117 25117 /*
25118 25118 		 * The transport stopped with an abnormal state; fall through and
25119 25119 * reset the target and/or bus unless selection did not complete
25120 25120 * (indicated by STATE_GOT_BUS) in which case we don't want to
25121 25121 * go through a target/bus reset
25122 25122 */
25123 25123 if (pkt->pkt_state == STATE_GOT_BUS) {
25124 25124 break;
25125 25125 }
25126 25126 /*FALLTHROUGH*/
25127 25127
25128 25128 case CMD_TIMEOUT:
25129 25129 default:
25130 25130 /*
25131 25131 * The lun may still be running the command, so a lun reset
25132 25132 * should be attempted. If the lun reset fails or cannot be
25133 25133 		 * issued, then try a target reset. Lastly try a bus reset.
25134 25134 */
25135 25135 if ((pkt->pkt_statistics &
25136 25136 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
25137 25137 int reset_retval = 0;
25138 25138 mutex_exit(SD_MUTEX(un));
25139 25139 if (un->un_f_allow_bus_device_reset == TRUE) {
25140 25140 if (un->un_f_lun_reset_enabled == TRUE) {
25141 25141 reset_retval =
25142 25142 scsi_reset(SD_ADDRESS(un),
25143 25143 RESET_LUN);
25144 25144 }
25145 25145 if (reset_retval == 0) {
25146 25146 reset_retval =
25147 25147 scsi_reset(SD_ADDRESS(un),
25148 25148 RESET_TARGET);
25149 25149 }
25150 25150 }
25151 25151 if (reset_retval == 0) {
25152 25152 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
25153 25153 }
25154 25154 mutex_enter(SD_MUTEX(un));
25155 25155 }
25156 25156 break;
25157 25157 }
25158 25158
25159 25159 /* A device/bus reset has occurred; update the reservation status. */
25160 25160 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
25161 25161 (STAT_BUS_RESET | STAT_DEV_RESET))) {
25162 25162 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25163 25163 un->un_resvd_status |=
25164 25164 (SD_LOST_RESERVE | SD_WANT_RESERVE);
25165 25165 SD_INFO(SD_LOG_IOCTL_MHD, un,
25166 25166 "sd_mhd_watch_incomplete: Lost Reservation\n");
25167 25167 }
25168 25168 }
25169 25169
25170 25170 /*
25171 25171 	 * The disk has been turned off; update the device state.
25172 25172 *
25173 25173 * Note: Should we be offlining the disk here?
25174 25174 */
25175 25175 if (pkt->pkt_state == STATE_GOT_BUS) {
25176 25176 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
25177 25177 "Disk not responding to selection\n");
25178 25178 if (un->un_state != SD_STATE_OFFLINE) {
25179 25179 New_state(un, SD_STATE_OFFLINE);
25180 25180 }
25181 25181 } else if (be_chatty) {
25182 25182 /*
25183 25183 * suppress messages if they are all the same pkt reason;
25184 25184 * with TQ, many (up to 256) are returned with the same
25185 25185 * pkt_reason
25186 25186 */
25187 25187 if (pkt->pkt_reason != un->un_last_pkt_reason) {
25188 25188 SD_ERROR(SD_LOG_IOCTL_MHD, un,
25189 25189 "sd_mhd_watch_incomplete: "
25190 25190 "SCSI transport failed: reason '%s'\n",
25191 25191 scsi_rname(pkt->pkt_reason));
25192 25192 }
25193 25193 }
25194 25194 un->un_last_pkt_reason = pkt->pkt_reason;
25195 25195 mutex_exit(SD_MUTEX(un));
25196 25196 }
25197 25197
25198 25198
25199 25199 /*
25200 25200 * Function: sd_sname()
25201 25201 *
25202 25202 * Description: This is a simple little routine to return a string containing
25203 25203 * a printable description of command status byte for use in
25204 25204 * logging.
25205 25205 *
25206 25206 * Arguments: status - pointer to a status byte
25207 25207 *
25208 25208 * Return Code: char * - string containing status description.
25209 25209 */
25210 25210
25211 25211 static char *
25212 25212 sd_sname(uchar_t status)
25213 25213 {
25214 25214 switch (status & STATUS_MASK) {
25215 25215 case STATUS_GOOD:
25216 25216 return ("good status");
25217 25217 case STATUS_CHECK:
25218 25218 return ("check condition");
25219 25219 case STATUS_MET:
25220 25220 return ("condition met");
25221 25221 case STATUS_BUSY:
25222 25222 return ("busy");
25223 25223 case STATUS_INTERMEDIATE:
25224 25224 return ("intermediate");
25225 25225 case STATUS_INTERMEDIATE_MET:
25226 25226 return ("intermediate - condition met");
25227 25227 case STATUS_RESERVATION_CONFLICT:
25228 25228 return ("reservation_conflict");
25229 25229 case STATUS_TERMINATED:
25230 25230 return ("command terminated");
25231 25231 case STATUS_QFULL:
25232 25232 return ("queue full");
25233 25233 default:
25234 25234 return ("<unknown status>");
25235 25235 }
25236 25236 }
25237 25237
25238 25238
25239 25239 /*
25240 25240 * Function: sd_mhd_resvd_recover()
25241 25241 *
25242 25242 * Description: This function adds a reservation entry to the
25243 25243 * sd_resv_reclaim_request list and signals the reservation
25244 25244 * reclaim thread that there is work pending. If the reservation
25245 25245 * reclaim thread has not been previously created this function
25246 25246 * will kick it off.
25247 25247 *
25248 25248 * Arguments: arg - the device 'dev_t' is used for context to discriminate
25249 25249 * among multiple watches that share this callback function
25250 25250 *
25251 25251 * Context: This routine is called by timeout() and is run in interrupt
25252 25252 * context. It must not sleep or call other functions which may
25253 25253 * sleep.
25254 25254 */
25255 25255
25256 25256 static void
25257 25257 sd_mhd_resvd_recover(void *arg)
25258 25258 {
25259 25259 dev_t dev = (dev_t)arg;
25260 25260 struct sd_lun *un;
25261 25261 struct sd_thr_request *sd_treq = NULL;
25262 25262 struct sd_thr_request *sd_cur = NULL;
25263 25263 struct sd_thr_request *sd_prev = NULL;
25264 25264 int already_there = 0;
25265 25265
25266 25266 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25267 25267 return;
25268 25268 }
25269 25269
25270 25270 mutex_enter(SD_MUTEX(un));
25271 25271 un->un_resvd_timeid = NULL;
25272 25272 if (un->un_resvd_status & SD_WANT_RESERVE) {
25273 25273 /*
25274 25274 		 * There was a reset, so don't issue the reserve; allow the
25275 25275 * sd_mhd_watch_cb callback function to notice this and
25276 25276 * reschedule the timeout for reservation.
25277 25277 */
25278 25278 mutex_exit(SD_MUTEX(un));
25279 25279 return;
25280 25280 }
25281 25281 mutex_exit(SD_MUTEX(un));
25282 25282
25283 25283 /*
25284 25284 * Add this device to the sd_resv_reclaim_request list and the
25285 25285 * sd_resv_reclaim_thread should take care of the rest.
25286 25286 *
25287 25287 * Note: We can't sleep in this context so if the memory allocation
25288 25288 	 * fails, allow the sd_mhd_watch_cb callback function to notice this and
25289 25289 * reschedule the timeout for reservation. (4378460)
25290 25290 */
25291 25291 sd_treq = (struct sd_thr_request *)
25292 25292 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP);
25293 25293 if (sd_treq == NULL) {
25294 25294 return;
25295 25295 }
25296 25296
25297 25297 sd_treq->sd_thr_req_next = NULL;
25298 25298 sd_treq->dev = dev;
25299 25299 mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25300 25300 if (sd_tr.srq_thr_req_head == NULL) {
25301 25301 sd_tr.srq_thr_req_head = sd_treq;
25302 25302 } else {
25303 25303 sd_cur = sd_prev = sd_tr.srq_thr_req_head;
25304 25304 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) {
25305 25305 if (sd_cur->dev == dev) {
25306 25306 /*
25307 25307 				 * already in the queue, so don't log
25308 25308 * another request for the device
25309 25309 */
25310 25310 already_there = 1;
25311 25311 break;
25312 25312 }
25313 25313 sd_prev = sd_cur;
25314 25314 }
25315 25315 if (!already_there) {
25316 25316 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: "
25317 25317 "logging request for %lx\n", dev);
25318 25318 sd_prev->sd_thr_req_next = sd_treq;
25319 25319 } else {
25320 25320 kmem_free(sd_treq, sizeof (struct sd_thr_request));
25321 25321 }
25322 25322 }
25323 25323
25324 25324 /*
25325 25325 	 * Create a kernel thread to do the reservation reclaim and free up
25326 25326 	 * this thread; we cannot block in this (timeout) context while the
25327 25327 	 * reservation reclaim is performed.
25328 25328 */
25329 25329 if (sd_tr.srq_resv_reclaim_thread == NULL)
25330 25330 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0,
25331 25331 sd_resv_reclaim_thread, NULL,
25332 25332 0, &p0, TS_RUN, v.v_maxsyspri - 2);
25333 25333
25334 25334 /* Tell the reservation reclaim thread that it has work to do */
25335 25335 cv_signal(&sd_tr.srq_resv_reclaim_cv);
25336 25336 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25337 25337 }
25338 25338
25339 25339 /*
25340 25340 * Function: sd_resv_reclaim_thread()
25341 25341 *
25342 25342  * Description: This function implements the reservation reclaim operations.
25343 25343  *
25344 25344  * Arguments: none
25346 25346 */
25347 25347
25348 25348 static void
25349 25349 sd_resv_reclaim_thread()
25350 25350 {
25351 25351 struct sd_lun *un;
25352 25352 struct sd_thr_request *sd_mhreq;
25353 25353
25354 25354 /* Wait for work */
25355 25355 mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25356 25356 if (sd_tr.srq_thr_req_head == NULL) {
25357 25357 cv_wait(&sd_tr.srq_resv_reclaim_cv,
25358 25358 &sd_tr.srq_resv_reclaim_mutex);
25359 25359 }
25360 25360
25361 25361 /* Loop while we have work */
25362 25362 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) {
25363 25363 un = ddi_get_soft_state(sd_state,
25364 25364 SDUNIT(sd_tr.srq_thr_cur_req->dev));
25365 25365 if (un == NULL) {
25366 25366 /*
25367 25367 * softstate structure is NULL so just
25368 25368 * dequeue the request and continue
25369 25369 */
25370 25370 sd_tr.srq_thr_req_head =
25371 25371 sd_tr.srq_thr_cur_req->sd_thr_req_next;
25372 25372 kmem_free(sd_tr.srq_thr_cur_req,
25373 25373 sizeof (struct sd_thr_request));
25374 25374 continue;
25375 25375 }
25376 25376
25377 25377 /* dequeue the request */
25378 25378 sd_mhreq = sd_tr.srq_thr_cur_req;
25379 25379 sd_tr.srq_thr_req_head =
25380 25380 sd_tr.srq_thr_cur_req->sd_thr_req_next;
25381 25381 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25382 25382
25383 25383 /*
25384 25384 * Reclaim reservation only if SD_RESERVE is still set. There
25385 25385 * may have been a call to MHIOCRELEASE before we got here.
25386 25386 */
25387 25387 mutex_enter(SD_MUTEX(un));
25388 25388 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25389 25389 /*
25390 25390 * Note: The SD_LOST_RESERVE flag is cleared before
25391 25391 * reclaiming the reservation. If this is done after the
25392 25392 * call to sd_reserve_release a reservation loss in the
25393 25393 * window between pkt completion of reserve cmd and
25394 25394 			 * mutex_enter below may not be recognized.
25395 25395 */
25396 25396 un->un_resvd_status &= ~SD_LOST_RESERVE;
25397 25397 mutex_exit(SD_MUTEX(un));
25398 25398
25399 25399 if (sd_reserve_release(sd_mhreq->dev,
25400 25400 SD_RESERVE) == 0) {
25401 25401 mutex_enter(SD_MUTEX(un));
25402 25402 un->un_resvd_status |= SD_RESERVE;
25403 25403 mutex_exit(SD_MUTEX(un));
25404 25404 SD_INFO(SD_LOG_IOCTL_MHD, un,
25405 25405 "sd_resv_reclaim_thread: "
25406 25406 "Reservation Recovered\n");
25407 25407 } else {
25408 25408 mutex_enter(SD_MUTEX(un));
25409 25409 un->un_resvd_status |= SD_LOST_RESERVE;
25410 25410 mutex_exit(SD_MUTEX(un));
25411 25411 SD_INFO(SD_LOG_IOCTL_MHD, un,
25412 25412 "sd_resv_reclaim_thread: Failed "
25413 25413 "Reservation Recovery\n");
25414 25414 }
25415 25415 } else {
25416 25416 mutex_exit(SD_MUTEX(un));
25417 25417 }
25418 25418 mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25419 25419 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req);
25420 25420 kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
25421 25421 sd_mhreq = sd_tr.srq_thr_cur_req = NULL;
25422 25422 /*
25423 25423 		 * wake up the destroy thread if anyone is waiting on
25424 25424 * us to complete.
25425 25425 */
25426 25426 cv_signal(&sd_tr.srq_inprocess_cv);
25427 25427 SD_TRACE(SD_LOG_IOCTL_MHD, un,
25428 25428 "sd_resv_reclaim_thread: cv_signalling current request \n");
25429 25429 }
25430 25430
25431 25431 /*
25432 25432 	 * clean up the sd_tr structure now that this thread is exiting
25433 25433 */
25434 25434 ASSERT(sd_tr.srq_thr_req_head == NULL);
25435 25435 ASSERT(sd_tr.srq_thr_cur_req == NULL);
25436 25436 sd_tr.srq_resv_reclaim_thread = NULL;
25437 25437 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25438 25438 thread_exit();
25439 25439 }
25440 25440
25441 25441
25442 25442 /*
25443 25443 * Function: sd_rmv_resv_reclaim_req()
25444 25444 *
25445 25445 * Description: This function removes any pending reservation reclaim requests
25446 25446 * for the specified device.
25447 25447 *
25448 25448 * Arguments: dev - the device 'dev_t'
25449 25449 */
25450 25450
25451 25451 static void
25452 25452 sd_rmv_resv_reclaim_req(dev_t dev)
25453 25453 {
25454 25454 struct sd_thr_request *sd_mhreq;
25455 25455 struct sd_thr_request *sd_prev;
25456 25456
25457 25457 /* Remove a reservation reclaim request from the list */
25458 25458 mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25459 25459 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) {
25460 25460 /*
25461 25461 * We are attempting to reinstate reservation for
25462 25462 * this device. We wait for sd_reserve_release()
25463 25463 * to return before we return.
25464 25464 */
25465 25465 cv_wait(&sd_tr.srq_inprocess_cv,
25466 25466 &sd_tr.srq_resv_reclaim_mutex);
25467 25467 } else {
25468 25468 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head;
25469 25469 if (sd_mhreq && sd_mhreq->dev == dev) {
25470 25470 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next;
25471 25471 kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
25472 25472 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25473 25473 return;
25474 25474 }
25475 25475 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) {
25476 25476 if (sd_mhreq && sd_mhreq->dev == dev) {
25477 25477 break;
25478 25478 }
25479 25479 sd_prev = sd_mhreq;
25480 25480 }
25481 25481 if (sd_mhreq != NULL) {
25482 25482 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next;
25483 25483 kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
25484 25484 }
25485 25485 }
25486 25486 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25487 25487 }
25488 25488
25489 25489
25490 25490 /*
25491 25491 * Function: sd_mhd_reset_notify_cb()
25492 25492 *
25493 25493 * Description: This is a call back function for scsi_reset_notify. This
25494 25494 * function updates the softstate reserved status and logs the
25495 25495 * reset. The driver scsi watch facility callback function
25496 25496 * (sd_mhd_watch_cb) and reservation reclaim thread functionality
25497 25497 * will reclaim the reservation.
25498 25498 *
25499 25499 * Arguments: arg - driver soft state (unit) structure
25500 25500 */
25501 25501
25502 25502 static void
25503 25503 sd_mhd_reset_notify_cb(caddr_t arg)
25504 25504 {
25505 25505 struct sd_lun *un = (struct sd_lun *)arg;
25506 25506
25507 25507 mutex_enter(SD_MUTEX(un));
25508 25508 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25509 25509 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
25510 25510 SD_INFO(SD_LOG_IOCTL_MHD, un,
25511 25511 "sd_mhd_reset_notify_cb: Lost Reservation\n");
25512 25512 }
25513 25513 mutex_exit(SD_MUTEX(un));
25514 25514 }
25515 25515
25516 25516
25517 25517 /*
25518 25518 * Function: sd_take_ownership()
25519 25519 *
25520 25520 * Description: This routine implements an algorithm to achieve a stable
25521 25521 * reservation on disks which don't implement priority reserve,
25522 25522  *		and makes sure that other hosts lose re-reservation attempts.
25523 25523  *		This algorithm consists of a loop that keeps issuing the RESERVE
25524 25524  *		for some period of time (min_ownership_delay, default 6 seconds).
25525 25525 * During that loop, it looks to see if there has been a bus device
25526 25526 * reset or bus reset (both of which cause an existing reservation
25527 25527 * to be lost). If the reservation is lost issue RESERVE until a
25528 25528 * period of min_ownership_delay with no resets has gone by, or
25529 25529 * until max_ownership_delay has expired. This loop ensures that
25530 25530 * the host really did manage to reserve the device, in spite of
25531 25531 * resets. The looping for min_ownership_delay (default six
25532 25532 * seconds) is important to early generation clustering products,
25533 25533 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an
25534 25534 * MHIOCENFAILFAST periodic timer of two seconds. By having
25535 25535 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having
25536 25536 * MHIOCENFAILFAST poll every two seconds, the idea is that by the
25537 25537 * time the MHIOCTKOWN ioctl returns, the other host (if any) will
25538 25538 * have already noticed, via the MHIOCENFAILFAST polling, that it
25539 25539 * no longer "owns" the disk and will have panicked itself. Thus,
25540 25540 * the host issuing the MHIOCTKOWN is assured (with timing
25541 25541 * dependencies) that by the time it actually starts to use the
25542 25542 * disk for real work, the old owner is no longer accessing it.
25543 25543 *
25544 25544 * min_ownership_delay is the minimum amount of time for which the
25545 25545 * disk must be reserved continuously devoid of resets before the
25546 25546 * MHIOCTKOWN ioctl will return success.
25547 25547 *
25548 25548 * max_ownership_delay indicates the amount of time by which the
25549 25549 * take ownership should succeed or timeout with an error.
25550 25550 *
25551 25551 * Arguments: dev - the device 'dev_t'
25552 25552 * *p - struct containing timing info.
25553 25553 *
25554 25554 * Return Code: 0 for success or error code
25555 25555 */
25556 25556
25557 25557 static int
25558 25558 sd_take_ownership(dev_t dev, struct mhioctkown *p)
25559 25559 {
25560 25560 struct sd_lun *un;
25561 25561 int rval;
25562 25562 int err;
25563 25563 int reservation_count = 0;
25564 25564 int min_ownership_delay = 6000000; /* in usec */
25565 25565 int max_ownership_delay = 30000000; /* in usec */
25566 25566 clock_t start_time; /* starting time of this algorithm */
25567 25567 clock_t end_time; /* time limit for giving up */
25568 25568 clock_t ownership_time; /* time limit for stable ownership */
25569 25569 clock_t current_time;
25570 25570 clock_t previous_current_time;
25571 25571
25572 25572 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25573 25573 return (ENXIO);
25574 25574 }
25575 25575
25576 25576 /*
25577 25577 * Attempt a device reservation. A priority reservation is requested.
25578 25578 */
25579 25579 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE))
25580 25580 != SD_SUCCESS) {
25581 25581 SD_ERROR(SD_LOG_IOCTL_MHD, un,
25582 25582 "sd_take_ownership: return(1)=%d\n", rval);
25583 25583 return (rval);
25584 25584 }
25585 25585
25586 25586 /* Update the softstate reserved status to indicate the reservation */
25587 25587 mutex_enter(SD_MUTEX(un));
25588 25588 un->un_resvd_status |= SD_RESERVE;
25589 25589 un->un_resvd_status &=
25590 25590 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT);
25591 25591 mutex_exit(SD_MUTEX(un));
25592 25592
25593 25593 if (p != NULL) {
25594 25594 if (p->min_ownership_delay != 0) {
25595 25595 min_ownership_delay = p->min_ownership_delay * 1000;
25596 25596 }
25597 25597 if (p->max_ownership_delay != 0) {
25598 25598 max_ownership_delay = p->max_ownership_delay * 1000;
25599 25599 }
25600 25600 }
25601 25601 SD_INFO(SD_LOG_IOCTL_MHD, un,
25602 25602 "sd_take_ownership: min, max delays: %d, %d\n",
25603 25603 min_ownership_delay, max_ownership_delay);
25604 25604
25605 25605 start_time = ddi_get_lbolt();
25606 25606 current_time = start_time;
25607 25607 ownership_time = current_time + drv_usectohz(min_ownership_delay);
25608 25608 end_time = start_time + drv_usectohz(max_ownership_delay);
25609 25609
25610 25610 while (current_time - end_time < 0) {
25611 25611 delay(drv_usectohz(500000));
25612 25612
25613 25613 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) {
25614 25614 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) {
25615 25615 mutex_enter(SD_MUTEX(un));
25616 25616 rval = (un->un_resvd_status &
25617 25617 SD_RESERVATION_CONFLICT) ? EACCES : EIO;
25618 25618 mutex_exit(SD_MUTEX(un));
25619 25619 break;
25620 25620 }
25621 25621 }
25622 25622 previous_current_time = current_time;
25623 25623 current_time = ddi_get_lbolt();
25624 25624 mutex_enter(SD_MUTEX(un));
25625 25625 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) {
25626 25626 ownership_time = ddi_get_lbolt() +
25627 25627 drv_usectohz(min_ownership_delay);
25628 25628 reservation_count = 0;
25629 25629 } else {
25630 25630 reservation_count++;
25631 25631 }
25632 25632 un->un_resvd_status |= SD_RESERVE;
25633 25633 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE);
25634 25634 mutex_exit(SD_MUTEX(un));
25635 25635
25636 25636 SD_INFO(SD_LOG_IOCTL_MHD, un,
25637 25637 "sd_take_ownership: ticks for loop iteration=%ld, "
25638 25638 "reservation=%s\n", (current_time - previous_current_time),
25639 25639 reservation_count ? "ok" : "reclaimed");
25640 25640
25641 25641 if (current_time - ownership_time >= 0 &&
25642 25642 reservation_count >= 4) {
25643 25643 rval = 0; /* Achieved a stable ownership */
25644 25644 break;
25645 25645 }
25646 25646 if (current_time - end_time >= 0) {
25647 25647 rval = EACCES; /* No ownership in max possible time */
25648 25648 break;
25649 25649 }
25650 25650 }
25651 25651 SD_TRACE(SD_LOG_IOCTL_MHD, un,
25652 25652 "sd_take_ownership: return(2)=%d\n", rval);
25653 25653 return (rval);
25654 25654 }
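
Stripped of the SCSI plumbing, the loop above is lbolt arithmetic: a rolling "stable since" deadline that restarts on any reservation loss, plus a hard overall deadline. A hedged userland restatement, with now() and probe_reserve() as stand-ins for ddi_get_lbolt() and the RESERVE reissue (one-second ticks instead of the driver's 500 ms cadence):

#include <time.h>
#include <unistd.h>

static long now(void) { return ((long)time(NULL)); }
static int probe_reserve(void) { return (0); }	/* 0: reservation held */

static int
stable_ownership(long min_delay, long max_delay)
{
	long start = now();
	long stable_at = start + min_delay;	/* rolling deadline */
	long give_up = start + max_delay;	/* hard deadline */
	int clean = 0;				/* consecutive good probes */

	for (;;) {
		(void) sleep(1);
		if (probe_reserve() != 0) {
			stable_at = now() + min_delay;	/* reset the window */
			clean = 0;
		} else {
			clean++;
		}
		if (now() >= stable_at && clean >= 4)
			return (0);	/* stable ownership achieved */
		if (now() >= give_up)
			return (-1);	/* the EACCES case above */
	}
}
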
25655 25655
25656 25656
25657 25657 /*
25658 25658 * Function: sd_reserve_release()
25659 25659 *
25660 25660 * Description: This function builds and sends scsi RESERVE, RELEASE, and
25661 25661 * PRIORITY RESERVE commands based on a user specified command type
25662 25662 *
25663 25663 * Arguments: dev - the device 'dev_t'
25664 25664 * cmd - user specified command type; one of SD_PRIORITY_RESERVE,
25665 25665 * SD_RESERVE, SD_RELEASE
25666 25666 *
25667 25667 * Return Code: 0 or Error Code
25668 25668 */
25669 25669
25670 25670 static int
25671 25671 sd_reserve_release(dev_t dev, int cmd)
25672 25672 {
25673 25673 struct uscsi_cmd *com = NULL;
25674 25674 struct sd_lun *un = NULL;
25675 25675 char cdb[CDB_GROUP0];
25676 25676 int rval;
25677 25677
25678 25678 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) ||
25679 25679 (cmd == SD_PRIORITY_RESERVE));
25680 25680
25681 25681 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25682 25682 return (ENXIO);
25683 25683 }
25684 25684
25685 25685 /* instantiate and initialize the command and cdb */
25686 25686 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
25687 25687 bzero(cdb, CDB_GROUP0);
25688 25688 com->uscsi_flags = USCSI_SILENT;
25689 25689 com->uscsi_timeout = un->un_reserve_release_time;
25690 25690 com->uscsi_cdblen = CDB_GROUP0;
25691 25691 com->uscsi_cdb = cdb;
25692 25692 if (cmd == SD_RELEASE) {
25693 25693 cdb[0] = SCMD_RELEASE;
25694 25694 } else {
25695 25695 cdb[0] = SCMD_RESERVE;
25696 25696 }
25697 25697
25698 25698 /* Send the command. */
25699 25699 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
25700 25700 SD_PATH_STANDARD);
25701 25701
25702 25702 /*
25703 25703 	 * "Break" a reservation held by another host by issuing a reset,
25704 25704 	 * if a priority reserve is desired and we could not get the
25705 25705 	 * device.
25706 25706 */
25707 25707 if ((cmd == SD_PRIORITY_RESERVE) &&
25708 25708 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
25709 25709 /*
25710 25710 * First try to reset the LUN. If we cannot, then try a target
25711 25711 * reset, followed by a bus reset if the target reset fails.
25712 25712 */
25713 25713 int reset_retval = 0;
25714 25714 if (un->un_f_lun_reset_enabled == TRUE) {
25715 25715 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
25716 25716 }
25717 25717 if (reset_retval == 0) {
25718 25718 /* The LUN reset either failed or was not issued */
25719 25719 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
25720 25720 }
25721 25721 if ((reset_retval == 0) &&
25722 25722 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) {
25723 25723 rval = EIO;
25724 25724 kmem_free(com, sizeof (*com));
25725 25725 return (rval);
25726 25726 }
25727 25727
25728 25728 bzero(com, sizeof (struct uscsi_cmd));
25729 25729 com->uscsi_flags = USCSI_SILENT;
25730 25730 com->uscsi_cdb = cdb;
25731 25731 com->uscsi_cdblen = CDB_GROUP0;
25732 25732 com->uscsi_timeout = 5;
25733 25733
25734 25734 /*
25735 25735 * Reissue the last reserve command, this time without request
25736 25736 * sense. Assume that it is just a regular reserve command.
25737 25737 */
25738 25738 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
25739 25739 SD_PATH_STANDARD);
25740 25740 }
25741 25741
25742 25742 /* Return an error if still getting a reservation conflict. */
25743 25743 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
25744 25744 rval = EACCES;
25745 25745 }
25746 25746
25747 25747 kmem_free(com, sizeof (*com));
25748 25748 return (rval);
25749 25749 }
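
Both commands built here are 6-byte Group 0 CDBs differing only in the opcode; in the simple non-extent form used above, bytes 1 through 5 stay zero. A self-contained sketch of the same construction outside the uscsi plumbing (the RR_ names are illustrative stand-ins for the driver's definitions):

#include <stdint.h>
#include <string.h>

#define	RR_CDB_GROUP0	6
#define	RR_SCMD_RESERVE	0x16	/* SCSI-2 RESERVE(6) opcode */
#define	RR_SCMD_RELEASE	0x17	/* SCSI-2 RELEASE(6) opcode */

/* Fill a Group 0 CDB for the simple (non-extent) reserve/release form. */
static void
build_reserve_cdb(uint8_t cdb[RR_CDB_GROUP0], int release)
{
	(void) memset(cdb, 0, RR_CDB_GROUP0);
	cdb[0] = release ? RR_SCMD_RELEASE : RR_SCMD_RESERVE;
	/* bytes 1-5 remain zero: no 3rd-party ID, no extent reservation */
}
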
25750 25750
25751 25751
25752 25752 #define SD_NDUMP_RETRIES 12
25753 25753 /*
25754 25754 * System Crash Dump routine
25755 25755 */
25756 25756
25757 25757 static int
25758 25758 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
25759 25759 {
25760 25760 int instance;
25761 25761 int partition;
25762 25762 int i;
25763 25763 int err;
25764 25764 struct sd_lun *un;
25765 25765 struct scsi_pkt *wr_pktp;
25766 25766 struct buf *wr_bp;
25767 25767 struct buf wr_buf;
25768 25768 daddr_t tgt_byte_offset; /* rmw - byte offset for target */
25769 25769 daddr_t tgt_blkno; /* rmw - blkno for target */
25770 25770 size_t tgt_byte_count; /* rmw - # of bytes to xfer */
25771 25771 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */
25772 25772 size_t io_start_offset;
25773 25773 int doing_rmw = FALSE;
25774 25774 int rval;
25775 25775 ssize_t dma_resid;
25776 25776 daddr_t oblkno;
25777 25777 diskaddr_t nblks = 0;
25778 25778 diskaddr_t start_block;
25779 25779
25780 25780 instance = SDUNIT(dev);
25781 25781 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
25782 25782 !SD_IS_VALID_LABEL(un) || ISCD(un)) {
25783 25783 return (ENXIO);
25784 25784 }
25785 25785
25786 25786 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))
25787 25787
25788 25788 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");
25789 25789
25790 25790 partition = SDPART(dev);
25791 25791 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);
25792 25792
25793 25793 if (!(NOT_DEVBSIZE(un))) {
25794 25794 int secmask = 0;
25795 25795 int blknomask = 0;
25796 25796
25797 25797 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
25798 25798 secmask = un->un_tgt_blocksize - 1;
25799 25799
25800 25800 if (blkno & blknomask) {
25801 25801 SD_TRACE(SD_LOG_DUMP, un,
25802 25802 "sddump: dump start block not modulo %d\n",
25803 25803 un->un_tgt_blocksize);
25804 25804 return (EINVAL);
25805 25805 }
25806 25806
25807 25807 if ((nblk * DEV_BSIZE) & secmask) {
25808 25808 SD_TRACE(SD_LOG_DUMP, un,
25809 25809 "sddump: dump length not modulo %d\n",
25810 25810 un->un_tgt_blocksize);
25811 25811 return (EINVAL);
25812 25812 }
25813 25813
25814 25814 }
25815 25815
25816 25816 	/* Validate the blocks to dump against the partition size. */
25817 25817
25818 25818 (void) cmlb_partinfo(un->un_cmlbhandle, partition,
25819 25819 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT);
25820 25820
25821 25821 if (NOT_DEVBSIZE(un)) {
25822 25822 if ((blkno + nblk) > nblks) {
25823 25823 SD_TRACE(SD_LOG_DUMP, un,
25824 25824 "sddump: dump range larger than partition: "
25825 25825 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
25826 25826 blkno, nblk, nblks);
25827 25827 return (EINVAL);
25828 25828 }
25829 25829 } else {
25830 25830 if (((blkno / (un->un_tgt_blocksize / DEV_BSIZE)) +
25831 25831 (nblk / (un->un_tgt_blocksize / DEV_BSIZE))) > nblks) {
25832 25832 SD_TRACE(SD_LOG_DUMP, un,
25833 25833 "sddump: dump range larger than partition: "
25834 25834 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
25835 25835 blkno, nblk, nblks);
25836 25836 return (EINVAL);
25837 25837 }
25838 25838 }
25839 25839
25840 25840 mutex_enter(&un->un_pm_mutex);
25841 25841 if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
25842 25842 struct scsi_pkt *start_pktp;
25843 25843
25844 25844 mutex_exit(&un->un_pm_mutex);
25845 25845
25846 25846 /*
25847 25847 		 * Use the pm framework to power on the HBA first.
25848 25848 */
25849 25849 (void) pm_raise_power(SD_DEVINFO(un), 0,
25850 25850 SD_PM_STATE_ACTIVE(un));
25851 25851
25852 25852 /*
25853 25853 		 * Dump no longer uses sdpower to power on a device; it is
25854 25854 		 * done in-line here so that it can run in polled mode.
25855 25855 */
25856 25856
25857 25857 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");
25858 25858
25859 25859 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
25860 25860 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);
25861 25861
25862 25862 if (start_pktp == NULL) {
25863 25863 /* We were not given a SCSI packet, fail. */
25864 25864 return (EIO);
25865 25865 }
25866 25866 bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
25867 25867 start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
25868 25868 start_pktp->pkt_cdbp[4] = SD_TARGET_START;
25869 25869 start_pktp->pkt_flags = FLAG_NOINTR;
25870 25870
25871 25871 mutex_enter(SD_MUTEX(un));
25872 25872 SD_FILL_SCSI1_LUN(un, start_pktp);
25873 25873 mutex_exit(SD_MUTEX(un));
25874 25874 /*
25875 25875 * Scsi_poll returns 0 (success) if the command completes and
25876 25876 * the status block is STATUS_GOOD.
25877 25877 */
25878 25878 if (sd_scsi_poll(un, start_pktp) != 0) {
25879 25879 scsi_destroy_pkt(start_pktp);
25880 25880 return (EIO);
25881 25881 }
25882 25882 scsi_destroy_pkt(start_pktp);
25883 25883 (void) sd_pm_state_change(un, SD_PM_STATE_ACTIVE(un),
25884 25884 SD_PM_STATE_CHANGE);
25885 25885 } else {
25886 25886 mutex_exit(&un->un_pm_mutex);
25887 25887 }
25888 25888
25889 25889 mutex_enter(SD_MUTEX(un));
25890 25890 un->un_throttle = 0;
25891 25891
25892 25892 /*
25893 25893 * The first time through, reset the specific target device.
25894 25894 	 * However, when cpr calls sddump we know that sd is in a
25895 25895 	 * good state, so no bus reset is required.
25896 25896 	 * Clear sense data via a Request Sense cmd.
25897 25897 	 * In sddump we don't care about allow_bus_device_reset anymore.
25898 25898 */
25899 25899
25900 25900 if ((un->un_state != SD_STATE_SUSPENDED) &&
25901 25901 (un->un_state != SD_STATE_DUMPING)) {
25902 25902
25903 25903 New_state(un, SD_STATE_DUMPING);
25904 25904
25905 25905 if (un->un_f_is_fibre == FALSE) {
25906 25906 mutex_exit(SD_MUTEX(un));
25907 25907 /*
25908 25908 * Attempt a bus reset for parallel scsi.
25909 25909 *
25910 25910 * Note: A bus reset is required because on some host
25911 25911 * systems (i.e. E420R) a bus device reset is
25912 25912 * insufficient to reset the state of the target.
25913 25913 *
25914 25914 * Note: Don't issue the reset for fibre-channel,
25915 25915 * because this tends to hang the bus (loop) for
25916 25916 * too long while everyone is logging out and in
25917 25917 * and the deadman timer for dumping will fire
25918 25918 * before the dump is complete.
25919 25919 */
25920 25920 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
25921 25921 mutex_enter(SD_MUTEX(un));
25922 25922 Restore_state(un);
25923 25923 mutex_exit(SD_MUTEX(un));
25924 25924 return (EIO);
25925 25925 }
25926 25926
25927 25927 /* Delay to give the device some recovery time. */
25928 25928 drv_usecwait(10000);
25929 25929
25930 25930 if (sd_send_polled_RQS(un) == SD_FAILURE) {
25931 25931 SD_INFO(SD_LOG_DUMP, un,
25932 25932 "sddump: sd_send_polled_RQS failed\n");
25933 25933 }
25934 25934 mutex_enter(SD_MUTEX(un));
25935 25935 }
25936 25936 }
25937 25937
25938 25938 /*
25939 25939 * Convert the partition-relative block number to a
25940 25940 * disk physical block number.
25941 25941 */
25942 25942 if (NOT_DEVBSIZE(un)) {
25943 25943 blkno += start_block;
25944 25944 } else {
25945 25945 blkno = blkno / (un->un_tgt_blocksize / DEV_BSIZE);
25946 25946 blkno += start_block;
25947 25947 }
25948 25948
25949 25949 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);
25950 25950
25951 25951
25952 25952 /*
25953 25953 * Check if the device has a non-512 block size.
25954 25954 */
25955 25955 wr_bp = NULL;
25956 25956 if (NOT_DEVBSIZE(un)) {
25957 25957 tgt_byte_offset = blkno * un->un_sys_blocksize;
25958 25958 tgt_byte_count = nblk * un->un_sys_blocksize;
25959 25959 if ((tgt_byte_offset % un->un_tgt_blocksize) ||
25960 25960 (tgt_byte_count % un->un_tgt_blocksize)) {
25961 25961 doing_rmw = TRUE;
25962 25962 /*
25963 25963 			 * Calculate the block number and the number of blocks
25964 25964 			 * in terms of the media block size.
25965 25965 */
25966 25966 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
25967 25967 tgt_nblk =
25968 25968 ((tgt_byte_offset + tgt_byte_count +
25969 25969 (un->un_tgt_blocksize - 1)) /
25970 25970 un->un_tgt_blocksize) - tgt_blkno;
25971 25971
25972 25972 /*
25973 25973 			 * Invoke the routine that performs the read part
25974 25974 			 * of the read-modify-write.
25975 25975 * Note that this routine returns a pointer to
25976 25976 * a valid bp in wr_bp.
25977 25977 */
25978 25978 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk,
25979 25979 &wr_bp);
25980 25980 if (err) {
25981 25981 mutex_exit(SD_MUTEX(un));
25982 25982 return (err);
25983 25983 }
25984 25984 /*
25985 25985 			 * The offset is calculated as:
25986 25986 			 * (original block # * system block size) -
25987 25987 			 * (new block # * target block size)
25988 25988 */
25989 25989 io_start_offset =
25990 25990 ((uint64_t)(blkno * un->un_sys_blocksize)) -
25991 25991 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize));
25992 25992
25993 25993 ASSERT((io_start_offset >= 0) &&
25994 25994 (io_start_offset < un->un_tgt_blocksize));
25995 25995 /*
25996 25996 			 * Do the modify portion of the read-modify-write.
25997 25997 */
25998 25998 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset],
25999 25999 (size_t)nblk * un->un_sys_blocksize);
26000 26000 } else {
26001 26001 doing_rmw = FALSE;
26002 26002 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
26003 26003 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize;
26004 26004 }
26005 26005
26006 26006 /* Convert blkno and nblk to target blocks */
26007 26007 blkno = tgt_blkno;
26008 26008 nblk = tgt_nblk;
26009 26009 } else {
26010 26010 wr_bp = &wr_buf;
26011 26011 bzero(wr_bp, sizeof (struct buf));
26012 26012 wr_bp->b_flags = B_BUSY;
26013 26013 wr_bp->b_un.b_addr = addr;
26014 26014 wr_bp->b_bcount = nblk << DEV_BSHIFT;
26015 26015 wr_bp->b_resid = 0;
26016 26016 }
26017 26017
26018 26018 mutex_exit(SD_MUTEX(un));
26019 26019
26020 26020 /*
26021 26021 * Obtain a SCSI packet for the write command.
26022 26022 * It should be safe to call the allocator here without
26023 26023 * worrying about being locked for DVMA mapping because
26024 26024 	 * the address we're passed is already a DVMA mapping.
26025 26025 *
26026 26026 * We are also not going to worry about semaphore ownership
26027 26027 * in the dump buffer. Dumping is single threaded at present.
26028 26028 */
26029 26029
26030 26030 wr_pktp = NULL;
26031 26031
26032 26032 dma_resid = wr_bp->b_bcount;
26033 26033 oblkno = blkno;
26034 26034
26035 26035 if (!(NOT_DEVBSIZE(un))) {
26036 26036 nblk = nblk / (un->un_tgt_blocksize / DEV_BSIZE);
26037 26037 }
26038 26038
26039 26039 while (dma_resid != 0) {
26040 26040
26041 26041 for (i = 0; i < SD_NDUMP_RETRIES; i++) {
26042 26042 wr_bp->b_flags &= ~B_ERROR;
26043 26043
26044 26044 if (un->un_partial_dma_supported == 1) {
26045 26045 blkno = oblkno +
26046 26046 ((wr_bp->b_bcount - dma_resid) /
26047 26047 un->un_tgt_blocksize);
26048 26048 nblk = dma_resid / un->un_tgt_blocksize;
26049 26049
26050 26050 if (wr_pktp) {
26051 26051 /*
26052 26052 * Partial DMA transfers after initial transfer
26053 26053 */
26054 26054 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp,
26055 26055 blkno, nblk);
26056 26056 } else {
26057 26057 /* Initial transfer */
26058 26058 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp,
26059 26059 un->un_pkt_flags, NULL_FUNC, NULL,
26060 26060 blkno, nblk);
26061 26061 }
26062 26062 } else {
26063 26063 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp,
26064 26064 0, NULL_FUNC, NULL, blkno, nblk);
26065 26065 }
26066 26066
26067 26067 if (rval == 0) {
26068 26068 /* We were given a SCSI packet, continue. */
26069 26069 break;
26070 26070 }
26071 26071
26072 26072 if (i == 0) {
26073 26073 if (wr_bp->b_flags & B_ERROR) {
26074 26074 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26075 26075 "no resources for dumping; "
26076 26076 "error code: 0x%x, retrying",
26077 26077 geterror(wr_bp));
26078 26078 } else {
26079 26079 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26080 26080 "no resources for dumping; retrying");
26081 26081 }
26082 26082 } else if (i != (SD_NDUMP_RETRIES - 1)) {
26083 26083 if (wr_bp->b_flags & B_ERROR) {
26084 26084 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
26085 26085 "no resources for dumping; error code: "
26086 26086 "0x%x, retrying\n", geterror(wr_bp));
26087 26087 }
26088 26088 } else {
26089 26089 if (wr_bp->b_flags & B_ERROR) {
26090 26090 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
26091 26091 "no resources for dumping; "
26092 26092 "error code: 0x%x, retries failed, "
26093 26093 "giving up.\n", geterror(wr_bp));
26094 26094 } else {
26095 26095 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
26096 26096 "no resources for dumping; "
26097 26097 "retries failed, giving up.\n");
26098 26098 }
26099 26099 mutex_enter(SD_MUTEX(un));
26100 26100 Restore_state(un);
26101 26101 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) {
26102 26102 mutex_exit(SD_MUTEX(un));
26103 26103 scsi_free_consistent_buf(wr_bp);
26104 26104 } else {
26105 26105 mutex_exit(SD_MUTEX(un));
26106 26106 }
26107 26107 return (EIO);
26108 26108 }
26109 26109 drv_usecwait(10000);
26110 26110 }
26111 26111
26112 26112 if (un->un_partial_dma_supported == 1) {
26113 26113 /*
26114 26114 * save the resid from PARTIAL_DMA
26115 26115 */
26116 26116 dma_resid = wr_pktp->pkt_resid;
26117 26117 if (dma_resid != 0)
26118 26118 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid);
26119 26119 wr_pktp->pkt_resid = 0;
26120 26120 } else {
26121 26121 dma_resid = 0;
26122 26122 }
26123 26123
26124 26124 /* SunBug 1222170 */
26125 26125 wr_pktp->pkt_flags = FLAG_NOINTR;
26126 26126
26127 26127 err = EIO;
26128 26128 for (i = 0; i < SD_NDUMP_RETRIES; i++) {
26129 26129
26130 26130 /*
26131 26131 * Scsi_poll returns 0 (success) if the command completes and
26132 26132 * the status block is STATUS_GOOD. We should only check
26133 26133 * errors if this condition is not true. Even then we should
26134 26134 * send our own request sense packet only if we have a check
26135 26135 * condition and auto request sense has not been performed by
26136 26136 * the hba.
26137 26137 */
26138 26138 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n");
26139 26139
26140 26140 if ((sd_scsi_poll(un, wr_pktp) == 0) &&
26141 26141 (wr_pktp->pkt_resid == 0)) {
26142 26142 err = SD_SUCCESS;
26143 26143 break;
26144 26144 }
26145 26145
26146 26146 /*
26147 26147 * Check CMD_DEV_GONE 1st, give up if device is gone.
26148 26148 */
26149 26149 if (wr_pktp->pkt_reason == CMD_DEV_GONE) {
26150 26150 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26151 26151 "Error while dumping state...Device is gone\n");
26152 26152 break;
26153 26153 }
26154 26154
26155 26155 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) {
26156 26156 SD_INFO(SD_LOG_DUMP, un,
26157 26157 "sddump: write failed with CHECK, try # %d\n", i);
26158 26158 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) {
26159 26159 (void) sd_send_polled_RQS(un);
26160 26160 }
26161 26161
26162 26162 continue;
26163 26163 }
26164 26164
26165 26165 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) {
26166 26166 int reset_retval = 0;
26167 26167
26168 26168 SD_INFO(SD_LOG_DUMP, un,
26169 26169 "sddump: write failed with BUSY, try # %d\n", i);
26170 26170
26171 26171 if (un->un_f_lun_reset_enabled == TRUE) {
26172 26172 reset_retval = scsi_reset(SD_ADDRESS(un),
26173 26173 RESET_LUN);
26174 26174 }
26175 26175 if (reset_retval == 0) {
26176 26176 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
26177 26177 }
26178 26178 (void) sd_send_polled_RQS(un);
26179 26179
26180 26180 } else {
26181 26181 SD_INFO(SD_LOG_DUMP, un,
26182 26182 "sddump: write failed with 0x%x, try # %d\n",
26183 26183 SD_GET_PKT_STATUS(wr_pktp), i);
26184 26184 mutex_enter(SD_MUTEX(un));
26185 26185 sd_reset_target(un, wr_pktp);
26186 26186 mutex_exit(SD_MUTEX(un));
26187 26187 }
26188 26188
26189 26189 /*
26190 26190 * If we are not getting anywhere with lun/target resets,
26191 26191 * let's reset the bus.
26192 26192 */
26193 26193 			if (i == SD_NDUMP_RETRIES / 2) {
26194 26194 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
26195 26195 (void) sd_send_polled_RQS(un);
26196 26196 }
26197 26197 }
26198 26198 }
26199 26199
26200 26200 scsi_destroy_pkt(wr_pktp);
26201 26201 mutex_enter(SD_MUTEX(un));
26202 26202 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) {
26203 26203 mutex_exit(SD_MUTEX(un));
26204 26204 scsi_free_consistent_buf(wr_bp);
26205 26205 } else {
26206 26206 mutex_exit(SD_MUTEX(un));
26207 26207 }
26208 26208 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err);
26209 26209 return (err);
26210 26210 }
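
To sanity-check the read-modify-write arithmetic used above when the target block size differs from DEV_BSIZE, here is a small stand-alone worked example; the 512/4096 block sizes and the blkno/nblk values are assumptions chosen only for illustration:

	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		uint64_t sys_bsize = 512, tgt_bsize = 4096;	/* assumed */
		uint64_t blkno = 10, nblk = 3;			/* assumed */
		uint64_t off = blkno * sys_bsize;		/* 5120 */
		uint64_t cnt = nblk * sys_bsize;		/* 1536 */
		uint64_t tgt_blkno = off / tgt_bsize;		/* 1 */
		uint64_t tgt_nblk =
		    ((off + cnt + (tgt_bsize - 1)) / tgt_bsize) - tgt_blkno;
		uint64_t io_start = off - (tgt_blkno * tgt_bsize);

		/* Prints: tgt_blkno=1 tgt_nblk=1 io_start_offset=1024 */
		(void) printf("tgt_blkno=%llu tgt_nblk=%llu "
		    "io_start_offset=%llu\n",
		    (unsigned long long)tgt_blkno,
		    (unsigned long long)tgt_nblk,
		    (unsigned long long)io_start);
		return (0);
	}

The 1536 bytes of dump data land entirely inside target block 1, so one 4096-byte block is read, the data is copied in at offset 1024, and the whole block is written back.
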
26211 26211
26212 26212 /*
26213 26213 * Function: sd_scsi_poll()
26214 26214 *
26215 26215 * Description: This is a wrapper for the scsi_poll call.
26216 26216 *
26217 26217 * Arguments: sd_lun - The unit structure
26218 26218 * scsi_pkt - The scsi packet being sent to the device.
26219 26219 *
26220 26220 * Return Code: 0 - Command completed successfully with good status
26221 26221 * -1 - Command failed. This could indicate a check condition
26222 26222 * or other status value requiring recovery action.
26223 26223 *
26224 26224 * NOTE: This code is only called off sddump().
26225 26225 */
26226 26226
26227 26227 static int
26228 26228 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp)
26229 26229 {
26230 26230 int status;
26231 26231
26232 26232 ASSERT(un != NULL);
26233 26233 ASSERT(!mutex_owned(SD_MUTEX(un)));
26234 26234 ASSERT(pktp != NULL);
26235 26235
26236 26236 status = SD_SUCCESS;
26237 26237
26238 26238 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) {
26239 26239 pktp->pkt_flags |= un->un_tagflags;
26240 26240 pktp->pkt_flags &= ~FLAG_NODISCON;
26241 26241 }
26242 26242
26243 26243 status = sd_ddi_scsi_poll(pktp);
26244 26244 /*
26245 26245 * Scsi_poll returns 0 (success) if the command completes and the
26246 26246 * status block is STATUS_GOOD. We should only check errors if this
26247 26247 * condition is not true. Even then we should send our own request
26248 26248 * sense packet only if we have a check condition and auto
26249 26249 * request sense has not been performed by the hba.
26250 26250 * Don't get RQS data if pkt_reason is CMD_DEV_GONE.
26251 26251 */
26252 26252 if ((status != SD_SUCCESS) &&
26253 26253 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) &&
26254 26254 (pktp->pkt_state & STATE_ARQ_DONE) == 0 &&
26255 26255 (pktp->pkt_reason != CMD_DEV_GONE))
26256 26256 (void) sd_send_polled_RQS(un);
26257 26257
26258 26258 return (status);
26259 26259 }
26260 26260
26261 26261 /*
26262 26262 * Function: sd_send_polled_RQS()
26263 26263 *
26264 26264 * Description: This sends the request sense command to a device.
26265 26265 *
26266 26266 * Arguments: sd_lun - The unit structure
26267 26267 *
26268 26268 * Return Code: 0 - Command completed successfully with good status
26269 26269 * -1 - Command failed.
26270 26270 *
26271 26271 */
26272 26272
26273 26273 static int
26274 26274 sd_send_polled_RQS(struct sd_lun *un)
26275 26275 {
26276 26276 int ret_val;
26277 26277 struct scsi_pkt *rqs_pktp;
26278 26278 struct buf *rqs_bp;
26279 26279
26280 26280 ASSERT(un != NULL);
26281 26281 ASSERT(!mutex_owned(SD_MUTEX(un)));
26282 26282
26283 26283 ret_val = SD_SUCCESS;
26284 26284
26285 26285 rqs_pktp = un->un_rqs_pktp;
26286 26286 rqs_bp = un->un_rqs_bp;
26287 26287
26288 26288 mutex_enter(SD_MUTEX(un));
26289 26289
26290 26290 if (un->un_sense_isbusy) {
26291 26291 ret_val = SD_FAILURE;
26292 26292 mutex_exit(SD_MUTEX(un));
26293 26293 return (ret_val);
26294 26294 }
26295 26295
26296 26296 /*
26297 26297 * If the request sense buffer (and packet) is not in use,
26298 26298 	 * let's set un_sense_isbusy and send our packet.
26299 26299 */
26300 26300 un->un_sense_isbusy = 1;
26301 26301 rqs_pktp->pkt_resid = 0;
26302 26302 rqs_pktp->pkt_reason = 0;
26303 26303 rqs_pktp->pkt_flags |= FLAG_NOINTR;
26304 26304 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH);
26305 26305
26306 26306 mutex_exit(SD_MUTEX(un));
26307 26307
26308 26308 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at"
26309 26309 " 0x%p\n", rqs_bp->b_un.b_addr);
26310 26310
26311 26311 /*
26312 26312 * Can't send this to sd_scsi_poll, we wrap ourselves around the
26313 26313 * axle - it has a call into us!
26314 26314 */
26315 26315 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) {
26316 26316 SD_INFO(SD_LOG_COMMON, un,
26317 26317 "sd_send_polled_RQS: RQS failed\n");
26318 26318 }
26319 26319
26320 26320 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:",
26321 26321 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX);
26322 26322
26323 26323 mutex_enter(SD_MUTEX(un));
26324 26324 un->un_sense_isbusy = 0;
26325 26325 mutex_exit(SD_MUTEX(un));
26326 26326
26327 26327 return (ret_val);
26328 26328 }
26329 26329
26330 26330 /*
26331 26331 * Defines needed for localized version of the scsi_poll routine.
26332 26332 */
26333 26333 #define CSEC 10000 /* usecs */
26334 26334 #define SEC_TO_CSEC (1000000/CSEC)
26335 26335
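
In these units a poll tick is CSEC = 10000 usec (10 msec) and SEC_TO_CSEC = 100 ticks per second. A sketch of the resulting budget, assuming the default 60-second pkt_time used below:

	/*
	 * timeout    = pkt_time * SEC_TO_CSEC = 60 * 100 = 6000 ticks
	 * short wait = 1 * CSEC   = 10 msec  (tran busy, qfull, etc.)
	 * long wait  = 100 * CSEC = 1 sec    (busy, becoming ready)
	 *
	 * A 1-second retry also adds (SEC_TO_CSEC - 1) = 99 ticks to
	 * busy_count, so each such retry consumes a full second of the
	 * 60-second budget rather than a single 10-msec tick.
	 */
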
26336 26336 /*
26337 26337 * Function: sd_ddi_scsi_poll()
26338 26338 *
26339 26339 * Description: Localized version of the scsi_poll routine. The purpose is to
26340 26340 * send a scsi_pkt to a device as a polled command. This version
26341 26341 * is to ensure more robust handling of transport errors.
26342 26342  *		Specifically, this routine handles the not-ready to ready
26343 26343  *		transition for power-up and reset of Sonoma arrays. This can
26344 26344  *		take up to 45 seconds for power-on and 20 seconds for reset
26345 26345  *		of a Sonoma LUN.
26346 26346 *
26347 26347 * Arguments: scsi_pkt - The scsi_pkt being sent to a device
26348 26348 *
26349 26349 * Return Code: 0 - Command completed successfully with good status
26350 26350 * -1 - Command failed.
26351 26351 *
26352 26352 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can
26353 26353 * be fixed (removing this code), we need to determine how to handle the
26354 26354 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump().
26355 26355 *
26356 26356 * NOTE: This code is only called off sddump().
26357 26357 */
26358 26358 static int
26359 26359 sd_ddi_scsi_poll(struct scsi_pkt *pkt)
26360 26360 {
26361 26361 int rval = -1;
26362 26362 int savef;
26363 26363 long savet;
26364 26364 void (*savec)();
26365 26365 int timeout;
26366 26366 int busy_count;
26367 26367 int poll_delay;
26368 26368 int rc;
26369 26369 uint8_t *sensep;
26370 26370 struct scsi_arq_status *arqstat;
26371 26371 extern int do_polled_io;
26372 26372
26373 26373 ASSERT(pkt->pkt_scbp);
26374 26374
26375 26375 /*
26376 26376 	 * save old flags.
26377 26377 */
26378 26378 savef = pkt->pkt_flags;
26379 26379 savec = pkt->pkt_comp;
26380 26380 savet = pkt->pkt_time;
26381 26381
26382 26382 pkt->pkt_flags |= FLAG_NOINTR;
26383 26383
26384 26384 /*
26385 26385 * XXX there is nothing in the SCSA spec that states that we should not
26386 26386 * do a callback for polled cmds; however, removing this will break sd
26387 26387 * and probably other target drivers
26388 26388 */
26389 26389 pkt->pkt_comp = NULL;
26390 26390
26391 26391 /*
26392 26392 	 * We don't like a polled command without a timeout;
26393 26393 	 * 60 seconds seems long enough.
26394 26394 */
26395 26395 if (pkt->pkt_time == 0)
26396 26396 pkt->pkt_time = SCSI_POLL_TIMEOUT;
26397 26397
26398 26398 /*
26399 26399 * Send polled cmd.
26400 26400 *
26401 26401 * We do some error recovery for various errors. Tran_busy,
26402 26402 	 * queue full, and non-dispatched commands are retried every 10 msec,
26403 26403 	 * as they are typically transient failures. Busy status and Not
26404 26404 	 * Ready are retried every second, as these conditions take a while
26405 26405 	 * to change.
26406 26406 */
26407 26407 timeout = pkt->pkt_time * SEC_TO_CSEC;
26408 26408
26409 26409 for (busy_count = 0; busy_count < timeout; busy_count++) {
26410 26410 /*
26411 26411 * Initialize pkt status variables.
26412 26412 */
26413 26413 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0;
26414 26414
26415 26415 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) {
26416 26416 if (rc != TRAN_BUSY) {
26417 26417 /* Transport failed - give up. */
26418 26418 break;
26419 26419 } else {
26420 26420 /* Transport busy - try again. */
26421 26421 poll_delay = 1 * CSEC; /* 10 msec. */
26422 26422 }
26423 26423 } else {
26424 26424 /*
26425 26425 * Transport accepted - check pkt status.
26426 26426 */
26427 26427 rc = (*pkt->pkt_scbp) & STATUS_MASK;
26428 26428 if ((pkt->pkt_reason == CMD_CMPLT) &&
26429 26429 (rc == STATUS_CHECK) &&
26430 26430 (pkt->pkt_state & STATE_ARQ_DONE)) {
26431 26431 arqstat =
26432 26432 (struct scsi_arq_status *)(pkt->pkt_scbp);
26433 26433 sensep = (uint8_t *)&arqstat->sts_sensedata;
26434 26434 } else {
26435 26435 sensep = NULL;
26436 26436 }
26437 26437
26438 26438 if ((pkt->pkt_reason == CMD_CMPLT) &&
26439 26439 (rc == STATUS_GOOD)) {
26440 26440 /* No error - we're done */
26441 26441 rval = 0;
26442 26442 break;
26443 26443
26444 26444 } else if (pkt->pkt_reason == CMD_DEV_GONE) {
26445 26445 /* Lost connection - give up */
26446 26446 break;
26447 26447
26448 26448 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) &&
26449 26449 (pkt->pkt_state == 0)) {
26450 26450 /* Pkt not dispatched - try again. */
26451 26451 poll_delay = 1 * CSEC; /* 10 msec. */
26452 26452
26453 26453 } else if ((pkt->pkt_reason == CMD_CMPLT) &&
26454 26454 (rc == STATUS_QFULL)) {
26455 26455 /* Queue full - try again. */
26456 26456 poll_delay = 1 * CSEC; /* 10 msec. */
26457 26457
26458 26458 } else if ((pkt->pkt_reason == CMD_CMPLT) &&
26459 26459 (rc == STATUS_BUSY)) {
26460 26460 /* Busy - try again. */
26461 26461 poll_delay = 100 * CSEC; /* 1 sec. */
26462 26462 busy_count += (SEC_TO_CSEC - 1);
26463 26463
26464 26464 } else if ((sensep != NULL) &&
26465 26465 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) {
26466 26466 /*
26467 26467 * Unit Attention - try again.
26468 26468 * Pretend it took 1 sec.
26469 26469 * NOTE: 'continue' avoids poll_delay
26470 26470 */
26471 26471 busy_count += (SEC_TO_CSEC - 1);
26472 26472 continue;
26473 26473
26474 26474 } else if ((sensep != NULL) &&
26475 26475 (scsi_sense_key(sensep) == KEY_NOT_READY) &&
26476 26476 (scsi_sense_asc(sensep) == 0x04) &&
26477 26477 (scsi_sense_ascq(sensep) == 0x01)) {
26478 26478 /*
26479 26479 * Not ready -> ready - try again.
26480 26480 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY
26481 26481 * ...same as STATUS_BUSY
26482 26482 */
26483 26483 poll_delay = 100 * CSEC; /* 1 sec. */
26484 26484 busy_count += (SEC_TO_CSEC - 1);
26485 26485
26486 26486 } else {
26487 26487 /* BAD status - give up. */
26488 26488 break;
26489 26489 }
26490 26490 }
26491 26491
26492 26492 if (((curthread->t_flag & T_INTR_THREAD) == 0) &&
26493 26493 !do_polled_io) {
26494 26494 delay(drv_usectohz(poll_delay));
26495 26495 } else {
26496 26496 			/* We busy-wait during cpr_dump or on interrupt threads. */
26497 26497 drv_usecwait(poll_delay);
26498 26498 }
26499 26499 }
26500 26500
26501 26501 pkt->pkt_flags = savef;
26502 26502 pkt->pkt_comp = savec;
26503 26503 pkt->pkt_time = savet;
26504 26504
26505 26505 /* return on error */
26506 26506 if (rval)
26507 26507 return (rval);
26508 26508
26509 26509 /*
26510 26510 	 * This is not a performance-critical code path.
26511 26511 *
26512 26512 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync()
26513 26513 * issues associated with looking at DMA memory prior to
26514 26514 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return.
26515 26515 */
26516 26516 scsi_sync_pkt(pkt);
26517 26517 return (0);
26518 26518 }
26519 26519
26520 26520
26521 26521
26522 26522 /*
26523 26523 * Function: sd_persistent_reservation_in_read_keys
26524 26524 *
26525 26525 * Description: This routine is the driver entry point for handling CD-ROM
26526 26526 * multi-host persistent reservation requests (MHIOCGRP_INKEYS)
26527 26527 * by sending the SCSI-3 PRIN commands to the device.
26528 26528 * Processes the read keys command response by copying the
26529 26529 * reservation key information into the user provided buffer.
26530 26530 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented.
26531 26531 *
26532 26532 * Arguments: un - Pointer to soft state struct for the target.
26533 26533 * usrp - user provided pointer to multihost Persistent In Read
26534 26534 * Keys structure (mhioc_inkeys_t)
26535 26535 * flag - this argument is a pass through to ddi_copyxxx()
26536 26536 * directly from the mode argument of ioctl().
26537 26537 *
26538 26538 * Return Code: 0 - Success
26539 26539 * EACCES
26540 26540 * ENOTSUP
26541 26541 * errno return code from sd_send_scsi_cmd()
26542 26542 *
26543 26543 * Context: Can sleep. Does not return until command is completed.
26544 26544 */
26545 26545
26546 26546 static int
26547 26547 sd_persistent_reservation_in_read_keys(struct sd_lun *un,
26548 26548 mhioc_inkeys_t *usrp, int flag)
26549 26549 {
26550 26550 #ifdef _MULTI_DATAMODEL
26551 26551 struct mhioc_key_list32 li32;
26552 26552 #endif
26553 26553 sd_prin_readkeys_t *in;
26554 26554 mhioc_inkeys_t *ptr;
26555 26555 mhioc_key_list_t li;
26556 26556 uchar_t *data_bufp;
26557 26557 int data_len;
26558 26558 int rval = 0;
26559 26559 size_t copysz;
26560 26560 sd_ssc_t *ssc;
26561 26561
26562 26562 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) {
26563 26563 return (EINVAL);
26564 26564 }
26565 26565 bzero(&li, sizeof (mhioc_key_list_t));
26566 26566
26567 26567 ssc = sd_ssc_init(un);
26568 26568
26569 26569 /*
26570 26570 	 * Get the listsize from the user.
26571 26571 */
26572 26572 #ifdef _MULTI_DATAMODEL
26573 26573
26574 26574 switch (ddi_model_convert_from(flag & FMODELS)) {
26575 26575 case DDI_MODEL_ILP32:
26576 26576 copysz = sizeof (struct mhioc_key_list32);
26577 26577 if (ddi_copyin(ptr->li, &li32, copysz, flag)) {
26578 26578 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26579 26579 "sd_persistent_reservation_in_read_keys: "
26580 26580 "failed ddi_copyin: mhioc_key_list32_t\n");
26581 26581 rval = EFAULT;
26582 26582 goto done;
26583 26583 }
26584 26584 li.listsize = li32.listsize;
26585 26585 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list;
26586 26586 break;
26587 26587
26588 26588 case DDI_MODEL_NONE:
26589 26589 copysz = sizeof (mhioc_key_list_t);
26590 26590 if (ddi_copyin(ptr->li, &li, copysz, flag)) {
26591 26591 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26592 26592 "sd_persistent_reservation_in_read_keys: "
26593 26593 "failed ddi_copyin: mhioc_key_list_t\n");
26594 26594 rval = EFAULT;
26595 26595 goto done;
26596 26596 }
26597 26597 break;
26598 26598 }
26599 26599
26600 26600 #else /* ! _MULTI_DATAMODEL */
26601 26601 copysz = sizeof (mhioc_key_list_t);
26602 26602 if (ddi_copyin(ptr->li, &li, copysz, flag)) {
26603 26603 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26604 26604 "sd_persistent_reservation_in_read_keys: "
26605 26605 "failed ddi_copyin: mhioc_key_list_t\n");
26606 26606 rval = EFAULT;
26607 26607 goto done;
26608 26608 }
26609 26609 #endif
26610 26610
26611 26611 data_len = li.listsize * MHIOC_RESV_KEY_SIZE;
26612 26612 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t));
26613 26613 data_bufp = kmem_zalloc(data_len, KM_SLEEP);
26614 26614
26615 26615 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS,
26616 26616 data_len, data_bufp);
26617 26617 if (rval != 0) {
26618 26618 if (rval == EIO)
26619 26619 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
26620 26620 else
26621 26621 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
26622 26622 goto done;
26623 26623 }
26624 26624 in = (sd_prin_readkeys_t *)data_bufp;
26625 26625 ptr->generation = BE_32(in->generation);
26626 26626 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE;
26627 26627
26628 26628 /*
26629 26629 * Return the min(listsize, listlen) keys
26630 26630 */
26631 26631 #ifdef _MULTI_DATAMODEL
26632 26632
26633 26633 switch (ddi_model_convert_from(flag & FMODELS)) {
26634 26634 case DDI_MODEL_ILP32:
26635 26635 li32.listlen = li.listlen;
26636 26636 if (ddi_copyout(&li32, ptr->li, copysz, flag)) {
26637 26637 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26638 26638 "sd_persistent_reservation_in_read_keys: "
26639 26639 "failed ddi_copyout: mhioc_key_list32_t\n");
26640 26640 rval = EFAULT;
26641 26641 goto done;
26642 26642 }
26643 26643 break;
26644 26644
26645 26645 case DDI_MODEL_NONE:
26646 26646 if (ddi_copyout(&li, ptr->li, copysz, flag)) {
26647 26647 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26648 26648 "sd_persistent_reservation_in_read_keys: "
26649 26649 "failed ddi_copyout: mhioc_key_list_t\n");
26650 26650 rval = EFAULT;
26651 26651 goto done;
26652 26652 }
26653 26653 break;
26654 26654 }
26655 26655
26656 26656 #else /* ! _MULTI_DATAMODEL */
26657 26657
26658 26658 if (ddi_copyout(&li, ptr->li, copysz, flag)) {
26659 26659 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26660 26660 "sd_persistent_reservation_in_read_keys: "
26661 26661 "failed ddi_copyout: mhioc_key_list_t\n");
26662 26662 rval = EFAULT;
26663 26663 goto done;
26664 26664 }
26665 26665
26666 26666 #endif /* _MULTI_DATAMODEL */
26667 26667
26668 26668 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE,
26669 26669 li.listsize * MHIOC_RESV_KEY_SIZE);
26670 26670 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) {
26671 26671 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26672 26672 "sd_persistent_reservation_in_read_keys: "
26673 26673 "failed ddi_copyout: keylist\n");
26674 26674 rval = EFAULT;
26675 26675 }
26676 26676 done:
26677 26677 sd_ssc_fini(ssc);
26678 26678 kmem_free(data_bufp, data_len);
26679 26679 return (rval);
26680 26680 }
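
For context, a minimal sketch of the user side of MHIOCGRP_INKEYS that this routine services; the device path and the fixed list size of 8 keys are assumptions for illustration:

	#include <sys/mhd.h>
	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	/*
	 * Hedged sketch: read the registered PR keys. The structures
	 * mirror the driver's copyin/copyout above; the device path
	 * and listsize are hypothetical.
	 */
	int
	read_registered_keys(void)
	{
		mhioc_inkeys_t ik;
		mhioc_key_list_t kl;
		mhioc_resv_key_t keys[8];
		int fd;

		if ((fd = open("/dev/rdsk/c0t0d0s2", O_RDWR)) < 0)
			return (-1);
		(void) memset(&ik, 0, sizeof (ik));
		(void) memset(&kl, 0, sizeof (kl));
		kl.listsize = 8;
		kl.list = keys;
		ik.li = &kl;
		if (ioctl(fd, MHIOCGRP_INKEYS, &ik) < 0) {
			(void) close(fd);
			return (-1);
		}
		/* kl.listlen now holds the number of keys on the device. */
		(void) close(fd);
		return (0);
	}
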
26681 26681
26682 26682
26683 26683 /*
26684 26684 * Function: sd_persistent_reservation_in_read_resv
26685 26685 *
26686 26686 * Description: This routine is the driver entry point for handling CD-ROM
26687 26687 * multi-host persistent reservation requests (MHIOCGRP_INRESV)
26688 26688 * by sending the SCSI-3 PRIN commands to the device.
26689 26689 * Process the read persistent reservations command response by
26690 26690 * copying the reservation information into the user provided
26691 26691 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented.
26692 26692 *
26693 26693 * Arguments: un - Pointer to soft state struct for the target.
26694 26694  *		usrp - user provided pointer to multihost Persistent In Read
26695 26695  *		       Reservations structure (mhioc_inresvs_t)
26696 26696 * flag - this argument is a pass through to ddi_copyxxx()
26697 26697 * directly from the mode argument of ioctl().
26698 26698 *
26699 26699 * Return Code: 0 - Success
26700 26700 * EACCES
26701 26701 * ENOTSUP
26702 26702 * errno return code from sd_send_scsi_cmd()
26703 26703 *
26704 26704 * Context: Can sleep. Does not return until command is completed.
26705 26705 */
26706 26706
26707 26707 static int
26708 26708 sd_persistent_reservation_in_read_resv(struct sd_lun *un,
26709 26709 mhioc_inresvs_t *usrp, int flag)
26710 26710 {
26711 26711 #ifdef _MULTI_DATAMODEL
26712 26712 struct mhioc_resv_desc_list32 resvlist32;
26713 26713 #endif
26714 26714 sd_prin_readresv_t *in;
26715 26715 mhioc_inresvs_t *ptr;
26716 26716 sd_readresv_desc_t *readresv_ptr;
26717 26717 mhioc_resv_desc_list_t resvlist;
26718 26718 mhioc_resv_desc_t resvdesc;
26719 26719 uchar_t *data_bufp = NULL;
26720 26720 int data_len;
26721 26721 int rval = 0;
26722 26722 int i;
26723 26723 size_t copysz;
26724 26724 mhioc_resv_desc_t *bufp;
26725 26725 sd_ssc_t *ssc;
26726 26726
26727 26727 if ((ptr = usrp) == NULL) {
26728 26728 return (EINVAL);
26729 26729 }
26730 26730
26731 26731 ssc = sd_ssc_init(un);
26732 26732
26733 26733 /*
26734 26734 	 * Get the listsize from the user.
26735 26735 */
26736 26736 #ifdef _MULTI_DATAMODEL
26737 26737 switch (ddi_model_convert_from(flag & FMODELS)) {
26738 26738 case DDI_MODEL_ILP32:
26739 26739 copysz = sizeof (struct mhioc_resv_desc_list32);
26740 26740 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) {
26741 26741 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26742 26742 "sd_persistent_reservation_in_read_resv: "
26743 26743 "failed ddi_copyin: mhioc_resv_desc_list_t\n");
26744 26744 rval = EFAULT;
26745 26745 goto done;
26746 26746 }
26747 26747 resvlist.listsize = resvlist32.listsize;
26748 26748 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list;
26749 26749 break;
26750 26750
26751 26751 case DDI_MODEL_NONE:
26752 26752 copysz = sizeof (mhioc_resv_desc_list_t);
26753 26753 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
26754 26754 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26755 26755 "sd_persistent_reservation_in_read_resv: "
26756 26756 "failed ddi_copyin: mhioc_resv_desc_list_t\n");
26757 26757 rval = EFAULT;
26758 26758 goto done;
26759 26759 }
26760 26760 break;
26761 26761 }
26762 26762 #else /* ! _MULTI_DATAMODEL */
26763 26763 copysz = sizeof (mhioc_resv_desc_list_t);
26764 26764 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
26765 26765 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26766 26766 "sd_persistent_reservation_in_read_resv: "
26767 26767 "failed ddi_copyin: mhioc_resv_desc_list_t\n");
26768 26768 rval = EFAULT;
26769 26769 goto done;
26770 26770 }
26771 26771 #endif /* ! _MULTI_DATAMODEL */
26772 26772
26773 26773 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN;
26774 26774 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t));
26775 26775 data_bufp = kmem_zalloc(data_len, KM_SLEEP);
26776 26776
26777 26777 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV,
26778 26778 data_len, data_bufp);
26779 26779 if (rval != 0) {
26780 26780 if (rval == EIO)
26781 26781 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
26782 26782 else
26783 26783 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
26784 26784 goto done;
26785 26785 }
26786 26786 in = (sd_prin_readresv_t *)data_bufp;
26787 26787 ptr->generation = BE_32(in->generation);
26788 26788 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN;
26789 26789
26790 26790 /*
26791 26791 	 * Return the min(listsize, listlen) keys
26792 26792 */
26793 26793 #ifdef _MULTI_DATAMODEL
26794 26794
26795 26795 switch (ddi_model_convert_from(flag & FMODELS)) {
26796 26796 case DDI_MODEL_ILP32:
26797 26797 resvlist32.listlen = resvlist.listlen;
26798 26798 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) {
26799 26799 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26800 26800 "sd_persistent_reservation_in_read_resv: "
26801 26801 "failed ddi_copyout: mhioc_resv_desc_list_t\n");
26802 26802 rval = EFAULT;
26803 26803 goto done;
26804 26804 }
26805 26805 break;
26806 26806
26807 26807 case DDI_MODEL_NONE:
26808 26808 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
26809 26809 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26810 26810 "sd_persistent_reservation_in_read_resv: "
26811 26811 "failed ddi_copyout: mhioc_resv_desc_list_t\n");
26812 26812 rval = EFAULT;
26813 26813 goto done;
26814 26814 }
26815 26815 break;
26816 26816 }
26817 26817
26818 26818 #else /* ! _MULTI_DATAMODEL */
26819 26819
26820 26820 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
26821 26821 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26822 26822 "sd_persistent_reservation_in_read_resv: "
26823 26823 "failed ddi_copyout: mhioc_resv_desc_list_t\n");
26824 26824 rval = EFAULT;
26825 26825 goto done;
26826 26826 }
26827 26827
26828 26828 #endif /* ! _MULTI_DATAMODEL */
26829 26829
26830 26830 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc;
26831 26831 bufp = resvlist.list;
26832 26832 copysz = sizeof (mhioc_resv_desc_t);
26833 26833 for (i = 0; i < min(resvlist.listlen, resvlist.listsize);
26834 26834 i++, readresv_ptr++, bufp++) {
26835 26835
26836 26836 bcopy(&readresv_ptr->resvkey, &resvdesc.key,
26837 26837 MHIOC_RESV_KEY_SIZE);
26838 26838 resvdesc.type = readresv_ptr->type;
26839 26839 resvdesc.scope = readresv_ptr->scope;
26840 26840 resvdesc.scope_specific_addr =
26841 26841 BE_32(readresv_ptr->scope_specific_addr);
26842 26842
26843 26843 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) {
26844 26844 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26845 26845 "sd_persistent_reservation_in_read_resv: "
26846 26846 "failed ddi_copyout: resvlist\n");
26847 26847 rval = EFAULT;
26848 26848 goto done;
26849 26849 }
26850 26850 }
26851 26851 done:
26852 26852 sd_ssc_fini(ssc);
26853 26853 	/* Free data_bufp only if it was allocated. */
26854 26854 if (data_bufp) {
26855 26855 kmem_free(data_bufp, data_len);
26856 26856 }
26857 26857 return (rval);
26858 26858 }
26859 26859
26860 26860
26861 26861 /*
26862 26862 * Function: sr_change_blkmode()
26863 26863 *
26864 26864 * Description: This routine is the driver entry point for handling CD-ROM
26865 26865 * block mode ioctl requests. Support for returning and changing
26866 26866 * the current block size in use by the device is implemented. The
26867 26867 * LBA size is changed via a MODE SELECT Block Descriptor.
26868 26868 *
26869 26869 * This routine issues a mode sense with an allocation length of
26870 26870 * 12 bytes for the mode page header and a single block descriptor.
26871 26871 *
26872 26872 * Arguments: dev - the device 'dev_t'
26873 26873 * cmd - the request type; one of CDROMGBLKMODE (get) or
26874 26874 * CDROMSBLKMODE (set)
26875 26875 * data - current block size or requested block size
26876 26876 * flag - this argument is a pass through to ddi_copyxxx() directly
26877 26877 * from the mode argument of ioctl().
26878 26878 *
26879 26879 * Return Code: the code returned by sd_send_scsi_cmd()
26880 26880 * EINVAL if invalid arguments are provided
26881 26881 * EFAULT if ddi_copyxxx() fails
26882 26882  *		ENXIO if ddi_get_soft_state fails
26883 26883 * EIO if invalid mode sense block descriptor length
26884 26884 *
26885 26885 */
26886 26886
26887 26887 static int
26888 26888 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag)
26889 26889 {
26890 26890 struct sd_lun *un = NULL;
26891 26891 struct mode_header *sense_mhp, *select_mhp;
26892 26892 struct block_descriptor *sense_desc, *select_desc;
26893 26893 int current_bsize;
26894 26894 int rval = EINVAL;
26895 26895 uchar_t *sense = NULL;
26896 26896 uchar_t *select = NULL;
26897 26897 sd_ssc_t *ssc;
26898 26898
26899 26899 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE));
26900 26900
26901 26901 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
26902 26902 return (ENXIO);
26903 26903 }
26904 26904
26905 26905 /*
26906 26906 	 * The block length is changed via the Mode Select block descriptor; the
26907 26907 * "Read/Write Error Recovery" mode page (0x1) contents are not actually
26908 26908 * required as part of this routine. Therefore the mode sense allocation
26909 26909 * length is specified to be the length of a mode page header and a
26910 26910 * block descriptor.
26911 26911 */
26912 26912 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
26913 26913
26914 26914 ssc = sd_ssc_init(un);
26915 26915 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
26916 26916 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD);
26917 26917 sd_ssc_fini(ssc);
26918 26918 if (rval != 0) {
26919 26919 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26920 26920 "sr_change_blkmode: Mode Sense Failed\n");
26921 26921 kmem_free(sense, BUFLEN_CHG_BLK_MODE);
26922 26922 return (rval);
26923 26923 }
26924 26924
26925 26925 /* Check the block descriptor len to handle only 1 block descriptor */
26926 26926 sense_mhp = (struct mode_header *)sense;
26927 26927 if ((sense_mhp->bdesc_length == 0) ||
26928 26928 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) {
26929 26929 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26930 26930 "sr_change_blkmode: Mode Sense returned invalid block"
26931 26931 " descriptor length\n");
26932 26932 kmem_free(sense, BUFLEN_CHG_BLK_MODE);
26933 26933 return (EIO);
26934 26934 }
26935 26935 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH);
26936 26936 current_bsize = ((sense_desc->blksize_hi << 16) |
26937 26937 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo);
26938 26938
26939 26939 /* Process command */
26940 26940 switch (cmd) {
26941 26941 case CDROMGBLKMODE:
26942 26942 /* Return the block size obtained during the mode sense */
26943 26943 		if (ddi_copyout(&current_bsize, (void *)data,
26944 26944 sizeof (int), flag) != 0)
26945 26945 rval = EFAULT;
26946 26946 break;
26947 26947 case CDROMSBLKMODE:
26948 26948 /* Validate the requested block size */
26949 26949 switch (data) {
26950 26950 case CDROM_BLK_512:
26951 26951 case CDROM_BLK_1024:
26952 26952 case CDROM_BLK_2048:
26953 26953 case CDROM_BLK_2056:
26954 26954 case CDROM_BLK_2336:
26955 26955 case CDROM_BLK_2340:
26956 26956 case CDROM_BLK_2352:
26957 26957 case CDROM_BLK_2368:
26958 26958 case CDROM_BLK_2448:
26959 26959 case CDROM_BLK_2646:
26960 26960 case CDROM_BLK_2647:
26961 26961 break;
26962 26962 default:
26963 26963 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26964 26964 "sr_change_blkmode: "
26965 26965 "Block Size '%ld' Not Supported\n", data);
26966 26966 kmem_free(sense, BUFLEN_CHG_BLK_MODE);
26967 26967 return (EINVAL);
26968 26968 }
26969 26969
26970 26970 /*
26971 26971 * The current block size matches the requested block size so
26972 26972 * there is no need to send the mode select to change the size
26973 26973 */
26974 26974 if (current_bsize == data) {
26975 26975 break;
26976 26976 }
26977 26977
26978 26978 /* Build the select data for the requested block size */
26979 26979 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
26980 26980 select_mhp = (struct mode_header *)select;
26981 26981 select_desc =
26982 26982 (struct block_descriptor *)(select + MODE_HEADER_LENGTH);
26983 26983 /*
26984 26984 * The LBA size is changed via the block descriptor, so the
26985 26985 * descriptor is built according to the user data
26986 26986 */
26987 26987 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH;
26988 26988 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16);
26989 26989 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8);
26990 26990 select_desc->blksize_lo = (char)((data) & 0x000000ff);
26991 26991
26992 26992 /* Send the mode select for the requested block size */
26993 26993 ssc = sd_ssc_init(un);
26994 26994 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
26995 26995 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
26996 26996 SD_PATH_STANDARD);
26997 26997 sd_ssc_fini(ssc);
26998 26998 if (rval != 0) {
26999 26999 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27000 27000 "sr_change_blkmode: Mode Select Failed\n");
27001 27001 /*
27002 27002 * The mode select failed for the requested block size,
27003 27003 * so reset the data for the original block size and
27004 27004 * send it to the target. The error is indicated by the
27005 27005 * return value for the failed mode select.
27006 27006 */
27007 27007 select_desc->blksize_hi = sense_desc->blksize_hi;
27008 27008 select_desc->blksize_mid = sense_desc->blksize_mid;
27009 27009 select_desc->blksize_lo = sense_desc->blksize_lo;
27010 27010 ssc = sd_ssc_init(un);
27011 27011 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
27012 27012 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
27013 27013 SD_PATH_STANDARD);
27014 27014 sd_ssc_fini(ssc);
27015 27015 } else {
27016 27016 ASSERT(!mutex_owned(SD_MUTEX(un)));
27017 27017 mutex_enter(SD_MUTEX(un));
27018 27018 sd_update_block_info(un, (uint32_t)data, 0);
27019 27019 mutex_exit(SD_MUTEX(un));
27020 27020 }
27021 27021 break;
27022 27022 default:
27023 27023 /* should not reach here, but check anyway */
27024 27024 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27025 27025 "sr_change_blkmode: Command '%x' Not Supported\n", cmd);
27026 27026 rval = EINVAL;
27027 27027 break;
27028 27028 }
27029 27029
27030 27030 if (select) {
27031 27031 kmem_free(select, BUFLEN_CHG_BLK_MODE);
27032 27032 }
27033 27033 if (sense) {
27034 27034 kmem_free(sense, BUFLEN_CHG_BLK_MODE);
27035 27035 }
27036 27036 return (rval);
27037 27037 }
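
A minimal sketch of the ioctl pair this routine serves, from the user side; the device path is an assumption, and 2048-byte blocks are chosen only as an example from the sizes validated above:

	#include <sys/cdio.h>
	#include <fcntl.h>
	#include <unistd.h>

	/*
	 * Hedged sketch: query the current LBA size and, if needed,
	 * ask the drive for 2048-byte logical blocks. The device path
	 * is hypothetical.
	 */
	int
	set_cdrom_blksize(void)
	{
		int bsize;
		int fd;

		if ((fd = open("/dev/rdsk/c1t0d0s2", O_RDONLY)) < 0)
			return (-1);
		/* CDROMGBLKMODE copies the current size out via the arg. */
		if (ioctl(fd, CDROMGBLKMODE, &bsize) < 0) {
			(void) close(fd);
			return (-1);
		}
		/* CDROMSBLKMODE takes the requested size directly. */
		if (bsize != CDROM_BLK_2048 &&
		    ioctl(fd, CDROMSBLKMODE, CDROM_BLK_2048) < 0) {
			(void) close(fd);
			return (-1);
		}
		(void) close(fd);
		return (0);
	}
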
27038 27038
27039 27039
27040 27040 /*
27041 27041 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines
27042 27042 * implement driver support for getting and setting the CD speed. The command
27043 27043 * set used will be based on the device type. If the device has not been
27044 27044 * identified as MMC the Toshiba vendor specific mode page will be used. If
27045 27045 * the device is MMC but does not support the Real Time Streaming feature
27046 27046 * the SET CD SPEED command will be used to set speed and mode page 0x2A will
27047 27047 * be used to read the speed.
27048 27048 */
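
A hedged sketch of the corresponding user-level requests, common to both routines below; the device path is an assumption and CDROM_QUAD_SPEED is just an example speed code:

	#include <sys/cdio.h>
	#include <fcntl.h>
	#include <unistd.h>

	/*
	 * Hedged sketch: CDROMGDRVSPEED copies the current speed out
	 * through the pointer argument; CDROMSDRVSPEED takes one of
	 * the CDROM_*_SPEED codes directly as the argument. The device
	 * path is hypothetical.
	 */
	int
	set_drive_speed(void)
	{
		int speed;
		int fd;

		if ((fd = open("/dev/rdsk/c1t0d0s2", O_RDONLY)) < 0)
			return (-1);
		if (ioctl(fd, CDROMGDRVSPEED, &speed) < 0 ||
		    ioctl(fd, CDROMSDRVSPEED, CDROM_QUAD_SPEED) < 0) {
			(void) close(fd);
			return (-1);
		}
		(void) close(fd);
		return (0);
	}
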
27049 27049
27050 27050 /*
27051 27051 * Function: sr_change_speed()
27052 27052 *
27053 27053 * Description: This routine is the driver entry point for handling CD-ROM
27054 27054 * drive speed ioctl requests for devices supporting the Toshiba
27055 27055 * vendor specific drive speed mode page. Support for returning
27056 27056 * and changing the current drive speed in use by the device is
27057 27057 * implemented.
27058 27058 *
27059 27059 * Arguments: dev - the device 'dev_t'
27060 27060 * cmd - the request type; one of CDROMGDRVSPEED (get) or
27061 27061 * CDROMSDRVSPEED (set)
27062 27062 * data - current drive speed or requested drive speed
27063 27063 * flag - this argument is a pass through to ddi_copyxxx() directly
27064 27064 * from the mode argument of ioctl().
27065 27065 *
27066 27066 * Return Code: the code returned by sd_send_scsi_cmd()
27067 27067 * EINVAL if invalid arguments are provided
27068 27068 * EFAULT if ddi_copyxxx() fails
27069 27069  *		ENXIO if ddi_get_soft_state fails
27070 27070 * EIO if invalid mode sense block descriptor length
27071 27071 */
27072 27072
27073 27073 static int
27074 27074 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
27075 27075 {
27076 27076 struct sd_lun *un = NULL;
27077 27077 struct mode_header *sense_mhp, *select_mhp;
27078 27078 struct mode_speed *sense_page, *select_page;
27079 27079 int current_speed;
27080 27080 int rval = EINVAL;
27081 27081 int bd_len;
27082 27082 uchar_t *sense = NULL;
27083 27083 uchar_t *select = NULL;
27084 27084 sd_ssc_t *ssc;
27085 27085
27086 27086 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));
27087 27087 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27088 27088 return (ENXIO);
27089 27089 }
27090 27090
27091 27091 /*
27092 27092 * Note: The drive speed is being modified here according to a Toshiba
27093 27093 * vendor specific mode page (0x31).
27094 27094 */
27095 27095 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
27096 27096
27097 27097 ssc = sd_ssc_init(un);
27098 27098 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
27099 27099 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED,
27100 27100 SD_PATH_STANDARD);
27101 27101 sd_ssc_fini(ssc);
27102 27102 if (rval != 0) {
27103 27103 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27104 27104 "sr_change_speed: Mode Sense Failed\n");
27105 27105 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27106 27106 return (rval);
27107 27107 }
27108 27108 sense_mhp = (struct mode_header *)sense;
27109 27109
27110 27110 /* Check the block descriptor len to handle only 1 block descriptor */
27111 27111 bd_len = sense_mhp->bdesc_length;
27112 27112 if (bd_len > MODE_BLK_DESC_LENGTH) {
27113 27113 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27114 27114 "sr_change_speed: Mode Sense returned invalid block "
27115 27115 "descriptor length\n");
27116 27116 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27117 27117 return (EIO);
27118 27118 }
27119 27119
27120 27120 sense_page = (struct mode_speed *)
27121 27121 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
27122 27122 current_speed = sense_page->speed;
27123 27123
27124 27124 /* Process command */
27125 27125 switch (cmd) {
27126 27126 case CDROMGDRVSPEED:
27127 27127 /* Return the drive speed obtained during the mode sense */
27128 27128 if (current_speed == 0x2) {
27129 27129 current_speed = CDROM_TWELVE_SPEED;
27130 27130 }
27131 27131 		if (ddi_copyout(&current_speed, (void *)data,
27132 27132 sizeof (int), flag) != 0) {
27133 27133 rval = EFAULT;
27134 27134 }
27135 27135 break;
27136 27136 case CDROMSDRVSPEED:
27137 27137 /* Validate the requested drive speed */
27138 27138 switch ((uchar_t)data) {
27139 27139 case CDROM_TWELVE_SPEED:
27140 27140 data = 0x2;
27141 27141 /*FALLTHROUGH*/
27142 27142 case CDROM_NORMAL_SPEED:
27143 27143 case CDROM_DOUBLE_SPEED:
27144 27144 case CDROM_QUAD_SPEED:
27145 27145 case CDROM_MAXIMUM_SPEED:
27146 27146 break;
27147 27147 default:
27148 27148 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27149 27149 "sr_change_speed: "
27150 27150 "Drive Speed '%d' Not Supported\n", (uchar_t)data);
27151 27151 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27152 27152 return (EINVAL);
27153 27153 }
27154 27154
27155 27155 /*
27156 27156 * The current drive speed matches the requested drive speed so
27157 27157 * there is no need to send the mode select to change the speed
27158 27158 */
27159 27159 if (current_speed == data) {
27160 27160 break;
27161 27161 }
27162 27162
27163 27163 /* Build the select data for the requested drive speed */
27164 27164 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
27165 27165 select_mhp = (struct mode_header *)select;
27166 27166 select_mhp->bdesc_length = 0;
27167 27167 		select_page =
27168 27168 		    (struct mode_speed *)(select + MODE_HEADER_LENGTH);
27171 27171 select_page->mode_page.code = CDROM_MODE_SPEED;
27172 27172 select_page->mode_page.length = 2;
27173 27173 select_page->speed = (uchar_t)data;
27174 27174
27175 27175 		/* Send the mode select for the requested drive speed */
27176 27176 ssc = sd_ssc_init(un);
27177 27177 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
27178 27178 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
27179 27179 SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
27180 27180 sd_ssc_fini(ssc);
27181 27181 if (rval != 0) {
27182 27182 /*
27183 27183 * The mode select failed for the requested drive speed,
27184 27184 * so reset the data for the original drive speed and
27185 27185 * send it to the target. The error is indicated by the
27186 27186 * return value for the failed mode select.
27187 27187 */
27188 27188 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27189 27189 			    "sr_change_speed: Mode Select Failed\n");
27190 27190 select_page->speed = sense_page->speed;
27191 27191 ssc = sd_ssc_init(un);
27192 27192 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
27193 27193 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
27194 27194 SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
27195 27195 sd_ssc_fini(ssc);
27196 27196 }
27197 27197 break;
27198 27198 default:
27199 27199 /* should not reach here, but check anyway */
27200 27200 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27201 27201 "sr_change_speed: Command '%x' Not Supported\n", cmd);
27202 27202 rval = EINVAL;
27203 27203 break;
27204 27204 }
27205 27205
27206 27206 if (select) {
27207 27207 kmem_free(select, BUFLEN_MODE_CDROM_SPEED);
27208 27208 }
27209 27209 if (sense) {
27210 27210 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27211 27211 }
27212 27212
27213 27213 return (rval);
27214 27214 }
27215 27215
27216 27216
27217 27217 /*
27218 27218 * Function: sr_atapi_change_speed()
27219 27219 *
27220 27220 * Description: This routine is the driver entry point for handling CD-ROM
27221 27221 * drive speed ioctl requests for MMC devices that do not support
27222 27222 * the Real Time Streaming feature (0x107).
27223 27223 *
27224 27224 * Note: This routine will use the SET SPEED command which may not
27225 27225 * be supported by all devices.
27226 27226 *
27227 27227  * Arguments: dev - the device 'dev_t'
27228 27228  *		cmd - the request type; one of CDROMGDRVSPEED (get) or
27229 27229  *		      CDROMSDRVSPEED (set)
27230 27230  *		data - current drive speed or requested drive speed
27231 27231  *		flag - this argument is a pass through to ddi_copyxxx() directly
27232 27232  *		       from the mode argument of ioctl().
27233 27233 *
27234 27234 * Return Code: the code returned by sd_send_scsi_cmd()
27235 27235 * EINVAL if invalid arguments are provided
27236 27236 * EFAULT if ddi_copyxxx() fails
27237 27237  *		ENXIO if ddi_get_soft_state fails
27238 27238 * EIO if invalid mode sense block descriptor length
27239 27239 */
27240 27240
27241 27241 static int
27242 27242 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
27243 27243 {
27244 27244 struct sd_lun *un;
27245 27245 struct uscsi_cmd *com = NULL;
27246 27246 struct mode_header_grp2 *sense_mhp;
27247 27247 uchar_t *sense_page;
27248 27248 uchar_t *sense = NULL;
27249 27249 char cdb[CDB_GROUP5];
27250 27250 int bd_len;
27251 27251 int current_speed = 0;
27252 27252 int max_speed = 0;
27253 27253 int rval;
27254 27254 sd_ssc_t *ssc;
27255 27255
27256 27256 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));
27257 27257
27258 27258 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27259 27259 return (ENXIO);
27260 27260 }
27261 27261
27262 27262 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
27263 27263
27264 27264 ssc = sd_ssc_init(un);
27265 27265 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
27266 27266 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP,
27267 27267 SD_PATH_STANDARD);
27268 27268 sd_ssc_fini(ssc);
27269 27269 if (rval != 0) {
27270 27270 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27271 27271 "sr_atapi_change_speed: Mode Sense Failed\n");
27272 27272 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27273 27273 return (rval);
27274 27274 }
27275 27275
27276 27276 /* Check the block descriptor len to handle only 1 block descriptor */
27277 27277 sense_mhp = (struct mode_header_grp2 *)sense;
27278 27278 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
27279 27279 if (bd_len > MODE_BLK_DESC_LENGTH) {
27280 27280 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27281 27281 "sr_atapi_change_speed: Mode Sense returned invalid "
27282 27282 "block descriptor length\n");
27283 27283 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27284 27284 return (EIO);
27285 27285 }
27286 27286
27287 27287 /* Calculate the current and maximum drive speeds */
27288 27288 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
27289 27289 current_speed = (sense_page[14] << 8) | sense_page[15];
27290 27290 max_speed = (sense_page[8] << 8) | sense_page[9];
27291 27291
27292 27292 /* Process the command */
27293 27293 switch (cmd) {
27294 27294 case CDROMGDRVSPEED:
27295 27295 current_speed /= SD_SPEED_1X;
27296 27296 if (ddi_copyout(&current_speed, (void *)data,
27297 27297 sizeof (int), flag) != 0)
27298 27298 rval = EFAULT;
27299 27299 break;
27300 27300 case CDROMSDRVSPEED:
27301 27301 /* Convert the speed code to KB/sec */
27302 27302 switch ((uchar_t)data) {
27303 27303 case CDROM_NORMAL_SPEED:
27304 27304 current_speed = SD_SPEED_1X;
27305 27305 break;
27306 27306 case CDROM_DOUBLE_SPEED:
27307 27307 current_speed = 2 * SD_SPEED_1X;
27308 27308 break;
27309 27309 case CDROM_QUAD_SPEED:
27310 27310 current_speed = 4 * SD_SPEED_1X;
27311 27311 break;
27312 27312 case CDROM_TWELVE_SPEED:
27313 27313 current_speed = 12 * SD_SPEED_1X;
27314 27314 break;
27315 27315 case CDROM_MAXIMUM_SPEED:
27316 27316 current_speed = 0xffff;
27317 27317 break;
27318 27318 default:
27319 27319 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27320 27320 "sr_atapi_change_speed: invalid drive speed %d\n",
27321 27321 (uchar_t)data);
27322 27322 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27323 27323 return (EINVAL);
27324 27324 }
27325 27325
27326 27326 /* Check the request against the drive's max speed. */
27327 27327 if (current_speed != 0xffff) {
27328 27328 if (current_speed > max_speed) {
27329 27329 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27330 27330 return (EINVAL);
27331 27331 }
27332 27332 }
27333 27333
27334 27334 /*
27335 27335 * Build and send the SET SPEED command
27336 27336 *
27337 27337 * Note: The SET SPEED (0xBB) command used in this routine is
27338 27338 * obsolete per the SCSI MMC spec but still supported in the
27339 27339 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
27340 27340 * therefore the command is still implemented in this routine.
27341 27341 */
27342 27342 bzero(cdb, sizeof (cdb));
27343 27343 cdb[0] = (char)SCMD_SET_CDROM_SPEED;
27344 27344 cdb[2] = (uchar_t)(current_speed >> 8);
27345 27345 cdb[3] = (uchar_t)current_speed;
27346 27346 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27347 27347 com->uscsi_cdb = (caddr_t)cdb;
27348 27348 com->uscsi_cdblen = CDB_GROUP5;
27349 27349 com->uscsi_bufaddr = NULL;
27350 27350 com->uscsi_buflen = 0;
27351 27351 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27352 27352 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD);
27353 27353 break;
27354 27354 default:
27355 27355 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27356 27356 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd);
27357 27357 rval = EINVAL;
27358 27358 }
27359 27359
27360 27360 if (sense) {
27361 27361 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27362 27362 }
27363 27363 if (com) {
27364 27364 kmem_free(com, sizeof (*com));
27365 27365 }
27366 27366 return (rval);
27367 27367 }
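
At user level the two ioctls above are asymmetric: CDROMGDRVSPEED copies out the current speed already divided down to a 1x multiple, while CDROMSDRVSPEED takes one of the CDROM_*_SPEED codes rather than a KB/sec value. A minimal user-level sketch; the device path is hypothetical:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/cdio.h>

int
main(void)
{
	int fd, speed;

	/* hypothetical raw CD-ROM device path */
	if ((fd = open("/dev/rdsk/c1t0d0s2", O_RDONLY)) < 0)
		return (1);
	/* the driver divides by SD_SPEED_1X before the copyout */
	if (ioctl(fd, CDROMGDRVSPEED, &speed) == 0)
		(void) printf("current speed: %dx\n", speed);
	/* pass a speed code, not a KB/sec value */
	if (ioctl(fd, CDROMSDRVSPEED, CDROM_DOUBLE_SPEED) != 0)
		perror("CDROMSDRVSPEED");
	(void) close(fd);
	return (0);
}
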
27368 27368
27369 27369
27370 27370 /*
27371 27371 * Function: sr_pause_resume()
27372 27372 *
27373 27373 * Description: This routine is the driver entry point for handling CD-ROM
27374 27374 * pause/resume ioctl requests. This only affects the audio play
27375 27375 * operation.
27376 27376 *
27377 27377 * Arguments: dev - the device 'dev_t'
27378 27378 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
27379 27379 * for setting the resume bit of the cdb.
27380 27380 *
27381 27381 * Return Code: the code returned by sd_send_scsi_cmd()
27382 27382 * EINVAL if invalid mode specified
27383 27383 *
27384 27384 */
27385 27385
27386 27386 static int
27387 27387 sr_pause_resume(dev_t dev, int cmd)
27388 27388 {
27389 27389 struct sd_lun *un;
27390 27390 struct uscsi_cmd *com;
27391 27391 char cdb[CDB_GROUP1];
27392 27392 int rval;
27393 27393
27394 27394 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27395 27395 return (ENXIO);
27396 27396 }
27397 27397
27398 27398 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27399 27399 bzero(cdb, CDB_GROUP1);
27400 27400 cdb[0] = SCMD_PAUSE_RESUME;
27401 27401 switch (cmd) {
27402 27402 case CDROMRESUME:
27403 27403 cdb[8] = 1;
27404 27404 break;
27405 27405 case CDROMPAUSE:
27406 27406 cdb[8] = 0;
27407 27407 break;
27408 27408 default:
27409 27409 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:"
27410 27410 " Command '%x' Not Supported\n", cmd);
27411 27411 rval = EINVAL;
27412 27412 goto done;
27413 27413 }
27414 27414
27415 27415 com->uscsi_cdb = cdb;
27416 27416 com->uscsi_cdblen = CDB_GROUP1;
27417 27417 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27418 27418
27419 27419 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27420 27420 SD_PATH_STANDARD);
27421 27421
27422 27422 done:
27423 27423 kmem_free(com, sizeof (*com));
27424 27424 return (rval);
27425 27425 }
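
Since the resume bit in cdb[8] is the only difference between the two requests, a caller can treat them as a single toggle. A small sketch, assuming fd is an open CD-ROM descriptor:

#include <sys/ioctl.h>
#include <sys/cdio.h>

/* Sketch: resume != 0 maps to CDROMRESUME (cdb[8] = 1 above). */
static int
cd_pause_resume(int fd, int resume)
{
	return (ioctl(fd, resume ? CDROMRESUME : CDROMPAUSE, 0));
}
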
27426 27426
27427 27427
27428 27428 /*
27429 27429 * Function: sr_play_msf()
27430 27430 *
27431 27431 * Description: This routine is the driver entry point for handling CD-ROM
27432 27432 * ioctl requests to output the audio signals at the specified
27433 27433 * starting address and continue the audio play until the specified
27434 27434 * ending address (CDROMPLAYMSF). The address is in Minute Second
27435 27435 * Frame (MSF) format.
27436 27436 *
27437 27437 * Arguments: dev - the device 'dev_t'
27438 27438 * data - pointer to user provided audio msf structure,
27439 27439 * specifying start/end addresses.
27440 27440 * flag - this argument is a pass through to ddi_copyxxx()
27441 27441 * directly from the mode argument of ioctl().
27442 27442 *
27443 27443 * Return Code: the code returned by sd_send_scsi_cmd()
27444 27444 * EFAULT if ddi_copyxxx() fails
27445 27445 * ENXIO if fail ddi_get_soft_state
27446 27446 * EINVAL if data pointer is NULL
27447 27447 */
27448 27448
27449 27449 static int
27450 27450 sr_play_msf(dev_t dev, caddr_t data, int flag)
27451 27451 {
27452 27452 struct sd_lun *un;
27453 27453 struct uscsi_cmd *com;
27454 27454 struct cdrom_msf msf_struct;
27455 27455 struct cdrom_msf *msf = &msf_struct;
27456 27456 char cdb[CDB_GROUP1];
27457 27457 int rval;
27458 27458
27459 27459 if (data == NULL) {
27460 27460 return (EINVAL);
27461 27461 }
27462 27462
27463 27463 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27464 27464 return (ENXIO);
27465 27465 }
27466 27466
27467 27467 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) {
27468 27468 return (EFAULT);
27469 27469 }
27470 27470
27471 27471 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27472 27472 bzero(cdb, CDB_GROUP1);
27473 27473 cdb[0] = SCMD_PLAYAUDIO_MSF;
27474 27474 if (un->un_f_cfg_playmsf_bcd == TRUE) {
27475 27475 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0);
27476 27476 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0);
27477 27477 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0);
27478 27478 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1);
27479 27479 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1);
27480 27480 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1);
27481 27481 } else {
27482 27482 cdb[3] = msf->cdmsf_min0;
27483 27483 cdb[4] = msf->cdmsf_sec0;
27484 27484 cdb[5] = msf->cdmsf_frame0;
27485 27485 cdb[6] = msf->cdmsf_min1;
27486 27486 cdb[7] = msf->cdmsf_sec1;
27487 27487 cdb[8] = msf->cdmsf_frame1;
27488 27488 }
27489 27489 com->uscsi_cdb = cdb;
27490 27490 com->uscsi_cdblen = CDB_GROUP1;
27491 27491 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27492 27492 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27493 27493 SD_PATH_STANDARD);
27494 27494 kmem_free(com, sizeof (*com));
27495 27495 return (rval);
27496 27496 }
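
An MSF address counts 75 frames per second and 60 seconds per minute, and drives flagged un_f_cfg_playmsf_bcd expect each byte BCD-encoded, which is exactly what BYTE_TO_BCD() produces above. A sketch of both conversions; the 150-frame lead-in offset is the Red Book convention when translating MSF to LBA:

/* Sketch: MSF-to-LBA and binary-to-BCD conversions. */
#define	CD_FPS		75	/* frames per second */
#define	CD_MSF_OFFSET	150	/* two-second lead-in gap */

static int
msf_to_lba(unsigned char m, unsigned char s, unsigned char f)
{
	return (((m * 60) + s) * CD_FPS + f - CD_MSF_OFFSET);
}

static unsigned char
byte_to_bcd(unsigned char x)
{
	return (((x / 10) << 4) | (x % 10));	/* e.g. 59 -> 0x59 */
}
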
27497 27497
27498 27498
27499 27499 /*
27500 27500 * Function: sr_play_trkind()
27501 27501 *
27502 27502 * Description: This routine is the driver entry point for handling CD-ROM
27503 27503 * ioctl requests to output the audio signals at the specified
27504 27504 * starting address and continue the audio play until the specified
27505 27505 * ending address (CDROMPLAYTRKIND). The address is in Track Index
27506 27506 * format.
27507 27507 *
27508 27508 * Arguments: dev - the device 'dev_t'
27509 27509 * data - pointer to user provided audio track/index structure,
27510 27510 * specifying start/end addresses.
27511 27511 * flag - this argument is a pass through to ddi_copyxxx()
27512 27512 * directly from the mode argument of ioctl().
27513 27513 *
27514 27514 * Return Code: the code returned by sd_send_scsi_cmd()
27515 27515 * EFAULT if ddi_copyxxx() fails
27516 27516 * ENXIO if fail ddi_get_soft_state
27517 27517 * EINVAL if data pointer is NULL
27518 27518 */
27519 27519
27520 27520 static int
27521 27521 sr_play_trkind(dev_t dev, caddr_t data, int flag)
27522 27522 {
27523 27523 struct cdrom_ti ti_struct;
27524 27524 struct cdrom_ti *ti = &ti_struct;
27525 27525 struct uscsi_cmd *com = NULL;
27526 27526 char cdb[CDB_GROUP1];
27527 27527 int rval;
27528 27528
27529 27529 if (data == NULL) {
27530 27530 return (EINVAL);
27531 27531 }
27532 27532
27533 27533 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) {
27534 27534 return (EFAULT);
27535 27535 }
27536 27536
27537 27537 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27538 27538 bzero(cdb, CDB_GROUP1);
27539 27539 cdb[0] = SCMD_PLAYAUDIO_TI;
27540 27540 cdb[4] = ti->cdti_trk0;
27541 27541 cdb[5] = ti->cdti_ind0;
27542 27542 cdb[7] = ti->cdti_trk1;
27543 27543 cdb[8] = ti->cdti_ind1;
27544 27544 com->uscsi_cdb = cdb;
27545 27545 com->uscsi_cdblen = CDB_GROUP1;
27546 27546 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27547 27547 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27548 27548 SD_PATH_STANDARD);
27549 27549 kmem_free(com, sizeof (*com));
27550 27550 return (rval);
27551 27551 }
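
Track/index addressing needs no conversion; the four fields land directly in CDB bytes 4, 5, 7, and 8 as shown above. A user-level sketch playing track 2 through track 3:

#include <sys/ioctl.h>
#include <sys/cdio.h>

/* Sketch: play from track 2 index 1 through track 3 index 1. */
static int
cd_play_tracks(int fd)
{
	struct cdrom_ti ti;

	ti.cdti_trk0 = 2;	/* -> cdb[4] */
	ti.cdti_ind0 = 1;	/* -> cdb[5] */
	ti.cdti_trk1 = 3;	/* -> cdb[7] */
	ti.cdti_ind1 = 1;	/* -> cdb[8] */
	return (ioctl(fd, CDROMPLAYTRKIND, &ti));
}
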
27552 27552
27553 27553
27554 27554 /*
27555 27555 * Function: sr_read_all_subcodes()
27556 27556 *
27557 27557 * Description: This routine is the driver entry point for handling CD-ROM
27558 27558 * ioctl requests to return raw subcode data while the target is
27559 27559 * playing audio (CDROMSUBCODE).
27560 27560 *
27561 27561 * Arguments: dev - the device 'dev_t'
27562 27562 * data - pointer to user provided cdrom subcode structure,
27563 27563 * specifying the transfer length and address.
27564 27564 * flag - this argument is a pass through to ddi_copyxxx()
27565 27565 * directly from the mode argument of ioctl().
27566 27566 *
27567 27567 * Return Code: the code returned by sd_send_scsi_cmd()
27568 27568 * EFAULT if ddi_copyxxx() fails
27569 27569 * ENXIO if fail ddi_get_soft_state
27570 27570 * EINVAL if data pointer is NULL
27571 27571 */
27572 27572
27573 27573 static int
27574 27574 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag)
27575 27575 {
27576 27576 struct sd_lun *un = NULL;
27577 27577 struct uscsi_cmd *com = NULL;
27578 27578 struct cdrom_subcode *subcode = NULL;
27579 27579 int rval;
27580 27580 size_t buflen;
27581 27581 char cdb[CDB_GROUP5];
27582 27582
27583 27583 #ifdef _MULTI_DATAMODEL
27584 27584 /* To support ILP32 applications in an LP64 world */
27585 27585 struct cdrom_subcode32 cdrom_subcode32;
27586 27586 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32;
27587 27587 #endif
27588 27588 if (data == NULL) {
27589 27589 return (EINVAL);
27590 27590 }
27591 27591
27592 27592 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27593 27593 return (ENXIO);
27594 27594 }
27595 27595
27596 27596 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP);
27597 27597
27598 27598 #ifdef _MULTI_DATAMODEL
27599 27599 switch (ddi_model_convert_from(flag & FMODELS)) {
27600 27600 case DDI_MODEL_ILP32:
27601 27601 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) {
27602 27602 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27603 27603 "sr_read_all_subcodes: ddi_copyin Failed\n");
27604 27604 kmem_free(subcode, sizeof (struct cdrom_subcode));
27605 27605 return (EFAULT);
27606 27606 }
27607 27607 /* Convert the ILP32 uscsi data from the application to LP64 */
27608 27608 cdrom_subcode32tocdrom_subcode(cdsc32, subcode);
27609 27609 break;
27610 27610 case DDI_MODEL_NONE:
27611 27611 if (ddi_copyin(data, subcode,
27612 27612 sizeof (struct cdrom_subcode), flag)) {
27613 27613 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27614 27614 "sr_read_all_subcodes: ddi_copyin Failed\n");
27615 27615 kmem_free(subcode, sizeof (struct cdrom_subcode));
27616 27616 return (EFAULT);
27617 27617 }
27618 27618 break;
27619 27619 }
27620 27620 #else /* ! _MULTI_DATAMODEL */
27621 27621 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
27622 27622 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27623 27623 "sr_read_all_subcodes: ddi_copyin Failed\n");
27624 27624 kmem_free(subcode, sizeof (struct cdrom_subcode));
27625 27625 return (EFAULT);
27626 27626 }
27627 27627 #endif /* _MULTI_DATAMODEL */
27628 27628
27629 27629 /*
27630 27630 * Since MMC-2 expects max 3 bytes for length, check if the
27631 27631 * length input is greater than 3 bytes
27632 27632 */
27633 27633 if ((subcode->cdsc_length & 0xFF000000) != 0) {
27634 27634 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27635 27635 "sr_read_all_subcodes: "
27636 27636 "cdrom transfer length too large: %d (limit %d)\n",
27637 27637 subcode->cdsc_length, 0xFFFFFF);
27638 27638 kmem_free(subcode, sizeof (struct cdrom_subcode));
27639 27639 return (EINVAL);
27640 27640 }
27641 27641
27642 27642 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
27643 27643 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27644 27644 bzero(cdb, CDB_GROUP5);
27645 27645
27646 27646 if (un->un_f_mmc_cap == TRUE) {
27647 27647 cdb[0] = (char)SCMD_READ_CD;
27648 27648 cdb[2] = (char)0xff;
27649 27649 cdb[3] = (char)0xff;
27650 27650 cdb[4] = (char)0xff;
27651 27651 cdb[5] = (char)0xff;
27652 27652 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
27653 27653 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
27654 27654 cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
27655 27655 cdb[10] = 1;
27656 27656 } else {
27657 27657 /*
27658 27658 * Note: A vendor specific command (0xDF) is being used here to
27659 27659 * request a read of all subcodes.
27660 27660 */
27661 27661 cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
27662 27662 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
27663 27663 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
27664 27664 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
27665 27665 cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
27666 27666 }
27667 27667 com->uscsi_cdb = cdb;
27668 27668 com->uscsi_cdblen = CDB_GROUP5;
27669 27669 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
27670 27670 com->uscsi_buflen = buflen;
27671 27671 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27672 27672 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
27673 27673 SD_PATH_STANDARD);
27674 27674 kmem_free(subcode, sizeof (struct cdrom_subcode));
27675 27675 kmem_free(com, sizeof (*com));
27676 27676 return (rval);
27677 27677 }
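
The transfer length is a count of raw 96-byte subcode blocks (CDROM_BLK_SUBCODE) and must fit in the CDB's three-byte length field, so the caller sizes the buffer as length * 96. A user-level sketch:

#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/cdio.h>

/* Sketch: read 16 raw subcode blocks (96 bytes each). */
static int
cd_read_subcodes(int fd)
{
	struct cdrom_subcode sc;
	int rv;

	sc.cdsc_length = 16;	/* must fit in 24 bits */
	sc.cdsc_addr = malloc(96 * sc.cdsc_length);
	if (sc.cdsc_addr == NULL)
		return (-1);
	rv = ioctl(fd, CDROMSUBCODE, &sc);
	free(sc.cdsc_addr);
	return (rv);
}
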
27678 27678
27679 27679
27680 27680 /*
27681 27681 * Function: sr_read_subchannel()
27682 27682 *
27683 27683 * Description: This routine is the driver entry point for handling CD-ROM
27684 27684 * ioctl requests to return the Q sub-channel data of the CD
27685 27685 * current position block. (CDROMSUBCHNL) The data includes the
27686 27686 * current position block (CDROMSUBCHNL). The data includes the
27687 27687 * track number, index number, absolute CD-ROM address (LBA or MSF
27688 27688 * format per the user), track relative CD-ROM address (LBA or MSF
27689 27689 *
27690 27690 * Arguments: dev - the device 'dev_t'
27691 27691 * data - pointer to user provided cdrom sub-channel structure
27692 27692 * flag - this argument is a pass through to ddi_copyxxx()
27693 27693 * directly from the mode argument of ioctl().
27694 27694 *
27695 27695 * Return Code: the code returned by sd_send_scsi_cmd()
27696 27696 * EFAULT if ddi_copyxxx() fails
27697 27697 * ENXIO if fail ddi_get_soft_state
27698 27698 * EINVAL if data pointer is NULL
27699 27699 */
27700 27700
27701 27701 static int
27702 27702 sr_read_subchannel(dev_t dev, caddr_t data, int flag)
27703 27703 {
27704 27704 struct sd_lun *un;
27705 27705 struct uscsi_cmd *com;
27706 27706 struct cdrom_subchnl subchannel;
27707 27707 struct cdrom_subchnl *subchnl = &subchannel;
27708 27708 char cdb[CDB_GROUP1];
27709 27709 caddr_t buffer;
27710 27710 int rval;
27711 27711
27712 27712 if (data == NULL) {
27713 27713 return (EINVAL);
27714 27714 }
27715 27715
27716 27716 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27717 27717 (un->un_state == SD_STATE_OFFLINE)) {
27718 27718 return (ENXIO);
27719 27719 }
27720 27720
27721 27721 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) {
27722 27722 return (EFAULT);
27723 27723 }
27724 27724
27725 27725 buffer = kmem_zalloc((size_t)16, KM_SLEEP);
27726 27726 bzero(cdb, CDB_GROUP1);
27727 27727 cdb[0] = SCMD_READ_SUBCHANNEL;
27728 27728 /* Set the MSF bit based on the user requested address format */
27729 27729 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02;
27730 27730 /*
27731 27731 * Set the Q bit in byte 2 to indicate that Q sub-channel data be
27732 27732 * returned
27733 27733 */
27734 27734 cdb[2] = 0x40;
27735 27735 /*
27736 27736 * Set byte 3 to specify the return data format. A value of 0x01
27737 27737 * indicates that the CD-ROM current position should be returned.
27738 27738 */
27739 27739 cdb[3] = 0x01;
27740 27740 cdb[8] = 0x10;
27741 27741 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27742 27742 com->uscsi_cdb = cdb;
27743 27743 com->uscsi_cdblen = CDB_GROUP1;
27744 27744 com->uscsi_bufaddr = buffer;
27745 27745 com->uscsi_buflen = 16;
27746 27746 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27747 27747 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27748 27748 SD_PATH_STANDARD);
27749 27749 if (rval != 0) {
27750 27750 kmem_free(buffer, 16);
27751 27751 kmem_free(com, sizeof (*com));
27752 27752 return (rval);
27753 27753 }
27754 27754
27755 27755 /* Process the returned Q sub-channel data */
27756 27756 subchnl->cdsc_audiostatus = buffer[1];
27757 27757 subchnl->cdsc_adr = (buffer[5] & 0xF0) >> 4;
27758 27758 subchnl->cdsc_ctrl = (buffer[5] & 0x0F);
27759 27759 subchnl->cdsc_trk = buffer[6];
27760 27760 subchnl->cdsc_ind = buffer[7];
27761 27761 if (subchnl->cdsc_format & CDROM_LBA) {
27762 27762 subchnl->cdsc_absaddr.lba =
27763 27763 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
27764 27764 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
27765 27765 subchnl->cdsc_reladdr.lba =
27766 27766 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) +
27767 27767 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]);
27768 27768 } else if (un->un_f_cfg_readsub_bcd == TRUE) {
27769 27769 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]);
27770 27770 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]);
27771 27771 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]);
27772 27772 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]);
27773 27773 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]);
27774 27774 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]);
27775 27775 } else {
27776 27776 subchnl->cdsc_absaddr.msf.minute = buffer[9];
27777 27777 subchnl->cdsc_absaddr.msf.second = buffer[10];
27778 27778 subchnl->cdsc_absaddr.msf.frame = buffer[11];
27779 27779 subchnl->cdsc_reladdr.msf.minute = buffer[13];
27780 27780 subchnl->cdsc_reladdr.msf.second = buffer[14];
27781 27781 subchnl->cdsc_reladdr.msf.frame = buffer[15];
27782 27782 }
27783 27783 kmem_free(buffer, 16);
27784 27784 kmem_free(com, sizeof (*com));
27785 27785 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag)
27786 27786 != 0) {
27787 27787 return (EFAULT);
27788 27788 }
27789 27789 return (rval);
27790 27790 }
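
From user level the only input is cdsc_format; the remaining fields come back filled from the Q sub-channel bytes decoded above. A sketch polling the current position:

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/cdio.h>

/* Sketch: report the current audio position in MSF format. */
static int
cd_poll_position(int fd)
{
	struct cdrom_subchnl sc;

	sc.cdsc_format = CDROM_MSF;	/* or CDROM_LBA */
	if (ioctl(fd, CDROMSUBCHNL, &sc) != 0)
		return (-1);
	(void) printf("track %d index %d at %02d:%02d.%02d status 0x%x\n",
	    sc.cdsc_trk, sc.cdsc_ind, sc.cdsc_absaddr.msf.minute,
	    sc.cdsc_absaddr.msf.second, sc.cdsc_absaddr.msf.frame,
	    sc.cdsc_audiostatus);
	return (0);
}
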
27791 27791
27792 27792
27793 27793 /*
27794 27794 * Function: sr_read_tocentry()
27795 27795 *
27796 27796 * Description: This routine is the driver entry point for handling CD-ROM
27797 27797 * ioctl requests to read from the Table of Contents (TOC)
27798 27798 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL
27799 27799 * fields, the starting address (LBA or MSF format per the user)
27800 27800 * and the data mode if the user specified track is a data track.
27801 27801 *
27802 27802 * Note: The READ HEADER (0x44) command used in this routine is
27803 27803 * obsolete per the SCSI MMC spec but still supported in the
27804 27804 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
27805 27805 * therefore the command is still implemented in this routine.
27806 27806 *
27807 27807 * Arguments: dev - the device 'dev_t'
27808 27808 * data - pointer to user provided toc entry structure,
27809 27809 * specifying the track # and the address format
27810 27810 * (LBA or MSF).
27811 27811 * flag - this argument is a pass through to ddi_copyxxx()
27812 27812 * directly from the mode argument of ioctl().
27813 27813 *
27814 27814 * Return Code: the code returned by sd_send_scsi_cmd()
27815 27815 * EFAULT if ddi_copyxxx() fails
27816 27816 * ENXIO if fail ddi_get_soft_state
27817 27817 * EINVAL if data pointer is NULL
27818 27818 */
27819 27819
27820 27820 static int
27821 27821 sr_read_tocentry(dev_t dev, caddr_t data, int flag)
27822 27822 {
27823 27823 struct sd_lun *un = NULL;
27824 27824 struct uscsi_cmd *com;
27825 27825 struct cdrom_tocentry toc_entry;
27826 27826 struct cdrom_tocentry *entry = &toc_entry;
27827 27827 caddr_t buffer;
27828 27828 int rval;
27829 27829 char cdb[CDB_GROUP1];
27830 27830
27831 27831 if (data == NULL) {
27832 27832 return (EINVAL);
27833 27833 }
27834 27834
27835 27835 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27836 27836 (un->un_state == SD_STATE_OFFLINE)) {
27837 27837 return (ENXIO);
27838 27838 }
27839 27839
27840 27840 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
27841 27841 return (EFAULT);
27842 27842 }
27843 27843
27844 27844 /* Validate the requested track and address format */
27845 27845 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
27846 27846 return (EINVAL);
27847 27847 }
27848 27848
27849 27849 if (entry->cdte_track == 0) {
27850 27850 return (EINVAL);
27851 27851 }
27852 27852
27853 27853 buffer = kmem_zalloc((size_t)12, KM_SLEEP);
27854 27854 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27855 27855 bzero(cdb, CDB_GROUP1);
27856 27856
27857 27857 cdb[0] = SCMD_READ_TOC;
27858 27858 /* Set the MSF bit based on the user requested address format */
27859 27859 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
27860 27860 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
27861 27861 cdb[6] = BYTE_TO_BCD(entry->cdte_track);
27862 27862 } else {
27863 27863 cdb[6] = entry->cdte_track;
27864 27864 }
27865 27865
27866 27866 /*
27867 27867 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
27868 27868 * (4 byte TOC response header + 8 byte track descriptor)
27869 27869 */
27870 27870 cdb[8] = 12;
27871 27871 com->uscsi_cdb = cdb;
27872 27872 com->uscsi_cdblen = CDB_GROUP1;
27873 27873 com->uscsi_bufaddr = buffer;
27874 27874 com->uscsi_buflen = 0x0C;
27875 27875 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
27876 27876 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27877 27877 SD_PATH_STANDARD);
27878 27878 if (rval != 0) {
27879 27879 kmem_free(buffer, 12);
27880 27880 kmem_free(com, sizeof (*com));
27881 27881 return (rval);
27882 27882 }
27883 27883
27884 27884 /* Process the toc entry */
27885 27885 entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
27886 27886 entry->cdte_ctrl = (buffer[5] & 0x0F);
27887 27887 if (entry->cdte_format & CDROM_LBA) {
27888 27888 entry->cdte_addr.lba =
27889 27889 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
27890 27890 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
27891 27891 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
27892 27892 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
27893 27893 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
27894 27894 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
27895 27895 /*
27896 27896 * Send a READ TOC command using the LBA address format to get
27897 27897 * the LBA for the track requested so it can be used in the
27898 27898 * READ HEADER request
27899 27899 *
27900 27900 * Note: The MSF bit of the READ HEADER command specifies the
27901 27901 * output format. The block address specified in that command
27902 27902 * must be in LBA format.
27903 27903 */
27904 27904 cdb[1] = 0;
27905 27905 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27906 27906 SD_PATH_STANDARD);
27907 27907 if (rval != 0) {
27908 27908 kmem_free(buffer, 12);
27909 27909 kmem_free(com, sizeof (*com));
27910 27910 return (rval);
27911 27911 }
27912 27912 } else {
27913 27913 entry->cdte_addr.msf.minute = buffer[9];
27914 27914 entry->cdte_addr.msf.second = buffer[10];
27915 27915 entry->cdte_addr.msf.frame = buffer[11];
27916 27916 /*
27917 27917 * Send a READ TOC command using the LBA address format to get
27918 27918 * the LBA for the track requested so it can be used in the
27919 27919 * READ HEADER request
27920 27920 *
27921 27921 * Note: The MSF bit of the READ HEADER command specifies the
27922 27922 * output format. The block address specified in that command
27923 27923 * must be in LBA format.
27924 27924 */
27925 27925 cdb[1] = 0;
27926 27926 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27927 27927 SD_PATH_STANDARD);
27928 27928 if (rval != 0) {
27929 27929 kmem_free(buffer, 12);
27930 27930 kmem_free(com, sizeof (*com));
27931 27931 return (rval);
27932 27932 }
27933 27933 }
27934 27934
27935 27935 /*
27936 27936 * Build and send the READ HEADER command to determine the data mode of
27937 27937 * the user specified track.
27938 27938 */
27939 27939 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
27940 27940 (entry->cdte_track != CDROM_LEADOUT)) {
27941 27941 bzero(cdb, CDB_GROUP1);
27942 27942 cdb[0] = SCMD_READ_HEADER;
27943 27943 cdb[2] = buffer[8];
27944 27944 cdb[3] = buffer[9];
27945 27945 cdb[4] = buffer[10];
27946 27946 cdb[5] = buffer[11];
27947 27947 cdb[8] = 0x08;
27948 27948 com->uscsi_buflen = 0x08;
27949 27949 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27950 27950 SD_PATH_STANDARD);
27951 27951 if (rval == 0) {
27952 27952 entry->cdte_datamode = buffer[0];
27953 27953 } else {
27954 27954 /*
27955 27955 * The READ HEADER command failed. Since the command is
27956 27956 * obsolete in the MMC spec, it is better to return
27957 27957 * -1 as an invalid data mode for this track so that the
27958 27958 * rest of the TOC data can still be received.
27959 27959 */
27960 27960 entry->cdte_datamode = (uchar_t)-1;
27961 27961 }
27962 27962 } else {
27963 27963 entry->cdte_datamode = (uchar_t)-1;
27964 27964 }
27965 27965
27966 27966 kmem_free(buffer, 12);
27967 27967 kmem_free(com, sizeof (*com));
27968 27968 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
27969 27969 return (EFAULT);
27970 27970
27971 27971 return (rval);
27972 27972 }
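
A caller supplies only the track number and address format; the entry comes back with the ADR/CTRL bits, the start address, and, for data tracks, the data mode (or -1 when READ HEADER failed, as noted above). A sketch:

#include <sys/ioctl.h>
#include <sys/cdio.h>

/* Sketch: fetch one TOC entry in LBA format. */
static int
cd_toc_entry(int fd, unsigned char track, struct cdrom_tocentry *te)
{
	te->cdte_track = track;		/* CDROM_LEADOUT for lead-out */
	te->cdte_format = CDROM_LBA;
	return (ioctl(fd, CDROMREADTOCENTRY, te));
}
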
27973 27973
27974 27974
27975 27975 /*
27976 27976 * Function: sr_read_tochdr()
27977 27977 *
27978 27978 * Description: This routine is the driver entry point for handling CD-ROM
27979 27979 * ioctl requests to read the Table of Contents (TOC) header
27980 27980 * (CDROMREADTOCHDR). The TOC header consists of the disk starting
27981 27981 * and ending track numbers.
27982 27982 *
27983 27983 * Arguments: dev - the device 'dev_t'
27984 27984 * data - pointer to user provided toc header structure,
27985 27985 * specifying the starting and ending track numbers.
27986 27986 * flag - this argument is a pass through to ddi_copyxxx()
27987 27987 * directly from the mode argument of ioctl().
27988 27988 *
27989 27989 * Return Code: the code returned by sd_send_scsi_cmd()
27990 27990 * EFAULT if ddi_copyxxx() fails
27991 27991 * ENXIO if fail ddi_get_soft_state
27992 27992 * EINVAL if data pointer is NULL
27993 27993 */
27994 27994
27995 27995 static int
27996 27996 sr_read_tochdr(dev_t dev, caddr_t data, int flag)
27997 27997 {
27998 27998 struct sd_lun *un;
27999 27999 struct uscsi_cmd *com;
28000 28000 struct cdrom_tochdr toc_header;
28001 28001 struct cdrom_tochdr *hdr = &toc_header;
28002 28002 char cdb[CDB_GROUP1];
28003 28003 int rval;
28004 28004 caddr_t buffer;
28005 28005
28006 28006 if (data == NULL) {
28007 28007 return (EINVAL);
28008 28008 }
28009 28009
28010 28010 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28011 28011 (un->un_state == SD_STATE_OFFLINE)) {
28012 28012 return (ENXIO);
28013 28013 }
28014 28014
28015 28015 buffer = kmem_zalloc(4, KM_SLEEP);
28016 28016 bzero(cdb, CDB_GROUP1);
28017 28017 cdb[0] = SCMD_READ_TOC;
28018 28018 /*
28019 28019 * Specifying a track number of 0x00 in the READ TOC command indicates
28020 28020 * that the TOC header should be returned
28021 28021 */
28022 28022 cdb[6] = 0x00;
28023 28023 /*
28024 28024 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
28025 28025 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
28026 28026 */
28027 28027 cdb[8] = 0x04;
28028 28028 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28029 28029 com->uscsi_cdb = cdb;
28030 28030 com->uscsi_cdblen = CDB_GROUP1;
28031 28031 com->uscsi_bufaddr = buffer;
28032 28032 com->uscsi_buflen = 0x04;
28033 28033 com->uscsi_timeout = 300;
28034 28034 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28035 28035
28036 28036 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
28037 28037 SD_PATH_STANDARD);
28038 28038 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
28039 28039 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
28040 28040 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
28041 28041 } else {
28042 28042 hdr->cdth_trk0 = buffer[2];
28043 28043 hdr->cdth_trk1 = buffer[3];
28044 28044 }
28045 28045 kmem_free(buffer, 4);
28046 28046 kmem_free(com, sizeof (*com));
28047 28047 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
28048 28048 return (EFAULT);
28049 28049 }
28050 28050 return (rval);
28051 28051 }
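
The header bounds a walk of the full TOC: fetch the first and last track numbers, then read each entry with CDROMREADTOCENTRY (sketched after sr_read_tocentry() above). For example:

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/cdio.h>

/* Sketch: print the start LBA of every track on the disc. */
static int
cd_walk_toc(int fd)
{
	struct cdrom_tochdr th;
	struct cdrom_tocentry te;
	unsigned char trk;

	if (ioctl(fd, CDROMREADTOCHDR, &th) != 0)
		return (-1);
	for (trk = th.cdth_trk0; trk <= th.cdth_trk1; trk++) {
		if (cd_toc_entry(fd, trk, &te) == 0)
			(void) printf("track %d lba %d\n",
			    trk, te.cdte_addr.lba);
	}
	return (0);
}
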
28052 28052
28053 28053
28054 28054 /*
28055 28055 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
28056 28056 * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for
28057 28057 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
28058 28058 * digital audio and extended architecture digital audio. These modes are
28059 28059 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
28060 28060 * MMC specs.
28061 28061 *
28062 28062 * In addition to support for the various data formats these routines also
28063 28063 * include support for devices that implement only the direct access READ
28064 28064 * commands (0x08, 0x28), devices that implement the READ_CD commands
28065 28065 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and
28066 28066 * READ CDXA commands (0xD8, 0xDB).
28067 28067 */
28068 28068
28069 28069 /*
28070 28070 * Function: sr_read_mode1()
28071 28071 *
28072 28072 * Description: This routine is the driver entry point for handling CD-ROM
28073 28073 * ioctl read mode1 requests (CDROMREADMODE1).
28074 28074 *
28075 28075 * Arguments: dev - the device 'dev_t'
28076 28076 * data - pointer to user provided cd read structure specifying
28077 28077 * the lba buffer address and length.
28078 28078 * flag - this argument is a pass through to ddi_copyxxx()
28079 28079 * directly from the mode argument of ioctl().
28080 28080 *
28081 28081 * Return Code: the code returned by sd_send_scsi_cmd()
28082 28082 * EFAULT if ddi_copyxxx() fails
28083 28083 * ENXIO if fail ddi_get_soft_state
28084 28084 * EINVAL if data pointer is NULL
28085 28085 */
28086 28086
28087 28087 static int
28088 28088 sr_read_mode1(dev_t dev, caddr_t data, int flag)
28089 28089 {
28090 28090 struct sd_lun *un;
28091 28091 struct cdrom_read mode1_struct;
28092 28092 struct cdrom_read *mode1 = &mode1_struct;
28093 28093 int rval;
28094 28094 sd_ssc_t *ssc;
28095 28095
28096 28096 #ifdef _MULTI_DATAMODEL
28097 28097 /* To support ILP32 applications in an LP64 world */
28098 28098 struct cdrom_read32 cdrom_read32;
28099 28099 struct cdrom_read32 *cdrd32 = &cdrom_read32;
28100 28100 #endif /* _MULTI_DATAMODEL */
28101 28101
28102 28102 if (data == NULL) {
28103 28103 return (EINVAL);
28104 28104 }
28105 28105
28106 28106 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28107 28107 (un->un_state == SD_STATE_OFFLINE)) {
28108 28108 return (ENXIO);
28109 28109 }
28110 28110
28111 28111 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
28112 28112 "sd_read_mode1: entry: un:0x%p\n", un);
28113 28113
28114 28114 #ifdef _MULTI_DATAMODEL
28115 28115 switch (ddi_model_convert_from(flag & FMODELS)) {
28116 28116 case DDI_MODEL_ILP32:
28117 28117 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
28118 28118 return (EFAULT);
28119 28119 }
28120 28120 /* Convert the ILP32 uscsi data from the application to LP64 */
28121 28121 cdrom_read32tocdrom_read(cdrd32, mode1);
28122 28122 break;
28123 28123 case DDI_MODEL_NONE:
28124 28124 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) {
28125 28125 return (EFAULT);
28126 28126 }
28127 28127 }
28128 28128 #else /* ! _MULTI_DATAMODEL */
28129 28129 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) {
28130 28130 return (EFAULT);
28131 28131 }
28132 28132 #endif /* _MULTI_DATAMODEL */
28133 28133
28134 28134 ssc = sd_ssc_init(un);
28135 28135 rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr,
28136 28136 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD);
28137 28137 sd_ssc_fini(ssc);
28138 28138
28139 28139 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
28140 28140 "sd_read_mode1: exit: un:0x%p\n", un);
28141 28141
28142 28142 return (rval);
28143 28143 }
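
Mode 1 reads go down the normal READ path, so the caller provides an LBA, a buffer, and a length in 2048-byte multiples. A user-level sketch:

#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/cdio.h>

/* Sketch: read one 2048-byte mode 1 sector at LBA 16. */
static int
cd_read_mode1(int fd)
{
	struct cdrom_read cr;
	int rv;

	cr.cdread_lba = 16;
	cr.cdread_buflen = 2048;
	cr.cdread_bufaddr = malloc(cr.cdread_buflen);
	if (cr.cdread_bufaddr == NULL)
		return (-1);
	rv = ioctl(fd, CDROMREADMODE1, &cr);
	free(cr.cdread_bufaddr);
	return (rv);
}
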
28144 28144
28145 28145
28146 28146 /*
28147 28147 * Function: sr_read_cd_mode2()
28148 28148 *
28149 28149 * Description: This routine is the driver entry point for handling CD-ROM
28150 28150 * ioctl read mode2 requests (CDROMREADMODE2) for devices that
28151 28151 * support the READ CD (0xBE) command or the 1st generation
28152 28152 * READ CD (0xD4) command.
28153 28153 *
28154 28154 * Arguments: dev - the device 'dev_t'
28155 28155 * data - pointer to user provided cd read structure specifying
28156 28156 * the lba buffer address and length.
28157 28157 * flag - this argument is a pass through to ddi_copyxxx()
28158 28158 * directly from the mode argument of ioctl().
28159 28159 *
28160 28160 * Return Code: the code returned by sd_send_scsi_cmd()
28161 28161 * EFAULT if ddi_copyxxx() fails
28162 28162 * ENXIO if fail ddi_get_soft_state
28163 28163 * EINVAL if data pointer is NULL
28164 28164 */
28165 28165
28166 28166 static int
28167 28167 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
28168 28168 {
28169 28169 struct sd_lun *un;
28170 28170 struct uscsi_cmd *com;
28171 28171 struct cdrom_read mode2_struct;
28172 28172 struct cdrom_read *mode2 = &mode2_struct;
28173 28173 uchar_t cdb[CDB_GROUP5];
28174 28174 int nblocks;
28175 28175 int rval;
28176 28176 #ifdef _MULTI_DATAMODEL
28177 28177 /* To support ILP32 applications in an LP64 world */
28178 28178 struct cdrom_read32 cdrom_read32;
28179 28179 struct cdrom_read32 *cdrd32 = &cdrom_read32;
28180 28180 #endif /* _MULTI_DATAMODEL */
28181 28181
28182 28182 if (data == NULL) {
28183 28183 return (EINVAL);
28184 28184 }
28185 28185
28186 28186 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28187 28187 (un->un_state == SD_STATE_OFFLINE)) {
28188 28188 return (ENXIO);
28189 28189 }
28190 28190
28191 28191 #ifdef _MULTI_DATAMODEL
28192 28192 switch (ddi_model_convert_from(flag & FMODELS)) {
28193 28193 case DDI_MODEL_ILP32:
28194 28194 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
28195 28195 return (EFAULT);
28196 28196 }
28197 28197 /* Convert the ILP32 uscsi data from the application to LP64 */
28198 28198 cdrom_read32tocdrom_read(cdrd32, mode2);
28199 28199 break;
28200 28200 case DDI_MODEL_NONE:
28201 28201 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
28202 28202 return (EFAULT);
28203 28203 }
28204 28204 break;
28205 28205 }
28206 28206
28207 28207 #else /* ! _MULTI_DATAMODEL */
28208 28208 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
28209 28209 return (EFAULT);
28210 28210 }
28211 28211 #endif /* _MULTI_DATAMODEL */
28212 28212
28213 28213 bzero(cdb, sizeof (cdb));
28214 28214 if (un->un_f_cfg_read_cd_xd4 == TRUE) {
28215 28215 /* Read command supported by 1st generation atapi drives */
28216 28216 cdb[0] = SCMD_READ_CDD4;
28217 28217 } else {
28218 28218 /* Universal CD Access Command */
28219 28219 cdb[0] = SCMD_READ_CD;
28220 28220 }
28221 28221
28222 28222 /*
28223 28223 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book
28224 28224 */
28225 28225 cdb[1] = CDROM_SECTOR_TYPE_MODE2;
28226 28226
28227 28227 /* set the start address */
28228 28228 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0xFF);
28229 28229 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0xFF);
28230 28230 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
28231 28231 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);
28232 28232
28233 28233 /* set the transfer length */
28234 28234 nblocks = mode2->cdread_buflen / 2336;
28235 28235 cdb[6] = (uchar_t)(nblocks >> 16);
28236 28236 cdb[7] = (uchar_t)(nblocks >> 8);
28237 28237 cdb[8] = (uchar_t)nblocks;
28238 28238
28239 28239 /* set the filter bits */
28240 28240 cdb[9] = CDROM_READ_CD_USERDATA;
28241 28241
28242 28242 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28243 28243 com->uscsi_cdb = (caddr_t)cdb;
28244 28244 com->uscsi_cdblen = sizeof (cdb);
28245 28245 com->uscsi_bufaddr = mode2->cdread_bufaddr;
28246 28246 com->uscsi_buflen = mode2->cdread_buflen;
28247 28247 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28248 28248
28249 28249 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
28250 28250 SD_PATH_STANDARD);
28251 28251 kmem_free(com, sizeof (*com));
28252 28252 return (rval);
28253 28253 }
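
The shift-and-mask sequences above are plain big-endian packing: the 32-bit LBA goes into CDB bytes 2-5 and the low 24 bits of the block count into bytes 6-8. A helper makes the pattern explicit; pack_be32 is a hypothetical name, not a driver routine:

#include <sys/types.h>

/* Sketch: big-endian packing as done byte-by-byte in the CDBs above. */
static void
pack_be32(uchar_t *p, uint32_t v)
{
	p[0] = (v >> 24) & 0xff;
	p[1] = (v >> 16) & 0xff;
	p[2] = (v >> 8) & 0xff;
	p[3] = v & 0xff;
}

/* e.g. pack_be32(&cdb[2], lba) fills cdb[2]..cdb[5]. */
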
28254 28254
28255 28255
28256 28256 /*
28257 28257 * Function: sr_read_mode2()
28258 28258 *
28259 28259 * Description: This routine is the driver entry point for handling CD-ROM
28260 28260 * ioctl read mode2 requests (CDROMREADMODE2) for devices that
28261 28261 * do not support the READ CD (0xBE) command.
28262 28262 *
28263 28263 * Arguments: dev - the device 'dev_t'
28264 28264 * data - pointer to user provided cd read structure specifying
28265 28265 * the lba buffer address and length.
28266 28266 * flag - this argument is a pass through to ddi_copyxxx()
28267 28267 * directly from the mode argument of ioctl().
28268 28268 *
28269 28269 * Return Code: the code returned by sd_send_scsi_cmd()
28270 28270 * EFAULT if ddi_copyxxx() fails
28271 28271 * ENXIO if fail ddi_get_soft_state
28272 28272 * EINVAL if data pointer is NULL
28273 28273 * EIO if fail to reset block size
28274 28274 * EAGAIN if commands are in progress in the driver
28275 28275 */
28276 28276
28277 28277 static int
28278 28278 sr_read_mode2(dev_t dev, caddr_t data, int flag)
28279 28279 {
28280 28280 struct sd_lun *un;
28281 28281 struct cdrom_read mode2_struct;
28282 28282 struct cdrom_read *mode2 = &mode2_struct;
28283 28283 int rval;
28284 28284 uint32_t restore_blksize;
28285 28285 struct uscsi_cmd *com;
28286 28286 uchar_t cdb[CDB_GROUP0];
28287 28287 int nblocks;
28288 28288
28289 28289 #ifdef _MULTI_DATAMODEL
28290 28290 /* To support ILP32 applications in an LP64 world */
28291 28291 struct cdrom_read32 cdrom_read32;
28292 28292 struct cdrom_read32 *cdrd32 = &cdrom_read32;
28293 28293 #endif /* _MULTI_DATAMODEL */
28294 28294
28295 28295 if (data == NULL) {
28296 28296 return (EINVAL);
28297 28297 }
28298 28298
28299 28299 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28300 28300 (un->un_state == SD_STATE_OFFLINE)) {
28301 28301 return (ENXIO);
28302 28302 }
28303 28303
28304 28304 /*
28305 28305 * Because this routine will update the device and driver block size
28306 28306 * being used we want to make sure there are no commands in progress.
28307 28307 * If commands are in progress the user will have to try again.
28308 28308 *
28309 28309 * We check for 1 instead of 0 because we increment un_ncmds_in_driver
28310 28310 * in sdioctl to protect commands from sdioctl through to the top of
28311 28311 * sd_uscsi_strategy. See sdioctl for details.
28312 28312 */
28313 28313 mutex_enter(SD_MUTEX(un));
28314 28314 if (un->un_ncmds_in_driver != 1) {
28315 28315 mutex_exit(SD_MUTEX(un));
28316 28316 return (EAGAIN);
28317 28317 }
28318 28318 mutex_exit(SD_MUTEX(un));
28319 28319
28320 28320 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
28321 28321 "sd_read_mode2: entry: un:0x%p\n", un);
28322 28322
28323 28323 #ifdef _MULTI_DATAMODEL
28324 28324 switch (ddi_model_convert_from(flag & FMODELS)) {
28325 28325 case DDI_MODEL_ILP32:
28326 28326 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
28327 28327 return (EFAULT);
28328 28328 }
28329 28329 /* Convert the ILP32 uscsi data from the application to LP64 */
28330 28330 cdrom_read32tocdrom_read(cdrd32, mode2);
28331 28331 break;
28332 28332 case DDI_MODEL_NONE:
28333 28333 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
28334 28334 return (EFAULT);
28335 28335 }
28336 28336 break;
28337 28337 }
28338 28338 #else /* ! _MULTI_DATAMODEL */
28339 28339 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
28340 28340 return (EFAULT);
28341 28341 }
28342 28342 #endif /* _MULTI_DATAMODEL */
28343 28343
28344 28344 /* Store the current target block size for restoration later */
28345 28345 restore_blksize = un->un_tgt_blocksize;
28346 28346
28347 28347 /* Change the device and soft state target block size to 2336 */
28348 28348 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
28349 28349 rval = EIO;
28350 28350 goto done;
28351 28351 }
28352 28352
28353 28353
28354 28354 bzero(cdb, sizeof (cdb));
28355 28355
28356 28356 /* set READ operation */
28357 28357 cdb[0] = SCMD_READ;
28358 28358
28359 28359 /* adjust lba for 2kbyte blocks from 512 byte blocks */
28360 28360 mode2->cdread_lba >>= 2;
28361 28361
28362 28362 /* set the start address */
28363 28363 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0x1F);
28364 28364 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
28365 28365 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);
28366 28366
28367 28367 /* set the transfer length */
28368 28368 nblocks = mode2->cdread_buflen / 2336;
28369 28369 cdb[4] = (uchar_t)nblocks & 0xFF;
28370 28370
28371 28371 /* build command */
28372 28372 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28373 28373 com->uscsi_cdb = (caddr_t)cdb;
28374 28374 com->uscsi_cdblen = sizeof (cdb);
28375 28375 com->uscsi_bufaddr = mode2->cdread_bufaddr;
28376 28376 com->uscsi_buflen = mode2->cdread_buflen;
28377 28377 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28378 28378
28379 28379 /*
28380 28380 * Issue SCSI command with user space address for read buffer.
28381 28381 *
28382 28382 * This sends the command through main channel in the driver.
28383 28383 *
28384 28384 * Since this is accessed via an IOCTL call, we go through the
28385 28385 * standard path, so that if the device was powered down, then
28386 28386 * it would be 'awakened' to handle the command.
28387 28387 */
28388 28388 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
28389 28389 SD_PATH_STANDARD);
28390 28390
28391 28391 kmem_free(com, sizeof (*com));
28392 28392
28393 28393 /* Restore the device and soft state target block size */
28394 28394 if (sr_sector_mode(dev, restore_blksize) != 0) {
28395 28395 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28396 28396 "can't do switch back to mode 1\n");
28397 28397 /*
28398 28398 * If sd_send_scsi_READ succeeded we still need to report
28399 28399 * an error because we failed to reset the block size
28400 28400 */
28401 28401 if (rval == 0) {
28402 28402 rval = EIO;
28403 28403 }
28404 28404 }
28405 28405
28406 28406 done:
28407 28407 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
28408 28408 "sd_read_mode2: exit: un:0x%p\n", un);
28409 28409
28410 28410 return (rval);
28411 28411 }
28412 28412
28413 28413
28414 28414 /*
28415 28415 * Function: sr_sector_mode()
28416 28416 *
28417 28417 * Description: This utility function is used by sr_read_mode2 to set the target
28418 28418 * block size based on the user specified size. This is a legacy
28419 28419 * implementation based upon a vendor specific mode page
28420 28420 *
28421 28421 * Arguments: dev - the device 'dev_t'
28422 28422 * blksize - the target block size being set, either 2336 or
28423 28423 * 512.
28424 28424 *
28425 28425 * Return Code: the code returned by sd_send_scsi_MODE_SENSE() or
28426 28426 * sd_send_scsi_MODE_SELECT()
28427 28427 * ENXIO if fail ddi_get_soft_state
28428 28428 *
28429 28429 */
28430 28430
28431 28431 static int
28432 28432 sr_sector_mode(dev_t dev, uint32_t blksize)
28433 28433 {
28434 28434 struct sd_lun *un;
28435 28435 uchar_t *sense;
28436 28436 uchar_t *select;
28437 28437 int rval;
28438 28438 sd_ssc_t *ssc;
28439 28439
28440 28440 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28441 28441 (un->un_state == SD_STATE_OFFLINE)) {
28442 28442 return (ENXIO);
28443 28443 }
28444 28444
28445 28445 sense = kmem_zalloc(20, KM_SLEEP);
28446 28446
28447 28447 /* Note: This is a vendor specific mode page (0x81) */
28448 28448 ssc = sd_ssc_init(un);
28449 28449 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81,
28450 28450 SD_PATH_STANDARD);
28451 28451 sd_ssc_fini(ssc);
28452 28452 if (rval != 0) {
28453 28453 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
28454 28454 "sr_sector_mode: Mode Sense failed\n");
28455 28455 kmem_free(sense, 20);
28456 28456 return (rval);
28457 28457 }
28458 28458 select = kmem_zalloc(20, KM_SLEEP);
28459 28459 select[3] = 0x08;
28460 28460 select[10] = ((blksize >> 8) & 0xff);
28461 28461 select[11] = (blksize & 0xff);
28462 28462 select[12] = 0x01;
28463 28463 select[13] = 0x06;
28464 28464 select[14] = sense[14];
28465 28465 select[15] = sense[15];
28466 28466 if (blksize == SD_MODE2_BLKSIZE) {
28467 28467 select[14] |= 0x01;
28468 28468 }
28469 28469
28470 28470 ssc = sd_ssc_init(un);
28471 28471 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20,
28472 28472 SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
28473 28473 sd_ssc_fini(ssc);
28474 28474 if (rval != 0) {
28475 28475 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
28476 28476 "sr_sector_mode: Mode Select failed\n");
28477 28477 } else {
28478 28478 /*
28479 28479 * Only update the softstate block size if we successfully
28480 28480 * changed the device block mode.
28481 28481 */
28482 28482 mutex_enter(SD_MUTEX(un));
28483 28483 sd_update_block_info(un, blksize, 0);
28484 28484 mutex_exit(SD_MUTEX(un));
28485 28485 }
28486 28486 kmem_free(sense, 20);
28487 28487 kmem_free(select, 20);
28488 28488 return (rval);
28489 28489 }
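
Bytes 10 and 11 of the select data carry the block size big-endian, so selecting the 2336-byte mode 2 size stores 0x09 and 0x20 (2336 == 0x920), and restoring 512 stores 0x02 and 0x00. A worked check:

#include <assert.h>

/* Sketch: the big-endian split performed by select[10]/select[11]. */
static void
blksize_split_check(void)
{
	unsigned int blksize = 2336;	/* SD_MODE2_BLKSIZE */

	assert(((blksize >> 8) & 0xff) == 0x09);
	assert((blksize & 0xff) == 0x20);
}
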
28490 28490
28491 28491
28492 28492 /*
28493 28493 * Function: sr_read_cdda()
28494 28494 *
28495 28495 * Description: This routine is the driver entry point for handling CD-ROM
28496 28496 * ioctl requests to return CD-DA or subcode data (CDROMCDDA). If
28497 28497 * the target supports CDDA these requests are handled via a vendor
28498 28498 * specific command (0xD8). If the target does not support CDDA
28499 28499 * these requests are handled via the READ CD command (0xBE).
28500 28500 *
28501 28501 * Arguments: dev - the device 'dev_t'
28502 28502 * data - pointer to user provided CD-DA structure specifying
28503 28503 * the track starting address, transfer length, and
28504 28504 * subcode options.
28505 28505 * flag - this argument is a pass through to ddi_copyxxx()
28506 28506 * directly from the mode argument of ioctl().
28507 28507 *
28508 28508 * Return Code: the code returned by sd_send_scsi_cmd()
28509 28509 * EFAULT if ddi_copyxxx() fails
28510 28510 * ENXIO if fail ddi_get_soft_state
28511 28511 * EINVAL if invalid arguments are provided
28512 28512 * ENOTTY if the subcode format is not supported via READ CD
28513 28513 */
28514 28514
28515 28515 static int
28516 28516 sr_read_cdda(dev_t dev, caddr_t data, int flag)
28517 28517 {
28518 28518 struct sd_lun *un;
28519 28519 struct uscsi_cmd *com;
28520 28520 struct cdrom_cdda *cdda;
28521 28521 int rval;
28522 28522 size_t buflen;
28523 28523 char cdb[CDB_GROUP5];
28524 28524
28525 28525 #ifdef _MULTI_DATAMODEL
28526 28526 /* To support ILP32 applications in an LP64 world */
28527 28527 struct cdrom_cdda32 cdrom_cdda32;
28528 28528 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32;
28529 28529 #endif /* _MULTI_DATAMODEL */
28530 28530
28531 28531 if (data == NULL) {
28532 28532 return (EINVAL);
28533 28533 }
28534 28534
28535 28535 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
28536 28536 return (ENXIO);
28537 28537 }
28538 28538
28539 28539 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP);
28540 28540
28541 28541 #ifdef _MULTI_DATAMODEL
28542 28542 switch (ddi_model_convert_from(flag & FMODELS)) {
28543 28543 case DDI_MODEL_ILP32:
28544 28544 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) {
28545 28545 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28546 28546 "sr_read_cdda: ddi_copyin Failed\n");
28547 28547 kmem_free(cdda, sizeof (struct cdrom_cdda));
28548 28548 return (EFAULT);
28549 28549 }
28550 28550 /* Convert the ILP32 uscsi data from the application to LP64 */
28551 28551 cdrom_cdda32tocdrom_cdda(cdda32, cdda);
28552 28552 break;
28553 28553 case DDI_MODEL_NONE:
28554 28554 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
28555 28555 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28556 28556 "sr_read_cdda: ddi_copyin Failed\n");
28557 28557 kmem_free(cdda, sizeof (struct cdrom_cdda));
28558 28558 return (EFAULT);
28559 28559 }
28560 28560 break;
28561 28561 }
28562 28562 #else /* ! _MULTI_DATAMODEL */
28563 28563 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
28564 28564 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28565 28565 "sr_read_cdda: ddi_copyin Failed\n");
28566 28566 kmem_free(cdda, sizeof (struct cdrom_cdda));
28567 28567 return (EFAULT);
28568 28568 }
28569 28569 #endif /* _MULTI_DATAMODEL */
28570 28570
28571 28571 /*
28572 28572 * Since MMC-2 expects max 3 bytes for length, check if the
28573 28573 * length input is greater than 3 bytes
28574 28574 */
28575 28575 if ((cdda->cdda_length & 0xFF000000) != 0) {
28576 28576 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: "
28577 28577 "cdrom transfer length too large: %d (limit %d)\n",
28578 28578 cdda->cdda_length, 0xFFFFFF);
28579 28579 kmem_free(cdda, sizeof (struct cdrom_cdda));
28580 28580 return (EINVAL);
28581 28581 }
28582 28582
28583 28583 switch (cdda->cdda_subcode) {
28584 28584 case CDROM_DA_NO_SUBCODE:
28585 28585 buflen = CDROM_BLK_2352 * cdda->cdda_length;
28586 28586 break;
28587 28587 case CDROM_DA_SUBQ:
28588 28588 buflen = CDROM_BLK_2368 * cdda->cdda_length;
28589 28589 break;
28590 28590 case CDROM_DA_ALL_SUBCODE:
28591 28591 buflen = CDROM_BLK_2448 * cdda->cdda_length;
28592 28592 break;
28593 28593 case CDROM_DA_SUBCODE_ONLY:
28594 28594 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length;
28595 28595 break;
28596 28596 default:
28597 28597 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28598 28598 "sr_read_cdda: Subcode '0x%x' Not Supported\n",
28599 28599 cdda->cdda_subcode);
28600 28600 kmem_free(cdda, sizeof (struct cdrom_cdda));
28601 28601 return (EINVAL);
28602 28602 }
28603 28603
28604 28604 /* Build and send the command */
28605 28605 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28606 28606 bzero(cdb, CDB_GROUP5);
28607 28607
28608 28608 if (un->un_f_cfg_cdda == TRUE) {
28609 28609 cdb[0] = (char)SCMD_READ_CD;
28610 28610 cdb[1] = 0x04;
28611 28611 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
28612 28612 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
28613 28613 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
28614 28614 cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
28615 28615 cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
28616 28616 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
28617 28617 cdb[8] = ((cdda->cdda_length) & 0x000000ff);
28618 28618 cdb[9] = 0x10;
28619 28619 switch (cdda->cdda_subcode) {
28620 28620 case CDROM_DA_NO_SUBCODE :
28621 28621 cdb[10] = 0x0;
28622 28622 break;
28623 28623 case CDROM_DA_SUBQ :
28624 28624 cdb[10] = 0x2;
28625 28625 break;
28626 28626 case CDROM_DA_ALL_SUBCODE :
28627 28627 cdb[10] = 0x1;
28628 28628 break;
28629 28629 case CDROM_DA_SUBCODE_ONLY :
28630 28630 /* FALLTHROUGH */
28631 28631 default :
28632 28632 kmem_free(cdda, sizeof (struct cdrom_cdda));
28633 28633 kmem_free(com, sizeof (*com));
28634 28634 return (ENOTTY);
28635 28635 }
28636 28636 } else {
28637 28637 cdb[0] = (char)SCMD_READ_CDDA;
28638 28638 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
28639 28639 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
28640 28640 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
28641 28641 cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
28642 28642 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24);
28643 28643 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
28644 28644 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
28645 28645 cdb[9] = ((cdda->cdda_length) & 0x000000ff);
28646 28646 cdb[10] = cdda->cdda_subcode;
28647 28647 }
28648 28648
28649 28649 com->uscsi_cdb = cdb;
28650 28650 com->uscsi_cdblen = CDB_GROUP5;
28651 28651 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data;
28652 28652 com->uscsi_buflen = buflen;
28653 28653 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28654 28654
28655 28655 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
28656 28656 SD_PATH_STANDARD);
28657 28657
28658 28658 kmem_free(cdda, sizeof (struct cdrom_cdda));
28659 28659 kmem_free(com, sizeof (*com));
28660 28660 return (rval);
28661 28661 }
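
Each subcode option implies a different raw block size (2352, 2368, 2448, or 96 bytes), and the buffer must hold cdda_length blocks of that size. A user-level sketch reading audio plus Q subcode:

#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/cdio.h>

/* Sketch: read 8 frames of audio with Q subcode (2368 bytes each). */
static int
cd_read_cdda(int fd, unsigned int lba)
{
	struct cdrom_cdda cd;
	int rv;

	cd.cdda_addr = lba;
	cd.cdda_length = 8;
	cd.cdda_subcode = CDROM_DA_SUBQ;	/* 2352 + 16 per block */
	cd.cdda_data = malloc(2368 * cd.cdda_length);
	if (cd.cdda_data == NULL)
		return (-1);
	rv = ioctl(fd, CDROMCDDA, &cd);
	free(cd.cdda_data);
	return (rv);
}
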
28662 28662
28663 28663
28664 28664 /*
28665 28665 * Function: sr_read_cdxa()
28666 28666 *
28667 28667 * Description: This routine is the driver entry point for handling CD-ROM
28668 28668 * ioctl requests to return CD-XA (Extended Architecture) data.
28669 28669 * (CDROMCDXA).
28670 28670 *
28671 28671 * Arguments: dev - the device 'dev_t'
28672 28672 * data - pointer to user provided CD-XA structure specifying
28673 28673 * the data starting address, transfer length, and format
28674 28674 * flag - this argument is a pass through to ddi_copyxxx()
28675 28675 * directly from the mode argument of ioctl().
28676 28676 *
28677 28677 * Return Code: the code returned by sd_send_scsi_cmd()
28678 28678 * EFAULT if ddi_copyxxx() fails
28679 28679 * ENXIO if fail ddi_get_soft_state
28680 28680 * EINVAL if data pointer is NULL
28681 28681 */
28682 28682
28683 28683 static int
28684 28684 sr_read_cdxa(dev_t dev, caddr_t data, int flag)
28685 28685 {
28686 28686 struct sd_lun *un;
28687 28687 struct uscsi_cmd *com;
28688 28688 struct cdrom_cdxa *cdxa;
28689 28689 int rval;
28690 28690 size_t buflen;
28691 28691 char cdb[CDB_GROUP5];
28692 28692 uchar_t read_flags;
28693 28693
28694 28694 #ifdef _MULTI_DATAMODEL
28695 28695 /* To support ILP32 applications in an LP64 world */
28696 28696 struct cdrom_cdxa32 cdrom_cdxa32;
28697 28697 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32;
28698 28698 #endif /* _MULTI_DATAMODEL */
28699 28699
28700 28700 if (data == NULL) {
28701 28701 return (EINVAL);
28702 28702 }
28703 28703
28704 28704 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
28705 28705 return (ENXIO);
28706 28706 }
28707 28707
28708 28708 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP);
28709 28709
28710 28710 #ifdef _MULTI_DATAMODEL
28711 28711 switch (ddi_model_convert_from(flag & FMODELS)) {
28712 28712 case DDI_MODEL_ILP32:
28713 28713 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) {
28714 28714 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28715 28715 return (EFAULT);
28716 28716 }
28717 28717 /*
28718 28718 * Convert the ILP32 uscsi data from the
28719 28719 * application to LP64 for internal use.
28720 28720 */
28721 28721 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
28722 28722 break;
28723 28723 case DDI_MODEL_NONE:
28724 28724 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
28725 28725 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28726 28726 return (EFAULT);
28727 28727 }
28728 28728 break;
28729 28729 }
28730 28730 #else /* ! _MULTI_DATAMODEL */
28731 28731 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
28732 28732 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28733 28733 return (EFAULT);
28734 28734 }
28735 28735 #endif /* _MULTI_DATAMODEL */
28736 28736
28737 28737 /*
28738 28738  * Since MMC-2 allows at most 3 bytes for the length, check
28739 28739  * whether the length input exceeds 3 bytes
28740 28740 */
28741 28741 if ((cdxa->cdxa_length & 0xFF000000) != 0) {
28742 28742 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
28743 28743 "cdrom transfer length too large: %d (limit %d)\n",
28744 28744 cdxa->cdxa_length, 0xFFFFFF);
28745 28745 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28746 28746 return (EINVAL);
28747 28747 }
28748 28748
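/*
 * Each supported format implies a different number of bytes per
 * block, so derive the transfer size from the format, and record
 * the matching READ CD field bits (CDB byte 9) for the MMC case.
 */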
28749 28749 switch (cdxa->cdxa_format) {
28750 28750 case CDROM_XA_DATA:
28751 28751 buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
28752 28752 read_flags = 0x10;
28753 28753 break;
28754 28754 case CDROM_XA_SECTOR_DATA:
28755 28755 buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
28756 28756 read_flags = 0xf8;
28757 28757 break;
28758 28758 case CDROM_XA_DATA_W_ERROR:
28759 28759 buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
28760 28760 read_flags = 0xfc;
28761 28761 break;
28762 28762 default:
28763 28763 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28764 28764 "sr_read_cdxa: Format '0x%x' Not Supported\n",
28765 28765 cdxa->cdxa_format);
28766 28766 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28767 28767 return (EINVAL);
28768 28768 }
28769 28769
28770 28770 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28771 28771 bzero(cdb, CDB_GROUP5);
28772 28772 if (un->un_f_mmc_cap == TRUE) {
28773 28773 cdb[0] = (char)SCMD_READ_CD;
28774 28774 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
28775 28775 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
28776 28776 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
28777 28777 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
28778 28778 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
28779 28779 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
28780 28780 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
28781 28781 cdb[9] = (char)read_flags;
28782 28782 } else {
28783 28783 /*
28784 28784 		 * Note: A vendor specific command (0xDB) is being used here to
28785 28785 * request a read of all subcodes.
28786 28786 */
28787 28787 cdb[0] = (char)SCMD_READ_CDXA;
28788 28788 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
28789 28789 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
28790 28790 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
28791 28791 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
28792 28792 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24);
28793 28793 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
28794 28794 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
28795 28795 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff);
28796 28796 cdb[10] = cdxa->cdxa_format;
28797 28797 }
28798 28798 com->uscsi_cdb = cdb;
28799 28799 com->uscsi_cdblen = CDB_GROUP5;
28800 28800 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data;
28801 28801 com->uscsi_buflen = buflen;
28802 28802 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28803 28803 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
28804 28804 SD_PATH_STANDARD);
28805 28805 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28806 28806 kmem_free(com, sizeof (*com));
28807 28807 return (rval);
28808 28808 }
28809 28809
28810 28810
28811 28811 /*
28812 28812 * Function: sr_eject()
28813 28813 *
28814 28814 * Description: This routine is the driver entry point for handling CD-ROM
28815 28815 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT)
28816 28816 *
28817 28817 * Arguments: dev - the device 'dev_t'
28818 28818 *
28819 28819 * Return Code: the code returned by sd_send_scsi_cmd()
28820 28820 */
28821 28821
28822 28822 static int
28823 28823 sr_eject(dev_t dev)
28824 28824 {
28825 28825 struct sd_lun *un;
28826 28826 int rval;
28827 28827 sd_ssc_t *ssc;
28828 28828
28829 28829 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28830 28830 (un->un_state == SD_STATE_OFFLINE)) {
28831 28831 return (ENXIO);
28832 28832 }
28833 28833
28834 28834 /*
28835 28835 * To prevent race conditions with the eject
28836 28836 * command, keep track of an eject command as
28837 28837 * it progresses. If we are already handling
28838 28838 * an eject command in the driver for the given
28839 28839 	 * unit and another request to eject is received,
28840 28840 * immediately return EAGAIN so we don't lose
28841 28841 * the command if the current eject command fails.
28842 28842 */
28843 28843 mutex_enter(SD_MUTEX(un));
28844 28844 if (un->un_f_ejecting == TRUE) {
28845 28845 mutex_exit(SD_MUTEX(un));
28846 28846 return (EAGAIN);
28847 28847 }
28848 28848 un->un_f_ejecting = TRUE;
28849 28849 mutex_exit(SD_MUTEX(un));
28850 28850
28851 28851 ssc = sd_ssc_init(un);
28852 28852 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
28853 28853 SD_PATH_STANDARD);
28854 28854 sd_ssc_fini(ssc);
28855 28855
28856 28856 if (rval != 0) {
28857 28857 mutex_enter(SD_MUTEX(un));
28858 28858 un->un_f_ejecting = FALSE;
28859 28859 mutex_exit(SD_MUTEX(un));
28860 28860 return (rval);
28861 28861 }
28862 28862
28863 28863 ssc = sd_ssc_init(un);
28864 28864 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
28865 28865 SD_TARGET_EJECT, SD_PATH_STANDARD);
28866 28866 sd_ssc_fini(ssc);
28867 28867
28868 28868 if (rval == 0) {
28869 28869 mutex_enter(SD_MUTEX(un));
28870 28870 sr_ejected(un);
28871 28871 un->un_mediastate = DKIO_EJECTED;
28872 28872 un->un_f_ejecting = FALSE;
28873 28873 cv_broadcast(&un->un_state_cv);
28874 28874 mutex_exit(SD_MUTEX(un));
28875 28875 } else {
28876 28876 mutex_enter(SD_MUTEX(un));
28877 28877 un->un_f_ejecting = FALSE;
28878 28878 mutex_exit(SD_MUTEX(un));
28879 28879 }
28880 28880 return (rval);
28881 28881 }
28882 28882
28883 28883
28884 28884 /*
28885 28885 * Function: sr_ejected()
28886 28886 *
28887 28887 * Description: This routine updates the soft state structure to invalidate the
28888 28888 * geometry information after the media has been ejected or a
28889 28889 * media eject has been detected.
28890 28890 *
28891 28891 * Arguments: un - driver soft state (unit) structure
28892 28892 */
28893 28893
28894 28894 static void
28895 28895 sr_ejected(struct sd_lun *un)
28896 28896 {
28897 28897 struct sd_errstats *stp;
28898 28898
28899 28899 ASSERT(un != NULL);
28900 28900 ASSERT(mutex_owned(SD_MUTEX(un)));
28901 28901
28902 28902 un->un_f_blockcount_is_valid = FALSE;
28903 28903 un->un_f_tgt_blocksize_is_valid = FALSE;
28904 28904 mutex_exit(SD_MUTEX(un));
28905 28905 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
28906 28906 mutex_enter(SD_MUTEX(un));
28907 28907
28908 28908 if (un->un_errstats != NULL) {
28909 28909 stp = (struct sd_errstats *)un->un_errstats->ks_data;
28910 28910 stp->sd_capacity.value.ui64 = 0;
28911 28911 }
28912 28912 }
28913 28913
28914 28914
28915 28915 /*
28916 28916 * Function: sr_check_wp()
28917 28917 *
28918 28918 * Description: This routine checks the write protection of a removable
28919 28919 * media disk and hotpluggable devices via the write protect bit of
28920 28920  *		media disk and hotpluggable devices via the write protect bit of
28921 28921  *		the Mode Page Header device specific field. Some devices choke
28922 28922  *		on an unsupported mode page. To work around this issue, this
28923 28923  *		routine uses mode page 0x3f (request all pages) for all device types.
28924 28924 *
28925 28925 * Arguments: dev - the device 'dev_t'
28926 28926 *
28927 28927 * Return Code: int indicating if the device is write protected (1) or not (0)
28928 28928 *
28929 28929 * Context: Kernel thread.
28930 28930 *
28931 28931 */
28932 28932
28933 28933 static int
28934 28934 sr_check_wp(dev_t dev)
28935 28935 {
28936 28936 struct sd_lun *un;
28937 28937 uchar_t device_specific;
28938 28938 uchar_t *sense;
28939 28939 int hdrlen;
28940 28940 int rval = FALSE;
28941 28941 int status;
28942 28942 sd_ssc_t *ssc;
28943 28943
28944 28944 /*
28945 28945 * Note: The return codes for this routine should be reworked to
28946 28946 * properly handle the case of a NULL softstate.
28947 28947 */
28948 28948 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
28949 28949 return (FALSE);
28950 28950 }
28951 28951
28952 28952 if (un->un_f_cfg_is_atapi == TRUE) {
28953 28953 /*
28954 28954 * The mode page contents are not required; set the allocation
28955 28955 * length for the mode page header only
28956 28956 */
28957 28957 hdrlen = MODE_HEADER_LENGTH_GRP2;
28958 28958 sense = kmem_zalloc(hdrlen, KM_SLEEP);
28959 28959 ssc = sd_ssc_init(un);
28960 28960 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen,
28961 28961 MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
28962 28962 sd_ssc_fini(ssc);
28963 28963 if (status != 0)
28964 28964 goto err_exit;
28965 28965 device_specific =
28966 28966 ((struct mode_header_grp2 *)sense)->device_specific;
28967 28967 } else {
28968 28968 hdrlen = MODE_HEADER_LENGTH;
28969 28969 sense = kmem_zalloc(hdrlen, KM_SLEEP);
28970 28970 ssc = sd_ssc_init(un);
28971 28971 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen,
28972 28972 MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
28973 28973 sd_ssc_fini(ssc);
28974 28974 if (status != 0)
28975 28975 goto err_exit;
28976 28976 device_specific =
28977 28977 ((struct mode_header *)sense)->device_specific;
28978 28978 }
28979 28979
28980 28980
28981 28981 	/*
28982 28982 	 * Not all disks understand this query; a failed mode sense
28983 28983 	 * above left rval FALSE (not write protected). Otherwise
28984 28984 	 * report the WP bit of the device specific byte.
28985 28985 	 */
28986 28986 if (device_specific & WRITE_PROTECT) {
28987 28987 rval = TRUE;
28988 28988 }
28989 28989
28990 28990 err_exit:
28991 28991 kmem_free(sense, hdrlen);
28992 28992 return (rval);
28993 28993 }
28994 28994
28995 28995 /*
28996 28996 * Function: sr_volume_ctrl()
28997 28997 *
28998 28998 * Description: This routine is the driver entry point for handling CD-ROM
28999 28999 * audio output volume ioctl requests. (CDROMVOLCTRL)
29000 29000 *
29001 29001 * Arguments: dev - the device 'dev_t'
29002 29002 * data - pointer to user audio volume control structure
29003 29003 * flag - this argument is a pass through to ddi_copyxxx()
29004 29004 * directly from the mode argument of ioctl().
29005 29005 *
29006 29006 * Return Code: the code returned by sd_send_scsi_cmd()
29007 29007 * EFAULT if ddi_copyxxx() fails
29008 29008  *		ENXIO if ddi_get_soft_state fails
29009 29009 * EINVAL if data pointer is NULL
29010 29010 *
29011 29011 */
29012 29012
29013 29013 static int
29014 29014 sr_volume_ctrl(dev_t dev, caddr_t data, int flag)
29015 29015 {
29016 29016 struct sd_lun *un;
29017 29017 struct cdrom_volctrl volume;
29018 29018 struct cdrom_volctrl *vol = &volume;
29019 29019 uchar_t *sense_page;
29020 29020 uchar_t *select_page;
29021 29021 uchar_t *sense;
29022 29022 uchar_t *select;
29023 29023 int sense_buflen;
29024 29024 int select_buflen;
29025 29025 int rval;
29026 29026 sd_ssc_t *ssc;
29027 29027
29028 29028 if (data == NULL) {
29029 29029 return (EINVAL);
29030 29030 }
29031 29031
29032 29032 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
29033 29033 (un->un_state == SD_STATE_OFFLINE)) {
29034 29034 return (ENXIO);
29035 29035 }
29036 29036
29037 29037 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) {
29038 29038 return (EFAULT);
29039 29039 }
29040 29040
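/*
 * ATAPI and MMC devices take group 1 (10-byte) mode commands
 * with grp2 headers; all others take group 0 (6-byte) mode
 * commands, so the header and buffer sizes differ below.
 */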
29041 29041 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
29042 29042 struct mode_header_grp2 *sense_mhp;
29043 29043 struct mode_header_grp2 *select_mhp;
29044 29044 int bd_len;
29045 29045
29046 29046 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN;
29047 29047 select_buflen = MODE_HEADER_LENGTH_GRP2 +
29048 29048 MODEPAGE_AUDIO_CTRL_LEN;
29049 29049 sense = kmem_zalloc(sense_buflen, KM_SLEEP);
29050 29050 select = kmem_zalloc(select_buflen, KM_SLEEP);
29051 29051 ssc = sd_ssc_init(un);
29052 29052 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
29053 29053 sense_buflen, MODEPAGE_AUDIO_CTRL,
29054 29054 SD_PATH_STANDARD);
29055 29055 sd_ssc_fini(ssc);
29056 29056
29057 29057 if (rval != 0) {
29058 29058 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
29059 29059 "sr_volume_ctrl: Mode Sense Failed\n");
29060 29060 kmem_free(sense, sense_buflen);
29061 29061 kmem_free(select, select_buflen);
29062 29062 return (rval);
29063 29063 }
29064 29064 sense_mhp = (struct mode_header_grp2 *)sense;
29065 29065 select_mhp = (struct mode_header_grp2 *)select;
29066 29066 bd_len = (sense_mhp->bdesc_length_hi << 8) |
29067 29067 sense_mhp->bdesc_length_lo;
29068 29068 if (bd_len > MODE_BLK_DESC_LENGTH) {
29069 29069 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29070 29070 "sr_volume_ctrl: Mode Sense returned invalid "
29071 29071 "block descriptor length\n");
29072 29072 kmem_free(sense, sense_buflen);
29073 29073 kmem_free(select, select_buflen);
29074 29074 return (EIO);
29075 29075 }
29076 29076 sense_page = (uchar_t *)
29077 29077 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
29078 29078 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2);
29079 29079 select_mhp->length_msb = 0;
29080 29080 select_mhp->length_lsb = 0;
29081 29081 select_mhp->bdesc_length_hi = 0;
29082 29082 select_mhp->bdesc_length_lo = 0;
29083 29083 } else {
29084 29084 struct mode_header *sense_mhp, *select_mhp;
29085 29085
29086 29086 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
29087 29087 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
29088 29088 sense = kmem_zalloc(sense_buflen, KM_SLEEP);
29089 29089 select = kmem_zalloc(select_buflen, KM_SLEEP);
29090 29090 ssc = sd_ssc_init(un);
29091 29091 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
29092 29092 sense_buflen, MODEPAGE_AUDIO_CTRL,
29093 29093 SD_PATH_STANDARD);
29094 29094 sd_ssc_fini(ssc);
29095 29095
29096 29096 if (rval != 0) {
29097 29097 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29098 29098 "sr_volume_ctrl: Mode Sense Failed\n");
29099 29099 kmem_free(sense, sense_buflen);
29100 29100 kmem_free(select, select_buflen);
29101 29101 return (rval);
29102 29102 }
29103 29103 sense_mhp = (struct mode_header *)sense;
29104 29104 select_mhp = (struct mode_header *)select;
29105 29105 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) {
29106 29106 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29107 29107 "sr_volume_ctrl: Mode Sense returned invalid "
29108 29108 "block descriptor length\n");
29109 29109 kmem_free(sense, sense_buflen);
29110 29110 kmem_free(select, select_buflen);
29111 29111 return (EIO);
29112 29112 }
29113 29113 sense_page = (uchar_t *)
29114 29114 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
29115 29115 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH);
29116 29116 select_mhp->length = 0;
29117 29117 select_mhp->bdesc_length = 0;
29118 29118 }
29119 29119 /*
29120 29120 	 * Note: An audio control data structure could be created and overlaid
29121 29121 	 * on the following, in place of the array indexing method implemented.
29122 29122 */
29123 29123
29124 29124 /* Build the select data for the user volume data */
29125 29125 select_page[0] = MODEPAGE_AUDIO_CTRL;
29126 29126 select_page[1] = 0xE;
29127 29127 /* Set the immediate bit */
29128 29128 select_page[2] = 0x04;
29129 29129 /* Zero out reserved fields */
29130 29130 select_page[3] = 0x00;
29131 29131 select_page[4] = 0x00;
29132 29132 /* Return sense data for fields not to be modified */
29133 29133 select_page[5] = sense_page[5];
29134 29134 select_page[6] = sense_page[6];
29135 29135 select_page[7] = sense_page[7];
29136 29136 /* Set the user specified volume levels for channel 0 and 1 */
29137 29137 select_page[8] = 0x01;
29138 29138 select_page[9] = vol->channel0;
29139 29139 select_page[10] = 0x02;
29140 29140 select_page[11] = vol->channel1;
29141 29141 	/* Channels 2 and 3 are currently unsupported, so return the sense data */
29142 29142 select_page[12] = sense_page[12];
29143 29143 select_page[13] = sense_page[13];
29144 29144 select_page[14] = sense_page[14];
29145 29145 select_page[15] = sense_page[15];
29146 29146
29147 29147 ssc = sd_ssc_init(un);
29148 29148 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
29149 29149 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select,
29150 29150 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
29151 29151 } else {
29152 29152 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
29153 29153 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
29154 29154 }
29155 29155 sd_ssc_fini(ssc);
29156 29156
29157 29157 kmem_free(sense, sense_buflen);
29158 29158 kmem_free(select, select_buflen);
29159 29159 return (rval);
29160 29160 }
29161 29161
29162 29162
29163 29163 /*
29164 29164 * Function: sr_read_sony_session_offset()
29165 29165 *
29166 29166 * Description: This routine is the driver entry point for handling CD-ROM
29167 29167 * ioctl requests for session offset information. (CDROMREADOFFSET)
29168 29168 * The address of the first track in the last session of a
29169 29169  *		multi-session CD-ROM is returned.
29170 29170 *
29171 29171 * Note: This routine uses a vendor specific key value in the
29172 29172 * command control field without implementing any vendor check here
29173 29173 * or in the ioctl routine.
29174 29174 *
29175 29175 * Arguments: dev - the device 'dev_t'
29176 29176 * data - pointer to an int to hold the requested address
29177 29177 * flag - this argument is a pass through to ddi_copyxxx()
29178 29178 * directly from the mode argument of ioctl().
29179 29179 *
29180 29180 * Return Code: the code returned by sd_send_scsi_cmd()
29181 29181 * EFAULT if ddi_copyxxx() fails
29182 29182  *		ENXIO if ddi_get_soft_state fails
29183 29183 * EINVAL if data pointer is NULL
29184 29184 */
29185 29185
29186 29186 static int
29187 29187 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag)
29188 29188 {
29189 29189 struct sd_lun *un;
29190 29190 struct uscsi_cmd *com;
29191 29191 caddr_t buffer;
29192 29192 char cdb[CDB_GROUP1];
29193 29193 int session_offset = 0;
29194 29194 int rval;
29195 29195
29196 29196 if (data == NULL) {
29197 29197 return (EINVAL);
29198 29198 }
29199 29199
29200 29200 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
29201 29201 (un->un_state == SD_STATE_OFFLINE)) {
29202 29202 return (ENXIO);
29203 29203 }
29204 29204
29205 29205 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP);
29206 29206 bzero(cdb, CDB_GROUP1);
29207 29207 cdb[0] = SCMD_READ_TOC;
29208 29208 /*
29209 29209 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
29210 29210 * (4 byte TOC response header + 8 byte response data)
29211 29211 */
29212 29212 cdb[8] = SONY_SESSION_OFFSET_LEN;
29213 29213 /* Byte 9 is the control byte. A vendor specific value is used */
29214 29214 cdb[9] = SONY_SESSION_OFFSET_KEY;
29215 29215 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
29216 29216 com->uscsi_cdb = cdb;
29217 29217 com->uscsi_cdblen = CDB_GROUP1;
29218 29218 com->uscsi_bufaddr = buffer;
29219 29219 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
29220 29220 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
29221 29221
29222 29222 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
29223 29223 SD_PATH_STANDARD);
29224 29224 if (rval != 0) {
29225 29225 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
29226 29226 kmem_free(com, sizeof (*com));
29227 29227 return (rval);
29228 29228 }
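/*
 * If the drive reports a valid multi-session disc, bytes 8-11
 * of the response hold the big-endian address of the first
 * track in the last session.
 */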
29229 29229 if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
29230 29230 session_offset =
29231 29231 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
29232 29232 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
29233 29233 /*
29234 29234 		 * The offset is returned in units of the current lbasize blocks.
29235 29235 		 * Convert it to 2K blocks before returning it to the user.
29236 29236 */
29237 29237 if (un->un_tgt_blocksize == CDROM_BLK_512) {
29238 29238 session_offset >>= 2;
29239 29239 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
29240 29240 session_offset >>= 1;
29241 29241 }
29242 29242 }
29243 29243
29244 29244 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
29245 29245 rval = EFAULT;
29246 29246 }
29247 29247
29248 29248 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
29249 29249 kmem_free(com, sizeof (*com));
29250 29250 return (rval);
29251 29251 }
29252 29252
29253 29253
29254 29254 /*
29255 29255 * Function: sd_wm_cache_constructor()
29256 29256 *
29257 29257 * Description: Cache Constructor for the wmap cache for the read/modify/write
29258 29258 * devices.
29259 29259 *
29260 29260 * Arguments: wm - A pointer to the sd_w_map to be initialized.
29261 29261 * un - sd_lun structure for the device.
29262 29262  *		flags - the km flags passed to the constructor
29263 29263 *
29264 29264 * Return Code: 0 on success.
29265 29265 * -1 on failure.
29266 29266 */
29267 29267
29268 29268 /*ARGSUSED*/
29269 29269 static int
29270 29270 sd_wm_cache_constructor(void *wm, void *un, int flags)
29271 29271 {
29272 29272 bzero(wm, sizeof (struct sd_w_map));
29273 29273 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
29274 29274 return (0);
29275 29275 }
29276 29276
29277 29277
29278 29278 /*
29279 29279 * Function: sd_wm_cache_destructor()
29280 29280 *
29281 29281 * Description: Cache destructor for the wmap cache for the read/modify/write
29282 29282 * devices.
29283 29283 *
29284 29284  * Arguments: wm      - A pointer to the sd_w_map to be destroyed.
29285 29285 * un - sd_lun structure for the device.
29286 29286 */
29287 29287 /*ARGSUSED*/
29288 29288 static void
29289 29289 sd_wm_cache_destructor(void *wm, void *un)
29290 29290 {
29291 29291 cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
29292 29292 }
29293 29293
29294 29294
29295 29295 /*
29296 29296 * Function: sd_range_lock()
29297 29297 *
29298 29298  * Description: Lock the specified range of blocks to ensure that a
29299 29299  *		read-modify-write is atomic and that no other i/o writes
29300 29300  *		to the same location. The range is specified in terms
29301 29301  *		of start and end blocks. Block numbers are the actual
29302 29302  *		media block numbers, not system block numbers.
29303 29303 *
29304 29304 * Arguments: un - sd_lun structure for the device.
29305 29305 * startb - The starting block number
29306 29306 * endb - The end block number
29307 29307 * typ - type of i/o - simple/read_modify_write
29308 29308 *
29309 29309 * Return Code: wm - pointer to the wmap structure.
29310 29310 *
29311 29311 * Context: This routine can sleep.
29312 29312 */
29313 29313
29314 29314 static struct sd_w_map *
29315 29315 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
29316 29316 {
29317 29317 struct sd_w_map *wmp = NULL;
29318 29318 struct sd_w_map *sl_wmp = NULL;
29319 29319 struct sd_w_map *tmp_wmp;
29320 29320 wm_state state = SD_WM_CHK_LIST;
29321 29321
29322 29322
29323 29323 ASSERT(un != NULL);
29324 29324 ASSERT(!mutex_owned(SD_MUTEX(un)));
29325 29325
29326 29326 mutex_enter(SD_MUTEX(un));
29327 29327
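/*
 * Simple state machine: start in SD_WM_CHK_LIST; move to
 * SD_WM_LOCK_RANGE when the range is free, or to SD_WM_WAIT_MAP
 * when it overlaps a busy map; exit via SD_WM_DONE once the
 * range is owned.
 */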
29328 29328 while (state != SD_WM_DONE) {
29329 29329
29330 29330 switch (state) {
29331 29331 case SD_WM_CHK_LIST:
29332 29332 /*
29333 29333 * This is the starting state. Check the wmap list
29334 29334 * to see if the range is currently available.
29335 29335 */
29336 29336 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
29337 29337 /*
29338 29338 * If this is a simple write and no rmw
29339 29339 * i/o is pending then try to lock the
29340 29340 * range as the range should be available.
29341 29341 */
29342 29342 state = SD_WM_LOCK_RANGE;
29343 29343 } else {
29344 29344 tmp_wmp = sd_get_range(un, startb, endb);
29345 29345 if (tmp_wmp != NULL) {
29346 29346 if ((wmp != NULL) && ONLIST(un, wmp)) {
29347 29347 /*
29348 29348 					 * Should not keep onlist wmps
29349 29349 					 * while waiting; this macro
29350 29350 					 * also sets wmp = NULL.
29351 29351 */
29352 29352 FREE_ONLIST_WMAP(un, wmp);
29353 29353 }
29354 29354 /*
29355 29355 				 * sl_wmp is the wmap on which the wait
29356 29356 				 * is done. Since tmp_wmp points to the
29357 29357 				 * in-use wmap, set sl_wmp to tmp_wmp
29358 29358 				 * and change the state to wait.
29359 29359 */
29360 29360 sl_wmp = tmp_wmp;
29361 29361 state = SD_WM_WAIT_MAP;
29362 29362 } else {
29363 29363 state = SD_WM_LOCK_RANGE;
29364 29364 }
29365 29365
29366 29366 }
29367 29367 break;
29368 29368
29369 29369 case SD_WM_LOCK_RANGE:
29370 29370 ASSERT(un->un_wm_cache);
29371 29371 /*
29372 29372 			 * The range needs to be locked; try to get a wmap.
29373 29373 			 * First attempt it with KM_NOSLEEP, since we want to
29374 29374 			 * avoid sleeping if possible, as we would have to
29375 29375 			 * release the sd mutex in order to sleep.
29376 29376 */
29377 29377 if (wmp == NULL)
29378 29378 wmp = kmem_cache_alloc(un->un_wm_cache,
29379 29379 KM_NOSLEEP);
29380 29380 if (wmp == NULL) {
29381 29381 mutex_exit(SD_MUTEX(un));
29382 29382 _NOTE(DATA_READABLE_WITHOUT_LOCK
29383 29383 (sd_lun::un_wm_cache))
29384 29384 wmp = kmem_cache_alloc(un->un_wm_cache,
29385 29385 KM_SLEEP);
29386 29386 mutex_enter(SD_MUTEX(un));
29387 29387 /*
29388 29388 * we released the mutex so recheck and go to
29389 29389 * check list state.
29390 29390 */
29391 29391 state = SD_WM_CHK_LIST;
29392 29392 } else {
29393 29393 /*
29394 29394 				 * We exit the state machine since we
29395 29395 				 * have the wmap. Do the housekeeping first:
29396 29396 				 * place the wmap on the wmap list if it is not
29397 29397 				 * on it already, and then set the state to done.
29398 29398 */
29399 29399 wmp->wm_start = startb;
29400 29400 wmp->wm_end = endb;
29401 29401 wmp->wm_flags = typ | SD_WM_BUSY;
29402 29402 if (typ & SD_WTYPE_RMW) {
29403 29403 un->un_rmw_count++;
29404 29404 }
29405 29405 /*
29406 29406 * If not already on the list then link
29407 29407 */
29408 29408 if (!ONLIST(un, wmp)) {
29409 29409 wmp->wm_next = un->un_wm;
29410 29410 wmp->wm_prev = NULL;
29411 29411 if (wmp->wm_next)
29412 29412 wmp->wm_next->wm_prev = wmp;
29413 29413 un->un_wm = wmp;
29414 29414 }
29415 29415 state = SD_WM_DONE;
29416 29416 }
29417 29417 break;
29418 29418
29419 29419 case SD_WM_WAIT_MAP:
29420 29420 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
29421 29421 /*
29422 29422 * Wait is done on sl_wmp, which is set in the
29423 29423 * check_list state.
29424 29424 */
29425 29425 sl_wmp->wm_wanted_count++;
29426 29426 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
29427 29427 sl_wmp->wm_wanted_count--;
29428 29428 /*
29429 29429 * We can reuse the memory from the completed sl_wmp
29430 29430 			 * lock range for our new lock, but only if no one is
29431 29431 * waiting for it.
29432 29432 */
29433 29433 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
29434 29434 if (sl_wmp->wm_wanted_count == 0) {
29435 29435 if (wmp != NULL)
29436 29436 CHK_N_FREEWMP(un, wmp);
29437 29437 wmp = sl_wmp;
29438 29438 }
29439 29439 sl_wmp = NULL;
29440 29440 /*
29441 29441 * After waking up, need to recheck for availability of
29442 29442 * range.
29443 29443 */
29444 29444 state = SD_WM_CHK_LIST;
29445 29445 break;
29446 29446
29447 29447 default:
29448 29448 panic("sd_range_lock: "
29449 29449 "Unknown state %d in sd_range_lock", state);
29450 29450 /*NOTREACHED*/
29451 29451 } /* switch(state) */
29452 29452
29453 29453 } /* while(state != SD_WM_DONE) */
29454 29454
29455 29455 mutex_exit(SD_MUTEX(un));
29456 29456
29457 29457 ASSERT(wmp != NULL);
29458 29458
29459 29459 return (wmp);
29460 29460 }
29461 29461
29462 29462
29463 29463 /*
29464 29464 * Function: sd_get_range()
29465 29465 *
29466 29466  * Description: Find whether there is any I/O overlapping this one.
29467 29467  *		Returns the write-map of the first such I/O, NULL otherwise.
29468 29468 *
29469 29469 * Arguments: un - sd_lun structure for the device.
29470 29470 * startb - The starting block number
29471 29471 * endb - The end block number
29472 29472 *
29473 29473 * Return Code: wm - pointer to the wmap structure.
29474 29474 */
29475 29475
29476 29476 static struct sd_w_map *
29477 29477 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
29478 29478 {
29479 29479 struct sd_w_map *wmp;
29480 29480
29481 29481 ASSERT(un != NULL);
29482 29482
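/*
 * Only maps marked SD_WM_BUSY count; a map matches when either
 * endpoint of the requested range falls within it.
 */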
29483 29483 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
29484 29484 if (!(wmp->wm_flags & SD_WM_BUSY)) {
29485 29485 continue;
29486 29486 }
29487 29487 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
29488 29488 break;
29489 29489 }
29490 29490 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
29491 29491 break;
29492 29492 }
29493 29493 }
29494 29494
29495 29495 return (wmp);
29496 29496 }
29497 29497
29498 29498
29499 29499 /*
29500 29500 * Function: sd_free_inlist_wmap()
29501 29501 *
29502 29502 * Description: Unlink and free a write map struct.
29503 29503 *
29504 29504 * Arguments: un - sd_lun structure for the device.
29505 29505 * wmp - sd_w_map which needs to be unlinked.
29506 29506 */
29507 29507
29508 29508 static void
29509 29509 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
29510 29510 {
29511 29511 ASSERT(un != NULL);
29512 29512
29513 29513 if (un->un_wm == wmp) {
29514 29514 un->un_wm = wmp->wm_next;
29515 29515 } else {
29516 29516 wmp->wm_prev->wm_next = wmp->wm_next;
29517 29517 }
29518 29518
29519 29519 if (wmp->wm_next) {
29520 29520 wmp->wm_next->wm_prev = wmp->wm_prev;
29521 29521 }
29522 29522
29523 29523 wmp->wm_next = wmp->wm_prev = NULL;
29524 29524
29525 29525 kmem_cache_free(un->un_wm_cache, wmp);
29526 29526 }
29527 29527
29528 29528
29529 29529 /*
29530 29530 * Function: sd_range_unlock()
29531 29531 *
29532 29532 * Description: Unlock the range locked by wm.
29533 29533 * Free write map if nobody else is waiting on it.
29534 29534 *
29535 29535 * Arguments: un - sd_lun structure for the device.
29536 29536  *		wm  - sd_w_map which needs to be unlocked.
29537 29537 */
29538 29538
29539 29539 static void
29540 29540 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
29541 29541 {
29542 29542 ASSERT(un != NULL);
29543 29543 ASSERT(wm != NULL);
29544 29544 ASSERT(!mutex_owned(SD_MUTEX(un)));
29545 29545
29546 29546 mutex_enter(SD_MUTEX(un));
29547 29547
29548 29548 if (wm->wm_flags & SD_WTYPE_RMW) {
29549 29549 un->un_rmw_count--;
29550 29550 }
29551 29551
29552 29552 if (wm->wm_wanted_count) {
29553 29553 wm->wm_flags = 0;
29554 29554 /*
29555 29555 * Broadcast that the wmap is available now.
29556 29556 */
29557 29557 cv_broadcast(&wm->wm_avail);
29558 29558 } else {
29559 29559 /*
29560 29560 		 * If no one is waiting on the map, it should be freed.
29561 29561 */
29562 29562 sd_free_inlist_wmap(un, wm);
29563 29563 }
29564 29564
29565 29565 mutex_exit(SD_MUTEX(un));
29566 29566 }
29567 29567
29568 29568
29569 29569 /*
29570 29570 * Function: sd_read_modify_write_task
29571 29571 *
29572 29572 * Description: Called from a taskq thread to initiate the write phase of
29573 29573 * a read-modify-write request. This is used for targets where
29574 29574 * un->un_sys_blocksize != un->un_tgt_blocksize.
29575 29575 *
29576 29576 * Arguments: arg - a pointer to the buf(9S) struct for the write command.
29577 29577 *
29578 29578 * Context: Called under taskq thread context.
29579 29579 */
29580 29580
29581 29581 static void
29582 29582 sd_read_modify_write_task(void *arg)
29583 29583 {
29584 29584 struct sd_mapblocksize_info *bsp;
29585 29585 struct buf *bp;
29586 29586 struct sd_xbuf *xp;
29587 29587 struct sd_lun *un;
29588 29588
29589 29589 bp = arg; /* The bp is given in arg */
29590 29590 ASSERT(bp != NULL);
29591 29591
29592 29592 /* Get the pointer to the layer-private data struct */
29593 29593 xp = SD_GET_XBUF(bp);
29594 29594 ASSERT(xp != NULL);
29595 29595 bsp = xp->xb_private;
29596 29596 ASSERT(bsp != NULL);
29597 29597
29598 29598 un = SD_GET_UN(bp);
29599 29599 ASSERT(un != NULL);
29600 29600 ASSERT(!mutex_owned(SD_MUTEX(un)));
29601 29601
29602 29602 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29603 29603 "sd_read_modify_write_task: entry: buf:0x%p\n", bp);
29604 29604
29605 29605 /*
29606 29606 * This is the write phase of a read-modify-write request, called
29607 29607 * under the context of a taskq thread in response to the completion
29608 29608 * of the read portion of the rmw request completing under interrupt
29609 29609 * context. The write request must be sent from here down the iostart
29610 29610 * chain as if it were being sent from sd_mapblocksize_iostart(), so
29611 29611 * we use the layer index saved in the layer-private data area.
29612 29612 */
29613 29613 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);
29614 29614
29615 29615 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29616 29616 "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
29617 29617 }
29618 29618
29619 29619
29620 29620 /*
29621 29621 * Function: sddump_do_read_of_rmw()
29622 29622 *
29623 29623  * Description: This routine is called from sddump. If sddump is called
29624 29624  *		with an I/O that is not aligned on a device blocksize boundary,
29625 29625  *		then the write has to be converted to a read-modify-write.
29626 29626  *		Do the read part here in order to keep sddump simple.
29627 29627  *		Note that the sd_mutex is held across the call to this
29628 29628  *		routine.
29629 29629 *
29630 29630 * Arguments: un - sd_lun
29631 29631 * blkno - block number in terms of media block size.
29632 29632 * nblk - number of blocks.
29633 29633 * bpp - pointer to pointer to the buf structure. On return
29634 29634 * from this function, *bpp points to the valid buffer
29635 29635 * to which the write has to be done.
29636 29636 *
29637 29637 * Return Code: 0 for success or errno-type return code
29638 29638 */
29639 29639
29640 29640 static int
29641 29641 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
29642 29642 struct buf **bpp)
29643 29643 {
29644 29644 int err;
29645 29645 int i;
29646 29646 int rval;
29647 29647 struct buf *bp;
29648 29648 struct scsi_pkt *pkt = NULL;
29649 29649 uint32_t target_blocksize;
29650 29650
29651 29651 ASSERT(un != NULL);
29652 29652 ASSERT(mutex_owned(SD_MUTEX(un)));
29653 29653
29654 29654 target_blocksize = un->un_tgt_blocksize;
29655 29655
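/*
 * Drop the mutex for the allocation and the polled retry loop
 * below; it is reacquired at the 'done' label, so the caller
 * still sees it held on return.
 */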
29656 29656 mutex_exit(SD_MUTEX(un));
29657 29657
29658 29658 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
29659 29659 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
29660 29660 if (bp == NULL) {
29661 29661 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29662 29662 "no resources for dumping; giving up");
29663 29663 err = ENOMEM;
29664 29664 goto done;
29665 29665 }
29666 29666
29667 29667 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
29668 29668 blkno, nblk);
29669 29669 if (rval != 0) {
29670 29670 scsi_free_consistent_buf(bp);
29671 29671 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29672 29672 "no resources for dumping; giving up");
29673 29673 err = ENOMEM;
29674 29674 goto done;
29675 29675 }
29676 29676
29677 29677 pkt->pkt_flags |= FLAG_NOINTR;
29678 29678
29679 29679 err = EIO;
29680 29680 for (i = 0; i < SD_NDUMP_RETRIES; i++) {
29681 29681
29682 29682 /*
29683 29683 * Scsi_poll returns 0 (success) if the command completes and
29684 29684 * the status block is STATUS_GOOD. We should only check
29685 29685 * errors if this condition is not true. Even then we should
29686 29686 * send our own request sense packet only if we have a check
29687 29687 * condition and auto request sense has not been performed by
29688 29688 * the hba.
29689 29689 */
29690 29690 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n");
29691 29691
29692 29692 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) {
29693 29693 err = 0;
29694 29694 break;
29695 29695 }
29696 29696
29697 29697 /*
29698 29698 * Check CMD_DEV_GONE 1st, give up if device is gone,
29699 29699 * no need to read RQS data.
29700 29700 */
29701 29701 if (pkt->pkt_reason == CMD_DEV_GONE) {
29702 29702 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29703 29703 "Error while dumping state with rmw..."
29704 29704 "Device is gone\n");
29705 29705 break;
29706 29706 }
29707 29707
29708 29708 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) {
29709 29709 SD_INFO(SD_LOG_DUMP, un,
29710 29710 "sddump: read failed with CHECK, try # %d\n", i);
29711 29711 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) {
29712 29712 (void) sd_send_polled_RQS(un);
29713 29713 }
29714 29714
29715 29715 continue;
29716 29716 }
29717 29717
29718 29718 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) {
29719 29719 int reset_retval = 0;
29720 29720
29721 29721 SD_INFO(SD_LOG_DUMP, un,
29722 29722 "sddump: read failed with BUSY, try # %d\n", i);
29723 29723
29724 29724 if (un->un_f_lun_reset_enabled == TRUE) {
29725 29725 reset_retval = scsi_reset(SD_ADDRESS(un),
29726 29726 RESET_LUN);
29727 29727 }
29728 29728 if (reset_retval == 0) {
29729 29729 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
29730 29730 }
29731 29731 (void) sd_send_polled_RQS(un);
29732 29732
29733 29733 } else {
29734 29734 SD_INFO(SD_LOG_DUMP, un,
29735 29735 "sddump: read failed with 0x%x, try # %d\n",
29736 29736 SD_GET_PKT_STATUS(pkt), i);
29737 29737 mutex_enter(SD_MUTEX(un));
29738 29738 sd_reset_target(un, pkt);
29739 29739 mutex_exit(SD_MUTEX(un));
29740 29740 }
29741 29741
29742 29742 /*
29743 29743 * If we are not getting anywhere with lun/target resets,
29744 29744 * let's reset the bus.
29745 29745 */
29746 29746 if (i > SD_NDUMP_RETRIES/2) {
29747 29747 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
29748 29748 (void) sd_send_polled_RQS(un);
29749 29749 }
29750 29750
29751 29751 }
29752 29752 scsi_destroy_pkt(pkt);
29753 29753
29754 29754 if (err != 0) {
29755 29755 scsi_free_consistent_buf(bp);
29756 29756 *bpp = NULL;
29757 29757 } else {
29758 29758 *bpp = bp;
29759 29759 }
29760 29760
29761 29761 done:
29762 29762 mutex_enter(SD_MUTEX(un));
29763 29763 return (err);
29764 29764 }
29765 29765
29766 29766
29767 29767 /*
29768 29768 * Function: sd_failfast_flushq
29769 29769 *
29770 29770 * Description: Take all bp's on the wait queue that have B_FAILFAST set
29771 29771 * in b_flags and move them onto the failfast queue, then kick
29772 29772 * off a thread to return all bp's on the failfast queue to
29773 29773 * their owners with an error set.
29774 29774 *
29775 29775 * Arguments: un - pointer to the soft state struct for the instance.
29776 29776 *
29777 29777 * Context: may execute in interrupt context.
29778 29778 */
29779 29779
29780 29780 static void
29781 29781 sd_failfast_flushq(struct sd_lun *un)
29782 29782 {
29783 29783 struct buf *bp;
29784 29784 struct buf *next_waitq_bp;
29785 29785 struct buf *prev_waitq_bp = NULL;
29786 29786
29787 29787 ASSERT(un != NULL);
29788 29788 ASSERT(mutex_owned(SD_MUTEX(un)));
29789 29789 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
29790 29790 ASSERT(un->un_failfast_bp == NULL);
29791 29791
29792 29792 SD_TRACE(SD_LOG_IO_FAILFAST, un,
29793 29793 "sd_failfast_flushq: entry: un:0x%p\n", un);
29794 29794
29795 29795 /*
29796 29796 * Check if we should flush all bufs when entering failfast state, or
29797 29797 * just those with B_FAILFAST set.
29798 29798 */
29799 29799 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
29800 29800 /*
29801 29801 * Move *all* bp's on the wait queue to the failfast flush
29802 29802 * queue, including those that do NOT have B_FAILFAST set.
29803 29803 */
29804 29804 if (un->un_failfast_headp == NULL) {
29805 29805 ASSERT(un->un_failfast_tailp == NULL);
29806 29806 un->un_failfast_headp = un->un_waitq_headp;
29807 29807 } else {
29808 29808 ASSERT(un->un_failfast_tailp != NULL);
29809 29809 un->un_failfast_tailp->av_forw = un->un_waitq_headp;
29810 29810 }
29811 29811
29812 29812 un->un_failfast_tailp = un->un_waitq_tailp;
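/*
 * The entire waitq chain is now linked onto the tail of the
 * failfast queue; the loop below only updates the kstats.
 */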
29813 29813
29814 29814 /* update kstat for each bp moved out of the waitq */
29815 29815 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) {
29816 29816 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
29817 29817 }
29818 29818
29819 29819 /* empty the waitq */
29820 29820 un->un_waitq_headp = un->un_waitq_tailp = NULL;
29821 29821
29822 29822 } else {
29823 29823 /*
29824 29824 		 * Go through the wait queue, pick off all entries with
29825 29825 * B_FAILFAST set, and move these onto the failfast queue.
29826 29826 */
29827 29827 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) {
29828 29828 /*
29829 29829 * Save the pointer to the next bp on the wait queue,
29830 29830 * so we get to it on the next iteration of this loop.
29831 29831 */
29832 29832 next_waitq_bp = bp->av_forw;
29833 29833
29834 29834 /*
29835 29835 * If this bp from the wait queue does NOT have
29836 29836 * B_FAILFAST set, just move on to the next element
29837 29837 * in the wait queue. Note, this is the only place
29838 29838 * where it is correct to set prev_waitq_bp.
29839 29839 */
29840 29840 if ((bp->b_flags & B_FAILFAST) == 0) {
29841 29841 prev_waitq_bp = bp;
29842 29842 continue;
29843 29843 }
29844 29844
29845 29845 /*
29846 29846 * Remove the bp from the wait queue.
29847 29847 */
29848 29848 if (bp == un->un_waitq_headp) {
29849 29849 /* The bp is the first element of the waitq. */
29850 29850 un->un_waitq_headp = next_waitq_bp;
29851 29851 if (un->un_waitq_headp == NULL) {
29852 29852 /* The wait queue is now empty */
29853 29853 un->un_waitq_tailp = NULL;
29854 29854 }
29855 29855 } else {
29856 29856 /*
29857 29857 * The bp is either somewhere in the middle
29858 29858 * or at the end of the wait queue.
29859 29859 */
29860 29860 ASSERT(un->un_waitq_headp != NULL);
29861 29861 ASSERT(prev_waitq_bp != NULL);
29862 29862 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST)
29863 29863 == 0);
29864 29864 if (bp == un->un_waitq_tailp) {
29865 29865 /* bp is the last entry on the waitq. */
29866 29866 ASSERT(next_waitq_bp == NULL);
29867 29867 un->un_waitq_tailp = prev_waitq_bp;
29868 29868 }
29869 29869 prev_waitq_bp->av_forw = next_waitq_bp;
29870 29870 }
29871 29871 bp->av_forw = NULL;
29872 29872
29873 29873 /*
29874 29874 * update kstat since the bp is moved out of
29875 29875 * the waitq
29876 29876 */
29877 29877 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
29878 29878
29879 29879 /*
29880 29880 * Now put the bp onto the failfast queue.
29881 29881 */
29882 29882 if (un->un_failfast_headp == NULL) {
29883 29883 /* failfast queue is currently empty */
29884 29884 ASSERT(un->un_failfast_tailp == NULL);
29885 29885 un->un_failfast_headp =
29886 29886 un->un_failfast_tailp = bp;
29887 29887 } else {
29888 29888 /* Add the bp to the end of the failfast q */
29889 29889 ASSERT(un->un_failfast_tailp != NULL);
29890 29890 ASSERT(un->un_failfast_tailp->b_flags &
29891 29891 B_FAILFAST);
29892 29892 un->un_failfast_tailp->av_forw = bp;
29893 29893 un->un_failfast_tailp = bp;
29894 29894 }
29895 29895 }
29896 29896 }
29897 29897
29898 29898 /*
29899 29899 * Now return all bp's on the failfast queue to their owners.
29900 29900 */
29901 29901 while ((bp = un->un_failfast_headp) != NULL) {
29902 29902
29903 29903 un->un_failfast_headp = bp->av_forw;
29904 29904 if (un->un_failfast_headp == NULL) {
29905 29905 un->un_failfast_tailp = NULL;
29906 29906 }
29907 29907
29908 29908 /*
29909 29909 * We want to return the bp with a failure error code, but
29910 29910 * we do not want a call to sd_start_cmds() to occur here,
29911 29911 * so use sd_return_failed_command_no_restart() instead of
29912 29912 * sd_return_failed_command().
29913 29913 */
29914 29914 sd_return_failed_command_no_restart(un, bp, EIO);
29915 29915 }
29916 29916
29917 29917 /* Flush the xbuf queues if required. */
29918 29918 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
29919 29919 ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
29920 29920 }
29921 29921
29922 29922 SD_TRACE(SD_LOG_IO_FAILFAST, un,
29923 29923 "sd_failfast_flushq: exit: un:0x%p\n", un);
29924 29924 }
29925 29925
29926 29926
29927 29927 /*
29928 29928 * Function: sd_failfast_flushq_callback
29929 29929 *
29930 29930 * Description: Return TRUE if the given bp meets the criteria for failfast
29931 29931 * flushing. Used with ddi_xbuf_flushq(9F).
29932 29932 *
29933 29933 * Arguments: bp - ptr to buf struct to be examined.
29934 29934 *
29935 29935 * Context: Any
29936 29936 */
29937 29937
29938 29938 static int
29939 29939 sd_failfast_flushq_callback(struct buf *bp)
29940 29940 {
29941 29941 /*
29942 29942 * Return TRUE if (1) we want to flush ALL bufs when the failfast
29943 29943 * state is entered; OR (2) the given bp has B_FAILFAST set.
29944 29944 */
29945 29945 return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
29946 29946 (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
29947 29947 }
29948 29948
29949 29949
29950 29950
29951 29951 /*
29952 29952 * Function: sd_setup_next_xfer
29953 29953 *
29954 29954 * Description: Prepare next I/O operation using DMA_PARTIAL
29955 29955 *
29956 29956 */
29957 29957
29958 29958 static int
29959 29959 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
29960 29960 struct scsi_pkt *pkt, struct sd_xbuf *xp)
29961 29961 {
29962 29962 ssize_t num_blks_not_xfered;
29963 29963 daddr_t strt_blk_num;
29964 29964 ssize_t bytes_not_xfered;
29965 29965 int rval;
29966 29966
29967 29967 ASSERT(pkt->pkt_resid == 0);
29968 29968
29969 29969 /*
29970 29970 * Calculate next block number and amount to be transferred.
29971 29971 *
29972 29972 	 * How much data has NOT been transferred to the HBA yet.
29973 29973 */
29974 29974 bytes_not_xfered = xp->xb_dma_resid;
29975 29975
29976 29976 /*
29977 29977 	 * Figure out how many blocks have NOT been transferred to the HBA yet.
29978 29978 */
29979 29979 num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);
29980 29980
29981 29981 /*
29982 29982 	 * Set the starting block number to the end of what WAS transferred.
29983 29983 */
29984 29984 strt_blk_num = xp->xb_blkno +
29985 29985 SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);
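/*
 * For example (hypothetical sizes): with a 1 MB b_bcount and
 * 256 KB left in xb_dma_resid, the next pkt starts at xb_blkno
 * plus the blocks covering the 768 KB already sent, for the
 * blocks covering the remaining 256 KB.
 */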
29986 29986
29987 29987 /*
29988 29988 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
29989 29989 * will call scsi_initpkt with NULL_FUNC so we do not have to release
29990 29990 * the disk mutex here.
29991 29991 */
29992 29992 rval = sd_setup_next_rw_pkt(un, pkt, bp,
29993 29993 strt_blk_num, num_blks_not_xfered);
29994 29994
29995 29995 if (rval == 0) {
29996 29996
29997 29997 /*
29998 29998 * Success.
29999 29999 *
30000 30000 * Adjust things if there are still more blocks to be
30001 30001 		 * transferred.
30002 30002 */
30003 30003 xp->xb_dma_resid = pkt->pkt_resid;
30004 30004 pkt->pkt_resid = 0;
30005 30005
30006 30006 return (1);
30007 30007 }
30008 30008
30009 30009 /*
30010 30010 	 * There's really only one possible error return from
30011 30011 	 * sd_setup_next_rw_pkt, which occurs when scsi_init_pkt
30012 30012 * returns NULL.
30013 30013 */
30014 30014 ASSERT(rval == SD_PKT_ALLOC_FAILURE);
30015 30015
30016 30016 bp->b_resid = bp->b_bcount;
30017 30017 bp->b_flags |= B_ERROR;
30018 30018
30019 30019 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
30020 30020 "Error setting up next portion of DMA transfer\n");
30021 30021
30022 30022 return (0);
30023 30023 }
30024 30024
30025 30025 /*
30026 30026 * Function: sd_panic_for_res_conflict
30027 30027 *
30028 30028 * Description: Call panic with a string formatted with "Reservation Conflict"
30029 30029 * and a human readable identifier indicating the SD instance
30030 30030 * that experienced the reservation conflict.
30031 30031 *
30032 30032 * Arguments: un - pointer to the soft state struct for the instance.
30033 30033 *
30034 30034 * Context: may execute in interrupt context.
30035 30035 */
30036 30036
30037 30037 #define SD_RESV_CONFLICT_FMT_LEN 40
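/* The panic buffer must fit the format text plus a full device path. */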
30038 30038 void
30039 30039 sd_panic_for_res_conflict(struct sd_lun *un)
30040 30040 {
30041 30041 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN];
30042 30042 char path_str[MAXPATHLEN];
30043 30043
30044 30044 (void) snprintf(panic_str, sizeof (panic_str),
30045 30045 "Reservation Conflict\nDisk: %s",
30046 30046 ddi_pathname(SD_DEVINFO(un), path_str));
30047 30047
30048 30048 panic(panic_str);
30049 30049 }
30050 30050
30051 30051 /*
30052 30052  * Note: The following sd_faultinjection_ioctl() routines implement
30053 30053  * driver support for handling fault injection for error analysis
30054 30054  * by causing faults in multiple layers of the driver.
30055 30055 *
30056 30056 */
30057 30057
30058 30058 #ifdef SD_FAULT_INJECTION
30059 30059 static uint_t sd_fault_injection_on = 0;
30060 30060
30061 30061 /*
30062 30062 * Function: sd_faultinjection_ioctl()
30063 30063 *
30064 30064 * Description: This routine is the driver entry point for handling
30065 30065 * faultinjection ioctls to inject errors into the
30066 30066 * layer model
30067 30067 *
30068 30068 * Arguments: cmd - the ioctl cmd received
30069 30069 * arg - the arguments from user and returns
30070 30070 */
30071 30071
30072 30072 static void
30073 30073 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un)
30074 30074 {
30075 30075 uint_t i = 0;
30076 30076 uint_t rval;
30077 30077
30078 30078 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");
30079 30079
30080 30080 mutex_enter(SD_MUTEX(un));
30081 30081
30082 30082 switch (cmd) {
30083 30083 case SDIOCRUN:
30084 30084 /* Allow pushed faults to be injected */
30085 30085 SD_INFO(SD_LOG_SDTEST, un,
30086 30086 "sd_faultinjection_ioctl: Injecting Fault Run\n");
30087 30087
30088 30088 sd_fault_injection_on = 1;
30089 30089
30090 30090 SD_INFO(SD_LOG_IOERR, un,
30091 30091 "sd_faultinjection_ioctl: run finished\n");
30092 30092 break;
30093 30093
30094 30094 case SDIOCSTART:
30095 30095 /* Start Injection Session */
30096 30096 SD_INFO(SD_LOG_SDTEST, un,
30097 30097 "sd_faultinjection_ioctl: Injecting Fault Start\n");
30098 30098
30099 30099 sd_fault_injection_on = 0;
30100 30100 un->sd_injection_mask = 0xFFFFFFFF;
30101 30101 for (i = 0; i < SD_FI_MAX_ERROR; i++) {
30102 30102 un->sd_fi_fifo_pkt[i] = NULL;
30103 30103 un->sd_fi_fifo_xb[i] = NULL;
30104 30104 un->sd_fi_fifo_un[i] = NULL;
30105 30105 un->sd_fi_fifo_arq[i] = NULL;
30106 30106 }
30107 30107 un->sd_fi_fifo_start = 0;
30108 30108 un->sd_fi_fifo_end = 0;
30109 30109
30110 30110 mutex_enter(&(un->un_fi_mutex));
30111 30111 un->sd_fi_log[0] = '\0';
30112 30112 un->sd_fi_buf_len = 0;
30113 30113 mutex_exit(&(un->un_fi_mutex));
30114 30114
30115 30115 SD_INFO(SD_LOG_IOERR, un,
30116 30116 "sd_faultinjection_ioctl: start finished\n");
30117 30117 break;
30118 30118
30119 30119 case SDIOCSTOP:
30120 30120 /* Stop Injection Session */
30121 30121 SD_INFO(SD_LOG_SDTEST, un,
30122 30122 "sd_faultinjection_ioctl: Injecting Fault Stop\n");
30123 30123 sd_fault_injection_on = 0;
30124 30124 un->sd_injection_mask = 0x0;
30125 30125
30126 30126 		/* Empty stray or unused structs from the fifo */
30127 30127 for (i = 0; i < SD_FI_MAX_ERROR; i++) {
30128 30128 if (un->sd_fi_fifo_pkt[i] != NULL) {
30129 30129 kmem_free(un->sd_fi_fifo_pkt[i],
30130 30130 sizeof (struct sd_fi_pkt));
30131 30131 }
30132 30132 if (un->sd_fi_fifo_xb[i] != NULL) {
30133 30133 kmem_free(un->sd_fi_fifo_xb[i],
30134 30134 sizeof (struct sd_fi_xb));
30135 30135 }
30136 30136 if (un->sd_fi_fifo_un[i] != NULL) {
30137 30137 kmem_free(un->sd_fi_fifo_un[i],
30138 30138 sizeof (struct sd_fi_un));
30139 30139 }
30140 30140 if (un->sd_fi_fifo_arq[i] != NULL) {
30141 30141 kmem_free(un->sd_fi_fifo_arq[i],
30142 30142 sizeof (struct sd_fi_arq));
30143 30143 }
30144 30144 un->sd_fi_fifo_pkt[i] = NULL;
30145 30145 un->sd_fi_fifo_un[i] = NULL;
30146 30146 un->sd_fi_fifo_xb[i] = NULL;
30147 30147 un->sd_fi_fifo_arq[i] = NULL;
30148 30148 }
30149 30149 un->sd_fi_fifo_start = 0;
30150 30150 un->sd_fi_fifo_end = 0;
30151 30151
30152 30152 SD_INFO(SD_LOG_IOERR, un,
30153 30153 "sd_faultinjection_ioctl: stop finished\n");
30154 30154 break;
30155 30155
30156 30156 case SDIOCINSERTPKT:
30157 30157 /* Store a packet struct to be pushed onto fifo */
30158 30158 SD_INFO(SD_LOG_SDTEST, un,
30159 30159 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");
30160 30160
30161 30161 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
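/*
 * The fifo index wraps at SD_FI_MAX_ERROR; inserting into an
 * occupied slot frees the stale entry before storing the new
 * one (the xb, un, and arq cases below follow the same pattern).
 */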
30162 30162
30163 30163 sd_fault_injection_on = 0;
30164 30164
30165 30165 		/* No more than SD_FI_MAX_ERROR allowed in the queue */
30166 30166 if (un->sd_fi_fifo_pkt[i] != NULL) {
30167 30167 kmem_free(un->sd_fi_fifo_pkt[i],
30168 30168 sizeof (struct sd_fi_pkt));
30169 30169 }
30170 30170 if (arg != NULL) {
30171 30171 un->sd_fi_fifo_pkt[i] =
30172 30172 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
30173 30173 if (un->sd_fi_fifo_pkt[i] == NULL) {
30174 30174 				/* Alloc failed; don't store anything */
30175 30175 break;
30176 30176 }
30177 30177 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
30178 30178 sizeof (struct sd_fi_pkt), 0);
30179 30179 if (rval == -1) {
30180 30180 kmem_free(un->sd_fi_fifo_pkt[i],
30181 30181 sizeof (struct sd_fi_pkt));
30182 30182 un->sd_fi_fifo_pkt[i] = NULL;
30183 30183 }
30184 30184 } else {
30185 30185 SD_INFO(SD_LOG_IOERR, un,
30186 30186 "sd_faultinjection_ioctl: pkt null\n");
30187 30187 }
30188 30188 break;
30189 30189
30190 30190 case SDIOCINSERTXB:
30191 30191 /* Store a xb struct to be pushed onto fifo */
30192 30192 SD_INFO(SD_LOG_SDTEST, un,
30193 30193 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");
30194 30194
30195 30195 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30196 30196
30197 30197 sd_fault_injection_on = 0;
30198 30198
30199 30199 if (un->sd_fi_fifo_xb[i] != NULL) {
30200 30200 kmem_free(un->sd_fi_fifo_xb[i],
30201 30201 sizeof (struct sd_fi_xb));
30202 30202 un->sd_fi_fifo_xb[i] = NULL;
30203 30203 }
30204 30204 if (arg != NULL) {
30205 30205 un->sd_fi_fifo_xb[i] =
30206 30206 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
30207 30207 if (un->sd_fi_fifo_xb[i] == NULL) {
30208 30208 				/* Alloc failed; don't store anything */
30209 30209 break;
30210 30210 }
30211 30211 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
30212 30212 sizeof (struct sd_fi_xb), 0);
30213 30213
30214 30214 if (rval == -1) {
30215 30215 kmem_free(un->sd_fi_fifo_xb[i],
30216 30216 sizeof (struct sd_fi_xb));
30217 30217 un->sd_fi_fifo_xb[i] = NULL;
30218 30218 }
30219 30219 } else {
30220 30220 SD_INFO(SD_LOG_IOERR, un,
30221 30221 "sd_faultinjection_ioctl: xb null\n");
30222 30222 }
30223 30223 break;
30224 30224
30225 30225 case SDIOCINSERTUN:
30226 30226 /* Store a un struct to be pushed onto fifo */
30227 30227 SD_INFO(SD_LOG_SDTEST, un,
30228 30228 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");
30229 30229
30230 30230 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30231 30231
30232 30232 sd_fault_injection_on = 0;
30233 30233
30234 30234 if (un->sd_fi_fifo_un[i] != NULL) {
30235 30235 kmem_free(un->sd_fi_fifo_un[i],
30236 30236 sizeof (struct sd_fi_un));
30237 30237 un->sd_fi_fifo_un[i] = NULL;
30238 30238 }
30239 30239 if (arg != NULL) {
30240 30240 un->sd_fi_fifo_un[i] =
30241 30241 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
30242 30242 if (un->sd_fi_fifo_un[i] == NULL) {
30243 30243 				/* Alloc failed; don't store anything */
30244 30244 break;
30245 30245 }
30246 30246 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
30247 30247 sizeof (struct sd_fi_un), 0);
30248 30248 if (rval == -1) {
30249 30249 kmem_free(un->sd_fi_fifo_un[i],
30250 30250 sizeof (struct sd_fi_un));
30251 30251 un->sd_fi_fifo_un[i] = NULL;
30252 30252 }
30253 30253
30254 30254 } else {
30255 30255 SD_INFO(SD_LOG_IOERR, un,
30256 30256 "sd_faultinjection_ioctl: un null\n");
30257 30257 }
30258 30258
30259 30259 break;
30260 30260
30261 30261 case SDIOCINSERTARQ:
30262 30262 /* Store a arq struct to be pushed onto fifo */
30263 30263 SD_INFO(SD_LOG_SDTEST, un,
30264 30264 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
30265 30265 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30266 30266
30267 30267 sd_fault_injection_on = 0;
30268 30268
30269 30269 if (un->sd_fi_fifo_arq[i] != NULL) {
30270 30270 kmem_free(un->sd_fi_fifo_arq[i],
30271 30271 sizeof (struct sd_fi_arq));
30272 30272 un->sd_fi_fifo_arq[i] = NULL;
30273 30273 }
30274 30274 if (arg != NULL) {
30275 30275 un->sd_fi_fifo_arq[i] =
30276 30276 kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
30277 30277 if (un->sd_fi_fifo_arq[i] == NULL) {
30278 30278 				/* Alloc failed; don't store anything */
30279 30279 break;
30280 30280 }
30281 30281 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
30282 30282 sizeof (struct sd_fi_arq), 0);
30283 30283 if (rval == -1) {
30284 30284 kmem_free(un->sd_fi_fifo_arq[i],
30285 30285 sizeof (struct sd_fi_arq));
30286 30286 un->sd_fi_fifo_arq[i] = NULL;
30287 30287 }
30288 30288
30289 30289 } else {
30290 30290 SD_INFO(SD_LOG_IOERR, un,
30291 30291 "sd_faultinjection_ioctl: arq null\n");
30292 30292 }
30293 30293
30294 30294 break;
30295 30295
30296 30296 case SDIOCPUSH:
30297 30297 /* Push stored xb, pkt, un, and arq onto fifo */
30298 30298 sd_fault_injection_on = 0;
30299 30299
30300 30300 if (arg != NULL) {
30301 30301 rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
30302 30302 if (rval != -1 &&
30303 30303 un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
30304 30304 un->sd_fi_fifo_end += i;
30305 30305 }
30306 30306 } else {
30307 30307 SD_INFO(SD_LOG_IOERR, un,
30308 30308 "sd_faultinjection_ioctl: push arg null\n");
30309 30309 			if (un->sd_fi_fifo_end + 1 < SD_FI_MAX_ERROR) {
30310 30310 un->sd_fi_fifo_end++;
30311 30311 }
30312 30312 }
30313 30313 SD_INFO(SD_LOG_IOERR, un,
30314 30314 "sd_faultinjection_ioctl: push to end=%d\n",
30315 30315 un->sd_fi_fifo_end);
30316 30316 break;
30317 30317
30318 30318 case SDIOCRETRIEVE:
30319 30319 /* Return buffer of log from Injection session */
30320 30320 SD_INFO(SD_LOG_SDTEST, un,
30321 30321 		    "sd_faultinjection_ioctl: Injecting Fault Retrieve\n");
30322 30322
30323 30323 sd_fault_injection_on = 0;
30324 30324
30325 30325 mutex_enter(&(un->un_fi_mutex));
30326 30326 rval = ddi_copyout(un->sd_fi_log, (void *)arg,
30327 30327 un->sd_fi_buf_len+1, 0);
30328 30328 mutex_exit(&(un->un_fi_mutex));
30329 30329
30330 30330 if (rval == -1) {
30331 30331 /*
30332 30332 			 * arg is possibly invalid; set
30333 30333 			 * it to NULL for the return
30334 30334 */
30335 30335 arg = NULL;
30336 30336 }
30337 30337 break;
30338 30338 }
30339 30339
30340 30340 mutex_exit(SD_MUTEX(un));
30341 30341 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:"
30342 30342 " exit\n");
30343 30343 }
30344 30344
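A minimal userland sketch of one plausible way to drive the fault-injection
ioctls handled above. Illustrative only: it assumes the SDIOC* codes,
struct sd_fi_xb, and SD_FI_MAX_BUF are visible to the program via
<sys/scsi/targets/sddef.h>, that an all-0xff field is left untouched by the
injection path, and that rawdev names a raw sd device.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int
sd_fi_inject_one(const char *rawdev)
{
	struct sd_fi_xb xb;
	uint_t npush = 1;			/* advance sd_fi_fifo_end by one */
	char log[SD_FI_MAX_BUF];
	int fd;

	if ((fd = open(rawdev, O_RDONLY)) < 0)
		return (-1);

	/* 0xff-filled fields are assumed to mean "leave this field alone" */
	(void) memset(&xb, 0xff, sizeof (xb));

	(void) ioctl(fd, SDIOCSTART, NULL);	/* arm fault injection */
	(void) ioctl(fd, SDIOCINSERTXB, &xb);	/* stage xb at the fifo end */
	(void) ioctl(fd, SDIOCPUSH, &npush);	/* publish the staged entry */
	/* ... issue I/O here so that sdintr consumes the injected entry ... */
	(void) ioctl(fd, SDIOCRETRIEVE, log);	/* copy out the session log */
	(void) printf("%s\n", log);
	(void) close(fd);
	return (0);
}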
30345 30345
30346 30346 /*
30347 30347 * Function: sd_injection_log()
30348 30348 *
30349 30349  * Description: This routine appends buf to the existing injection log
30350 30350  *              so that it can later be retrieved via sd_faultinjection_ioctl()
30351 30351  *              for use in fault detection and recovery.
30352 30352  *
30353 30353  * Arguments:   buf - the string to add to the log; un - the unit's softstate
30354 30354 */
30355 30355
30356 30356 static void
30357 30357 sd_injection_log(char *buf, struct sd_lun *un)
30358 30358 {
30359 30359 uint_t len;
30360 30360
30361 30361 ASSERT(un != NULL);
30362 30362 ASSERT(buf != NULL);
30363 30363
30364 30364 mutex_enter(&(un->un_fi_mutex));
30365 30365
30366 30366 len = min(strlen(buf), 255);
30367 30367 /* Add logged value to Injection log to be returned later */
30368 30368 if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
30369 30369 uint_t offset = strlen((char *)un->sd_fi_log);
30370 30370 char *destp = (char *)un->sd_fi_log + offset;
30371 30371 int i;
30372 30372 for (i = 0; i < len; i++) {
30373 30373 *destp++ = *buf++;
30374 30374 }
30375 30375 un->sd_fi_buf_len += len;
30376 30376 un->sd_fi_log[un->sd_fi_buf_len] = '\0';
30377 30377 }
30378 30378
30379 30379 mutex_exit(&(un->un_fi_mutex));
30380 30380 }
30381 30381
30382 30382
30383 30383 /*
30384 30384 * Function: sd_faultinjection()
30385 30385 *
30386 30386  * Description: This routine takes the pkt and changes its
30387 30387  *              content based on the error injection scenario.
30388 30388 *
30389 30389 * Arguments: pktp - packet to be changed
30390 30390 */
30391 30391
30392 30392 static void
30393 30393 sd_faultinjection(struct scsi_pkt *pktp)
30394 30394 {
30395 30395 uint_t i;
30396 30396 struct sd_fi_pkt *fi_pkt;
30397 30397 struct sd_fi_xb *fi_xb;
30398 30398 struct sd_fi_un *fi_un;
30399 30399 struct sd_fi_arq *fi_arq;
30400 30400 struct buf *bp;
30401 30401 struct sd_xbuf *xb;
30402 30402 struct sd_lun *un;
30403 30403
30404 30404 ASSERT(pktp != NULL);
30405 30405
30406 30406 /* pull bp xb and un from pktp */
30407 30407 bp = (struct buf *)pktp->pkt_private;
30408 30408 xb = SD_GET_XBUF(bp);
30409 30409 un = SD_GET_UN(bp);
30410 30410
30411 30411 ASSERT(un != NULL);
30412 30412
30413 30413 mutex_enter(SD_MUTEX(un));
30414 30414
30415 30415 SD_TRACE(SD_LOG_SDTEST, un,
30416 30416 "sd_faultinjection: entry Injection from sdintr\n");
30417 30417
30418 30418 /* if injection is off return */
30419 30419 if (sd_fault_injection_on == 0 ||
30420 30420 un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
30421 30421 mutex_exit(SD_MUTEX(un));
30422 30422 return;
30423 30423 }
30424 30424
30425 30425 SD_INFO(SD_LOG_SDTEST, un,
30426 30426 "sd_faultinjection: is working for copying\n");
30427 30427
30428 30428 /* take next set off fifo */
30429 30429 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;
30430 30430
30431 30431 fi_pkt = un->sd_fi_fifo_pkt[i];
30432 30432 fi_xb = un->sd_fi_fifo_xb[i];
30433 30433 fi_un = un->sd_fi_fifo_un[i];
30434 30434 fi_arq = un->sd_fi_fifo_arq[i];
30435 30435
30436 30436
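	/*
	 * Each staged fi_* struct below is applied field-by-field with
	 * SD_CONDSET (defined earlier in this file). The explicit
	 * pkt_cdbp != 0xff and xb_retry_count != 0 guards visible below
	 * suggest that sentinel values are used to mean "leave this
	 * field unchanged".
	 */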
30437 30437 /* set variables accordingly */
30438 30438 /* set pkt if it was on fifo */
30439 30439 if (fi_pkt != NULL) {
30440 30440 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
30441 30441 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
30442 30442 if (fi_pkt->pkt_cdbp != 0xff)
30443 30443 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
30444 30444 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
30445 30445 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
30446 30446 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");
30447 30447
30448 30448 }
30449 30449 /* set xb if it was on fifo */
30450 30450 if (fi_xb != NULL) {
30451 30451 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
30452 30452 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
30453 30453 if (fi_xb->xb_retry_count != 0)
30454 30454 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
30455 30455 SD_CONDSET(xb, xb, xb_victim_retry_count,
30456 30456 "xb_victim_retry_count");
30457 30457 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
30458 30458 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
30459 30459 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");
30460 30460
30461 30461 /* copy in block data from sense */
30462 30462 /*
30463 30463 * if (fi_xb->xb_sense_data[0] != -1) {
30464 30464 * bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
30465 30465 * SENSE_LENGTH);
30466 30466 * }
30467 30467 */
30468 30468 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH);
30469 30469
30470 30470 /* copy in extended sense codes */
30471 30471 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
30472 30472 xb, es_code, "es_code");
30473 30473 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
30474 30474 xb, es_key, "es_key");
30475 30475 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
30476 30476 xb, es_add_code, "es_add_code");
30477 30477 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
30478 30478 xb, es_qual_code, "es_qual_code");
30479 30479 struct scsi_extended_sense *esp;
30480 30480 esp = (struct scsi_extended_sense *)xb->xb_sense_data;
30481 30481 esp->es_class = CLASS_EXTENDED_SENSE;
30482 30482 }
30483 30483
30484 30484 /* set un if it was on fifo */
30485 30485 if (fi_un != NULL) {
30486 30486 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
30487 30487 SD_CONDSET(un, un, un_ctype, "un_ctype");
30488 30488 SD_CONDSET(un, un, un_reset_retry_count,
30489 30489 "un_reset_retry_count");
30490 30490 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
30491 30491 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
30492 30492 SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
30493 30493 SD_CONDSET(un, un, un_f_allow_bus_device_reset,
30494 30494 "un_f_allow_bus_device_reset");
30495 30495 SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");
30496 30496
30497 30497 }
30498 30498
30499 30499 /* copy in auto request sense if it was on fifo */
30500 30500 if (fi_arq != NULL) {
30501 30501 bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
30502 30502 }
30503 30503
30504 30504 /* free structs */
30505 30505 if (un->sd_fi_fifo_pkt[i] != NULL) {
30506 30506 kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
30507 30507 }
30508 30508 if (un->sd_fi_fifo_xb[i] != NULL) {
30509 30509 kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
30510 30510 }
30511 30511 if (un->sd_fi_fifo_un[i] != NULL) {
30512 30512 kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
30513 30513 }
30514 30514 if (un->sd_fi_fifo_arq[i] != NULL) {
30515 30515 kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
30516 30516 }
30517 30517
30518 30518 /*
30519 30519 	 * kmem_free does not guarantee to set the pointer to NULL.
30520 30520 	 * Since we use these pointers to determine whether we set
30521 30521 	 * values or not, let's make sure they are always
30522 30522 	 * NULL after the free.
30523 30523 */
30524 30524 un->sd_fi_fifo_pkt[i] = NULL;
30525 30525 un->sd_fi_fifo_un[i] = NULL;
30526 30526 un->sd_fi_fifo_xb[i] = NULL;
30527 30527 un->sd_fi_fifo_arq[i] = NULL;
30528 30528
30529 30529 un->sd_fi_fifo_start++;
30530 30530
30531 30531 mutex_exit(SD_MUTEX(un));
30532 30532
30533 30533 SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
30534 30534 }
30535 30535
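/*
 * A worked illustration of the fifo bookkeeping above, assuming
 * SD_FI_MAX_ERROR == 256 purely for concreteness: with
 * sd_fi_fifo_start == 5 and sd_fi_fifo_end == 7, two staged injection
 * sets are pending; sd_faultinjection() consumes slot 5 % 256 == 5 and
 * advances start to 6. Both counters only ever increase and are reduced
 * modulo SD_FI_MAX_ERROR at each use, so the sd_fi_fifo_* arrays behave
 * as a ring without the counters needing to wrap explicitly.
 */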
30536 30536 #endif /* SD_FAULT_INJECTION */
30537 30537
30538 30538 /*
30539 30539  * This routine is invoked in sd_unit_attach(). Before calling it, the
30540 30540  * properties in the conf file should already have been processed,
30541 30541  * including the "hotpluggable" property.
30542 30542 *
30543 30543  * The sd driver distinguishes 3 different types of devices: removable media,
30544 30544  * non-removable media, and hotpluggable. The differences are defined below:
30545 30545 *
30546 30546 * 1. Device ID
30547 30547 *
30548 30548 * The device ID of a device is used to identify this device. Refer to
30549 30549 * ddi_devid_register(9F).
30550 30550 *
30551 30551 * For a non-removable media disk device which can provide 0x80 or 0x83
30552 30552 * VPD page (refer to INQUIRY command of SCSI SPC specification), a unique
30553 30553  * device ID is created to identify this device. For other non-removable
30554 30554  * media devices, a default device ID is created only if the device has
30555 30555  * at least 2 alternate cylinders. Otherwise, the device has no devid.
30556 30556 *
30557 30557 * -------------------------------------------------------
30558 30558 * removable media hotpluggable | Can Have Device ID
30559 30559 * -------------------------------------------------------
30560 30560 * false false | Yes
30561 30561 * false true | Yes
30562 30562 * true x | No
30563 30563 * ------------------------------------------------------
30564 30564 *
30565 30565 *
30566 30566 * 2. SCSI group 4 commands
30567 30567 *
30568 30568 * In SCSI specs, only some commands in group 4 command set can use
30569 30569 * 8-byte addresses that can be used to access >2TB storage spaces.
30570 30570  * Other commands have no such capability. Without group 4 support,
30571 30571  * it is impossible to make full use of the storage space of a disk with
30572 30572  * a capacity larger than 2TB.
30573 30573 *
30574 30574 * -----------------------------------------------
30575 30575 * removable media hotpluggable LP64 | Group
30576 30576 * -----------------------------------------------
30577 30577 * false false false | 1
30578 30578 * false false true | 4
30579 30579 * false true false | 1
30580 30580 * false true true | 4
30581 30581 * true x x | 5
30582 30582 * -----------------------------------------------
30583 30583 *
30584 30584 *
30585 30585 * 3. Check for VTOC Label
30586 30586 *
30587 30587 * If a direct-access disk has no EFI label, sd will check if it has a
30588 30588 * valid VTOC label. Now, sd also does that check for removable media
30589 30589 * and hotpluggable devices.
30590 30590 *
30591 30591 * --------------------------------------------------------------
30592 30592 * Direct-Access removable media hotpluggable | Check Label
30593 30593 * -------------------------------------------------------------
30594 30594 * false false false | No
30595 30595 * false false true | No
30596 30596 * false true false | Yes
30597 30597 * false true true | Yes
30598 30598 * true x x | Yes
30599 30599 * --------------------------------------------------------------
30600 30600 *
30601 30601 *
30602 30602 * 4. Building default VTOC label
30603 30603 *
30604 30604  * As section 3 says, sd checks whether some kinds of devices have a VTOC
30605 30605  * label. If those devices have no valid VTOC label, sd(7d) will attempt to
30606 30606  * create a default VTOC for them. Currently sd creates a default VTOC label
30607 30607  * for all devices on the x86 platform (VTOC_16), but only for removable
30608 30608  * media devices on SPARC (VTOC_8).
30609 30609 *
30610 30610 * -----------------------------------------------------------
30611 30611 * removable media hotpluggable platform | Default Label
30612 30612 * -----------------------------------------------------------
30613 30613  *         false          false    sparc    |     No
             *         false          false     x86     |     Yes
30614 30614  *         false          true      x86     |     Yes
30615 30615 * false true sparc | Yes
30616 30616 * true x x | Yes
30617 30617 * ----------------------------------------------------------
30618 30618 *
30619 30619 *
30620 30620 * 5. Supported blocksizes of target devices
30621 30621 *
30622 30622  * Sd supports a non-512-byte blocksize for removable media devices only.
30623 30623  * For other devices, only the 512-byte blocksize is supported. This may be
30624 30624  * changed in the near future because some RAID devices require a
30625 30625  * non-512-byte blocksize.
30626 30626 *
30627 30627 * -----------------------------------------------------------
30628 30628 * removable media hotpluggable | non-512-byte blocksize
30629 30629 * -----------------------------------------------------------
30630 30630 * false false | No
30631 30631 * false true | No
30632 30632 * true x | Yes
30633 30633 * -----------------------------------------------------------
30634 30634 *
30635 30635 *
30636 30636 * 6. Automatic mount & unmount
30637 30637 *
30638 30638  * The sd(7d) driver provides the DKIOCREMOVABLE ioctl, used to query whether
30639 30639  * a device is a removable media device; it returns 1 for removable media
30640 30640  * devices and 0 for others (a usage sketch follows this routine below).
30641 30641 *
30642 30642 * The automatic mounting subsystem should distinguish between the types
30643 30643 * of devices and apply automounting policies to each.
30644 30644 *
30645 30645 *
30646 30646 * 7. fdisk partition management
30647 30647 *
30648 30648  * Fdisk is the traditional partitioning method on the x86 platform. The
30649 30649  * sd(7d) driver supports fdisk partitions only on x86; on the sparc
30650 30650  * platform, sd doesn't support fdisk partitions at all. Note: pcfs(7fs)
30651 30651  * can recognize fdisk partitions on both the x86 and SPARC platforms.
30652 30652 *
30653 30653 * -----------------------------------------------------------
30654 30654 * platform removable media USB/1394 | fdisk supported
30655 30655 * -----------------------------------------------------------
30656 30656 * x86 X X | true
30657 30657 * ------------------------------------------------------------
30658 30658 * sparc X X | false
30659 30659 * ------------------------------------------------------------
30660 30660 *
30661 30661 *
30662 30662 * 8. MBOOT/MBR
30663 30663 *
30664 30664  * Although sd(7d) doesn't support fdisk on the SPARC platform, it does
30665 30665  * support reading/writing the mboot for removable media devices on SPARC.
30666 30666 *
30667 30667 * -----------------------------------------------------------
30668 30668 * platform removable media USB/1394 | mboot supported
30669 30669 * -----------------------------------------------------------
30670 30670 * x86 X X | true
30671 30671 * ------------------------------------------------------------
30672 30672 * sparc false false | false
30673 30673 * sparc false true | true
30674 30674 * sparc true false | true
30675 30675 * sparc true true | true
30676 30676 * ------------------------------------------------------------
30677 30677 *
30678 30678 *
30679 30679  * 9. Error handling when opening a device
30680 30680  *
30681 30681  *    If a disk device fails to open, an errno is returned. For some kinds
30682 30682  *    of errors, a different errno is returned depending on whether the
30683 30683  *    device is a removable media device. This brings USB/1394 hard disks in
30684 30684  *    line with expected hard disk behavior. It is not expected that this
30685 30685  *    breaks any application.
30686 30686 *
30687 30687 * ------------------------------------------------------
30688 30688 * removable media hotpluggable | errno
30689 30689 * ------------------------------------------------------
30690 30690 * false false | EIO
30691 30691 * false true | EIO
30692 30692 * true x | ENXIO
30693 30693 * ------------------------------------------------------
30694 30694 *
30695 30695 *
30696 30696 * 11. ioctls: DKIOCEJECT, CDROMEJECT
30697 30697 *
30698 30698 * These IOCTLs are applicable only to removable media devices.
30699 30699 *
30700 30700 * -----------------------------------------------------------
30701 30701 * removable media hotpluggable |DKIOCEJECT, CDROMEJECT
30702 30702 * -----------------------------------------------------------
30703 30703 * false false | No
30704 30704 * false true | No
30705 30705 * true x | Yes
30706 30706 * -----------------------------------------------------------
30707 30707 *
30708 30708 *
30709 30709 * 12. Kstats for partitions
30710 30710 *
30711 30711  *    sd creates partition kstats for non-removable media devices. USB and
30712 30712  *    Firewire hard disks now have partition kstats as well.
30713 30713 *
30714 30714 * ------------------------------------------------------
30715 30715 * removable media hotpluggable | kstat
30716 30716 * ------------------------------------------------------
30717 30717 * false false | Yes
30718 30718 * false true | Yes
30719 30719 * true x | No
30720 30720 * ------------------------------------------------------
30721 30721 *
30722 30722 *
30723 30723 * 13. Removable media & hotpluggable properties
30724 30724 *
30725 30725 * Sd driver creates a "removable-media" property for removable media
30726 30726 * devices. Parent nexus drivers create a "hotpluggable" property if
30727 30727  *    they support hotplugging.
30728 30728 *
30729 30729 * ---------------------------------------------------------------------
30730 30730  * removable media   hotpluggable |  "removable-media"   "hotpluggable"
30731 30731 * ---------------------------------------------------------------------
30732 30732 * false false | No No
30733 30733 * false true | No Yes
30734 30734 * true false | Yes No
30735 30735 * true true | Yes Yes
30736 30736 * ---------------------------------------------------------------------
30737 30737 *
30738 30738 *
30739 30739 * 14. Power Management
30740 30740 *
30741 30741 * sd only power manages removable media devices or devices that support
30742 30742 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250)
30743 30743 *
30744 30744 * A parent nexus that supports hotplugging can also set "pm-capable"
30745 30745 * if the disk can be power managed.
30746 30746 *
30747 30747 * ------------------------------------------------------------
30748 30748 * removable media hotpluggable pm-capable | power manage
30749 30749 * ------------------------------------------------------------
30750 30750 * false false false | No
30751 30751 * false false true | Yes
30752 30752 * false true false | No
30753 30753 * false true true | Yes
30754 30754 * true x x | Yes
30755 30755 * ------------------------------------------------------------
30756 30756 *
30757 30757  *    USB and Firewire hard disks can now be power managed independently
30758 30758  *    of the framebuffer.
30759 30759 *
30760 30760 *
30761 30761 * 15. Support for USB disks with capacity larger than 1TB
30762 30762 *
30763 30763  *    Currently, sd doesn't permit a fixed disk device with a capacity
30764 30764  *    larger than 1TB to be used in a 32-bit operating system environment.
30765 30765  *    However, sd doesn't enforce that for removable media devices. Instead,
30766 30766  *    it assumes that removable media devices cannot have a capacity larger
30767 30767  *    than 1TB. Therefore, using such devices on a 32-bit system is only
30768 30768  *    partially supported, which can cause some unexpected results.
30769 30769 *
30770 30770 * ---------------------------------------------------------------------
30771 30771 * removable media USB/1394 | Capacity > 1TB | Used in 32-bit env
30772 30772 * ---------------------------------------------------------------------
30773 30773  *       false          false  |  true           |     No
30774 30774  *       false          true   |  true           |     No
30775 30775 * true false | true | Yes
30776 30776 * true true | true | Yes
30777 30777 * ---------------------------------------------------------------------
30778 30778 *
30779 30779 *
30780 30780 * 16. Check write-protection at open time
30781 30781 *
30782 30782  * When a removable media device is opened for writing without the NDELAY
30783 30783  * flag, sd will check whether the device is writable. If a write-protected
30784 30784  * device is opened for writing without the NDELAY flag, the open will abort.
30785 30785 *
30786 30786 * ------------------------------------------------------------
30787 30787 * removable media USB/1394 | WP Check
30788 30788 * ------------------------------------------------------------
30789 30789 * false false | No
30790 30790 * false true | No
30791 30791 * true false | Yes
30792 30792 * true true | Yes
30793 30793 * ------------------------------------------------------------
30794 30794 *
30795 30795 *
30796 30796 * 17. syslog when corrupted VTOC is encountered
30797 30797 *
30798 30798  * Currently, if an invalid VTOC is encountered, sd only prints a syslog
30799 30799  * message for fixed SCSI disks.
30800 30800 * ------------------------------------------------------------
30801 30801 * removable media USB/1394 | print syslog
30802 30802 * ------------------------------------------------------------
30803 30803 * false false | Yes
30804 30804 * false true | No
30805 30805 * true false | No
30806 30806 * true true | No
30807 30807 * ------------------------------------------------------------
30808 30808 */
30809 30809 static void
30810 30810 sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
30811 30811 {
30812 30812 int pm_cap;
30813 30813
30814 30814 ASSERT(un->un_sd);
30815 30815 ASSERT(un->un_sd->sd_inq);
30816 30816
30817 30817 /*
30818 30818 * Enable SYNC CACHE support for all devices.
30819 30819 */
30820 30820 un->un_f_sync_cache_supported = TRUE;
30821 30821
30822 30822 /*
30823 30823 * Set the sync cache required flag to false.
30824 30824 	 * This ensures that no SYNC CACHE command is
30825 30825 	 * sent when there have been no writes.
30826 30826 */
30827 30827 un->un_f_sync_cache_required = FALSE;
30828 30828
30829 30829 if (un->un_sd->sd_inq->inq_rmb) {
30830 30830 /*
30831 30831 		 * The media of this device is removable, and for this kind
30832 30832 		 * of device it is possible to change the medium after the
30833 30833 		 * device is opened. Thus we should support this operation.
30834 30834 */
30835 30835 un->un_f_has_removable_media = TRUE;
30836 30836
30837 30837 /*
30838 30838 * support non-512-byte blocksize of removable media devices
30839 30839 */
30840 30840 un->un_f_non_devbsize_supported = TRUE;
30841 30841
30842 30842 /*
30843 30843 * Assume that all removable media devices support DOOR_LOCK
30844 30844 */
30845 30845 un->un_f_doorlock_supported = TRUE;
30846 30846
30847 30847 /*
30848 30848 		 * A removable media device may be opened with the NDELAY
30849 30849 		 * flag when there is no media in the drive; in that case
30850 30850 		 * we don't care whether the device is writable. Without the
30851 30851 		 * NDELAY flag, we need to check if the media is write-protected.
30852 30852 */
30853 30853 un->un_f_chk_wp_open = TRUE;
30854 30854
30855 30855 /*
30856 30856 		 * Need to start a SCSI watch thread to monitor the media state;
30857 30857 		 * when media is inserted or ejected, notify syseventd.
30858 30858 */
30859 30859 un->un_f_monitor_media_state = TRUE;
30860 30860
30861 30861 /*
30862 30862 		 * Some devices don't support the START_STOP_UNIT command.
30863 30863 		 * Therefore, we'd better check whether a device supports it
30864 30864 		 * before sending it.
30865 30865 */
30866 30866 un->un_f_check_start_stop = TRUE;
30867 30867
30868 30868 /*
30869 30869 * support eject media ioctl:
30870 30870 * FDEJECT, DKIOCEJECT, CDROMEJECT
30871 30871 */
30872 30872 un->un_f_eject_media_supported = TRUE;
30873 30873
30874 30874 /*
30875 30875 		 * Because many removable-media devices don't support
30876 30876 		 * LOG_SENSE, we can't use this command to check whether
30877 30877 		 * a removable media device supports power management.
30878 30878 		 * We assume that they support power management via the
30879 30879 		 * START_STOP_UNIT command and can be spun up and down
30880 30880 		 * without limitations.
30881 30881 */
30882 30882 un->un_f_pm_supported = TRUE;
30883 30883
30884 30884 /*
30885 30885 		 * Need to create a zero-length (Boolean) property
30886 30886 		 * "removable-media" for the removable media devices.
30887 30887 		 * Note that the return value of the property call is not
30888 30888 		 * checked: if the property cannot be created, we do not
30889 30889 		 * want the attach to fail altogether. This is consistent
30890 30890 		 * with other property creation in attach.
30891 30891 */
30892 30892 (void) ddi_prop_create(DDI_DEV_T_NONE, devi,
30893 30893 DDI_PROP_CANSLEEP, "removable-media", NULL, 0);
30894 30894
30895 30895 } else {
30896 30896 /*
30897 30897 * create device ID for device
30898 30898 */
30899 30899 un->un_f_devid_supported = TRUE;
30900 30900
30901 30901 /*
30902 30902 		 * Spin up non-removable-media devices once they are attached
30903 30903 */
30904 30904 un->un_f_attach_spinup = TRUE;
30905 30905
30906 30906 /*
30907 30907 		 * According to the SCSI specification, sense data has two
30908 30908 		 * formats: fixed format and descriptor format. At present, we
30909 30909 		 * don't support descriptor-format sense data for removable
30910 30910 		 * media.
30911 30911 */
30912 30912 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
30913 30913 un->un_f_descr_format_supported = TRUE;
30914 30914 }
30915 30915
30916 30916 /*
30917 30917 * kstats are created only for non-removable media devices.
30918 30918 *
30919 30919 * Set this in sd.conf to 0 in order to disable kstats. The
30920 30920 * default is 1, so they are enabled by default.
30921 30921 */
30922 30922 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
30923 30923 SD_DEVINFO(un), DDI_PROP_DONTPASS,
30924 30924 "enable-partition-kstats", 1));
30925 30925
30926 30926 /*
30927 30927 * Check if HBA has set the "pm-capable" property.
30928 30928 * If "pm-capable" exists and is non-zero then we can
30929 30929 * power manage the device without checking the start/stop
30930 30930 * cycle count log sense page.
30931 30931 *
30932 30932 * If "pm-capable" exists and is set to be false (0),
30933 30933 * then we should not power manage the device.
30934 30934 *
30935 30935 * If "pm-capable" doesn't exist then pm_cap will
30936 30936 * be set to SD_PM_CAPABLE_UNDEFINED (-1). In this case,
30937 30937 * sd will check the start/stop cycle count log sense page
30938 30938 * and power manage the device if the cycle count limit has
30939 30939 * not been exceeded.
30940 30940 */
30941 30941 pm_cap = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
30942 30942 DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
30943 30943 if (SD_PM_CAPABLE_IS_UNDEFINED(pm_cap)) {
30944 30944 un->un_f_log_sense_supported = TRUE;
30945 30945 if (!un->un_f_power_condition_disabled &&
30946 30946 SD_INQUIRY(un)->inq_ansi == 6) {
30947 30947 un->un_f_power_condition_supported = TRUE;
30948 30948 }
30949 30949 } else {
30950 30950 /*
30951 30951 * pm-capable property exists.
30952 30952 *
30953 30953 * Convert "TRUE" values for pm_cap to
30954 30954 * SD_PM_CAPABLE_IS_TRUE to make it easier to check
30955 30955 * later. "TRUE" values are any values defined in
30956 30956 * inquiry.h.
30957 30957 */
30958 30958 if (SD_PM_CAPABLE_IS_FALSE(pm_cap)) {
30959 30959 un->un_f_log_sense_supported = FALSE;
30960 30960 } else {
30961 30961 /* SD_PM_CAPABLE_IS_TRUE case */
30962 30962 un->un_f_pm_supported = TRUE;
30963 30963 if (!un->un_f_power_condition_disabled &&
30964 30964 SD_PM_CAPABLE_IS_SPC_4(pm_cap)) {
30965 30965 un->un_f_power_condition_supported =
30966 30966 TRUE;
30967 30967 }
30968 30968 if (SD_PM_CAP_LOG_SUPPORTED(pm_cap)) {
30969 30969 un->un_f_log_sense_supported = TRUE;
30970 30970 un->un_f_pm_log_sense_smart =
30971 30971 SD_PM_CAP_SMART_LOG(pm_cap);
30972 30972 }
30973 30973 }
30974 30974
30975 30975 SD_INFO(SD_LOG_ATTACH_DETACH, un,
30976 30976 "sd_unit_attach: un:0x%p pm-capable "
30977 30977 "property set to %d.\n", un, un->un_f_pm_supported);
30978 30978 }
30979 30979 }
30980 30980
30981 30981 if (un->un_f_is_hotpluggable) {
30982 30982
30983 30983 /*
30984 30984 * Have to watch hotpluggable devices as well, since
30985 30985 * that's the only way for userland applications to
30986 30986 * detect hot removal while device is busy/mounted.
30987 30987 */
30988 30988 un->un_f_monitor_media_state = TRUE;
30989 30989
30990 30990 un->un_f_check_start_stop = TRUE;
30991 30991
30992 30992 }
30993 30993 }
30994 30994
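Item 6 of the block comment above describes the DKIOCREMOVABLE ioctl; the
sketch below shows one way a userland program might query it. Illustrative
only: DKIOCREMOVABLE comes from <sys/dkio.h>, and the default device path
is merely an example.

#include <sys/types.h>
#include <sys/dkio.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	const char *dev = (argc > 1) ? argv[1] : "/dev/rdsk/c0t0d0s2";
	int removable = 0;
	int fd = open(dev, O_RDONLY | O_NDELAY);

	if (fd < 0) {
		perror("open");
		return (1);
	}
	if (ioctl(fd, DKIOCREMOVABLE, &removable) < 0) {
		perror("DKIOCREMOVABLE");
		(void) close(fd);
		return (1);
	}
	/* per item 6 above, sd reports 1 for removable media, 0 otherwise */
	(void) printf("%s: removable media: %s\n", dev,
	    removable ? "yes" : "no");
	(void) close(fd);
	return (0);
}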
30995 30995 /*
30996 30996 * sd_tg_rdwr:
30997 30997 * Provides rdwr access for cmlb via sd_tgops. The start_block is
30998 30998  *	in sys block size, reqlength in bytes.
30999 30999 *
31000 31000 */
31001 31001 static int
31002 31002 sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
31003 31003 diskaddr_t start_block, size_t reqlength, void *tg_cookie)
31004 31004 {
31005 31005 struct sd_lun *un;
31006 31006 int path_flag = (int)(uintptr_t)tg_cookie;
31007 31007 char *dkl = NULL;
31008 31008 diskaddr_t real_addr = start_block;
31009 31009 diskaddr_t first_byte, end_block;
31010 31010
31011 31011 size_t buffer_size = reqlength;
31012 31012 int rval = 0;
31013 31013 diskaddr_t cap;
31014 31014 uint32_t lbasize;
31015 31015 sd_ssc_t *ssc;
31016 31016
31017 31017 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
31018 31018 if (un == NULL)
31019 31019 return (ENXIO);
31020 31020
31021 31021 if (cmd != TG_READ && cmd != TG_WRITE)
31022 31022 return (EINVAL);
31023 31023
31024 31024 ssc = sd_ssc_init(un);
31025 31025 mutex_enter(SD_MUTEX(un));
31026 31026 if (un->un_f_tgt_blocksize_is_valid == FALSE) {
31027 31027 mutex_exit(SD_MUTEX(un));
31028 31028 rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
31029 31029 &lbasize, path_flag);
31030 31030 if (rval != 0)
31031 31031 goto done1;
31032 31032 mutex_enter(SD_MUTEX(un));
31033 31033 sd_update_block_info(un, lbasize, cap);
31034 31034 		if (un->un_f_tgt_blocksize_is_valid == FALSE) {
31035 31035 mutex_exit(SD_MUTEX(un));
31036 31036 rval = EIO;
31037 31037 goto done;
31038 31038 }
31039 31039 }
31040 31040
31041 31041 if (NOT_DEVBSIZE(un)) {
31042 31042 /*
31043 31043 * sys_blocksize != tgt_blocksize, need to re-adjust
31044 31044 * blkno and save the index to beginning of dk_label
31045 31045 */
31046 31046 first_byte = SD_SYSBLOCKS2BYTES(start_block);
31047 31047 real_addr = first_byte / un->un_tgt_blocksize;
31048 31048
31049 31049 end_block = (first_byte + reqlength +
31050 31050 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;
31051 31051
31052 31052 /* round up buffer size to multiple of target block size */
31053 31053 buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;
31054 31054
31055 31055 SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
31056 31056 "label_addr: 0x%x allocation size: 0x%x\n",
31057 31057 real_addr, buffer_size);
31058 31058
31059 31059 if (((first_byte % un->un_tgt_blocksize) != 0) ||
31060 31060 (reqlength % un->un_tgt_blocksize) != 0)
31061 31061 /* the request is not aligned */
31062 31062 dkl = kmem_zalloc(buffer_size, KM_SLEEP);
31063 31063 }
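	/*
	 * Worked example of the conversion above (illustrative values,
	 * assuming a 512-byte system block size and a 2048-byte target
	 * block size): start_block == 3 and reqlength == 1024 give
	 * first_byte == 1536, real_addr == 1536 / 2048 == 0, end_block ==
	 * (1536 + 1024 + 2047) / 2048 == 2, and buffer_size ==
	 * (2 - 0) * 2048 == 4096. Since 1536 % 2048 != 0 the request is
	 * unaligned, so it is bounced through the temporary "dkl" buffer.
	 */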
31064 31064
31065 31065 /*
31066 31066 * The MMC standard allows READ CAPACITY to be
31067 31067 * inaccurate by a bounded amount (in the interest of
31068 31068 * response latency). As a result, failed READs are
31069 31069 * commonplace (due to the reading of metadata and not
31070 31070 * data). Depending on the per-Vendor/drive Sense data,
31071 31071 * the failed READ can cause many (unnecessary) retries.
31072 31072 */
31073 31073
31074 31074 if (ISCD(un) && (cmd == TG_READ) &&
31075 31075 (un->un_f_blockcount_is_valid == TRUE) &&
31076 31076 ((start_block == (un->un_blockcount - 1))||
31077 31077 (start_block == (un->un_blockcount - 2)))) {
31078 31078 path_flag = SD_PATH_DIRECT_PRIORITY;
31079 31079 }
31080 31080
31081 31081 mutex_exit(SD_MUTEX(un));
31082 31082 if (cmd == TG_READ) {
31083 31083 		rval = sd_send_scsi_READ(ssc, (dkl != NULL) ? dkl : bufaddr,
31084 31084 buffer_size, real_addr, path_flag);
31085 31085 if (dkl != NULL)
31086 31086 bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
31087 31087 real_addr), bufaddr, reqlength);
31088 31088 } else {
31089 31089 if (dkl) {
31090 31090 rval = sd_send_scsi_READ(ssc, dkl, buffer_size,
31091 31091 real_addr, path_flag);
31092 31092 if (rval) {
31093 31093 goto done1;
31094 31094 }
31095 31095 bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
31096 31096 real_addr), reqlength);
31097 31097 }
31098 31098 		rval = sd_send_scsi_WRITE(ssc, (dkl != NULL) ? dkl : bufaddr,
31099 31099 buffer_size, real_addr, path_flag);
31100 31100 }
31101 31101
31102 31102 done1:
31103 31103 if (dkl != NULL)
31104 31104 kmem_free(dkl, buffer_size);
31105 31105
31106 31106 if (rval != 0) {
31107 31107 if (rval == EIO)
31108 31108 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
31109 31109 else
31110 31110 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
31111 31111 }
31112 31112 done:
31113 31113 sd_ssc_fini(ssc);
31114 31114 return (rval);
31115 31115 }
31116 31116
31117 31117
31118 31118 static int
31119 31119 sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
31120 31120 {
31121 31121
31122 31122 struct sd_lun *un;
31123 31123 diskaddr_t cap;
31124 31124 uint32_t lbasize;
31125 31125 int path_flag = (int)(uintptr_t)tg_cookie;
31126 31126 int ret = 0;
31127 31127
31128 31128 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
31129 31129 if (un == NULL)
31130 31130 return (ENXIO);
31131 31131
31132 31132 switch (cmd) {
31133 31133 case TG_GETPHYGEOM:
31134 31134 case TG_GETVIRTGEOM:
31135 31135 case TG_GETCAPACITY:
31136 31136 case TG_GETBLOCKSIZE:
31137 31137 mutex_enter(SD_MUTEX(un));
31138 31138
31139 31139 if ((un->un_f_blockcount_is_valid == TRUE) &&
31140 31140 (un->un_f_tgt_blocksize_is_valid == TRUE)) {
31141 31141 cap = un->un_blockcount;
31142 31142 lbasize = un->un_tgt_blocksize;
31143 31143 mutex_exit(SD_MUTEX(un));
31144 31144 } else {
31145 31145 sd_ssc_t *ssc;
31146 31146 mutex_exit(SD_MUTEX(un));
31147 31147 ssc = sd_ssc_init(un);
31148 31148 ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
31149 31149 &lbasize, path_flag);
31150 31150 if (ret != 0) {
31151 31151 if (ret == EIO)
31152 31152 sd_ssc_assessment(ssc,
31153 31153 SD_FMT_STATUS_CHECK);
31154 31154 else
31155 31155 sd_ssc_assessment(ssc,
31156 31156 SD_FMT_IGNORE);
31157 31157 sd_ssc_fini(ssc);
31158 31158 return (ret);
31159 31159 }
31160 31160 sd_ssc_fini(ssc);
31161 31161 mutex_enter(SD_MUTEX(un));
31162 31162 sd_update_block_info(un, lbasize, cap);
31163 31163 if ((un->un_f_blockcount_is_valid == FALSE) ||
31164 31164 (un->un_f_tgt_blocksize_is_valid == FALSE)) {
31165 31165 mutex_exit(SD_MUTEX(un));
31166 31166 return (EIO);
31167 31167 }
31168 31168 mutex_exit(SD_MUTEX(un));
31169 31169 }
31170 31170
31171 31171 if (cmd == TG_GETCAPACITY) {
31172 31172 *(diskaddr_t *)arg = cap;
31173 31173 return (0);
31174 31174 }
31175 31175
31176 31176 if (cmd == TG_GETBLOCKSIZE) {
31177 31177 *(uint32_t *)arg = lbasize;
31178 31178 return (0);
31179 31179 }
31180 31180
31181 31181 if (cmd == TG_GETPHYGEOM)
31182 31182 ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
31183 31183 cap, lbasize, path_flag);
31184 31184 else
31185 31185 /* TG_GETVIRTGEOM */
31186 31186 ret = sd_get_virtual_geometry(un,
31187 31187 (cmlb_geom_t *)arg, cap, lbasize);
31188 31188
31189 31189 return (ret);
31190 31190
31191 31191 case TG_GETATTR:
31192 31192 mutex_enter(SD_MUTEX(un));
31193 31193 ((tg_attribute_t *)arg)->media_is_writable =
31194 31194 un->un_f_mmc_writable_media;
31195 31195 ((tg_attribute_t *)arg)->media_is_solid_state =
31196 31196 un->un_f_is_solid_state;
31197 31197 mutex_exit(SD_MUTEX(un));
31198 31198 return (0);
31199 31199 default:
31200 31200 return (ENOTTY);
31201 31201
31202 31202 }
31203 31203 }
31204 31204
31205 31205 /*
31206 31206 * Function: sd_ssc_ereport_post
31207 31207 *
31208 31208  * Description: Will be called when the sd driver needs to post an ereport.
31209 31209 *
31210 31210 * Context: Kernel thread or interrupt context.
31211 31211 */
31212 31212
31213 31213 #define DEVID_IF_KNOWN(d) "devid", DATA_TYPE_STRING, (d) ? (d) : "unknown"
31214 31214
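/*
 * Illustrative note: with devid == NULL, the macro above contributes the
 * name-value pair ("devid", DATA_TYPE_STRING, "unknown") to the
 * scsi_fm_ereport_post() vararg list, so every ereport posted below carries
 * a devid member even when no devid has been registered for the device.
 */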
31215 31215 static void
31216 31216 sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess)
31217 31217 {
31218 31218 int uscsi_path_instance = 0;
31219 31219 uchar_t uscsi_pkt_reason;
31220 31220 uint32_t uscsi_pkt_state;
31221 31221 uint32_t uscsi_pkt_statistics;
31222 31222 uint64_t uscsi_ena;
31223 31223 uchar_t op_code;
31224 31224 uint8_t *sensep;
31225 31225 union scsi_cdb *cdbp;
31226 31226 uint_t cdblen = 0;
31227 31227 uint_t senlen = 0;
31228 31228 struct sd_lun *un;
31229 31229 dev_info_t *dip;
31230 31230 char *devid;
31231 31231 int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON |
31232 31232 SSC_FLAGS_INVALID_STATUS |
31233 31233 SSC_FLAGS_INVALID_SENSE |
31234 31234 SSC_FLAGS_INVALID_DATA;
31235 31235 char assessment[16];
31236 31236
31237 31237 ASSERT(ssc != NULL);
31238 31238 ASSERT(ssc->ssc_uscsi_cmd != NULL);
31239 31239 ASSERT(ssc->ssc_uscsi_info != NULL);
31240 31240
31241 31241 un = ssc->ssc_un;
31242 31242 ASSERT(un != NULL);
31243 31243
31244 31244 dip = un->un_sd->sd_dev;
31245 31245
31246 31246 /*
31247 31247 * Get the devid:
31248 31248 * devid will only be passed to non-transport error reports.
31249 31249 */
31250 31250 devid = DEVI(dip)->devi_devid_str;
31251 31251
31252 31252 /*
31253 31253 * If we are syncing or dumping, the command will not be executed
31254 31254 * so we bypass this situation.
31255 31255 */
31256 31256 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
31257 31257 (un->un_state == SD_STATE_DUMPING))
31258 31258 return;
31259 31259
31260 31260 uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason;
31261 31261 uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance;
31262 31262 uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state;
31263 31263 uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics;
31264 31264 uscsi_ena = ssc->ssc_uscsi_info->ui_ena;
31265 31265
31266 31266 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
31267 31267 cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb;
31268 31268
31269 31269 	/* In rare cases, e.g. DOORLOCK, the cdb could be NULL */
31270 31270 if (cdbp == NULL) {
31271 31271 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
31272 31272 		    "sd_ssc_ereport_post met an empty cdb\n");
31273 31273 return;
31274 31274 }
31275 31275
31276 31276 op_code = cdbp->scc_cmd;
31277 31277
31278 31278 cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen;
31279 31279 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
31280 31280 ssc->ssc_uscsi_cmd->uscsi_rqresid);
31281 31281
31282 31282 if (senlen > 0)
31283 31283 ASSERT(sensep != NULL);
31284 31284
31285 31285 /*
31286 31286 	 * Map drv_assess to the corresponding assessment string.
31287 31287 	 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending
31288 31288 	 * on the sense key returned.
31289 31289 */
31290 31290 switch (drv_assess) {
31291 31291 case SD_FM_DRV_RECOVERY:
31292 31292 (void) sprintf(assessment, "%s", "recovered");
31293 31293 break;
31294 31294 case SD_FM_DRV_RETRY:
31295 31295 (void) sprintf(assessment, "%s", "retry");
31296 31296 break;
31297 31297 case SD_FM_DRV_NOTICE:
31298 31298 (void) sprintf(assessment, "%s", "info");
31299 31299 break;
31300 31300 case SD_FM_DRV_FATAL:
31301 31301 default:
31302 31302 (void) sprintf(assessment, "%s", "unknown");
31303 31303 }
31304 31304 /*
31305 31305 * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered
31306 31306 * command, we will post ereport.io.scsi.cmd.disk.recovered.
31307 31307 * driver-assessment will always be "recovered" here.
31308 31308 */
31309 31309 if (drv_assess == SD_FM_DRV_RECOVERY) {
31310 31310 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
31311 31311 "cmd.disk.recovered", uscsi_ena, devid, NULL,
31312 31312 DDI_NOSLEEP, NULL,
31313 31313 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31314 31314 DEVID_IF_KNOWN(devid),
31315 31315 "driver-assessment", DATA_TYPE_STRING, assessment,
31316 31316 "op-code", DATA_TYPE_UINT8, op_code,
31317 31317 "cdb", DATA_TYPE_UINT8_ARRAY,
31318 31318 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31319 31319 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31320 31320 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
31321 31321 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
31322 31322 NULL);
31323 31323 return;
31324 31324 }
31325 31325
31326 31326 /*
31327 31327 	 * If there is unexpected/undecodable data, we should post
31328 31328 * ereport.io.scsi.cmd.disk.dev.uderr.
31329 31329 * driver-assessment will be set based on parameter drv_assess.
31330 31330 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back.
31331 31331 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered.
31332 31332 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered.
31333 31333 * SSC_FLAGS_INVALID_DATA - invalid data sent back.
31334 31334 */
31335 31335 if (ssc->ssc_flags & ssc_invalid_flags) {
31336 31336 if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) {
31337 31337 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
31338 31338 NULL, "cmd.disk.dev.uderr", uscsi_ena, devid,
31339 31339 NULL, DDI_NOSLEEP, NULL,
31340 31340 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31341 31341 DEVID_IF_KNOWN(devid),
31342 31342 "driver-assessment", DATA_TYPE_STRING,
31343 31343 drv_assess == SD_FM_DRV_FATAL ?
31344 31344 "fail" : assessment,
31345 31345 "op-code", DATA_TYPE_UINT8, op_code,
31346 31346 "cdb", DATA_TYPE_UINT8_ARRAY,
31347 31347 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31348 31348 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31349 31349 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
31350 31350 "pkt-stats", DATA_TYPE_UINT32,
31351 31351 uscsi_pkt_statistics,
31352 31352 "stat-code", DATA_TYPE_UINT8,
31353 31353 ssc->ssc_uscsi_cmd->uscsi_status,
31354 31354 "un-decode-info", DATA_TYPE_STRING,
31355 31355 ssc->ssc_info,
31356 31356 "un-decode-value", DATA_TYPE_UINT8_ARRAY,
31357 31357 senlen, sensep,
31358 31358 NULL);
31359 31359 } else {
31360 31360 /*
31361 31361 			 * For other types of invalid data, the
31362 31362 			 * un-decode-value field is left empty because the
31363 31363 			 * undecodable content can be seen in the upper
31364 31364 			 * level payload or inside un-decode-info.
31365 31365 */
31366 31366 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
31367 31367 NULL,
31368 31368 "cmd.disk.dev.uderr", uscsi_ena, devid,
31369 31369 NULL, DDI_NOSLEEP, NULL,
31370 31370 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31371 31371 DEVID_IF_KNOWN(devid),
31372 31372 "driver-assessment", DATA_TYPE_STRING,
31373 31373 drv_assess == SD_FM_DRV_FATAL ?
31374 31374 "fail" : assessment,
31375 31375 "op-code", DATA_TYPE_UINT8, op_code,
31376 31376 "cdb", DATA_TYPE_UINT8_ARRAY,
31377 31377 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31378 31378 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31379 31379 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
31380 31380 "pkt-stats", DATA_TYPE_UINT32,
31381 31381 uscsi_pkt_statistics,
31382 31382 "stat-code", DATA_TYPE_UINT8,
31383 31383 ssc->ssc_uscsi_cmd->uscsi_status,
31384 31384 "un-decode-info", DATA_TYPE_STRING,
31385 31385 ssc->ssc_info,
31386 31386 "un-decode-value", DATA_TYPE_UINT8_ARRAY,
31387 31387 0, NULL,
31388 31388 NULL);
31389 31389 }
31390 31390 ssc->ssc_flags &= ~ssc_invalid_flags;
31391 31391 return;
31392 31392 }
31393 31393
31394 31394 if (uscsi_pkt_reason != CMD_CMPLT ||
31395 31395 (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) {
31396 31396 /*
31397 31397 * pkt-reason != CMD_CMPLT or SSC_FLAGS_TRAN_ABORT was
31398 31398 		 * set inside sd_start_cmds due to errors (bad packet or
31399 31399 * fatal transport error), we should take it as a
31400 31400 * transport error, so we post ereport.io.scsi.cmd.disk.tran.
31401 31401 * driver-assessment will be set based on drv_assess.
31402 31402 * We will set devid to NULL because it is a transport
31403 31403 * error.
31404 31404 */
31405 31405 if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)
31406 31406 ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT;
31407 31407
31408 31408 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
31409 31409 "cmd.disk.tran", uscsi_ena, NULL, NULL, DDI_NOSLEEP, NULL,
31410 31410 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31411 31411 DEVID_IF_KNOWN(devid),
31412 31412 "driver-assessment", DATA_TYPE_STRING,
31413 31413 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
31414 31414 "op-code", DATA_TYPE_UINT8, op_code,
31415 31415 "cdb", DATA_TYPE_UINT8_ARRAY,
31416 31416 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31417 31417 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31418 31418 "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state,
31419 31419 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
31420 31420 NULL);
31421 31421 } else {
31422 31422 /*
31423 31423 * If we got here, we have a completed command, and we need
31424 31424 * to further investigate the sense data to see what kind
31425 31425 * of ereport we should post.
31426 31426 * No ereport is needed if sense-key is KEY_RECOVERABLE_ERROR
31427 31427 * and asc/ascq is "ATA PASS-THROUGH INFORMATION AVAILABLE".
31428 31428 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr if sense-key is
31429 31429 * KEY_MEDIUM_ERROR.
31430 31430 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise.
31431 31431 * driver-assessment will be set based on the parameter
31432 31432 * drv_assess.
31433 31433 */
31434 31434 if (senlen > 0) {
31435 31435 /*
31436 31436 * Here we have sense data available.
31437 31437 */
31438 31438 uint8_t sense_key = scsi_sense_key(sensep);
31439 31439 uint8_t sense_asc = scsi_sense_asc(sensep);
31440 31440 uint8_t sense_ascq = scsi_sense_ascq(sensep);
31441 31441
31442 31442 if (sense_key == KEY_RECOVERABLE_ERROR &&
31443 31443 sense_asc == 0x00 && sense_ascq == 0x1d)
31444 31444 return;
31445 31445
31446 31446 if (sense_key == KEY_MEDIUM_ERROR) {
31447 31447 /*
31448 31448 * driver-assessment should be "fatal" if
31449 31449 * drv_assess is SD_FM_DRV_FATAL.
31450 31450 */
31451 31451 scsi_fm_ereport_post(un->un_sd,
31452 31452 uscsi_path_instance, NULL,
31453 31453 "cmd.disk.dev.rqs.merr",
31454 31454 uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL,
31455 31455 FM_VERSION, DATA_TYPE_UINT8,
31456 31456 FM_EREPORT_VERS0,
31457 31457 DEVID_IF_KNOWN(devid),
31458 31458 "driver-assessment",
31459 31459 DATA_TYPE_STRING,
31460 31460 drv_assess == SD_FM_DRV_FATAL ?
31461 31461 "fatal" : assessment,
31462 31462 "op-code",
31463 31463 DATA_TYPE_UINT8, op_code,
31464 31464 "cdb",
31465 31465 DATA_TYPE_UINT8_ARRAY, cdblen,
31466 31466 ssc->ssc_uscsi_cmd->uscsi_cdb,
31467 31467 "pkt-reason",
31468 31468 DATA_TYPE_UINT8, uscsi_pkt_reason,
31469 31469 "pkt-state",
31470 31470 DATA_TYPE_UINT8, uscsi_pkt_state,
31471 31471 "pkt-stats",
31472 31472 DATA_TYPE_UINT32,
31473 31473 uscsi_pkt_statistics,
31474 31474 "stat-code",
31475 31475 DATA_TYPE_UINT8,
31476 31476 ssc->ssc_uscsi_cmd->uscsi_status,
31477 31477 "key",
31478 31478 DATA_TYPE_UINT8,
31479 31479 scsi_sense_key(sensep),
31480 31480 "asc",
31481 31481 DATA_TYPE_UINT8,
31482 31482 scsi_sense_asc(sensep),
31483 31483 "ascq",
31484 31484 DATA_TYPE_UINT8,
31485 31485 scsi_sense_ascq(sensep),
31486 31486 "sense-data",
31487 31487 DATA_TYPE_UINT8_ARRAY,
31488 31488 senlen, sensep,
31489 31489 "lba",
31490 31490 DATA_TYPE_UINT64,
31491 31491 ssc->ssc_uscsi_info->ui_lba,
31492 31492 NULL);
31493 31493 } else {
31494 31494 /*
31495 31495 				 * If sense-key == 0x4 (hardware
31496 31496 				 * error), driver-assessment should
31497 31497 * be "fatal" if drv_assess is
31498 31498 * SD_FM_DRV_FATAL.
31499 31499 */
31500 31500 scsi_fm_ereport_post(un->un_sd,
31501 31501 uscsi_path_instance, NULL,
31502 31502 "cmd.disk.dev.rqs.derr",
31503 31503 uscsi_ena, devid,
31504 31504 NULL, DDI_NOSLEEP, NULL,
31505 31505 FM_VERSION,
31506 31506 DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31507 31507 DEVID_IF_KNOWN(devid),
31508 31508 "driver-assessment",
31509 31509 DATA_TYPE_STRING,
31510 31510 drv_assess == SD_FM_DRV_FATAL ?
31511 31511 (sense_key == 0x4 ?
31512 31512 "fatal" : "fail") : assessment,
31513 31513 "op-code",
31514 31514 DATA_TYPE_UINT8, op_code,
31515 31515 "cdb",
31516 31516 DATA_TYPE_UINT8_ARRAY, cdblen,
31517 31517 ssc->ssc_uscsi_cmd->uscsi_cdb,
31518 31518 "pkt-reason",
31519 31519 DATA_TYPE_UINT8, uscsi_pkt_reason,
31520 31520 "pkt-state",
31521 31521 DATA_TYPE_UINT8, uscsi_pkt_state,
31522 31522 "pkt-stats",
31523 31523 DATA_TYPE_UINT32,
31524 31524 uscsi_pkt_statistics,
31525 31525 "stat-code",
31526 31526 DATA_TYPE_UINT8,
31527 31527 ssc->ssc_uscsi_cmd->uscsi_status,
31528 31528 "key",
31529 31529 DATA_TYPE_UINT8,
31530 31530 scsi_sense_key(sensep),
31531 31531 "asc",
31532 31532 DATA_TYPE_UINT8,
31533 31533 scsi_sense_asc(sensep),
31534 31534 "ascq",
31535 31535 DATA_TYPE_UINT8,
31536 31536 scsi_sense_ascq(sensep),
31537 31537 "sense-data",
31538 31538 DATA_TYPE_UINT8_ARRAY,
31539 31539 senlen, sensep,
31540 31540 NULL);
31541 31541 }
31542 31542 } else {
31543 31543 /*
31544 31544 * For stat_code == STATUS_GOOD, this is not a
31545 31545 * hardware error.
31546 31546 */
31547 31547 if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD)
31548 31548 return;
31549 31549
31550 31550 /*
31551 31551 * Post ereport.io.scsi.cmd.disk.dev.serr if we got the
31552 31552 * stat-code but with sense data unavailable.
31553 31553 * driver-assessment will be set based on parameter
31554 31554 * drv_assess.
31555 31555 */
31556 31556 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
31557 31557 NULL,
31558 31558 "cmd.disk.dev.serr", uscsi_ena,
31559 31559 devid, NULL, DDI_NOSLEEP, NULL,
31560 31560 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31561 31561 DEVID_IF_KNOWN(devid),
31562 31562 "driver-assessment", DATA_TYPE_STRING,
31563 31563 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
31564 31564 "op-code", DATA_TYPE_UINT8, op_code,
31565 31565 "cdb",
31566 31566 DATA_TYPE_UINT8_ARRAY,
31567 31567 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31568 31568 "pkt-reason",
31569 31569 DATA_TYPE_UINT8, uscsi_pkt_reason,
31570 31570 "pkt-state",
31571 31571 DATA_TYPE_UINT8, uscsi_pkt_state,
31572 31572 "pkt-stats",
31573 31573 DATA_TYPE_UINT32, uscsi_pkt_statistics,
31574 31574 "stat-code",
31575 31575 DATA_TYPE_UINT8,
31576 31576 ssc->ssc_uscsi_cmd->uscsi_status,
31577 31577 NULL);
31578 31578 }
31579 31579 }
31580 31580 }
31581 31581
31582 31582 /*
31583 31583 * Function: sd_ssc_extract_info
31584 31584 *
31585 31585  * Description: Extract the information available to help generate an ereport.
31586 31586 *
31587 31587 * Context: Kernel thread or interrupt context.
31588 31588 */
31589 31589 static void
31590 31590 sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp,
31591 31591 struct buf *bp, struct sd_xbuf *xp)
31592 31592 {
31593 31593 size_t senlen = 0;
31594 31594 union scsi_cdb *cdbp;
31595 31595 int path_instance;
31596 31596 /*
31597 31597 * Need scsi_cdb_size array to determine the cdb length.
31598 31598 */
31599 31599 extern uchar_t scsi_cdb_size[];
31600 31600
31601 31601 ASSERT(un != NULL);
31602 31602 ASSERT(pktp != NULL);
31603 31603 ASSERT(bp != NULL);
31604 31604 ASSERT(xp != NULL);
31605 31605 ASSERT(ssc != NULL);
31606 31606 ASSERT(mutex_owned(SD_MUTEX(un)));
31607 31607
31608 31608 /*
31609 31609 * Transfer the cdb buffer pointer here.
31610 31610 */
31611 31611 cdbp = (union scsi_cdb *)pktp->pkt_cdbp;
31612 31612
31613 31613 ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)];
31614 31614 ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp;
31615 31615
31616 31616 /*
31617 31617 	 * Transfer the sense data buffer pointer if sense data is available;
31618 31618 * calculate the sense data length first.
31619 31619 */
31620 31620 if ((xp->xb_sense_state & STATE_XARQ_DONE) ||
31621 31621 (xp->xb_sense_state & STATE_ARQ_DONE)) {
31622 31622 /*
31623 31623 		 * For the ARQ case, we enter here.
31624 31624 */
31625 31625 if (xp->xb_sense_state & STATE_XARQ_DONE) {
31626 31626 senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid;
31627 31627 } else {
31628 31628 senlen = SENSE_LENGTH;
31629 31629 }
31630 31630 } else {
31631 31631 /*
31632 31632 		 * For the non-ARQ case, we enter this branch.
31633 31633 */
31634 31634 if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK &&
31635 31635 (xp->xb_sense_state & STATE_XFERRED_DATA)) {
31636 31636 senlen = SENSE_LENGTH - xp->xb_sense_resid;
31637 31637 }
31638 31638
31639 31639 }
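	/*
	 * Illustration (values are examples only): with STATE_XARQ_DONE
	 * set and xb_sense_resid == 200, an extended request sense of
	 * MAX_SENSE_LENGTH bytes yields senlen == MAX_SENSE_LENGTH - 200;
	 * a plain ARQ completion reports the fixed SENSE_LENGTH, and
	 * senlen stays 0 when no sense data was transferred.
	 */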
31640 31640
31641 31641 ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff);
31642 31642 ssc->ssc_uscsi_cmd->uscsi_rqresid = 0;
31643 31643 ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data;
31644 31644
31645 31645 ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
31646 31646
31647 31647 /*
31648 31648 * Only transfer path_instance when scsi_pkt was properly allocated.
31649 31649 */
31650 31650 path_instance = pktp->pkt_path_instance;
31651 31651 if (scsi_pkt_allocated_correctly(pktp) && path_instance)
31652 31652 ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance;
31653 31653 else
31654 31654 ssc->ssc_uscsi_cmd->uscsi_path_instance = 0;
31655 31655
31656 31656 /*
31657 31657 * Copy in the other fields we may need when posting ereport.
31658 31658 */
31659 31659 ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason;
31660 31660 ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state;
31661 31661 ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics;
31662 31662 ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp);
31663 31663
31664 31664 /*
31665 31665 	 * For a partial read/write command, we will not create an ena,
31666 31666 	 * in case a successful command would be recognized as recovered.
31667 31667 */
31668 31668 if ((pktp->pkt_reason == CMD_CMPLT) &&
31669 31669 (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) &&
31670 31670 (senlen == 0)) {
31671 31671 return;
31672 31672 }
31673 31673
31674 31674 /*
31675 31675 * To associate ereports of a single command execution flow, we
31676 31676 * need a shared ena for a specific command.
31677 31677 */
31678 31678 if (xp->xb_ena == 0)
31679 31679 xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
31680 31680 ssc->ssc_uscsi_info->ui_ena = xp->xb_ena;
31681 31681 }
31682 31682
31683 31683
31684 31684 /*
31685 31685 * Function: sd_check_solid_state
31686 31686 *
31687 31687 * Description: Query the optional INQUIRY VPD page 0xb1. If the device
31688 31688 * supports VPD page 0xb1, sd examines the MEDIUM ROTATION
31689 31689 * RATE. If the MEDIUM ROTATION RATE is 1, sd assumes the
31690 31690 * device is a solid state drive.
31691 31691 *
31692 31692 * Context: Kernel thread or interrupt context.
31693 31693 */
31694 31694
31695 31695 static void
31696 31696 sd_check_solid_state(sd_ssc_t *ssc)
31697 31697 {
31698 31698 int rval = 0;
31699 31699 uchar_t *inqb1 = NULL;
31700 31700 size_t inqb1_len = MAX_INQUIRY_SIZE;
31701 31701 size_t inqb1_resid = 0;
31702 31702 struct sd_lun *un;
31703 31703
31704 31704 ASSERT(ssc != NULL);
31705 31705 un = ssc->ssc_un;
31706 31706 ASSERT(un != NULL);
31707 31707 ASSERT(!mutex_owned(SD_MUTEX(un)));
31708 31708
31709 31709 mutex_enter(SD_MUTEX(un));
31710 31710 un->un_f_is_solid_state = FALSE;
31711 31711
31712 31712 if (ISCD(un)) {
31713 31713 mutex_exit(SD_MUTEX(un));
31714 31714 return;
31715 31715 }
31716 31716
31717 31717 if (sd_check_vpd_page_support(ssc) == 0 &&
31718 31718 un->un_vpd_page_mask & SD_VPD_DEV_CHARACTER_PG) {
31719 31719 mutex_exit(SD_MUTEX(un));
31720 31720 /* collect page b1 data */
31721 31721 inqb1 = kmem_zalloc(inqb1_len, KM_SLEEP);
31722 31722
31723 31723 rval = sd_send_scsi_INQUIRY(ssc, inqb1, inqb1_len,
31724 31724 0x01, 0xB1, &inqb1_resid);
31725 31725
31726 31726 if (rval == 0 && (inqb1_len - inqb1_resid > 5)) {
31727 31727 			SD_TRACE(SD_LOG_COMMON, un,
31728 31728 			    "sd_check_solid_state: "
31729 31729 			    "successfully got VPD page: %x "
31730 31730 			    "PAGE LENGTH: %x BYTE 4: %x "
31731 31731 			    "BYTE 5: %x", inqb1[1], inqb1[3], inqb1[4],
31732 31732 			    inqb1[5]);
31733 31733
31734 31734 mutex_enter(SD_MUTEX(un));
31735 31735 /*
31736 31736 * Check the MEDIUM ROTATION RATE. If it is set
31737 31737 * to 1, the device is a solid state drive.
31738 31738 */
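			/*
			 * Per SBC-3, bytes 4-5 of VPD page B1h hold the
			 * rotation rate as a big-endian 16-bit value:
			 * 0x0000 means "not reported", 0x0001 (that is,
			 * inqb1[4] == 0 and inqb1[5] == 1) means a
			 * non-rotating (solid state) medium, and larger
			 * values give the nominal rotation rate in rpm
			 * (e.g. 0x1C20 == 7200 rpm).
			 */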
31739 31739 if (inqb1[4] == 0 && inqb1[5] == 1) {
31740 31740 un->un_f_is_solid_state = TRUE;
31741 31741 /* solid state drives don't need disksort */
31742 31742 un->un_f_disksort_disabled = TRUE;
31743 31743 }
31744 31744 mutex_exit(SD_MUTEX(un));
31745 31745 } else if (rval != 0) {
31746 31746 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
31747 31747 }
31748 31748
31749 31749 kmem_free(inqb1, inqb1_len);
31750 31750 } else {
31751 31751 mutex_exit(SD_MUTEX(un));
31752 31752 }
31753 31753 }
31754 31754
31755 31755 /*
31756 31756 * Function: sd_check_emulation_mode
31757 31757 *
31758 31758  * Description: Check whether the SSD is in emulation mode
31759 31759  *              by issuing READ_CAPACITY_16 to see whether
31760 31760  *              we can get the physical block size of the drive.
31761 31761 *
31762 31762 * Context: Kernel thread or interrupt context.
31763 31763 */
31764 31764
31765 31765 static void
31766 31766 sd_check_emulation_mode(sd_ssc_t *ssc)
31767 31767 {
31768 31768 int rval = 0;
31769 31769 uint64_t capacity;
31770 31770 uint_t lbasize;
31771 31771 uint_t pbsize;
31772 31772 int i;
31773 31773 int devid_len;
31774 31774 struct sd_lun *un;
31775 31775
31776 31776 ASSERT(ssc != NULL);
31777 31777 un = ssc->ssc_un;
31778 31778 ASSERT(un != NULL);
31779 31779 ASSERT(!mutex_owned(SD_MUTEX(un)));
31780 31780
31781 31781 mutex_enter(SD_MUTEX(un));
31782 31782 if (ISCD(un)) {
31783 31783 mutex_exit(SD_MUTEX(un));
31784 31784 return;
31785 31785 }
31786 31786
31787 31787 if (un->un_f_descr_format_supported) {
31788 31788 mutex_exit(SD_MUTEX(un));
31789 31789 rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize,
31790 31790 &pbsize, SD_PATH_DIRECT);
31791 31791 mutex_enter(SD_MUTEX(un));
31792 31792
31793 31793 if (rval != 0) {
31794 31794 un->un_phy_blocksize = DEV_BSIZE;
31795 31795 } else {
31796 31796 if (!ISP2(pbsize % DEV_BSIZE) || pbsize == 0) {
31797 31797 un->un_phy_blocksize = DEV_BSIZE;
31798 31798 } else if (pbsize > un->un_phy_blocksize) {
31799 31799 /*
31800 31800 * Don't reset the physical blocksize
31801 31801 * unless we've detected a larger value.
31802 31802 */
31803 31803 un->un_phy_blocksize = pbsize;
31804 31804 }
31805 31805 }
31806 31806 }
31807 31807
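	/*
	 * Worked example of the checks above, assuming DEV_BSIZE == 512:
	 * a drive reporting pbsize == 4096 has 4096 % DEV_BSIZE == 0, so
	 * it is not rejected, and since 4096 is larger than the current
	 * un_phy_blocksize it becomes the new value. A failed
	 * READ CAPACITY(16) or a reported pbsize of 0 leaves
	 * un_phy_blocksize at DEV_BSIZE.
	 */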
31808 31808 for (i = 0; i < sd_flash_dev_table_size; i++) {
31809 31809 devid_len = (int)strlen(sd_flash_dev_table[i]);
31810 31810 if (sd_sdconf_id_match(un, sd_flash_dev_table[i], devid_len)
31811 31811 == SD_SUCCESS) {
31812 31812 un->un_phy_blocksize = SSD_SECSIZE;
31813 31813 if (un->un_f_is_solid_state &&
31814 31814 un->un_phy_blocksize != un->un_tgt_blocksize)
31815 31815 un->un_f_enable_rmw = TRUE;
31816 31816 }
31817 31817 }
31818 31818
31819 31819 mutex_exit(SD_MUTEX(un));
31820 31820 }