XXXX don't fail device detach when it's physically removed
--- old/usr/src/uts/common/io/scsi/targets/sd.c
+++ new/usr/src/uts/common/io/scsi/targets/sd.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25 /*
26 26 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
27 27 * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
28 28 * Copyright 2012 DEY Storage Systems, Inc. All rights reserved.
29 29 * Copyright 2017 Nexenta Systems, Inc.
30 30 */
31 31 /*
32 32 * Copyright 2011 cyril.galibern@opensvc.com
33 33 */
34 34
35 35 /*
36 36 * SCSI disk target driver.
37 37 */
38 38 #include <sys/scsi/scsi.h>
39 39 #include <sys/dkbad.h>
40 40 #include <sys/dklabel.h>
41 41 #include <sys/dkio.h>
42 42 #include <sys/fdio.h>
43 43 #include <sys/cdio.h>
44 44 #include <sys/mhd.h>
45 45 #include <sys/vtoc.h>
46 46 #include <sys/dktp/fdisk.h>
47 47 #include <sys/kstat.h>
48 48 #include <sys/vtrace.h>
49 49 #include <sys/note.h>
50 50 #include <sys/thread.h>
51 51 #include <sys/proc.h>
52 52 #include <sys/efi_partition.h>
53 53 #include <sys/var.h>
54 54 #include <sys/aio_req.h>
55 55
56 56 #ifdef __lock_lint
57 57 #define _LP64
58 58 #define __amd64
59 59 #endif
60 60
61 61 #if (defined(__fibre))
62 62 /* Note: is there a leadville version of the following? */
63 63 #include <sys/fc4/fcal_linkapp.h>
64 64 #endif
65 65 #include <sys/taskq.h>
66 66 #include <sys/uuid.h>
67 67 #include <sys/byteorder.h>
68 68 #include <sys/sdt.h>
69 69
70 70 #include "sd_xbuf.h"
71 71
72 72 #include <sys/scsi/targets/sddef.h>
73 73 #include <sys/cmlb.h>
74 74 #include <sys/sysevent/eventdefs.h>
75 75 #include <sys/sysevent/dev.h>
76 76
77 77 #include <sys/fm/protocol.h>
78 78
79 79 /*
80 80 * Loadable module info.
81 81 */
82 82 #if (defined(__fibre))
83 83 #define SD_MODULE_NAME "SCSI SSA/FCAL Disk Driver"
84 84 #else /* !__fibre */
85 85 #define SD_MODULE_NAME "SCSI Disk Driver"
86 86 #endif /* !__fibre */
87 87
88 88 /*
89 89 * Define the interconnect type, to allow the driver to distinguish
90 90 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
91 91 *
92 92 * This is really for backward compatibility. In the future, the driver
93 93 * should actually check the "interconnect-type" property as reported by
94 94 * the HBA; however at present this property is not defined by all HBAs,
95 95 * so we will use this #define (1) to permit the driver to run in
96 96 * backward-compatibility mode; and (2) to print a notification message
97 97 * if an FC HBA does not support the "interconnect-type" property. The
98 98 * behavior of the driver will be to assume parallel SCSI behaviors unless
99 99 * the "interconnect-type" property is defined by the HBA **AND** has a
100 100 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
101 101 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
102 102 * Channel behaviors (as per the old ssd). (Note that the
103 103 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
104 104 * will result in the driver assuming parallel SCSI behaviors.)
105 105 *
106 106 * (see common/sys/scsi/impl/services.h)
107 107 *
108 108 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
109 109 * since some FC HBAs may already support that, and there is some code in
110 110 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
111 111 * default would confuse that code, and besides things should work fine
112 112 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
113 113 * "interconnect-type" property.
114 114 *
115 115 */
116 116 #if (defined(__fibre))
117 117 #define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_FIBRE
118 118 #else
119 119 #define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_PARALLEL
120 120 #endif
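/*
 * Illustrative sketch (not part of sd.c): how a target driver can query
 * the HBA for the "interconnect-type" capability described above and
 * fall back to the compile-time default when the HBA does not define it.
 * The real decision is made during unit attach; the helper name here is
 * hypothetical.
 */
static int
xx_get_interconnect_type(struct scsi_address *ap)
{
	/* scsi_ifgetcap() returns -1 when the HBA lacks the capability */
	switch (scsi_ifgetcap(ap, "interconnect-type", 1)) {
	case INTERCONNECT_FIBRE:
		return (SD_INTERCONNECT_FIBRE);
	case INTERCONNECT_SSA:
		return (SD_INTERCONNECT_SSA);
	case INTERCONNECT_FABRIC:
		return (SD_INTERCONNECT_FABRIC);
	default:
		/* undefined, INTERCONNECT_1394, INTERCONNECT_USB, ... */
		return (SD_DEFAULT_INTERCONNECT_TYPE);
	}
}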
121 121
122 122 /*
123 123 * The name of the driver, established from the module name in _init.
124 124 */
125 125 static char *sd_label = NULL;
126 126
127 127 /*
128 128 * Driver name is unfortunately prefixed on some driver.conf properties.
129 129 */
130 130 #if (defined(__fibre))
131 131 #define sd_max_xfer_size ssd_max_xfer_size
132 132 #define sd_config_list ssd_config_list
133 133 static char *sd_max_xfer_size = "ssd_max_xfer_size";
134 134 static char *sd_config_list = "ssd-config-list";
135 135 #else
136 136 static char *sd_max_xfer_size = "sd_max_xfer_size";
137 137 static char *sd_config_list = "sd-config-list";
138 138 #endif
139 139
140 140 /*
141 141 * Driver global variables
142 142 */
143 143
144 144 #if (defined(__fibre))
145 145 /*
146 146 * These #defines are to avoid namespace collisions that occur because this
147 147 * code is currently used to compile two separate driver modules: sd and ssd.
148 148 * All global variables need to be treated this way (even if declared static)
149 149 * in order to allow the debugger to resolve the names properly.
150 150 * It is anticipated that in the near future the ssd module will be obsoleted,
151 151 * at which time this namespace issue should go away.
152 152 */
153 153 #define sd_state ssd_state
154 154 #define sd_io_time ssd_io_time
155 155 #define sd_failfast_enable ssd_failfast_enable
156 156 #define sd_ua_retry_count ssd_ua_retry_count
157 157 #define sd_report_pfa ssd_report_pfa
158 158 #define sd_max_throttle ssd_max_throttle
159 159 #define sd_min_throttle ssd_min_throttle
160 160 #define sd_rot_delay ssd_rot_delay
161 161
162 162 #define sd_retry_on_reservation_conflict \
163 163 ssd_retry_on_reservation_conflict
164 164 #define sd_reinstate_resv_delay ssd_reinstate_resv_delay
165 165 #define sd_resv_conflict_name ssd_resv_conflict_name
166 166
167 167 #define sd_component_mask ssd_component_mask
168 168 #define sd_level_mask ssd_level_mask
169 169 #define sd_debug_un ssd_debug_un
170 170 #define sd_error_level ssd_error_level
171 171
172 172 #define sd_xbuf_active_limit ssd_xbuf_active_limit
173 173 #define sd_xbuf_reserve_limit ssd_xbuf_reserve_limit
174 174
175 175 #define sd_tr ssd_tr
176 176 #define sd_reset_throttle_timeout ssd_reset_throttle_timeout
177 177 #define sd_qfull_throttle_timeout ssd_qfull_throttle_timeout
178 178 #define sd_qfull_throttle_enable ssd_qfull_throttle_enable
179 179 #define sd_check_media_time ssd_check_media_time
180 180 #define sd_wait_cmds_complete ssd_wait_cmds_complete
181 181 #define sd_label_mutex ssd_label_mutex
182 182 #define sd_detach_mutex ssd_detach_mutex
183 183 #define sd_log_buf ssd_log_buf
184 184 #define sd_log_mutex ssd_log_mutex
185 185
186 186 #define sd_disk_table ssd_disk_table
187 187 #define sd_disk_table_size ssd_disk_table_size
188 188 #define sd_sense_mutex ssd_sense_mutex
189 189 #define sd_cdbtab ssd_cdbtab
190 190
191 191 #define sd_cb_ops ssd_cb_ops
192 192 #define sd_ops ssd_ops
193 193 #define sd_additional_codes ssd_additional_codes
194 194 #define sd_tgops ssd_tgops
195 195
196 196 #define sd_minor_data ssd_minor_data
197 197 #define sd_minor_data_efi ssd_minor_data_efi
198 198
199 199 #define sd_tq ssd_tq
200 200 #define sd_wmr_tq ssd_wmr_tq
201 201 #define sd_taskq_name ssd_taskq_name
202 202 #define sd_wmr_taskq_name ssd_wmr_taskq_name
203 203 #define sd_taskq_minalloc ssd_taskq_minalloc
204 204 #define sd_taskq_maxalloc ssd_taskq_maxalloc
205 205
206 206 #define sd_dump_format_string ssd_dump_format_string
207 207
208 208 #define sd_iostart_chain ssd_iostart_chain
209 209 #define sd_iodone_chain ssd_iodone_chain
210 210
211 211 #define sd_pm_idletime ssd_pm_idletime
212 212
213 213 #define sd_force_pm_supported ssd_force_pm_supported
214 214
215 215 #define sd_dtype_optical_bind ssd_dtype_optical_bind
216 216
217 217 #define sd_ssc_init ssd_ssc_init
218 218 #define sd_ssc_send ssd_ssc_send
219 219 #define sd_ssc_fini ssd_ssc_fini
220 220 #define sd_ssc_assessment ssd_ssc_assessment
221 221 #define sd_ssc_post ssd_ssc_post
222 222 #define sd_ssc_print ssd_ssc_print
223 223 #define sd_ssc_ereport_post ssd_ssc_ereport_post
224 224 #define sd_ssc_set_info ssd_ssc_set_info
225 225 #define sd_ssc_extract_info ssd_ssc_extract_info
226 226
227 227 #endif
228 228
229 229 #ifdef SDDEBUG
230 230 int sd_force_pm_supported = 0;
231 231 #endif /* SDDEBUG */
232 232
233 233 void *sd_state = NULL;
234 234 int sd_io_time = SD_IO_TIME;
235 235 int sd_failfast_enable = 1;
236 236 int sd_ua_retry_count = SD_UA_RETRY_COUNT;
237 237 int sd_report_pfa = 1;
238 238 int sd_max_throttle = SD_MAX_THROTTLE;
239 239 int sd_min_throttle = SD_MIN_THROTTLE;
240 240 int sd_rot_delay = 4; /* Default 4ms Rotation delay */
241 241 int sd_qfull_throttle_enable = TRUE;
242 242
243 243 int sd_retry_on_reservation_conflict = 1;
244 244 int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
245 245 _NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))
246 246
247 247 static int sd_dtype_optical_bind = -1;
248 248
249 249 /* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
250 250 static char *sd_resv_conflict_name = "sd_retry_on_reservation_conflict";
251 251
252 252 /*
253 253 * Global data for debug logging. To enable debug printing, sd_component_mask
254 254 * and sd_level_mask should be set to the desired bit patterns as outlined in
255 255 * sddef.h.
256 256 */
257 257 uint_t sd_component_mask = 0x0;
258 258 uint_t sd_level_mask = 0x0;
259 259 struct sd_lun *sd_debug_un = NULL;
260 260 uint_t sd_error_level = SCSI_ERR_RETRYABLE;
261 261
262 262 /* Note: these may go away in the future... */
263 263 static uint32_t sd_xbuf_active_limit = 512;
264 264 static uint32_t sd_xbuf_reserve_limit = 16;
265 265
266 266 static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, 0, 0, 0 };
267 267
268 268 /*
269 269 * Timer value used to reset the throttle after it has been reduced
270 270 * (typically in response to TRAN_BUSY or STATUS_QFULL)
271 271 */
272 272 static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT;
273 273 static int sd_qfull_throttle_timeout = SD_QFULL_THROTTLE_TIMEOUT;
274 274
275 275 /*
276 276 * Interval value associated with the media change scsi watch.
277 277 */
278 278 static int sd_check_media_time = 3000000;
279 279
280 280 /*
281 281 * Wait value used for in-progress operations during a DDI_SUSPEND
282 282 */
283 283 static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE;
284 284
285 285 /*
286 286 * sd_label_mutex protects a static buffer used in the disk label
287 287 * component of the driver
288 288 */
289 289 static kmutex_t sd_label_mutex;
290 290
291 291 /*
292 292 * sd_detach_mutex protects un_layer_count, un_detach_count, and
293 293 * un_opens_in_progress in the sd_lun structure.
294 294 */
295 295 static kmutex_t sd_detach_mutex;
296 296
297 297 _NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
298 298 sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))
299 299
300 300 /*
301 301 * Global buffer and mutex for debug logging
302 302 */
303 303 static char sd_log_buf[1024];
304 304 static kmutex_t sd_log_mutex;
305 305
306 306 /*
307 307 * Structs and globals for recording attached lun information.
308 308 * This maintains a chain. Each node in the chain represents a SCSI controller.
309 309 * The structure records the number of luns attached to each target connected
310 310 * with the controller.
311 311 * For parallel SCSI devices only.
312 312 */
313 313 struct sd_scsi_hba_tgt_lun {
314 314 struct sd_scsi_hba_tgt_lun *next;
315 315 dev_info_t *pdip;
316 316 int nlun[NTARGETS_WIDE];
317 317 };
318 318
319 319 /*
320 320 * Flag to indicate the lun is attached or detached
321 321 */
322 322 #define SD_SCSI_LUN_ATTACH 0
323 323 #define SD_SCSI_LUN_DETACH 1
324 324
325 325 static kmutex_t sd_scsi_target_lun_mutex;
326 326 static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL;
327 327
328 328 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
329 329 sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))
330 330
331 331 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
332 332 sd_scsi_target_lun_head))
333 333
334 334 /*
335 335 * "Smart" Probe Caching structs, globals, #defines, etc.
337 337 * For parallel SCSI and non-self-identifying devices only.
337 337 */
338 338
339 339 /*
340 340 * The following resources and routines are implemented to support
341 341 * "smart" probing, which caches the scsi_probe() results in an array,
342 342 * in order to help avoid long probe times.
343 343 */
344 344 struct sd_scsi_probe_cache {
345 345 struct sd_scsi_probe_cache *next;
346 346 dev_info_t *pdip;
347 347 int cache[NTARGETS_WIDE];
348 348 };
349 349
350 350 static kmutex_t sd_scsi_probe_cache_mutex;
351 351 static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;
352 352
353 353 /*
354 354 * Really we only need protection on the head of the linked list, but
355 355 * better safe than sorry.
356 356 */
357 357 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
358 358 sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))
359 359
360 360 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
361 361 sd_scsi_probe_cache_head))
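/*
 * Illustrative sketch (simplified, not the sd.c implementation): with the
 * cache above, a target that already failed to respond can be rejected
 * immediately instead of paying the full probe cost again. The helper
 * name is hypothetical; locking via sd_scsi_probe_cache_mutex is omitted
 * for brevity.
 */
static int
xx_probe_with_cache(struct sd_scsi_probe_cache *cp, int tgt,
    struct scsi_device *devp, int (*waitfn)())
{
	if (cp->cache[tgt] == SCSIPROBE_NORESP)
		return (SCSIPROBE_NORESP);	/* skip the slow probe */
	cp->cache[tgt] = scsi_probe(devp, waitfn);
	return (cp->cache[tgt]);
}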
362 362
363 363 /*
364 364 * Power attribute table
365 365 */
366 366 static sd_power_attr_ss sd_pwr_ss = {
367 367 { "NAME=spindle-motor", "0=off", "1=on", NULL },
368 368 {0, 100},
369 369 {30, 0},
370 370 {20000, 0}
371 371 };
372 372
373 373 static sd_power_attr_pc sd_pwr_pc = {
374 374 { "NAME=spindle-motor", "0=stopped", "1=standby", "2=idle",
375 375 "3=active", NULL },
376 376 {0, 0, 0, 100},
377 377 {90, 90, 20, 0},
378 378 {15000, 15000, 1000, 0}
379 379 };
380 380
381 381 /*
382 382 * Power level to power condition
383 383 */
384 384 static int sd_pl2pc[] = {
385 385 SD_TARGET_START_VALID,
386 386 SD_TARGET_STANDBY,
387 387 SD_TARGET_IDLE,
388 388 SD_TARGET_ACTIVE
389 389 };
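/*
 * (For reference: sd_pl2pc is indexed by the PM component power level
 * declared in sd_pwr_pc above -- 0=stopped through 3=active -- and
 * yields the SCSI power condition to request from the target.)
 */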
390 390
391 391 /*
392 392 * Vendor specific data name property declarations
393 393 */
394 394
395 395 #if defined(__fibre) || defined(__i386) || defined(__amd64)
396 396
397 397 static sd_tunables seagate_properties = {
398 398 SEAGATE_THROTTLE_VALUE,
399 399 0,
400 400 0,
401 401 0,
402 402 0,
403 403 0,
404 404 0,
405 405 0,
406 406 0
407 407 };
408 408
409 409
410 410 static sd_tunables fujitsu_properties = {
411 411 FUJITSU_THROTTLE_VALUE,
412 412 0,
413 413 0,
414 414 0,
415 415 0,
416 416 0,
417 417 0,
418 418 0,
419 419 0
420 420 };
421 421
422 422 static sd_tunables ibm_properties = {
423 423 IBM_THROTTLE_VALUE,
424 424 0,
425 425 0,
426 426 0,
427 427 0,
428 428 0,
429 429 0,
430 430 0,
431 431 0
432 432 };
433 433
434 434 static sd_tunables purple_properties = {
435 435 PURPLE_THROTTLE_VALUE,
436 436 0,
437 437 0,
438 438 PURPLE_BUSY_RETRIES,
439 439 PURPLE_RESET_RETRY_COUNT,
440 440 PURPLE_RESERVE_RELEASE_TIME,
441 441 0,
442 442 0,
443 443 0
444 444 };
445 445
446 446 static sd_tunables sve_properties = {
447 447 SVE_THROTTLE_VALUE,
448 448 0,
449 449 0,
450 450 SVE_BUSY_RETRIES,
451 451 SVE_RESET_RETRY_COUNT,
452 452 SVE_RESERVE_RELEASE_TIME,
453 453 SVE_MIN_THROTTLE_VALUE,
454 454 SVE_DISKSORT_DISABLED_FLAG,
455 455 0
456 456 };
457 457
458 458 static sd_tunables maserati_properties = {
459 459 0,
460 460 0,
461 461 0,
462 462 0,
463 463 0,
464 464 0,
465 465 0,
466 466 MASERATI_DISKSORT_DISABLED_FLAG,
467 467 MASERATI_LUN_RESET_ENABLED_FLAG
468 468 };
469 469
470 470 static sd_tunables pirus_properties = {
471 471 PIRUS_THROTTLE_VALUE,
472 472 0,
473 473 PIRUS_NRR_COUNT,
474 474 PIRUS_BUSY_RETRIES,
475 475 PIRUS_RESET_RETRY_COUNT,
476 476 0,
477 477 PIRUS_MIN_THROTTLE_VALUE,
478 478 PIRUS_DISKSORT_DISABLED_FLAG,
479 479 PIRUS_LUN_RESET_ENABLED_FLAG
480 480 };
481 481
482 482 #endif
483 483
484 484 #if (defined(__sparc) && !defined(__fibre)) || \
485 485 (defined(__i386) || defined(__amd64))
486 486
487 487
488 488 static sd_tunables elite_properties = {
489 489 ELITE_THROTTLE_VALUE,
490 490 0,
491 491 0,
492 492 0,
493 493 0,
494 494 0,
495 495 0,
496 496 0,
497 497 0
498 498 };
499 499
500 500 static sd_tunables st31200n_properties = {
501 501 ST31200N_THROTTLE_VALUE,
502 502 0,
503 503 0,
504 504 0,
505 505 0,
506 506 0,
507 507 0,
508 508 0,
509 509 0
510 510 };
511 511
512 512 #endif /* Fibre or not */
513 513
514 514 static sd_tunables lsi_properties_scsi = {
515 515 LSI_THROTTLE_VALUE,
516 516 0,
517 517 LSI_NOTREADY_RETRIES,
518 518 0,
519 519 0,
520 520 0,
521 521 0,
522 522 0,
523 523 0
524 524 };
525 525
526 526 static sd_tunables symbios_properties = {
527 527 SYMBIOS_THROTTLE_VALUE,
528 528 0,
529 529 SYMBIOS_NOTREADY_RETRIES,
530 530 0,
531 531 0,
532 532 0,
533 533 0,
534 534 0,
535 535 0
536 536 };
537 537
538 538 static sd_tunables lsi_properties = {
539 539 0,
540 540 0,
541 541 LSI_NOTREADY_RETRIES,
542 542 0,
543 543 0,
544 544 0,
545 545 0,
546 546 0,
547 547 0
548 548 };
549 549
550 550 static sd_tunables lsi_oem_properties = {
551 551 0,
552 552 0,
553 553 LSI_OEM_NOTREADY_RETRIES,
554 554 0,
555 555 0,
556 556 0,
557 557 0,
558 558 0,
559 559 0,
560 560 1
561 561 };
562 562
563 563
564 564
565 565 #if (defined(SD_PROP_TST))
566 566
567 567 #define SD_TST_CTYPE_VAL CTYPE_CDROM
568 568 #define SD_TST_THROTTLE_VAL 16
569 569 #define SD_TST_NOTREADY_VAL 12
570 570 #define SD_TST_BUSY_VAL 60
571 571 #define SD_TST_RST_RETRY_VAL 36
572 572 #define SD_TST_RSV_REL_TIME 60
573 573
574 574 static sd_tunables tst_properties = {
575 575 SD_TST_THROTTLE_VAL,
576 576 SD_TST_CTYPE_VAL,
577 577 SD_TST_NOTREADY_VAL,
578 578 SD_TST_BUSY_VAL,
579 579 SD_TST_RST_RETRY_VAL,
580 580 SD_TST_RSV_REL_TIME,
581 581 0,
582 582 0,
583 583 0
584 584 };
585 585 #endif
586 586
587 587 /* This is similar to the ANSI toupper implementation */
588 588 #define SD_TOUPPER(C) (((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))
589 589
590 590 /*
591 591 * Static Driver Configuration Table
592 592 *
593 593 * This is the table of disks which need throttle adjustment (or, perhaps
594 594 * something else as defined by the flags at a future time.) device_id
595 595 * is a string consisting of concatenated vid (vendor), pid (product/model)
596 596 * and revision strings as defined in the scsi_inquiry structure. Offsets of
597 597 * the parts of the string are as defined by the sizes in the scsi_inquiry
598 598 * structure. Device type is searched as far as the device_id string is
599 599 * defined. Flags defines which values are to be set in the driver from the
600 600 * properties list.
601 601 *
602 602 * Entries below which begin and end with a "*" are a special case.
603 603 * These do not have a specific vendor, and the string which follows
604 604 * can appear anywhere in the 16 byte PID portion of the inquiry data.
605 605 *
606 606 * Entries below which begin and end with a " " (blank) are a special
607 607 * case. The comparison function will treat multiple consecutive blanks
608 608 * as equivalent to a single blank. For example, this causes a
609 609 * sd_disk_table entry of " NEC CDROM " to match a device's id string
610 610 * of "NEC CDROM".
611 611 *
612 612 * Note: The MD21 controller type has been obsoleted.
613 613 * ST318202F is a Legacy device
614 614 * MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
615 615 * made with an FC connection. The entries here are a legacy.
616 616 */
617 617 static sd_disk_config_t sd_disk_table[] = {
618 618 #if defined(__fibre) || defined(__i386) || defined(__amd64)
619 619 { "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
620 620 { "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
621 621 { "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
622 622 { "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
623 623 { "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
624 624 { "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
625 625 { "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
626 626 { "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
627 627 { "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
628 628 { "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
629 629 { "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
630 630 { "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
631 631 { "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
632 632 { "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
633 633 { "FUJITSU MAG3091F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
634 634 { "FUJITSU MAG3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
635 635 { "FUJITSU MAA3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
636 636 { "FUJITSU MAF3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
637 637 { "FUJITSU MAL3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
638 638 { "FUJITSU MAL3738F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
639 639 { "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
640 640 { "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
641 641 { "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
642 642 { "IBM DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
643 643 { "IBM DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
644 644 { "IBM IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
645 645 { "IBM IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
646 646 { "IBM 1724-100", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
647 647 { "IBM 1726-2xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
648 648 { "IBM 1726-22x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
649 649 { "IBM 1726-4xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
650 650 { "IBM 1726-42x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
651 651 { "IBM 1726-3xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
652 652 { "IBM 3526", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
653 653 { "IBM 3542", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
654 654 { "IBM 3552", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
655 655 { "IBM 1722", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
656 656 { "IBM 1742", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
657 657 { "IBM 1815", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
658 658 { "IBM FAStT", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
659 659 { "IBM 1814", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
660 660 { "IBM 1814-200", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
661 661 { "IBM 1818", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
662 662 { "DELL MD3000", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
663 663 { "DELL MD3000i", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
664 664 { "LSI INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
665 665 { "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
666 666 { "SGI TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
667 667 { "SGI IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
668 668 { "*CSM100_*", SD_CONF_BSET_NRR_COUNT |
669 669 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
670 670 { "*CSM200_*", SD_CONF_BSET_NRR_COUNT |
671 671 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
672 672 { "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
673 673 { "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
674 674 { "SUN T3", SD_CONF_BSET_THROTTLE |
675 675 SD_CONF_BSET_BSY_RETRY_COUNT|
676 676 SD_CONF_BSET_RST_RETRIES|
677 677 SD_CONF_BSET_RSV_REL_TIME,
678 678 &purple_properties },
679 679 { "SUN SESS01", SD_CONF_BSET_THROTTLE |
680 680 SD_CONF_BSET_BSY_RETRY_COUNT|
681 681 SD_CONF_BSET_RST_RETRIES|
682 682 SD_CONF_BSET_RSV_REL_TIME|
683 683 SD_CONF_BSET_MIN_THROTTLE|
684 684 SD_CONF_BSET_DISKSORT_DISABLED,
685 685 &sve_properties },
686 686 { "SUN T4", SD_CONF_BSET_THROTTLE |
687 687 SD_CONF_BSET_BSY_RETRY_COUNT|
688 688 SD_CONF_BSET_RST_RETRIES|
689 689 SD_CONF_BSET_RSV_REL_TIME,
690 690 &purple_properties },
691 691 { "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
692 692 SD_CONF_BSET_LUN_RESET_ENABLED,
693 693 &maserati_properties },
694 694 { "SUN SE6920", SD_CONF_BSET_THROTTLE |
695 695 SD_CONF_BSET_NRR_COUNT|
696 696 SD_CONF_BSET_BSY_RETRY_COUNT|
697 697 SD_CONF_BSET_RST_RETRIES|
698 698 SD_CONF_BSET_MIN_THROTTLE|
699 699 SD_CONF_BSET_DISKSORT_DISABLED|
700 700 SD_CONF_BSET_LUN_RESET_ENABLED,
701 701 &pirus_properties },
702 702 { "SUN SE6940", SD_CONF_BSET_THROTTLE |
703 703 SD_CONF_BSET_NRR_COUNT|
704 704 SD_CONF_BSET_BSY_RETRY_COUNT|
705 705 SD_CONF_BSET_RST_RETRIES|
706 706 SD_CONF_BSET_MIN_THROTTLE|
707 707 SD_CONF_BSET_DISKSORT_DISABLED|
708 708 SD_CONF_BSET_LUN_RESET_ENABLED,
709 709 &pirus_properties },
710 710 { "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE |
711 711 SD_CONF_BSET_NRR_COUNT|
712 712 SD_CONF_BSET_BSY_RETRY_COUNT|
713 713 SD_CONF_BSET_RST_RETRIES|
714 714 SD_CONF_BSET_MIN_THROTTLE|
715 715 SD_CONF_BSET_DISKSORT_DISABLED|
716 716 SD_CONF_BSET_LUN_RESET_ENABLED,
717 717 &pirus_properties },
718 718 { "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE |
719 719 SD_CONF_BSET_NRR_COUNT|
720 720 SD_CONF_BSET_BSY_RETRY_COUNT|
721 721 SD_CONF_BSET_RST_RETRIES|
722 722 SD_CONF_BSET_MIN_THROTTLE|
723 723 SD_CONF_BSET_DISKSORT_DISABLED|
724 724 SD_CONF_BSET_LUN_RESET_ENABLED,
725 725 &pirus_properties },
726 726 { "SUN PSX1000", SD_CONF_BSET_THROTTLE |
727 727 SD_CONF_BSET_NRR_COUNT|
728 728 SD_CONF_BSET_BSY_RETRY_COUNT|
729 729 SD_CONF_BSET_RST_RETRIES|
730 730 SD_CONF_BSET_MIN_THROTTLE|
731 731 SD_CONF_BSET_DISKSORT_DISABLED|
732 732 SD_CONF_BSET_LUN_RESET_ENABLED,
733 733 &pirus_properties },
734 734 { "SUN SE6330", SD_CONF_BSET_THROTTLE |
735 735 SD_CONF_BSET_NRR_COUNT|
736 736 SD_CONF_BSET_BSY_RETRY_COUNT|
737 737 SD_CONF_BSET_RST_RETRIES|
738 738 SD_CONF_BSET_MIN_THROTTLE|
739 739 SD_CONF_BSET_DISKSORT_DISABLED|
740 740 SD_CONF_BSET_LUN_RESET_ENABLED,
741 741 &pirus_properties },
742 742 { "SUN STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
743 743 { "SUN SUN_6180", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
744 744 { "STK OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
745 745 { "STK OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
746 746 { "STK BladeCtlr", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
747 747 { "STK FLEXLINE", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
748 748 { "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
749 749 #endif /* fibre or NON-sparc platforms */
750 750 #if ((defined(__sparc) && !defined(__fibre)) || \
751 751 (defined(__i386) || defined(__amd64)))
752 752 { "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
753 753 { "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
754 754 { "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
755 755 { "CONNER CP30540", SD_CONF_BSET_NOCACHE, NULL },
756 756 { "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
757 757 { "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
758 758 { "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
759 759 { "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
760 760 { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
761 761 { "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
762 762 { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
763 763 { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
764 764 { "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
765 765 &symbios_properties },
766 766 { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
767 767 &lsi_properties_scsi },
768 768 #if defined(__i386) || defined(__amd64)
769 769 { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
770 770 | SD_CONF_BSET_READSUB_BCD
771 771 | SD_CONF_BSET_READ_TOC_ADDR_BCD
772 772 | SD_CONF_BSET_NO_READ_HEADER
773 773 | SD_CONF_BSET_READ_CD_XD4), NULL },
774 774
775 775 { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
776 776 | SD_CONF_BSET_READSUB_BCD
777 777 | SD_CONF_BSET_READ_TOC_ADDR_BCD
778 778 | SD_CONF_BSET_NO_READ_HEADER
779 779 | SD_CONF_BSET_READ_CD_XD4), NULL },
780 780 #endif /* __i386 || __amd64 */
781 781 #endif /* sparc NON-fibre or NON-sparc platforms */
782 782
783 783 #if (defined(SD_PROP_TST))
784 784 { "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE
785 785 | SD_CONF_BSET_CTYPE
786 786 | SD_CONF_BSET_NRR_COUNT
787 787 | SD_CONF_BSET_FAB_DEVID
788 788 | SD_CONF_BSET_NOCACHE
789 789 | SD_CONF_BSET_BSY_RETRY_COUNT
790 790 | SD_CONF_BSET_PLAYMSF_BCD
791 791 | SD_CONF_BSET_READSUB_BCD
792 792 | SD_CONF_BSET_READ_TOC_TRK_BCD
793 793 | SD_CONF_BSET_READ_TOC_ADDR_BCD
794 794 | SD_CONF_BSET_NO_READ_HEADER
795 795 | SD_CONF_BSET_READ_CD_XD4
796 796 | SD_CONF_BSET_RST_RETRIES
797 797 | SD_CONF_BSET_RSV_REL_TIME
798 798 | SD_CONF_BSET_TUR_CHECK), &tst_properties},
799 799 #endif
800 800 };
801 801
802 802 static const int sd_disk_table_size =
803 803 sizeof (sd_disk_table) / sizeof (sd_disk_config_t);
804 804
805 805 /*
806 806 * Emulation mode disk drive VID/PID table
807 807 */
808 808 static char sd_flash_dev_table[][25] = {
809 809 "ATA MARVELL SD88SA02",
810 810 "MARVELL SD88SA02",
811 811 "TOSHIBA THNSNV05",
812 812 };
813 813
814 814 static const int sd_flash_dev_table_size =
815 815 sizeof (sd_flash_dev_table) / sizeof (sd_flash_dev_table[0]);
816 816
817 817 #define SD_INTERCONNECT_PARALLEL 0
818 818 #define SD_INTERCONNECT_FABRIC 1
819 819 #define SD_INTERCONNECT_FIBRE 2
820 820 #define SD_INTERCONNECT_SSA 3
821 821 #define SD_INTERCONNECT_SATA 4
822 822 #define SD_INTERCONNECT_SAS 5
823 823
824 824 #define SD_IS_PARALLEL_SCSI(un) \
825 825 ((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
826 826 #define SD_IS_SERIAL(un) \
827 827 (((un)->un_interconnect_type == SD_INTERCONNECT_SATA) ||\
828 828 ((un)->un_interconnect_type == SD_INTERCONNECT_SAS))
829 829
830 830 /*
831 831 * Definitions used by device id registration routines
832 832 */
833 833 #define VPD_HEAD_OFFSET 3 /* size of header for vpd page */
834 834 #define VPD_PAGE_LENGTH 3 /* offset for page length data */
835 835 #define VPD_MODE_PAGE 1 /* offset into vpd page for "page code" */
836 836
837 837 static kmutex_t sd_sense_mutex = {0};
838 838
839 839 /*
840 840 * Macros for updates of the driver state
841 841 */
842 842 #define New_state(un, s) \
843 843 (un)->un_last_state = (un)->un_state, (un)->un_state = (s)
844 844 #define Restore_state(un) \
845 845 { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
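/*
 * E.g. New_state(un, SD_STATE_SUSPENDED) saves the previous state in
 * un_last_state; a later Restore_state(un) swaps back to it.
 */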
846 846
847 847 static struct sd_cdbinfo sd_cdbtab[] = {
848 848 { CDB_GROUP0, 0x00, 0x1FFFFF, 0xFF, },
849 849 { CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, },
850 850 { CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },
851 851 { CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
852 852 };
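/*
 * (Columns above, for reference: CDB length in bytes, opcode group
 * code, largest addressable LBA, and maximum transfer length in
 * blocks -- e.g. a GROUP0 CDB carries a 21-bit LBA and an 8-bit
 * block count.)
 */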
853 853
854 854 /*
855 855 * Specifies the number of seconds that must have elapsed since the last
856 856 * command completed for a device to be declared idle to the PM framework.
857 857 */
858 858 static int sd_pm_idletime = 1;
859 859
860 860 /*
861 861 * Internal function prototypes
862 862 */
863 863
864 864 #if (defined(__fibre))
865 865 /*
866 866 * These #defines are to avoid namespace collisions that occur because this
867 867 * code is currently used to compile two separate driver modules: sd and ssd.
868 868 * All function names need to be treated this way (even if declared static)
869 869 * in order to allow the debugger to resolve the names properly.
870 870 * It is anticipated that in the near future the ssd module will be obsoleted,
871 871 * at which time this ugliness should go away.
872 872 */
873 873 #define sd_log_trace ssd_log_trace
874 874 #define sd_log_info ssd_log_info
875 875 #define sd_log_err ssd_log_err
876 876 #define sdprobe ssdprobe
877 877 #define sdinfo ssdinfo
878 878 #define sd_prop_op ssd_prop_op
879 879 #define sd_scsi_probe_cache_init ssd_scsi_probe_cache_init
880 880 #define sd_scsi_probe_cache_fini ssd_scsi_probe_cache_fini
881 881 #define sd_scsi_clear_probe_cache ssd_scsi_clear_probe_cache
882 882 #define sd_scsi_probe_with_cache ssd_scsi_probe_with_cache
883 883 #define sd_scsi_target_lun_init ssd_scsi_target_lun_init
884 884 #define sd_scsi_target_lun_fini ssd_scsi_target_lun_fini
885 885 #define sd_scsi_get_target_lun_count ssd_scsi_get_target_lun_count
886 886 #define sd_scsi_update_lun_on_target ssd_scsi_update_lun_on_target
887 887 #define sd_spin_up_unit ssd_spin_up_unit
888 888 #define sd_enable_descr_sense ssd_enable_descr_sense
889 889 #define sd_reenable_dsense_task ssd_reenable_dsense_task
890 890 #define sd_set_mmc_caps ssd_set_mmc_caps
891 891 #define sd_read_unit_properties ssd_read_unit_properties
892 892 #define sd_process_sdconf_file ssd_process_sdconf_file
893 893 #define sd_process_sdconf_table ssd_process_sdconf_table
894 894 #define sd_sdconf_id_match ssd_sdconf_id_match
895 895 #define sd_blank_cmp ssd_blank_cmp
896 896 #define sd_chk_vers1_data ssd_chk_vers1_data
897 897 #define sd_set_vers1_properties ssd_set_vers1_properties
898 898 #define sd_check_bdc_vpd ssd_check_bdc_vpd
899 899 #define sd_check_emulation_mode ssd_check_emulation_mode
900 900
901 901 #define sd_get_physical_geometry ssd_get_physical_geometry
902 902 #define sd_get_virtual_geometry ssd_get_virtual_geometry
903 903 #define sd_update_block_info ssd_update_block_info
904 904 #define sd_register_devid ssd_register_devid
905 905 #define sd_get_devid ssd_get_devid
906 906 #define sd_create_devid ssd_create_devid
907 907 #define sd_write_deviceid ssd_write_deviceid
908 908 #define sd_check_vpd_page_support ssd_check_vpd_page_support
909 909 #define sd_setup_pm ssd_setup_pm
910 910 #define sd_create_pm_components ssd_create_pm_components
911 911 #define sd_ddi_suspend ssd_ddi_suspend
912 912 #define sd_ddi_resume ssd_ddi_resume
913 913 #define sd_pm_state_change ssd_pm_state_change
914 914 #define sdpower ssdpower
915 915 #define sdattach ssdattach
916 916 #define sddetach ssddetach
917 917 #define sd_unit_attach ssd_unit_attach
918 918 #define sd_unit_detach ssd_unit_detach
919 919 #define sd_set_unit_attributes ssd_set_unit_attributes
920 920 #define sd_create_errstats ssd_create_errstats
921 921 #define sd_set_errstats ssd_set_errstats
922 922 #define sd_set_pstats ssd_set_pstats
923 923 #define sddump ssddump
924 924 #define sd_scsi_poll ssd_scsi_poll
925 925 #define sd_send_polled_RQS ssd_send_polled_RQS
926 926 #define sd_ddi_scsi_poll ssd_ddi_scsi_poll
927 927 #define sd_init_event_callbacks ssd_init_event_callbacks
928 928 #define sd_event_callback ssd_event_callback
929 929 #define sd_cache_control ssd_cache_control
930 930 #define sd_get_write_cache_enabled ssd_get_write_cache_enabled
931 931 #define sd_get_write_cache_changeable ssd_get_write_cache_changeable
932 932 #define sd_get_nv_sup ssd_get_nv_sup
933 933 #define sd_make_device ssd_make_device
934 934 #define sdopen ssdopen
935 935 #define sdclose ssdclose
936 936 #define sd_ready_and_valid ssd_ready_and_valid
937 937 #define sdmin ssdmin
938 938 #define sdread ssdread
939 939 #define sdwrite ssdwrite
940 940 #define sdaread ssdaread
941 941 #define sdawrite ssdawrite
942 942 #define sdstrategy ssdstrategy
943 943 #define sdioctl ssdioctl
944 944 #define sd_mapblockaddr_iostart ssd_mapblockaddr_iostart
945 945 #define sd_mapblocksize_iostart ssd_mapblocksize_iostart
946 946 #define sd_checksum_iostart ssd_checksum_iostart
947 947 #define sd_checksum_uscsi_iostart ssd_checksum_uscsi_iostart
948 948 #define sd_pm_iostart ssd_pm_iostart
949 949 #define sd_core_iostart ssd_core_iostart
950 950 #define sd_mapblockaddr_iodone ssd_mapblockaddr_iodone
951 951 #define sd_mapblocksize_iodone ssd_mapblocksize_iodone
952 952 #define sd_checksum_iodone ssd_checksum_iodone
953 953 #define sd_checksum_uscsi_iodone ssd_checksum_uscsi_iodone
954 954 #define sd_pm_iodone ssd_pm_iodone
955 955 #define sd_initpkt_for_buf ssd_initpkt_for_buf
956 956 #define sd_destroypkt_for_buf ssd_destroypkt_for_buf
957 957 #define sd_setup_rw_pkt ssd_setup_rw_pkt
958 958 #define sd_setup_next_rw_pkt ssd_setup_next_rw_pkt
959 959 #define sd_buf_iodone ssd_buf_iodone
960 960 #define sd_uscsi_strategy ssd_uscsi_strategy
961 961 #define sd_initpkt_for_uscsi ssd_initpkt_for_uscsi
962 962 #define sd_destroypkt_for_uscsi ssd_destroypkt_for_uscsi
963 963 #define sd_uscsi_iodone ssd_uscsi_iodone
964 964 #define sd_xbuf_strategy ssd_xbuf_strategy
965 965 #define sd_xbuf_init ssd_xbuf_init
966 966 #define sd_pm_entry ssd_pm_entry
967 967 #define sd_pm_exit ssd_pm_exit
968 968
969 969 #define sd_pm_idletimeout_handler ssd_pm_idletimeout_handler
970 970 #define sd_pm_timeout_handler ssd_pm_timeout_handler
971 971
972 972 #define sd_add_buf_to_waitq ssd_add_buf_to_waitq
973 973 #define sdintr ssdintr
974 974 #define sd_start_cmds ssd_start_cmds
975 975 #define sd_send_scsi_cmd ssd_send_scsi_cmd
976 976 #define sd_bioclone_alloc ssd_bioclone_alloc
977 977 #define sd_bioclone_free ssd_bioclone_free
978 978 #define sd_shadow_buf_alloc ssd_shadow_buf_alloc
979 979 #define sd_shadow_buf_free ssd_shadow_buf_free
980 980 #define sd_print_transport_rejected_message \
981 981 ssd_print_transport_rejected_message
982 982 #define sd_retry_command ssd_retry_command
983 983 #define sd_set_retry_bp ssd_set_retry_bp
984 984 #define sd_send_request_sense_command ssd_send_request_sense_command
985 985 #define sd_start_retry_command ssd_start_retry_command
986 986 #define sd_start_direct_priority_command \
987 987 ssd_start_direct_priority_command
988 988 #define sd_return_failed_command ssd_return_failed_command
989 989 #define sd_return_failed_command_no_restart \
990 990 ssd_return_failed_command_no_restart
991 991 #define sd_return_command ssd_return_command
992 992 #define sd_sync_with_callback ssd_sync_with_callback
993 993 #define sdrunout ssdrunout
994 994 #define sd_mark_rqs_busy ssd_mark_rqs_busy
995 995 #define sd_mark_rqs_idle ssd_mark_rqs_idle
996 996 #define sd_reduce_throttle ssd_reduce_throttle
997 997 #define sd_restore_throttle ssd_restore_throttle
998 998 #define sd_print_incomplete_msg ssd_print_incomplete_msg
999 999 #define sd_init_cdb_limits ssd_init_cdb_limits
1000 1000 #define sd_pkt_status_good ssd_pkt_status_good
1001 1001 #define sd_pkt_status_check_condition ssd_pkt_status_check_condition
1002 1002 #define sd_pkt_status_busy ssd_pkt_status_busy
1003 1003 #define sd_pkt_status_reservation_conflict \
1004 1004 ssd_pkt_status_reservation_conflict
1005 1005 #define sd_pkt_status_qfull ssd_pkt_status_qfull
1006 1006 #define sd_handle_request_sense ssd_handle_request_sense
1007 1007 #define sd_handle_auto_request_sense ssd_handle_auto_request_sense
1008 1008 #define sd_print_sense_failed_msg ssd_print_sense_failed_msg
1009 1009 #define sd_validate_sense_data ssd_validate_sense_data
1010 1010 #define sd_decode_sense ssd_decode_sense
1011 1011 #define sd_print_sense_msg ssd_print_sense_msg
1012 1012 #define sd_sense_key_no_sense ssd_sense_key_no_sense
1013 1013 #define sd_sense_key_recoverable_error ssd_sense_key_recoverable_error
1014 1014 #define sd_sense_key_not_ready ssd_sense_key_not_ready
1015 1015 #define sd_sense_key_medium_or_hardware_error \
1016 1016 ssd_sense_key_medium_or_hardware_error
1017 1017 #define sd_sense_key_illegal_request ssd_sense_key_illegal_request
1018 1018 #define sd_sense_key_unit_attention ssd_sense_key_unit_attention
1019 1019 #define sd_sense_key_fail_command ssd_sense_key_fail_command
1020 1020 #define sd_sense_key_blank_check ssd_sense_key_blank_check
1021 1021 #define sd_sense_key_aborted_command ssd_sense_key_aborted_command
1022 1022 #define sd_sense_key_default ssd_sense_key_default
1023 1023 #define sd_print_retry_msg ssd_print_retry_msg
1024 1024 #define sd_print_cmd_incomplete_msg ssd_print_cmd_incomplete_msg
1025 1025 #define sd_pkt_reason_cmd_incomplete ssd_pkt_reason_cmd_incomplete
1026 1026 #define sd_pkt_reason_cmd_tran_err ssd_pkt_reason_cmd_tran_err
1027 1027 #define sd_pkt_reason_cmd_reset ssd_pkt_reason_cmd_reset
1028 1028 #define sd_pkt_reason_cmd_aborted ssd_pkt_reason_cmd_aborted
1029 1029 #define sd_pkt_reason_cmd_timeout ssd_pkt_reason_cmd_timeout
1030 1030 #define sd_pkt_reason_cmd_unx_bus_free ssd_pkt_reason_cmd_unx_bus_free
1031 1031 #define sd_pkt_reason_cmd_tag_reject ssd_pkt_reason_cmd_tag_reject
1032 1032 #define sd_pkt_reason_default ssd_pkt_reason_default
1033 1033 #define sd_reset_target ssd_reset_target
1034 1034 #define sd_start_stop_unit_callback ssd_start_stop_unit_callback
1035 1035 #define sd_start_stop_unit_task ssd_start_stop_unit_task
1036 1036 #define sd_taskq_create ssd_taskq_create
1037 1037 #define sd_taskq_delete ssd_taskq_delete
1038 1038 #define sd_target_change_task ssd_target_change_task
1039 1039 #define sd_log_dev_status_event ssd_log_dev_status_event
1040 1040 #define sd_log_lun_expansion_event ssd_log_lun_expansion_event
1041 1041 #define sd_log_eject_request_event ssd_log_eject_request_event
1042 1042 #define sd_media_change_task ssd_media_change_task
1043 1043 #define sd_handle_mchange ssd_handle_mchange
1044 1044 #define sd_send_scsi_DOORLOCK ssd_send_scsi_DOORLOCK
1045 1045 #define sd_send_scsi_READ_CAPACITY ssd_send_scsi_READ_CAPACITY
1046 1046 #define sd_send_scsi_READ_CAPACITY_16 ssd_send_scsi_READ_CAPACITY_16
1047 1047 #define sd_send_scsi_GET_CONFIGURATION ssd_send_scsi_GET_CONFIGURATION
1048 1048 #define sd_send_scsi_feature_GET_CONFIGURATION \
1049 1049 ssd_send_scsi_feature_GET_CONFIGURATION
1050 1050 #define sd_send_scsi_START_STOP_UNIT ssd_send_scsi_START_STOP_UNIT
1051 1051 #define sd_send_scsi_INQUIRY ssd_send_scsi_INQUIRY
1052 1052 #define sd_send_scsi_TEST_UNIT_READY ssd_send_scsi_TEST_UNIT_READY
1053 1053 #define sd_send_scsi_PERSISTENT_RESERVE_IN \
1054 1054 ssd_send_scsi_PERSISTENT_RESERVE_IN
1055 1055 #define sd_send_scsi_PERSISTENT_RESERVE_OUT \
1056 1056 ssd_send_scsi_PERSISTENT_RESERVE_OUT
1057 1057 #define sd_send_scsi_SYNCHRONIZE_CACHE ssd_send_scsi_SYNCHRONIZE_CACHE
1058 1058 #define sd_send_scsi_SYNCHRONIZE_CACHE_biodone \
1059 1059 ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
1060 1060 #define sd_send_scsi_MODE_SENSE ssd_send_scsi_MODE_SENSE
1061 1061 #define sd_send_scsi_MODE_SELECT ssd_send_scsi_MODE_SELECT
1062 1062 #define sd_send_scsi_RDWR ssd_send_scsi_RDWR
1063 1063 #define sd_send_scsi_LOG_SENSE ssd_send_scsi_LOG_SENSE
1064 1064 #define sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION \
1065 1065 ssd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
1066 1066 #define sd_gesn_media_data_valid ssd_gesn_media_data_valid
1067 1067 #define sd_alloc_rqs ssd_alloc_rqs
1068 1068 #define sd_free_rqs ssd_free_rqs
1069 1069 #define sd_dump_memory ssd_dump_memory
1070 1070 #define sd_get_media_info_com ssd_get_media_info_com
1071 1071 #define sd_get_media_info ssd_get_media_info
1072 1072 #define sd_get_media_info_ext ssd_get_media_info_ext
1073 1073 #define sd_dkio_ctrl_info ssd_dkio_ctrl_info
1074 1074 #define sd_nvpair_str_decode ssd_nvpair_str_decode
1075 1075 #define sd_strtok_r ssd_strtok_r
1076 1076 #define sd_set_properties ssd_set_properties
1077 1077 #define sd_get_tunables_from_conf ssd_get_tunables_from_conf
1078 1078 #define sd_setup_next_xfer ssd_setup_next_xfer
1079 1079 #define sd_dkio_get_temp ssd_dkio_get_temp
1080 1080 #define sd_check_mhd ssd_check_mhd
1081 1081 #define sd_mhd_watch_cb ssd_mhd_watch_cb
1082 1082 #define sd_mhd_watch_incomplete ssd_mhd_watch_incomplete
1083 1083 #define sd_sname ssd_sname
1084 1084 #define sd_mhd_resvd_recover ssd_mhd_resvd_recover
1085 1085 #define sd_resv_reclaim_thread ssd_resv_reclaim_thread
1086 1086 #define sd_take_ownership ssd_take_ownership
1087 1087 #define sd_reserve_release ssd_reserve_release
1088 1088 #define sd_rmv_resv_reclaim_req ssd_rmv_resv_reclaim_req
1089 1089 #define sd_mhd_reset_notify_cb ssd_mhd_reset_notify_cb
1090 1090 #define sd_persistent_reservation_in_read_keys \
1091 1091 ssd_persistent_reservation_in_read_keys
1092 1092 #define sd_persistent_reservation_in_read_resv \
1093 1093 ssd_persistent_reservation_in_read_resv
1094 1094 #define sd_mhdioc_takeown ssd_mhdioc_takeown
1095 1095 #define sd_mhdioc_failfast ssd_mhdioc_failfast
1096 1096 #define sd_mhdioc_release ssd_mhdioc_release
1097 1097 #define sd_mhdioc_register_devid ssd_mhdioc_register_devid
1098 1098 #define sd_mhdioc_inkeys ssd_mhdioc_inkeys
1099 1099 #define sd_mhdioc_inresv ssd_mhdioc_inresv
1100 1100 #define sr_change_blkmode ssr_change_blkmode
1101 1101 #define sr_change_speed ssr_change_speed
1102 1102 #define sr_atapi_change_speed ssr_atapi_change_speed
1103 1103 #define sr_pause_resume ssr_pause_resume
1104 1104 #define sr_play_msf ssr_play_msf
1105 1105 #define sr_play_trkind ssr_play_trkind
1106 1106 #define sr_read_all_subcodes ssr_read_all_subcodes
1107 1107 #define sr_read_subchannel ssr_read_subchannel
1108 1108 #define sr_read_tocentry ssr_read_tocentry
1109 1109 #define sr_read_tochdr ssr_read_tochdr
1110 1110 #define sr_read_cdda ssr_read_cdda
1111 1111 #define sr_read_cdxa ssr_read_cdxa
1112 1112 #define sr_read_mode1 ssr_read_mode1
1113 1113 #define sr_read_mode2 ssr_read_mode2
1114 1114 #define sr_read_cd_mode2 ssr_read_cd_mode2
1115 1115 #define sr_sector_mode ssr_sector_mode
1116 1116 #define sr_eject ssr_eject
1117 1117 #define sr_ejected ssr_ejected
1118 1118 #define sr_check_wp ssr_check_wp
1119 1119 #define sd_watch_request_submit ssd_watch_request_submit
1120 1120 #define sd_check_media ssd_check_media
1121 1121 #define sd_media_watch_cb ssd_media_watch_cb
1122 1122 #define sd_delayed_cv_broadcast ssd_delayed_cv_broadcast
1123 1123 #define sr_volume_ctrl ssr_volume_ctrl
1124 1124 #define sr_read_sony_session_offset ssr_read_sony_session_offset
1125 1125 #define sd_log_page_supported ssd_log_page_supported
1126 1126 #define sd_check_for_writable_cd ssd_check_for_writable_cd
1127 1127 #define sd_wm_cache_constructor ssd_wm_cache_constructor
1128 1128 #define sd_wm_cache_destructor ssd_wm_cache_destructor
1129 1129 #define sd_range_lock ssd_range_lock
1130 1130 #define sd_get_range ssd_get_range
1131 1131 #define sd_free_inlist_wmap ssd_free_inlist_wmap
1132 1132 #define sd_range_unlock ssd_range_unlock
1133 1133 #define sd_read_modify_write_task ssd_read_modify_write_task
1134 1134 #define sddump_do_read_of_rmw ssddump_do_read_of_rmw
1135 1135
1136 1136 #define sd_iostart_chain ssd_iostart_chain
1137 1137 #define sd_iodone_chain ssd_iodone_chain
1138 1138 #define sd_initpkt_map ssd_initpkt_map
1139 1139 #define sd_destroypkt_map ssd_destroypkt_map
1140 1140 #define sd_chain_type_map ssd_chain_type_map
1141 1141 #define sd_chain_index_map ssd_chain_index_map
1142 1142
1143 1143 #define sd_failfast_flushctl ssd_failfast_flushctl
1144 1144 #define sd_failfast_flushq ssd_failfast_flushq
1145 1145 #define sd_failfast_flushq_callback ssd_failfast_flushq_callback
1146 1146
1147 1147 #define sd_is_lsi ssd_is_lsi
1148 1148 #define sd_tg_rdwr ssd_tg_rdwr
1149 1149 #define sd_tg_getinfo ssd_tg_getinfo
1150 1150 #define sd_rmw_msg_print_handler ssd_rmw_msg_print_handler
1151 1151
1152 1152 #endif /* #if (defined(__fibre)) */
1153 1153
1154 1154
1155 1155 int _init(void);
1156 1156 int _fini(void);
1157 1157 int _info(struct modinfo *modinfop);
1158 1158
1159 1159 /*PRINTFLIKE3*/
1160 1160 static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
1161 1161 /*PRINTFLIKE3*/
1162 1162 static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
1163 1163 /*PRINTFLIKE3*/
1164 1164 static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);
1165 1165
1166 1166 static int sdprobe(dev_info_t *devi);
1167 1167 static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
1168 1168 void **result);
1169 1169 static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1170 1170 int mod_flags, char *name, caddr_t valuep, int *lengthp);
1171 1171
1172 1172 /*
1173 1173 * Smart probe for parallel scsi
1174 1174 */
1175 1175 static void sd_scsi_probe_cache_init(void);
1176 1176 static void sd_scsi_probe_cache_fini(void);
1177 1177 static void sd_scsi_clear_probe_cache(void);
1178 1178 static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());
1179 1179
1180 1180 /*
1181 1181 * Attached luns on target for parallel scsi
1182 1182 */
1183 1183 static void sd_scsi_target_lun_init(void);
1184 1184 static void sd_scsi_target_lun_fini(void);
1185 1185 static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
1186 1186 static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);
1187 1187
1188 1188 static int sd_spin_up_unit(sd_ssc_t *ssc);
1189 1189
1190 1190 /*
1191 1191 * Use sd_ssc_init to allocate and initialize an sd_ssc_t struct
1192 1192 * Use sd_ssc_send to send an internal uscsi command
1193 1193 * Use sd_ssc_fini to free the sd_ssc_t struct
1194 1194 */
1195 1195 static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
1196 1196 static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
1197 1197 int flag, enum uio_seg dataspace, int path_flag);
1198 1198 static void sd_ssc_fini(sd_ssc_t *ssc);
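/*
 * Hypothetical usage sketch (not sd.c code) of the sd_ssc_t lifecycle
 * around one internal command, e.g. treating a failure as expected
 * and ignorable:
 *
 *	sd_ssc_t *ssc = sd_ssc_init(un);
 *	if (sd_send_scsi_TEST_UNIT_READY(ssc, 0) != 0)
 *		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
 *	sd_ssc_fini(ssc);
 */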
1199 1199
1200 1200 /*
1201 1201 * Use sd_ssc_assessment to set the correct type of assessment
1202 1202 * Use sd_ssc_post to post an ereport and a system log entry;
1203 1203 * sd_ssc_post calls sd_ssc_print to print the system log and
1204 1204 * sd_ssc_ereport_post to post the ereport
1205 1205 */
1206 1206 static void sd_ssc_assessment(sd_ssc_t *ssc,
1207 1207 enum sd_type_assessment tp_assess);
1208 1208
1209 1209 static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
1210 1210 static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
1211 1211 static void sd_ssc_ereport_post(sd_ssc_t *ssc,
1212 1212 enum sd_driver_assessment drv_assess);
1213 1213
1214 1214 /*
1215 1215 * Use sd_ssc_set_info to mark an un-decodable-data error.
1216 1216 * Use sd_ssc_extract_info to transfer information from internal
1217 1217 * data structures to sd_ssc_t.
1218 1218 */
1219 1219 static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp,
1220 1220 const char *fmt, ...);
1221 1221 static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
1222 1222 struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);
1223 1223
1224 1224 static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
1225 1225 enum uio_seg dataspace, int path_flag);
1226 1226
1227 1227 #ifdef _LP64
1228 1228 static void sd_enable_descr_sense(sd_ssc_t *ssc);
1229 1229 static void sd_reenable_dsense_task(void *arg);
1230 1230 #endif /* _LP64 */
1231 1231
1232 1232 static void sd_set_mmc_caps(sd_ssc_t *ssc);
1233 1233
1234 1234 static void sd_read_unit_properties(struct sd_lun *un);
1235 1235 static int sd_process_sdconf_file(struct sd_lun *un);
1236 1236 static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
1237 1237 static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
1238 1238 static void sd_set_properties(struct sd_lun *un, char *name, char *value);
1239 1239 static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
1240 1240 int *data_list, sd_tunables *values);
1241 1241 static void sd_process_sdconf_table(struct sd_lun *un);
1242 1242 static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
1243 1243 static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
1244 1244 static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
1245 1245 int list_len, char *dataname_ptr);
1246 1246 static void sd_set_vers1_properties(struct sd_lun *un, int flags,
1247 1247 sd_tunables *prop_list);
1248 1248
1249 1249 static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
1250 1250 int reservation_flag);
1251 1251 static int sd_get_devid(sd_ssc_t *ssc);
1252 1252 static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
1253 1253 static int sd_write_deviceid(sd_ssc_t *ssc);
1254 1254 static int sd_check_vpd_page_support(sd_ssc_t *ssc);
1255 1255
1256 1256 static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
1257 1257 static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);
1258 1258
1259 1259 static int sd_ddi_suspend(dev_info_t *devi);
1260 1260 static int sd_ddi_resume(dev_info_t *devi);
1261 1261 static int sd_pm_state_change(struct sd_lun *un, int level, int flag);
1262 1262 static int sdpower(dev_info_t *devi, int component, int level);
1263 1263
1264 1264 static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
1265 1265 static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
1266 1266 static int sd_unit_attach(dev_info_t *devi);
1267 1267 static int sd_unit_detach(dev_info_t *devi);
1268 1268
1269 1269 static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
1270 1270 static void sd_create_errstats(struct sd_lun *un, int instance);
1271 1271 static void sd_set_errstats(struct sd_lun *un);
1272 1272 static void sd_set_pstats(struct sd_lun *un);
1273 1273
1274 1274 static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
1275 1275 static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
1276 1276 static int sd_send_polled_RQS(struct sd_lun *un);
1277 1277 static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);
1278 1278
1279 1279 #if (defined(__fibre))
1280 1280 /*
1281 1281 * Event callbacks (photon)
1282 1282 */
1283 1283 static void sd_init_event_callbacks(struct sd_lun *un);
1284 1284 static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
1285 1285 #endif
1286 1286
1287 1287 /*
1288 1288 * Defines for sd_cache_control
1289 1289 */
1290 1290
1291 1291 #define SD_CACHE_ENABLE 1
1292 1292 #define SD_CACHE_DISABLE 0
1293 1293 #define SD_CACHE_NOCHANGE -1
1294 1294
1295 1295 static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
1296 1296 static int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
1297 1297 static void sd_get_write_cache_changeable(sd_ssc_t *ssc, int *is_changeable);
1298 1298 static void sd_get_nv_sup(sd_ssc_t *ssc);
1299 1299 static dev_t sd_make_device(dev_info_t *devi);
1300 1300 static void sd_check_bdc_vpd(sd_ssc_t *ssc);
1301 1301 static void sd_check_emulation_mode(sd_ssc_t *ssc);
1302 1302 static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
1303 1303 uint64_t capacity);
1304 1304
1305 1305 /*
1306 1306 * Driver entry point functions.
1307 1307 */
1308 1308 static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
1309 1309 static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
1310 1310 static int sd_ready_and_valid(sd_ssc_t *ssc, int part);
1311 1311
1312 1312 static void sdmin(struct buf *bp);
1313 1313 static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
1314 1314 static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
1315 1315 static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
1316 1316 static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);
1317 1317
1318 1318 static int sdstrategy(struct buf *bp);
1319 1319 static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);
1320 1320
1321 1321 /*
1322 1322 * Function prototypes for layering functions in the iostart chain.
1323 1323 */
1324 1324 static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
1325 1325 struct buf *bp);
1326 1326 static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
1327 1327 struct buf *bp);
1328 1328 static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
1329 1329 static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
1330 1330 struct buf *bp);
1331 1331 static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
1332 1332 static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);
1333 1333
1334 1334 /*
1335 1335 * Function prototypes for layering functions in the iodone chain.
1336 1336 */
1337 1337 static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
1338 1338 static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
1339 1339 static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
1340 1340 struct buf *bp);
1341 1341 static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
1342 1342 struct buf *bp);
1343 1343 static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
1344 1344 static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
1345 1345 struct buf *bp);
1346 1346 static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);
1347 1347
1348 1348 /*
1349 1349 * Prototypes for functions to support buf(9S) based IO.
1350 1350 */
1351 1351 static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
1352 1352 static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
1353 1353 static void sd_destroypkt_for_buf(struct buf *);
1354 1354 static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
1355 1355 struct buf *bp, int flags,
1356 1356 int (*callback)(caddr_t), caddr_t callback_arg,
1357 1357 diskaddr_t lba, uint32_t blockcount);
1358 1358 static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
1359 1359 struct buf *bp, diskaddr_t lba, uint32_t blockcount);
1360 1360
1361 1361 /*
1362 1362 * Prototypes for functions to support USCSI IO.
1363 1363 */
1364 1364 static int sd_uscsi_strategy(struct buf *bp);
1365 1365 static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
1366 1366 static void sd_destroypkt_for_uscsi(struct buf *);
1367 1367
1368 1368 static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
1369 1369 uchar_t chain_type, void *pktinfop);
1370 1370
1371 1371 static int sd_pm_entry(struct sd_lun *un);
1372 1372 static void sd_pm_exit(struct sd_lun *un);
1373 1373
1374 1374 static void sd_pm_idletimeout_handler(void *arg);
1375 1375
1376 1376 /*
1377 1377 * sd_core internal functions (used at the sd_core_io layer).
1378 1378 */
1379 1379 static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
1380 1380 static void sdintr(struct scsi_pkt *pktp);
1381 1381 static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);
1382 1382
1383 1383 static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
1384 1384 enum uio_seg dataspace, int path_flag);
1385 1385
1386 1386 static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
1387 1387 daddr_t blkno, int (*func)(struct buf *));
1388 1388 static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
1389 1389 uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
1390 1390 static void sd_bioclone_free(struct buf *bp);
1391 1391 static void sd_shadow_buf_free(struct buf *bp);
1392 1392
1393 1393 static void sd_print_transport_rejected_message(struct sd_lun *un,
1394 1394 struct sd_xbuf *xp, int code);
1395 1395 static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
1396 1396 void *arg, int code);
1397 1397 static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
1398 1398 void *arg, int code);
1399 1399 static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
1400 1400 void *arg, int code);
1401 1401
1402 1402 static void sd_retry_command(struct sd_lun *un, struct buf *bp,
1403 1403 int retry_check_flag,
1404 1404 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
1405 1405 int c),
1406 1406 void *user_arg, int failure_code, clock_t retry_delay,
1407 1407 void (*statp)(kstat_io_t *));
1408 1408
1409 1409 static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
1410 1410 clock_t retry_delay, void (*statp)(kstat_io_t *));
1411 1411
1412 1412 static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
1413 1413 struct scsi_pkt *pktp);
1414 1414 static void sd_start_retry_command(void *arg);
1415 1415 static void sd_start_direct_priority_command(void *arg);
1416 1416 static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
1417 1417 int errcode);
1418 1418 static void sd_return_failed_command_no_restart(struct sd_lun *un,
1419 1419 struct buf *bp, int errcode);
1420 1420 static void sd_return_command(struct sd_lun *un, struct buf *bp);
1421 1421 static void sd_sync_with_callback(struct sd_lun *un);
1422 1422 static int sdrunout(caddr_t arg);
1423 1423
1424 1424 static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
1425 1425 static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);
1426 1426
1427 1427 static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
1428 1428 static void sd_restore_throttle(void *arg);
1429 1429
1430 1430 static void sd_init_cdb_limits(struct sd_lun *un);
1431 1431
1432 1432 static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
1433 1433 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1434 1434
1435 1435 /*
1436 1436 * Error handling functions
1437 1437 */
1438 1438 static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
1439 1439 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1440 1440 static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
1441 1441 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1442 1442 static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
1443 1443 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1444 1444 static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
1445 1445 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1446 1446
1447 1447 static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
1448 1448 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1449 1449 static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
1450 1450 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1451 1451 static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
1452 1452 struct sd_xbuf *xp, size_t actual_len);
1453 1453 static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
1454 1454 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1455 1455
1456 1456 static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
1457 1457 void *arg, int code);
1458 1458
1459 1459 static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
1460 1460 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1461 1461 static void sd_sense_key_recoverable_error(struct sd_lun *un,
1462 1462 uint8_t *sense_datap,
1463 1463 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1464 1464 static void sd_sense_key_not_ready(struct sd_lun *un,
1465 1465 uint8_t *sense_datap,
1466 1466 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1467 1467 static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
1468 1468 uint8_t *sense_datap,
1469 1469 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1470 1470 static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
1471 1471 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1472 1472 static void sd_sense_key_unit_attention(struct sd_lun *un,
1473 1473 uint8_t *sense_datap,
1474 1474 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1475 1475 static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
1476 1476 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1477 1477 static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
1478 1478 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1479 1479 static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
1480 1480 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1481 1481 static void sd_sense_key_default(struct sd_lun *un,
1482 1482 uint8_t *sense_datap,
1483 1483 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1484 1484
1485 1485 static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
1486 1486 void *arg, int flag);
1487 1487
1488 1488 static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
1489 1489 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1490 1490 static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
1491 1491 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1492 1492 static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
1493 1493 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1494 1494 static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
1495 1495 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1496 1496 static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
1497 1497 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1498 1498 static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
1499 1499 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1500 1500 static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
1501 1501 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1502 1502 static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
1503 1503 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1504 1504
1505 1505 static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);
1506 1506
1507 1507 static void sd_start_stop_unit_callback(void *arg);
1508 1508 static void sd_start_stop_unit_task(void *arg);
1509 1509
1510 1510 static void sd_taskq_create(void);
1511 1511 static void sd_taskq_delete(void);
1512 1512 static void sd_target_change_task(void *arg);
1513 1513 static void sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag);
1514 1514 static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
1515 1515 static void sd_log_eject_request_event(struct sd_lun *un, int km_flag);
1516 1516 static void sd_media_change_task(void *arg);
1517 1517
1518 1518 static int sd_handle_mchange(struct sd_lun *un);
1519 1519 static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag);
1520 1520 static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp,
1521 1521 uint32_t *lbap, int path_flag);
1522 1522 static int sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
1523 1523 uint32_t *lbap, uint32_t *psp, int path_flag);
1524 1524 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag,
1525 1525 int flag, int path_flag);
1526 1526 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr,
1527 1527 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
1528 1528 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag);
1529 1529 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc,
1530 1530 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
1531 1531 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc,
1532 1532 uchar_t usr_cmd, uchar_t *usr_bufp);
1533 1533 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
1534 1534 struct dk_callback *dkc);
1535 1535 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
1536 1536 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc,
1537 1537 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
1538 1538 uchar_t *bufaddr, uint_t buflen, int path_flag);
1539 1539 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc,
1540 1540 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
1541 1541 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
1542 1542 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize,
1543 1543 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
1544 1544 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize,
1545 1545 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
1546 1546 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
1547 1547 size_t buflen, daddr_t start_block, int path_flag);
1548 1548 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \
1549 1549 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \
1550 1550 path_flag)
1551 1551 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\
1552 1552 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\
1553 1553 path_flag)
1554 1554
1555 1555 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr,
1556 1556 uint16_t buflen, uchar_t page_code, uchar_t page_control,
1557 1557 uint16_t param_ptr, int path_flag);
1558 1558 static int sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc,
1559 1559 uchar_t *bufaddr, size_t buflen, uchar_t class_req);
1560 1560 static boolean_t sd_gesn_media_data_valid(uchar_t *data);
1561 1561
1562 1562 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
1563 1563 static void sd_free_rqs(struct sd_lun *un);
1564 1564
1565 1565 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
1566 1566 uchar_t *data, int len, int fmt);
1567 1567 static void sd_panic_for_res_conflict(struct sd_lun *un);
1568 1568
1569 1569 /*
1570 1570 * Disk Ioctl Function Prototypes
1571 1571 */
1572 1572 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
1573 1573 static int sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag);
1574 1574 static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
1575 1575 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);
1576 1576
1577 1577 /*
1578 1578 * Multi-host Ioctl Prototypes
1579 1579 */
1580 1580 static int sd_check_mhd(dev_t dev, int interval);
1581 1581 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
1582 1582 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
1583 1583 static char *sd_sname(uchar_t status);
1584 1584 static void sd_mhd_resvd_recover(void *arg);
1585 1585 static void sd_resv_reclaim_thread(void);
1586 1586 static int sd_take_ownership(dev_t dev, struct mhioctkown *p);
1587 1587 static int sd_reserve_release(dev_t dev, int cmd);
1588 1588 static void sd_rmv_resv_reclaim_req(dev_t dev);
1589 1589 static void sd_mhd_reset_notify_cb(caddr_t arg);
1590 1590 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un,
1591 1591 mhioc_inkeys_t *usrp, int flag);
1592 1592 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un,
1593 1593 mhioc_inresvs_t *usrp, int flag);
1594 1594 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag);
1595 1595 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag);
1596 1596 static int sd_mhdioc_release(dev_t dev);
1597 1597 static int sd_mhdioc_register_devid(dev_t dev);
1598 1598 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag);
1599 1599 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag);
1600 1600
1601 1601 /*
1602 1602 * SCSI removable prototypes
1603 1603 */
1604 1604 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag);
1605 1605 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
1606 1606 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
1607 1607 static int sr_pause_resume(dev_t dev, int mode);
1608 1608 static int sr_play_msf(dev_t dev, caddr_t data, int flag);
1609 1609 static int sr_play_trkind(dev_t dev, caddr_t data, int flag);
1610 1610 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag);
1611 1611 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag);
1612 1612 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag);
1613 1613 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag);
1614 1614 static int sr_read_cdda(dev_t dev, caddr_t data, int flag);
1615 1615 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag);
1616 1616 static int sr_read_mode1(dev_t dev, caddr_t data, int flag);
1617 1617 static int sr_read_mode2(dev_t dev, caddr_t data, int flag);
1618 1618 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag);
1619 1619 static int sr_sector_mode(dev_t dev, uint32_t blksize);
1620 1620 static int sr_eject(dev_t dev);
1621 1621 static void sr_ejected(struct sd_lun *un);
1622 1622 static int sr_check_wp(dev_t dev);
1623 1623 static opaque_t sd_watch_request_submit(struct sd_lun *un);
1624 1624 static int sd_check_media(dev_t dev, enum dkio_state state);
1625 1625 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
1626 1626 static void sd_delayed_cv_broadcast(void *arg);
1627 1627 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag);
1628 1628 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag);
1629 1629
1630 1630 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page);
1631 1631
1632 1632 /*
1633 1633  * Function prototypes for the non-512 sector size support (DVDRAM, MO, etc.).
1634 1634 */
1635 1635 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag);
1636 1636 static int sd_wm_cache_constructor(void *wm, void *un, int flags);
1637 1637 static void sd_wm_cache_destructor(void *wm, void *un);
1638 1638 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb,
1639 1639 daddr_t endb, ushort_t typ);
1640 1640 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb,
1641 1641 daddr_t endb);
1642 1642 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp);
1643 1643 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm);
1644 1644 static void sd_read_modify_write_task(void *arg);
1645 1645 static int
1646 1646 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
1647 1647 struct buf **bpp);
1648 1648
1649 1649
1650 1650 /*
1651 1651 * Function prototypes for failfast support.
1652 1652 */
1653 1653 static void sd_failfast_flushq(struct sd_lun *un);
1654 1654 static int sd_failfast_flushq_callback(struct buf *bp);
1655 1655
1656 1656 /*
1657 1657  * Function prototype to check for LSI devices
1658 1658 */
1659 1659 static void sd_is_lsi(struct sd_lun *un);
1660 1660
1661 1661 /*
1662 1662 * Function prototypes for partial DMA support
1663 1663 */
1664 1664 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
1665 1665 struct scsi_pkt *pkt, struct sd_xbuf *xp);
1666 1666
1667 1667
1668 1668 /* Function prototypes for cmlb */
1669 1669 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
1670 1670 diskaddr_t start_block, size_t reqlength, void *tg_cookie);
1671 1671
1672 1672 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie);
1673 1673
1674 1674 /*
1675 1675  * For timely printing of the RMW warning message
1676 1676 */
1677 1677 static void sd_rmw_msg_print_handler(void *arg);
1678 1678
1679 1679 /*
1680 1680 * Constants for failfast support:
1681 1681 *
1682 1682 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO
1683 1683 * failfast processing being performed.
1684 1684 *
1685 1685 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing
1686 1686 * failfast processing on all bufs with B_FAILFAST set.
1687 1687 */
1688 1688
1689 1689 #define SD_FAILFAST_INACTIVE 0
1690 1690 #define SD_FAILFAST_ACTIVE 1
1691 1691
1692 1692 /*
1693 1693 * Bitmask to control behavior of buf(9S) flushes when a transition to
1694 1694 * the failfast state occurs. Optional bits include:
1695 1695 *
1696 1696 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that
1697 1697 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will
1698 1698 * be flushed.
1699 1699 *
1700 1700 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the
1701 1701 * driver, in addition to the regular wait queue. This includes the xbuf
1702 1702 * queues. When clear, only the driver's wait queue will be flushed.
1703 1703 */
1704 1704 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01
1705 1705 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02
1706 1706
1707 1707 /*
1708 1708 * The default behavior is to only flush bufs that have B_FAILFAST set, but
1709 1709 * to flush all queues within the driver.
1710 1710 */
1711 1711 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES;
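/*
 * Illustrative note (not part of this change): like any other global in
 * this module, the flush behavior can be tuned without a rebuild, e.g.
 * from /etc/system:
 *
 *	set sd:sd_failfast_flushctl = 0x3
 *
 * which would select both SD_FAILFAST_FLUSH_ALL_BUFS and
 * SD_FAILFAST_FLUSH_ALL_QUEUES.
 */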
1712 1712
1713 1713
1714 1714 /*
1715 1715 * SD Testing Fault Injection
1716 1716 */
1717 1717 #ifdef SD_FAULT_INJECTION
1718 1718 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un);
1719 1719 static void sd_faultinjection(struct scsi_pkt *pktp);
1720 1720 static void sd_injection_log(char *buf, struct sd_lun *un);
1721 1721 #endif
1722 1722
1723 1723 /*
1724 1724 * Device driver ops vector
1725 1725 */
1726 1726 static struct cb_ops sd_cb_ops = {
1727 1727 sdopen, /* open */
1728 1728 sdclose, /* close */
1729 1729 sdstrategy, /* strategy */
1730 1730 nodev, /* print */
1731 1731 sddump, /* dump */
1732 1732 sdread, /* read */
1733 1733 sdwrite, /* write */
1734 1734 sdioctl, /* ioctl */
1735 1735 nodev, /* devmap */
1736 1736 nodev, /* mmap */
1737 1737 nodev, /* segmap */
1738 1738 nochpoll, /* poll */
1739 1739 sd_prop_op, /* cb_prop_op */
1740 1740 0, /* streamtab */
1741 1741 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */
1742 1742 CB_REV, /* cb_rev */
1743 1743 sdaread, /* async I/O read entry point */
1744 1744 sdawrite /* async I/O write entry point */
1745 1745 };
1746 1746
1747 1747 struct dev_ops sd_ops = {
1748 1748 DEVO_REV, /* devo_rev, */
1749 1749 0, /* refcnt */
1750 1750 sdinfo, /* info */
1751 1751 nulldev, /* identify */
1752 1752 sdprobe, /* probe */
1753 1753 sdattach, /* attach */
1754 1754 sddetach, /* detach */
1755 1755 nodev, /* reset */
1756 1756 &sd_cb_ops, /* driver operations */
1757 1757 NULL, /* bus operations */
1758 1758 sdpower, /* power */
1759 1759 ddi_quiesce_not_needed, /* quiesce */
1760 1760 };
1761 1761
1762 1762 /*
1763 1763 * This is the loadable module wrapper.
1764 1764 */
1765 1765 #include <sys/modctl.h>
1766 1766
1767 1767 static struct modldrv modldrv = {
1768 1768 &mod_driverops, /* Type of module. This one is a driver */
1769 1769 SD_MODULE_NAME, /* Module name. */
1770 1770 &sd_ops /* driver ops */
1771 1771 };
1772 1772
1773 1773 static struct modlinkage modlinkage = {
1774 1774 MODREV_1, &modldrv, NULL
1775 1775 };
1776 1776
1777 1777 static cmlb_tg_ops_t sd_tgops = {
1778 1778 TG_DK_OPS_VERSION_1,
1779 1779 sd_tg_rdwr,
1780 1780 sd_tg_getinfo
1781 1781 };
1782 1782
1783 1783 static struct scsi_asq_key_strings sd_additional_codes[] = {
1784 1784 0x81, 0, "Logical Unit is Reserved",
1785 1785 0x85, 0, "Audio Address Not Valid",
1786 1786 0xb6, 0, "Media Load Mechanism Failed",
1787 1787 0xB9, 0, "Audio Play Operation Aborted",
1788 1788 0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1789 1789 0x53, 2, "Medium removal prevented",
1790 1790 0x6f, 0, "Authentication failed during key exchange",
1791 1791 0x6f, 1, "Key not present",
1792 1792 0x6f, 2, "Key not established",
1793 1793 0x6f, 3, "Read without proper authentication",
1794 1794 0x6f, 4, "Mismatched region to this logical unit",
1795 1795 0x6f, 5, "Region reset count error",
1796 1796 0xffff, 0x0, NULL
1797 1797 };
1798 1798
1799 1799
1800 1800 /*
1801 1801 * Struct for passing printing information for sense data messages
1802 1802 */
1803 1803 struct sd_sense_info {
1804 1804 int ssi_severity;
1805 1805 int ssi_pfa_flag;
1806 1806 };
1807 1807
1808 1808 /*
1809 1809 * Table of function pointers for iostart-side routines. Separate "chains"
1810 1810 * of layered function calls are formed by placing the function pointers
1811 1811 * sequentially in the desired order. Functions are called according to an
1812 1812 * incrementing table index ordering. The last function in each chain must
1813 1813 * be sd_core_iostart(). The corresponding iodone-side routines are expected
1814 1814 * in the sd_iodone_chain[] array.
1815 1815 *
1816 1816 * Note: It may seem more natural to organize both the iostart and iodone
1817 1817 * functions together, into an array of structures (or some similar
1818 1818 * organization) with a common index, rather than two separate arrays which
1819 1819 * must be maintained in synchronization. The purpose of this division is
1820 1820  * to achieve improved performance: individual arrays allow for more
1821 1821 * effective cache line utilization on certain platforms.
1822 1822 */
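/*
 * Worked example (illustrative, derived from the tables below): for the
 * plain disk chain (PM enabled), iostart processing walks
 * sd_iostart_chain[] upward from index 0:
 *
 *	sd_mapblockaddr_iostart -> sd_pm_iostart -> sd_core_iostart
 *
 * and completion walks sd_iodone_chain[] downward from index 2:
 *
 *	sd_pm_iodone -> sd_mapblockaddr_iodone -> sd_buf_iodone
 */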
1823 1823
1824 1824 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp);
1825 1825
1826 1826
1827 1827 static sd_chain_t sd_iostart_chain[] = {
1828 1828
1829 1829 /* Chain for buf IO for disk drive targets (PM enabled) */
1830 1830 sd_mapblockaddr_iostart, /* Index: 0 */
1831 1831 sd_pm_iostart, /* Index: 1 */
1832 1832 sd_core_iostart, /* Index: 2 */
1833 1833
1834 1834 /* Chain for buf IO for disk drive targets (PM disabled) */
1835 1835 sd_mapblockaddr_iostart, /* Index: 3 */
1836 1836 sd_core_iostart, /* Index: 4 */
1837 1837
1838 1838 /*
1839 1839 * Chain for buf IO for removable-media or large sector size
1840 1840 * disk drive targets with RMW needed (PM enabled)
1841 1841 */
1842 1842 sd_mapblockaddr_iostart, /* Index: 5 */
1843 1843 sd_mapblocksize_iostart, /* Index: 6 */
1844 1844 sd_pm_iostart, /* Index: 7 */
1845 1845 sd_core_iostart, /* Index: 8 */
1846 1846
1847 1847 /*
1848 1848 * Chain for buf IO for removable-media or large sector size
1849 1849 * disk drive targets with RMW needed (PM disabled)
1850 1850 */
1851 1851 sd_mapblockaddr_iostart, /* Index: 9 */
1852 1852 sd_mapblocksize_iostart, /* Index: 10 */
1853 1853 sd_core_iostart, /* Index: 11 */
1854 1854
1855 1855 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1856 1856 sd_mapblockaddr_iostart, /* Index: 12 */
1857 1857 sd_checksum_iostart, /* Index: 13 */
1858 1858 sd_pm_iostart, /* Index: 14 */
1859 1859 sd_core_iostart, /* Index: 15 */
1860 1860
1861 1861 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1862 1862 sd_mapblockaddr_iostart, /* Index: 16 */
1863 1863 sd_checksum_iostart, /* Index: 17 */
1864 1864 sd_core_iostart, /* Index: 18 */
1865 1865
1866 1866 /* Chain for USCSI commands (all targets) */
1867 1867 sd_pm_iostart, /* Index: 19 */
1868 1868 sd_core_iostart, /* Index: 20 */
1869 1869
1870 1870 /* Chain for checksumming USCSI commands (all targets) */
1871 1871 sd_checksum_uscsi_iostart, /* Index: 21 */
1872 1872 sd_pm_iostart, /* Index: 22 */
1873 1873 sd_core_iostart, /* Index: 23 */
1874 1874
1875 1875 /* Chain for "direct" USCSI commands (all targets) */
1876 1876 sd_core_iostart, /* Index: 24 */
1877 1877
1878 1878 /* Chain for "direct priority" USCSI commands (all targets) */
1879 1879 sd_core_iostart, /* Index: 25 */
1880 1880
1881 1881 /*
1882 1882 * Chain for buf IO for large sector size disk drive targets
1883 1883 * with RMW needed with checksumming (PM enabled)
1884 1884 */
1885 1885 sd_mapblockaddr_iostart, /* Index: 26 */
1886 1886 sd_mapblocksize_iostart, /* Index: 27 */
1887 1887 sd_checksum_iostart, /* Index: 28 */
1888 1888 sd_pm_iostart, /* Index: 29 */
1889 1889 sd_core_iostart, /* Index: 30 */
1890 1890
1891 1891 /*
1892 1892 * Chain for buf IO for large sector size disk drive targets
1893 1893 * with RMW needed with checksumming (PM disabled)
1894 1894 */
1895 1895 sd_mapblockaddr_iostart, /* Index: 31 */
1896 1896 sd_mapblocksize_iostart, /* Index: 32 */
1897 1897 sd_checksum_iostart, /* Index: 33 */
1898 1898 sd_core_iostart, /* Index: 34 */
1899 1899
1900 1900 };
1901 1901
1902 1902 /*
1903 1903 * Macros to locate the first function of each iostart chain in the
1904 1904 * sd_iostart_chain[] array. These are located by the index in the array.
1905 1905 */
1906 1906 #define SD_CHAIN_DISK_IOSTART 0
1907 1907 #define SD_CHAIN_DISK_IOSTART_NO_PM 3
1908 1908 #define SD_CHAIN_MSS_DISK_IOSTART 5
1909 1909 #define SD_CHAIN_RMMEDIA_IOSTART 5
1910 1910 #define SD_CHAIN_MSS_DISK_IOSTART_NO_PM 9
1911 1911 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9
1912 1912 #define SD_CHAIN_CHKSUM_IOSTART 12
1913 1913 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16
1914 1914 #define SD_CHAIN_USCSI_CMD_IOSTART 19
1915 1915 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21
1916 1916 #define SD_CHAIN_DIRECT_CMD_IOSTART 24
1917 1917 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25
1918 1918 #define SD_CHAIN_MSS_CHKSUM_IOSTART 26
1919 1919 #define SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM 31
1920 1920
1921 1921
1922 1922 /*
1923 1923 * Table of function pointers for the iodone-side routines for the driver-
1924 1924 * internal layering mechanism. The calling sequence for iodone routines
1925 1925 * uses a decrementing table index, so the last routine called in a chain
1926 1926 * must be at the lowest array index location for that chain. The last
1927 1927 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs)
1928 1928 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering
1929 1929 * of the functions in an iodone side chain must correspond to the ordering
1930 1930 * of the iostart routines for that chain. Note that there is no iodone
1931 1931 * side routine that corresponds to sd_core_iostart(), so there is no
1932 1932 * entry in the table for this.
1933 1933 */
1934 1934
1935 1935 static sd_chain_t sd_iodone_chain[] = {
1936 1936
1937 1937 /* Chain for buf IO for disk drive targets (PM enabled) */
1938 1938 sd_buf_iodone, /* Index: 0 */
1939 1939 sd_mapblockaddr_iodone, /* Index: 1 */
1940 1940 sd_pm_iodone, /* Index: 2 */
1941 1941
1942 1942 /* Chain for buf IO for disk drive targets (PM disabled) */
1943 1943 sd_buf_iodone, /* Index: 3 */
1944 1944 sd_mapblockaddr_iodone, /* Index: 4 */
1945 1945
1946 1946 /*
1947 1947 * Chain for buf IO for removable-media or large sector size
1948 1948 * disk drive targets with RMW needed (PM enabled)
1949 1949 */
1950 1950 sd_buf_iodone, /* Index: 5 */
1951 1951 sd_mapblockaddr_iodone, /* Index: 6 */
1952 1952 sd_mapblocksize_iodone, /* Index: 7 */
1953 1953 sd_pm_iodone, /* Index: 8 */
1954 1954
1955 1955 /*
1956 1956 * Chain for buf IO for removable-media or large sector size
1957 1957 * disk drive targets with RMW needed (PM disabled)
1958 1958 */
1959 1959 sd_buf_iodone, /* Index: 9 */
1960 1960 sd_mapblockaddr_iodone, /* Index: 10 */
1961 1961 sd_mapblocksize_iodone, /* Index: 11 */
1962 1962
1963 1963 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1964 1964 sd_buf_iodone, /* Index: 12 */
1965 1965 sd_mapblockaddr_iodone, /* Index: 13 */
1966 1966 sd_checksum_iodone, /* Index: 14 */
1967 1967 sd_pm_iodone, /* Index: 15 */
1968 1968
1969 1969 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1970 1970 sd_buf_iodone, /* Index: 16 */
1971 1971 sd_mapblockaddr_iodone, /* Index: 17 */
1972 1972 sd_checksum_iodone, /* Index: 18 */
1973 1973
1974 1974 /* Chain for USCSI commands (non-checksum targets) */
1975 1975 sd_uscsi_iodone, /* Index: 19 */
1976 1976 sd_pm_iodone, /* Index: 20 */
1977 1977
1978 1978 /* Chain for USCSI commands (checksum targets) */
1979 1979 sd_uscsi_iodone, /* Index: 21 */
1980 1980 sd_checksum_uscsi_iodone, /* Index: 22 */
1981 1981 	sd_pm_iodone,			/* Index: 23 */
1982 1982
1983 1983 /* Chain for "direct" USCSI commands (all targets) */
1984 1984 sd_uscsi_iodone, /* Index: 24 */
1985 1985
1986 1986 /* Chain for "direct priority" USCSI commands (all targets) */
1987 1987 sd_uscsi_iodone, /* Index: 25 */
1988 1988
1989 1989 /*
1990 1990 * Chain for buf IO for large sector size disk drive targets
1991 1991 * with checksumming (PM enabled)
1992 1992 */
1993 1993 sd_buf_iodone, /* Index: 26 */
1994 1994 sd_mapblockaddr_iodone, /* Index: 27 */
1995 1995 sd_mapblocksize_iodone, /* Index: 28 */
1996 1996 sd_checksum_iodone, /* Index: 29 */
1997 1997 sd_pm_iodone, /* Index: 30 */
1998 1998
1999 1999 /*
2000 2000 * Chain for buf IO for large sector size disk drive targets
2001 2001 * with checksumming (PM disabled)
2002 2002 */
2003 2003 sd_buf_iodone, /* Index: 31 */
2004 2004 sd_mapblockaddr_iodone, /* Index: 32 */
2005 2005 sd_mapblocksize_iodone, /* Index: 33 */
2006 2006 sd_checksum_iodone, /* Index: 34 */
2007 2007 };
2008 2008
2009 2009
2010 2010 /*
2011 2011 * Macros to locate the "first" function in the sd_iodone_chain[] array for
2012 2012 * each iodone-side chain. These are located by the array index, but as the
2013 2013 * iodone side functions are called in a decrementing-index order, the
2014 2014 * highest index number in each chain must be specified (as these correspond
2015 2015 * to the first function in the iodone chain that will be called by the core
2016 2016 * at IO completion time).
2017 2017 */
2018 2018
2019 2019 #define SD_CHAIN_DISK_IODONE 2
2020 2020 #define SD_CHAIN_DISK_IODONE_NO_PM 4
2021 2021 #define SD_CHAIN_RMMEDIA_IODONE 8
2022 2022 #define SD_CHAIN_MSS_DISK_IODONE 8
2023 2023 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11
2024 2024 #define SD_CHAIN_MSS_DISK_IODONE_NO_PM 11
2025 2025 #define SD_CHAIN_CHKSUM_IODONE 15
2026 2026 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18
2027 2027 #define SD_CHAIN_USCSI_CMD_IODONE 20
2028 2028 #define SD_CHAIN_USCSI_CHKSUM_IODONE 23
2029 2029 #define SD_CHAIN_DIRECT_CMD_IODONE 24
2030 2030 #define SD_CHAIN_PRIORITY_CMD_IODONE 25
2031 2031 #define SD_CHAIN_MSS_CHKSUM_IODONE 30
2032 2032 #define SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM 34
2033 2033
2034 2034
2035 2035
2036 2036 /*
2037 2037 * Array to map a layering chain index to the appropriate initpkt routine.
2038 2038 * The redundant entries are present so that the index used for accessing
2039 2039 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2040 2040 * with this table as well.
2041 2041 */
2042 2042 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
2043 2043
2044 2044 static sd_initpkt_t sd_initpkt_map[] = {
2045 2045
2046 2046 /* Chain for buf IO for disk drive targets (PM enabled) */
2047 2047 sd_initpkt_for_buf, /* Index: 0 */
2048 2048 sd_initpkt_for_buf, /* Index: 1 */
2049 2049 sd_initpkt_for_buf, /* Index: 2 */
2050 2050
2051 2051 /* Chain for buf IO for disk drive targets (PM disabled) */
2052 2052 sd_initpkt_for_buf, /* Index: 3 */
2053 2053 sd_initpkt_for_buf, /* Index: 4 */
2054 2054
2055 2055 /*
2056 2056 * Chain for buf IO for removable-media or large sector size
2057 2057 * disk drive targets (PM enabled)
2058 2058 */
2059 2059 sd_initpkt_for_buf, /* Index: 5 */
2060 2060 sd_initpkt_for_buf, /* Index: 6 */
2061 2061 sd_initpkt_for_buf, /* Index: 7 */
2062 2062 sd_initpkt_for_buf, /* Index: 8 */
2063 2063
2064 2064 /*
2065 2065 * Chain for buf IO for removable-media or large sector size
2066 2066 * disk drive targets (PM disabled)
2067 2067 */
2068 2068 sd_initpkt_for_buf, /* Index: 9 */
2069 2069 sd_initpkt_for_buf, /* Index: 10 */
2070 2070 sd_initpkt_for_buf, /* Index: 11 */
2071 2071
2072 2072 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2073 2073 sd_initpkt_for_buf, /* Index: 12 */
2074 2074 sd_initpkt_for_buf, /* Index: 13 */
2075 2075 sd_initpkt_for_buf, /* Index: 14 */
2076 2076 sd_initpkt_for_buf, /* Index: 15 */
2077 2077
2078 2078 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2079 2079 sd_initpkt_for_buf, /* Index: 16 */
2080 2080 sd_initpkt_for_buf, /* Index: 17 */
2081 2081 sd_initpkt_for_buf, /* Index: 18 */
2082 2082
2083 2083 /* Chain for USCSI commands (non-checksum targets) */
2084 2084 sd_initpkt_for_uscsi, /* Index: 19 */
2085 2085 sd_initpkt_for_uscsi, /* Index: 20 */
2086 2086
2087 2087 /* Chain for USCSI commands (checksum targets) */
2088 2088 sd_initpkt_for_uscsi, /* Index: 21 */
2089 2089 sd_initpkt_for_uscsi, /* Index: 22 */
2090 2090 	sd_initpkt_for_uscsi,		/* Index: 23 */
2091 2091
2092 2092 /* Chain for "direct" USCSI commands (all targets) */
2093 2093 sd_initpkt_for_uscsi, /* Index: 24 */
2094 2094
2095 2095 /* Chain for "direct priority" USCSI commands (all targets) */
2096 2096 sd_initpkt_for_uscsi, /* Index: 25 */
2097 2097
2098 2098 /*
2099 2099 * Chain for buf IO for large sector size disk drive targets
2100 2100 * with checksumming (PM enabled)
2101 2101 */
2102 2102 sd_initpkt_for_buf, /* Index: 26 */
2103 2103 sd_initpkt_for_buf, /* Index: 27 */
2104 2104 sd_initpkt_for_buf, /* Index: 28 */
2105 2105 sd_initpkt_for_buf, /* Index: 29 */
2106 2106 sd_initpkt_for_buf, /* Index: 30 */
2107 2107
2108 2108 /*
2109 2109 * Chain for buf IO for large sector size disk drive targets
2110 2110 * with checksumming (PM disabled)
2111 2111 */
2112 2112 sd_initpkt_for_buf, /* Index: 31 */
2113 2113 sd_initpkt_for_buf, /* Index: 32 */
2114 2114 sd_initpkt_for_buf, /* Index: 33 */
2115 2115 sd_initpkt_for_buf, /* Index: 34 */
2116 2116 };
2117 2117
2118 2118
2119 2119 /*
2120 2120  * Array to map a layering chain index to the appropriate destroypkt routine.
2121 2121 * The redundant entries are present so that the index used for accessing
2122 2122 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2123 2123 * with this table as well.
2124 2124 */
2125 2125 typedef void (*sd_destroypkt_t)(struct buf *);
2126 2126
2127 2127 static sd_destroypkt_t sd_destroypkt_map[] = {
2128 2128
2129 2129 /* Chain for buf IO for disk drive targets (PM enabled) */
2130 2130 sd_destroypkt_for_buf, /* Index: 0 */
2131 2131 sd_destroypkt_for_buf, /* Index: 1 */
2132 2132 sd_destroypkt_for_buf, /* Index: 2 */
2133 2133
2134 2134 /* Chain for buf IO for disk drive targets (PM disabled) */
2135 2135 sd_destroypkt_for_buf, /* Index: 3 */
2136 2136 sd_destroypkt_for_buf, /* Index: 4 */
2137 2137
2138 2138 /*
2139 2139 * Chain for buf IO for removable-media or large sector size
2140 2140 * disk drive targets (PM enabled)
2141 2141 */
2142 2142 sd_destroypkt_for_buf, /* Index: 5 */
2143 2143 sd_destroypkt_for_buf, /* Index: 6 */
2144 2144 sd_destroypkt_for_buf, /* Index: 7 */
2145 2145 sd_destroypkt_for_buf, /* Index: 8 */
2146 2146
2147 2147 /*
2148 2148 * Chain for buf IO for removable-media or large sector size
2149 2149 * disk drive targets (PM disabled)
2150 2150 */
2151 2151 sd_destroypkt_for_buf, /* Index: 9 */
2152 2152 sd_destroypkt_for_buf, /* Index: 10 */
2153 2153 sd_destroypkt_for_buf, /* Index: 11 */
2154 2154
2155 2155 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2156 2156 sd_destroypkt_for_buf, /* Index: 12 */
2157 2157 sd_destroypkt_for_buf, /* Index: 13 */
2158 2158 sd_destroypkt_for_buf, /* Index: 14 */
2159 2159 sd_destroypkt_for_buf, /* Index: 15 */
2160 2160
2161 2161 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2162 2162 sd_destroypkt_for_buf, /* Index: 16 */
2163 2163 sd_destroypkt_for_buf, /* Index: 17 */
2164 2164 sd_destroypkt_for_buf, /* Index: 18 */
2165 2165
2166 2166 /* Chain for USCSI commands (non-checksum targets) */
2167 2167 sd_destroypkt_for_uscsi, /* Index: 19 */
2168 2168 sd_destroypkt_for_uscsi, /* Index: 20 */
2169 2169
2170 2170 /* Chain for USCSI commands (checksum targets) */
2171 2171 sd_destroypkt_for_uscsi, /* Index: 21 */
2172 2172 sd_destroypkt_for_uscsi, /* Index: 22 */
2173 2173 	sd_destroypkt_for_uscsi,	/* Index: 23 */
2174 2174
2175 2175 /* Chain for "direct" USCSI commands (all targets) */
2176 2176 sd_destroypkt_for_uscsi, /* Index: 24 */
2177 2177
2178 2178 /* Chain for "direct priority" USCSI commands (all targets) */
2179 2179 sd_destroypkt_for_uscsi, /* Index: 25 */
2180 2180
2181 2181 /*
2182 2182 * Chain for buf IO for large sector size disk drive targets
2183 2183 	 * with checksumming (PM enabled)
2184 2184 */
2185 2185 sd_destroypkt_for_buf, /* Index: 26 */
2186 2186 sd_destroypkt_for_buf, /* Index: 27 */
2187 2187 sd_destroypkt_for_buf, /* Index: 28 */
2188 2188 sd_destroypkt_for_buf, /* Index: 29 */
2189 2189 sd_destroypkt_for_buf, /* Index: 30 */
2190 2190
2191 2191 /*
2192 2192 * Chain for buf IO for large sector size disk drive targets
2193 2193 	 * with checksumming (PM disabled)
2194 2194 */
2195 2195 sd_destroypkt_for_buf, /* Index: 31 */
2196 2196 sd_destroypkt_for_buf, /* Index: 32 */
2197 2197 sd_destroypkt_for_buf, /* Index: 33 */
2198 2198 sd_destroypkt_for_buf, /* Index: 34 */
2199 2199 };
2200 2200
2201 2201
2202 2202
2203 2203 /*
2204 2204 * Array to map a layering chain index to the appropriate chain "type".
2205 2205 * The chain type indicates a specific property/usage of the chain.
2206 2206 * The redundant entries are present so that the index used for accessing
2207 2207 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2208 2208 * with this table as well.
2209 2209 */
2210 2210
2211 2211 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */
2212 2212 #define SD_CHAIN_BUFIO 1 /* regular buf IO */
2213 2213 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */
2214 2214 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */
2215 2215 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */
2216 2216 /* (for error recovery) */
2217 2217
2218 2218 static int sd_chain_type_map[] = {
2219 2219
2220 2220 /* Chain for buf IO for disk drive targets (PM enabled) */
2221 2221 SD_CHAIN_BUFIO, /* Index: 0 */
2222 2222 SD_CHAIN_BUFIO, /* Index: 1 */
2223 2223 SD_CHAIN_BUFIO, /* Index: 2 */
2224 2224
2225 2225 /* Chain for buf IO for disk drive targets (PM disabled) */
2226 2226 SD_CHAIN_BUFIO, /* Index: 3 */
2227 2227 SD_CHAIN_BUFIO, /* Index: 4 */
2228 2228
2229 2229 /*
2230 2230 * Chain for buf IO for removable-media or large sector size
2231 2231 * disk drive targets (PM enabled)
2232 2232 */
2233 2233 SD_CHAIN_BUFIO, /* Index: 5 */
2234 2234 SD_CHAIN_BUFIO, /* Index: 6 */
2235 2235 SD_CHAIN_BUFIO, /* Index: 7 */
2236 2236 SD_CHAIN_BUFIO, /* Index: 8 */
2237 2237
2238 2238 /*
2239 2239 * Chain for buf IO for removable-media or large sector size
2240 2240 * disk drive targets (PM disabled)
2241 2241 */
2242 2242 SD_CHAIN_BUFIO, /* Index: 9 */
2243 2243 SD_CHAIN_BUFIO, /* Index: 10 */
2244 2244 SD_CHAIN_BUFIO, /* Index: 11 */
2245 2245
2246 2246 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2247 2247 SD_CHAIN_BUFIO, /* Index: 12 */
2248 2248 SD_CHAIN_BUFIO, /* Index: 13 */
2249 2249 SD_CHAIN_BUFIO, /* Index: 14 */
2250 2250 SD_CHAIN_BUFIO, /* Index: 15 */
2251 2251
2252 2252 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2253 2253 SD_CHAIN_BUFIO, /* Index: 16 */
2254 2254 SD_CHAIN_BUFIO, /* Index: 17 */
2255 2255 SD_CHAIN_BUFIO, /* Index: 18 */
2256 2256
2257 2257 /* Chain for USCSI commands (non-checksum targets) */
2258 2258 SD_CHAIN_USCSI, /* Index: 19 */
2259 2259 SD_CHAIN_USCSI, /* Index: 20 */
2260 2260
2261 2261 /* Chain for USCSI commands (checksum targets) */
2262 2262 SD_CHAIN_USCSI, /* Index: 21 */
2263 2263 SD_CHAIN_USCSI, /* Index: 22 */
2264 2264 SD_CHAIN_USCSI, /* Index: 23 */
2265 2265
2266 2266 /* Chain for "direct" USCSI commands (all targets) */
2267 2267 SD_CHAIN_DIRECT, /* Index: 24 */
2268 2268
2269 2269 /* Chain for "direct priority" USCSI commands (all targets) */
2270 2270 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */
2271 2271
2272 2272 /*
2273 2273 * Chain for buf IO for large sector size disk drive targets
2274 2274 * with checksumming (PM enabled)
2275 2275 */
2276 2276 SD_CHAIN_BUFIO, /* Index: 26 */
2277 2277 SD_CHAIN_BUFIO, /* Index: 27 */
2278 2278 SD_CHAIN_BUFIO, /* Index: 28 */
2279 2279 SD_CHAIN_BUFIO, /* Index: 29 */
2280 2280 SD_CHAIN_BUFIO, /* Index: 30 */
2281 2281
2282 2282 /*
2283 2283 * Chain for buf IO for large sector size disk drive targets
2284 2284 * with checksumming (PM disabled)
2285 2285 */
2286 2286 SD_CHAIN_BUFIO, /* Index: 31 */
2287 2287 SD_CHAIN_BUFIO, /* Index: 32 */
2288 2288 SD_CHAIN_BUFIO, /* Index: 33 */
2289 2289 SD_CHAIN_BUFIO, /* Index: 34 */
2290 2290 };
2291 2291
2292 2292
2293 2293 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
2294 2294 #define SD_IS_BUFIO(xp) \
2295 2295 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2296 2296
2297 2297 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2298 2298 #define SD_IS_DIRECT_PRIORITY(xp) \
2299 2299 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2300 2300
2301 2301
2302 2302
2303 2303 /*
2304 2304 * Struct, array, and macros to map a specific chain to the appropriate
2305 2305 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2306 2306 *
2307 2307 * The sd_chain_index_map[] array is used at attach time to set the various
2308 2308 * un_xxx_chain type members of the sd_lun softstate to the specific layering
2309 2309 * chain to be used with the instance. This allows different instances to use
2310 2310  * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2311 2311  * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2312 2312  * values at sd_xbuf init time, this allows (1) layering chains to be changed
2313 2313  * dynamically and without locking; and (2) a layer to update the
2314 2314 * xb_chain_io[start|done] member in a given xbuf with its current index value,
2315 2315 * to allow for deferred processing of an IO within the same chain from a
2316 2316 * different execution context.
2317 2317 */
2318 2318
2319 2319 struct sd_chain_index {
2320 2320 int sci_iostart_index;
2321 2321 int sci_iodone_index;
2322 2322 };
2323 2323
2324 2324 static struct sd_chain_index sd_chain_index_map[] = {
2325 2325 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE },
2326 2326 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM },
2327 2327 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE },
2328 2328 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM },
2329 2329 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE },
2330 2330 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM },
2331 2331 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE },
2332 2332 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE },
2333 2333 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE },
2334 2334 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE },
2335 2335 { SD_CHAIN_MSS_CHKSUM_IOSTART, SD_CHAIN_MSS_CHKSUM_IODONE },
2336 2336 { SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM, SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM },
2337 2337
2338 2338 };
2339 2339
2340 2340
2341 2341 /*
2342 2342 * The following are indexes into the sd_chain_index_map[] array.
2343 2343 */
2344 2344
2345 2345 /* un->un_buf_chain_type must be set to one of these */
2346 2346 #define SD_CHAIN_INFO_DISK 0
2347 2347 #define SD_CHAIN_INFO_DISK_NO_PM 1
2348 2348 #define SD_CHAIN_INFO_RMMEDIA 2
2349 2349 #define SD_CHAIN_INFO_MSS_DISK 2
2350 2350 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3
2351 2351 #define SD_CHAIN_INFO_MSS_DSK_NO_PM 3
2352 2352 #define SD_CHAIN_INFO_CHKSUM 4
2353 2353 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5
2354 2354 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM 10
2355 2355 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM_NO_PM 11
2356 2356
2357 2357 /* un->un_uscsi_chain_type must be set to one of these */
2358 2358 #define SD_CHAIN_INFO_USCSI_CMD 6
2359 2359 /* USCSI with PM disabled is the same as DIRECT */
2360 2360 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8
2361 2361 #define SD_CHAIN_INFO_USCSI_CHKSUM 7
2362 2362
2363 2363 /* un->un_direct_chain_type must be set to one of these */
2364 2364 #define SD_CHAIN_INFO_DIRECT_CMD 8
2365 2365
2366 2366 /* un->un_priority_chain_type must be set to one of these */
2367 2367 #define SD_CHAIN_INFO_PRIORITY_CMD 9
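/*
 * Illustrative sketch (uses the un_buf_chain_type and
 * xb_chain_io[start|done] members described above): attach-time code
 * selects a chain for the instance, and sd_xbuf_init() seeds each xbuf
 * from sd_chain_index_map[] so the IO can later be (re)started from any
 * execution context:
 *
 *	int i = un->un_buf_chain_type;		(e.g. SD_CHAIN_INFO_DISK)
 *	xp->xb_chain_iostart = sd_chain_index_map[i].sci_iostart_index;
 *	xp->xb_chain_iodone  = sd_chain_index_map[i].sci_iodone_index;
 */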
2368 2368
2369 2369 /* size for devid inquiries */
2370 2370 #define MAX_INQUIRY_SIZE 0xF0
2371 2371
2372 2372 /*
2373 2373 * Macros used by functions to pass a given buf(9S) struct along to the
2374 2374 * next function in the layering chain for further processing.
2375 2375 *
2376 2376 * In the following macros, passing more than three arguments to the called
2377 2377 * routines causes the optimizer for the SPARC compiler to stop doing tail
2378 2378 * call elimination which results in significant performance degradation.
2379 2379 */
2380 2380 #define SD_BEGIN_IOSTART(index, un, bp) \
2381 2381 ((*(sd_iostart_chain[index]))(index, un, bp))
2382 2382
2383 2383 #define SD_BEGIN_IODONE(index, un, bp) \
2384 2384 ((*(sd_iodone_chain[index]))(index, un, bp))
2385 2385
2386 2386 #define SD_NEXT_IOSTART(index, un, bp) \
2387 2387 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp))
2388 2388
2389 2389 #define SD_NEXT_IODONE(index, un, bp) \
2390 2390 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp))
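/*
 * Illustrative sketch (hypothetical layer, not part of this driver): a
 * minimal iostart-side layer does its per-layer work on bp and then hands
 * the buf to the next function in the chain; its iodone twin undoes that
 * work and passes the buf back down:
 *
 *	static void
 *	sd_example_iostart(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		(per-layer setup on bp goes here)
 *		SD_NEXT_IOSTART(index, un, bp);
 *	}
 *
 *	static void
 *	sd_example_iodone(int index, struct sd_lun *un, struct buf *bp)
 *	{
 *		(per-layer teardown on bp goes here)
 *		SD_NEXT_IODONE(index, un, bp);
 *	}
 */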
2391 2391
2392 2392 /*
2393 2393 * Function: _init
2394 2394 *
2395 2395 * Description: This is the driver _init(9E) entry point.
2396 2396 *
2397 2397 * Return Code: Returns the value from mod_install(9F) or
2398 2398 * ddi_soft_state_init(9F) as appropriate.
2399 2399 *
2400 2400 * Context: Called when driver module loaded.
2401 2401 */
2402 2402
2403 2403 int
2404 2404 _init(void)
2405 2405 {
2406 2406 int err;
2407 2407
2408 2408 /* establish driver name from module name */
2409 2409 sd_label = (char *)mod_modname(&modlinkage);
2410 2410
2411 2411 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
2412 2412 SD_MAXUNIT);
2413 2413 if (err != 0) {
2414 2414 return (err);
2415 2415 }
2416 2416
2417 2417 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
2418 2418 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
2419 2419 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);
2420 2420
2421 2421 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
2422 2422 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
2423 2423 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);
2424 2424
2425 2425 /*
2426 2426 	 * it's OK to init here even for fibre devices
2427 2427 */
2428 2428 sd_scsi_probe_cache_init();
2429 2429
2430 2430 sd_scsi_target_lun_init();
2431 2431
2432 2432 /*
2433 2433 * Creating taskq before mod_install ensures that all callers (threads)
2434 2434 * that enter the module after a successful mod_install encounter
2435 2435 * a valid taskq.
2436 2436 */
2437 2437 sd_taskq_create();
2438 2438
2439 2439 err = mod_install(&modlinkage);
2440 2440 if (err != 0) {
2441 2441 /* delete taskq if install fails */
2442 2442 sd_taskq_delete();
2443 2443
2444 2444 mutex_destroy(&sd_detach_mutex);
2445 2445 mutex_destroy(&sd_log_mutex);
2446 2446 mutex_destroy(&sd_label_mutex);
2447 2447
2448 2448 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2449 2449 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2450 2450 cv_destroy(&sd_tr.srq_inprocess_cv);
2451 2451
2452 2452 sd_scsi_probe_cache_fini();
2453 2453
2454 2454 sd_scsi_target_lun_fini();
2455 2455
2456 2456 ddi_soft_state_fini(&sd_state);
2457 2457
2458 2458 return (err);
2459 2459 }
2460 2460
2461 2461 return (err);
2462 2462 }
2463 2463
2464 2464
2465 2465 /*
2466 2466 * Function: _fini
2467 2467 *
2468 2468 * Description: This is the driver _fini(9E) entry point.
2469 2469 *
2470 2470 * Return Code: Returns the value from mod_remove(9F)
2471 2471 *
2472 2472 * Context: Called when driver module is unloaded.
2473 2473 */
2474 2474
2475 2475 int
2476 2476 _fini(void)
2477 2477 {
2478 2478 int err;
2479 2479
2480 2480 if ((err = mod_remove(&modlinkage)) != 0) {
2481 2481 return (err);
2482 2482 }
2483 2483
2484 2484 sd_taskq_delete();
2485 2485
2486 2486 mutex_destroy(&sd_detach_mutex);
2487 2487 mutex_destroy(&sd_log_mutex);
2488 2488 mutex_destroy(&sd_label_mutex);
2489 2489 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2490 2490
2491 2491 sd_scsi_probe_cache_fini();
2492 2492
2493 2493 sd_scsi_target_lun_fini();
2494 2494
2495 2495 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2496 2496 cv_destroy(&sd_tr.srq_inprocess_cv);
2497 2497
2498 2498 ddi_soft_state_fini(&sd_state);
2499 2499
2500 2500 return (err);
2501 2501 }
2502 2502
2503 2503
2504 2504 /*
2505 2505 * Function: _info
2506 2506 *
2507 2507 * Description: This is the driver _info(9E) entry point.
2508 2508 *
2509 2509 * Arguments: modinfop - pointer to the driver modinfo structure
2510 2510 *
2511 2511 * Return Code: Returns the value from mod_info(9F).
2512 2512 *
2513 2513 * Context: Kernel thread context
2514 2514 */
2515 2515
2516 2516 int
2517 2517 _info(struct modinfo *modinfop)
2518 2518 {
2519 2519 return (mod_info(&modlinkage, modinfop));
2520 2520 }
2521 2521
2522 2522
2523 2523 /*
2524 2524 * The following routines implement the driver message logging facility.
2525 2525  * They provide component- and level-based debug output filtering.
2526 2526 * Output may also be restricted to messages for a single instance by
2527 2527 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
2528 2528 * to NULL, then messages for all instances are printed.
2529 2529 *
2530 2530 * These routines have been cloned from each other due to the language
2531 2531 * constraints of macros and variable argument list processing.
2532 2532 */
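/*
 * Typical usage (illustrative; the SD_ERROR/SD_INFO/SD_TRACE macros and
 * the SD_LOG_* component masks, e.g. SD_LOG_IO, are assumed to come from
 * sddef.h):
 *
 *	SD_TRACE(SD_LOG_IO, un, "sdstrategy: entry: buf:0x%p\n", bp);
 *	SD_ERROR(SD_LOG_IO, un, "sdstrategy: bad blkno %ld\n", blkno);
 */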
2533 2533
2534 2534
2535 2535 /*
2536 2536 * Function: sd_log_err
2537 2537 *
2538 2538 * Description: This routine is called by the SD_ERROR macro for debug
2539 2539 * logging of error conditions.
2540 2540 *
2541 2541 * Arguments: comp - driver component being logged
2542 2542  *		un   - pointer to the sd_lun softstate
2543 2543 * fmt - error string and format to be logged
2544 2544 */
2545 2545
2546 2546 static void
2547 2547 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
2548 2548 {
2549 2549 va_list ap;
2550 2550 dev_info_t *dev;
2551 2551
2552 2552 ASSERT(un != NULL);
2553 2553 dev = SD_DEVINFO(un);
2554 2554 ASSERT(dev != NULL);
2555 2555
2556 2556 /*
2557 2557 * Filter messages based on the global component and level masks.
2558 2558 * Also print if un matches the value of sd_debug_un, or if
2559 2559 * sd_debug_un is set to NULL.
2560 2560 */
2561 2561 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2562 2562 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2563 2563 mutex_enter(&sd_log_mutex);
2564 2564 va_start(ap, fmt);
2565 2565 (void) vsprintf(sd_log_buf, fmt, ap);
2566 2566 va_end(ap);
2567 2567 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2568 2568 mutex_exit(&sd_log_mutex);
2569 2569 }
2570 2570 #ifdef SD_FAULT_INJECTION
2571 2571 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2572 2572 if (un->sd_injection_mask & comp) {
2573 2573 mutex_enter(&sd_log_mutex);
2574 2574 va_start(ap, fmt);
2575 2575 (void) vsprintf(sd_log_buf, fmt, ap);
2576 2576 va_end(ap);
2577 2577 sd_injection_log(sd_log_buf, un);
2578 2578 mutex_exit(&sd_log_mutex);
2579 2579 }
2580 2580 #endif
2581 2581 }
2582 2582
2583 2583
2584 2584 /*
2585 2585 * Function: sd_log_info
2586 2586 *
2587 2587 * Description: This routine is called by the SD_INFO macro for debug
2588 2588 * logging of general purpose informational conditions.
2589 2589 *
2590 2590 * Arguments: comp - driver component being logged
2591 2591  *		un   - pointer to the sd_lun softstate
2592 2592 * fmt - info string and format to be logged
2593 2593 */
2594 2594
2595 2595 static void
2596 2596 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2597 2597 {
2598 2598 va_list ap;
2599 2599 dev_info_t *dev;
2600 2600
2601 2601 ASSERT(un != NULL);
2602 2602 dev = SD_DEVINFO(un);
2603 2603 ASSERT(dev != NULL);
2604 2604
2605 2605 /*
2606 2606 * Filter messages based on the global component and level masks.
2607 2607 * Also print if un matches the value of sd_debug_un, or if
2608 2608 * sd_debug_un is set to NULL.
2609 2609 */
2610 2610 if ((sd_component_mask & component) &&
2611 2611 (sd_level_mask & SD_LOGMASK_INFO) &&
2612 2612 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2613 2613 mutex_enter(&sd_log_mutex);
2614 2614 va_start(ap, fmt);
2615 2615 (void) vsprintf(sd_log_buf, fmt, ap);
2616 2616 va_end(ap);
2617 2617 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2618 2618 mutex_exit(&sd_log_mutex);
2619 2619 }
2620 2620 #ifdef SD_FAULT_INJECTION
2621 2621 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2622 2622 if (un->sd_injection_mask & component) {
2623 2623 mutex_enter(&sd_log_mutex);
2624 2624 va_start(ap, fmt);
2625 2625 (void) vsprintf(sd_log_buf, fmt, ap);
2626 2626 va_end(ap);
2627 2627 sd_injection_log(sd_log_buf, un);
2628 2628 mutex_exit(&sd_log_mutex);
2629 2629 }
2630 2630 #endif
2631 2631 }
2632 2632
2633 2633
2634 2634 /*
2635 2635 * Function: sd_log_trace
2636 2636 *
2637 2637 * Description: This routine is called by the SD_TRACE macro for debug
2638 2638 * logging of trace conditions (i.e. function entry/exit).
2639 2639 *
2640 2640 * Arguments: comp - driver component being logged
2641 2641  *		un   - pointer to the sd_lun softstate
2642 2642 * fmt - trace string and format to be logged
2643 2643 */
2644 2644
2645 2645 static void
2646 2646 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2647 2647 {
2648 2648 va_list ap;
2649 2649 dev_info_t *dev;
2650 2650
2651 2651 ASSERT(un != NULL);
2652 2652 dev = SD_DEVINFO(un);
2653 2653 ASSERT(dev != NULL);
2654 2654
2655 2655 /*
2656 2656 * Filter messages based on the global component and level masks.
2657 2657 * Also print if un matches the value of sd_debug_un, or if
2658 2658 * sd_debug_un is set to NULL.
2659 2659 */
2660 2660 if ((sd_component_mask & component) &&
2661 2661 (sd_level_mask & SD_LOGMASK_TRACE) &&
2662 2662 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2663 2663 mutex_enter(&sd_log_mutex);
2664 2664 va_start(ap, fmt);
2665 2665 (void) vsprintf(sd_log_buf, fmt, ap);
2666 2666 va_end(ap);
2667 2667 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2668 2668 mutex_exit(&sd_log_mutex);
2669 2669 }
2670 2670 #ifdef SD_FAULT_INJECTION
2671 2671 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2672 2672 if (un->sd_injection_mask & component) {
2673 2673 mutex_enter(&sd_log_mutex);
2674 2674 va_start(ap, fmt);
2675 2675 (void) vsprintf(sd_log_buf, fmt, ap);
2676 2676 va_end(ap);
2677 2677 sd_injection_log(sd_log_buf, un);
2678 2678 mutex_exit(&sd_log_mutex);
2679 2679 }
2680 2680 #endif
2681 2681 }
2682 2682
2683 2683
2684 2684 /*
2685 2685 * Function: sdprobe
2686 2686 *
2687 2687 * Description: This is the driver probe(9e) entry point function.
2688 2688 *
2689 2689 * Arguments: devi - opaque device info handle
2690 2690 *
2691 2691 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful.
2692 2692 * DDI_PROBE_FAILURE: If the probe failed.
2693 2693 * DDI_PROBE_PARTIAL: If the instance is not present now,
2694 2694 * but may be present in the future.
2695 2695 */
2696 2696
2697 2697 static int
2698 2698 sdprobe(dev_info_t *devi)
2699 2699 {
2700 2700 struct scsi_device *devp;
2701 2701 int rval;
2702 2702 int instance = ddi_get_instance(devi);
2703 2703
2704 2704 /*
2705 2705 	 * If it weren't for pln, sdprobe could actually be nulldev
2706 2706 * in the "__fibre" case.
2707 2707 */
2708 2708 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) {
2709 2709 return (DDI_PROBE_DONTCARE);
2710 2710 }
2711 2711
2712 2712 devp = ddi_get_driver_private(devi);
2713 2713
2714 2714 if (devp == NULL) {
2715 2715 		/* Oops... nexus driver is misconfigured... */
2716 2716 return (DDI_PROBE_FAILURE);
2717 2717 }
2718 2718
2719 2719 if (ddi_get_soft_state(sd_state, instance) != NULL) {
2720 2720 return (DDI_PROBE_PARTIAL);
2721 2721 }
2722 2722
2723 2723 /*
2724 2724 * Call the SCSA utility probe routine to see if we actually
2725 2725 * have a target at this SCSI nexus.
2726 2726 */
2727 2727 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) {
2728 2728 case SCSIPROBE_EXISTS:
2729 2729 switch (devp->sd_inq->inq_dtype) {
2730 2730 case DTYPE_DIRECT:
2731 2731 rval = DDI_PROBE_SUCCESS;
2732 2732 break;
2733 2733 case DTYPE_RODIRECT:
2734 2734 /* CDs etc. Can be removable media */
2735 2735 rval = DDI_PROBE_SUCCESS;
2736 2736 break;
2737 2737 case DTYPE_OPTICAL:
2738 2738 /*
2739 2739 			 * Rewritable optical drive HP115AA.
2740 2740 			 * Can also be removable media.
2741 2741 */
2742 2742
2743 2743 /*
2744 2744 * Do not attempt to bind to DTYPE_OPTICAL if
2745 2745 			 * pre-Solaris 9 SPARC sd behavior is required.
2746 2746 			 *
2747 2747 			 * If first time through and sd_dtype_optical_bind
2748 2748 			 * has not been set in /etc/system, check properties.
2749 2749 */
2750 2750
2751 2751 if (sd_dtype_optical_bind < 0) {
2752 2752 sd_dtype_optical_bind = ddi_prop_get_int
2753 2753 (DDI_DEV_T_ANY, devi, 0,
2754 2754 "optical-device-bind", 1);
2755 2755 }
2756 2756
2757 2757 if (sd_dtype_optical_bind == 0) {
2758 2758 rval = DDI_PROBE_FAILURE;
2759 2759 } else {
2760 2760 rval = DDI_PROBE_SUCCESS;
2761 2761 }
2762 2762 break;
2763 2763
2764 2764 case DTYPE_NOTPRESENT:
2765 2765 default:
2766 2766 rval = DDI_PROBE_FAILURE;
2767 2767 break;
2768 2768 }
2769 2769 break;
2770 2770 default:
2771 2771 rval = DDI_PROBE_PARTIAL;
2772 2772 break;
2773 2773 }
2774 2774
2775 2775 /*
2776 2776 * This routine checks for resource allocation prior to freeing,
2777 2777 * so it will take care of the "smart probing" case where a
2778 2778 * scsi_probe() may or may not have been issued and will *not*
2779 2779 * free previously-freed resources.
2780 2780 */
2781 2781 scsi_unprobe(devp);
2782 2782 return (rval);
2783 2783 }
2784 2784
2785 2785
2786 2786 /*
2787 2787 * Function: sdinfo
2788 2788 *
2789 2789 * Description: This is the driver getinfo(9e) entry point function.
2790 2790 * Given the device number, return the devinfo pointer from
2791 2791 * the scsi_device structure or the instance number
2792 2792 * associated with the dev_t.
2793 2793 *
2794 2794 * Arguments: dip - pointer to device info structure
2795 2795 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO,
2796 2796 * DDI_INFO_DEVT2INSTANCE)
2797 2797 * arg - driver dev_t
2798 2798 * resultp - user buffer for request response
2799 2799 *
2800 2800 * Return Code: DDI_SUCCESS
2801 2801 * DDI_FAILURE
2802 2802 */
2803 2803 /* ARGSUSED */
2804 2804 static int
2805 2805 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
2806 2806 {
2807 2807 struct sd_lun *un;
2808 2808 dev_t dev;
2809 2809 int instance;
2810 2810 int error;
2811 2811
2812 2812 switch (infocmd) {
2813 2813 case DDI_INFO_DEVT2DEVINFO:
2814 2814 dev = (dev_t)arg;
2815 2815 instance = SDUNIT(dev);
2816 2816 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
2817 2817 return (DDI_FAILURE);
2818 2818 }
2819 2819 *result = (void *) SD_DEVINFO(un);
2820 2820 error = DDI_SUCCESS;
2821 2821 break;
2822 2822 case DDI_INFO_DEVT2INSTANCE:
2823 2823 dev = (dev_t)arg;
2824 2824 instance = SDUNIT(dev);
2825 2825 *result = (void *)(uintptr_t)instance;
2826 2826 error = DDI_SUCCESS;
2827 2827 break;
2828 2828 default:
2829 2829 error = DDI_FAILURE;
2830 2830 }
2831 2831 return (error);
2832 2832 }
2833 2833
2834 2834 /*
2835 2835 * Function: sd_prop_op
2836 2836 *
2837 2837 * Description: This is the driver prop_op(9e) entry point function.
2838 2838 * Return the number of blocks for the partition in question
2839 2839 * or forward the request to the property facilities.
2840 2840 *
2841 2841 * Arguments: dev - device number
2842 2842 * dip - pointer to device info structure
2843 2843 * prop_op - property operator
2844 2844 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent
2845 2845 * name - pointer to property name
2846 2846 * valuep - pointer or address of the user buffer
2847 2847 * lengthp - property length
2848 2848 *
2849 2849 * Return Code: DDI_PROP_SUCCESS
2850 2850 * DDI_PROP_NOT_FOUND
2851 2851 * DDI_PROP_UNDEFINED
2852 2852 * DDI_PROP_NO_MEMORY
2853 2853 * DDI_PROP_BUF_TOO_SMALL
2854 2854 */
2855 2855
2856 2856 static int
2857 2857 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
2858 2858 char *name, caddr_t valuep, int *lengthp)
2859 2859 {
2860 2860 struct sd_lun *un;
2861 2861
2862 2862 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL)
2863 2863 return (ddi_prop_op(dev, dip, prop_op, mod_flags,
2864 2864 name, valuep, lengthp));
2865 2865
2866 2866 return (cmlb_prop_op(un->un_cmlbhandle,
2867 2867 dev, dip, prop_op, mod_flags, name, valuep, lengthp,
2868 2868 SDPART(dev), (void *)SD_PATH_DIRECT));
2869 2869 }
2870 2870
2871 2871 /*
2872 2872 * The following functions are for smart probing:
2873 2873 * sd_scsi_probe_cache_init()
2874 2874 * sd_scsi_probe_cache_fini()
2875 2875 * sd_scsi_clear_probe_cache()
2876 2876 * sd_scsi_probe_with_cache()
2877 2877 */
2878 2878
2879 2879 /*
2880 2880 * Function: sd_scsi_probe_cache_init
2881 2881 *
2882 2882 * Description: Initializes the probe response cache mutex and head pointer.
2883 2883 *
2884 2884 * Context: Kernel thread context
2885 2885 */
2886 2886
2887 2887 static void
2888 2888 sd_scsi_probe_cache_init(void)
2889 2889 {
2890 2890 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL);
2891 2891 sd_scsi_probe_cache_head = NULL;
2892 2892 }
2893 2893
2894 2894
2895 2895 /*
2896 2896 * Function: sd_scsi_probe_cache_fini
2897 2897 *
2898 2898 * Description: Frees all resources associated with the probe response cache.
2899 2899 *
2900 2900 * Context: Kernel thread context
2901 2901 */
2902 2902
2903 2903 static void
2904 2904 sd_scsi_probe_cache_fini(void)
2905 2905 {
2906 2906 struct sd_scsi_probe_cache *cp;
2907 2907 struct sd_scsi_probe_cache *ncp;
2908 2908
2909 2909 /* Clean up our smart probing linked list */
2910 2910 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) {
2911 2911 ncp = cp->next;
2912 2912 kmem_free(cp, sizeof (struct sd_scsi_probe_cache));
2913 2913 }
2914 2914 sd_scsi_probe_cache_head = NULL;
2915 2915 mutex_destroy(&sd_scsi_probe_cache_mutex);
2916 2916 }
2917 2917
2918 2918
2919 2919 /*
2920 2920 * Function: sd_scsi_clear_probe_cache
2921 2921 *
2922 2922 * Description: This routine clears the probe response cache. This is
2923 2923 * done when open() returns ENXIO so that when deferred
2924 2924 * attach is attempted (possibly after a device has been
2925 2925 * turned on) we will retry the probe. Since we don't know
2926 2926 * which target we failed to open, we just clear the
2927 2927 * entire cache.
2928 2928 *
2929 2929 * Context: Kernel thread context
2930 2930 */
2931 2931
2932 2932 static void
2933 2933 sd_scsi_clear_probe_cache(void)
2934 2934 {
2935 2935 struct sd_scsi_probe_cache *cp;
2936 2936 int i;
2937 2937
2938 2938 mutex_enter(&sd_scsi_probe_cache_mutex);
2939 2939 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
2940 2940 /*
2941 2941 * Reset all entries to SCSIPROBE_EXISTS. This will
2942 2942 * force probing to be performed the next time
2943 2943 * sd_scsi_probe_with_cache is called.
2944 2944 */
2945 2945 for (i = 0; i < NTARGETS_WIDE; i++) {
2946 2946 cp->cache[i] = SCSIPROBE_EXISTS;
2947 2947 }
2948 2948 }
2949 2949 mutex_exit(&sd_scsi_probe_cache_mutex);
2950 2950 }
2951 2951
2952 2952
2953 2953 /*
2954 2954 * Function: sd_scsi_probe_with_cache
2955 2955 *
2956 2956 * Description: This routine implements support for a scsi device probe
2957 2957 * with cache. The driver maintains a cache of the target
2958 2958 * responses to scsi probes. If we get no response from a
2959 2959 * target during a probe inquiry, we remember that, and we
2960 2960 * avoid additional calls to scsi_probe on non-zero LUNs
2961 2961 * on the same target until the cache is cleared. By doing
2962 2962 * so we avoid the 1/4 sec selection timeout for nonzero
2963 2963  *		LUNs. LUN 0 of a target is always probed.
2964 2964 *
2965 2965 * Arguments: devp - Pointer to a scsi_device(9S) structure
2966 2966 * waitfunc - indicates what the allocator routines should
2967 2967 * do when resources are not available. This value
2968 2968 * is passed on to scsi_probe() when that routine
2969 2969 * is called.
2970 2970 *
2971 2971 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache;
2972 2972 * otherwise the value returned by scsi_probe(9F).
2973 2973 *
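 *
 *		For example, if a probe of LUN 0 on target 3 returns
 *		SCSIPROBE_NORESP, later probes of nonzero LUNs on target 3
 *		are answered from the cache without selecting the bus,
 *		until the entry is reset by another LUN 0 probe or by
 *		sd_scsi_clear_probe_cache().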
2974 2974 * Context: Kernel thread context
2975 2975 */
2976 2976
2977 2977 static int
2978 2978 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
2979 2979 {
2980 2980 struct sd_scsi_probe_cache *cp;
2981 2981 dev_info_t *pdip = ddi_get_parent(devp->sd_dev);
2982 2982 int lun, tgt;
2983 2983
2984 2984 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
2985 2985 SCSI_ADDR_PROP_LUN, 0);
2986 2986 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
2987 2987 SCSI_ADDR_PROP_TARGET, -1);
2988 2988
2989 2989 	/* Only use the probe cache when the target number is in range */
2990 2990 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) {
2991 2991 /* do it the old way (no cache) */
2992 2992 return (scsi_probe(devp, waitfn));
2993 2993 }
2994 2994
2995 2995 mutex_enter(&sd_scsi_probe_cache_mutex);
2996 2996
2997 2997 /* Find the cache for this scsi bus instance */
2998 2998 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
2999 2999 if (cp->pdip == pdip) {
3000 3000 break;
3001 3001 }
3002 3002 }
3003 3003
3004 3004 /* If we can't find a cache for this pdip, create one */
3005 3005 if (cp == NULL) {
3006 3006 int i;
3007 3007
3008 3008 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache),
3009 3009 KM_SLEEP);
3010 3010 cp->pdip = pdip;
3011 3011 cp->next = sd_scsi_probe_cache_head;
3012 3012 sd_scsi_probe_cache_head = cp;
3013 3013 for (i = 0; i < NTARGETS_WIDE; i++) {
3014 3014 cp->cache[i] = SCSIPROBE_EXISTS;
3015 3015 }
3016 3016 }
3017 3017
3018 3018 mutex_exit(&sd_scsi_probe_cache_mutex);
3019 3019
3020 3020 /* Recompute the cache for this target if LUN zero */
3021 3021 if (lun == 0) {
3022 3022 cp->cache[tgt] = SCSIPROBE_EXISTS;
3023 3023 }
3024 3024
3025 3025 /* Don't probe if cache remembers a NORESP from a previous LUN. */
3026 3026 if (cp->cache[tgt] != SCSIPROBE_EXISTS) {
3027 3027 return (SCSIPROBE_NORESP);
3028 3028 }
3029 3029
3030 3030 /* Do the actual probe; save & return the result */
3031 3031 return (cp->cache[tgt] = scsi_probe(devp, waitfn));
3032 3032 }
3033 3033
3034 3034
3035 3035 /*
3036 3036 * Function: sd_scsi_target_lun_init
3037 3037 *
3038 3038 * Description: Initializes the attached lun chain mutex and head pointer.
3039 3039 *
3040 3040 * Context: Kernel thread context
3041 3041 */
3042 3042
3043 3043 static void
3044 3044 sd_scsi_target_lun_init(void)
3045 3045 {
3046 3046 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
3047 3047 sd_scsi_target_lun_head = NULL;
3048 3048 }
3049 3049
3050 3050
3051 3051 /*
3052 3052 * Function: sd_scsi_target_lun_fini
3053 3053 *
3054 3054 * Description: Frees all resources associated with the attached lun
3055 3055 * chain
3056 3056 *
3057 3057 * Context: Kernel thread context
3058 3058 */
3059 3059
3060 3060 static void
3061 3061 sd_scsi_target_lun_fini(void)
3062 3062 {
3063 3063 struct sd_scsi_hba_tgt_lun *cp;
3064 3064 struct sd_scsi_hba_tgt_lun *ncp;
3065 3065
3066 3066 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) {
3067 3067 ncp = cp->next;
3068 3068 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
3069 3069 }
3070 3070 sd_scsi_target_lun_head = NULL;
3071 3071 mutex_destroy(&sd_scsi_target_lun_mutex);
3072 3072 }
3073 3073
3074 3074
3075 3075 /*
3076 3076 * Function: sd_scsi_get_target_lun_count
3077 3077 *
3078 3078  * Description: This routine checks the attached lun chain to see how
3079 3079  *		many luns are attached on the specified SCSI controller
3080 3080  *		and target. Currently, some capabilities like tagged
3081 3081  *		queueing are supported per target by the HBA, so all
3082 3082  *		luns on a target have the same capabilities. Based on
3083 3083  *		this assumption, sd should set these capabilities only
3084 3084  *		once per target. This function is called when sd needs
3085 3085  *		to decide how many luns are already attached on a target.
3086 3086 *
3087 3087 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
3088 3088 * controller device.
3089 3089 * target - The target ID on the controller's SCSI bus.
3090 3090 *
3091 3091 * Return Code: The number of luns attached on the required target and
3092 3092 * controller.
3093 3093 * -1 if target ID is not in parallel SCSI scope or the given
3094 3094 * dip is not in the chain.
3095 3095 *
3096 3096 * Context: Kernel thread context
3097 3097 */
3098 3098
3099 3099 static int
3100 3100 sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
3101 3101 {
3102 3102 struct sd_scsi_hba_tgt_lun *cp;
3103 3103
3104 3104 if ((target < 0) || (target >= NTARGETS_WIDE)) {
3105 3105 return (-1);
3106 3106 }
3107 3107
3108 3108 mutex_enter(&sd_scsi_target_lun_mutex);
3109 3109
3110 3110 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
3111 3111 if (cp->pdip == dip) {
3112 3112 break;
3113 3113 }
3114 3114 }
3115 3115
3116 3116 mutex_exit(&sd_scsi_target_lun_mutex);
3117 3117
3118 3118 if (cp == NULL) {
3119 3119 return (-1);
3120 3120 }
3121 3121
3122 3122 return (cp->nlun[target]);
3123 3123 }
3124 3124
3125 3125
3126 3126 /*
3127 3127 * Function: sd_scsi_update_lun_on_target
3128 3128 *
3129 3129 * Description: This routine is used to update the attached lun chain when a
3130 3130 * lun is attached or detached on a target.
3131 3131 *
3132 3132 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
3133 3133 * controller device.
3134 3134 * target - The target ID on the controller's SCSI bus.
3135 3135  *		flag - Indicates whether the lun is attached or detached.
3136 3136 *
3137 3137 * Context: Kernel thread context
3138 3138 */
3139 3139
3140 3140 static void
3141 3141 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
3142 3142 {
3143 3143 struct sd_scsi_hba_tgt_lun *cp;
3144 3144
3145 3145 mutex_enter(&sd_scsi_target_lun_mutex);
3146 3146
3147 3147 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
3148 3148 if (cp->pdip == dip) {
3149 3149 break;
3150 3150 }
3151 3151 }
3152 3152
3153 3153 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
3154 3154 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
3155 3155 KM_SLEEP);
3156 3156 cp->pdip = dip;
3157 3157 cp->next = sd_scsi_target_lun_head;
3158 3158 sd_scsi_target_lun_head = cp;
3159 3159 }
3160 3160
3161 3161 mutex_exit(&sd_scsi_target_lun_mutex);
3162 3162
3163 3163 if (cp != NULL) {
3164 3164 if (flag == SD_SCSI_LUN_ATTACH) {
3165 3165 			cp->nlun[target]++;
3166 3166 		} else {
3167 3167 			cp->nlun[target]--;
3168 3168 }
3169 3169 }
3170 3170 }
3171 3171
3172 3172
3173 3173 /*
3174 3174 * Function: sd_spin_up_unit
3175 3175 *
3176 3176  * Description: Issues the following commands to spin up the device:
3177 3177  *		START STOP UNIT and INQUIRY.
3178 3178 *
3179 3179 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3180 3180 * structure for this target.
3181 3181 *
3182 3182 * Return Code: 0 - success
3183 3183 * EIO - failure
3184 3184 * EACCES - reservation conflict
3185 3185 *
3186 3186 * Context: Kernel thread context
3187 3187 */
3188 3188
3189 3189 static int
3190 3190 sd_spin_up_unit(sd_ssc_t *ssc)
3191 3191 {
3192 3192 size_t resid = 0;
3193 3193 int has_conflict = FALSE;
3194 3194 uchar_t *bufaddr;
3195 3195 int status;
3196 3196 struct sd_lun *un;
3197 3197
3198 3198 ASSERT(ssc != NULL);
3199 3199 un = ssc->ssc_un;
3200 3200 ASSERT(un != NULL);
3201 3201
3202 3202 /*
3203 3203 * Send a throwaway START UNIT command.
3204 3204 *
3205 3205 * If we fail on this, we don't care presently what precisely
3206 3206 * is wrong. EMC's arrays will also fail this with a check
3207 3207 * condition (0x2/0x4/0x3) if the device is "inactive," but
3208 3208 * we don't want to fail the attach because it may become
3209 3209 * "active" later.
3210 3210 	 * We don't know whether power condition is supported at
3211 3211 	 * this stage, so use the START STOP bit.
3212 3212 */
3213 3213 status = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
3214 3214 SD_TARGET_START, SD_PATH_DIRECT);
3215 3215
3216 3216 if (status != 0) {
3217 3217 if (status == EACCES)
3218 3218 has_conflict = TRUE;
3219 3219 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3220 3220 }
3221 3221
3222 3222 /*
3223 3223 * Send another INQUIRY command to the target. This is necessary for
3224 3224 * non-removable media direct access devices because their INQUIRY data
3225 3225 * may not be fully qualified until they are spun up (perhaps via the
3226 3226 	 * START command above). (Note: This seems to be needed for some
3227 3227 	 * legacy devices only.) The INQUIRY command should succeed even if a
3228 3228 * Reservation Conflict is present.
3229 3229 */
3230 3230 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
3231 3231
3232 3232 if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid)
3233 3233 != 0) {
3234 3234 kmem_free(bufaddr, SUN_INQSIZE);
3235 3235 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
3236 3236 return (EIO);
3237 3237 }
3238 3238
3239 3239 /*
3240 3240 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
3241 3241 * Note that this routine does not return a failure here even if the
3242 3242 * INQUIRY command did not return any data. This is a legacy behavior.
3243 3243 */
3244 3244 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
3245 3245 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
3246 3246 }
3247 3247
3248 3248 kmem_free(bufaddr, SUN_INQSIZE);
3249 3249
3250 3250 /* If we hit a reservation conflict above, tell the caller. */
3251 3251 if (has_conflict == TRUE) {
3252 3252 return (EACCES);
3253 3253 }
3254 3254
3255 3255 return (0);
3256 3256 }
3257 3257
3258 3258 #ifdef _LP64
3259 3259 /*
3260 3260 * Function: sd_enable_descr_sense
3261 3261 *
3262 3262 * Description: This routine attempts to select descriptor sense format
3263 3263 * using the Control mode page. Devices that support 64 bit
3264 3264 * LBAs (for >2TB luns) should also implement descriptor
3265 3265  *		sense data, so we call this function whenever we see
3266 3266  *		a lun larger than 2TB. If for some reason the device
3267 3267  *		supports 64 bit LBAs but doesn't support descriptor sense,
3268 3268  *		the mode select will presumably fail. Everything will
3269 3269 * continue to work normally except that we will not get
3270 3270 * complete sense data for commands that fail with an LBA
3271 3271 * larger than 32 bits.
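 *
 *		A sketch of the MODE SENSE data this routine parses
 *		(offsets follow from the structures used below; bd_len is
 *		read from the returned mode header):
 *
 *		+------------------------+ offset 0
 *		| struct mode_header     |
 *		+------------------------+ MODE_HEADER_LENGTH
 *		| block descriptor(s)    | bd_len bytes (0 for ATAPI)
 *		+------------------------+ MODE_HEADER_LENGTH + bd_len
 *		| control mode page      |
 *		+------------------------+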
3272 3272 *
3273 3273 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3274 3274 * structure for this target.
3275 3275 *
3276 3276 * Context: Kernel thread context only
3277 3277 */
3278 3278
3279 3279 static void
3280 3280 sd_enable_descr_sense(sd_ssc_t *ssc)
3281 3281 {
3282 3282 uchar_t *header;
3283 3283 struct mode_control_scsi3 *ctrl_bufp;
3284 3284 size_t buflen;
3285 3285 size_t bd_len;
3286 3286 int status;
3287 3287 struct sd_lun *un;
3288 3288
3289 3289 ASSERT(ssc != NULL);
3290 3290 un = ssc->ssc_un;
3291 3291 ASSERT(un != NULL);
3292 3292
3293 3293 /*
3294 3294 * Read MODE SENSE page 0xA, Control Mode Page
3295 3295 */
3296 3296 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
3297 3297 sizeof (struct mode_control_scsi3);
3298 3298 header = kmem_zalloc(buflen, KM_SLEEP);
3299 3299
3300 3300 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
3301 3301 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT);
3302 3302
3303 3303 if (status != 0) {
3304 3304 SD_ERROR(SD_LOG_COMMON, un,
3305 3305 "sd_enable_descr_sense: mode sense ctrl page failed\n");
3306 3306 goto eds_exit;
3307 3307 }
3308 3308
3309 3309 /*
3310 3310 * Determine size of Block Descriptors in order to locate
3311 3311 * the mode page data. ATAPI devices return 0, SCSI devices
3312 3312 * should return MODE_BLK_DESC_LENGTH.
3313 3313 */
3314 3314 bd_len = ((struct mode_header *)header)->bdesc_length;
3315 3315
3316 3316 /* Clear the mode data length field for MODE SELECT */
3317 3317 ((struct mode_header *)header)->length = 0;
3318 3318
3319 3319 ctrl_bufp = (struct mode_control_scsi3 *)
3320 3320 (header + MODE_HEADER_LENGTH + bd_len);
3321 3321
3322 3322 /*
3323 3323 * If the page length is smaller than the expected value,
3324 3324 * the target device doesn't support D_SENSE. Bail out here.
3325 3325 */
3326 3326 if (ctrl_bufp->mode_page.length <
3327 3327 sizeof (struct mode_control_scsi3) - 2) {
3328 3328 SD_ERROR(SD_LOG_COMMON, un,
3329 3329 "sd_enable_descr_sense: enable D_SENSE failed\n");
3330 3330 goto eds_exit;
3331 3331 }
3332 3332
3333 3333 /*
3334 3334 * Clear PS bit for MODE SELECT
3335 3335 */
3336 3336 ctrl_bufp->mode_page.ps = 0;
3337 3337
3338 3338 /*
3339 3339 * Set D_SENSE to enable descriptor sense format.
3340 3340 */
3341 3341 ctrl_bufp->d_sense = 1;
3342 3342
3343 3343 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3344 3344
3345 3345 /*
3346 3346 * Use MODE SELECT to commit the change to the D_SENSE bit
3347 3347 */
3348 3348 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header,
3349 3349 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT);
3350 3350
3351 3351 if (status != 0) {
3352 3352 SD_INFO(SD_LOG_COMMON, un,
3353 3353 "sd_enable_descr_sense: mode select ctrl page failed\n");
3354 3354 } else {
3355 3355 kmem_free(header, buflen);
3356 3356 return;
3357 3357 }
3358 3358
3359 3359 eds_exit:
3360 3360 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3361 3361 kmem_free(header, buflen);
3362 3362 }
3363 3363
3364 3364 /*
3365 3365 * Function: sd_reenable_dsense_task
3366 3366 *
3367 3367 * Description: Re-enable descriptor sense after device or bus reset
3368 3368 *
3369 3369 * Context: Executes in a taskq() thread context
3370 3370 */
3371 3371 static void
3372 3372 sd_reenable_dsense_task(void *arg)
3373 3373 {
3374 3374 struct sd_lun *un = arg;
3375 3375 sd_ssc_t *ssc;
3376 3376
3377 3377 ASSERT(un != NULL);
3378 3378
3379 3379 ssc = sd_ssc_init(un);
3380 3380 sd_enable_descr_sense(ssc);
3381 3381 sd_ssc_fini(ssc);
3382 3382 }
3383 3383 #endif /* _LP64 */
3384 3384
3385 3385 /*
3386 3386 * Function: sd_set_mmc_caps
3387 3387 *
3388 3388 * Description: This routine determines if the device is MMC compliant and if
3389 3389  *		the device supports CDDA via a mode sense of the CD/DVD
3390 3390  *		capabilities mode page (0x2A). It also checks whether the
3391 3391  *		device is a dvdram writable device.
3392 3392 *
3393 3393 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3394 3394 * structure for this target.
3395 3395 *
3396 3396 * Context: Kernel thread context only
3397 3397 */
3398 3398
3399 3399 static void
3400 3400 sd_set_mmc_caps(sd_ssc_t *ssc)
3401 3401 {
3402 3402 struct mode_header_grp2 *sense_mhp;
3403 3403 uchar_t *sense_page;
3404 3404 caddr_t buf;
3405 3405 int bd_len;
3406 3406 int status;
3407 3407 struct uscsi_cmd com;
3408 3408 int rtn;
3409 3409 uchar_t *out_data_rw, *out_data_hd;
3410 3410 uchar_t *rqbuf_rw, *rqbuf_hd;
3411 3411 uchar_t *out_data_gesn;
3412 3412 int gesn_len;
3413 3413 struct sd_lun *un;
3414 3414
3415 3415 ASSERT(ssc != NULL);
3416 3416 un = ssc->ssc_un;
3417 3417 ASSERT(un != NULL);
3418 3418
3419 3419 /*
3420 3420 	 * The flags set in this function are: mmc compliant, dvdram
3421 3421 	 * writable device, and cdda support. Initialize them to FALSE;
3422 3422 	 * if a capability is detected, it is set to TRUE.
3423 3423 */
3424 3424 un->un_f_mmc_cap = FALSE;
3425 3425 un->un_f_dvdram_writable_device = FALSE;
3426 3426 un->un_f_cfg_cdda = FALSE;
3427 3427
3428 3428 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3429 3429 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
3430 3430 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT);
3431 3431
3432 3432 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3433 3433
3434 3434 if (status != 0) {
3435 3435 /* command failed; just return */
3436 3436 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3437 3437 return;
3438 3438 }
3439 3439 /*
3440 3440 * If the mode sense request for the CDROM CAPABILITIES
3441 3441 	 * page (0x2A) succeeds, the device is assumed to be MMC.
3442 3442 */
3443 3443 un->un_f_mmc_cap = TRUE;
3444 3444
3445 3445 	/* See if GET EVENT STATUS NOTIFICATION is supported */
3446 3446 if (un->un_f_mmc_gesn_polling) {
3447 3447 gesn_len = SD_GESN_HEADER_LEN + SD_GESN_MEDIA_DATA_LEN;
3448 3448 out_data_gesn = kmem_zalloc(gesn_len, KM_SLEEP);
3449 3449
3450 3450 rtn = sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(ssc,
3451 3451 out_data_gesn, gesn_len, 1 << SD_GESN_MEDIA_CLASS);
3452 3452
3453 3453 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3454 3454
3455 3455 if ((rtn != 0) || !sd_gesn_media_data_valid(out_data_gesn)) {
3456 3456 un->un_f_mmc_gesn_polling = FALSE;
3457 3457 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3458 3458 "sd_set_mmc_caps: gesn not supported "
3459 3459 "%d %x %x %x %x\n", rtn,
3460 3460 out_data_gesn[0], out_data_gesn[1],
3461 3461 out_data_gesn[2], out_data_gesn[3]);
3462 3462 }
3463 3463
3464 3464 kmem_free(out_data_gesn, gesn_len);
3465 3465 }
3466 3466
3467 3467 /* Get to the page data */
3468 3468 sense_mhp = (struct mode_header_grp2 *)buf;
3469 3469 bd_len = (sense_mhp->bdesc_length_hi << 8) |
3470 3470 sense_mhp->bdesc_length_lo;
3471 3471 if (bd_len > MODE_BLK_DESC_LENGTH) {
3472 3472 /*
3473 3473 * We did not get back the expected block descriptor
3474 3474 * length so we cannot determine if the device supports
3475 3475 * CDDA. However, we still indicate the device is MMC
3476 3476 * according to the successful response to the page
3477 3477 * 0x2A mode sense request.
3478 3478 */
3479 3479 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3480 3480 "sd_set_mmc_caps: Mode Sense returned "
3481 3481 "invalid block descriptor length\n");
3482 3482 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3483 3483 return;
3484 3484 }
3485 3485
3486 3486 /* See if read CDDA is supported */
3487 3487 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 +
3488 3488 bd_len);
3489 3489 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE;
3490 3490
3491 3491 /* See if writing DVD RAM is supported. */
3492 3492 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE;
3493 3493 if (un->un_f_dvdram_writable_device == TRUE) {
3494 3494 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3495 3495 return;
3496 3496 }
3497 3497
3498 3498 /*
3499 3499 * If the device presents DVD or CD capabilities in the mode
3500 3500 	 * page, we can return here since an RRD will not have
3501 3501 * these capabilities.
3502 3502 */
3503 3503 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3504 3504 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3505 3505 return;
3506 3506 }
3507 3507 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3508 3508
3509 3509 /*
3510 3510 * If un->un_f_dvdram_writable_device is still FALSE,
3511 3511 	 * check for a Removable Rigid Disk (RRD). An RRD
3512 3512 * device is identified by the features RANDOM_WRITABLE and
3513 3513 * HARDWARE_DEFECT_MANAGEMENT.
3514 3514 */
3515 3515 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3516 3516 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3517 3517
3518 3518 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
3519 3519 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3520 3520 RANDOM_WRITABLE, SD_PATH_STANDARD);
3521 3521
3522 3522 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3523 3523
3524 3524 if (rtn != 0) {
3525 3525 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3526 3526 kmem_free(rqbuf_rw, SENSE_LENGTH);
3527 3527 return;
3528 3528 }
3529 3529
3530 3530 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3531 3531 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3532 3532
3533 3533 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
3534 3534 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3535 3535 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD);
3536 3536
3537 3537 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3538 3538
3539 3539 if (rtn == 0) {
3540 3540 /*
3541 3541 * We have good information, check for random writable
3542 3542 * and hardware defect features.
3543 3543 */
3544 3544 if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3545 3545 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) {
3546 3546 un->un_f_dvdram_writable_device = TRUE;
3547 3547 }
3548 3548 }
3549 3549
3550 3550 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3551 3551 kmem_free(rqbuf_rw, SENSE_LENGTH);
3552 3552 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3553 3553 kmem_free(rqbuf_hd, SENSE_LENGTH);
3554 3554 }
3555 3555
3556 3556 /*
3557 3557 * Function: sd_check_for_writable_cd
3558 3558 *
3559 3559 * Description: This routine determines if the media in the device is
3560 3560  *		writable or not. It uses the GET CONFIGURATION command
3561 3561  *		(0x46) to determine if the media is writable.
3562 3562  *
3563 3563  * Arguments:	ssc - ssc contains pointer to driver soft state (unit)
3564 3564 * path_flag - SD_PATH_DIRECT to use the USCSI "direct"
3565 3565 * chain and the normal command waitq, or
3566 3566 * SD_PATH_DIRECT_PRIORITY to use the USCSI
3567 3567 * "direct" chain and bypass the normal command
3568 3568 * waitq.
3569 3569 *
3570 3570 * Context: Never called at interrupt context.
3571 3571 */
3572 3572
3573 3573 static void
3574 3574 sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag)
3575 3575 {
3576 3576 struct uscsi_cmd com;
3577 3577 uchar_t *out_data;
3578 3578 uchar_t *rqbuf;
3579 3579 int rtn;
3580 3580 uchar_t *out_data_rw, *out_data_hd;
3581 3581 uchar_t *rqbuf_rw, *rqbuf_hd;
3582 3582 struct mode_header_grp2 *sense_mhp;
3583 3583 uchar_t *sense_page;
3584 3584 caddr_t buf;
3585 3585 int bd_len;
3586 3586 int status;
3587 3587 struct sd_lun *un;
3588 3588
3589 3589 ASSERT(ssc != NULL);
3590 3590 un = ssc->ssc_un;
3591 3591 ASSERT(un != NULL);
3592 3592 ASSERT(mutex_owned(SD_MUTEX(un)));
3593 3593
3594 3594 /*
3595 3595 	 * Initialize the writable media to FALSE; we set it to TRUE only
3596 3596 	 * if the configuration info tells us otherwise.
3597 3597 */
3598 3598 un->un_f_mmc_writable_media = FALSE;
3599 3599 mutex_exit(SD_MUTEX(un));
3600 3600
3601 3601 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
3602 3602 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3603 3603
3604 3604 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH,
3605 3605 out_data, SD_PROFILE_HEADER_LEN, path_flag);
3606 3606
3607 3607 if (rtn != 0)
3608 3608 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3609 3609
3610 3610 mutex_enter(SD_MUTEX(un));
3611 3611 if (rtn == 0) {
3612 3612 /*
3613 3613 * We have good information, check for writable DVD.
3614 3614 */
3615 3615 if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
3616 3616 un->un_f_mmc_writable_media = TRUE;
3617 3617 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3618 3618 kmem_free(rqbuf, SENSE_LENGTH);
3619 3619 return;
3620 3620 }
3621 3621 }
3622 3622
3623 3623 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3624 3624 kmem_free(rqbuf, SENSE_LENGTH);
3625 3625
3626 3626 /*
3627 3627 	 * Determine if this is an RRD type device.
3628 3628 */
3629 3629 mutex_exit(SD_MUTEX(un));
3630 3630 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3631 3631 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
3632 3632 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag);
3633 3633
3634 3634 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3635 3635
3636 3636 mutex_enter(SD_MUTEX(un));
3637 3637 if (status != 0) {
3638 3638 /* command failed; just return */
3639 3639 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3640 3640 return;
3641 3641 }
3642 3642
3643 3643 /* Get to the page data */
3644 3644 sense_mhp = (struct mode_header_grp2 *)buf;
3645 3645 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
3646 3646 if (bd_len > MODE_BLK_DESC_LENGTH) {
3647 3647 /*
3648 3648 * We did not get back the expected block descriptor length so
3649 3649 * we cannot check the mode page.
3650 3650 */
3651 3651 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3652 3652 "sd_check_for_writable_cd: Mode Sense returned "
3653 3653 "invalid block descriptor length\n");
3654 3654 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3655 3655 return;
3656 3656 }
3657 3657
3658 3658 /*
3659 3659 * If the device presents DVD or CD capabilities in the mode
3660 3660 	 * page, we can return here since an RRD device will not have
3661 3661 * these capabilities.
3662 3662 */
3663 3663 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
3664 3664 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3665 3665 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3666 3666 return;
3667 3667 }
3668 3668 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3669 3669
3670 3670 /*
3671 3671 * If un->un_f_mmc_writable_media is still FALSE,
3672 3672 	 * check for RRD type media. An RRD device is identified
3673 3673 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
3674 3674 */
3675 3675 mutex_exit(SD_MUTEX(un));
3676 3676 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3677 3677 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3678 3678
3679 3679 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
3680 3680 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3681 3681 RANDOM_WRITABLE, path_flag);
3682 3682
3683 3683 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3684 3684 if (rtn != 0) {
3685 3685 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3686 3686 kmem_free(rqbuf_rw, SENSE_LENGTH);
3687 3687 mutex_enter(SD_MUTEX(un));
3688 3688 return;
3689 3689 }
3690 3690
3691 3691 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3692 3692 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3693 3693
3694 3694 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
3695 3695 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3696 3696 HARDWARE_DEFECT_MANAGEMENT, path_flag);
3697 3697
3698 3698 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3699 3699 mutex_enter(SD_MUTEX(un));
3700 3700 if (rtn == 0) {
3701 3701 /*
3702 3702 * We have good information, check for random writable
3703 3703 * and hardware defect features as current.
3704 3704 */
3705 3705 if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3706 3706 (out_data_rw[10] & 0x1) &&
3707 3707 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
3708 3708 (out_data_hd[10] & 0x1)) {
3709 3709 un->un_f_mmc_writable_media = TRUE;
3710 3710 }
3711 3711 }
3712 3712
3713 3713 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3714 3714 kmem_free(rqbuf_rw, SENSE_LENGTH);
3715 3715 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3716 3716 kmem_free(rqbuf_hd, SENSE_LENGTH);
3717 3717 }
3718 3718
3719 3719 /*
3720 3720 * Function: sd_read_unit_properties
3721 3721 *
3722 3722 * Description: The following implements a property lookup mechanism.
3723 3723 * Properties for particular disks (keyed on vendor, model
3724 3724 * and rev numbers) are sought in the sd.conf file via
3725 3725 * sd_process_sdconf_file(), and if not found there, are
3726 3726 * looked for in a list hardcoded in this driver via
3727 3727  *		sd_process_sdconf_table(). Once located, the properties
3728 3728  *		are used to update the driver unit structure.
3729 3729 *
3730 3730 * Arguments: un - driver soft state (unit) structure
3731 3731 */
3732 3732
3733 3733 static void
3734 3734 sd_read_unit_properties(struct sd_lun *un)
3735 3735 {
3736 3736 /*
3737 3737 * sd_process_sdconf_file returns SD_FAILURE if it cannot find
3738 3738 * the "sd-config-list" property (from the sd.conf file) or if
3739 3739 * there was not a match for the inquiry vid/pid. If this event
3740 3740 * occurs the static driver configuration table is searched for
3741 3741 * a match.
3742 3742 */
3743 3743 ASSERT(un != NULL);
3744 3744 if (sd_process_sdconf_file(un) == SD_FAILURE) {
3745 3745 sd_process_sdconf_table(un);
3746 3746 }
3747 3747
3748 3748 /* check for LSI device */
3749 3749 sd_is_lsi(un);
3750 3750
3752 3752 }
3753 3753
3754 3754
3755 3755 /*
3756 3756 * Function: sd_process_sdconf_file
3757 3757 *
3758 3758 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the
3759 3759  *		driver's config file (i.e., sd.conf) and update the driver
3760 3760 * soft state structure accordingly.
3761 3761 *
3762 3762 * Arguments: un - driver soft state (unit) structure
3763 3763 *
3764 3764 * Return Code: SD_SUCCESS - The properties were successfully set according
3765 3765 * to the driver configuration file.
3766 3766 * SD_FAILURE - The driver config list was not obtained or
3767 3767 * there was no vid/pid match. This indicates that
3768 3768 * the static config table should be used.
3769 3769 *
3770 3770 * The config file has a property, "sd-config-list". Currently we support
3771 3771 * two kinds of formats. For both formats, the value of this property
3772 3772 * is a list of duplets:
3773 3773 *
3774 3774 * sd-config-list=
3775 3775 * <duplet>,
3776 3776 * [,<duplet>]*;
3777 3777 *
3778 3778 * For the improved format, where
3779 3779 *
3780 3780 * <duplet>:= "<vid+pid>","<tunable-list>"
3781 3781 *
3782 3782 * and
3783 3783 *
3784 3784 * <tunable-list>:= <tunable> [, <tunable> ]*;
3785 3785 * <tunable> = <name> : <value>
3786 3786 *
3787 3787 * The <vid+pid> is the string that is returned by the target device on a
3788 3788 * SCSI inquiry command, the <tunable-list> contains one or more tunables
3789 3789 * to apply to all target devices with the specified <vid+pid>.
3790 3790 *
3791 3791 * Each <tunable> is a "<name> : <value>" pair.
3792 3792 *
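 * For example, a hypothetical improved-format entry (vid/pid and
 * values illustrative) might read:
 *
 *	sd-config-list =
 *	    "SEAGATE ST32550W", "retries-busy:6, disksort:false";
 *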
3793 3793 * For the old format, the structure of each duplet is as follows:
3794 3794 *
3795 3795 * <duplet>:= "<vid+pid>","<data-property-name_list>"
3796 3796 *
3797 3797 * The first entry of the duplet is the device ID string (the concatenated
3798 3798 * vid & pid; not to be confused with a device_id). This is defined in
3799 3799 * the same way as in the sd_disk_table.
3800 3800 *
3801 3801 * The second part of the duplet is a string that identifies a
3802 3802 * data-property-name-list. The data-property-name-list is defined as
3803 3803 * follows:
3804 3804 *
3805 3805 * <data-property-name-list>:=<data-property-name> [<data-property-name>]
3806 3806 *
3807 3807 * The syntax of <data-property-name> depends on the <version> field.
3808 3808 *
3809 3809 * If version = SD_CONF_VERSION_1 we have the following syntax:
3810 3810 *
3811 3811 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
3812 3812 *
3813 3813 * where the prop0 value will be used to set prop0 if bit0 set in the
3814 3814 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1
3815 3815 *
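 *
 * For example, a hypothetical version-1 pairing (names and values
 * illustrative):
 *
 *	sd-config-list = "ACME    FOODISK ", "acme-data";
 *	acme-data = 1, 0x5, 10, 0, 120;
 *
 * Here the version is 1 (SD_CONF_VERSION_1), the flags word 0x5
 * selects prop0 and prop2, and the unused prop1 slot still needs a
 * placeholder value.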
3816 3816 */
3817 3817
3818 3818 static int
3819 3819 sd_process_sdconf_file(struct sd_lun *un)
3820 3820 {
3821 3821 char **config_list = NULL;
3822 3822 uint_t nelements;
3823 3823 char *vidptr;
3824 3824 int vidlen;
3825 3825 char *dnlist_ptr;
3826 3826 char *dataname_ptr;
3827 3827 char *dataname_lasts;
3828 3828 int *data_list = NULL;
3829 3829 uint_t data_list_len;
3830 3830 int rval = SD_FAILURE;
3831 3831 int i;
3832 3832
3833 3833 ASSERT(un != NULL);
3834 3834
3835 3835 /* Obtain the configuration list associated with the .conf file */
3836 3836 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un),
3837 3837 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list,
3838 3838 &config_list, &nelements) != DDI_PROP_SUCCESS) {
3839 3839 return (SD_FAILURE);
3840 3840 }
3841 3841
3842 3842 /*
3843 3843 * Compare vids in each duplet to the inquiry vid - if a match is
3844 3844 * made, get the data value and update the soft state structure
3845 3845 * accordingly.
3846 3846 *
3847 3847 	 * Each duplet should appear as a pair of strings; otherwise
3848 3848 	 * return SD_FAILURE.
3849 3849 */
3850 3850 if (nelements & 1) {
3851 3851 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3852 3852 "sd-config-list should show as pairs of strings.\n");
3853 3853 if (config_list)
3854 3854 ddi_prop_free(config_list);
3855 3855 return (SD_FAILURE);
3856 3856 }
3857 3857
3858 3858 for (i = 0; i < nelements; i += 2) {
3859 3859 /*
3860 3860 * Note: The assumption here is that each vid entry is on
3861 3861 * a unique line from its associated duplet.
3862 3862 */
3863 3863 vidptr = config_list[i];
3864 3864 vidlen = (int)strlen(vidptr);
3865 3865 if (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS) {
3866 3866 continue;
3867 3867 }
3868 3868
3869 3869 /*
3870 3870 * dnlist contains 1 or more blank separated
3871 3871 * data-property-name entries
3872 3872 */
3873 3873 dnlist_ptr = config_list[i + 1];
3874 3874
3875 3875 if (strchr(dnlist_ptr, ':') != NULL) {
3876 3876 /*
3877 3877 * Decode the improved format sd-config-list.
3878 3878 */
3879 3879 sd_nvpair_str_decode(un, dnlist_ptr);
3880 3880 } else {
3881 3881 /*
3882 3882 			 * This is the old format sd-config-list; loop
3883 3883 			 * through all data-property-name entries in the
3884 3884 			 * data-property-name-list, setting the properties
3885 3885 			 * for each.
3886 3886 */
3887 3887 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t",
3888 3888 &dataname_lasts); dataname_ptr != NULL;
3889 3889 dataname_ptr = sd_strtok_r(NULL, " \t",
3890 3890 &dataname_lasts)) {
3891 3891 int version;
3892 3892
3893 3893 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3894 3894 "sd_process_sdconf_file: disk:%s, "
3895 3895 "data:%s\n", vidptr, dataname_ptr);
3896 3896
3897 3897 /* Get the data list */
3898 3898 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
3899 3899 SD_DEVINFO(un), 0, dataname_ptr, &data_list,
3900 3900 &data_list_len) != DDI_PROP_SUCCESS) {
3901 3901 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3902 3902 "sd_process_sdconf_file: data "
3903 3903 "property (%s) has no value\n",
3904 3904 dataname_ptr);
3905 3905 continue;
3906 3906 }
3907 3907
3908 3908 version = data_list[0];
3909 3909
3910 3910 if (version == SD_CONF_VERSION_1) {
3911 3911 sd_tunables values;
3912 3912
3913 3913 /* Set the properties */
3914 3914 if (sd_chk_vers1_data(un, data_list[1],
3915 3915 &data_list[2], data_list_len,
3916 3916 dataname_ptr) == SD_SUCCESS) {
3917 3917 sd_get_tunables_from_conf(un,
3918 3918 data_list[1], &data_list[2],
3919 3919 &values);
3920 3920 sd_set_vers1_properties(un,
3921 3921 data_list[1], &values);
3922 3922 rval = SD_SUCCESS;
3923 3923 } else {
3924 3924 rval = SD_FAILURE;
3925 3925 }
3926 3926 } else {
3927 3927 scsi_log(SD_DEVINFO(un), sd_label,
3928 3928 CE_WARN, "data property %s version "
3929 3929 "0x%x is invalid.",
3930 3930 dataname_ptr, version);
3931 3931 rval = SD_FAILURE;
3932 3932 }
3933 3933 if (data_list)
3934 3934 ddi_prop_free(data_list);
3935 3935 }
3936 3936 }
3937 3937 }
3938 3938
3939 3939 /* free up the memory allocated by ddi_prop_lookup_string_array(). */
3940 3940 if (config_list) {
3941 3941 ddi_prop_free(config_list);
3942 3942 }
3943 3943
3944 3944 return (rval);
3945 3945 }
3946 3946
3947 3947 /*
3948 3948 * Function: sd_nvpair_str_decode()
3949 3949 *
3950 3950  * Description: Parse the improved format sd-config-list into its
3951 3951  *		tunable entries, each of which is a name-value pair.
3952 3952 * Then call sd_set_properties() to set the property.
3953 3953 *
3954 3954 * Arguments: un - driver soft state (unit) structure
3955 3955 * nvpair_str - the tunable list
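 *
 *		For example, a tunable list such as
 *		"retries-busy:5, disksort:false" (values illustrative)
 *		decodes into the pairs ("retries-busy", "5") and
 *		("disksort", "false"), each of which is handed to
 *		sd_set_properties().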
3956 3956 */
3957 3957 static void
3958 3958 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str)
3959 3959 {
3960 3960 char *nv, *name, *value, *token;
3961 3961 char *nv_lasts, *v_lasts, *x_lasts;
3962 3962
3963 3963 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL;
3964 3964 nv = sd_strtok_r(NULL, ",", &nv_lasts)) {
3965 3965 token = sd_strtok_r(nv, ":", &v_lasts);
3966 3966 name = sd_strtok_r(token, " \t", &x_lasts);
3967 3967 token = sd_strtok_r(NULL, ":", &v_lasts);
3968 3968 value = sd_strtok_r(token, " \t", &x_lasts);
3969 3969 if (name == NULL || value == NULL) {
3970 3970 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3971 3971 "sd_nvpair_str_decode: "
3972 3972 "name or value is not valid!\n");
3973 3973 } else {
3974 3974 sd_set_properties(un, name, value);
3975 3975 }
3976 3976 }
3977 3977 }
3978 3978
3979 3979 /*
3980 3980 * Function: sd_strtok_r()
3981 3981 *
3982 3982  * Description: This function uses strpbrk and strspn to break the
3983 3983  *		string into tokens on sequential calls. It returns NULL
3984 3984  *		when no non-separator characters remain. Pass NULL as the
3985 3985  *		first argument on subsequent calls.
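 *
 *		A minimal usage sketch (buffer contents illustrative):
 *
 *			char buf[] = "name : value";
 *			char *lasts, *t;
 *			t = sd_strtok_r(buf, ":", &lasts);	-> "name "
 *			t = sd_strtok_r(NULL, ":", &lasts);	-> " value"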
3986 3986 */
3987 3987 static char *
3988 3988 sd_strtok_r(char *string, const char *sepset, char **lasts)
3989 3989 {
3990 3990 char *q, *r;
3991 3991
3992 3992 /* First or subsequent call */
3993 3993 if (string == NULL)
3994 3994 string = *lasts;
3995 3995
3996 3996 if (string == NULL)
3997 3997 return (NULL);
3998 3998
3999 3999 /* Skip leading separators */
4000 4000 q = string + strspn(string, sepset);
4001 4001
4002 4002 if (*q == '\0')
4003 4003 return (NULL);
4004 4004
4005 4005 if ((r = strpbrk(q, sepset)) == NULL)
4006 4006 *lasts = NULL;
4007 4007 else {
4008 4008 *r = '\0';
4009 4009 *lasts = r + 1;
4010 4010 }
4011 4011 return (q);
4012 4012 }
4013 4013
4014 4014 /*
4015 4015 * Function: sd_set_properties()
4016 4016 *
4017 4017 * Description: Set device properties based on the improved
4018 4018 * format sd-config-list.
4019 4019 *
4020 4020 * Arguments: un - driver soft state (unit) structure
4021 4021 * name - supported tunable name
4022 4022 * value - tunable value
4023 4023 */
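 *
 *		For example, a call such as
 *		sd_set_properties(un, "retries-busy", "6") (value
 *		illustrative) sets un->un_busy_retry_count to 6.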
4024 4024 static void
4025 4025 sd_set_properties(struct sd_lun *un, char *name, char *value)
4026 4026 {
4027 4027 char *endptr = NULL;
4028 4028 long val = 0;
4029 4029
4030 4030 if (strcasecmp(name, "cache-nonvolatile") == 0) {
4031 4031 if (strcasecmp(value, "true") == 0) {
4032 4032 un->un_f_suppress_cache_flush = TRUE;
4033 4033 } else if (strcasecmp(value, "false") == 0) {
4034 4034 un->un_f_suppress_cache_flush = FALSE;
4035 4035 } else {
4036 4036 goto value_invalid;
4037 4037 }
4038 4038 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4039 4039 "suppress_cache_flush flag set to %d\n",
4040 4040 un->un_f_suppress_cache_flush);
4041 4041 return;
4042 4042 }
4043 4043
4044 4044 if (strcasecmp(name, "controller-type") == 0) {
4045 4045 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4046 4046 un->un_ctype = val;
4047 4047 } else {
4048 4048 goto value_invalid;
4049 4049 }
4050 4050 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4051 4051 "ctype set to %d\n", un->un_ctype);
4052 4052 return;
4053 4053 }
4054 4054
4055 4055 if (strcasecmp(name, "delay-busy") == 0) {
4056 4056 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4057 4057 un->un_busy_timeout = drv_usectohz(val / 1000);
4058 4058 } else {
4059 4059 goto value_invalid;
4060 4060 }
4061 4061 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4062 4062 "busy_timeout set to %d\n", un->un_busy_timeout);
4063 4063 return;
4064 4064 }
4065 4065
4066 4066 if (strcasecmp(name, "disksort") == 0) {
4067 4067 if (strcasecmp(value, "true") == 0) {
4068 4068 un->un_f_disksort_disabled = FALSE;
4069 4069 } else if (strcasecmp(value, "false") == 0) {
4070 4070 un->un_f_disksort_disabled = TRUE;
4071 4071 } else {
4072 4072 goto value_invalid;
4073 4073 }
4074 4074 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4075 4075 "disksort disabled flag set to %d\n",
4076 4076 un->un_f_disksort_disabled);
4077 4077 return;
4078 4078 }
4079 4079
4080 4080 if (strcasecmp(name, "power-condition") == 0) {
4081 4081 if (strcasecmp(value, "true") == 0) {
4082 4082 un->un_f_power_condition_disabled = FALSE;
4083 4083 } else if (strcasecmp(value, "false") == 0) {
4084 4084 un->un_f_power_condition_disabled = TRUE;
4085 4085 } else {
4086 4086 goto value_invalid;
4087 4087 }
4088 4088 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4089 4089 "power condition disabled flag set to %d\n",
4090 4090 un->un_f_power_condition_disabled);
4091 4091 return;
4092 4092 }
4093 4093
4094 4094 if (strcasecmp(name, "timeout-releasereservation") == 0) {
4095 4095 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4096 4096 un->un_reserve_release_time = val;
4097 4097 } else {
4098 4098 goto value_invalid;
4099 4099 }
4100 4100 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4101 4101 "reservation release timeout set to %d\n",
4102 4102 un->un_reserve_release_time);
4103 4103 return;
4104 4104 }
4105 4105
4106 4106 if (strcasecmp(name, "reset-lun") == 0) {
4107 4107 if (strcasecmp(value, "true") == 0) {
4108 4108 un->un_f_lun_reset_enabled = TRUE;
4109 4109 } else if (strcasecmp(value, "false") == 0) {
4110 4110 un->un_f_lun_reset_enabled = FALSE;
4111 4111 } else {
4112 4112 goto value_invalid;
4113 4113 }
4114 4114 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4115 4115 "lun reset enabled flag set to %d\n",
4116 4116 un->un_f_lun_reset_enabled);
4117 4117 return;
4118 4118 }
4119 4119
4120 4120 if (strcasecmp(name, "retries-busy") == 0) {
4121 4121 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4122 4122 un->un_busy_retry_count = val;
4123 4123 } else {
4124 4124 goto value_invalid;
4125 4125 }
4126 4126 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4127 4127 "busy retry count set to %d\n", un->un_busy_retry_count);
4128 4128 return;
4129 4129 }
4130 4130
4131 4131 if (strcasecmp(name, "retries-timeout") == 0) {
4132 4132 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4133 4133 un->un_retry_count = val;
4134 4134 } else {
4135 4135 goto value_invalid;
4136 4136 }
4137 4137 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4138 4138 "timeout retry count set to %d\n", un->un_retry_count);
4139 4139 return;
4140 4140 }
4141 4141
4142 4142 if (strcasecmp(name, "retries-notready") == 0) {
4143 4143 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4144 4144 un->un_notready_retry_count = val;
4145 4145 } else {
4146 4146 goto value_invalid;
4147 4147 }
4148 4148 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4149 4149 "notready retry count set to %d\n",
4150 4150 un->un_notready_retry_count);
4151 4151 return;
4152 4152 }
4153 4153
4154 4154 if (strcasecmp(name, "retries-reset") == 0) {
4155 4155 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4156 4156 un->un_reset_retry_count = val;
4157 4157 } else {
4158 4158 goto value_invalid;
4159 4159 }
4160 4160 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4161 4161 "reset retry count set to %d\n",
4162 4162 un->un_reset_retry_count);
4163 4163 return;
4164 4164 }
4165 4165
4166 4166 if (strcasecmp(name, "throttle-max") == 0) {
4167 4167 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4168 4168 un->un_saved_throttle = un->un_throttle = val;
4169 4169 } else {
4170 4170 goto value_invalid;
4171 4171 }
4172 4172 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4173 4173 "throttle set to %d\n", un->un_throttle);
4174 4174 }
4175 4175
4176 4176 if (strcasecmp(name, "throttle-min") == 0) {
4177 4177 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4178 4178 un->un_min_throttle = val;
4179 4179 } else {
4180 4180 goto value_invalid;
4181 4181 }
4182 4182 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4183 4183 "min throttle set to %d\n", un->un_min_throttle);
4184 4184 }
4185 4185
4186 4186 if (strcasecmp(name, "rmw-type") == 0) {
4187 4187 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4188 4188 un->un_f_rmw_type = val;
4189 4189 } else {
4190 4190 goto value_invalid;
4191 4191 }
4192 4192 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4193 4193 "RMW type set to %d\n", un->un_f_rmw_type);
4194 4194 }
4195 4195
4196 4196 if (strcasecmp(name, "physical-block-size") == 0) {
4197 4197 if (ddi_strtol(value, &endptr, 0, &val) == 0 &&
4198 4198 ISP2(val) && val >= un->un_tgt_blocksize &&
4199 4199 val >= un->un_sys_blocksize) {
4200 4200 un->un_phy_blocksize = val;
4201 4201 } else {
4202 4202 goto value_invalid;
4203 4203 }
4204 4204 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4205 4205 "physical block size set to %d\n", un->un_phy_blocksize);
4206 4206 }
4207 4207
4208 4208 if (strcasecmp(name, "retries-victim") == 0) {
4209 4209 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4210 4210 un->un_victim_retry_count = val;
4211 4211 } else {
4212 4212 goto value_invalid;
4213 4213 }
4214 4214 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4215 4215 "victim retry count set to %d\n",
4216 4216 un->un_victim_retry_count);
4217 4217 return;
4218 4218 }
4219 4219
4220 4220 /*
4221 4221 * Validate the throttle values.
4222 4222 * If any of the numbers are invalid, set everything to defaults.
4223 4223 */
4224 4224 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
4225 4225 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
4226 4226 (un->un_min_throttle > un->un_throttle)) {
4227 4227 un->un_saved_throttle = un->un_throttle = sd_max_throttle;
4228 4228 un->un_min_throttle = sd_min_throttle;
4229 4229 }
4230 4230
4231 4231 if (strcasecmp(name, "mmc-gesn-polling") == 0) {
4232 4232 if (strcasecmp(value, "true") == 0) {
4233 4233 un->un_f_mmc_gesn_polling = TRUE;
4234 4234 } else if (strcasecmp(value, "false") == 0) {
4235 4235 un->un_f_mmc_gesn_polling = FALSE;
4236 4236 } else {
4237 4237 goto value_invalid;
4238 4238 }
4239 4239 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4240 4240 "mmc-gesn-polling set to %d\n",
4241 4241 un->un_f_mmc_gesn_polling);
4242 4242 }
4243 4243
4244 4244 return;
4245 4245
4246 4246 value_invalid:
4247 4247 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4248 4248 "value of prop %s is invalid\n", name);
4249 4249 }
4250 4250
4251 4251 /*
4252 4252 * Function: sd_get_tunables_from_conf()
4253 4253 *
4254 4254  * Description: This function reads the data list from the sd.conf file
4255 4255  *		and pulls the values that can have numeric values as
4256 4256  *		arguments, placing them in the appropriate sd_tunables
4257 4257  *		member. Since the order of the data list members varies
4258 4258  *		across platforms, this function reads them from the data
4259 4259  *		list in a platform specific order and places them into
4260 4260  *		the correct sd_tunables member that is consistent across
4261 4261  *		all platforms.
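 *
 * For example, if only the SD_CONF_BSET_THROTTLE bit is set in
 * flags, the matching data_list slot is copied into
 * values->sdt_throttle and every other member stays zeroed; slots
 * whose mask bits are clear are skipped.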
4262 4262 */
4263 4263 static void
4264 4264 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list,
4265 4265 sd_tunables *values)
4266 4266 {
4267 4267 int i;
4268 4268 int mask;
4269 4269
4270 4270 bzero(values, sizeof (sd_tunables));
4271 4271
4272 4272 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
4273 4273
4274 4274 mask = 1 << i;
4275 4275 if (mask > flags) {
4276 4276 break;
4277 4277 }
4278 4278
4279 4279 switch (mask & flags) {
4280 4280 case 0: /* This mask bit not set in flags */
4281 4281 continue;
4282 4282 case SD_CONF_BSET_THROTTLE:
4283 4283 values->sdt_throttle = data_list[i];
4284 4284 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4285 4285 "sd_get_tunables_from_conf: throttle = %d\n",
4286 4286 values->sdt_throttle);
4287 4287 break;
4288 4288 case SD_CONF_BSET_CTYPE:
4289 4289 values->sdt_ctype = data_list[i];
4290 4290 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4291 4291 "sd_get_tunables_from_conf: ctype = %d\n",
4292 4292 values->sdt_ctype);
4293 4293 break;
4294 4294 case SD_CONF_BSET_NRR_COUNT:
4295 4295 values->sdt_not_rdy_retries = data_list[i];
4296 4296 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4297 4297 "sd_get_tunables_from_conf: not_rdy_retries = %d\n",
4298 4298 values->sdt_not_rdy_retries);
4299 4299 break;
4300 4300 case SD_CONF_BSET_BSY_RETRY_COUNT:
4301 4301 values->sdt_busy_retries = data_list[i];
4302 4302 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4303 4303 "sd_get_tunables_from_conf: busy_retries = %d\n",
4304 4304 values->sdt_busy_retries);
4305 4305 break;
4306 4306 case SD_CONF_BSET_RST_RETRIES:
4307 4307 values->sdt_reset_retries = data_list[i];
4308 4308 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4309 4309 "sd_get_tunables_from_conf: reset_retries = %d\n",
4310 4310 values->sdt_reset_retries);
4311 4311 break;
4312 4312 case SD_CONF_BSET_RSV_REL_TIME:
4313 4313 values->sdt_reserv_rel_time = data_list[i];
4314 4314 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4315 4315 "sd_get_tunables_from_conf: reserv_rel_time = %d\n",
4316 4316 values->sdt_reserv_rel_time);
4317 4317 break;
4318 4318 case SD_CONF_BSET_MIN_THROTTLE:
4319 4319 values->sdt_min_throttle = data_list[i];
4320 4320 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4321 4321 "sd_get_tunables_from_conf: min_throttle = %d\n",
4322 4322 values->sdt_min_throttle);
4323 4323 break;
4324 4324 case SD_CONF_BSET_DISKSORT_DISABLED:
4325 4325 values->sdt_disk_sort_dis = data_list[i];
4326 4326 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4327 4327 "sd_get_tunables_from_conf: disk_sort_dis = %d\n",
4328 4328 values->sdt_disk_sort_dis);
4329 4329 break;
4330 4330 case SD_CONF_BSET_LUN_RESET_ENABLED:
4331 4331 values->sdt_lun_reset_enable = data_list[i];
4332 4332 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4333 4333 "sd_get_tunables_from_conf: lun_reset_enable = %d"
4334 4334 "\n", values->sdt_lun_reset_enable);
4335 4335 break;
4336 4336 case SD_CONF_BSET_CACHE_IS_NV:
4337 4337 values->sdt_suppress_cache_flush = data_list[i];
4338 4338 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4339 4339 			    "sd_get_tunables_from_conf: "
4340 4340 			    "suppress_cache_flush = %d\n",
4341 4341 			    values->sdt_suppress_cache_flush);
4342 4342 break;
4343 4343 case SD_CONF_BSET_PC_DISABLED:
4344 4344 			values->sdt_power_condition_dis = data_list[i];
4345 4345 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4346 4346 "sd_get_tunables_from_conf: power_condition_dis = "
4347 4347 "%d\n", values->sdt_power_condition_dis);
4348 4348 break;
4349 4349 }
4350 4350 }
4351 4351 }
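To make the flag-to-slot mapping concrete, here is a minimal userland sketch (not driver code) of the bit walk above: each bit i that is set in the flags word selects data_list[i], and the scan stops once the mask exceeds the flags word. The bit positions and values below are assumed purely for illustration.

#include <stdio.h>

int
main(void)
{
	int flags = 0x11;			/* pretend bits 0 and 4 are set */
	int data_list[] = { 32, 0, 0, 0, 3 };	/* slot per bit position */
	int i, mask;

	for (i = 0; i < 5; i++) {
		mask = 1 << i;
		if (mask > flags)		/* no higher bits can be set */
			break;
		if (flags & mask)
			printf("bit %d set: data_list[%d] = %d\n",
			    i, i, data_list[i]);
	}
	return (0);
}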
4352 4352
4353 4353 /*
4354 4354 * Function: sd_process_sdconf_table
4355 4355 *
4356 4356 * Description: Search the static configuration table for a match on the
4357 4357 * inquiry vid/pid and update the driver soft state structure
4358 4358 * according to the table property values for the device.
4359 4359 *
4360 4360 * The form of a configuration table entry is:
4361 4361 * <vid+pid>,<flags>,<property-data>
4362 4362 * "SEAGATE ST42400N",1,0x40000,
4363 4363 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1;
4364 4364 *
4365 4365 * Arguments: un - driver soft state (unit) structure
4366 4366 */
4367 4367
4368 4368 static void
4369 4369 sd_process_sdconf_table(struct sd_lun *un)
4370 4370 {
4371 4371 char *id = NULL;
4372 4372 int table_index;
4373 4373 int idlen;
4374 4374
4375 4375 ASSERT(un != NULL);
4376 4376 for (table_index = 0; table_index < sd_disk_table_size;
4377 4377 table_index++) {
4378 4378 id = sd_disk_table[table_index].device_id;
4379 4379 idlen = strlen(id);
4380 4380
4381 4381 /*
4382 4382 * The static configuration table currently does not
4383 4383 * implement version 10 properties. Additionally,
4384 4384 * multiple data-property-name entries are not
4385 4385 * implemented in the static configuration table.
4386 4386 */
4387 4387 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
4388 4388 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4389 4389 "sd_process_sdconf_table: disk %s\n", id);
4390 4390 sd_set_vers1_properties(un,
4391 4391 sd_disk_table[table_index].flags,
4392 4392 sd_disk_table[table_index].properties);
4393 4393 break;
4394 4394 }
4395 4395 }
4396 4396 }
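For orientation, a hypothetical sketch of the shape of a static table element follows; the struct type name and initializer are assumed, but the device_id/flags/properties fields match the references made by sd_process_sdconf_table().

/* type name and layout assumed for illustration */
struct sd_disk_entry {
	char	*device_id;	/* inquiry vid+pid to match */
	int	flags;		/* SD_CONF_BSET_* bit mask */
	void	*properties;	/* points at a sd_tunables list */
};

static struct sd_disk_entry example_entry = {
	"SEAGATE ST42400N",	/* 8-byte vid + pid, as in the comment */
	0x1,			/* e.g. the throttle bit; position assumed */
	NULL			/* would point at the tunables values */
};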
4397 4397
4398 4398
4399 4399 /*
4400 4400 * Function: sd_sdconf_id_match
4401 4401 *
4402 4402  * Description: This local function implements a case-insensitive vid/pid
4403 4403 * comparison as well as the boundary cases of wild card and
4404 4404 * multiple blanks.
4405 4405 *
4406 4406 * Note: An implicit assumption made here is that the scsi
4407 4407 * inquiry structure will always keep the vid, pid and
4408 4408 * revision strings in consecutive sequence, so they can be
4409 4409 * read as a single string. If this assumption is not the
4410 4410 * case, a separate string, to be used for the check, needs
4411 4411 * to be built with these strings concatenated.
4412 4412 *
4413 4413 * Arguments: un - driver soft state (unit) structure
4414 4414 * id - table or config file vid/pid
4415 4415 * idlen - length of the vid/pid (bytes)
4416 4416 *
4417 4417 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
4418 4418 * SD_FAILURE - Indicates no match with the inquiry vid/pid
4419 4419 */
4420 4420
4421 4421 static int
4422 4422 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen)
4423 4423 {
4424 4424 struct scsi_inquiry *sd_inq;
4425 4425 int rval = SD_SUCCESS;
4426 4426
4427 4427 ASSERT(un != NULL);
4428 4428 sd_inq = un->un_sd->sd_inq;
4429 4429 ASSERT(id != NULL);
4430 4430
4431 4431 /*
4432 4432 * We use the inq_vid as a pointer to a buffer containing the
4433 4433 * vid and pid and use the entire vid/pid length of the table
4434 4434 * entry for the comparison. This works because the inq_pid
4435 4435 * data member follows inq_vid in the scsi_inquiry structure.
4436 4436 */
4437 4437 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) {
4438 4438 /*
4439 4439 * The user id string is compared to the inquiry vid/pid
4440 4440 * using a case insensitive comparison and ignoring
4441 4441 * multiple spaces.
4442 4442 */
4443 4443 rval = sd_blank_cmp(un, id, idlen);
4444 4444 if (rval != SD_SUCCESS) {
4445 4445 /*
4446 4446 * User id strings that start and end with a "*"
4447 4447 * are a special case. These do not have a
4448 4448 * specific vendor, and the product string can
4449 4449 * appear anywhere in the 16 byte PID portion of
4450 4450 * the inquiry data. This is a simple strstr()
4451 4451 * type search for the user id in the inquiry data.
4452 4452 */
4453 4453 if ((id[0] == '*') && (id[idlen - 1] == '*')) {
4454 4454 char *pidptr = &id[1];
4455 4455 int i;
4456 4456 int j;
4457 4457 int pidstrlen = idlen - 2;
4458 4458 j = sizeof (SD_INQUIRY(un)->inq_pid) -
4459 4459 pidstrlen;
4460 4460
4461 4461 if (j < 0) {
4462 4462 return (SD_FAILURE);
4463 4463 }
4464 4464 				for (i = 0; i <= j; i++) {
4465 4465 if (bcmp(&SD_INQUIRY(un)->inq_pid[i],
4466 4466 pidptr, pidstrlen) == 0) {
4467 4467 rval = SD_SUCCESS;
4468 4468 break;
4469 4469 }
4470 4470 }
4471 4471 }
4472 4472 }
4473 4473 }
4474 4474 return (rval);
4475 4475 }
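A small userland sketch of the "*...*" wildcard scan above: slide a window across the 16-byte PID field and memcmp() the quoted text at each offset. The sample strings are assumed for illustration.

#include <string.h>
#include <stdio.h>

int
main(void)
{
	char inq_pid[16] = "ST373207LC      ";	/* padded PID field */
	const char *id = "*ST373207*";		/* leading/trailing '*' */
	int pidstrlen = (int)strlen(id) - 2;	/* text between the stars */
	int j = (int)sizeof (inq_pid) - pidstrlen;
	int i, match = 0;

	for (i = 0; i <= j && !match; i++)	/* each window position */
		match = (memcmp(&inq_pid[i], &id[1], pidstrlen) == 0);
	printf("match = %d\n", match);		/* prints 1 */
	return (0);
}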
4476 4476
4477 4477
4478 4478 /*
4479 4479 * Function: sd_blank_cmp
4480 4480 *
4481 4481 * Description: If the id string starts and ends with a space, treat
4482 4482 * multiple consecutive spaces as equivalent to a single
4483 4483 * space. For example, this causes a sd_disk_table entry
4484 4484 * of " NEC CDROM " to match a device's id string of
4485 4485 * "NEC CDROM".
4486 4486 *
4487 4487  * Note: This routine succeeds when the table-entry pointer has
4488 4488  *		reached its terminating '\0' and the remaining inquiry
4489 4489  *		byte count is zero. This will happen if the inquiry
4490 4490 * string returned by the device is padded with spaces to be
4491 4491 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The
4492 4492 * SCSI spec states that the inquiry string is to be padded with
4493 4493 * spaces.
4494 4494 *
4495 4495 * Arguments: un - driver soft state (unit) structure
4496 4496 * id - table or config file vid/pid
4497 4497 * idlen - length of the vid/pid (bytes)
4498 4498 *
4499 4499 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
4500 4500 * SD_FAILURE - Indicates no match with the inquiry vid/pid
4501 4501 */
4502 4502
4503 4503 static int
4504 4504 sd_blank_cmp(struct sd_lun *un, char *id, int idlen)
4505 4505 {
4506 4506 char *p1;
4507 4507 char *p2;
4508 4508 int cnt;
4509 4509 cnt = sizeof (SD_INQUIRY(un)->inq_vid) +
4510 4510 sizeof (SD_INQUIRY(un)->inq_pid);
4511 4511
4512 4512 ASSERT(un != NULL);
4513 4513 p2 = un->un_sd->sd_inq->inq_vid;
4514 4514 ASSERT(id != NULL);
4515 4515 p1 = id;
4516 4516
4517 4517 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) {
4518 4518 /*
4519 4519 * Note: string p1 is terminated by a NUL but string p2
4520 4520 * isn't. The end of p2 is determined by cnt.
4521 4521 */
4522 4522 for (;;) {
4523 4523 /* skip over any extra blanks in both strings */
4524 4524 while ((*p1 != '\0') && (*p1 == ' ')) {
4525 4525 p1++;
4526 4526 }
4527 4527 while ((cnt != 0) && (*p2 == ' ')) {
4528 4528 p2++;
4529 4529 cnt--;
4530 4530 }
4531 4531
4532 4532 /* compare the two strings */
4533 4533 if ((cnt == 0) ||
4534 4534 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) {
4535 4535 break;
4536 4536 }
4537 4537 while ((cnt > 0) &&
4538 4538 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) {
4539 4539 p1++;
4540 4540 p2++;
4541 4541 cnt--;
4542 4542 }
4543 4543 }
4544 4544 }
4545 4545
4546 4546 /* return SD_SUCCESS if both strings match */
4547 4547 return (((*p1 == '\0') && (cnt == 0)) ? SD_SUCCESS : SD_FAILURE);
4548 4548 }
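The multi-blank rule can be seen in a standalone sketch: the loop below mirrors the compare above but works on two NUL-terminated strings, so it is a simplification of the driver logic, not the driver logic verbatim.

#include <ctype.h>
#include <stdio.h>

/* returns 1 when the strings match after collapsing runs of blanks */
static int
blank_cmp(const char *p1, const char *p2)
{
	for (;;) {
		while (*p1 == ' ')		/* skip extra blanks */
			p1++;
		while (*p2 == ' ')
			p2++;
		if (*p1 == '\0' ||
		    toupper((unsigned char)*p1) != toupper((unsigned char)*p2))
			break;
		while (*p1 != '\0' && *p1 != ' ' &&
		    toupper((unsigned char)*p1) ==
		    toupper((unsigned char)*p2)) {
			p1++;
			p2++;
		}
	}
	return (*p1 == '\0' && *p2 == '\0');
}

int
main(void)
{
	/* " NEC CDROM " (table entry) matches "NEC     CDROM" */
	printf("%d\n", blank_cmp(" NEC CDROM ", "NEC     CDROM"));
	return (0);
}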
4549 4549
4550 4550
4551 4551 /*
4552 4552 * Function: sd_chk_vers1_data
4553 4553 *
4554 4554 * Description: Verify the version 1 device properties provided by the
4555 4555 * user via the configuration file
4556 4556 *
4557 4557 * Arguments: un - driver soft state (unit) structure
4558 4558 * flags - integer mask indicating properties to be set
4559 4559 * prop_list - integer list of property values
4560 4560  *		list_len - number of elements in the property list
4561 4561 *
4562 4562 * Return Code: SD_SUCCESS - Indicates the user provided data is valid
4563 4563 * SD_FAILURE - Indicates the user provided data is invalid
4564 4564 */
4565 4565
4566 4566 static int
4567 4567 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
4568 4568 int list_len, char *dataname_ptr)
4569 4569 {
4570 4570 int i;
4571 4571 int mask = 1;
4572 4572 int index = 0;
4573 4573
4574 4574 ASSERT(un != NULL);
4575 4575
4576 4576 /* Check for a NULL property name and list */
4577 4577 if (dataname_ptr == NULL) {
4578 4578 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4579 4579 "sd_chk_vers1_data: NULL data property name.");
4580 4580 return (SD_FAILURE);
4581 4581 }
4582 4582 if (prop_list == NULL) {
4583 4583 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4584 4584 "sd_chk_vers1_data: %s NULL data property list.",
4585 4585 dataname_ptr);
4586 4586 return (SD_FAILURE);
4587 4587 }
4588 4588
4589 4589 /* Display a warning if undefined bits are set in the flags */
4590 4590 if (flags & ~SD_CONF_BIT_MASK) {
4591 4591 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4592 4592 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. "
4593 4593 "Properties not set.",
4594 4594 (flags & ~SD_CONF_BIT_MASK), dataname_ptr);
4595 4595 return (SD_FAILURE);
4596 4596 }
4597 4597
4598 4598 /*
4599 4599 * Verify the length of the list by identifying the highest bit set
4600 4600 * in the flags and validating that the property list has a length
4601 4601 * up to the index of this bit.
4602 4602 */
4603 4603 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
4604 4604 		mask = 1 << i;
4605 4605 		if (flags & mask) {
4606 4606 			index++;
4607 4607 		}
4608 4608 }
4609 4609 if (list_len < (index + 2)) {
4610 4610 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4611 4611 "sd_chk_vers1_data: "
4612 4612 "Data property list %s size is incorrect. "
4613 4613 "Properties not set.", dataname_ptr);
4614 4614 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: "
4615 4615 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS);
4616 4616 return (SD_FAILURE);
4617 4617 }
4618 4618 return (SD_SUCCESS);
4619 4619 }
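As a worked example of the length rule above: a flags word of 0x5 has two bits set, so a valid data list must be at least index + 2 = 4 words long (the version word, the flags word, and one value per set bit). A minimal sketch of that arithmetic:

#include <stdio.h>

int
main(void)
{
	int flags = 0x5, index = 0, i;

	for (i = 0; i < 32; i++)
		if (flags & (1 << i))
			index++;
	printf("minimum list_len = %d\n", index + 2);	/* prints 4 */
	return (0);
}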
4620 4620
4621 4621
4622 4622 /*
4623 4623 * Function: sd_set_vers1_properties
4624 4624 *
4625 4625 * Description: Set version 1 device properties based on a property list
4626 4626 * retrieved from the driver configuration file or static
4627 4627 * configuration table. Version 1 properties have the format:
4628 4628 *
4629 4629 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
4630 4630 *
4631 4631  *		where <prop0> is used to set the first property if bit 0
4632 4632  *		is set in <flags>, and so on for each bit/value pair
4633 4633 *
4634 4634 * Arguments: un - driver soft state (unit) structure
4635 4635 * flags - integer mask indicating properties to be set
4636 4636 * prop_list - integer list of property values
4637 4637 */
4638 4638
4639 4639 static void
4640 4640 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list)
4641 4641 {
4642 4642 ASSERT(un != NULL);
4643 4643
4644 4644 /*
4645 4645 * Set the flag to indicate cache is to be disabled. An attempt
4646 4646 * to disable the cache via sd_cache_control() will be made
4647 4647 * later during attach once the basic initialization is complete.
4648 4648 */
4649 4649 if (flags & SD_CONF_BSET_NOCACHE) {
4650 4650 un->un_f_opt_disable_cache = TRUE;
4651 4651 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4652 4652 "sd_set_vers1_properties: caching disabled flag set\n");
4653 4653 }
4654 4654
4655 4655 /* CD-specific configuration parameters */
4656 4656 if (flags & SD_CONF_BSET_PLAYMSF_BCD) {
4657 4657 un->un_f_cfg_playmsf_bcd = TRUE;
4658 4658 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4659 4659 "sd_set_vers1_properties: playmsf_bcd set\n");
4660 4660 }
4661 4661 if (flags & SD_CONF_BSET_READSUB_BCD) {
4662 4662 un->un_f_cfg_readsub_bcd = TRUE;
4663 4663 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4664 4664 "sd_set_vers1_properties: readsub_bcd set\n");
4665 4665 }
4666 4666 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) {
4667 4667 un->un_f_cfg_read_toc_trk_bcd = TRUE;
4668 4668 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4669 4669 "sd_set_vers1_properties: read_toc_trk_bcd set\n");
4670 4670 }
4671 4671 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) {
4672 4672 un->un_f_cfg_read_toc_addr_bcd = TRUE;
4673 4673 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4674 4674 "sd_set_vers1_properties: read_toc_addr_bcd set\n");
4675 4675 }
4676 4676 if (flags & SD_CONF_BSET_NO_READ_HEADER) {
4677 4677 un->un_f_cfg_no_read_header = TRUE;
4678 4678 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4679 4679 "sd_set_vers1_properties: no_read_header set\n");
4680 4680 }
4681 4681 if (flags & SD_CONF_BSET_READ_CD_XD4) {
4682 4682 un->un_f_cfg_read_cd_xd4 = TRUE;
4683 4683 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4684 4684 "sd_set_vers1_properties: read_cd_xd4 set\n");
4685 4685 }
4686 4686
4687 4687 /* Support for devices which do not have valid/unique serial numbers */
4688 4688 if (flags & SD_CONF_BSET_FAB_DEVID) {
4689 4689 un->un_f_opt_fab_devid = TRUE;
4690 4690 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4691 4691 "sd_set_vers1_properties: fab_devid bit set\n");
4692 4692 }
4693 4693
4694 4694 /* Support for user throttle configuration */
4695 4695 if (flags & SD_CONF_BSET_THROTTLE) {
4696 4696 ASSERT(prop_list != NULL);
4697 4697 un->un_saved_throttle = un->un_throttle =
4698 4698 prop_list->sdt_throttle;
4699 4699 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4700 4700 "sd_set_vers1_properties: throttle set to %d\n",
4701 4701 prop_list->sdt_throttle);
4702 4702 }
4703 4703
4704 4704 /* Set the per disk retry count according to the conf file or table. */
4705 4705 if (flags & SD_CONF_BSET_NRR_COUNT) {
4706 4706 ASSERT(prop_list != NULL);
4707 4707 if (prop_list->sdt_not_rdy_retries) {
4708 4708 un->un_notready_retry_count =
4709 4709 prop_list->sdt_not_rdy_retries;
4710 4710 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4711 4711 "sd_set_vers1_properties: not ready retry count"
4712 4712 " set to %d\n", un->un_notready_retry_count);
4713 4713 }
4714 4714 }
4715 4715
4716 4716 /* The controller type is reported for generic disk driver ioctls */
4717 4717 if (flags & SD_CONF_BSET_CTYPE) {
4718 4718 ASSERT(prop_list != NULL);
4719 4719 switch (prop_list->sdt_ctype) {
4720 4720 case CTYPE_CDROM:
4721 4721 un->un_ctype = prop_list->sdt_ctype;
4722 4722 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4723 4723 "sd_set_vers1_properties: ctype set to "
4724 4724 "CTYPE_CDROM\n");
4725 4725 break;
4726 4726 case CTYPE_CCS:
4727 4727 un->un_ctype = prop_list->sdt_ctype;
4728 4728 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4729 4729 "sd_set_vers1_properties: ctype set to "
4730 4730 "CTYPE_CCS\n");
4731 4731 break;
4732 4732 case CTYPE_ROD: /* RW optical */
4733 4733 un->un_ctype = prop_list->sdt_ctype;
4734 4734 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4735 4735 "sd_set_vers1_properties: ctype set to "
4736 4736 "CTYPE_ROD\n");
4737 4737 break;
4738 4738 default:
4739 4739 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4740 4740 "sd_set_vers1_properties: Could not set "
4741 4741 "invalid ctype value (%d)",
4742 4742 prop_list->sdt_ctype);
4743 4743 }
4744 4744 }
4745 4745
4746 4746 /* Purple failover timeout */
4747 4747 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) {
4748 4748 ASSERT(prop_list != NULL);
4749 4749 un->un_busy_retry_count =
4750 4750 prop_list->sdt_busy_retries;
4751 4751 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4752 4752 "sd_set_vers1_properties: "
4753 4753 "busy retry count set to %d\n",
4754 4754 un->un_busy_retry_count);
4755 4755 }
4756 4756
4757 4757 /* Purple reset retry count */
4758 4758 if (flags & SD_CONF_BSET_RST_RETRIES) {
4759 4759 ASSERT(prop_list != NULL);
4760 4760 un->un_reset_retry_count =
4761 4761 prop_list->sdt_reset_retries;
4762 4762 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4763 4763 "sd_set_vers1_properties: "
4764 4764 "reset retry count set to %d\n",
4765 4765 un->un_reset_retry_count);
4766 4766 }
4767 4767
4768 4768 /* Purple reservation release timeout */
4769 4769 if (flags & SD_CONF_BSET_RSV_REL_TIME) {
4770 4770 ASSERT(prop_list != NULL);
4771 4771 un->un_reserve_release_time =
4772 4772 prop_list->sdt_reserv_rel_time;
4773 4773 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4774 4774 "sd_set_vers1_properties: "
4775 4775 "reservation release timeout set to %d\n",
4776 4776 un->un_reserve_release_time);
4777 4777 }
4778 4778
4779 4779 /*
4780 4780 * Driver flag telling the driver to verify that no commands are pending
4781 4781 * for a device before issuing a Test Unit Ready. This is a workaround
4782 4782 * for a firmware bug in some Seagate eliteI drives.
4783 4783 */
4784 4784 if (flags & SD_CONF_BSET_TUR_CHECK) {
4785 4785 un->un_f_cfg_tur_check = TRUE;
4786 4786 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4787 4787 "sd_set_vers1_properties: tur queue check set\n");
4788 4788 }
4789 4789
4790 4790 if (flags & SD_CONF_BSET_MIN_THROTTLE) {
4791 4791 un->un_min_throttle = prop_list->sdt_min_throttle;
4792 4792 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4793 4793 "sd_set_vers1_properties: min throttle set to %d\n",
4794 4794 un->un_min_throttle);
4795 4795 }
4796 4796
4797 4797 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) {
4798 4798 un->un_f_disksort_disabled =
4799 4799 (prop_list->sdt_disk_sort_dis != 0) ?
4800 4800 TRUE : FALSE;
4801 4801 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4802 4802 "sd_set_vers1_properties: disksort disabled "
4803 4803 "flag set to %d\n",
4804 4804 prop_list->sdt_disk_sort_dis);
4805 4805 }
4806 4806
4807 4807 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) {
4808 4808 un->un_f_lun_reset_enabled =
4809 4809 (prop_list->sdt_lun_reset_enable != 0) ?
4810 4810 TRUE : FALSE;
4811 4811 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4812 4812 "sd_set_vers1_properties: lun reset enabled "
4813 4813 "flag set to %d\n",
4814 4814 prop_list->sdt_lun_reset_enable);
4815 4815 }
4816 4816
4817 4817 if (flags & SD_CONF_BSET_CACHE_IS_NV) {
4818 4818 un->un_f_suppress_cache_flush =
4819 4819 (prop_list->sdt_suppress_cache_flush != 0) ?
4820 4820 TRUE : FALSE;
4821 4821 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4822 4822 "sd_set_vers1_properties: suppress_cache_flush "
4823 4823 "flag set to %d\n",
4824 4824 prop_list->sdt_suppress_cache_flush);
4825 4825 }
4826 4826
4827 4827 if (flags & SD_CONF_BSET_PC_DISABLED) {
4828 4828 un->un_f_power_condition_disabled =
4829 4829 (prop_list->sdt_power_condition_dis != 0) ?
4830 4830 TRUE : FALSE;
4831 4831 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4832 4832 "sd_set_vers1_properties: power_condition_disabled "
4833 4833 "flag set to %d\n",
4834 4834 prop_list->sdt_power_condition_dis);
4835 4835 }
4836 4836
4837 4837 /*
4838 4838 * Validate the throttle values.
4839 4839 * If any of the numbers are invalid, set everything to defaults.
4840 4840 */
4841 4841 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
4842 4842 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
4843 4843 (un->un_min_throttle > un->un_throttle)) {
4844 4844 un->un_saved_throttle = un->un_throttle = sd_max_throttle;
4845 4845 un->un_min_throttle = sd_min_throttle;
4846 4846 }
4847 4847 }
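For reference, a hypothetical sd.conf fragment in the version-1 format described above; the flag bit assignment is assumed here for illustration (0x1 taken to be the throttle bit), so the single data value would set the throttle to 32:

sd-config-list = "SEAGATE ST42400N", "example-data";
example-data = 1, 0x1, 32;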
4848 4848
4849 4849 /*
4850 4850 * Function: sd_is_lsi()
4851 4851 *
4852 4852  * Description: Check for LSI devices by stepping through the static
4853 4853  *		device table to match vid/pid.
4854 4854 *
4855 4855 * Args: un - ptr to sd_lun
4856 4856 *
4857 4857  * Notes: When creating a new LSI property, the new property must also
4858 4858  *	be added to this function.
4859 4859 */
4860 4860 static void
4861 4861 sd_is_lsi(struct sd_lun *un)
4862 4862 {
4863 4863 char *id = NULL;
4864 4864 int table_index;
4865 4865 int idlen;
4866 4866 void *prop;
4867 4867
4868 4868 ASSERT(un != NULL);
4869 4869 for (table_index = 0; table_index < sd_disk_table_size;
4870 4870 table_index++) {
4871 4871 id = sd_disk_table[table_index].device_id;
4872 4872 idlen = strlen(id);
4873 4873 if (idlen == 0) {
4874 4874 continue;
4875 4875 }
4876 4876
4877 4877 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
4878 4878 prop = sd_disk_table[table_index].properties;
4879 4879 if (prop == &lsi_properties ||
4880 4880 prop == &lsi_oem_properties ||
4881 4881 prop == &lsi_properties_scsi ||
4882 4882 prop == &symbios_properties) {
4883 4883 un->un_f_cfg_is_lsi = TRUE;
4884 4884 }
4885 4885 break;
4886 4886 }
4887 4887 }
4888 4888 }
4889 4889
4890 4890 /*
4891 4891 * Function: sd_get_physical_geometry
4892 4892 *
4893 4893 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and
4894 4894 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the
4895 4895 * target, and use this information to initialize the physical
4896 4896 * geometry cache specified by pgeom_p.
4897 4897 *
4898 4898 * MODE SENSE is an optional command, so failure in this case
4899 4899 * does not necessarily denote an error. We want to use the
4900 4900 * MODE SENSE commands to derive the physical geometry of the
4901 4901 * device, but if either command fails, the logical geometry is
4902 4902 * used as the fallback for disk label geometry in cmlb.
4903 4903 *
4904 4904 * This requires that un->un_blockcount and un->un_tgt_blocksize
4905 4905 * have already been initialized for the current target and
4906 4906 * that the current values be passed as args so that we don't
4907 4907 * end up ever trying to use -1 as a valid value. This could
4908 4908 * happen if either value is reset while we're not holding
4909 4909 * the mutex.
4910 4910 *
4911 4911 * Arguments: un - driver soft state (unit) structure
4912 4912 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
4913 4913 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
4914 4914 * to use the USCSI "direct" chain and bypass the normal
4915 4915 * command waitq.
4916 4916 *
4917 4917 * Context: Kernel thread only (can sleep).
4918 4918 */
4919 4919
4920 4920 static int
4921 4921 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p,
4922 4922 diskaddr_t capacity, int lbasize, int path_flag)
4923 4923 {
4924 4924 struct mode_format *page3p;
4925 4925 struct mode_geometry *page4p;
4926 4926 struct mode_header *headerp;
4927 4927 int sector_size;
4928 4928 int nsect;
4929 4929 int nhead;
4930 4930 int ncyl;
4931 4931 int intrlv;
4932 4932 int spc;
4933 4933 diskaddr_t modesense_capacity;
4934 4934 int rpm;
4935 4935 int bd_len;
4936 4936 int mode_header_length;
4937 4937 uchar_t *p3bufp;
4938 4938 uchar_t *p4bufp;
4939 4939 int cdbsize;
4940 4940 int ret = EIO;
4941 4941 sd_ssc_t *ssc;
4942 4942 int status;
4943 4943
4944 4944 ASSERT(un != NULL);
4945 4945
4946 4946 if (lbasize == 0) {
4947 4947 if (ISCD(un)) {
4948 4948 lbasize = 2048;
4949 4949 } else {
4950 4950 lbasize = un->un_sys_blocksize;
4951 4951 }
4952 4952 }
4953 4953 pgeom_p->g_secsize = (unsigned short)lbasize;
4954 4954
4955 4955 /*
4956 4956 * If the unit is a cd/dvd drive MODE SENSE page three
4957 4957 * and MODE SENSE page four are reserved (see SBC spec
4958 4958 * and MMC spec). To prevent soft errors just return
4959 4959 * using the default LBA size.
4960 4960 *
4961 4961 * Since SATA MODE SENSE function (sata_txlt_mode_sense()) does not
4962 4962 * implement support for mode pages 3 and 4 return here to prevent
4963 4963 * illegal requests on SATA drives.
4964 4964 *
4965 4965 * These pages are also reserved in SBC-2 and later. We assume SBC-2
4966 4966 * or later for a direct-attached block device if the SCSI version is
4967 4967 * at least SPC-3.
4968 4968 */
4969 4969
4970 4970 if (ISCD(un) ||
4971 4971 un->un_interconnect_type == SD_INTERCONNECT_SATA ||
4972 4972 (un->un_ctype == CTYPE_CCS && SD_INQUIRY(un)->inq_ansi >= 5))
4973 4973 return (ret);
4974 4974
4975 4975 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0;
4976 4976
4977 4977 /*
4978 4978 * Retrieve MODE SENSE page 3 - Format Device Page
4979 4979 */
4980 4980 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP);
4981 4981 ssc = sd_ssc_init(un);
4982 4982 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp,
4983 4983 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag);
4984 4984 if (status != 0) {
4985 4985 SD_ERROR(SD_LOG_COMMON, un,
4986 4986 "sd_get_physical_geometry: mode sense page 3 failed\n");
4987 4987 goto page3_exit;
4988 4988 }
4989 4989
4990 4990 /*
4991 4991 * Determine size of Block Descriptors in order to locate the mode
4992 4992 * page data. ATAPI devices return 0, SCSI devices should return
4993 4993 * MODE_BLK_DESC_LENGTH.
4994 4994 */
4995 4995 headerp = (struct mode_header *)p3bufp;
4996 4996 if (un->un_f_cfg_is_atapi == TRUE) {
4997 4997 struct mode_header_grp2 *mhp =
4998 4998 (struct mode_header_grp2 *)headerp;
4999 4999 mode_header_length = MODE_HEADER_LENGTH_GRP2;
5000 5000 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
5001 5001 } else {
5002 5002 mode_header_length = MODE_HEADER_LENGTH;
5003 5003 bd_len = ((struct mode_header *)headerp)->bdesc_length;
5004 5004 }
5005 5005
5006 5006 if (bd_len > MODE_BLK_DESC_LENGTH) {
5007 5007 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
5008 5008 "sd_get_physical_geometry: received unexpected bd_len "
5009 5009 "of %d, page3\n", bd_len);
5010 5010 status = EIO;
5011 5011 goto page3_exit;
5012 5012 }
5013 5013
5014 5014 page3p = (struct mode_format *)
5015 5015 ((caddr_t)headerp + mode_header_length + bd_len);
5016 5016
5017 5017 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) {
5018 5018 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
5019 5019 "sd_get_physical_geometry: mode sense pg3 code mismatch "
5020 5020 "%d\n", page3p->mode_page.code);
5021 5021 status = EIO;
5022 5022 goto page3_exit;
5023 5023 }
5024 5024
5025 5025 /*
5026 5026 * Use this physical geometry data only if BOTH MODE SENSE commands
5027 5027 * complete successfully; otherwise, revert to the logical geometry.
5028 5028 * So, we need to save everything in temporary variables.
5029 5029 */
5030 5030 sector_size = BE_16(page3p->data_bytes_sect);
5031 5031
5032 5032 /*
5033 5033 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size
5034 5034 */
5035 5035 if (sector_size == 0) {
5036 5036 sector_size = un->un_sys_blocksize;
5037 5037 } else {
5038 5038 sector_size &= ~(un->un_sys_blocksize - 1);
5039 5039 }
5040 5040
5041 5041 nsect = BE_16(page3p->sect_track);
5042 5042 intrlv = BE_16(page3p->interleave);
5043 5043
5044 5044 SD_INFO(SD_LOG_COMMON, un,
5045 5045 "sd_get_physical_geometry: Format Parameters (page 3)\n");
5046 5046 SD_INFO(SD_LOG_COMMON, un,
5047 5047 " mode page: %d; nsect: %d; sector size: %d;\n",
5048 5048 page3p->mode_page.code, nsect, sector_size);
5049 5049 SD_INFO(SD_LOG_COMMON, un,
5050 5050 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv,
5051 5051 BE_16(page3p->track_skew),
5052 5052 BE_16(page3p->cylinder_skew));
5053 5053
5054 5054 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
5055 5055
5056 5056 /*
5057 5057 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page
5058 5058 */
5059 5059 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP);
5060 5060 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp,
5061 5061 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag);
5062 5062 if (status != 0) {
5063 5063 SD_ERROR(SD_LOG_COMMON, un,
5064 5064 "sd_get_physical_geometry: mode sense page 4 failed\n");
5065 5065 goto page4_exit;
5066 5066 }
5067 5067
5068 5068 /*
5069 5069 * Determine size of Block Descriptors in order to locate the mode
5070 5070 * page data. ATAPI devices return 0, SCSI devices should return
5071 5071 * MODE_BLK_DESC_LENGTH.
5072 5072 */
5073 5073 headerp = (struct mode_header *)p4bufp;
5074 5074 if (un->un_f_cfg_is_atapi == TRUE) {
5075 5075 struct mode_header_grp2 *mhp =
5076 5076 (struct mode_header_grp2 *)headerp;
5077 5077 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
5078 5078 } else {
5079 5079 bd_len = ((struct mode_header *)headerp)->bdesc_length;
5080 5080 }
5081 5081
5082 5082 if (bd_len > MODE_BLK_DESC_LENGTH) {
5083 5083 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
5084 5084 "sd_get_physical_geometry: received unexpected bd_len of "
5085 5085 "%d, page4\n", bd_len);
5086 5086 status = EIO;
5087 5087 goto page4_exit;
5088 5088 }
5089 5089
5090 5090 page4p = (struct mode_geometry *)
5091 5091 ((caddr_t)headerp + mode_header_length + bd_len);
5092 5092
5093 5093 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) {
5094 5094 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
5095 5095 "sd_get_physical_geometry: mode sense pg4 code mismatch "
5096 5096 "%d\n", page4p->mode_page.code);
5097 5097 status = EIO;
5098 5098 goto page4_exit;
5099 5099 }
5100 5100
5101 5101 /*
5102 5102 * Stash the data now, after we know that both commands completed.
5103 5103 */
5104 5104
5105 5105
5106 5106 nhead = (int)page4p->heads; /* uchar, so no conversion needed */
5107 5107 spc = nhead * nsect;
5108 5108 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb;
5109 5109 rpm = BE_16(page4p->rpm);
5110 5110
5111 5111 modesense_capacity = spc * ncyl;
5112 5112
5113 5113 SD_INFO(SD_LOG_COMMON, un,
5114 5114 "sd_get_physical_geometry: Geometry Parameters (page 4)\n");
5115 5115 SD_INFO(SD_LOG_COMMON, un,
5116 5116 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm);
5117 5117 SD_INFO(SD_LOG_COMMON, un,
5118 5118 " computed capacity(h*s*c): %d;\n", modesense_capacity);
5119 5119 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n",
5120 5120 (void *)pgeom_p, capacity);
5121 5121
5122 5122 /*
5123 5123 * Compensate if the drive's geometry is not rectangular, i.e.,
5124 5124 * the product of C * H * S returned by MODE SENSE >= that returned
5125 5125 * by read capacity. This is an idiosyncrasy of the original x86
5126 5126 * disk subsystem.
5127 5127 */
5128 5128 if (modesense_capacity >= capacity) {
5129 5129 SD_INFO(SD_LOG_COMMON, un,
5130 5130 "sd_get_physical_geometry: adjusting acyl; "
5131 5131 "old: %d; new: %d\n", pgeom_p->g_acyl,
5132 5132 (modesense_capacity - capacity + spc - 1) / spc);
5133 5133 if (sector_size != 0) {
5134 5134 /* 1243403: NEC D38x7 drives don't support sec size */
5135 5135 pgeom_p->g_secsize = (unsigned short)sector_size;
5136 5136 }
5137 5137 pgeom_p->g_nsect = (unsigned short)nsect;
5138 5138 pgeom_p->g_nhead = (unsigned short)nhead;
5139 5139 pgeom_p->g_capacity = capacity;
5140 5140 pgeom_p->g_acyl =
5141 5141 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc;
5142 5142 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl;
5143 5143 }
5144 5144
5145 5145 pgeom_p->g_rpm = (unsigned short)rpm;
5146 5146 pgeom_p->g_intrlv = (unsigned short)intrlv;
5147 5147 ret = 0;
5148 5148
5149 5149 SD_INFO(SD_LOG_COMMON, un,
5150 5150 "sd_get_physical_geometry: mode sense geometry:\n");
5151 5151 SD_INFO(SD_LOG_COMMON, un,
5152 5152 " nsect: %d; sector size: %d; interlv: %d\n",
5153 5153 nsect, sector_size, intrlv);
5154 5154 SD_INFO(SD_LOG_COMMON, un,
5155 5155 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n",
5156 5156 nhead, ncyl, rpm, modesense_capacity);
5157 5157 SD_INFO(SD_LOG_COMMON, un,
5158 5158 "sd_get_physical_geometry: (cached)\n");
5159 5159 SD_INFO(SD_LOG_COMMON, un,
5160 5160 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n",
5161 5161 pgeom_p->g_ncyl, pgeom_p->g_acyl,
5162 5162 pgeom_p->g_nhead, pgeom_p->g_nsect);
5163 5163 SD_INFO(SD_LOG_COMMON, un,
5164 5164 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n",
5165 5165 pgeom_p->g_secsize, pgeom_p->g_capacity,
5166 5166 pgeom_p->g_intrlv, pgeom_p->g_rpm);
5167 5167 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
5168 5168
5169 5169 page4_exit:
5170 5170 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH);
5171 5171
5172 5172 page3_exit:
5173 5173 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH);
5174 5174
5175 5175 if (status != 0) {
5176 5176 if (status == EIO) {
5177 5177 /*
5178 5178 * Some disks do not support mode sense(6), we
5179 5179 * should ignore this kind of error(sense key is
5180 5180 * 0x5 - illegal request).
5181 5181 */
5182 5182 uint8_t *sensep;
5183 5183 int senlen;
5184 5184
5185 5185 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
5186 5186 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
5187 5187 ssc->ssc_uscsi_cmd->uscsi_rqresid);
5188 5188
5189 5189 if (senlen > 0 &&
5190 5190 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
5191 5191 sd_ssc_assessment(ssc,
5192 5192 SD_FMT_IGNORE_COMPROMISE);
5193 5193 } else {
5194 5194 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
5195 5195 }
5196 5196 } else {
5197 5197 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5198 5198 }
5199 5199 }
5200 5200 sd_ssc_fini(ssc);
5201 5201 return (ret);
5202 5202 }
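The page-4 arithmetic above can be checked in isolation. The sketch below uses made-up values to show how the 24-bit cylinder count is reassembled from three bytes and how the alternate-cylinder count absorbs capacity that MODE SENSE reports beyond READ CAPACITY:

#include <stdio.h>

int
main(void)
{
	int cyl_ub = 0x01, cyl_mb = 0x23, cyl_lb = 0x45;	/* illustrative */
	int nhead = 16, nsect = 128;
	long capacity = 9000000;		/* from READ CAPACITY */
	int ncyl = (cyl_ub << 16) + (cyl_mb << 8) + cyl_lb;
	int spc = nhead * nsect;		/* sectors per cylinder */
	long ms_cap = (long)spc * ncyl;		/* MODE SENSE capacity */
	/* round excess capacity up to whole alternate cylinders */
	int acyl = (int)((ms_cap - capacity + spc - 1) / spc);

	printf("ncyl=%d spc=%d ms_cap=%ld acyl=%d\n",
	    ncyl, spc, ms_cap, acyl);
	return (0);
}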
5203 5203
5204 5204 /*
5205 5205 * Function: sd_get_virtual_geometry
5206 5206 *
5207 5207 * Description: Ask the controller to tell us about the target device.
5208 5208 *
5209 5209 * Arguments: un - pointer to softstate
5210 5210 * capacity - disk capacity in #blocks
5211 5211 * lbasize - disk block size in bytes
5212 5212 *
5213 5213 * Context: Kernel thread only
5214 5214 */
5215 5215
5216 5216 static int
5217 5217 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p,
5218 5218 diskaddr_t capacity, int lbasize)
5219 5219 {
5220 5220 uint_t geombuf;
5221 5221 int spc;
5222 5222
5223 5223 ASSERT(un != NULL);
5224 5224
5225 5225 /* Set sector size, and total number of sectors */
5226 5226 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1);
5227 5227 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1);
5228 5228
5229 5229 /* Let the HBA tell us its geometry */
5230 5230 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1);
5231 5231
5232 5232 /* A value of -1 indicates an undefined "geometry" property */
5233 5233 if (geombuf == (-1)) {
5234 5234 return (EINVAL);
5235 5235 }
5236 5236
5237 5237 /* Initialize the logical geometry cache. */
5238 5238 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff;
5239 5239 lgeom_p->g_nsect = geombuf & 0xffff;
5240 5240 lgeom_p->g_secsize = un->un_sys_blocksize;
5241 5241
5242 5242 spc = lgeom_p->g_nhead * lgeom_p->g_nsect;
5243 5243
5244 5244 /*
5245 5245 * Note: The driver originally converted the capacity value from
5246 5246 * target blocks to system blocks. However, the capacity value passed
5247 5247 * to this routine is already in terms of system blocks (this scaling
5248 5248 * is done when the READ CAPACITY command is issued and processed).
5249 5249 * This 'error' may have gone undetected because the usage of g_ncyl
5250 5250 * (which is based upon g_capacity) is very limited within the driver
5251 5251 */
5252 5252 lgeom_p->g_capacity = capacity;
5253 5253
5254 5254 /*
5255 5255 	 * Set ncyl to zero if the HBA returned a zero nhead or nsect value;
5256 5256 	 * the HBA may return zeroes if the device has been removed.
5257 5257 */
5258 5258 if (spc == 0) {
5259 5259 lgeom_p->g_ncyl = 0;
5260 5260 } else {
5261 5261 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc;
5262 5262 }
5263 5263 lgeom_p->g_acyl = 0;
5264 5264
5265 5265 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n");
5266 5266 return (0);
5267 5267
5268 5268 }
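A sketch of the packing the code above assumes for the HBA "geometry" capability: heads in the upper 16 bits, sectors per track in the lower 16, with cylinders derived from the capacity. The values below are illustrative only.

#include <stdio.h>

int
main(void)
{
	unsigned int geombuf = (255u << 16) | 63;	/* 255 heads, 63 spt */
	unsigned int nhead = (geombuf >> 16) & 0xffff;
	unsigned int nsect = geombuf & 0xffff;
	long capacity = 4194304;			/* system blocks */
	long ncyl = (nhead && nsect) ?
	    capacity / (long)(nhead * nsect) : 0;	/* 0 if removed */

	printf("nhead=%u nsect=%u ncyl=%ld\n", nhead, nsect, ncyl);
	return (0);
}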
5269 5269 /*
5270 5270 * Function: sd_update_block_info
5271 5271 *
5272 5272  * Description: Update the soft state with the new target block size
5273 5273  *		and capacity, and refresh the capacity error statistic.
5274 5274 *
5275 5275 * Arguments: un: unit struct.
5276 5276 * lbasize: new target sector size
5277 5277 * capacity: new target capacity, ie. block count
5278 5278 *
5279 5279 * Context: Kernel thread context
5280 5280 */
5281 5281
5282 5282 static void
5283 5283 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity)
5284 5284 {
5285 5285 if (lbasize != 0) {
5286 5286 un->un_tgt_blocksize = lbasize;
5287 5287 un->un_f_tgt_blocksize_is_valid = TRUE;
5288 5288 if (!un->un_f_has_removable_media) {
5289 5289 un->un_sys_blocksize = lbasize;
5290 5290 }
5291 5291 }
5292 5292
5293 5293 if (capacity != 0) {
5294 5294 un->un_blockcount = capacity;
5295 5295 un->un_f_blockcount_is_valid = TRUE;
5296 5296
5297 5297 /*
5298 5298 * The capacity has changed so update the errstats.
5299 5299 */
5300 5300 if (un->un_errstats != NULL) {
5301 5301 struct sd_errstats *stp;
5302 5302
5303 5303 capacity *= un->un_sys_blocksize;
5304 5304 stp = (struct sd_errstats *)un->un_errstats->ks_data;
5305 5305 if (stp->sd_capacity.value.ui64 < capacity)
5306 5306 stp->sd_capacity.value.ui64 = capacity;
5307 5307 }
5308 5308 }
5309 5309 }
5310 5310
5311 5311
5312 5312 /*
5313 5313 * Function: sd_register_devid
5314 5314 *
5315 5315 * Description: This routine will obtain the device id information from the
5316 5316 * target, obtain the serial number, and register the device
5317 5317 * id with the ddi framework.
5318 5318 *
5319 5319 * Arguments: devi - the system's dev_info_t for the device.
5320 5320 * un - driver soft state (unit) structure
5321 5321 * reservation_flag - indicates if a reservation conflict
5322 5322 * occurred during attach
5323 5323 *
5324 5324 * Context: Kernel Thread
5325 5325 */
5326 5326 static void
5327 5327 sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag)
5328 5328 {
5329 5329 int rval = 0;
5330 5330 uchar_t *inq80 = NULL;
5331 5331 size_t inq80_len = MAX_INQUIRY_SIZE;
5332 5332 size_t inq80_resid = 0;
5333 5333 uchar_t *inq83 = NULL;
5334 5334 size_t inq83_len = MAX_INQUIRY_SIZE;
5335 5335 size_t inq83_resid = 0;
5336 5336 int dlen, len;
5337 5337 char *sn;
5338 5338 struct sd_lun *un;
5339 5339
5340 5340 ASSERT(ssc != NULL);
5341 5341 un = ssc->ssc_un;
5342 5342 ASSERT(un != NULL);
5343 5343 ASSERT(mutex_owned(SD_MUTEX(un)));
5344 5344 ASSERT((SD_DEVINFO(un)) == devi);
5345 5345
5346 5346
5347 5347 /*
5348 5348 * We check the availability of the World Wide Name (0x83) and Unit
5349 5349 * Serial Number (0x80) pages in sd_check_vpd_page_support(), and using
5350 5350 * un_vpd_page_mask from them, we decide which way to get the WWN. If
5351 5351 * 0x83 is available, that is the best choice. Our next choice is
5352 5352 * 0x80. If neither are available, we munge the devid from the device
5353 5353 * vid/pid/serial # for Sun qualified disks, or use the ddi framework
5354 5354 * to fabricate a devid for non-Sun qualified disks.
5355 5355 */
5356 5356 if (sd_check_vpd_page_support(ssc) == 0) {
5357 5357 /* collect page 80 data if available */
5358 5358 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {
5359 5359
5360 5360 mutex_exit(SD_MUTEX(un));
5361 5361 inq80 = kmem_zalloc(inq80_len, KM_SLEEP);
5362 5362
5363 5363 rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len,
5364 5364 0x01, 0x80, &inq80_resid);
5365 5365
5366 5366 if (rval != 0) {
5367 5367 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5368 5368 kmem_free(inq80, inq80_len);
5369 5369 inq80 = NULL;
5370 5370 inq80_len = 0;
5371 5371 } else if (ddi_prop_exists(
5372 5372 DDI_DEV_T_NONE, SD_DEVINFO(un),
5373 5373 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
5374 5374 INQUIRY_SERIAL_NO) == 0) {
5375 5375 /*
5376 5376 * If we don't already have a serial number
5377 5377 * property, do quick verify of data returned
5378 5378 * and define property.
5379 5379 */
5380 5380 dlen = inq80_len - inq80_resid;
5381 5381 len = (size_t)inq80[3];
5382 5382 if ((dlen >= 4) && ((len + 4) <= dlen)) {
5383 5383 /*
5384 5384 * Ensure sn termination, skip leading
5385 5385 * blanks, and create property
5386 5386 * 'inquiry-serial-no'.
5387 5387 */
5388 5388 sn = (char *)&inq80[4];
5389 5389 sn[len] = 0;
5390 5390 while (*sn && (*sn == ' '))
5391 5391 sn++;
5392 5392 if (*sn) {
5393 5393 (void) ddi_prop_update_string(
5394 5394 DDI_DEV_T_NONE,
5395 5395 SD_DEVINFO(un),
5396 5396 INQUIRY_SERIAL_NO, sn);
5397 5397 }
5398 5398 }
5399 5399 }
5400 5400 mutex_enter(SD_MUTEX(un));
5401 5401 }
5402 5402
5403 5403 /* collect page 83 data if available */
5404 5404 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
5405 5405 mutex_exit(SD_MUTEX(un));
5406 5406 inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
5407 5407
5408 5408 rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len,
5409 5409 0x01, 0x83, &inq83_resid);
5410 5410
5411 5411 if (rval != 0) {
5412 5412 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5413 5413 kmem_free(inq83, inq83_len);
5414 5414 inq83 = NULL;
5415 5415 inq83_len = 0;
5416 5416 }
5417 5417 mutex_enter(SD_MUTEX(un));
5418 5418 }
5419 5419 }
5420 5420
5421 5421 /*
5422 5422 * If transport has already registered a devid for this target
5423 5423 * then that takes precedence over the driver's determination
5424 5424 * of the devid.
5425 5425 *
5426 5426 * NOTE: The reason this check is done here instead of at the beginning
5427 5427 * of the function is to allow the code above to create the
5428 5428 * 'inquiry-serial-no' property.
5429 5429 */
5430 5430 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) {
5431 5431 ASSERT(un->un_devid);
5432 5432 un->un_f_devid_transport_defined = TRUE;
5433 5433 goto cleanup; /* use devid registered by the transport */
5434 5434 }
5435 5435
5436 5436 /*
5437 5437 * This is the case of antiquated Sun disk drives that have the
5438 5438 * FAB_DEVID property set in the disk_table. These drives
5439 5439 	 * manage their devids by storing them in the last 2 available sectors
5440 5440 * on the drive and have them fabricated by the ddi layer by calling
5441 5441 * ddi_devid_init and passing the DEVID_FAB flag.
5442 5442 */
5443 5443 if (un->un_f_opt_fab_devid == TRUE) {
5444 5444 /*
5445 5445 * Depending on EINVAL isn't reliable, since a reserved disk
5446 5446 * may result in invalid geometry, so check to make sure a
5447 5447 * reservation conflict did not occur during attach.
5448 5448 */
5449 5449 if ((sd_get_devid(ssc) == EINVAL) &&
5450 5450 (reservation_flag != SD_TARGET_IS_RESERVED)) {
5451 5451 /*
5452 5452 * The devid is invalid AND there is no reservation
5453 5453 * conflict. Fabricate a new devid.
5454 5454 */
5455 5455 (void) sd_create_devid(ssc);
5456 5456 }
5457 5457
5458 5458 /* Register the devid if it exists */
5459 5459 if (un->un_devid != NULL) {
5460 5460 (void) ddi_devid_register(SD_DEVINFO(un),
5461 5461 un->un_devid);
5462 5462 SD_INFO(SD_LOG_ATTACH_DETACH, un,
5463 5463 "sd_register_devid: Devid Fabricated\n");
5464 5464 }
5465 5465 goto cleanup;
5466 5466 }
5467 5467
5468 5468 /* encode best devid possible based on data available */
5469 5469 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
5470 5470 (char *)ddi_driver_name(SD_DEVINFO(un)),
5471 5471 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
5472 5472 inq80, inq80_len - inq80_resid, inq83, inq83_len -
5473 5473 inq83_resid, &un->un_devid) == DDI_SUCCESS) {
5474 5474
5475 5475 /* devid successfully encoded, register devid */
5476 5476 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);
5477 5477
5478 5478 } else {
5479 5479 /*
5480 5480 * Unable to encode a devid based on data available.
5481 5481 		 * This is not a Sun-qualified disk. Older Sun disk
5482 5482 		 * drives that have the SD_FAB_DEVID property
5483 5483 		 * set in the disk_table and non-Sun-qualified
5484 5484 		 * disks are treated in the same manner. These
5485 5485 		 * drives manage their devids by storing them in
5486 5486 		 * the last 2 available sectors on the drive and
5487 5487 		 * have them fabricated by the ddi layer by
5488 5488 		 * calling ddi_devid_init and passing the
5489 5489 		 * DEVID_FAB flag.
5490 5490 		 * Create a fabricated devid only if one does
5491 5491 		 * not already exist.
5492 5492 */
5493 5493 if (sd_get_devid(ssc) == EINVAL) {
5494 5494 (void) sd_create_devid(ssc);
5495 5495 }
5496 5496 un->un_f_opt_fab_devid = TRUE;
5497 5497
5498 5498 /* Register the devid if it exists */
5499 5499 if (un->un_devid != NULL) {
5500 5500 (void) ddi_devid_register(SD_DEVINFO(un),
5501 5501 un->un_devid);
5502 5502 SD_INFO(SD_LOG_ATTACH_DETACH, un,
5503 5503 "sd_register_devid: devid fabricated using "
5504 5504 "ddi framework\n");
5505 5505 }
5506 5506 }
5507 5507
5508 5508 cleanup:
5509 5509 /* clean up resources */
5510 5510 if (inq80 != NULL) {
5511 5511 kmem_free(inq80, inq80_len);
5512 5512 }
5513 5513 if (inq83 != NULL) {
5514 5514 kmem_free(inq83, inq83_len);
5515 5515 }
5516 5516 }
5517 5517
5518 5518
5519 5519
5520 5520 /*
5521 5521 * Function: sd_get_devid
5522 5522 *
5523 5523 * Description: This routine will return 0 if a valid device id has been
5524 5524 * obtained from the target and stored in the soft state. If a
5525 5525 * valid device id has not been previously read and stored, a
5526 5526 * read attempt will be made.
5527 5527 *
5528 5528 * Arguments: un - driver soft state (unit) structure
5529 5529 *
5530 5530 * Return Code: 0 if we successfully get the device id
5531 5531 *
5532 5532 * Context: Kernel Thread
5533 5533 */
5534 5534
5535 5535 static int
5536 5536 sd_get_devid(sd_ssc_t *ssc)
5537 5537 {
5538 5538 struct dk_devid *dkdevid;
5539 5539 ddi_devid_t tmpid;
5540 5540 uint_t *ip;
5541 5541 size_t sz;
5542 5542 diskaddr_t blk;
5543 5543 int status;
5544 5544 int chksum;
5545 5545 int i;
5546 5546 size_t buffer_size;
5547 5547 struct sd_lun *un;
5548 5548
5549 5549 ASSERT(ssc != NULL);
5550 5550 un = ssc->ssc_un;
5551 5551 ASSERT(un != NULL);
5552 5552 ASSERT(mutex_owned(SD_MUTEX(un)));
5553 5553
5554 5554 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n",
5555 5555 un);
5556 5556
5557 5557 if (un->un_devid != NULL) {
5558 5558 return (0);
5559 5559 }
5560 5560
5561 5561 mutex_exit(SD_MUTEX(un));
5562 5562 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
5563 5563 (void *)SD_PATH_DIRECT) != 0) {
5564 5564 mutex_enter(SD_MUTEX(un));
5565 5565 return (EINVAL);
5566 5566 }
5567 5567
5568 5568 /*
5569 5569 * Read and verify device id, stored in the reserved cylinders at the
5570 5570 	 * end of the disk. The backup label is on the odd sectors of the
5571 5571 	 * last track of the last cylinder; the device id is on a track of
5572 5572 	 * the next-to-last cylinder.
5573 5573 */
5574 5574 mutex_enter(SD_MUTEX(un));
5575 5575 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid));
5576 5576 mutex_exit(SD_MUTEX(un));
5577 5577 dkdevid = kmem_alloc(buffer_size, KM_SLEEP);
5578 5578 status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk,
5579 5579 SD_PATH_DIRECT);
5580 5580
5581 5581 if (status != 0) {
5582 5582 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5583 5583 goto error;
5584 5584 }
5585 5585
5586 5586 /* Validate the revision */
5587 5587 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
5588 5588 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
5589 5589 status = EINVAL;
5590 5590 goto error;
5591 5591 }
5592 5592
5593 5593 /* Calculate the checksum */
5594 5594 chksum = 0;
5595 5595 ip = (uint_t *)dkdevid;
5596 5596 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
5597 5597 i++) {
5598 5598 chksum ^= ip[i];
5599 5599 }
5600 5600
5601 5601 /* Compare the checksums */
5602 5602 if (DKD_GETCHKSUM(dkdevid) != chksum) {
5603 5603 status = EINVAL;
5604 5604 goto error;
5605 5605 }
5606 5606
5607 5607 /* Validate the device id */
5608 5608 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
5609 5609 status = EINVAL;
5610 5610 goto error;
5611 5611 }
5612 5612
5613 5613 /*
5614 5614 * Store the device id in the driver soft state
5615 5615 */
5616 5616 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
5617 5617 tmpid = kmem_alloc(sz, KM_SLEEP);
5618 5618
5619 5619 mutex_enter(SD_MUTEX(un));
5620 5620
5621 5621 un->un_devid = tmpid;
5622 5622 bcopy(&dkdevid->dkd_devid, un->un_devid, sz);
5623 5623
5624 5624 kmem_free(dkdevid, buffer_size);
5625 5625
5626 5626 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un);
5627 5627
5628 5628 return (status);
5629 5629 error:
5630 5630 mutex_enter(SD_MUTEX(un));
5631 5631 kmem_free(dkdevid, buffer_size);
5632 5632 return (status);
5633 5633 }
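The on-disk dk_devid checksum used above is a plain XOR over the 32-bit words of the sector, excluding the trailing word where the checksum itself lives. A minimal userland sketch, with DEV_BSIZE assumed to be 512 and the checksum placement simplified relative to the DKD_FORMCHKSUM/DKD_GETCHKSUM macros:

#include <stdio.h>

int
main(void)
{
	unsigned int blk[128] = { 0 };	/* 512-byte sector as words */
	unsigned int chksum = 0;
	int i, last = (512 - 4) / 4;	/* word index of the checksum */

	blk[0] = 0xdeadbeef;		/* pretend devid payload */
	for (i = 0; i < last; i++)	/* XOR everything but the last word */
		chksum ^= blk[i];
	blk[last] = chksum;		/* where the checksum is recorded */
	printf("chksum = 0x%x\n", chksum);
	return (0);
}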
5634 5634
5635 5635
5636 5636 /*
5637 5637 * Function: sd_create_devid
5638 5638 *
5639 5639 * Description: This routine will fabricate the device id and write it
5640 5640 * to the disk.
5641 5641 *
5642 5642 * Arguments: un - driver soft state (unit) structure
5643 5643 *
5644 5644  * Return Code: the fabricated device id, or NULL on failure
5645 5645 *
5646 5646 * Context: Kernel Thread
5647 5647 */
5648 5648
5649 5649 static ddi_devid_t
5650 5650 sd_create_devid(sd_ssc_t *ssc)
5651 5651 {
5652 5652 struct sd_lun *un;
5653 5653
5654 5654 ASSERT(ssc != NULL);
5655 5655 un = ssc->ssc_un;
5656 5656 ASSERT(un != NULL);
5657 5657
5658 5658 /* Fabricate the devid */
5659 5659 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid)
5660 5660 == DDI_FAILURE) {
5661 5661 return (NULL);
5662 5662 }
5663 5663
5664 5664 /* Write the devid to disk */
5665 5665 if (sd_write_deviceid(ssc) != 0) {
5666 5666 ddi_devid_free(un->un_devid);
5667 5667 un->un_devid = NULL;
5668 5668 }
5669 5669
5670 5670 return (un->un_devid);
5671 5671 }
5672 5672
5673 5673
5674 5674 /*
5675 5675 * Function: sd_write_deviceid
5676 5676 *
5677 5677 * Description: This routine will write the device id to the disk
5678 5678 * reserved sector.
5679 5679 *
5680 5680 * Arguments: un - driver soft state (unit) structure
5681 5681 *
5682 5682  * Return Code: -1 if the devid block cannot be located
5683 5683  *		otherwise the value returned by sd_send_scsi_WRITE
5684 5684 *
5685 5685 * Context: Kernel Thread
5686 5686 */
5687 5687
5688 5688 static int
5689 5689 sd_write_deviceid(sd_ssc_t *ssc)
5690 5690 {
5691 5691 struct dk_devid *dkdevid;
5692 5692 uchar_t *buf;
5693 5693 diskaddr_t blk;
5694 5694 uint_t *ip, chksum;
5695 5695 int status;
5696 5696 int i;
5697 5697 struct sd_lun *un;
5698 5698
5699 5699 ASSERT(ssc != NULL);
5700 5700 un = ssc->ssc_un;
5701 5701 ASSERT(un != NULL);
5702 5702 ASSERT(mutex_owned(SD_MUTEX(un)));
5703 5703
5704 5704 mutex_exit(SD_MUTEX(un));
5705 5705 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
5706 5706 (void *)SD_PATH_DIRECT) != 0) {
5707 5707 mutex_enter(SD_MUTEX(un));
5708 5708 return (-1);
5709 5709 }
5710 5710
5711 5711
5712 5712 /* Allocate the buffer */
5713 5713 buf = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);
5714 5714 dkdevid = (struct dk_devid *)buf;
5715 5715
5716 5716 /* Fill in the revision */
5717 5717 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
5718 5718 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;
5719 5719
5720 5720 /* Copy in the device id */
5721 5721 mutex_enter(SD_MUTEX(un));
5722 5722 bcopy(un->un_devid, &dkdevid->dkd_devid,
5723 5723 ddi_devid_sizeof(un->un_devid));
5724 5724 mutex_exit(SD_MUTEX(un));
5725 5725
5726 5726 /* Calculate the checksum */
5727 5727 chksum = 0;
5728 5728 ip = (uint_t *)dkdevid;
5729 5729 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
5730 5730 i++) {
5731 5731 chksum ^= ip[i];
5732 5732 }
5733 5733
5734 5734 /* Fill-in checksum */
5735 5735 DKD_FORMCHKSUM(chksum, dkdevid);
5736 5736
5737 5737 /* Write the reserved sector */
5738 5738 status = sd_send_scsi_WRITE(ssc, buf, un->un_sys_blocksize, blk,
5739 5739 SD_PATH_DIRECT);
5740 5740 if (status != 0)
5741 5741 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5742 5742
5743 5743 kmem_free(buf, un->un_sys_blocksize);
5744 5744
5745 5745 mutex_enter(SD_MUTEX(un));
5746 5746 return (status);
5747 5747 }
5748 5748
5749 5749
5750 5750 /*
5751 5751 * Function: sd_check_vpd_page_support
5752 5752 *
5753 5753 * Description: This routine sends an inquiry command with the EVPD bit set and
5754 5754 * a page code of 0x00 to the device. It is used to determine which
5755 5755 * vital product pages are available to find the devid. We are
5756 5756  *		looking for pages 0x83, 0x80, or 0xB1. If we return -1,
5757 5757 * the device does not support that command.
5758 5758 *
5759 5759 * Arguments: un - driver soft state (unit) structure
5760 5760 *
5761 5761  * Return Code: 0 - success
5762 5762  *		-1 - the device does not support VPD pages
5763 5763 *
5764 5764 * Context: This routine can sleep.
5765 5765 */
5766 5766
5767 5767 static int
5768 5768 sd_check_vpd_page_support(sd_ssc_t *ssc)
5769 5769 {
5770 5770 uchar_t *page_list = NULL;
5771 5771 uchar_t page_length = 0xff; /* Use max possible length */
5772 5772 uchar_t evpd = 0x01; /* Set the EVPD bit */
5773 5773 uchar_t page_code = 0x00; /* Supported VPD Pages */
5774 5774 int rval = 0;
5775 5775 int counter;
5776 5776 struct sd_lun *un;
5777 5777
5778 5778 ASSERT(ssc != NULL);
5779 5779 un = ssc->ssc_un;
5780 5780 ASSERT(un != NULL);
5781 5781 ASSERT(mutex_owned(SD_MUTEX(un)));
5782 5782
5783 5783 mutex_exit(SD_MUTEX(un));
5784 5784
5785 5785 /*
5786 5786 * We'll set the page length to the maximum to save figuring it out
5787 5787 * with an additional call.
5788 5788 */
5789 5789 page_list = kmem_zalloc(page_length, KM_SLEEP);
5790 5790
5791 5791 rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd,
5792 5792 page_code, NULL);
5793 5793
5794 5794 if (rval != 0)
5795 5795 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5796 5796
5797 5797 mutex_enter(SD_MUTEX(un));
5798 5798
5799 5799 /*
5800 5800 * Now we must validate that the device accepted the command, as some
5801 5801 * drives do not support it. If the drive does support it, we will
5802 5802 * return 0, and the supported pages will be in un_vpd_page_mask. If
5803 5803 * not, we return -1.
5804 5804 */
5805 5805 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) {
5806 5806 /* Loop to find one of the 2 pages we need */
5807 5807 counter = 4; /* Supported pages start at byte 4, with 0x00 */
5808 5808
5809 5809 /*
5810 5810 * Pages are returned in ascending order, and 0x83 is what we
5811 5811 * are hoping for.
5812 5812 */
5813 5813 while ((page_list[counter] <= 0xB1) &&
5814 5814 (counter <= (page_list[VPD_PAGE_LENGTH] +
5815 5815 VPD_HEAD_OFFSET))) {
5816 5816 			/*
5817 5817 			 * page_list[VPD_PAGE_LENGTH] is the count of page
5818 5818 			 * codes following the header, so the last valid
5819 5819 			 * index is that count plus VPD_HEAD_OFFSET.
5820 5820 			 */
5820 5820
5821 5821 switch (page_list[counter]) {
5822 5822 case 0x00:
5823 5823 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG;
5824 5824 break;
5825 5825 case 0x80:
5826 5826 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG;
5827 5827 break;
5828 5828 case 0x81:
5829 5829 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG;
5830 5830 break;
5831 5831 case 0x82:
5832 5832 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG;
5833 5833 break;
5834 5834 case 0x83:
5835 5835 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG;
5836 5836 break;
5837 5837 case 0x86:
5838 5838 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG;
5839 5839 break;
5840 5840 case 0xB1:
5841 5841 un->un_vpd_page_mask |= SD_VPD_DEV_CHARACTER_PG;
5842 5842 break;
5843 5843 }
5844 5844 counter++;
5845 5845 }
5846 5846
5847 5847 } else {
5848 5848 rval = -1;
5849 5849
5850 5850 SD_INFO(SD_LOG_ATTACH_DETACH, un,
5851 5851 "sd_check_vpd_page_support: This drive does not implement "
5852 5852 "VPD pages.\n");
5853 5853 }
5854 5854
5855 5855 kmem_free(page_list, page_length);
5856 5856
5857 5857 return (rval);
5858 5858 }
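A sketch of the VPD page-0x00 layout the loop above walks: a 4-byte header whose last byte counts the page codes that follow, then one supported page code per byte in ascending order. The sample data is assumed for illustration.

#include <stdio.h>

int
main(void)
{
	unsigned char page_list[] = {
		0x00, 0x00, 0x00, 0x03,	/* header: 3 page codes follow */
		0x00, 0x80, 0x83	/* supported VPD pages, ascending */
	};
	int n = page_list[3];		/* page length byte */
	int i;

	for (i = 0; i < n; i++)		/* codes start at byte 4 */
		printf("supports VPD page 0x%02x\n", page_list[4 + i]);
	return (0);
}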
5859 5859
5860 5860
5861 5861 /*
5862 5862 * Function: sd_setup_pm
5863 5863 *
5864 5864 * Description: Initialize Power Management on the device
5865 5865 *
5866 5866 * Context: Kernel Thread
5867 5867 */
5868 5868
5869 5869 static void
5870 5870 sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi)
5871 5871 {
5872 5872 uint_t log_page_size;
5873 5873 uchar_t *log_page_data;
5874 5874 int rval = 0;
5875 5875 struct sd_lun *un;
5876 5876
5877 5877 ASSERT(ssc != NULL);
5878 5878 un = ssc->ssc_un;
5879 5879 ASSERT(un != NULL);
5880 5880
5881 5881 /*
5882 5882 * Since we are called from attach, holding a mutex for
5883 5883 * un is unnecessary. Because some of the routines called
5884 5884 * from here require SD_MUTEX to not be held, assert this
5885 5885 * right up front.
5886 5886 */
5887 5887 ASSERT(!mutex_owned(SD_MUTEX(un)));
5888 5888 /*
5889 5889 * Since the sd device does not have the 'reg' property,
5890 5890 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
5891 5891 * The following code is to tell cpr that this device
5892 5892 * DOES need to be suspended and resumed.
5893 5893 */
5894 5894 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
5895 5895 "pm-hardware-state", "needs-suspend-resume");
5896 5896
5897 5897 /*
5898 5898 * This complies with the new power management framework
5899 5899 * for certain desktop machines. Create the pm_components
5900 5900 * property as a string array property.
5901 5901 * If un_f_pm_supported is TRUE, that means the disk
5902 5902 * attached HBA has set the "pm-capable" property and
5903 5903 * the value of this property is bigger than 0.
5904 5904 */
5905 5905 if (un->un_f_pm_supported) {
5906 5906 		/*
5907 5907 		 * Not all devices have a motor; try it first.
5908 5908 		 * Some devices may return ILLEGAL REQUEST and
5909 5909 		 * some will hang.
5910 5910 		 * The following START_STOP_UNIT is used to check
5911 5911 		 * whether the target device has a motor.
5912 5912 		 */
5913 5913 un->un_f_start_stop_supported = TRUE;
5914 5914
5915 5915 if (un->un_f_power_condition_supported) {
5916 5916 rval = sd_send_scsi_START_STOP_UNIT(ssc,
5917 5917 SD_POWER_CONDITION, SD_TARGET_ACTIVE,
5918 5918 SD_PATH_DIRECT);
5919 5919 if (rval != 0) {
5920 5920 un->un_f_power_condition_supported = FALSE;
5921 5921 }
5922 5922 }
5923 5923 if (!un->un_f_power_condition_supported) {
5924 5924 rval = sd_send_scsi_START_STOP_UNIT(ssc,
5925 5925 SD_START_STOP, SD_TARGET_START, SD_PATH_DIRECT);
5926 5926 }
5927 5927 if (rval != 0) {
5928 5928 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5929 5929 un->un_f_start_stop_supported = FALSE;
5930 5930 }
5931 5931
5932 5932 		/*
5933 5933 		 * Create the pm properties anyway; otherwise the
5934 5934 		 * parent can't go to sleep.
5935 5935 		 */
5936 5936 un->un_f_pm_is_enabled = TRUE;
5937 5937 (void) sd_create_pm_components(devi, un);
5938 5938
5939 5939 /*
5940 5940 * If it claims that log sense is supported, check it out.
5941 5941 */
5942 5942 if (un->un_f_log_sense_supported) {
5943 5943 rval = sd_log_page_supported(ssc,
5944 5944 START_STOP_CYCLE_PAGE);
5945 5945 if (rval == 1) {
5946 5946 /* Page found, use it. */
5947 5947 un->un_start_stop_cycle_page =
5948 5948 START_STOP_CYCLE_PAGE;
5949 5949 } else {
5950 5950 /*
5951 5951 * Page not found or log sense is not
5952 5952 * supported.
5953 5953 * Notice we do not check the old style
5954 5954 * START_STOP_CYCLE_VU_PAGE because this
5955 5955 * code path does not apply to old disks.
5956 5956 */
5957 5957 un->un_f_log_sense_supported = FALSE;
5958 5958 un->un_f_pm_log_sense_smart = FALSE;
5959 5959 }
5960 5960 }
5961 5961
5962 5962 return;
5963 5963 }
5964 5964
5965 5965 /*
5966 5966 * For the disk whose attached HBA has not set the "pm-capable"
5967 5967 * property, check if it supports the power management.
5968 5968 */
5969 5969 if (!un->un_f_log_sense_supported) {
5970 5970 un->un_power_level = SD_SPINDLE_ON;
5971 5971 un->un_f_pm_is_enabled = FALSE;
5972 5972 return;
5973 5973 }
5974 5974
5975 5975 rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE);
5976 5976
5977 5977 #ifdef SDDEBUG
5978 5978 if (sd_force_pm_supported) {
5979 5979 /* Force a successful result */
5980 5980 rval = 1;
5981 5981 }
5982 5982 #endif
5983 5983
5984 5984 /*
5985 5985 * If the start-stop cycle counter log page is not supported
5986 5986 	 * or if the pm-capable property is set to false (0),
5987 5987 * then we should not create the pm_components property.
5988 5988 */
5989 5989 if (rval == -1) {
5990 5990 /*
5991 5991 * Error.
5992 5992 * Reading log sense failed, most likely this is
5993 5993 * an older drive that does not support log sense.
5994 5994 * If this fails auto-pm is not supported.
5995 5995 */
5996 5996 un->un_power_level = SD_SPINDLE_ON;
5997 5997 un->un_f_pm_is_enabled = FALSE;
5998 5998
5999 5999 } else if (rval == 0) {
6000 6000 /*
6001 6001 * Page not found.
6002 6002 * The start stop cycle counter is implemented as page
6003 6003 		 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For
6004 6004 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE).
6005 6005 */
6006 6006 if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) {
6007 6007 /*
6008 6008 * Page found, use this one.
6009 6009 */
6010 6010 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
6011 6011 un->un_f_pm_is_enabled = TRUE;
6012 6012 } else {
6013 6013 /*
6014 6014 * Error or page not found.
6015 6015 * auto-pm is not supported for this device.
6016 6016 */
6017 6017 un->un_power_level = SD_SPINDLE_ON;
6018 6018 un->un_f_pm_is_enabled = FALSE;
6019 6019 }
6020 6020 } else {
6021 6021 /*
6022 6022 * Page found, use it.
6023 6023 */
6024 6024 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
6025 6025 un->un_f_pm_is_enabled = TRUE;
6026 6026 }
6027 6027
6028 6028
6029 6029 if (un->un_f_pm_is_enabled == TRUE) {
6030 6030 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
6031 6031 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
6032 6032
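          		/*
          		 * The 0x01 below is the LOG SENSE page control field;
          		 * 0x01 requests current cumulative values.
          		 */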
6033 6033 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
6034 6034 log_page_size, un->un_start_stop_cycle_page,
6035 6035 0x01, 0, SD_PATH_DIRECT);
6036 6036
6037 6037 if (rval != 0) {
6038 6038 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6039 6039 }
6040 6040
6041 6041 #ifdef SDDEBUG
6042 6042 if (sd_force_pm_supported) {
6043 6043 /* Force a successful result */
6044 6044 rval = 0;
6045 6045 }
6046 6046 #endif
6047 6047
6048 6048 /*
6049 6049 		 * If the Log Sense for the start/stop cycle counter page
6050 6050 * succeeds, then power management is supported and we can
6051 6051 * enable auto-pm.
6052 6052 */
6053 6053 if (rval == 0) {
6054 6054 (void) sd_create_pm_components(devi, un);
6055 6055 } else {
6056 6056 un->un_power_level = SD_SPINDLE_ON;
6057 6057 un->un_f_pm_is_enabled = FALSE;
6058 6058 }
6059 6059
6060 6060 kmem_free(log_page_data, log_page_size);
6061 6061 }
6062 6062 }
6063 6063
6064 6064
6065 6065 /*
6066 6066 * Function: sd_create_pm_components
6067 6067 *
6068 6068 * Description: Initialize PM property.
6069 6069 *
6070 6070 * Context: Kernel thread context
6071 6071 */
6072 6072
6073 6073 static void
6074 6074 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un)
6075 6075 {
6076 6076 ASSERT(!mutex_owned(SD_MUTEX(un)));
6077 6077
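          	/*
          	 * Note: sd_pwr_pc.pm_comp is assumed to hold the component
          	 * name plus the four power-condition level strings (hence the
          	 * count of 5 below), while sd_pwr_ss.pm_comp holds the name
          	 * plus the two start/stop level strings (hence 3).
          	 */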
6078 6078 if (un->un_f_power_condition_supported) {
6079 6079 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
6080 6080 "pm-components", sd_pwr_pc.pm_comp, 5)
6081 6081 != DDI_PROP_SUCCESS) {
6082 6082 un->un_power_level = SD_SPINDLE_ACTIVE;
6083 6083 un->un_f_pm_is_enabled = FALSE;
6084 6084 return;
6085 6085 }
6086 6086 } else {
6087 6087 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
6088 6088 "pm-components", sd_pwr_ss.pm_comp, 3)
6089 6089 != DDI_PROP_SUCCESS) {
6090 6090 un->un_power_level = SD_SPINDLE_ON;
6091 6091 un->un_f_pm_is_enabled = FALSE;
6092 6092 return;
6093 6093 }
6094 6094 }
6095 6095 /*
6096 6096 	 * When components are initially created they are idle;
6097 6097 	 * power up any non-removables.
6098 6098 * Note: the return value of pm_raise_power can't be used
6099 6099 * for determining if PM should be enabled for this device.
6100 6100 * Even if you check the return values and remove this
6101 6101 * property created above, the PM framework will not honor the
6102 6102 * change after the first call to pm_raise_power. Hence,
6103 6103 * removal of that property does not help if pm_raise_power
6104 6104 * fails. In the case of removable media, the start/stop
6105 6105 * will fail if the media is not present.
6106 6106 */
6107 6107 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0,
6108 6108 SD_PM_STATE_ACTIVE(un)) == DDI_SUCCESS)) {
6109 6109 mutex_enter(SD_MUTEX(un));
6110 6110 un->un_power_level = SD_PM_STATE_ACTIVE(un);
6111 6111 mutex_enter(&un->un_pm_mutex);
6112 6112 /* Set to on and not busy. */
6113 6113 un->un_pm_count = 0;
6114 6114 } else {
6115 6115 mutex_enter(SD_MUTEX(un));
6116 6116 un->un_power_level = SD_PM_STATE_STOPPED(un);
6117 6117 mutex_enter(&un->un_pm_mutex);
6118 6118 /* Set to off. */
6119 6119 un->un_pm_count = -1;
6120 6120 }
6121 6121 mutex_exit(&un->un_pm_mutex);
6122 6122 mutex_exit(SD_MUTEX(un));
6123 6123 }
6124 6124
6125 6125
6126 6126 /*
6127 6127 * Function: sd_ddi_suspend
6128 6128 *
6129 6129 * Description: Performs system power-down operations. This includes
6130 6130  *		setting the drive state to indicate it is suspended so
6131 6131 * that no new commands will be accepted. Also, wait for
6132 6132 * all commands that are in transport or queued to a timer
6133 6133 * for retry to complete. All timeout threads are cancelled.
6134 6134 *
6135 6135 * Return Code: DDI_FAILURE or DDI_SUCCESS
6136 6136 *
6137 6137 * Context: Kernel thread context
6138 6138 */
6139 6139
6140 6140 static int
6141 6141 sd_ddi_suspend(dev_info_t *devi)
6142 6142 {
6143 6143 struct sd_lun *un;
6144 6144 clock_t wait_cmds_complete;
6145 6145
6146 6146 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
6147 6147 if (un == NULL) {
6148 6148 return (DDI_FAILURE);
6149 6149 }
6150 6150
6151 6151 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");
6152 6152
6153 6153 mutex_enter(SD_MUTEX(un));
6154 6154
6155 6155 /* Return success if the device is already suspended. */
6156 6156 if (un->un_state == SD_STATE_SUSPENDED) {
6157 6157 mutex_exit(SD_MUTEX(un));
6158 6158 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6159 6159 "device already suspended, exiting\n");
6160 6160 return (DDI_SUCCESS);
6161 6161 }
6162 6162
6163 6163 /* Return failure if the device is being used by HA */
6164 6164 if (un->un_resvd_status &
6165 6165 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
6166 6166 mutex_exit(SD_MUTEX(un));
6167 6167 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6168 6168 "device in use by HA, exiting\n");
6169 6169 return (DDI_FAILURE);
6170 6170 }
6171 6171
6172 6172 /*
6173 6173 * Return failure if the device is in a resource wait
6174 6174 * or power changing state.
6175 6175 */
6176 6176 if ((un->un_state == SD_STATE_RWAIT) ||
6177 6177 (un->un_state == SD_STATE_PM_CHANGING)) {
6178 6178 mutex_exit(SD_MUTEX(un));
6179 6179 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6180 6180 "device in resource wait state, exiting\n");
6181 6181 return (DDI_FAILURE);
6182 6182 }
6183 6183
6184 6184
6185 6185 un->un_save_state = un->un_last_state;
6186 6186 New_state(un, SD_STATE_SUSPENDED);
6187 6187
6188 6188 /*
6189 6189 * Wait for all commands that are in transport or queued to a timer
6190 6190 * for retry to complete.
6191 6191 *
6192 6192 * While waiting, no new commands will be accepted or sent because of
6193 6193 * the new state we set above.
6194 6194 *
6195 6195 * Wait till current operation has completed. If we are in the resource
6196 6196 * wait state (with an intr outstanding) then we need to wait till the
6197 6197 * intr completes and starts the next cmd. We want to wait for
6198 6198 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND.
6199 6199 */
6200 6200 wait_cmds_complete = ddi_get_lbolt() +
6201 6201 (sd_wait_cmds_complete * drv_usectohz(1000000));
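          	/*
          	 * wait_cmds_complete is an absolute deadline in lbolt ticks
          	 * (now plus sd_wait_cmds_complete seconds); cv_timedwait(9F)
          	 * below takes an absolute time and returns -1 on timeout.
          	 */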
6202 6202
6203 6203 while (un->un_ncmds_in_transport != 0) {
6204 6204 /*
6205 6205 * Fail if commands do not finish in the specified time.
6206 6206 */
6207 6207 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
6208 6208 wait_cmds_complete) == -1) {
6209 6209 /*
6210 6210 * Undo the state changes made above. Everything
6211 6211 			 * must go back to its original value.
6212 6212 */
6213 6213 Restore_state(un);
6214 6214 un->un_last_state = un->un_save_state;
6215 6215 /* Wake up any threads that might be waiting. */
6216 6216 cv_broadcast(&un->un_suspend_cv);
6217 6217 mutex_exit(SD_MUTEX(un));
6218 6218 SD_ERROR(SD_LOG_IO_PM, un,
6219 6219 "sd_ddi_suspend: failed due to outstanding cmds\n");
6220 6220 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n");
6221 6221 return (DDI_FAILURE);
6222 6222 }
6223 6223 }
6224 6224
6225 6225 /*
6226 6226 * Cancel SCSI watch thread and timeouts, if any are active
6227 6227 */
6228 6228
6229 6229 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) {
6230 6230 opaque_t temp_token = un->un_swr_token;
6231 6231 mutex_exit(SD_MUTEX(un));
6232 6232 scsi_watch_suspend(temp_token);
6233 6233 mutex_enter(SD_MUTEX(un));
6234 6234 }
6235 6235
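          	/*
          	 * Note the pattern below: each timeout id is snapshotted and
          	 * cleared under SD_MUTEX, but untimeout(9F) itself is called
          	 * with the mutex dropped, since untimeout waits for an
          	 * already-running handler and the handlers presumably take
          	 * SD_MUTEX themselves.
          	 */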
6236 6236 if (un->un_reset_throttle_timeid != NULL) {
6237 6237 timeout_id_t temp_id = un->un_reset_throttle_timeid;
6238 6238 un->un_reset_throttle_timeid = NULL;
6239 6239 mutex_exit(SD_MUTEX(un));
6240 6240 (void) untimeout(temp_id);
6241 6241 mutex_enter(SD_MUTEX(un));
6242 6242 }
6243 6243
6244 6244 if (un->un_dcvb_timeid != NULL) {
6245 6245 timeout_id_t temp_id = un->un_dcvb_timeid;
6246 6246 un->un_dcvb_timeid = NULL;
6247 6247 mutex_exit(SD_MUTEX(un));
6248 6248 (void) untimeout(temp_id);
6249 6249 mutex_enter(SD_MUTEX(un));
6250 6250 }
6251 6251
6252 6252 mutex_enter(&un->un_pm_mutex);
6253 6253 if (un->un_pm_timeid != NULL) {
6254 6254 timeout_id_t temp_id = un->un_pm_timeid;
6255 6255 un->un_pm_timeid = NULL;
6256 6256 mutex_exit(&un->un_pm_mutex);
6257 6257 mutex_exit(SD_MUTEX(un));
6258 6258 (void) untimeout(temp_id);
6259 6259 mutex_enter(SD_MUTEX(un));
6260 6260 } else {
6261 6261 mutex_exit(&un->un_pm_mutex);
6262 6262 }
6263 6263
6264 6264 if (un->un_rmw_msg_timeid != NULL) {
6265 6265 timeout_id_t temp_id = un->un_rmw_msg_timeid;
6266 6266 un->un_rmw_msg_timeid = NULL;
6267 6267 mutex_exit(SD_MUTEX(un));
6268 6268 (void) untimeout(temp_id);
6269 6269 mutex_enter(SD_MUTEX(un));
6270 6270 }
6271 6271
6272 6272 if (un->un_retry_timeid != NULL) {
6273 6273 timeout_id_t temp_id = un->un_retry_timeid;
6274 6274 un->un_retry_timeid = NULL;
6275 6275 mutex_exit(SD_MUTEX(un));
6276 6276 (void) untimeout(temp_id);
6277 6277 mutex_enter(SD_MUTEX(un));
6278 6278
6279 6279 if (un->un_retry_bp != NULL) {
6280 6280 un->un_retry_bp->av_forw = un->un_waitq_headp;
6281 6281 un->un_waitq_headp = un->un_retry_bp;
6282 6282 if (un->un_waitq_tailp == NULL) {
6283 6283 un->un_waitq_tailp = un->un_retry_bp;
6284 6284 }
6285 6285 un->un_retry_bp = NULL;
6286 6286 un->un_retry_statp = NULL;
6287 6287 }
6288 6288 }
6289 6289
6290 6290 if (un->un_direct_priority_timeid != NULL) {
6291 6291 timeout_id_t temp_id = un->un_direct_priority_timeid;
6292 6292 un->un_direct_priority_timeid = NULL;
6293 6293 mutex_exit(SD_MUTEX(un));
6294 6294 (void) untimeout(temp_id);
6295 6295 mutex_enter(SD_MUTEX(un));
6296 6296 }
6297 6297
6298 6298 if (un->un_f_is_fibre == TRUE) {
6299 6299 /*
6300 6300 * Remove callbacks for insert and remove events
6301 6301 */
6302 6302 if (un->un_insert_event != NULL) {
6303 6303 mutex_exit(SD_MUTEX(un));
6304 6304 (void) ddi_remove_event_handler(un->un_insert_cb_id);
6305 6305 mutex_enter(SD_MUTEX(un));
6306 6306 un->un_insert_event = NULL;
6307 6307 }
6308 6308
6309 6309 if (un->un_remove_event != NULL) {
6310 6310 mutex_exit(SD_MUTEX(un));
6311 6311 (void) ddi_remove_event_handler(un->un_remove_cb_id);
6312 6312 mutex_enter(SD_MUTEX(un));
6313 6313 un->un_remove_event = NULL;
6314 6314 }
6315 6315 }
6316 6316
6317 6317 mutex_exit(SD_MUTEX(un));
6318 6318
6319 6319 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");
6320 6320
6321 6321 return (DDI_SUCCESS);
6322 6322 }
6323 6323
6324 6324
6325 6325 /*
6326 6326 * Function: sd_ddi_resume
6327 6327 *
6328 6328  * Description: Performs system power-up operations.
6329 6329 *
6330 6330 * Return Code: DDI_SUCCESS
6331 6331 * DDI_FAILURE
6332 6332 *
6333 6333 * Context: Kernel thread context
6334 6334 */
6335 6335
6336 6336 static int
6337 6337 sd_ddi_resume(dev_info_t *devi)
6338 6338 {
6339 6339 struct sd_lun *un;
6340 6340
6341 6341 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
6342 6342 if (un == NULL) {
6343 6343 return (DDI_FAILURE);
6344 6344 }
6345 6345
6346 6346 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");
6347 6347
6348 6348 mutex_enter(SD_MUTEX(un));
6349 6349 Restore_state(un);
6350 6350
6351 6351 /*
6352 6352 	 * Restore the state which was saved, to give
6353 6353 	 * the right state in un_last_state.
6354 6354 */
6355 6355 un->un_last_state = un->un_save_state;
6356 6356 /*
6357 6357 * Note: throttle comes back at full.
6358 6358 * Also note: this MUST be done before calling pm_raise_power
6359 6359 * otherwise the system can get hung in biowait. The scenario where
6360 6360 * this'll happen is under cpr suspend. Writing of the system
6361 6361 * state goes through sddump, which writes 0 to un_throttle. If
6362 6362 * writing the system state then fails, example if the partition is
6363 6363 	 * writing the system state then fails, for example if the partition is
6364 6364 * from the saved value until after calling pm_raise_power then
6365 6365 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs
6366 6366 * in biowait.
6367 6367 */
6368 6368 un->un_throttle = un->un_saved_throttle;
6369 6369
6370 6370 /*
6371 6371 * The chance of failure is very rare as the only command done in power
6372 6372 * entry point is START command when you transition from 0->1 or
6373 6373 * unknown->1. Put it to SPINDLE ON state irrespective of the state at
6374 6374 * which suspend was done. Ignore the return value as the resume should
6375 6375 * not be failed. In the case of removable media the media need not be
6376 6376 * inserted and hence there is a chance that raise power will fail with
6377 6377 * media not present.
6378 6378 */
6379 6379 if (un->un_f_attach_spinup) {
6380 6380 mutex_exit(SD_MUTEX(un));
6381 6381 (void) pm_raise_power(SD_DEVINFO(un), 0,
6382 6382 SD_PM_STATE_ACTIVE(un));
6383 6383 mutex_enter(SD_MUTEX(un));
6384 6384 }
6385 6385
6386 6386 /*
6387 6387 * Don't broadcast to the suspend cv and therefore possibly
6388 6388 * start I/O until after power has been restored.
6389 6389 */
6390 6390 cv_broadcast(&un->un_suspend_cv);
6391 6391 cv_broadcast(&un->un_state_cv);
6392 6392
6393 6393 /* restart thread */
6394 6394 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
6395 6395 scsi_watch_resume(un->un_swr_token);
6396 6396 }
6397 6397
6398 6398 #if (defined(__fibre))
6399 6399 if (un->un_f_is_fibre == TRUE) {
6400 6400 /*
6401 6401 * Add callbacks for insert and remove events
6402 6402 */
6403 6403 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
6404 6404 sd_init_event_callbacks(un);
6405 6405 }
6406 6406 }
6407 6407 #endif
6408 6408
6409 6409 /*
6410 6410 * Transport any pending commands to the target.
6411 6411 *
6412 6412 * If this is a low-activity device commands in queue will have to wait
6413 6413 	 * until new commands come in, which may take a while. Also, we
6414 6414 * specifically don't check un_ncmds_in_transport because we know that
6415 6415 * there really are no commands in progress after the unit was
6416 6416 * suspended and we could have reached the throttle level, been
6417 6417 	 * suspended, and have no new commands coming in for a while. Highly
6418 6418 * unlikely, but so is the low-activity disk scenario.
6419 6419 */
6420 6420 ddi_xbuf_dispatch(un->un_xbuf_attr);
6421 6421
6422 6422 sd_start_cmds(un, NULL);
6423 6423 mutex_exit(SD_MUTEX(un));
6424 6424
6425 6425 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");
6426 6426
6427 6427 return (DDI_SUCCESS);
6428 6428 }
6429 6429
6430 6430
6431 6431 /*
6432 6432 * Function: sd_pm_state_change
6433 6433 *
6434 6434 * Description: Change the driver power state.
6435 6435 * Someone else is required to actually change the driver
6436 6436 * power level.
6437 6437 *
6438 6438 * Arguments: un - driver soft state (unit) structure
6439 6439 * level - the power level that is changed to
6440 6440 * flag - to decide how to change the power state
6441 6441 *
6442 6442 * Return Code: DDI_SUCCESS
6443 6443 *
6444 6444 * Context: Kernel thread context
6445 6445 */
6446 6446 static int
6447 6447 sd_pm_state_change(struct sd_lun *un, int level, int flag)
6448 6448 {
6449 6449 ASSERT(un != NULL);
6450 6450 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: entry\n");
6451 6451
6452 6452 ASSERT(!mutex_owned(SD_MUTEX(un)));
6453 6453 mutex_enter(SD_MUTEX(un));
6454 6454
6455 6455 if (flag == SD_PM_STATE_ROLLBACK || SD_PM_IS_IO_CAPABLE(un, level)) {
6456 6456 un->un_power_level = level;
6457 6457 ASSERT(!mutex_owned(&un->un_pm_mutex));
6458 6458 mutex_enter(&un->un_pm_mutex);
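          		/*
          		 * un_pm_count is -1 while the device is in low power
          		 * (see sd_create_pm_components); the increment below
          		 * brings it back to 0, i.e. powered up and not busy.
          		 */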
6459 6459 if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
6460 6460 un->un_pm_count++;
6461 6461 ASSERT(un->un_pm_count == 0);
6462 6462 }
6463 6463 mutex_exit(&un->un_pm_mutex);
6464 6464 } else {
6465 6465 /*
6466 6466 * Exit if power management is not enabled for this device,
6467 6467 * or if the device is being used by HA.
6468 6468 */
6469 6469 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
6470 6470 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
6471 6471 mutex_exit(SD_MUTEX(un));
6472 6472 SD_TRACE(SD_LOG_POWER, un,
6473 6473 "sd_pm_state_change: exiting\n");
6474 6474 return (DDI_FAILURE);
6475 6475 }
6476 6476
6477 6477 SD_INFO(SD_LOG_POWER, un, "sd_pm_state_change: "
6478 6478 "un_ncmds_in_driver=%ld\n", un->un_ncmds_in_driver);
6479 6479
6480 6480 /*
6481 6481 		 * See if the device is not busy, i.e.:
6482 6482 * - we have no commands in the driver for this device
6483 6483 * - not waiting for resources
6484 6484 */
6485 6485 if ((un->un_ncmds_in_driver == 0) &&
6486 6486 (un->un_state != SD_STATE_RWAIT)) {
6487 6487 /*
6488 6488 * The device is not busy, so it is OK to go to low
6489 6489 * power state. Indicate low power, but rely on someone
6490 6490 * else to actually change it.
6491 6491 */
6492 6492 mutex_enter(&un->un_pm_mutex);
6493 6493 un->un_pm_count = -1;
6494 6494 mutex_exit(&un->un_pm_mutex);
6495 6495 un->un_power_level = level;
6496 6496 }
6497 6497 }
6498 6498
6499 6499 mutex_exit(SD_MUTEX(un));
6500 6500
6501 6501 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: exit\n");
6502 6502
6503 6503 return (DDI_SUCCESS);
6504 6504 }
6505 6505
6506 6506
6507 6507 /*
6508 6508 * Function: sd_pm_idletimeout_handler
6509 6509 *
6510 6510 * Description: A timer routine that's active only while a device is busy.
6511 6511 * The purpose is to extend slightly the pm framework's busy
6512 6512 * view of the device to prevent busy/idle thrashing for
6513 6513 * back-to-back commands. Do this by comparing the current time
6514 6514 * to the time at which the last command completed and when the
6515 6515 * difference is greater than sd_pm_idletime, call
6516 6516 * pm_idle_component. In addition to indicating idle to the pm
6517 6517 * framework, update the chain type to again use the internal pm
6518 6518 * layers of the driver.
6519 6519 *
6520 6520 * Arguments: arg - driver soft state (unit) structure
6521 6521 *
6522 6522 * Context: Executes in a timeout(9F) thread context
6523 6523 */
6524 6524
6525 6525 static void
6526 6526 sd_pm_idletimeout_handler(void *arg)
6527 6527 {
6528 6528 const hrtime_t idletime = sd_pm_idletime * NANOSEC;
6529 6529 struct sd_lun *un = arg;
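          	/*
          	 * sd_pm_idletime is in seconds; the NANOSEC scaling above
          	 * lets it be compared directly against gethrtime() deltas.
          	 */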
6530 6530
6531 6531 mutex_enter(&sd_detach_mutex);
6532 6532 if (un->un_detach_count != 0) {
6533 6533 /* Abort if the instance is detaching */
6534 6534 mutex_exit(&sd_detach_mutex);
6535 6535 return;
6536 6536 }
6537 6537 mutex_exit(&sd_detach_mutex);
6538 6538
6539 6539 /*
6540 6540 * Grab both mutexes, in the proper order, since we're accessing
6541 6541 * both PM and softstate variables.
6542 6542 */
6543 6543 mutex_enter(SD_MUTEX(un));
6544 6544 mutex_enter(&un->un_pm_mutex);
6545 6545 if (((gethrtime() - un->un_pm_idle_time) > idletime) &&
6546 6546 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
6547 6547 /*
6548 6548 * Update the chain types.
6549 6549 		 * This takes effect on the next new command received.
6550 6550 */
6551 6551 if (un->un_f_non_devbsize_supported) {
6552 6552 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
6553 6553 } else {
6554 6554 un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
6555 6555 }
6556 6556 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
6557 6557
6558 6558 SD_TRACE(SD_LOG_IO_PM, un,
6559 6559 "sd_pm_idletimeout_handler: idling device\n");
6560 6560 (void) pm_idle_component(SD_DEVINFO(un), 0);
6561 6561 un->un_pm_idle_timeid = NULL;
6562 6562 } else {
6563 6563 un->un_pm_idle_timeid =
6564 6564 timeout(sd_pm_idletimeout_handler, un,
6565 6565 (drv_usectohz((clock_t)300000))); /* 300 ms. */
6566 6566 }
6567 6567 mutex_exit(&un->un_pm_mutex);
6568 6568 mutex_exit(SD_MUTEX(un));
6569 6569 }
6570 6570
6571 6571
6572 6572 /*
6573 6573 * Function: sd_pm_timeout_handler
6574 6574 *
6575 6575 * Description: Callback to tell framework we are idle.
6576 6576 *
6577 6577 * Context: timeout(9f) thread context.
6578 6578 */
6579 6579
6580 6580 static void
6581 6581 sd_pm_timeout_handler(void *arg)
6582 6582 {
6583 6583 struct sd_lun *un = arg;
6584 6584
6585 6585 (void) pm_idle_component(SD_DEVINFO(un), 0);
6586 6586 mutex_enter(&un->un_pm_mutex);
6587 6587 un->un_pm_timeid = NULL;
6588 6588 mutex_exit(&un->un_pm_mutex);
6589 6589 }
6590 6590
6591 6591
6592 6592 /*
6593 6593 * Function: sdpower
6594 6594 *
6595 6595 * Description: PM entry point.
6596 6596 *
6597 6597 * Return Code: DDI_SUCCESS
6598 6598 * DDI_FAILURE
6599 6599 *
6600 6600 * Context: Kernel thread context
6601 6601 */
6602 6602
6603 6603 static int
6604 6604 sdpower(dev_info_t *devi, int component, int level)
6605 6605 {
6606 6606 struct sd_lun *un;
6607 6607 int instance;
6608 6608 int rval = DDI_SUCCESS;
6609 6609 uint_t i, log_page_size, maxcycles, ncycles;
6610 6610 uchar_t *log_page_data;
6611 6611 int log_sense_page;
6612 6612 int medium_present;
6613 6613 time_t intvlp;
6614 6614 struct pm_trans_data sd_pm_tran_data;
6615 6615 uchar_t save_state = SD_STATE_NORMAL;
6616 6616 int sval;
6617 6617 uchar_t state_before_pm;
6618 6618 int got_semaphore_here;
6619 6619 sd_ssc_t *ssc;
6620 6620 int last_power_level = SD_SPINDLE_UNINIT;
6621 6621
6622 6622 instance = ddi_get_instance(devi);
6623 6623
6624 6624 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
6625 6625 !SD_PM_IS_LEVEL_VALID(un, level) || component != 0) {
6626 6626 return (DDI_FAILURE);
6627 6627 }
6628 6628
6629 6629 ssc = sd_ssc_init(un);
6630 6630
6631 6631 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);
6632 6632
6633 6633 /*
6634 6634 * Must synchronize power down with close.
6635 6635 * Attempt to decrement/acquire the open/close semaphore,
6636 6636 * but do NOT wait on it. If it's not greater than zero,
6637 6637 	 * i.e. it can't be decremented without waiting, then
6638 6638 * someone else, either open or close, already has it
6639 6639 * and the try returns 0. Use that knowledge here to determine
6640 6640 * if it's OK to change the device power level.
6641 6641 * Also, only increment it on exit if it was decremented, ie. gotten,
6642 6642 * here.
6643 6643 */
6644 6644 got_semaphore_here = sema_tryp(&un->un_semoclose);
6645 6645
6646 6646 mutex_enter(SD_MUTEX(un));
6647 6647
6648 6648 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
6649 6649 un->un_ncmds_in_driver);
6650 6650
6651 6651 /*
6652 6652 * If un_ncmds_in_driver is non-zero it indicates commands are
6653 6653 * already being processed in the driver, or if the semaphore was
6654 6654 * not gotten here it indicates an open or close is being processed.
6655 6655 	 * In either case, a request to go to a lower power level that
6656 6656 	 * can't perform I/O must not proceed, so we
6657 6657 	 * return failure.
6658 6658 */
6659 6659 if ((!SD_PM_IS_IO_CAPABLE(un, level)) &&
6660 6660 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
6661 6661 mutex_exit(SD_MUTEX(un));
6662 6662
6663 6663 if (got_semaphore_here != 0) {
6664 6664 sema_v(&un->un_semoclose);
6665 6665 }
6666 6666 SD_TRACE(SD_LOG_IO_PM, un,
6667 6667 "sdpower: exit, device has queued cmds.\n");
6668 6668
6669 6669 goto sdpower_failed;
6670 6670 }
6671 6671
6672 6672 /*
6673 6673 	 * If the state is OFFLINE, the disk is completely dead; we
6674 6674 	 * would have to send commands to power it on or off, and those
6675 6675 	 * would fail anyway, so just return here.
6676 6676 *
6677 6677 * Power changes to a device that's OFFLINE or SUSPENDED
6678 6678 * are not allowed.
6679 6679 */
6680 6680 if ((un->un_state == SD_STATE_OFFLINE) ||
6681 6681 (un->un_state == SD_STATE_SUSPENDED)) {
6682 6682 mutex_exit(SD_MUTEX(un));
6683 6683
6684 6684 if (got_semaphore_here != 0) {
6685 6685 sema_v(&un->un_semoclose);
6686 6686 }
6687 6687 SD_TRACE(SD_LOG_IO_PM, un,
6688 6688 "sdpower: exit, device is off-line.\n");
6689 6689
6690 6690 goto sdpower_failed;
6691 6691 }
6692 6692
6693 6693 /*
6694 6694 	 * Change the device's state to indicate its power level
6695 6695 * is being changed. Do this to prevent a power off in the
6696 6696 * middle of commands, which is especially bad on devices
6697 6697 * that are really powered off instead of just spun down.
6698 6698 */
6699 6699 state_before_pm = un->un_state;
6700 6700 un->un_state = SD_STATE_PM_CHANGING;
6701 6701
6702 6702 mutex_exit(SD_MUTEX(un));
6703 6703
6704 6704 /*
6705 6705 	 * If the log sense command is not supported, bypass the
6706 6706 	 * following checking; otherwise, check the log sense
6707 6707 * information for this device.
6708 6708 */
6709 6709 if (SD_PM_STOP_MOTOR_NEEDED(un, level) &&
6710 6710 un->un_f_log_sense_supported) {
6711 6711 /*
6712 6712 		 * Get the log sense information to understand whether
6713 6713 		 * the power-cycle counts have gone beyond the threshold.
6714 6714 */
6715 6715 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
6716 6716 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
6717 6717
6718 6718 mutex_enter(SD_MUTEX(un));
6719 6719 log_sense_page = un->un_start_stop_cycle_page;
6720 6720 mutex_exit(SD_MUTEX(un));
6721 6721
6722 6722 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
6723 6723 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
6724 6724
6725 6725 if (rval != 0) {
6726 6726 if (rval == EIO)
6727 6727 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
6728 6728 else
6729 6729 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6730 6730 }
6731 6731
6732 6732 #ifdef SDDEBUG
6733 6733 if (sd_force_pm_supported) {
6734 6734 /* Force a successful result */
6735 6735 rval = 0;
6736 6736 }
6737 6737 #endif
6738 6738 if (rval != 0) {
6739 6739 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
6740 6740 "Log Sense Failed\n");
6741 6741
6742 6742 kmem_free(log_page_data, log_page_size);
6743 6743 /* Cannot support power management on those drives */
6744 6744
6745 6745 if (got_semaphore_here != 0) {
6746 6746 sema_v(&un->un_semoclose);
6747 6747 }
6748 6748 /*
6749 6749 		 * On exit put the state back to its original value
6750 6750 * and broadcast to anyone waiting for the power
6751 6751 * change completion.
6752 6752 */
6753 6753 mutex_enter(SD_MUTEX(un));
6754 6754 un->un_state = state_before_pm;
6755 6755 cv_broadcast(&un->un_suspend_cv);
6756 6756 mutex_exit(SD_MUTEX(un));
6757 6757 SD_TRACE(SD_LOG_IO_PM, un,
6758 6758 "sdpower: exit, Log Sense Failed.\n");
6759 6759
6760 6760 goto sdpower_failed;
6761 6761 }
6762 6762
6763 6763 /*
6764 6764 		 * From the page data, convert the essential information
6765 6765 		 * to pm_trans_data.
6766 6766 */
6767 6767 maxcycles =
6768 6768 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
6769 6769 (log_page_data[0x1E] << 8) | log_page_data[0x1F];
6770 6770
6771 6771 ncycles =
6772 6772 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
6773 6773 (log_page_data[0x26] << 8) | log_page_data[0x27];
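          		/*
          		 * The offsets above assemble two big-endian 32-bit
          		 * counters from the start/stop cycle counter log page:
          		 * the specified (lifetime maximum) cycle count and the
          		 * accumulated start/stop cycle count.
          		 */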
6774 6774
6775 6775 if (un->un_f_pm_log_sense_smart) {
6776 6776 sd_pm_tran_data.un.smart_count.allowed = maxcycles;
6777 6777 sd_pm_tran_data.un.smart_count.consumed = ncycles;
6778 6778 sd_pm_tran_data.un.smart_count.flag = 0;
6779 6779 sd_pm_tran_data.format = DC_SMART_FORMAT;
6780 6780 } else {
6781 6781 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;
6782 6782 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;
6783 6783 for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
6784 6784 sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
6785 6785 log_page_data[8+i];
6786 6786 }
6787 6787 sd_pm_tran_data.un.scsi_cycles.flag = 0;
6788 6788 sd_pm_tran_data.format = DC_SCSI_FORMAT;
6789 6789 }
6790 6790
6791 6791 kmem_free(log_page_data, log_page_size);
6792 6792
6793 6793 /*
6794 6794 * Call pm_trans_check routine to get the Ok from
6795 6795 * the global policy
6796 6796 */
6797 6797 rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
6798 6798 #ifdef SDDEBUG
6799 6799 if (sd_force_pm_supported) {
6800 6800 /* Force a successful result */
6801 6801 rval = 1;
6802 6802 }
6803 6803 #endif
6804 6804 switch (rval) {
6805 6805 case 0:
6806 6806 /*
6807 6807 			 * Not OK to power cycle, or an error in the
6808 6808 			 * parameters passed; intvlp gives the advised time
6809 6809 			 * before considering a power cycle. Based on it we are
6810 6810 * supposed to pretend we are busy so that pm framework
6811 6811 * will never call our power entry point. Because of
6812 6812 * that install a timeout handler and wait for the
6813 6813 * recommended time to elapse so that power management
6814 6814 * can be effective again.
6815 6815 *
6816 6816 * To effect this behavior, call pm_busy_component to
6817 6817 * indicate to the framework this device is busy.
6818 6818 * By not adjusting un_pm_count the rest of PM in
6819 6819 			 * the driver will function normally and independently
6820 6820 			 * of this; but because the framework is told the device
6821 6821 			 * is busy, it won't attempt powering down until it gets
6822 6822 			 * a matching idle. The timeout handler sends this.
6823 6823 * Note: sd_pm_entry can't be called here to do this
6824 6824 * because sdpower may have been called as a result
6825 6825 * of a call to pm_raise_power from within sd_pm_entry.
6826 6826 *
6827 6827 * If a timeout handler is already active then
6828 6828 * don't install another.
6829 6829 */
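          			/*
          			 * intvlp from pm_trans_check() is presumably in
          			 * seconds; drv_usectohz(1000000) is ticks per
          			 * second, so the product is the tick count for
          			 * timeout(9F).
          			 */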
6830 6830 mutex_enter(&un->un_pm_mutex);
6831 6831 if (un->un_pm_timeid == NULL) {
6832 6832 un->un_pm_timeid =
6833 6833 timeout(sd_pm_timeout_handler,
6834 6834 un, intvlp * drv_usectohz(1000000));
6835 6835 mutex_exit(&un->un_pm_mutex);
6836 6836 (void) pm_busy_component(SD_DEVINFO(un), 0);
6837 6837 } else {
6838 6838 mutex_exit(&un->un_pm_mutex);
6839 6839 }
6840 6840 if (got_semaphore_here != 0) {
6841 6841 sema_v(&un->un_semoclose);
6842 6842 }
6843 6843 /*
6844 6844 			 * On exit put the state back to its original value
6845 6845 * and broadcast to anyone waiting for the power
6846 6846 * change completion.
6847 6847 */
6848 6848 mutex_enter(SD_MUTEX(un));
6849 6849 un->un_state = state_before_pm;
6850 6850 cv_broadcast(&un->un_suspend_cv);
6851 6851 mutex_exit(SD_MUTEX(un));
6852 6852
6853 6853 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
6854 6854 "trans check Failed, not ok to power cycle.\n");
6855 6855
6856 6856 goto sdpower_failed;
6857 6857 case -1:
6858 6858 if (got_semaphore_here != 0) {
6859 6859 sema_v(&un->un_semoclose);
6860 6860 }
6861 6861 /*
6862 6862 			 * On exit put the state back to its original value
6863 6863 * and broadcast to anyone waiting for the power
6864 6864 * change completion.
6865 6865 */
6866 6866 mutex_enter(SD_MUTEX(un));
6867 6867 un->un_state = state_before_pm;
6868 6868 cv_broadcast(&un->un_suspend_cv);
6869 6869 mutex_exit(SD_MUTEX(un));
6870 6870 SD_TRACE(SD_LOG_IO_PM, un,
6871 6871 "sdpower: exit, trans check command Failed.\n");
6872 6872
6873 6873 goto sdpower_failed;
6874 6874 }
6875 6875 }
6876 6876
6877 6877 if (!SD_PM_IS_IO_CAPABLE(un, level)) {
6878 6878 /*
6879 6879 * Save the last state... if the STOP FAILS we need it
6880 6880 * for restoring
6881 6881 */
6882 6882 mutex_enter(SD_MUTEX(un));
6883 6883 save_state = un->un_last_state;
6884 6884 last_power_level = un->un_power_level;
6885 6885 /*
6886 6886 		 * There must not be any cmds getting processed
6887 6887 * in the driver when we get here. Power to the
6888 6888 * device is potentially going off.
6889 6889 */
6890 6890 ASSERT(un->un_ncmds_in_driver == 0);
6891 6891 mutex_exit(SD_MUTEX(un));
6892 6892
6893 6893 /*
6894 6894 * For now PM suspend the device completely before spindle is
6895 6895 * turned off
6896 6896 */
6897 6897 if ((rval = sd_pm_state_change(un, level, SD_PM_STATE_CHANGE))
6898 6898 == DDI_FAILURE) {
6899 6899 if (got_semaphore_here != 0) {
6900 6900 sema_v(&un->un_semoclose);
6901 6901 }
6902 6902 /*
6903 6903 			 * On exit put the state back to its original value
6904 6904 * and broadcast to anyone waiting for the power
6905 6905 * change completion.
6906 6906 */
6907 6907 mutex_enter(SD_MUTEX(un));
6908 6908 un->un_state = state_before_pm;
6909 6909 un->un_power_level = last_power_level;
6910 6910 cv_broadcast(&un->un_suspend_cv);
6911 6911 mutex_exit(SD_MUTEX(un));
6912 6912 SD_TRACE(SD_LOG_IO_PM, un,
6913 6913 "sdpower: exit, PM suspend Failed.\n");
6914 6914
6915 6915 goto sdpower_failed;
6916 6916 }
6917 6917 }
6918 6918
6919 6919 /*
6920 6920 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
6921 6921 	 * close, or strategy. Dump no longer uses this routine; it uses
6922 6922 	 * its own code so it can be done in polled mode.
6923 6923 */
6924 6924
6925 6925 medium_present = TRUE;
6926 6926
6927 6927 /*
6928 6928 * When powering up, issue a TUR in case the device is at unit
6929 6929 * attention. Don't do retries. Bypass the PM layer, otherwise
6930 6930 * a deadlock on un_pm_busy_cv will occur.
6931 6931 */
6932 6932 if (SD_PM_IS_IO_CAPABLE(un, level)) {
6933 6933 sval = sd_send_scsi_TEST_UNIT_READY(ssc,
6934 6934 SD_DONT_RETRY_TUR | SD_BYPASS_PM);
6935 6935 if (sval != 0)
6936 6936 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6937 6937 }
6938 6938
6939 6939 if (un->un_f_power_condition_supported) {
6940 6940 char *pm_condition_name[] = {"STOPPED", "STANDBY",
6941 6941 "IDLE", "ACTIVE"};
6942 6942 SD_TRACE(SD_LOG_IO_PM, un,
6943 6943 "sdpower: sending \'%s\' power condition",
6944 6944 pm_condition_name[level]);
6945 6945 sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION,
6946 6946 sd_pl2pc[level], SD_PATH_DIRECT);
6947 6947 } else {
6948 6948 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
6949 6949 ((level == SD_SPINDLE_ON) ? "START" : "STOP"));
6950 6950 sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
6951 6951 ((level == SD_SPINDLE_ON) ? SD_TARGET_START :
6952 6952 SD_TARGET_STOP), SD_PATH_DIRECT);
6953 6953 }
6954 6954 if (sval != 0) {
6955 6955 if (sval == EIO)
6956 6956 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
6957 6957 else
6958 6958 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6959 6959 }
6960 6960
6961 6961 /* Command failed, check for media present. */
6962 6962 if ((sval == ENXIO) && un->un_f_has_removable_media) {
6963 6963 medium_present = FALSE;
6964 6964 }
6965 6965
6966 6966 /*
6967 6967 * The conditions of interest here are:
6968 6968 * if a spindle off with media present fails,
6969 6969 * then restore the state and return an error.
6970 6970 * else if a spindle on fails,
6971 6971 * then return an error (there's no state to restore).
6972 6972 * In all other cases we setup for the new state
6973 6973 * and return success.
6974 6974 */
6975 6975 if (!SD_PM_IS_IO_CAPABLE(un, level)) {
6976 6976 if ((medium_present == TRUE) && (sval != 0)) {
6977 6977 /* The stop command from above failed */
6978 6978 rval = DDI_FAILURE;
6979 6979 /*
6980 6980 * The stop command failed, and we have media
6981 6981 			 * present. Put the level back by calling
6982 6982 			 * sd_pm_state_change() and set the state back to
6983 6983 			 * its previous value.
6984 6984 */
6985 6985 (void) sd_pm_state_change(un, last_power_level,
6986 6986 SD_PM_STATE_ROLLBACK);
6987 6987 mutex_enter(SD_MUTEX(un));
6988 6988 un->un_last_state = save_state;
6989 6989 mutex_exit(SD_MUTEX(un));
6990 6990 } else if (un->un_f_monitor_media_state) {
6991 6991 /*
6992 6992 * The stop command from above succeeded.
6993 6993 * Terminate watch thread in case of removable media
6994 6994 * devices going into low power state. This is as per
6995 6995 			 * the requirements of the pm framework; otherwise commands
6996 6996 * will be generated for the device (through watch
6997 6997 * thread), even when the device is in low power state.
6998 6998 */
6999 6999 mutex_enter(SD_MUTEX(un));
7000 7000 un->un_f_watcht_stopped = FALSE;
7001 7001 if (un->un_swr_token != NULL) {
7002 7002 opaque_t temp_token = un->un_swr_token;
7003 7003 un->un_f_watcht_stopped = TRUE;
7004 7004 un->un_swr_token = NULL;
7005 7005 mutex_exit(SD_MUTEX(un));
7006 7006 (void) scsi_watch_request_terminate(temp_token,
7007 7007 SCSI_WATCH_TERMINATE_ALL_WAIT);
7008 7008 } else {
7009 7009 mutex_exit(SD_MUTEX(un));
7010 7010 }
7011 7011 }
7012 7012 } else {
7013 7013 /*
7014 7014 * The level requested is I/O capable.
7015 7015 * Legacy behavior: return success on a failed spinup
7016 7016 * if there is no media in the drive.
7017 7017 * Do this by looking at medium_present here.
7018 7018 */
7019 7019 if ((sval != 0) && medium_present) {
7020 7020 /* The start command from above failed */
7021 7021 rval = DDI_FAILURE;
7022 7022 } else {
7023 7023 /*
7024 7024 * The start command from above succeeded
7025 7025 * PM resume the devices now that we have
7026 7026 * started the disks
7027 7027 */
7028 7028 (void) sd_pm_state_change(un, level,
7029 7029 SD_PM_STATE_CHANGE);
7030 7030
7031 7031 /*
7032 7032 * Resume the watch thread since it was suspended
7033 7033 * when the device went into low power mode.
7034 7034 */
7035 7035 if (un->un_f_monitor_media_state) {
7036 7036 mutex_enter(SD_MUTEX(un));
7037 7037 if (un->un_f_watcht_stopped == TRUE) {
7038 7038 opaque_t temp_token;
7039 7039
7040 7040 un->un_f_watcht_stopped = FALSE;
7041 7041 mutex_exit(SD_MUTEX(un));
7042 7042 temp_token =
7043 7043 sd_watch_request_submit(un);
7044 7044 mutex_enter(SD_MUTEX(un));
7045 7045 un->un_swr_token = temp_token;
7046 7046 }
7047 7047 mutex_exit(SD_MUTEX(un));
7048 7048 }
7049 7049 }
7050 7050 }
7051 7051
7052 7052 if (got_semaphore_here != 0) {
7053 7053 sema_v(&un->un_semoclose);
7054 7054 }
7055 7055 /*
7056 7056 	 * On exit put the state back to its original value
7057 7057 * and broadcast to anyone waiting for the power
7058 7058 * change completion.
7059 7059 */
7060 7060 mutex_enter(SD_MUTEX(un));
7061 7061 un->un_state = state_before_pm;
7062 7062 cv_broadcast(&un->un_suspend_cv);
7063 7063 mutex_exit(SD_MUTEX(un));
7064 7064
7065 7065 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval);
7066 7066
7067 7067 sd_ssc_fini(ssc);
7068 7068 return (rval);
7069 7069
7070 7070 sdpower_failed:
7071 7071
7072 7072 sd_ssc_fini(ssc);
7073 7073 return (DDI_FAILURE);
7074 7074 }
7075 7075
7076 7076
7077 7077
7078 7078 /*
7079 7079 * Function: sdattach
7080 7080 *
7081 7081 * Description: Driver's attach(9e) entry point function.
7082 7082 *
7083 7083 * Arguments: devi - opaque device info handle
7084 7084 * cmd - attach type
7085 7085 *
7086 7086 * Return Code: DDI_SUCCESS
7087 7087 * DDI_FAILURE
7088 7088 *
7089 7089 * Context: Kernel thread context
7090 7090 */
7091 7091
7092 7092 static int
7093 7093 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
7094 7094 {
7095 7095 switch (cmd) {
7096 7096 case DDI_ATTACH:
7097 7097 return (sd_unit_attach(devi));
7098 7098 case DDI_RESUME:
7099 7099 return (sd_ddi_resume(devi));
7100 7100 default:
7101 7101 break;
7102 7102 }
7103 7103 return (DDI_FAILURE);
7104 7104 }
7105 7105
7106 7106
7107 7107 /*
7108 7108 * Function: sddetach
7109 7109 *
7110 7110 * Description: Driver's detach(9E) entry point function.
7111 7111 *
7112 7112 * Arguments: devi - opaque device info handle
7113 7113 * cmd - detach type
7114 7114 *
7115 7115 * Return Code: DDI_SUCCESS
7116 7116 * DDI_FAILURE
7117 7117 *
7118 7118 * Context: Kernel thread context
7119 7119 */
7120 7120
7121 7121 static int
7122 7122 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
7123 7123 {
7124 7124 switch (cmd) {
7125 7125 case DDI_DETACH:
7126 7126 return (sd_unit_detach(devi));
7127 7127 case DDI_SUSPEND:
7128 7128 return (sd_ddi_suspend(devi));
7129 7129 default:
7130 7130 break;
7131 7131 }
7132 7132 return (DDI_FAILURE);
7133 7133 }
7134 7134
7135 7135
7136 7136 /*
7137 7137 * Function: sd_sync_with_callback
7138 7138 *
7139 7139 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft
7140 7140 * state while the callback routine is active.
7141 7141 *
7142 7142 * Arguments: un: softstate structure for the instance
7143 7143 *
7144 7144 * Context: Kernel thread context
7145 7145 */
7146 7146
7147 7147 static void
7148 7148 sd_sync_with_callback(struct sd_lun *un)
7149 7149 {
7150 7150 ASSERT(un != NULL);
7151 7151
7152 7152 mutex_enter(SD_MUTEX(un));
7153 7153
7154 7154 ASSERT(un->un_in_callback >= 0);
7155 7155
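          	/*
          	 * Poll until the callback count drains, dropping SD_MUTEX so
          	 * the callback can make progress; delay(2) pauses for two
          	 * clock ticks between checks.
          	 */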
7156 7156 while (un->un_in_callback > 0) {
7157 7157 mutex_exit(SD_MUTEX(un));
7158 7158 delay(2);
7159 7159 mutex_enter(SD_MUTEX(un));
7160 7160 }
7161 7161
7162 7162 mutex_exit(SD_MUTEX(un));
7163 7163 }
7164 7164
7165 7165 /*
7166 7166 * Function: sd_unit_attach
7167 7167 *
7168 7168 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates
7169 7169 * the soft state structure for the device and performs
7170 7170 * all necessary structure and device initializations.
7171 7171 *
7172 7172 * Arguments: devi: the system's dev_info_t for the device.
7173 7173 *
7174 7174 * Return Code: DDI_SUCCESS if attach is successful.
7175 7175 * DDI_FAILURE if any part of the attach fails.
7176 7176 *
7177 7177 * Context: Called at attach(9e) time for the DDI_ATTACH flag.
7178 7178 * Kernel thread context only. Can sleep.
7179 7179 */
7180 7180
7181 7181 static int
7182 7182 sd_unit_attach(dev_info_t *devi)
7183 7183 {
7184 7184 struct scsi_device *devp;
7185 7185 struct sd_lun *un;
7186 7186 char *variantp;
7187 7187 char name_str[48];
7188 7188 int reservation_flag = SD_TARGET_IS_UNRESERVED;
7189 7189 int instance;
7190 7190 int rval;
7191 7191 int wc_enabled;
7192 7192 int wc_changeable;
7193 7193 int tgt;
7194 7194 uint64_t capacity;
7195 7195 uint_t lbasize = 0;
7196 7196 dev_info_t *pdip = ddi_get_parent(devi);
7197 7197 int offbyone = 0;
7198 7198 int geom_label_valid = 0;
7199 7199 sd_ssc_t *ssc;
7200 7200 int status;
7201 7201 struct sd_fm_internal *sfip = NULL;
7202 7202 int max_xfer_size;
7203 7203
7204 7204 /*
7205 7205 * Retrieve the target driver's private data area. This was set
7206 7206 * up by the HBA.
7207 7207 */
7208 7208 devp = ddi_get_driver_private(devi);
7209 7209
7210 7210 /*
7211 7211 * Retrieve the target ID of the device.
7212 7212 */
7213 7213 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
7214 7214 SCSI_ADDR_PROP_TARGET, -1);
7215 7215
7216 7216 /*
7217 7217 * Since we have no idea what state things were left in by the last
7218 7218 	 * user of the device, set up some 'default' settings, i.e. turn 'em
7219 7219 * off. The scsi_ifsetcap calls force re-negotiations with the drive.
7220 7220 * Do this before the scsi_probe, which sends an inquiry.
7221 7221 * This is a fix for bug (4430280).
7222 7222 * Of special importance is wide-xfer. The drive could have been left
7223 7223 * in wide transfer mode by the last driver to communicate with it,
7224 7224 * this includes us. If that's the case, and if the following is not
7225 7225 * setup properly or we don't re-negotiate with the drive prior to
7226 7226 * transferring data to/from the drive, it causes bus parity errors,
7227 7227 * data overruns, and unexpected interrupts. This first occurred when
7228 7228 * the fix for bug (4378686) was made.
7229 7229 */
7230 7230 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1);
7231 7231 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1);
7232 7232 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1);
7233 7233
7234 7234 /*
7235 7235 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs
7236 7236 * on a target. Setting it per lun instance actually sets the
7237 7237 * capability of this target, which affects those luns already
7238 7238 	 * attached on the same target. So during attach, we can disable
7239 7239 	 * this capability only when no other lun has been attached on this
7240 7240 * target. By doing this, we assume a target has the same tagged-qing
7241 7241 * capability for every lun. The condition can be removed when HBA
7242 7242 * is changed to support per lun based tagged-qing capability.
7243 7243 */
7244 7244 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
7245 7245 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1);
7246 7246 }
7247 7247
7248 7248 /*
7249 7249 * Use scsi_probe() to issue an INQUIRY command to the device.
7250 7250 * This call will allocate and fill in the scsi_inquiry structure
7251 7251 * and point the sd_inq member of the scsi_device structure to it.
7252 7252 * If the attach succeeds, then this memory will not be de-allocated
7253 7253 * (via scsi_unprobe()) until the instance is detached.
7254 7254 */
7255 7255 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
7256 7256 goto probe_failed;
7257 7257 }
7258 7258
7259 7259 /*
7260 7260 * Check the device type as specified in the inquiry data and
7261 7261 * claim it if it is of a type that we support.
7262 7262 */
7263 7263 switch (devp->sd_inq->inq_dtype) {
7264 7264 case DTYPE_DIRECT:
7265 7265 break;
7266 7266 case DTYPE_RODIRECT:
7267 7267 break;
7268 7268 case DTYPE_OPTICAL:
7269 7269 break;
7270 7270 case DTYPE_NOTPRESENT:
7271 7271 default:
7272 7272 /* Unsupported device type; fail the attach. */
7273 7273 goto probe_failed;
7274 7274 }
7275 7275
7276 7276 /*
7277 7277 * Allocate the soft state structure for this unit.
7278 7278 *
7279 7279 * We rely upon this memory being set to all zeroes by
7280 7280 * ddi_soft_state_zalloc(). We assume that any member of the
7281 7281 * soft state structure that is not explicitly initialized by
7282 7282 * this routine will have a value of zero.
7283 7283 */
7284 7284 instance = ddi_get_instance(devp->sd_dev);
7285 7285 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) {
7286 7286 goto probe_failed;
7287 7287 }
7288 7288
7289 7289 /*
7290 7290 * Retrieve a pointer to the newly-allocated soft state.
7291 7291 *
7292 7292 * This should NEVER fail if the ddi_soft_state_zalloc() call above
7293 7293 * was successful, unless something has gone horribly wrong and the
7294 7294 * ddi's soft state internals are corrupt (in which case it is
7295 7295 	 * probably better to halt here than just fail the attach...)
7296 7296 */
7297 7297 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
7298 7298 panic("sd_unit_attach: NULL soft state on instance:0x%x",
7299 7299 instance);
7300 7300 /*NOTREACHED*/
7301 7301 }
7302 7302
7303 7303 /*
7304 7304 * Link the back ptr of the driver soft state to the scsi_device
7305 7305 * struct for this lun.
7306 7306 * Save a pointer to the softstate in the driver-private area of
7307 7307 * the scsi_device struct.
7308 7308 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until
7309 7309 * we first set un->un_sd below.
7310 7310 */
7311 7311 un->un_sd = devp;
7312 7312 devp->sd_private = (opaque_t)un;
7313 7313
7314 7314 /*
7315 7315 * The following must be after devp is stored in the soft state struct.
7316 7316 */
7317 7317 #ifdef SDDEBUG
7318 7318 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7319 7319 "%s_unit_attach: un:0x%p instance:%d\n",
7320 7320 ddi_driver_name(devi), un, instance);
7321 7321 #endif
7322 7322
7323 7323 /*
7324 7324 * Set up the device type and node type (for the minor nodes).
7325 7325 * By default we assume that the device can at least support the
7326 7326 * Common Command Set. Call it a CD-ROM if it reports itself
7327 7327 * as a RODIRECT device.
7328 7328 */
7329 7329 switch (devp->sd_inq->inq_dtype) {
7330 7330 case DTYPE_RODIRECT:
7331 7331 un->un_node_type = DDI_NT_CD_CHAN;
7332 7332 un->un_ctype = CTYPE_CDROM;
7333 7333 break;
7334 7334 case DTYPE_OPTICAL:
7335 7335 un->un_node_type = DDI_NT_BLOCK_CHAN;
7336 7336 un->un_ctype = CTYPE_ROD;
7337 7337 break;
7338 7338 default:
7339 7339 un->un_node_type = DDI_NT_BLOCK_CHAN;
7340 7340 un->un_ctype = CTYPE_CCS;
7341 7341 break;
7342 7342 }
7343 7343
7344 7344 /*
7345 7345 * Try to read the interconnect type from the HBA.
7346 7346 *
7347 7347 * Note: This driver is currently compiled as two binaries, a parallel
7348 7348 * scsi version (sd) and a fibre channel version (ssd). All functional
7349 7349 * differences are determined at compile time. In the future a single
7350 7350 * binary will be provided and the interconnect type will be used to
7351 7351 * differentiate between fibre and parallel scsi behaviors. At that time
7352 7352 * it will be necessary for all fibre channel HBAs to support this
7353 7353 * property.
7354 7354 *
7355 7355 	 * Set un_f_is_fibre to TRUE (default fibre).
7356 7356 */
7357 7357 un->un_f_is_fibre = TRUE;
7358 7358 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
7359 7359 case INTERCONNECT_SSA:
7360 7360 un->un_interconnect_type = SD_INTERCONNECT_SSA;
7361 7361 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7362 7362 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
7363 7363 break;
7364 7364 case INTERCONNECT_PARALLEL:
7365 7365 un->un_f_is_fibre = FALSE;
7366 7366 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
7367 7367 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7368 7368 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
7369 7369 break;
7370 7370 case INTERCONNECT_SAS:
7371 7371 un->un_f_is_fibre = FALSE;
7372 7372 un->un_interconnect_type = SD_INTERCONNECT_SAS;
7373 7373 un->un_node_type = DDI_NT_BLOCK_SAS;
7374 7374 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7375 7375 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SAS\n", un);
7376 7376 break;
7377 7377 case INTERCONNECT_SATA:
7378 7378 un->un_f_is_fibre = FALSE;
7379 7379 un->un_interconnect_type = SD_INTERCONNECT_SATA;
7380 7380 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7381 7381 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
7382 7382 break;
7383 7383 case INTERCONNECT_FIBRE:
7384 7384 un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
7385 7385 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7386 7386 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
7387 7387 break;
7388 7388 case INTERCONNECT_FABRIC:
7389 7389 un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
7390 7390 un->un_node_type = DDI_NT_BLOCK_FABRIC;
7391 7391 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7392 7392 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
7393 7393 break;
7394 7394 default:
7395 7395 #ifdef SD_DEFAULT_INTERCONNECT_TYPE
7396 7396 /*
7397 7397 * The HBA does not support the "interconnect-type" property
7398 7398 * (or did not provide a recognized type).
7399 7399 *
7400 7400 * Note: This will be obsoleted when a single fibre channel
7401 7401 * and parallel scsi driver is delivered. In the meantime the
7402 7402 		 * interconnect type will be set to the platform default. If that
7403 7403 * type is not parallel SCSI, it means that we should be
7404 7404 * assuming "ssd" semantics. However, here this also means that
7405 7405 * the FC HBA is not supporting the "interconnect-type" property
7406 7406 * like we expect it to, so log this occurrence.
7407 7407 */
7408 7408 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
7409 7409 if (!SD_IS_PARALLEL_SCSI(un)) {
7410 7410 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7411 7411 "sd_unit_attach: un:0x%p Assuming "
7412 7412 "INTERCONNECT_FIBRE\n", un);
7413 7413 } else {
7414 7414 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7415 7415 "sd_unit_attach: un:0x%p Assuming "
7416 7416 "INTERCONNECT_PARALLEL\n", un);
7417 7417 un->un_f_is_fibre = FALSE;
7418 7418 }
7419 7419 #else
7420 7420 /*
7421 7421 * Note: This source will be implemented when a single fibre
7422 7422 * channel and parallel scsi driver is delivered. The default
7423 7423 * will be to assume that if a device does not support the
7424 7424 * "interconnect-type" property it is a parallel SCSI HBA and
7425 7425 * we will set the interconnect type for parallel scsi.
7426 7426 */
7427 7427 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
7428 7428 un->un_f_is_fibre = FALSE;
7429 7429 #endif
7430 7430 break;
7431 7431 }
7432 7432
7433 7433 if (un->un_f_is_fibre == TRUE) {
7434 7434 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
7435 7435 SCSI_VERSION_3) {
7436 7436 switch (un->un_interconnect_type) {
7437 7437 case SD_INTERCONNECT_FIBRE:
7438 7438 case SD_INTERCONNECT_SSA:
7439 7439 un->un_node_type = DDI_NT_BLOCK_WWN;
7440 7440 break;
7441 7441 default:
7442 7442 break;
7443 7443 }
7444 7444 }
7445 7445 }
7446 7446
7447 7447 /*
7448 7448 * Initialize the Request Sense command for the target
7449 7449 */
7450 7450 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
7451 7451 goto alloc_rqs_failed;
7452 7452 }
7453 7453
7454 7454 /*
7455 7455 	 * Set un_retry_count with SD_RETRY_COUNT; this is ok for Sparc,
7456 7456 	 * which has separate binaries for sd and ssd.
7457 7457 	 *
7458 7458 	 * x86 has one binary, and un_retry_count is set based on the
7459 7459 	 * connection type. The hardcoded values will go away when Sparc
7460 7460 	 * uses one binary for sd and ssd. These hardcoded values need to
7461 7461 	 * match SD_RETRY_COUNT in sddef.h.
7462 7462 	 * The value used is based on interconnect type:
7463 7463 	 * fibre = 3, parallel = 5.
7464 7464 */
7465 7465 #if defined(__i386) || defined(__amd64)
7466 7466 un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
7467 7467 #else
7468 7468 un->un_retry_count = SD_RETRY_COUNT;
7469 7469 #endif
7470 7470
7471 7471 /*
7472 7472 * Set the per disk retry count to the default number of retries
7473 7473 * for disks and CDROMs. This value can be overridden by the
7474 7474 * disk property list or an entry in sd.conf.
7475 7475 */
7476 7476 un->un_notready_retry_count =
7477 7477 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
7478 7478 : DISK_NOT_READY_RETRY_COUNT(un);
7479 7479
7480 7480 /*
7481 7481 * Set the busy retry count to the default value of un_retry_count.
7482 7482 * This can be overridden by entries in sd.conf or the device
7483 7483 * config table.
7484 7484 */
7485 7485 un->un_busy_retry_count = un->un_retry_count;
7486 7486
7487 7487 /*
7488 7488 * Init the reset threshold for retries. This number determines
7489 7489 * how many retries must be performed before a reset can be issued
7490 7490 * (for certain error conditions). This can be overridden by entries
7491 7491 * in sd.conf or the device config table.
7492 7492 */
7493 7493 un->un_reset_retry_count = (un->un_retry_count / 2);
7494 7494
7495 7495 /*
7496 7496 * Set the victim_retry_count to the default un_retry_count
7497 7497 */
7498 7498 un->un_victim_retry_count = (2 * un->un_retry_count);
7499 7499
7500 7500 /*
7501 7501 * Set the reservation release timeout to the default value of
7502 7502 * 5 seconds. This can be overridden by entries in ssd.conf or the
7503 7503 * device config table.
7504 7504 */
7505 7505 un->un_reserve_release_time = 5;
7506 7506
7507 7507 /*
7508 7508 * Set up the default maximum transfer size. Note that this may
7509 7509 * get updated later in the attach, when setting up default wide
7510 7510 * operations for disks.
7511 7511 */
7512 7512 #if defined(__i386) || defined(__amd64)
7513 7513 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
7514 7514 un->un_partial_dma_supported = 1;
7515 7515 #else
7516 7516 un->un_max_xfer_size = (uint_t)maxphys;
7517 7517 #endif
7518 7518
7519 7519 /*
7520 7520 * Get "allow bus device reset" property (defaults to "enabled" if
7521 7521 * the property was not defined). This is to disable bus resets for
7522 7522 * certain kinds of error recovery. Note: In the future when a run-time
7523 7523 * fibre check is available the soft state flag should default to
7524 7524 * enabled.
7525 7525 */
7526 7526 if (un->un_f_is_fibre == TRUE) {
7527 7527 un->un_f_allow_bus_device_reset = TRUE;
7528 7528 } else {
7529 7529 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
7530 7530 "allow-bus-device-reset", 1) != 0) {
7531 7531 un->un_f_allow_bus_device_reset = TRUE;
7532 7532 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7533 7533 "sd_unit_attach: un:0x%p Bus device reset "
7534 7534 "enabled\n", un);
7535 7535 } else {
7536 7536 un->un_f_allow_bus_device_reset = FALSE;
7537 7537 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7538 7538 "sd_unit_attach: un:0x%p Bus device reset "
7539 7539 "disabled\n", un);
7540 7540 }
7541 7541 }
7542 7542
7543 7543 /*
7544 7544 * Check if this is an ATAPI device. ATAPI devices use Group 1
7545 7545 * Read/Write commands and Group 2 Mode Sense/Select commands.
7546 7546 *
7547 7547 * Note: The "obsolete" way of doing this is to check for the "atapi"
7548 7548 * property. The new "variant" property with a value of "atapi" has been
7549 7549 * introduced so that future 'variants' of standard SCSI behavior (like
7550 7550 * atapi) could be specified by the underlying HBA drivers by supplying
7551 7551 * a new value for the "variant" property, instead of having to define a
7552 7552 * new property.
7553 7553 */
7554 7554 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) {
7555 7555 un->un_f_cfg_is_atapi = TRUE;
7556 7556 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7557 7557 "sd_unit_attach: un:0x%p Atapi device\n", un);
7558 7558 }
7559 7559 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant",
7560 7560 &variantp) == DDI_PROP_SUCCESS) {
7561 7561 if (strcmp(variantp, "atapi") == 0) {
7562 7562 un->un_f_cfg_is_atapi = TRUE;
7563 7563 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7564 7564 "sd_unit_attach: un:0x%p Atapi device\n", un);
7565 7565 }
7566 7566 ddi_prop_free(variantp);
7567 7567 }
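
For context, the producer side of this contract lives in the HBA driver. Below is a minimal, hypothetical sketch of how an HBA could tag a child target node so that sd selects ATAPI command forms; the function name and call site are assumptions, but ddi_prop_update_string() is the standard DDI routine for creating a string property:

	#include <sys/ddi.h>
	#include <sys/sunddi.h>
	#include <sys/cmn_err.h>

	/*
	 * Hypothetical HBA-side helper: mark a child target node as an
	 * ATAPI variant so that sd (above) uses Group 1 Read/Write and
	 * Group 2 Mode Sense/Select commands.
	 */
	static void
	xhba_mark_atapi(dev_info_t *child)
	{
		if (ddi_prop_update_string(DDI_DEV_T_NONE, child,
		    "variant", "atapi") != DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN,
			    "xhba: cannot set \"variant\" property");
		}
	}
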
7568 7568
7569 7569 un->un_cmd_timeout = SD_IO_TIME;
7570 7570
7571 7571 un->un_busy_timeout = SD_BSY_TIMEOUT;
7572 7572
7573 7573 /* Info on current states, statuses, etc. (Updated frequently) */
7574 7574 un->un_state = SD_STATE_NORMAL;
7575 7575 un->un_last_state = SD_STATE_NORMAL;
7576 7576
7577 7577 /* Control & status info for command throttling */
7578 7578 un->un_throttle = sd_max_throttle;
7579 7579 un->un_saved_throttle = sd_max_throttle;
7580 7580 un->un_min_throttle = sd_min_throttle;
7581 7581
7582 7582 if (un->un_f_is_fibre == TRUE) {
7583 7583 un->un_f_use_adaptive_throttle = TRUE;
7584 7584 } else {
7585 7585 un->un_f_use_adaptive_throttle = FALSE;
7586 7586 }
7587 7587
7588 7588 /* Removable media support. */
7589 7589 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
7590 7590 un->un_mediastate = DKIO_NONE;
7591 7591 un->un_specified_mediastate = DKIO_NONE;
7592 7592
7593 7593 /* CVs for suspend/resume (PM or DR) */
7594 7594 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL);
7595 7595 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);
7596 7596
7597 7597 /* Power management support. */
7598 7598 un->un_power_level = SD_SPINDLE_UNINIT;
7599 7599
7600 7600 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL);
7601 7601 un->un_f_wcc_inprog = 0;
7602 7602
7603 7603 /*
7604 7604 * The open/close semaphore is used to serialize threads executing
7605 7605 * in the driver's open & close entry point routines for a given
7606 7606 * instance.
7607 7607 */
7608 7608 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);
7609 7609
7610 7610 /*
7611 7611 	 * The conf file entry and softstate variable are a forceful override,
7612 7612 * meaning a non-zero value must be entered to change the default.
7613 7613 */
7614 7614 un->un_f_disksort_disabled = FALSE;
7615 7615 un->un_f_rmw_type = SD_RMW_TYPE_DEFAULT;
7616 7616 un->un_f_enable_rmw = FALSE;
7617 7617
7618 7618 /*
7619 7619 * GET EVENT STATUS NOTIFICATION media polling enabled by default, but
7620 7620 * can be overridden via [s]sd-config-list "mmc-gesn-polling" property.
7621 7621 */
7622 7622 un->un_f_mmc_gesn_polling = TRUE;
7623 7623
7624 7624 /*
7625 7625 * physical sector size defaults to DEV_BSIZE currently. We can
7626 7626 * override this value via the driver configuration file so we must
7627 7627 * set it before calling sd_read_unit_properties().
7628 7628 */
7629 7629 un->un_phy_blocksize = DEV_BSIZE;
7630 7630
7631 7631 /*
7632 7632 * Retrieve the properties from the static driver table or the driver
7633 7633 * configuration file (.conf) for this unit and update the soft state
7634 7634 * for the device as needed for the indicated properties.
7635 7635 * Note: the property configuration needs to occur here as some of the
7636 7636 * following routines may have dependencies on soft state flags set
7637 7637 * as part of the driver property configuration.
7638 7638 */
7639 7639 sd_read_unit_properties(un);
7640 7640 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7641 7641 "sd_unit_attach: un:0x%p property configuration complete.\n", un);
7642 7642
7643 7643 /*
7644 7644 	 * A device is treated as hotpluggable only if it has the
7645 7645 	 * "hotpluggable" property. Otherwise, it is regarded as
7646 7646 	 * non-hotpluggable.
7647 7647 */
7648 7648 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
7649 7649 -1) != -1) {
7650 7650 un->un_f_is_hotpluggable = TRUE;
7651 7651 }
7652 7652
7653 7653 /*
7654 7654 	 * Set the unit's attributes (flags) according to "hotpluggable"
7655 7655 	 * and the RMB bit in the INQUIRY data.
7656 7656 */
7657 7657 sd_set_unit_attributes(un, devi);
7658 7658
7659 7659 /*
7660 7660 * By default, we mark the capacity, lbasize, and geometry
7661 7661 * as invalid. Only if we successfully read a valid capacity
7662 7662 * will we update the un_blockcount and un_tgt_blocksize with the
7663 7663 * valid values (the geometry will be validated later).
7664 7664 */
7665 7665 un->un_f_blockcount_is_valid = FALSE;
7666 7666 un->un_f_tgt_blocksize_is_valid = FALSE;
7667 7667
7668 7668 /*
7669 7669 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
7670 7670 * otherwise.
7671 7671 */
7672 7672 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE;
7673 7673 un->un_blockcount = 0;
7674 7674
7675 7675 /*
7676 7676 * Set up the per-instance info needed to determine the correct
7677 7677 * CDBs and other info for issuing commands to the target.
7678 7678 */
7679 7679 sd_init_cdb_limits(un);
7680 7680
7681 7681 /*
7682 7682 * Set up the IO chains to use, based upon the target type.
7683 7683 */
7684 7684 if (un->un_f_non_devbsize_supported) {
7685 7685 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
7686 7686 } else {
7687 7687 un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
7688 7688 }
7689 7689 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
7690 7690 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
7691 7691 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;
7692 7692
7693 7693 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
7694 7694 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
7695 7695 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
7696 7696 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);
7697 7697
7698 7698
7699 7699 if (ISCD(un)) {
7700 7700 un->un_additional_codes = sd_additional_codes;
7701 7701 } else {
7702 7702 un->un_additional_codes = NULL;
7703 7703 }
7704 7704
7705 7705 /*
7706 7706 * Create the kstats here so they can be available for attach-time
7707 7707 * routines that send commands to the unit (either polled or via
7708 7708 * sd_send_scsi_cmd).
7709 7709 *
7710 7710 * Note: This is a critical sequence that needs to be maintained:
7711 7711 * 1) Instantiate the kstats here, before any routines using the
7712 7712 * iopath (i.e. sd_send_scsi_cmd).
7713 7713 * 2) Instantiate and initialize the partition stats
7714 7714 * (sd_set_pstats).
7715 7715 * 3) Initialize the error stats (sd_set_errstats), following
7716 7716 	 *       sd_validate_geometry(), sd_register_devid(),
7717 7717 * and sd_cache_control().
7718 7718 */
7719 7719
7720 7720 un->un_stats = kstat_create(sd_label, instance,
7721 7721 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
7722 7722 if (un->un_stats != NULL) {
7723 7723 un->un_stats->ks_lock = SD_MUTEX(un);
7724 7724 kstat_install(un->un_stats);
7725 7725 }
7726 7726 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7727 7727 "sd_unit_attach: un:0x%p un_stats created\n", un);
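
As a usage note (not part of this change), the I/O kstat created above is what iostat(1M) and similar tools read. A minimal libkstat consumer might look like the sketch below; it assumes instance 0 and the name "sd0" that results from passing a NULL name to kstat_create(). Compile with -lkstat.

	#include <sys/types.h>
	#include <kstat.h>
	#include <stdio.h>

	int
	main(void)
	{
		kstat_ctl_t *kc = kstat_open();
		kstat_t *ksp;
		kstat_io_t kio;

		if (kc == NULL)
			return (1);
		/* module "sd", instance 0, kstat name "sd0" (assumed) */
		ksp = kstat_lookup(kc, "sd", 0, "sd0");
		if (ksp != NULL && kstat_read(kc, ksp, &kio) != -1) {
			(void) printf("reads=%u writes=%u "
			    "nread=%llu nwritten=%llu\n",
			    kio.reads, kio.writes,
			    (u_longlong_t)kio.nread,
			    (u_longlong_t)kio.nwritten);
		}
		(void) kstat_close(kc);
		return (0);
	}
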
7728 7728
7729 7729 sd_create_errstats(un, instance);
7730 7730 if (un->un_errstats == NULL) {
7731 7731 goto create_errstats_failed;
7732 7732 }
7733 7733 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7734 7734 "sd_unit_attach: un:0x%p errstats created\n", un);
7735 7735
7736 7736 /*
7737 7737 * The following if/else code was relocated here from below as part
7738 7738 * of the fix for bug (4430280). However with the default setup added
7739 7739 * on entry to this routine, it's no longer absolutely necessary for
7740 7740 * this to be before the call to sd_spin_up_unit.
7741 7741 */
7742 7742 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) {
7743 7743 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) ||
7744 7744 (devp->sd_inq->inq_ansi == 5)) &&
7745 7745 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque;
7746 7746
7747 7747 /*
7748 7748 * If tagged queueing is supported by the target
7749 7749 * and by the host adapter then we will enable it
7750 7750 */
7751 7751 un->un_tagflags = 0;
7752 7752 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag &&
7753 7753 (un->un_f_arq_enabled == TRUE)) {
7754 7754 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing",
7755 7755 1, 1) == 1) {
7756 7756 un->un_tagflags = FLAG_STAG;
7757 7757 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7758 7758 "sd_unit_attach: un:0x%p tag queueing "
7759 7759 "enabled\n", un);
7760 7760 } else if (scsi_ifgetcap(SD_ADDRESS(un),
7761 7761 "untagged-qing", 0) == 1) {
7762 7762 un->un_f_opt_queueing = TRUE;
7763 7763 un->un_saved_throttle = un->un_throttle =
7764 7764 min(un->un_throttle, 3);
7765 7765 } else {
7766 7766 un->un_f_opt_queueing = FALSE;
7767 7767 un->un_saved_throttle = un->un_throttle = 1;
7768 7768 }
7769 7769 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0)
7770 7770 == 1) && (un->un_f_arq_enabled == TRUE)) {
7771 7771 /* The Host Adapter supports internal queueing. */
7772 7772 un->un_f_opt_queueing = TRUE;
7773 7773 un->un_saved_throttle = un->un_throttle =
7774 7774 min(un->un_throttle, 3);
7775 7775 } else {
7776 7776 un->un_f_opt_queueing = FALSE;
7777 7777 un->un_saved_throttle = un->un_throttle = 1;
7778 7778 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7779 7779 "sd_unit_attach: un:0x%p no tag queueing\n", un);
7780 7780 }
7781 7781
7782 7782 /*
7783 7783 * Enable large transfers for SATA/SAS drives
7784 7784 */
7785 7785 if (SD_IS_SERIAL(un)) {
7786 7786 un->un_max_xfer_size =
7787 7787 ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7788 7788 sd_max_xfer_size, SD_MAX_XFER_SIZE);
7789 7789 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7790 7790 "sd_unit_attach: un:0x%p max transfer "
7791 7791 "size=0x%x\n", un, un->un_max_xfer_size);
7792 7792
7793 7793 }
7794 7794
7795 7795 	/* Set up or tear down default wide operations for disks */
7796 7796
7797 7797 /*
7798 7798 * Note: Legacy: it may be possible for both "sd_max_xfer_size"
7799 7799 * and "ssd_max_xfer_size" to exist simultaneously on the same
7800 7800 * system and be set to different values. In the future this
7801 7801 * code may need to be updated when the ssd module is
7802 7802 * obsoleted and removed from the system. (4299588)
7803 7803 */
7804 7804 if (SD_IS_PARALLEL_SCSI(un) &&
7805 7805 (devp->sd_inq->inq_rdf == RDF_SCSI2) &&
7806 7806 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) {
7807 7807 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
7808 7808 1, 1) == 1) {
7809 7809 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7810 7810 "sd_unit_attach: un:0x%p Wide Transfer "
7811 7811 "enabled\n", un);
7812 7812 }
7813 7813
7814 7814 /*
7815 7815 * If tagged queuing has also been enabled, then
7816 7816 * enable large xfers
7817 7817 */
7818 7818 if (un->un_saved_throttle == sd_max_throttle) {
7819 7819 un->un_max_xfer_size =
7820 7820 ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7821 7821 sd_max_xfer_size, SD_MAX_XFER_SIZE);
7822 7822 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7823 7823 "sd_unit_attach: un:0x%p max transfer "
7824 7824 "size=0x%x\n", un, un->un_max_xfer_size);
7825 7825 }
7826 7826 } else {
7827 7827 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
7828 7828 0, 1) == 1) {
7829 7829 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7830 7830 "sd_unit_attach: un:0x%p "
7831 7831 "Wide Transfer disabled\n", un);
7832 7832 }
7833 7833 }
7834 7834 } else {
7835 7835 un->un_tagflags = FLAG_STAG;
7836 7836 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY,
7837 7837 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE);
7838 7838 }
7839 7839
7840 7840 /*
7841 7841 * If this target supports LUN reset, try to enable it.
7842 7842 */
7843 7843 if (un->un_f_lun_reset_enabled) {
7844 7844 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) {
7845 7845 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7846 7846 "un:0x%p lun_reset capability set\n", un);
7847 7847 } else {
7848 7848 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7849 7849 "un:0x%p lun-reset capability not set\n", un);
7850 7850 }
7851 7851 }
7852 7852
7853 7853 /*
7854 7854 * Adjust the maximum transfer size. This is to fix
7855 7855 * the problem of partial DMA support on SPARC. Some
7856 7856 	 * HBA drivers, like aac, have a very small dma_attr_maxxfer
7857 7857 * size, which requires partial DMA support on SPARC.
7858 7858 * In the future the SPARC pci nexus driver may solve
7859 7859 * the problem instead of this fix.
7860 7860 */
7861 7861 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1);
7862 7862 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) {
7863 7863 /* We need DMA partial even on sparc to ensure sddump() works */
7864 7864 un->un_max_xfer_size = max_xfer_size;
7865 7865 if (un->un_partial_dma_supported == 0)
7866 7866 un->un_partial_dma_supported = 1;
7867 7867 }
7868 7868 if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
7869 7869 DDI_PROP_DONTPASS, "buf_break", 0) == 1) {
7870 7870 if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr,
7871 7871 un->un_max_xfer_size) == 1) {
7872 7872 un->un_buf_breakup_supported = 1;
7873 7873 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7874 7874 "un:0x%p Buf breakup enabled\n", un);
7875 7875 }
7876 7876 }
7877 7877
7878 7878 /*
7879 7879 * Set PKT_DMA_PARTIAL flag.
7880 7880 */
7881 7881 if (un->un_partial_dma_supported == 1) {
7882 7882 un->un_pkt_flags = PKT_DMA_PARTIAL;
7883 7883 } else {
7884 7884 un->un_pkt_flags = 0;
7885 7885 }
7886 7886
7887 7887 /* Initialize sd_ssc_t for internal uscsi commands */
7888 7888 ssc = sd_ssc_init(un);
7889 7889 scsi_fm_init(devp);
7890 7890
7891 7891 /*
7892 7892 	 * Allocate memory for SCSI FMA structures.
7893 7893 */
7894 7894 un->un_fm_private =
7895 7895 kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP);
7896 7896 sfip = (struct sd_fm_internal *)un->un_fm_private;
7897 7897 sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd;
7898 7898 sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo;
7899 7899 sfip->fm_ssc.ssc_un = un;
7900 7900
7901 7901 if (ISCD(un) ||
7902 7902 un->un_f_has_removable_media ||
7903 7903 devp->sd_fm_capable == DDI_FM_NOT_CAPABLE) {
7904 7904 /*
7905 7905 		 * We don't touch CDROMs or DDI_FM_NOT_CAPABLE devices.
7906 7906 		 * Their logging is unchanged.
7907 7907 */
7908 7908 sfip->fm_log_level = SD_FM_LOG_NSUP;
7909 7909 } else {
7910 7910 /*
7911 7911 		 * If we get here, this is a non-CDROM, FM-capable
7912 7912 		 * device, and it will not keep the old scsi_log output
7913 7913 		 * in /var/adm/messages as before. The property
7914 7914 		 * "fm-scsi-log" controls whether the FM telemetry will
7915 7915 		 * be logged in /var/adm/messages.
7916 7916 */
7917 7917 int fm_scsi_log;
7918 7918 fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
7919 7919 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0);
7920 7920
7921 7921 if (fm_scsi_log)
7922 7922 sfip->fm_log_level = SD_FM_LOG_EREPORT;
7923 7923 else
7924 7924 sfip->fm_log_level = SD_FM_LOG_SILENT;
7925 7925 }
7926 7926
7927 7927 /*
7928 7928 * At this point in the attach, we have enough info in the
7929 7929 * soft state to be able to issue commands to the target.
7930 7930 *
7931 7931 * All command paths used below MUST issue their commands as
7932 7932 * SD_PATH_DIRECT. This is important as intermediate layers
7933 7933 * are not all initialized yet (such as PM).
7934 7934 */
7935 7935
7936 7936 /*
7937 7937 * Send a TEST UNIT READY command to the device. This should clear
7938 7938 * any outstanding UNIT ATTENTION that may be present.
7939 7939 *
7940 7940 * Note: Don't check for success, just track if there is a reservation,
7941 7941 * this is a throw away command to clear any unit attentions.
7942 7942 *
7943 7943 * Note: This MUST be the first command issued to the target during
7944 7944 * attach to ensure power on UNIT ATTENTIONS are cleared.
7945 7945 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated
7946 7946 * with attempts at spinning up a device with no media.
7947 7947 */
7948 7948 status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
7949 7949 if (status != 0) {
7950 7950 if (status == EACCES)
7951 7951 reservation_flag = SD_TARGET_IS_RESERVED;
7952 7952 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
7953 7953 }
7954 7954
7955 7955 /*
7956 7956 * If the device is NOT a removable media device, attempt to spin
7957 7957 * it up (using the START_STOP_UNIT command) and read its capacity
7958 7958 * (using the READ CAPACITY command). Note, however, that either
7959 7959 * of these could fail and in some cases we would continue with
7960 7960 * the attach despite the failure (see below).
7961 7961 */
7962 7962 if (un->un_f_descr_format_supported) {
7963 7963
7964 7964 switch (sd_spin_up_unit(ssc)) {
7965 7965 case 0:
7966 7966 /*
7967 7967 * Spin-up was successful; now try to read the
7968 7968 * capacity. If successful then save the results
7969 7969 * and mark the capacity & lbasize as valid.
7970 7970 */
7971 7971 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7972 7972 "sd_unit_attach: un:0x%p spin-up successful\n", un);
7973 7973
7974 7974 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
7975 7975 &lbasize, SD_PATH_DIRECT);
7976 7976
7977 7977 switch (status) {
7978 7978 case 0: {
7979 7979 if (capacity > DK_MAX_BLOCKS) {
7980 7980 #ifdef _LP64
7981 7981 if ((capacity + 1) >
7982 7982 SD_GROUP1_MAX_ADDRESS) {
7983 7983 /*
7984 7984 * Enable descriptor format
7985 7985 * sense data so that we can
7986 7986 * get 64 bit sense data
7987 7987 * fields.
7988 7988 */
7989 7989 sd_enable_descr_sense(ssc);
7990 7990 }
7991 7991 #else
7992 7992 /* 32-bit kernels can't handle this */
7993 7993 scsi_log(SD_DEVINFO(un),
7994 7994 sd_label, CE_WARN,
7995 7995 "disk has %llu blocks, which "
7996 7996 "is too large for a 32-bit "
7997 7997 "kernel", capacity);
7998 7998
7999 7999 #if defined(__i386) || defined(__amd64)
8000 8000 /*
8001 8001 				 * A 1TB disk was treated as (1T - 512)B
8002 8002 				 * in the past, so it might have a valid
8003 8003 				 * VTOC and Solaris partitions; we have
8004 8004 				 * to allow it to continue to
8005 8005 				 * work.
8006 8006 */
8007 8007 if (capacity -1 > DK_MAX_BLOCKS)
8008 8008 #endif
8009 8009 goto spinup_failed;
8010 8010 #endif
8011 8011 }
8012 8012
8013 8013 /*
8014 8014 				 * It's not necessary to check here whether
8015 8015 				 * the capacity of the device is bigger than
8016 8016 				 * what the max HBA cdb can support, because
8017 8017 				 * sd_send_scsi_READ_CAPACITY retrieves the
8018 8018 				 * capacity with a USCSI command, which is
8019 8019 				 * constrained by the max HBA cdb. In fact,
8020 8020 				 * sd_send_scsi_READ_CAPACITY returns EINVAL
8021 8021 				 * when a bigger cdb than the required cdb
8022 8022 				 * length is used. That case is handled in
8023 8023 				 * "case EINVAL" below.
8024 8024 */
8025 8025
8026 8026 /*
8027 8027 * The following relies on
8028 8028 * sd_send_scsi_READ_CAPACITY never
8029 8029 * returning 0 for capacity and/or lbasize.
8030 8030 */
8031 8031 sd_update_block_info(un, lbasize, capacity);
8032 8032
8033 8033 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8034 8034 "sd_unit_attach: un:0x%p capacity = %ld "
8035 8035 "blocks; lbasize= %ld.\n", un,
8036 8036 un->un_blockcount, un->un_tgt_blocksize);
8037 8037
8038 8038 break;
8039 8039 }
8040 8040 case EINVAL:
8041 8041 /*
8042 8042 * In the case where the max-cdb-length property
8043 8043 * is smaller than the required CDB length for
8044 8044 * a SCSI device, a target driver can fail to
8045 8045 * attach to that device.
8046 8046 */
8047 8047 scsi_log(SD_DEVINFO(un),
8048 8048 sd_label, CE_WARN,
8049 8049 "disk capacity is too large "
8050 8050 "for current cdb length");
8051 8051 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8052 8052
8053 8053 goto spinup_failed;
8054 8054 case EACCES:
8055 8055 /*
8056 8056 * Should never get here if the spin-up
8057 8057 * succeeded, but code it in anyway.
8058 8058 * From here, just continue with the attach...
8059 8059 */
8060 8060 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8061 8061 "sd_unit_attach: un:0x%p "
8062 8062 "sd_send_scsi_READ_CAPACITY "
8063 8063 "returned reservation conflict\n", un);
8064 8064 reservation_flag = SD_TARGET_IS_RESERVED;
8065 8065 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8066 8066 break;
8067 8067 default:
8068 8068 /*
8069 8069 * Likewise, should never get here if the
8070 8070 * spin-up succeeded. Just continue with
8071 8071 * the attach...
8072 8072 */
8073 8073 if (status == EIO)
8074 8074 sd_ssc_assessment(ssc,
8075 8075 SD_FMT_STATUS_CHECK);
8076 8076 else
8077 8077 sd_ssc_assessment(ssc,
8078 8078 SD_FMT_IGNORE);
8079 8079 break;
8080 8080 }
8081 8081 break;
8082 8082 case EACCES:
8083 8083 /*
8084 8084 * Device is reserved by another host. In this case
8085 8085 * we could not spin it up or read the capacity, but
8086 8086 * we continue with the attach anyway.
8087 8087 */
8088 8088 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8089 8089 "sd_unit_attach: un:0x%p spin-up reservation "
8090 8090 "conflict.\n", un);
8091 8091 reservation_flag = SD_TARGET_IS_RESERVED;
8092 8092 break;
8093 8093 default:
8094 8094 /* Fail the attach if the spin-up failed. */
8095 8095 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8096 8096 "sd_unit_attach: un:0x%p spin-up failed.", un);
8097 8097 goto spinup_failed;
8098 8098 }
8099 8099
8100 8100 }
8101 8101
8102 8102 /*
8103 8103 	 * Check to see if this is an MMC drive
8104 8104 */
8105 8105 if (ISCD(un)) {
8106 8106 sd_set_mmc_caps(ssc);
8107 8107 }
8108 8108
8109 8109 /*
8110 8110 * Add a zero-length attribute to tell the world we support
8111 8111 * kernel ioctls (for layered drivers)
8112 8112 */
8113 8113 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
8114 8114 DDI_KERNEL_IOCTL, NULL, 0);
8115 8115
8116 8116 /*
8117 8117 * Add a boolean property to tell the world we support
8118 8118 * the B_FAILFAST flag (for layered drivers)
8119 8119 */
8120 8120 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
8121 8121 "ddi-failfast-supported", NULL, 0);
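
For illustration, here is the consumer side of these zero-length capability properties (hypothetical layered-driver code, not part of this change): a layered driver can test for the property with ddi_prop_exists() before marking its I/O with B_FAILFAST.

	#include <sys/ddi.h>
	#include <sys/sunddi.h>
	#include <sys/buf.h>

	/*
	 * Hypothetical layered-driver helper: request fail-fast
	 * semantics on a buf only when the underlying target
	 * advertises support for it.
	 */
	static void
	xlayer_maybe_failfast(dev_info_t *tgt_dip, struct buf *bp)
	{
		if (ddi_prop_exists(DDI_DEV_T_ANY, tgt_dip,
		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
		    "ddi-failfast-supported"))
			bp->b_flags |= B_FAILFAST;
	}
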
8122 8122
8123 8123 /*
8124 8124 * Initialize power management
8125 8125 */
8126 8126 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL);
8127 8127 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL);
8128 8128 sd_setup_pm(ssc, devi);
8129 8129 if (un->un_f_pm_is_enabled == FALSE) {
8130 8130 /*
8131 8131 * For performance, point to a jump table that does
8132 8132 * not include pm.
8133 8133 * The direct and priority chains don't change with PM.
8134 8134 *
8135 8135 * Note: this is currently done based on individual device
8136 8136 * capabilities. When an interface for determining system
8137 8137 * power enabled state becomes available, or when additional
8138 8138 * layers are added to the command chain, these values will
8139 8139 * have to be re-evaluated for correctness.
8140 8140 */
8141 8141 if (un->un_f_non_devbsize_supported) {
8142 8142 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM;
8143 8143 } else {
8144 8144 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM;
8145 8145 }
8146 8146 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
8147 8147 }
8148 8148
8149 8149 /*
8150 8150 * This property is set to 0 by HA software to avoid retries
8151 8151 * on a reserved disk. (The preferred property name is
8152 8152 * "retry-on-reservation-conflict") (1189689)
8153 8153 *
8154 8154 * Note: The use of a global here can have unintended consequences. A
8155 8155 	 * per-instance variable is preferable to match the capabilities of
8156 8156 	 * different underlying HBAs. (4402600)
8157 8157 */
8158 8158 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi,
8159 8159 DDI_PROP_DONTPASS, "retry-on-reservation-conflict",
8160 8160 sd_retry_on_reservation_conflict);
8161 8161 if (sd_retry_on_reservation_conflict != 0) {
8162 8162 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY,
8163 8163 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name,
8164 8164 sd_retry_on_reservation_conflict);
8165 8165 }
8166 8166
8167 8167 /* Set up options for QFULL handling. */
8168 8168 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
8169 8169 "qfull-retries", -1)) != -1) {
8170 8170 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries",
8171 8171 rval, 1);
8172 8172 }
8173 8173 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
8174 8174 "qfull-retry-interval", -1)) != -1) {
8175 8175 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval",
8176 8176 rval, 1);
8177 8177 }
8178 8178
8179 8179 /*
8180 8180 * This just prints a message that announces the existence of the
8181 8181 * device. The message is always printed in the system logfile, but
8182 8182 * only appears on the console if the system is booted with the
8183 8183 * -v (verbose) argument.
8184 8184 */
8185 8185 ddi_report_dev(devi);
8186 8186
8187 8187 un->un_mediastate = DKIO_NONE;
8188 8188
8189 8189 /*
8190 8190 * Check Block Device Characteristics VPD.
8191 8191 */
8192 8192 sd_check_bdc_vpd(ssc);
8193 8193
8194 8194 /*
8195 8195 * Check whether the drive is in emulation mode.
8196 8196 */
8197 8197 sd_check_emulation_mode(ssc);
8198 8198
8199 8199 cmlb_alloc_handle(&un->un_cmlbhandle);
8200 8200
8201 8201 #if defined(__i386) || defined(__amd64)
8202 8202 /*
8203 8203 * On x86, compensate for off-by-1 legacy error
8204 8204 */
8205 8205 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable &&
8206 8206 (lbasize == un->un_sys_blocksize))
8207 8207 offbyone = CMLB_OFF_BY_ONE;
8208 8208 #endif
8209 8209
8210 8210 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype,
8211 8211 VOID2BOOLEAN(un->un_f_has_removable_media != 0),
8212 8212 VOID2BOOLEAN(un->un_f_is_hotpluggable != 0),
8213 8213 un->un_node_type, offbyone, un->un_cmlbhandle,
8214 8214 (void *)SD_PATH_DIRECT) != 0) {
8215 8215 goto cmlb_attach_failed;
8216 8216 }
8217 8217
8218 8218
8219 8219 /*
8220 8220 * Read and validate the device's geometry (ie, disk label)
8221 8221 * A new unformatted drive will not have a valid geometry, but
8222 8222 * the driver needs to successfully attach to this device so
8223 8223 * the drive can be formatted via ioctls.
8224 8224 */
8225 8225 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0,
8226 8226 (void *)SD_PATH_DIRECT) == 0) ? 1: 0;
8227 8227
8228 8228 mutex_enter(SD_MUTEX(un));
8229 8229
8230 8230 /*
8231 8231 * Read and initialize the devid for the unit.
8232 8232 */
8233 8233 if (un->un_f_devid_supported) {
8234 8234 sd_register_devid(ssc, devi, reservation_flag);
8235 8235 }
8236 8236 mutex_exit(SD_MUTEX(un));
8237 8237
8238 8238 #if (defined(__fibre))
8239 8239 /*
8240 8240 * Register callbacks for fibre only. You can't do this solely
8241 8241 * on the basis of the devid_type because this is hba specific.
8242 8242 * We need to query our hba capabilities to find out whether to
8243 8243 * register or not.
8244 8244 */
8245 8245 if (un->un_f_is_fibre) {
8246 8246 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
8247 8247 sd_init_event_callbacks(un);
8248 8248 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8249 8249 "sd_unit_attach: un:0x%p event callbacks inserted",
8250 8250 un);
8251 8251 }
8252 8252 }
8253 8253 #endif
8254 8254
8255 8255 if (un->un_f_opt_disable_cache == TRUE) {
8256 8256 /*
8257 8257 * Disable both read cache and write cache. This is
8258 8258 * the historic behavior of the keywords in the config file.
8259 8259 */
8260 8260 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) !=
8261 8261 0) {
8262 8262 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8263 8263 "sd_unit_attach: un:0x%p Could not disable "
8264 8264 "caching", un);
8265 8265 goto devid_failed;
8266 8266 }
8267 8267 }
8268 8268
8269 8269 /*
8270 8270 * Check the value of the WCE bit and if it's allowed to be changed,
8271 8271 * set un_f_write_cache_enabled and un_f_cache_mode_changeable
8272 8272 * accordingly.
8273 8273 */
8274 8274 (void) sd_get_write_cache_enabled(ssc, &wc_enabled);
8275 8275 sd_get_write_cache_changeable(ssc, &wc_changeable);
8276 8276 mutex_enter(SD_MUTEX(un));
8277 8277 un->un_f_write_cache_enabled = (wc_enabled != 0);
8278 8278 un->un_f_cache_mode_changeable = (wc_changeable != 0);
8279 8279 mutex_exit(SD_MUTEX(un));
8280 8280
8281 8281 if ((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR &&
8282 8282 un->un_tgt_blocksize != DEV_BSIZE) ||
8283 8283 un->un_f_enable_rmw) {
8284 8284 if (!(un->un_wm_cache)) {
8285 8285 (void) snprintf(name_str, sizeof (name_str),
8286 8286 "%s%d_cache",
8287 8287 ddi_driver_name(SD_DEVINFO(un)),
8288 8288 ddi_get_instance(SD_DEVINFO(un)));
8289 8289 un->un_wm_cache = kmem_cache_create(
8290 8290 name_str, sizeof (struct sd_w_map),
8291 8291 8, sd_wm_cache_constructor,
8292 8292 sd_wm_cache_destructor, NULL,
8293 8293 (void *)un, NULL, 0);
8294 8294 if (!(un->un_wm_cache)) {
8295 8295 goto wm_cache_failed;
8296 8296 }
8297 8297 }
8298 8298 }
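
As a brief aside, objects come out of this cache through the usual kmem interfaces; a sketch of the alloc/free pairing (the surrounding call sites are hypothetical):

	struct sd_w_map *wmp;

	/* sd_wm_cache_constructor() has already initialized the object */
	wmp = kmem_cache_alloc(un->un_wm_cache, KM_SLEEP);
	/* ... track an in-flight read-modify-write range with wmp ... */
	kmem_cache_free(un->un_wm_cache, wmp);
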
8299 8299
8300 8300 /*
8301 8301 * Check the value of the NV_SUP bit and set
8302 8302 * un_f_suppress_cache_flush accordingly.
8303 8303 */
8304 8304 sd_get_nv_sup(ssc);
8305 8305
8306 8306 /*
8307 8307 * Find out what type of reservation this disk supports.
8308 8308 */
8309 8309 status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL);
8310 8310
8311 8311 switch (status) {
8312 8312 case 0:
8313 8313 /*
8314 8314 * SCSI-3 reservations are supported.
8315 8315 */
8316 8316 un->un_reservation_type = SD_SCSI3_RESERVATION;
8317 8317 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8318 8318 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un);
8319 8319 break;
8320 8320 case ENOTSUP:
8321 8321 /*
8322 8322 * The PERSISTENT RESERVE IN command would not be recognized by
8323 8323 * a SCSI-2 device, so assume the reservation type is SCSI-2.
8324 8324 */
8325 8325 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8326 8326 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un);
8327 8327 un->un_reservation_type = SD_SCSI2_RESERVATION;
8328 8328
8329 8329 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8330 8330 break;
8331 8331 default:
8332 8332 /*
8333 8333 * default to SCSI-3 reservations
8334 8334 */
8335 8335 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8336 8336 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un);
8337 8337 un->un_reservation_type = SD_SCSI3_RESERVATION;
8338 8338
8339 8339 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8340 8340 break;
8341 8341 }
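
For reference, the same READ KEYS probe can be issued from userland through sd's USCSI path. A hedged sketch follows (device path, buffer size, and error handling are arbitrary; the CDB layout follows SPC: opcode 0x5E, service action in byte 1, allocation length in bytes 7-8):

	#include <sys/types.h>
	#include <sys/scsi/impl/uscsi.h>
	#include <fcntl.h>
	#include <string.h>
	#include <stropts.h>
	#include <unistd.h>

	#define	PRIN_READ_KEYS	0x00	/* service action */

	int
	read_keys(const char *devpath)
	{
		uchar_t cdb[10] = { 0 };
		uchar_t keys[256];
		struct uscsi_cmd ucmd;
		int fd, rv;

		cdb[0] = 0x5e;			/* PERSISTENT RESERVE IN */
		cdb[1] = PRIN_READ_KEYS;
		cdb[7] = (sizeof (keys) >> 8) & 0xff;	/* alloc length */
		cdb[8] = sizeof (keys) & 0xff;

		(void) memset(&ucmd, 0, sizeof (ucmd));
		ucmd.uscsi_cdb = (caddr_t)cdb;
		ucmd.uscsi_cdblen = sizeof (cdb);
		ucmd.uscsi_bufaddr = (caddr_t)keys;
		ucmd.uscsi_buflen = sizeof (keys);
		ucmd.uscsi_flags = USCSI_READ | USCSI_SILENT;
		ucmd.uscsi_timeout = 30;

		if ((fd = open(devpath, O_RDONLY | O_NDELAY)) < 0)
			return (-1);
		rv = ioctl(fd, USCSICMD, &ucmd);	/* 0 on success */
		(void) close(fd);
		return (rv);
	}
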
8342 8342
8343 8343 /*
8344 8344 * Set the pstat and error stat values here, so data obtained during the
8345 8345 * previous attach-time routines is available.
8346 8346 *
8347 8347 * Note: This is a critical sequence that needs to be maintained:
8348 8348 * 1) Instantiate the kstats before any routines using the iopath
8349 8349 * (i.e. sd_send_scsi_cmd).
8350 8350 * 2) Initialize the error stats (sd_set_errstats) and partition
8351 8351 	 *    stats (sd_set_pstats) here, following
8352 8352 * cmlb_validate_geometry(), sd_register_devid(), and
8353 8353 * sd_cache_control().
8354 8354 */
8355 8355
8356 8356 if (un->un_f_pkstats_enabled && geom_label_valid) {
8357 8357 sd_set_pstats(un);
8358 8358 SD_TRACE(SD_LOG_IO_PARTITION, un,
8359 8359 "sd_unit_attach: un:0x%p pstats created and set\n", un);
8360 8360 }
8361 8361
8362 8362 sd_set_errstats(un);
8363 8363 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8364 8364 "sd_unit_attach: un:0x%p errstats set\n", un);
8365 8365
8366 8366
8367 8367 /*
8368 8368 	 * After successfully attaching an instance, we record how many
8369 8369 	 * luns have been attached on the relevant target and controller
8370 8370 	 * for parallel SCSI. This information is used when sd tries to
8371 8371 	 * set the tagged queuing capability in the HBA.
8372 8372 */
8373 8373 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) {
8374 8374 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH);
8375 8375 }
8376 8376
8377 8377 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8378 8378 "sd_unit_attach: un:0x%p exit success\n", un);
8379 8379
8380 8380 /* Uninitialize sd_ssc_t pointer */
8381 8381 sd_ssc_fini(ssc);
8382 8382
8383 8383 return (DDI_SUCCESS);
8384 8384
8385 8385 /*
8386 8386 * An error occurred during the attach; clean up & return failure.
8387 8387 */
8388 8388 wm_cache_failed:
8389 8389 devid_failed:
8390 8390 ddi_remove_minor_node(devi, NULL);
8391 8391
8392 8392 cmlb_attach_failed:
8393 8393 /*
8394 8394 * Cleanup from the scsi_ifsetcap() calls (437868)
8395 8395 */
8396 8396 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
8397 8397 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
8398 8398
8399 8399 /*
8400 8400 	 * Refer to the comments on setting tagged-qing at the beginning of
8401 8401 	 * sd_unit_attach. We can only disable tagged queuing when there is
8402 8402 * no lun attached on the target.
8403 8403 */
8404 8404 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
8405 8405 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
8406 8406 }
8407 8407
8408 8408 if (un->un_f_is_fibre == FALSE) {
8409 8409 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
8410 8410 }
8411 8411
8412 8412 spinup_failed:
8413 8413
8414 8414 /* Uninitialize sd_ssc_t pointer */
8415 8415 sd_ssc_fini(ssc);
8416 8416
8417 8417 mutex_enter(SD_MUTEX(un));
8418 8418
8419 8419 /* Deallocate SCSI FMA memory spaces */
8420 8420 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));
8421 8421
8422 8422 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */
8423 8423 if (un->un_direct_priority_timeid != NULL) {
8424 8424 timeout_id_t temp_id = un->un_direct_priority_timeid;
8425 8425 un->un_direct_priority_timeid = NULL;
8426 8426 mutex_exit(SD_MUTEX(un));
8427 8427 (void) untimeout(temp_id);
8428 8428 mutex_enter(SD_MUTEX(un));
8429 8429 }
8430 8430
8431 8431 /* Cancel any pending start/stop timeouts */
8432 8432 if (un->un_startstop_timeid != NULL) {
8433 8433 timeout_id_t temp_id = un->un_startstop_timeid;
8434 8434 un->un_startstop_timeid = NULL;
8435 8435 mutex_exit(SD_MUTEX(un));
8436 8436 (void) untimeout(temp_id);
8437 8437 mutex_enter(SD_MUTEX(un));
8438 8438 }
8439 8439
8440 8440 /* Cancel any pending reset-throttle timeouts */
8441 8441 if (un->un_reset_throttle_timeid != NULL) {
8442 8442 timeout_id_t temp_id = un->un_reset_throttle_timeid;
8443 8443 un->un_reset_throttle_timeid = NULL;
8444 8444 mutex_exit(SD_MUTEX(un));
8445 8445 (void) untimeout(temp_id);
8446 8446 mutex_enter(SD_MUTEX(un));
8447 8447 }
8448 8448
8449 8449 /* Cancel rmw warning message timeouts */
8450 8450 if (un->un_rmw_msg_timeid != NULL) {
8451 8451 timeout_id_t temp_id = un->un_rmw_msg_timeid;
8452 8452 un->un_rmw_msg_timeid = NULL;
8453 8453 mutex_exit(SD_MUTEX(un));
8454 8454 (void) untimeout(temp_id);
8455 8455 mutex_enter(SD_MUTEX(un));
8456 8456 }
8457 8457
8458 8458 /* Cancel any pending retry timeouts */
8459 8459 if (un->un_retry_timeid != NULL) {
8460 8460 timeout_id_t temp_id = un->un_retry_timeid;
8461 8461 un->un_retry_timeid = NULL;
8462 8462 mutex_exit(SD_MUTEX(un));
8463 8463 (void) untimeout(temp_id);
8464 8464 mutex_enter(SD_MUTEX(un));
8465 8465 }
8466 8466
8467 8467 /* Cancel any pending delayed cv broadcast timeouts */
8468 8468 if (un->un_dcvb_timeid != NULL) {
8469 8469 timeout_id_t temp_id = un->un_dcvb_timeid;
8470 8470 un->un_dcvb_timeid = NULL;
8471 8471 mutex_exit(SD_MUTEX(un));
8472 8472 (void) untimeout(temp_id);
8473 8473 mutex_enter(SD_MUTEX(un));
8474 8474 }
8475 8475
8476 8476 mutex_exit(SD_MUTEX(un));
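
The repeated cancel blocks above all follow one idiom, distilled below (un_some_timeid is a generic stand-in, not a real field): snapshot and clear the timeout id while holding SD_MUTEX, then call untimeout() with the mutex dropped, since untimeout() waits for a running callback to finish and the callback itself may need SD_MUTEX.

	mutex_enter(SD_MUTEX(un));
	if (un->un_some_timeid != NULL) {
		timeout_id_t temp_id = un->un_some_timeid;

		un->un_some_timeid = NULL;	/* callback sees NULL */
		mutex_exit(SD_MUTEX(un));
		(void) untimeout(temp_id);	/* may wait for callback */
		mutex_enter(SD_MUTEX(un));
	}
	mutex_exit(SD_MUTEX(un));
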
8477 8477
8478 8478 	/* There should not be any in-progress I/O, so ASSERT that here */
8479 8479 ASSERT(un->un_ncmds_in_transport == 0);
8480 8480 ASSERT(un->un_ncmds_in_driver == 0);
8481 8481
8482 8482 /* Do not free the softstate if the callback routine is active */
8483 8483 sd_sync_with_callback(un);
8484 8484
8485 8485 /*
8486 8486 * Partition stats apparently are not used with removables. These would
8487 8487 * not have been created during attach, so no need to clean them up...
8488 8488 */
8489 8489 if (un->un_errstats != NULL) {
8490 8490 kstat_delete(un->un_errstats);
8491 8491 un->un_errstats = NULL;
8492 8492 }
8493 8493
8494 8494 create_errstats_failed:
8495 8495
8496 8496 if (un->un_stats != NULL) {
8497 8497 kstat_delete(un->un_stats);
8498 8498 un->un_stats = NULL;
8499 8499 }
8500 8500
8501 8501 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
8502 8502 ddi_xbuf_attr_destroy(un->un_xbuf_attr);
8503 8503
8504 8504 ddi_prop_remove_all(devi);
8505 8505 sema_destroy(&un->un_semoclose);
8506 8506 cv_destroy(&un->un_state_cv);
8507 8507
8508 8508 sd_free_rqs(un);
8509 8509
8510 8510 alloc_rqs_failed:
8511 8511
8512 8512 devp->sd_private = NULL;
8513 8513 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */
8514 8514
8515 8515 /*
8516 8516 * Note: the man pages are unclear as to whether or not doing a
8517 8517 * ddi_soft_state_free(sd_state, instance) is the right way to
8518 8518 * clean up after the ddi_soft_state_zalloc() if the subsequent
8519 8519 * ddi_get_soft_state() fails. The implication seems to be
8520 8520 * that the get_soft_state cannot fail if the zalloc succeeds.
8521 8521 */
8522 8522 #ifndef XPV_HVM_DRIVER
8523 8523 ddi_soft_state_free(sd_state, instance);
8524 8524 #endif /* !XPV_HVM_DRIVER */
8525 8525
8526 8526 probe_failed:
8527 8527 scsi_unprobe(devp);
8528 8528
8529 8529 return (DDI_FAILURE);
8530 8530 }
8531 8531
8532 8532
8533 8533 /*
8534 8534 * Function: sd_unit_detach
8535 8535 *
8536 8536 * Description: Performs DDI_DETACH processing for sddetach().
8537 8537 *
8538 8538 * Return Code: DDI_SUCCESS
8539 8539 * DDI_FAILURE
8540 8540 *
8541 8541 * Context: Kernel thread context
8542 8542 */
8543 8543
8544 8544 static int
8545 8545 sd_unit_detach(dev_info_t *devi)
8546 8546 {
8547 8547 struct scsi_device *devp;
8548 8548 struct sd_lun *un;
8549 8549 int i;
8550 8550 int tgt;
8551 8551 dev_t dev;
8552 8552 dev_info_t *pdip = ddi_get_parent(devi);
8553 8553 int instance = ddi_get_instance(devi);
8554 + int devigone = DEVI(devi)->devi_gone;
8554 8555
8555 8556 mutex_enter(&sd_detach_mutex);
8556 8557
8557 8558 /*
8558 8559 * Fail the detach for any of the following:
8559 - * - Unable to get the sd_lun struct for the instance
8560 - * - A layered driver has an outstanding open on the instance
8561 - * - Another thread is already detaching this instance
8562 - * - Another thread is currently performing an open
8560 + * - Unable to get the sd_lun struct for the instance
8561 + * - Another thread is already detaching this instance
8562 + * - Another thread is currently performing an open
8563 + *
8564 +	 * Additionally, if the "device gone" flag is not set:
8565 +	 * - There are outstanding commands in the driver
8566 +	 * - There are outstanding commands in the transport
8563 8567 */
8564 8568 devp = ddi_get_driver_private(devi);
8565 - if ((devp == NULL) ||
8566 - ((un = (struct sd_lun *)devp->sd_private) == NULL) ||
8567 - (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) ||
8568 - (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) {
8569 + if (devp == NULL || (un = (struct sd_lun *)devp->sd_private) == NULL ||
8570 + un->un_detach_count != 0 || un->un_opens_in_progress != 0 ||
8571 + (!devigone && (un->un_ncmds_in_driver != 0 ||
8572 + un->un_ncmds_in_transport != 0 ||
8573 + un->un_state == SD_STATE_RWAIT))) {
8569 8574 mutex_exit(&sd_detach_mutex);
8570 8575 return (DDI_FAILURE);
8571 8576 }
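
Restated as a hypothetical predicate, the new gating logic above says that a detach already in progress or an in-progress open always blocks, while outstanding commands block only when the device is still physically present:

	static boolean_t
	sd_detach_blocked(struct sd_lun *un, int devigone)
	{
		if (un->un_detach_count != 0 ||
		    un->un_opens_in_progress != 0)
			return (B_TRUE);
		if (devigone)
			return (B_FALSE);	/* nothing left to quiesce */
		return (un->un_ncmds_in_driver != 0 ||
		    un->un_ncmds_in_transport != 0 ||
		    un->un_state == SD_STATE_RWAIT);
	}
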
8572 8577
8573 - SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un);
8578 + SD_TRACE(SD_LOG_ATTACH_DETACH, un, "%s: entry 0x%p\n", __func__, un);
8574 8579
8575 8580 /*
8576 8581 * Mark this instance as currently in a detach, to inhibit any
8577 8582 * opens from a layered driver.
8578 8583 */
8579 8584 un->un_detach_count++;
8580 8585 mutex_exit(&sd_detach_mutex);
8581 8586
8582 8587 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
8583 8588 SCSI_ADDR_PROP_TARGET, -1);
8584 8589
8585 8590 dev = sd_make_device(SD_DEVINFO(un));
8586 8591
8587 -#ifndef lint
8588 - _NOTE(COMPETING_THREADS_NOW);
8589 -#endif
8590 -
8591 8592 mutex_enter(SD_MUTEX(un));
8592 8593
8593 8594 /*
8594 8595 * Fail the detach if there are any outstanding layered
8595 8596 * opens on this device.
8596 8597 */
8597 8598 for (i = 0; i < NDKMAP; i++) {
8598 8599 if (un->un_ocmap.lyropen[i] != 0) {
8599 8600 goto err_notclosed;
8600 8601 }
8601 8602 }
8602 8603
8603 8604 /*
8604 - * Verify there are NO outstanding commands issued to this device.
8605 - * ie, un_ncmds_in_transport == 0.
8606 - * It's possible to have outstanding commands through the physio
8607 - * code path, even though everything's closed.
8608 - */
8609 - if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) ||
8610 - (un->un_direct_priority_timeid != NULL) ||
8611 - (un->un_state == SD_STATE_RWAIT)) {
8612 - mutex_exit(SD_MUTEX(un));
8613 - SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8614 - "sd_dr_detach: Detach failure due to outstanding cmds\n");
8615 - goto err_stillbusy;
8616 - }
8617 -
8618 - /*
8619 8605 * If we have the device reserved, release the reservation.
8620 8606 */
8621 - if ((un->un_resvd_status & SD_RESERVE) &&
8607 + if (!devigone &&
8608 + (un->un_resvd_status & SD_RESERVE) &&
8622 8609 !(un->un_resvd_status & SD_LOST_RESERVE)) {
8623 8610 mutex_exit(SD_MUTEX(un));
8624 8611 /*
8625 8612 * Note: sd_reserve_release sends a command to the device
8626 8613 * via the sd_ioctlcmd() path, and can sleep.
8627 8614 */
8628 8615 if (sd_reserve_release(dev, SD_RELEASE) != 0) {
8629 8616 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8630 - "sd_dr_detach: Cannot release reservation \n");
8617 + "%s: cannot release reservation\n", __func__);
8631 8618 }
8632 8619 } else {
8633 8620 mutex_exit(SD_MUTEX(un));
8634 8621 }
8635 8622
8636 8623 /*
8637 8624 * Untimeout any reserve recover, throttle reset, restart unit
8638 8625 	 * and delayed broadcast timeout threads. Protect the timeout pointers
8639 8626 * from getting nulled by their callback functions.
8640 8627 */
8641 8628 mutex_enter(SD_MUTEX(un));
8642 8629 if (un->un_resvd_timeid != NULL) {
8643 8630 timeout_id_t temp_id = un->un_resvd_timeid;
8644 8631 un->un_resvd_timeid = NULL;
8645 8632 mutex_exit(SD_MUTEX(un));
8646 8633 (void) untimeout(temp_id);
8647 8634 mutex_enter(SD_MUTEX(un));
8648 8635 }
8649 8636
8650 8637 if (un->un_reset_throttle_timeid != NULL) {
8651 8638 timeout_id_t temp_id = un->un_reset_throttle_timeid;
8652 8639 un->un_reset_throttle_timeid = NULL;
8653 8640 mutex_exit(SD_MUTEX(un));
8654 8641 (void) untimeout(temp_id);
8655 8642 mutex_enter(SD_MUTEX(un));
8656 8643 }
8657 8644
8658 8645 if (un->un_startstop_timeid != NULL) {
8659 8646 timeout_id_t temp_id = un->un_startstop_timeid;
8660 8647 un->un_startstop_timeid = NULL;
8661 8648 mutex_exit(SD_MUTEX(un));
8662 8649 (void) untimeout(temp_id);
8663 8650 mutex_enter(SD_MUTEX(un));
8664 8651 }
8665 8652
8666 8653 if (un->un_rmw_msg_timeid != NULL) {
8667 8654 timeout_id_t temp_id = un->un_rmw_msg_timeid;
8668 8655 un->un_rmw_msg_timeid = NULL;
8669 8656 mutex_exit(SD_MUTEX(un));
8670 8657 (void) untimeout(temp_id);
8671 8658 mutex_enter(SD_MUTEX(un));
8672 8659 }
8673 8660
8674 8661 if (un->un_dcvb_timeid != NULL) {
8675 8662 timeout_id_t temp_id = un->un_dcvb_timeid;
8676 8663 un->un_dcvb_timeid = NULL;
8677 8664 mutex_exit(SD_MUTEX(un));
8678 8665 (void) untimeout(temp_id);
8679 8666 } else {
8680 8667 mutex_exit(SD_MUTEX(un));
8681 8668 }
8682 8669
8683 8670 /* Remove any pending reservation reclaim requests for this device */
8684 8671 sd_rmv_resv_reclaim_req(dev);
8685 8672
8686 8673 mutex_enter(SD_MUTEX(un));
8674 + if (un->un_retry_timeid != NULL) {
8675 + timeout_id_t temp_id = un->un_retry_timeid;
8676 + un->un_retry_timeid = NULL;
8677 + mutex_exit(SD_MUTEX(un));
8678 + (void) untimeout(temp_id);
8679 + mutex_enter(SD_MUTEX(un));
8680 + }
8687 8681
8688 8682 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */
8689 8683 if (un->un_direct_priority_timeid != NULL) {
8690 8684 timeout_id_t temp_id = un->un_direct_priority_timeid;
8691 8685 un->un_direct_priority_timeid = NULL;
8692 8686 mutex_exit(SD_MUTEX(un));
8693 8687 (void) untimeout(temp_id);
8694 8688 mutex_enter(SD_MUTEX(un));
8695 8689 }
8696 8690
8697 8691 /* Cancel any active multi-host disk watch thread requests */
8698 8692 if (un->un_mhd_token != NULL) {
8699 8693 mutex_exit(SD_MUTEX(un));
8700 8694 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token));
8701 8695 if (scsi_watch_request_terminate(un->un_mhd_token,
8702 8696 SCSI_WATCH_TERMINATE_NOWAIT)) {
8703 8697 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8704 - "sd_dr_detach: Cannot cancel mhd watch request\n");
8698 + "%s: cannot cancel mhd watch request\n", __func__);
8705 8699 /*
8706 8700 * Note: We are returning here after having removed
8707 8701 * some driver timeouts above. This is consistent with
8708 8702 * the legacy implementation but perhaps the watch
8709 8703 * terminate call should be made with the wait flag set.
8710 8704 */
8711 8705 goto err_stillbusy;
8712 8706 }
8713 8707 mutex_enter(SD_MUTEX(un));
8714 8708 un->un_mhd_token = NULL;
8715 8709 }
8716 8710
8717 8711 if (un->un_swr_token != NULL) {
8718 8712 mutex_exit(SD_MUTEX(un));
8719 8713 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token));
8720 8714 if (scsi_watch_request_terminate(un->un_swr_token,
8721 8715 SCSI_WATCH_TERMINATE_NOWAIT)) {
8722 8716 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8723 - "sd_dr_detach: Cannot cancel swr watch request\n");
8717 + "%s: cannot cancel swr watch request\n", __func__);
8724 8718 /*
8725 8719 * Note: We are returning here after having removed
8726 8720 * some driver timeouts above. This is consistent with
8727 8721 * the legacy implementation but perhaps the watch
8728 8722 * terminate call should be made with the wait flag set.
8729 8723 */
8730 8724 goto err_stillbusy;
8731 8725 }
8732 8726 mutex_enter(SD_MUTEX(un));
8733 8727 un->un_swr_token = NULL;
8734 8728 }
8735 8729
8736 - mutex_exit(SD_MUTEX(un));
8737 -
8738 8730 /*
8739 8731 * Clear any scsi_reset_notifies. We clear the reset notifies
8740 8732 * if we have not registered one.
8741 8733 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX!
8742 8734 */
8735 + mutex_exit(SD_MUTEX(un));
8743 8736 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
8744 8737 sd_mhd_reset_notify_cb, (caddr_t)un);
8745 8738
8746 8739 /*
8747 8740 	 * Protect the timeout pointers from getting nulled by
8748 8741 * their callback functions during the cancellation process.
8749 8742 * In such a scenario untimeout can be invoked with a null value.
8750 8743 */
8751 8744 _NOTE(NO_COMPETING_THREADS_NOW);
8752 8745
8753 8746 mutex_enter(&un->un_pm_mutex);
8754 8747 if (un->un_pm_idle_timeid != NULL) {
8755 8748 timeout_id_t temp_id = un->un_pm_idle_timeid;
8756 8749 un->un_pm_idle_timeid = NULL;
8757 8750 mutex_exit(&un->un_pm_mutex);
8758 8751
8759 8752 /*
8760 8753 * Timeout is active; cancel it.
8761 8754 * Note that it'll never be active on a device
8762 8755 * that does not support PM therefore we don't
8763 8756 * have to check before calling pm_idle_component.
8764 8757 */
8765 8758 (void) untimeout(temp_id);
8766 8759 (void) pm_idle_component(SD_DEVINFO(un), 0);
8767 8760 mutex_enter(&un->un_pm_mutex);
8768 8761 }
8769 8762
8770 8763 /*
8771 8764 * Check whether there is already a timeout scheduled for power
8772 8765 	 * management. If yes, then don't lower the power here; that's
8773 8766 	 * the timeout handler's job.
8774 8767 */
8775 8768 if (un->un_pm_timeid != NULL) {
8776 8769 timeout_id_t temp_id = un->un_pm_timeid;
8777 8770 un->un_pm_timeid = NULL;
8778 8771 mutex_exit(&un->un_pm_mutex);
8779 8772 /*
8780 8773 * Timeout is active; cancel it.
8781 8774 * Note that it'll never be active on a device
8782 8775 * that does not support PM therefore we don't
8783 8776 * have to check before calling pm_idle_component.
8784 8777 */
8785 8778 (void) untimeout(temp_id);
8786 8779 (void) pm_idle_component(SD_DEVINFO(un), 0);
8787 8780
8788 8781 } else {
8789 8782 mutex_exit(&un->un_pm_mutex);
8790 8783 if ((un->un_f_pm_is_enabled == TRUE) &&
8791 8784 (pm_lower_power(SD_DEVINFO(un), 0, SD_PM_STATE_STOPPED(un))
8792 8785 != DDI_SUCCESS)) {
8793 8786 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8794 - "sd_dr_detach: Lower power request failed, ignoring.\n");
8787 + "%s: lower power request failed, ignoring\n",
8788 + __func__);
8795 8789 /*
8796 - * Fix for bug: 4297749, item # 13
8797 8790 * The above test now includes a check to see if PM is
8798 8791 			 * supported by this device before calling
8799 8792 * pm_lower_power().
8800 8793 * Note, the following is not dead code. The call to
8801 8794 * pm_lower_power above will generate a call back into
8802 8795 * our sdpower routine which might result in a timeout
8803 8796 * handler getting activated. Therefore the following
8804 8797 * code is valid and necessary.
8805 8798 */
8806 8799 mutex_enter(&un->un_pm_mutex);
8807 8800 if (un->un_pm_timeid != NULL) {
8808 8801 timeout_id_t temp_id = un->un_pm_timeid;
8809 8802 un->un_pm_timeid = NULL;
8810 8803 mutex_exit(&un->un_pm_mutex);
8811 8804 (void) untimeout(temp_id);
8812 8805 (void) pm_idle_component(SD_DEVINFO(un), 0);
8813 8806 } else {
8814 8807 mutex_exit(&un->un_pm_mutex);
8815 8808 }
8816 8809 }
8817 8810 }
8818 8811
8819 8812 /*
8820 8813 * Cleanup from the scsi_ifsetcap() calls (437868)
8821 8814 * Relocated here from above to be after the call to
8822 8815 * pm_lower_power, which was getting errors.
8823 8816 */
8824 8817 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
8825 8818 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
8826 8819
8827 8820 /*
8828 8821 	 * Currently, tagged queuing is supported per target by the HBA.
8829 8822 	 * Setting this per lun instance actually sets the capability of the
8830 8823 	 * whole target in the HBA, which affects those luns already attached
8831 8824 	 * on the same target. So during detach, we can only disable this
8832 8825 	 * capability when this is the only lun left on the target. By doing
8833 8826 	 * this, we assume a target has the same tagged queuing capability
8834 8827 	 * for every lun. The condition can be removed when the HBA is
8835 8828 	 * changed to support per-lun tagged queuing capability.
8836 8829 */
8837 8830 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) {
8838 8831 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
8839 8832 }
8840 8833
8841 8834 if (un->un_f_is_fibre == FALSE) {
8842 8835 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
8843 8836 }
8844 8837
8845 8838 /*
8846 8839 * Remove any event callbacks, fibre only
8847 8840 */
8848 8841 if (un->un_f_is_fibre == TRUE) {
8849 8842 if ((un->un_insert_event != NULL) &&
8850 8843 (ddi_remove_event_handler(un->un_insert_cb_id) !=
8851 8844 DDI_SUCCESS)) {
8852 8845 /*
8853 8846 * Note: We are returning here after having done
8854 8847 * substantial cleanup above. This is consistent
8855 8848 * with the legacy implementation but this may not
8856 8849 * be the right thing to do.
8857 8850 */
8858 8851 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8859 - "sd_dr_detach: Cannot cancel insert event\n");
8852 + "%s: cannot cancel insert event\n", __func__);
8860 8853 goto err_remove_event;
8861 8854 }
8862 8855 un->un_insert_event = NULL;
8863 8856
8864 8857 if ((un->un_remove_event != NULL) &&
8865 8858 (ddi_remove_event_handler(un->un_remove_cb_id) !=
8866 8859 DDI_SUCCESS)) {
8867 8860 /*
8868 8861 * Note: We are returning here after having done
8869 8862 * substantial cleanup above. This is consistent
8870 8863 * with the legacy implementation but this may not
8871 8864 * be the right thing to do.
8872 8865 */
8873 8866 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8874 - "sd_dr_detach: Cannot cancel remove event\n");
8867 + "%s: cannot cancel remove event\n", __func__);
8875 8868 goto err_remove_event;
8876 8869 }
8877 8870 un->un_remove_event = NULL;
8878 8871 }
8879 8872
8880 8873 /* Do not free the softstate if the callback routine is active */
8881 8874 sd_sync_with_callback(un);
8882 8875
8883 8876 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
8884 8877 cmlb_free_handle(&un->un_cmlbhandle);
8885 8878
8886 8879 /*
8887 8880 	 * Hold the detach mutex here, to make sure that no other thread can
8888 8881 	 * ever access a (partially) freed soft state structure.
8889 8882 */
8890 8883 mutex_enter(&sd_detach_mutex);
8891 8884
8892 8885 /*
8893 8886 * Clean up the soft state struct.
8894 8887 * Cleanup is done in reverse order of allocs/inits.
8895 8888 * At this point there should be no competing threads anymore.
8896 8889 */
8897 8890
8898 8891 scsi_fm_fini(devp);
8899 8892
8900 8893 /*
8901 8894 * Deallocate memory for SCSI FMA.
8902 8895 */
8903 8896 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));
8904 8897
8905 8898 /*
8906 8899 * Unregister and free device id if it was not registered
8907 8900 * by the transport.
8908 8901 */
8909 8902 if (un->un_f_devid_transport_defined == FALSE)
8910 8903 ddi_devid_unregister(devi);
8911 8904
8912 8905 /*
8913 8906 * free the devid structure if allocated before (by ddi_devid_init()
8914 8907 * or ddi_devid_get()).
8915 8908 */
8916 8909 if (un->un_devid) {
8917 8910 ddi_devid_free(un->un_devid);
8918 8911 un->un_devid = NULL;
8919 8912 }
8920 8913
8921 8914 /*
8922 8915 * Destroy wmap cache if it exists.
8923 8916 */
8924 8917 if (un->un_wm_cache != NULL) {
8925 8918 kmem_cache_destroy(un->un_wm_cache);
8926 8919 un->un_wm_cache = NULL;
8927 8920 }
8928 8921
8929 8922 /*
8930 8923 * kstat cleanup is done in detach for all device types (4363169).
8931 8924 * We do not want to fail detach if the device kstats are not deleted
8932 8925	 * since there is confusion about the devo_refcnt for the device.
8933 8926 * We just delete the kstats and let detach complete successfully.
8934 8927 */
8935 8928 if (un->un_stats != NULL) {
8936 8929 kstat_delete(un->un_stats);
8937 8930 un->un_stats = NULL;
8938 8931 }
8939 8932 if (un->un_errstats != NULL) {
8940 8933 kstat_delete(un->un_errstats);
8941 8934 un->un_errstats = NULL;
8942 8935 }
8943 8936
8944 8937 /* Remove partition stats */
8945 8938 if (un->un_f_pkstats_enabled) {
8946 8939 for (i = 0; i < NSDMAP; i++) {
8947 8940 if (un->un_pstats[i] != NULL) {
8948 8941 kstat_delete(un->un_pstats[i]);
8949 8942 un->un_pstats[i] = NULL;
8950 8943 }
8951 8944 }
8952 8945 }
8953 8946
8954 8947 /* Remove xbuf registration */
8955 8948 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
8956 8949 ddi_xbuf_attr_destroy(un->un_xbuf_attr);
8957 8950
8958 8951 /* Remove driver properties */
8959 8952 ddi_prop_remove_all(devi);
8960 8953
8961 8954 mutex_destroy(&un->un_pm_mutex);
8962 8955 cv_destroy(&un->un_pm_busy_cv);
8963 8956
8964 8957 cv_destroy(&un->un_wcc_cv);
8965 8958
8966 8959 /* Open/close semaphore */
8967 8960 sema_destroy(&un->un_semoclose);
8968 8961
8969 8962 /* Removable media condvar. */
8970 8963 cv_destroy(&un->un_state_cv);
8971 8964
8972 8965 /* Suspend/resume condvar. */
8973 8966 cv_destroy(&un->un_suspend_cv);
8974 8967 cv_destroy(&un->un_disk_busy_cv);
8975 8968
8976 8969 sd_free_rqs(un);
8977 8970
8978 8971 /* Free up soft state */
8979 8972 devp->sd_private = NULL;
8980 8973
8981 8974 bzero(un, sizeof (struct sd_lun));
8982 8975
8983 8976 ddi_soft_state_free(sd_state, instance);
8984 8977
8985 8978 mutex_exit(&sd_detach_mutex);
8986 8979
8987 8980 /* This frees up the INQUIRY data associated with the device. */
8988 8981 scsi_unprobe(devp);
8989 8982
8990 8983 /*
8991 8984	 * After successfully detaching an instance, we update the count of
8992 8985	 * luns attached on the corresponding target and controller for
8993 8986	 * parallel SCSI. This information is used when sd tries to set the
8994 8987	 * tagged queuing capability in the HBA.
8995 8988 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to
8996 8989 * check if the device is parallel SCSI. However, we don't need to
8997 8990 * check here because we've already checked during attach. No device
8998 8991 * that is not parallel SCSI is in the chain.
8999 8992 */
9000 8993 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) {
9001 8994 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH);
9002 8995 }
9003 8996
9004 8997 return (DDI_SUCCESS);
9005 8998
9006 8999 err_notclosed:
[ 122 lines elided ]
9007 9000 mutex_exit(SD_MUTEX(un));
9008 9001
9009 9002 err_stillbusy:
9010 9003 _NOTE(NO_COMPETING_THREADS_NOW);
9011 9004
9012 9005 err_remove_event:
9013 9006 mutex_enter(&sd_detach_mutex);
9014 9007 un->un_detach_count--;
9015 9008 mutex_exit(&sd_detach_mutex);
9016 9009
9017 - SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n");
9010 + SD_TRACE(SD_LOG_ATTACH_DETACH, un, "%s: exit failure\n", __func__);
9018 9011 return (DDI_FAILURE);
9019 9012 }
9020 9013
9021 9014
9022 9015 /*
9023 9016 * Function: sd_create_errstats
9024 9017 *
9025 9018 * Description: This routine instantiates the device error stats.
9026 9019 *
9027 9020 * Note: During attach the stats are instantiated first so they are
9028 9021 * available for attach-time routines that utilize the driver
9029 9022 * iopath to send commands to the device. The stats are initialized
9030 9023 * separately so data obtained during some attach-time routines is
9031 9024 * available. (4362483)
9032 9025 *
9033 9026 * Arguments: un - driver soft state (unit) structure
9034 9027 * instance - driver instance
9035 9028 *
9036 9029 * Context: Kernel thread context
9037 9030 */
9038 9031
9039 9032 static void
9040 9033 sd_create_errstats(struct sd_lun *un, int instance)
9041 9034 {
9042 9035 struct sd_errstats *stp;
9043 9036 char kstatmodule_err[KSTAT_STRLEN];
9044 9037 char kstatname[KSTAT_STRLEN];
9045 9038 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t));
9046 9039
9047 9040 ASSERT(un != NULL);
9048 9041
9049 9042 if (un->un_errstats != NULL) {
9050 9043 return;
9051 9044 }
9052 9045
9053 9046 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err),
9054 9047 "%serr", sd_label);
9055 9048 (void) snprintf(kstatname, sizeof (kstatname),
9056 9049 "%s%d,err", sd_label, instance);
9057 9050
9058 9051 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname,
9059 9052 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT);
9060 9053
9061 9054 if (un->un_errstats == NULL) {
9062 9055 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
9063 9056 "sd_create_errstats: Failed kstat_create\n");
9064 9057 return;
9065 9058 }
9066 9059
9067 9060 stp = (struct sd_errstats *)un->un_errstats->ks_data;
9068 9061 kstat_named_init(&stp->sd_softerrs, "Soft Errors",
9069 9062 KSTAT_DATA_UINT32);
9070 9063 kstat_named_init(&stp->sd_harderrs, "Hard Errors",
9071 9064 KSTAT_DATA_UINT32);
9072 9065 kstat_named_init(&stp->sd_transerrs, "Transport Errors",
9073 9066 KSTAT_DATA_UINT32);
9074 9067 kstat_named_init(&stp->sd_vid, "Vendor",
9075 9068 KSTAT_DATA_CHAR);
9076 9069 kstat_named_init(&stp->sd_pid, "Product",
9077 9070 KSTAT_DATA_CHAR);
9078 9071 kstat_named_init(&stp->sd_revision, "Revision",
9079 9072 KSTAT_DATA_CHAR);
9080 9073 kstat_named_init(&stp->sd_serial, "Serial No",
9081 9074 KSTAT_DATA_CHAR);
9082 9075 kstat_named_init(&stp->sd_capacity, "Size",
9083 9076 KSTAT_DATA_ULONGLONG);
9084 9077 kstat_named_init(&stp->sd_rq_media_err, "Media Error",
9085 9078 KSTAT_DATA_UINT32);
9086 9079 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready",
9087 9080 KSTAT_DATA_UINT32);
9088 9081 kstat_named_init(&stp->sd_rq_nodev_err, "No Device",
9089 9082 KSTAT_DATA_UINT32);
9090 9083 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable",
9091 9084 KSTAT_DATA_UINT32);
9092 9085 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request",
9093 9086 KSTAT_DATA_UINT32);
9094 9087 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis",
9095 9088 KSTAT_DATA_UINT32);
9096 9089
9097 9090 un->un_errstats->ks_private = un;
9098 9091 un->un_errstats->ks_update = nulldev;
9099 9092
9100 9093 kstat_install(un->un_errstats);
9101 9094 }
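
/*
 * Illustrative userland sketch (not part of this driver): reading the error
 * kstats created above via libkstat(3LIB); compile with -lkstat. The module
 * name "sderr" and kstat name "sd0,err" follow the snprintf() formats in
 * sd_create_errstats() for a hypothetical instance 0.
 */
#include <kstat.h>
#include <stdio.h>

int
main(void)
{
	kstat_ctl_t	*kc = kstat_open();
	kstat_t		*ksp;
	kstat_named_t	*kn;

	if (kc == NULL)
		return (1);
	/* module "sderr", instance 0, name "sd0,err" */
	ksp = kstat_lookup(kc, "sderr", 0, "sd0,err");
	if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1) {
		kn = kstat_data_lookup(ksp, "Hard Errors");
		if (kn != NULL)
			(void) printf("Hard Errors: %u\n", kn->value.ui32);
	}
	(void) kstat_close(kc);
	return (0);
}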
9102 9095
9103 9096
9104 9097 /*
9105 9098 * Function: sd_set_errstats
9106 9099 *
9107 9100 * Description: This routine sets the value of the vendor id, product id,
9108 9101 * revision, serial number, and capacity device error stats.
9109 9102 *
9110 9103 * Note: During attach the stats are instantiated first so they are
9111 9104 * available for attach-time routines that utilize the driver
9112 9105 * iopath to send commands to the device. The stats are initialized
9113 9106 * separately so data obtained during some attach-time routines is
9114 9107 * available. (4362483)
9115 9108 *
9116 9109 * Arguments: un - driver soft state (unit) structure
9117 9110 *
9118 9111 * Context: Kernel thread context
9119 9112 */
9120 9113
9121 9114 static void
9122 9115 sd_set_errstats(struct sd_lun *un)
9123 9116 {
9124 9117 struct sd_errstats *stp;
9125 9118 char *sn;
9126 9119
9127 9120 ASSERT(un != NULL);
9128 9121 ASSERT(un->un_errstats != NULL);
9129 9122 stp = (struct sd_errstats *)un->un_errstats->ks_data;
9130 9123 ASSERT(stp != NULL);
9131 9124 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
9132 9125 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
9133 9126 (void) strncpy(stp->sd_revision.value.c,
9134 9127 un->un_sd->sd_inq->inq_revision, 4);
9135 9128
9136 9129 /*
9137 9130 * All the errstats are persistent across detach/attach,
9138 9131	 * so reset all the errstats here in case of hot
9139 9132	 * replacement of disk drives, except for unchanged
9140 9133	 * Sun qualified drives.
9141 9134 */
9142 9135 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
9143 9136 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
9144 9137 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
9145 9138 stp->sd_softerrs.value.ui32 = 0;
9146 9139 stp->sd_harderrs.value.ui32 = 0;
9147 9140 stp->sd_transerrs.value.ui32 = 0;
9148 9141 stp->sd_rq_media_err.value.ui32 = 0;
9149 9142 stp->sd_rq_ntrdy_err.value.ui32 = 0;
9150 9143 stp->sd_rq_nodev_err.value.ui32 = 0;
9151 9144 stp->sd_rq_recov_err.value.ui32 = 0;
9152 9145 stp->sd_rq_illrq_err.value.ui32 = 0;
9153 9146 stp->sd_rq_pfa_err.value.ui32 = 0;
9154 9147 }
9155 9148
9156 9149 /*
9157 9150 * Set the "Serial No" kstat for Sun qualified drives (indicated by
9158 9151 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid)
9159 9152 * (4376302))
9160 9153 */
9161 9154 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
9162 9155 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
9163 9156 sizeof (SD_INQUIRY(un)->inq_serial));
9164 9157 } else {
9165 9158 /*
9166 9159 * Set the "Serial No" kstat for non-Sun qualified drives
9167 9160 */
9168 9161 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, SD_DEVINFO(un),
9169 9162 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9170 9163 INQUIRY_SERIAL_NO, &sn) == DDI_SUCCESS) {
9171 9164 (void) strlcpy(stp->sd_serial.value.c, sn,
9172 9165 sizeof (stp->sd_serial.value.c));
9173 9166 ddi_prop_free(sn);
9174 9167 }
9175 9168 }
9176 9169
9177 9170 if (un->un_f_blockcount_is_valid != TRUE) {
9178 9171 /*
9179 9172 * Set capacity error stat to 0 for no media. This ensures
9180 9173 * a valid capacity is displayed in response to 'iostat -E'
9181 9174 * when no media is present in the device.
9182 9175 */
9183 9176 stp->sd_capacity.value.ui64 = 0;
9184 9177 } else {
9185 9178 /*
9186 9179 * Multiply un_blockcount by un->un_sys_blocksize to get
9187 9180 * capacity.
9188 9181 *
9189 9182 * Note: for non-512 blocksize devices "un_blockcount" has been
9190 9183 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
9191 9184 * (un_tgt_blocksize / un->un_sys_blocksize).
9192 9185 */
9193 9186 stp->sd_capacity.value.ui64 = (uint64_t)
9194 9187 ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
9195 9188 }
9196 9189 }
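
/*
 * Worked example of the capacity computation above, with hypothetical
 * numbers: a 4K-native disk reporting 2,000,000 target blocks has
 * un_blockcount pre-scaled to 2,000,000 * (4096 / 512) = 16,000,000
 * system blocks, so sd_capacity = 16,000,000 * 512 = 8,192,000,000 bytes.
 */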
9197 9190
9198 9191
9199 9192 /*
9200 9193 * Function: sd_set_pstats
9201 9194 *
9202 9195 * Description: This routine instantiates and initializes the partition
9203 9196 * stats for each partition with more than zero blocks.
9204 9197 * (4363169)
9205 9198 *
9206 9199 * Arguments: un - driver soft state (unit) structure
9207 9200 *
9208 9201 * Context: Kernel thread context
9209 9202 */
9210 9203
9211 9204 static void
9212 9205 sd_set_pstats(struct sd_lun *un)
9213 9206 {
9214 9207 char kstatname[KSTAT_STRLEN];
9215 9208 int instance;
9216 9209 int i;
9217 9210 diskaddr_t nblks = 0;
9218 9211 char *partname = NULL;
9219 9212
9220 9213 ASSERT(un != NULL);
9221 9214
9222 9215 instance = ddi_get_instance(SD_DEVINFO(un));
9223 9216
9224 9217 /* Note:x86: is this a VTOC8/VTOC16 difference? */
9225 9218 for (i = 0; i < NSDMAP; i++) {
9226 9219
9227 9220 if (cmlb_partinfo(un->un_cmlbhandle, i,
9228 9221 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
9229 9222 continue;
9230 9223 mutex_enter(SD_MUTEX(un));
9231 9224
9232 9225 if ((un->un_pstats[i] == NULL) &&
9233 9226 (nblks != 0)) {
9234 9227
9235 9228 (void) snprintf(kstatname, sizeof (kstatname),
9236 9229 "%s%d,%s", sd_label, instance,
9237 9230 partname);
9238 9231
9239 9232 un->un_pstats[i] = kstat_create(sd_label,
9240 9233 instance, kstatname, "partition", KSTAT_TYPE_IO,
9241 9234 1, KSTAT_FLAG_PERSISTENT);
9242 9235 if (un->un_pstats[i] != NULL) {
9243 9236 un->un_pstats[i]->ks_lock = SD_MUTEX(un);
9244 9237 kstat_install(un->un_pstats[i]);
9245 9238 }
9246 9239 }
9247 9240 mutex_exit(SD_MUTEX(un));
9248 9241 }
9249 9242 }
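
/*
 * For example (hypothetical instance): with sd_label "sd", instance 3 and
 * partname "a", the snprintf() above produces the kstat name "sd3,a";
 * these per-partition I/O kstats are what iostat(1M) reports with -p.
 */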
9250 9243
9251 9244
9252 9245 #if (defined(__fibre))
9253 9246 /*
9254 9247 * Function: sd_init_event_callbacks
9255 9248 *
9256 9249 * Description: This routine initializes the insertion and removal event
9257 9250 * callbacks. (fibre only)
9258 9251 *
9259 9252 * Arguments: un - driver soft state (unit) structure
9260 9253 *
9261 9254 * Context: Kernel thread context
9262 9255 */
9263 9256
9264 9257 static void
9265 9258 sd_init_event_callbacks(struct sd_lun *un)
9266 9259 {
9267 9260 ASSERT(un != NULL);
9268 9261
9269 9262 if ((un->un_insert_event == NULL) &&
9270 9263 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
9271 9264 &un->un_insert_event) == DDI_SUCCESS)) {
9272 9265 /*
9273 9266 * Add the callback for an insertion event
9274 9267 */
9275 9268 (void) ddi_add_event_handler(SD_DEVINFO(un),
9276 9269 un->un_insert_event, sd_event_callback, (void *)un,
9277 9270 &(un->un_insert_cb_id));
9278 9271 }
9279 9272
9280 9273 if ((un->un_remove_event == NULL) &&
9281 9274 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
9282 9275 &un->un_remove_event) == DDI_SUCCESS)) {
9283 9276 /*
9284 9277 * Add the callback for a removal event
9285 9278 */
9286 9279 (void) ddi_add_event_handler(SD_DEVINFO(un),
9287 9280 un->un_remove_event, sd_event_callback, (void *)un,
9288 9281 &(un->un_remove_cb_id));
9289 9282 }
9290 9283 }
9291 9284
9292 9285
9293 9286 /*
9294 9287 * Function: sd_event_callback
9295 9288 *
9296 9289 * Description: This routine handles insert/remove events (photon). The
9297 9290	 * state is changed to OFFLINE, which can be used to suppress
9298 9291 * error msgs. (fibre only)
9299 9292 *
9300 9293 * Arguments: un - driver soft state (unit) structure
9301 9294 *
9302 9295 * Context: Callout thread context
9303 9296 */
9304 9297 /* ARGSUSED */
9305 9298 static void
9306 9299 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
9307 9300 void *bus_impldata)
9308 9301 {
9309 9302 struct sd_lun *un = (struct sd_lun *)arg;
9310 9303
9311 9304 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
9312 9305 if (event == un->un_insert_event) {
9313 9306 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
9314 9307 mutex_enter(SD_MUTEX(un));
9315 9308 if (un->un_state == SD_STATE_OFFLINE) {
9316 9309 if (un->un_last_state != SD_STATE_SUSPENDED) {
9317 9310 un->un_state = un->un_last_state;
9318 9311 } else {
9319 9312 /*
9320 9313 * We have gone through SUSPEND/RESUME while
9321 9314 * we were offline. Restore the last state
9322 9315 */
9323 9316 un->un_state = un->un_save_state;
9324 9317 }
9325 9318 }
9326 9319 mutex_exit(SD_MUTEX(un));
9327 9320
9328 9321 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
9329 9322 } else if (event == un->un_remove_event) {
9330 9323 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
9331 9324 mutex_enter(SD_MUTEX(un));
9332 9325 /*
9333 9326 * We need to handle an event callback that occurs during
9334 9327 * the suspend operation, since we don't prevent it.
9335 9328 */
9336 9329 if (un->un_state != SD_STATE_OFFLINE) {
9337 9330 if (un->un_state != SD_STATE_SUSPENDED) {
9338 9331 New_state(un, SD_STATE_OFFLINE);
9339 9332 } else {
9340 9333 un->un_last_state = SD_STATE_OFFLINE;
9341 9334 }
9342 9335 }
9343 9336 mutex_exit(SD_MUTEX(un));
9344 9337 } else {
9345 9338 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
9346 9339 "!Unknown event\n");
9347 9340 }
9348 9341
9349 9342 }
9350 9343 #endif
9351 9344
9352 9345 /*
9353 9346 * Values related to caching mode page depending on whether the unit is ATAPI.
9354 9347 */
9355 9348 #define SDC_CDB_GROUP(un) ((un->un_f_cfg_is_atapi == TRUE) ? \
9356 9349 CDB_GROUP1 : CDB_GROUP0)
9357 9350 #define SDC_HDRLEN(un) ((un->un_f_cfg_is_atapi == TRUE) ? \
9358 9351 MODE_HEADER_LENGTH_GRP2 : MODE_HEADER_LENGTH)
9359 9352 /*
9360 9353 * Use mode_cache_scsi3 to ensure we get all of the mode sense data, otherwise
9361 9354 * the mode select will fail (mode_cache_scsi3 is a superset of mode_caching).
9362 9355 */
9363 9356 #define SDC_BUFLEN(un) (SDC_HDRLEN(un) + MODE_BLK_DESC_LENGTH + \
9364 9357 sizeof (struct mode_cache_scsi3))
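
/*
 * For scale (a sketch assuming the usual sizes: MODE_HEADER_LENGTH is 4,
 * MODE_HEADER_LENGTH_GRP2 is 8, MODE_BLK_DESC_LENGTH is 8, and struct
 * mode_cache_scsi3 is 20 bytes), SDC_BUFLEN(un) works out to
 * 4 + 8 + 20 = 32 bytes for SCSI units and 8 + 8 + 20 = 36 for ATAPI.
 */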
9365 9358
9366 9359 static int
9367 9360 sd_get_caching_mode_page(sd_ssc_t *ssc, uchar_t page_control, uchar_t **header,
9368 9361 int *bdlen)
9369 9362 {
9370 9363 struct sd_lun *un = ssc->ssc_un;
9371 9364 struct mode_caching *mode_caching_page;
9372 9365 size_t buflen = SDC_BUFLEN(un);
9373 9366 int hdrlen = SDC_HDRLEN(un);
9374 9367 int rval;
9375 9368
9376 9369 /*
9377 9370	 * Do a test unit ready; otherwise a mode sense may not work if this
9378 9371 * is the first command sent to the device after boot.
9379 9372 */
9380 9373 if (sd_send_scsi_TEST_UNIT_READY(ssc, 0) != 0)
9381 9374 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9382 9375
9383 9376 /*
9384 9377 * Allocate memory for the retrieved mode page and its headers. Set
9385 9378 * a pointer to the page itself.
9386 9379 */
9387 9380 *header = kmem_zalloc(buflen, KM_SLEEP);
9388 9381
9389 9382 /* Get the information from the device */
9390 9383 rval = sd_send_scsi_MODE_SENSE(ssc, SDC_CDB_GROUP(un), *header, buflen,
9391 9384 page_control | MODEPAGE_CACHING, SD_PATH_DIRECT);
9392 9385 if (rval != 0) {
9393 9386 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un, "%s: Mode Sense Failed\n",
9394 9387 __func__);
9395 9388 goto mode_sense_failed;
9396 9389 }
9397 9390
9398 9391 /*
9399 9392 * Determine size of Block Descriptors in order to locate
9400 9393 * the mode page data. ATAPI devices return 0, SCSI devices
9401 9394 * should return MODE_BLK_DESC_LENGTH.
9402 9395 */
9403 9396 if (un->un_f_cfg_is_atapi == TRUE) {
9404 9397 struct mode_header_grp2 *mhp =
9405 9398 (struct mode_header_grp2 *)(*header);
9406 9399 *bdlen = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
9407 9400 } else {
9408 9401 *bdlen = ((struct mode_header *)(*header))->bdesc_length;
9409 9402 }
9410 9403
9411 9404 if (*bdlen > MODE_BLK_DESC_LENGTH) {
9412 9405 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0,
9413 9406 "%s: Mode Sense returned invalid block descriptor length\n",
9414 9407 __func__);
9415 9408 rval = EIO;
9416 9409 goto mode_sense_failed;
9417 9410 }
9418 9411
9419 9412 mode_caching_page = (struct mode_caching *)(*header + hdrlen + *bdlen);
9420 9413 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
9421 9414 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
9422 9415 "%s: Mode Sense caching page code mismatch %d\n",
9423 9416 __func__, mode_caching_page->mode_page.code);
9424 9417 rval = EIO;
9425 9418 }
9426 9419
9427 9420 mode_sense_failed:
9428 9421 if (rval != 0) {
9429 9422 kmem_free(*header, buflen);
9430 9423 *header = NULL;
9431 9424 *bdlen = 0;
9432 9425 }
9433 9426 return (rval);
9434 9427 }
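
/*
 * Layout of the buffer returned in *header (a sketch; the ATAPI case uses
 * the group-2 header and may report a zero-length block descriptor):
 *
 *	+----------------------------+  offset 0
 *	| mode header (hdrlen)       |
 *	+----------------------------+  offset hdrlen
 *	| block descriptor (*bdlen)  |
 *	+----------------------------+  offset hdrlen + *bdlen
 *	| caching mode page (0x08)   |
 *	+----------------------------+
 */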
9435 9428
9436 9429 /*
9437 9430 * Function: sd_cache_control()
9438 9431 *
9439 9432 * Description: This routine is the driver entry point for setting
9440 9433 * read and write caching by modifying the WCE (write cache
9441 9434 * enable) and RCD (read cache disable) bits of mode
9442 9435 * page 8 (MODEPAGE_CACHING).
9443 9436 *
9444 9437 * Arguments: ssc - ssc contains pointer to driver soft state
9445 9438 * (unit) structure for this target.
9446 9439 * rcd_flag - flag for controlling the read cache
9447 9440 * wce_flag - flag for controlling the write cache
9448 9441 *
9449 9442 * Return Code: EIO
9450 9443 * code returned by sd_send_scsi_MODE_SENSE and
9451 9444 * sd_send_scsi_MODE_SELECT
9452 9445 *
9453 9446 * Context: Kernel Thread
9454 9447 */
9455 9448
9456 9449 static int
9457 9450 sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag)
9458 9451 {
9459 9452 struct sd_lun *un = ssc->ssc_un;
9460 9453 struct mode_caching *mode_caching_page;
9461 9454 uchar_t *header;
9462 9455 size_t buflen = SDC_BUFLEN(un);
9463 9456 int hdrlen = SDC_HDRLEN(un);
9464 9457 int bdlen;
9465 9458 int rval;
9466 9459
9467 9460 rval = sd_get_caching_mode_page(ssc, MODEPAGE_CURRENT, &header, &bdlen);
9468 9461 switch (rval) {
9469 9462 case 0:
9470 9463 /* Check the relevant bits on successful mode sense */
9471 9464 mode_caching_page = (struct mode_caching *)(header + hdrlen +
9472 9465 bdlen);
9473 9466 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) ||
9474 9467 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) ||
9475 9468 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) ||
9476 9469 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) {
9477 9470 size_t sbuflen;
9478 9471 uchar_t save_pg;
9479 9472
9480 9473 /*
9481 9474 * Construct select buffer length based on the
9482 9475 * length of the sense data returned.
9483 9476 */
9484 9477 sbuflen = hdrlen + bdlen + sizeof (struct mode_page) +
9485 9478 (int)mode_caching_page->mode_page.length;
9486 9479
9487 9480 /* Set the caching bits as requested */
9488 9481 if (rcd_flag == SD_CACHE_ENABLE)
9489 9482 mode_caching_page->rcd = 0;
9490 9483 else if (rcd_flag == SD_CACHE_DISABLE)
9491 9484 mode_caching_page->rcd = 1;
9492 9485
9493 9486 if (wce_flag == SD_CACHE_ENABLE)
9494 9487 mode_caching_page->wce = 1;
9495 9488 else if (wce_flag == SD_CACHE_DISABLE)
9496 9489 mode_caching_page->wce = 0;
9497 9490
9498 9491 /*
9499 9492 * Save the page if the mode sense says the
9500 9493 * drive supports it.
9501 9494 */
9502 9495 save_pg = mode_caching_page->mode_page.ps ?
9503 9496 SD_SAVE_PAGE : SD_DONTSAVE_PAGE;
9504 9497
9505 9498 /* Clear reserved bits before mode select */
9506 9499 mode_caching_page->mode_page.ps = 0;
9507 9500
9508 9501 /*
9509 9502 * Clear out mode header for mode select.
9510 9503 * The rest of the retrieved page will be reused.
9511 9504 */
9512 9505 bzero(header, hdrlen);
9513 9506
9514 9507 if (un->un_f_cfg_is_atapi == TRUE) {
9515 9508 struct mode_header_grp2 *mhp =
9516 9509 (struct mode_header_grp2 *)header;
9517 9510 mhp->bdesc_length_hi = bdlen >> 8;
9518 9511 mhp->bdesc_length_lo = (uchar_t)bdlen & 0xff;
9519 9512 } else {
9520 9513 ((struct mode_header *)header)->bdesc_length =
9521 9514 bdlen;
9522 9515 }
9523 9516
9524 9517 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9525 9518
9526 9519 /* Issue mode select to change the cache settings */
9527 9520 rval = sd_send_scsi_MODE_SELECT(ssc, SDC_CDB_GROUP(un),
9528 9521 header, sbuflen, save_pg, SD_PATH_DIRECT);
9529 9522 }
9530 9523 kmem_free(header, buflen);
9531 9524 break;
9532 9525 case EIO:
9533 9526 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
9534 9527 break;
9535 9528 default:
9536 9529 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9537 9530 break;
9538 9531 }
9539 9532
9540 9533 return (rval);
9541 9534 }
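
/*
 * Illustrative userland sketch (not part of this driver): the write-cache
 * path above is reachable through the DKIOCGETWCE/DKIOCSETWCE ioctls from
 * dkio(7I). The device path is hypothetical.
 */
#include <sys/dkio.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int	fd, wce;

	fd = open("/dev/rdsk/c0t0d0s0", O_RDONLY);
	if (fd < 0)
		return (1);
	if (ioctl(fd, DKIOCGETWCE, &wce) == 0)
		(void) printf("write cache %sabled\n", wce ? "en" : "dis");
	wce = 0;			/* request write cache disabled */
	if (ioctl(fd, DKIOCSETWCE, &wce) != 0)
		perror("DKIOCSETWCE");
	(void) close(fd);
	return (0);
}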
9542 9535
9543 9536
9544 9537 /*
9545 9538 * Function: sd_get_write_cache_enabled()
9546 9539 *
9547 9540 * Description: This routine is the driver entry point for determining if write
9548 9541 * caching is enabled. It examines the WCE (write cache enable)
9549 9542 * bits of mode page 8 (MODEPAGE_CACHING) with Page Control field
9550 9543 * bits set to MODEPAGE_CURRENT.
9551 9544 *
9552 9545 * Arguments: ssc - ssc contains pointer to driver soft state
9553 9546 * (unit) structure for this target.
9554 9547 * is_enabled - pointer to int where write cache enabled state
9555 9548 * is returned (non-zero -> write cache enabled)
9556 9549 *
9557 9550 * Return Code: EIO
9558 9551 * code returned by sd_send_scsi_MODE_SENSE
9559 9552 *
9560 9553 * Context: Kernel Thread
9561 9554 *
9562 9555	 * NOTE: If an ioctl is added to disable write cache, this sequence should
9563 9556 * be followed so that no locking is required for accesses to
9564 9557 * un->un_f_write_cache_enabled:
9565 9558 * do mode select to clear wce
9566 9559 * do synchronize cache to flush cache
9567 9560 * set un->un_f_write_cache_enabled = FALSE
9568 9561 *
9569 9562 * Conversely, an ioctl to enable the write cache should be done
9570 9563 * in this order:
9571 9564 * set un->un_f_write_cache_enabled = TRUE
9572 9565 * do mode select to set wce
9573 9566 */
9574 9567
9575 9568 static int
9576 9569 sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled)
9577 9570 {
9578 9571 struct sd_lun *un = ssc->ssc_un;
9579 9572 struct mode_caching *mode_caching_page;
9580 9573 uchar_t *header;
9581 9574 size_t buflen = SDC_BUFLEN(un);
9582 9575 int hdrlen = SDC_HDRLEN(un);
9583 9576 int bdlen;
9584 9577 int rval;
9585 9578
9586 9579 /* In case of error, flag as enabled */
9587 9580 *is_enabled = TRUE;
9588 9581
9589 9582 rval = sd_get_caching_mode_page(ssc, MODEPAGE_CURRENT, &header, &bdlen);
9590 9583 switch (rval) {
9591 9584 case 0:
9592 9585 mode_caching_page = (struct mode_caching *)(header + hdrlen +
9593 9586 bdlen);
9594 9587 *is_enabled = mode_caching_page->wce;
9595 9588 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
9596 9589 kmem_free(header, buflen);
9597 9590 break;
9598 9591 case EIO: {
9599 9592 /*
9600 9593	 * Some disks do not support Mode Sense(6), so we
9601 9594 * should ignore this kind of error (sense key is
9602 9595 * 0x5 - illegal request).
9603 9596 */
9604 9597 uint8_t *sensep;
9605 9598 int senlen;
9606 9599
9607 9600 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
9608 9601 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
9609 9602 ssc->ssc_uscsi_cmd->uscsi_rqresid);
9610 9603
9611 9604 if (senlen > 0 &&
9612 9605 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
9613 9606 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
9614 9607 } else {
9615 9608 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
9616 9609 }
9617 9610 break;
9618 9611 }
9619 9612 default:
9620 9613 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9621 9614 break;
9622 9615 }
9623 9616
9624 9617 return (rval);
9625 9618 }
9626 9619
9627 9620 /*
9628 9621 * Function: sd_get_write_cache_changeable()
9629 9622 *
9630 9623 * Description: This routine is the driver entry point for determining if write
9631 9624 * caching is changeable. It examines the WCE (write cache enable)
9632 9625 * bits of mode page 8 (MODEPAGE_CACHING) with Page Control field
9633 9626 * bits set to MODEPAGE_CHANGEABLE.
9634 9627 *
9635 9628 * Arguments: ssc - ssc contains pointer to driver soft state
9636 9629 * (unit) structure for this target.
9637 9630 * is_changeable - pointer to int where write cache changeable
9638 9631 * state is returned (non-zero -> write cache
9639 9632 * changeable)
9640 9633 *
9641 9634 * Context: Kernel Thread
9642 9635 */
9643 9636
9644 9637 static void
9645 9638 sd_get_write_cache_changeable(sd_ssc_t *ssc, int *is_changeable)
9646 9639 {
9647 9640 struct sd_lun *un = ssc->ssc_un;
9648 9641 struct mode_caching *mode_caching_page;
9649 9642 uchar_t *header;
9650 9643 size_t buflen = SDC_BUFLEN(un);
9651 9644 int hdrlen = SDC_HDRLEN(un);
9652 9645 int bdlen;
9653 9646 int rval;
9654 9647
9655 9648	 /* In case of error, flag as changeable */
9656 9649 *is_changeable = TRUE;
9657 9650
9658 9651 rval = sd_get_caching_mode_page(ssc, MODEPAGE_CHANGEABLE, &header,
9659 9652 &bdlen);
9660 9653 switch (rval) {
9661 9654 case 0:
9662 9655 mode_caching_page = (struct mode_caching *)(header + hdrlen +
9663 9656 bdlen);
9664 9657 *is_changeable = mode_caching_page->wce;
9665 9658 kmem_free(header, buflen);
9666 9659 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
9667 9660 break;
9668 9661 case EIO:
9669 9662 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
9670 9663 break;
9671 9664 default:
9672 9665 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9673 9666 break;
9674 9667 }
9675 9668 }
9676 9669
9677 9670 /*
9678 9671 * Function: sd_get_nv_sup()
9679 9672 *
9680 9673 * Description: This routine is the driver entry point for
9681 9674 * determining whether non-volatile cache is supported. This
9682 9675 * determination process works as follows:
9683 9676 *
9684 9677	 * 1. sd first queries sd.conf to see whether the
9685 9678	 * suppress_cache_flush bit is set for this device.
9686 9679	 *
9687 9680	 * 2. if it is not set there, sd queries the internal disk table.
9688 9681	 *
9689 9682	 * 3. if either sd.conf or the internal disk table specifies that
9690 9683	 * cache flushes be suppressed, we don't bother checking the
9691 9684	 * NV_SUP bit.
9692 9685 *
9693 9686 * If SUPPRESS_CACHE_FLUSH bit is not set to 1, sd queries
9694 9687 * the optional INQUIRY VPD page 0x86. If the device
9695 9688 * supports VPD page 0x86, sd examines the NV_SUP
9696 9689 * (non-volatile cache support) bit in the INQUIRY VPD page
9697 9690 * 0x86:
9698 9691	 * o If the NV_SUP bit is set, sd assumes the device has a
9699 9692	 * non-volatile cache and sets
9700 9693	 * un_f_sync_nv_supported to TRUE.
9701 9694	 * o Otherwise the cache is not non-volatile, and
9702 9695	 * un_f_sync_nv_supported is set to FALSE.
9703 9696 *
9704 9697	 * Arguments: ssc - ssc contains pointer to driver soft state
9705 9698	 * (unit) structure for this target.
9706 9699	 *
9708 9701 * Context: Kernel Thread
9709 9702 */
9710 9703
9711 9704 static void
9712 9705 sd_get_nv_sup(sd_ssc_t *ssc)
9713 9706 {
9714 9707 int rval = 0;
9715 9708 uchar_t *inq86 = NULL;
9716 9709 size_t inq86_len = MAX_INQUIRY_SIZE;
9717 9710 size_t inq86_resid = 0;
9718 9711 struct dk_callback *dkc;
9719 9712 struct sd_lun *un;
9720 9713
9721 9714 ASSERT(ssc != NULL);
9722 9715 un = ssc->ssc_un;
9723 9716 ASSERT(un != NULL);
9724 9717
9725 9718 mutex_enter(SD_MUTEX(un));
9726 9719
9727 9720 /*
9728 9721	 * Be conservative about the device's support of the
9729 9722	 * SYNC_NV bit: un_f_sync_nv_supported is
9730 9723	 * initialized to false.
9731 9724 */
9732 9725 un->un_f_sync_nv_supported = FALSE;
9733 9726
9734 9727 /*
9735 9728 * If either sd.conf or internal disk table
9736 9729 * specifies cache flush be suppressed, then
9737 9730 * we don't bother checking NV_SUP bit.
9738 9731 */
9739 9732 if (un->un_f_suppress_cache_flush == TRUE) {
9740 9733 mutex_exit(SD_MUTEX(un));
9741 9734 return;
9742 9735 }
9743 9736
9744 9737 if (sd_check_vpd_page_support(ssc) == 0 &&
9745 9738 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
9746 9739 mutex_exit(SD_MUTEX(un));
9747 9740 /* collect page 86 data if available */
9748 9741 inq86 = kmem_zalloc(inq86_len, KM_SLEEP);
9749 9742
9750 9743 rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len,
9751 9744 0x01, 0x86, &inq86_resid);
9752 9745
9753 9746 if (rval == 0 && (inq86_len - inq86_resid > 6)) {
9754 9747 SD_TRACE(SD_LOG_COMMON, un,
9755 9748	 "sd_get_nv_sup: successfully got "
9756 9749	 "VPD page: %x "
9757 9750	 "PAGE LENGTH: %x BYTE 6: %x\n",
9758 9751 inq86[1], inq86[3], inq86[6]);
9759 9752
9760 9753 mutex_enter(SD_MUTEX(un));
9761 9754 /*
9762 9755	 * Check the value of the NV_SUP bit: only if the device
9763 9756	 * reports the NV_SUP bit as 1 will
9764 9757	 * un_f_sync_nv_supported be set to true.
9765 9758 */
9766 9759 if (inq86[6] & SD_VPD_NV_SUP) {
9767 9760 un->un_f_sync_nv_supported = TRUE;
9768 9761 }
9769 9762 mutex_exit(SD_MUTEX(un));
9770 9763 } else if (rval != 0) {
9771 9764 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9772 9765 }
9773 9766
9774 9767 kmem_free(inq86, inq86_len);
9775 9768 } else {
9776 9769 mutex_exit(SD_MUTEX(un));
9777 9770 }
9778 9771
9779 9772 /*
9780 9773 * Send a SYNC CACHE command to check whether
9781 9774	 * the SYNC_NV bit is supported. By this point,
9782 9775	 * un_f_sync_nv_supported should hold the correct value.
9783 9776 */
9784 9777 mutex_enter(SD_MUTEX(un));
9785 9778 if (un->un_f_sync_nv_supported) {
9786 9779 mutex_exit(SD_MUTEX(un));
9787 9780 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
9788 9781 dkc->dkc_flag = FLUSH_VOLATILE;
9789 9782 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
9790 9783
9791 9784 /*
9792 9785 * Send a TEST UNIT READY command to the device. This should
9793 9786 * clear any outstanding UNIT ATTENTION that may be present.
9794 9787 */
9795 9788 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
9796 9789 if (rval != 0)
9797 9790 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9798 9791
9799 9792 kmem_free(dkc, sizeof (struct dk_callback));
9800 9793 } else {
9801 9794 mutex_exit(SD_MUTEX(un));
9802 9795 }
9803 9796
9804 9797	 SD_TRACE(SD_LOG_COMMON, un, "sd_get_nv_sup: "
9805 9798	 "un_f_suppress_cache_flush is set to %d\n",
9806 9799 un->un_f_suppress_cache_flush);
9807 9800 }
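
/*
 * Minimal sketch of the VPD page 0x86 test above, factored out for
 * clarity (hypothetical helper; byte 6 of the Extended INQUIRY Data
 * page carries the NV_SUP bit that SD_VPD_NV_SUP masks):
 */
static boolean_t
sd_vpd86_nv_sup(const uchar_t *inq86, size_t valid_len)
{
	/* need the page header plus byte 6 to be valid */
	if (valid_len <= 6)
		return (B_FALSE);
	return ((inq86[6] & SD_VPD_NV_SUP) ? B_TRUE : B_FALSE);
}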
9808 9801
9809 9802 /*
9810 9803 * Function: sd_make_device
9811 9804 *
9812 9805 * Description: Utility routine to return the Solaris device number from
9813 9806 * the data in the device's dev_info structure.
9814 9807 *
9815 9808 * Return Code: The Solaris device number
9816 9809 *
9817 9810 * Context: Any
9818 9811 */
9819 9812
9820 9813 static dev_t
9821 9814 sd_make_device(dev_info_t *devi)
9822 9815 {
9823 9816 return (makedevice(ddi_driver_major(devi),
9824 9817 ddi_get_instance(devi) << SDUNIT_SHIFT));
9825 9818 }
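
/*
 * Sketch of the inverse mapping, using the SDUNIT()/SDPART() macros used
 * elsewhere in this file: for dev = sd_make_device(devi), SDUNIT(dev)
 * recovers the instance (minor >> SDUNIT_SHIFT) and SDPART(dev) the
 * partition encoded in the low bits of the minor number.
 */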
9826 9819
9827 9820
9828 9821 /*
9829 9822 * Function: sd_pm_entry
9830 9823 *
9831 9824 * Description: Called at the start of a new command to manage power
9832 9825 * and busy status of a device. This includes determining whether
9833 9826 * the current power state of the device is sufficient for
9834 9827 * performing the command or whether it must be changed.
9835 9828 * The PM framework is notified appropriately.
9836 9829	 * Only with a return status of DDI_SUCCESS will the
9837 9830	 * component be marked busy to the framework.
9838 9831	 *
9839 9832	 * All callers of sd_pm_entry must check the return status
9840 9833	 * and only call sd_pm_exit if it was DDI_SUCCESS. A status
9841 9834	 * of DDI_FAILURE indicates the device failed to power up.
9842 9835	 * In this case un_pm_count has been adjusted so the result
9843 9836	 * on exit is still powered down, i.e. count is less than 0.
9844 9837 * Calling sd_pm_exit with this count value hits an ASSERT.
9845 9838 *
9846 9839 * Return Code: DDI_SUCCESS or DDI_FAILURE
9847 9840 *
9848 9841 * Context: Kernel thread context.
9849 9842 */
9850 9843
9851 9844 static int
9852 9845 sd_pm_entry(struct sd_lun *un)
9853 9846 {
9854 9847 int return_status = DDI_SUCCESS;
9855 9848
9856 9849 ASSERT(!mutex_owned(SD_MUTEX(un)));
9857 9850 ASSERT(!mutex_owned(&un->un_pm_mutex));
9858 9851
9859 9852 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");
9860 9853
9861 9854 if (un->un_f_pm_is_enabled == FALSE) {
9862 9855 SD_TRACE(SD_LOG_IO_PM, un,
9863 9856 "sd_pm_entry: exiting, PM not enabled\n");
9864 9857 return (return_status);
9865 9858 }
9866 9859
9867 9860 /*
9868 9861 * Just increment a counter if PM is enabled. On the transition from
9869 9862 * 0 ==> 1, mark the device as busy. The iodone side will decrement
9870 9863 * the count with each IO and mark the device as idle when the count
9871 9864 * hits 0.
9872 9865 *
9873 9866 * If the count is less than 0 the device is powered down. If a powered
9874 9867 * down device is successfully powered up then the count must be
9875 9868 * incremented to reflect the power up. Note that it'll get incremented
9876 9869 * a second time to become busy.
9877 9870 *
9878 9871 * Because the following has the potential to change the device state
9879 9872 * and must release the un_pm_mutex to do so, only one thread can be
9880 9873 * allowed through at a time.
9881 9874 */
9882 9875
9883 9876 mutex_enter(&un->un_pm_mutex);
9884 9877 while (un->un_pm_busy == TRUE) {
9885 9878 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex);
9886 9879 }
9887 9880 un->un_pm_busy = TRUE;
9888 9881
9889 9882 if (un->un_pm_count < 1) {
9890 9883
9891 9884 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n");
9892 9885
9893 9886 /*
9894 9887 * Indicate we are now busy so the framework won't attempt to
9895 9888 * power down the device. This call will only fail if either
9896 9889 * we passed a bad component number or the device has no
9897 9890 * components. Neither of these should ever happen.
9898 9891 */
9899 9892 mutex_exit(&un->un_pm_mutex);
9900 9893 return_status = pm_busy_component(SD_DEVINFO(un), 0);
9901 9894 ASSERT(return_status == DDI_SUCCESS);
9902 9895
9903 9896 mutex_enter(&un->un_pm_mutex);
9904 9897
9905 9898 if (un->un_pm_count < 0) {
9906 9899 mutex_exit(&un->un_pm_mutex);
9907 9900
9908 9901 SD_TRACE(SD_LOG_IO_PM, un,
9909 9902 "sd_pm_entry: power up component\n");
9910 9903
9911 9904 /*
9912 9905	 * pm_raise_power will cause sdpower to be called,
9913 9906	 * which brings the device power level to the
9914 9907	 * desired state. If successful, un_pm_count and
9915 9908	 * un_power_level will be updated appropriately.
9916 9909 */
9917 9910 return_status = pm_raise_power(SD_DEVINFO(un), 0,
9918 9911 SD_PM_STATE_ACTIVE(un));
9919 9912
9920 9913 mutex_enter(&un->un_pm_mutex);
9921 9914
9922 9915 if (return_status != DDI_SUCCESS) {
9923 9916 /*
9924 9917 * Power up failed.
9925 9918 * Idle the device and adjust the count
9926 9919 * so the result on exit is that we're
9927 9920 * still powered down, ie. count is less than 0.
9928 9921 */
9929 9922 SD_TRACE(SD_LOG_IO_PM, un,
9930 9923 "sd_pm_entry: power up failed,"
9931 9924 " idle the component\n");
9932 9925
9933 9926 (void) pm_idle_component(SD_DEVINFO(un), 0);
9934 9927 un->un_pm_count--;
9935 9928 } else {
9936 9929 /*
9937 9930 * Device is powered up, verify the
9938 9931 * count is non-negative.
9939 9932 * This is debug only.
9940 9933 */
9941 9934 ASSERT(un->un_pm_count == 0);
9942 9935 }
9943 9936 }
9944 9937
9945 9938 if (return_status == DDI_SUCCESS) {
9946 9939 /*
9947 9940 * For performance, now that the device has been tagged
9948 9941 * as busy, and it's known to be powered up, update the
9949 9942 * chain types to use jump tables that do not include
9950 9943 * pm. This significantly lowers the overhead and
9951 9944 * therefore improves performance.
9952 9945 */
9953 9946
9954 9947 mutex_exit(&un->un_pm_mutex);
9955 9948 mutex_enter(SD_MUTEX(un));
9956 9949 SD_TRACE(SD_LOG_IO_PM, un,
9957 9950 "sd_pm_entry: changing uscsi_chain_type from %d\n",
9958 9951 un->un_uscsi_chain_type);
9959 9952
9960 9953 if (un->un_f_non_devbsize_supported) {
9961 9954 un->un_buf_chain_type =
9962 9955 SD_CHAIN_INFO_RMMEDIA_NO_PM;
9963 9956 } else {
9964 9957 un->un_buf_chain_type =
9965 9958 SD_CHAIN_INFO_DISK_NO_PM;
9966 9959 }
9967 9960 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
9968 9961
9969 9962 SD_TRACE(SD_LOG_IO_PM, un,
9970 9963 " changed uscsi_chain_type to %d\n",
9971 9964 un->un_uscsi_chain_type);
9972 9965 mutex_exit(SD_MUTEX(un));
9973 9966 mutex_enter(&un->un_pm_mutex);
9974 9967
9975 9968 if (un->un_pm_idle_timeid == NULL) {
9976 9969 /* 300 ms. */
9977 9970 un->un_pm_idle_timeid =
9978 9971 timeout(sd_pm_idletimeout_handler, un,
9979 9972 (drv_usectohz((clock_t)300000)));
9980 9973 /*
9981 9974 * Include an extra call to busy which keeps the
9982 9975	 * device busy with respect to the PM layer
9983 9976 * until the timer fires, at which time it'll
9984 9977 * get the extra idle call.
9985 9978 */
9986 9979 (void) pm_busy_component(SD_DEVINFO(un), 0);
9987 9980 }
9988 9981 }
9989 9982 }
9990 9983 un->un_pm_busy = FALSE;
9991 9984 /* Next... */
9992 9985 cv_signal(&un->un_pm_busy_cv);
9993 9986
9994 9987 un->un_pm_count++;
9995 9988
9996 9989 SD_TRACE(SD_LOG_IO_PM, un,
9997 9990 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count);
9998 9991
9999 9992 mutex_exit(&un->un_pm_mutex);
10000 9993
10001 9994 return (return_status);
10002 9995 }
10003 9996
10004 9997
10005 9998 /*
10006 9999 * Function: sd_pm_exit
10007 10000 *
10008 10001 * Description: Called at the completion of a command to manage busy
10009 10002 * status for the device. If the device becomes idle the
10010 10003 * PM framework is notified.
10011 10004 *
10012 10005 * Context: Kernel thread context
10013 10006 */
10014 10007
10015 10008 static void
10016 10009 sd_pm_exit(struct sd_lun *un)
10017 10010 {
10018 10011 ASSERT(!mutex_owned(SD_MUTEX(un)));
10019 10012 ASSERT(!mutex_owned(&un->un_pm_mutex));
10020 10013
10021 10014 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n");
10022 10015
10023 10016 /*
10024 10017 * After attach the following flag is only read, so don't
10025 10018 * take the penalty of acquiring a mutex for it.
10026 10019 */
10027 10020 if (un->un_f_pm_is_enabled == TRUE) {
10028 10021
10029 10022 mutex_enter(&un->un_pm_mutex);
10030 10023 un->un_pm_count--;
10031 10024
10032 10025 SD_TRACE(SD_LOG_IO_PM, un,
10033 10026 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count);
10034 10027
10035 10028 ASSERT(un->un_pm_count >= 0);
10036 10029 if (un->un_pm_count == 0) {
10037 10030 mutex_exit(&un->un_pm_mutex);
10038 10031
10039 10032 SD_TRACE(SD_LOG_IO_PM, un,
10040 10033 "sd_pm_exit: idle component\n");
10041 10034
10042 10035 (void) pm_idle_component(SD_DEVINFO(un), 0);
10043 10036
10044 10037 } else {
10045 10038 mutex_exit(&un->un_pm_mutex);
10046 10039 }
10047 10040 }
10048 10041
10049 10042 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n");
10050 10043 }
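
/*
 * Canonical pairing of the two routines above (a sketch with a
 * hypothetical helper name): sd_pm_exit() may only be called after a
 * successful sd_pm_entry(), as done by the open/close and ioctl paths
 * in this file.
 */
static int
sd_pm_do_work(struct sd_lun *un)
{
	int	rval = EIO;

	if (sd_pm_entry(un) == DDI_SUCCESS) {
		/* issue commands that require a powered-up device here */
		rval = 0;
		sd_pm_exit(un);
	}
	return (rval);
}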
10051 10044
10052 10045
10053 10046 /*
10054 10047 * Function: sdopen
10055 10048 *
10056 10049 * Description: Driver's open(9e) entry point function.
10057 10050 *
10058 10051	 * Arguments: dev_p - pointer to device number
10059 10052 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
10060 10053 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10061 10054 * cred_p - user credential pointer
10062 10055 *
10063 10056 * Return Code: EINVAL
10064 10057 * ENXIO
10065 10058 * EIO
10066 10059 * EROFS
10067 10060 * EBUSY
10068 10061 *
10069 10062 * Context: Kernel thread context
10070 10063 */
10071 10064 /* ARGSUSED */
10072 10065 static int
10073 10066 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
10074 10067 {
10075 10068 struct sd_lun *un;
10076 10069 int nodelay;
10077 10070 int part;
10078 10071 uint64_t partmask;
10079 10072 int instance;
10080 10073 dev_t dev;
10081 10074 int rval = EIO;
10082 10075 diskaddr_t nblks = 0;
10083 10076 diskaddr_t label_cap;
10084 10077
10085 10078 /* Validate the open type */
10086 10079 if (otyp >= OTYPCNT) {
10087 10080 return (EINVAL);
10088 10081 }
10089 10082
10090 10083 dev = *dev_p;
10091 10084 instance = SDUNIT(dev);
10092 10085 mutex_enter(&sd_detach_mutex);
10093 10086
10094 10087 /*
10095 10088 * Fail the open if there is no softstate for the instance, or
10096 10089 * if another thread somewhere is trying to detach the instance.
10097 10090 */
10098 10091 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
10099 10092 (un->un_detach_count != 0)) {
10100 10093 mutex_exit(&sd_detach_mutex);
10101 10094 /*
10102 10095 * The probe cache only needs to be cleared when open (9e) fails
10103 10096 * with ENXIO (4238046).
10104 10097 */
10105 10098	 /*
10106 10099	 * Unconditionally clearing the probe cache is fine with
10107 10100	 * separate sd/ssd binaries; on the x86 platform it can be
10108 10101	 * an issue when both parallel and fibre live in one
10109 10102	 * binary.
10110 10103	 */
10111 10104 sd_scsi_clear_probe_cache();
10112 10105 return (ENXIO);
10113 10106 }
10114 10107
10115 10108 /*
10116 10109 * The un_layer_count is to prevent another thread in specfs from
10117 10110 * trying to detach the instance, which can happen when we are
10118 10111 * called from a higher-layer driver instead of thru specfs.
10119 10112 * This will not be needed when DDI provides a layered driver
10120 10113 * interface that allows specfs to know that an instance is in
10121 10114 * use by a layered driver & should not be detached.
10122 10115 *
10123 10116 * Note: the semantics for layered driver opens are exactly one
10124 10117 * close for every open.
10125 10118 */
10126 10119 if (otyp == OTYP_LYR) {
10127 10120 un->un_layer_count++;
10128 10121 }
10129 10122
10130 10123 /*
10131 10124 * Keep a count of the current # of opens in progress. This is because
10132 10125 * some layered drivers try to call us as a regular open. This can
10133 10126	 * cause problems that we cannot prevent; however, by keeping this count
10134 10127 * we can at least keep our open and detach routines from racing against
10135 10128 * each other under such conditions.
10136 10129 */
10137 10130 un->un_opens_in_progress++;
10138 10131 mutex_exit(&sd_detach_mutex);
10139 10132
10140 10133 nodelay = (flag & (FNDELAY | FNONBLOCK));
10141 10134 part = SDPART(dev);
10142 10135 partmask = 1 << part;
10143 10136
10144 10137 /*
10145 10138 * We use a semaphore here in order to serialize
10146 10139 * open and close requests on the device.
10147 10140 */
10148 10141 sema_p(&un->un_semoclose);
10149 10142
10150 10143 mutex_enter(SD_MUTEX(un));
10151 10144
10152 10145 /*
10153 10146 * All device accesses go thru sdstrategy() where we check
10154 10147	 * on suspend status, but there could be a scsi_poll command,
10155 10148 * which bypasses sdstrategy(), so we need to check pm
10156 10149 * status.
10157 10150 */
10158 10151
10159 10152 if (!nodelay) {
10160 10153 while ((un->un_state == SD_STATE_SUSPENDED) ||
10161 10154 (un->un_state == SD_STATE_PM_CHANGING)) {
10162 10155 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10163 10156 }
10164 10157
10165 10158 mutex_exit(SD_MUTEX(un));
10166 10159 if (sd_pm_entry(un) != DDI_SUCCESS) {
10167 10160 rval = EIO;
10168 10161 SD_ERROR(SD_LOG_OPEN_CLOSE, un,
10169 10162 "sdopen: sd_pm_entry failed\n");
10170 10163 goto open_failed_with_pm;
10171 10164 }
10172 10165 mutex_enter(SD_MUTEX(un));
10173 10166 }
10174 10167
10175 10168 /* check for previous exclusive open */
10176 10169 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
10177 10170 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10178 10171 "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
10179 10172 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
10180 10173
10181 10174 if (un->un_exclopen & (partmask)) {
10182 10175 goto excl_open_fail;
10183 10176 }
10184 10177
10185 10178 if (flag & FEXCL) {
10186 10179 int i;
10187 10180 if (un->un_ocmap.lyropen[part]) {
10188 10181 goto excl_open_fail;
10189 10182 }
10190 10183 for (i = 0; i < (OTYPCNT - 1); i++) {
10191 10184 if (un->un_ocmap.regopen[i] & (partmask)) {
10192 10185 goto excl_open_fail;
10193 10186 }
10194 10187 }
10195 10188 }
10196 10189
10197 10190 /*
10198 10191 * Check the write permission if this is a removable media device,
10199 10192	 * NDELAY has not been set, and write permission is requested.
10200 10193 *
10201 10194 * Note: If NDELAY was set and this is write-protected media the WRITE
10202 10195 * attempt will fail with EIO as part of the I/O processing. This is a
10203 10196 * more permissive implementation that allows the open to succeed and
10204 10197 * WRITE attempts to fail when appropriate.
10205 10198 */
10206 10199 if (un->un_f_chk_wp_open) {
10207 10200 if ((flag & FWRITE) && (!nodelay)) {
10208 10201 mutex_exit(SD_MUTEX(un));
10209 10202 /*
10210 10203	 * Defer the check for write permission on a writable
10211 10204	 * DVD drive until sdstrategy, and do not fail the open
10212 10205	 * even if FWRITE is set, as the device may be writable
10213 10206	 * depending upon the media, and the media can change
10214 10207	 * after the call to open().
10215 10208 */
10216 10209 if (un->un_f_dvdram_writable_device == FALSE) {
10217 10210 if (ISCD(un) || sr_check_wp(dev)) {
10218 10211 rval = EROFS;
10219 10212 mutex_enter(SD_MUTEX(un));
10220 10213 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10221 10214 "write to cd or write protected media\n");
10222 10215 goto open_fail;
10223 10216 }
10224 10217 }
10225 10218 mutex_enter(SD_MUTEX(un));
10226 10219 }
10227 10220 }
10228 10221
10229 10222 /*
10230 10223 * If opening in NDELAY/NONBLOCK mode, just return.
10231 10224 * Check if disk is ready and has a valid geometry later.
10232 10225 */
10233 10226 if (!nodelay) {
10234 10227 sd_ssc_t *ssc;
10235 10228
10236 10229 mutex_exit(SD_MUTEX(un));
10237 10230 ssc = sd_ssc_init(un);
10238 10231 rval = sd_ready_and_valid(ssc, part);
10239 10232 sd_ssc_fini(ssc);
10240 10233 mutex_enter(SD_MUTEX(un));
10241 10234 /*
10242 10235 * Fail if device is not ready or if the number of disk
10243 10236	 * blocks is zero or negative for non-CD devices.
10244 10237 */
10245 10238
10246 10239 nblks = 0;
10247 10240
10248 10241 if (rval == SD_READY_VALID && (!ISCD(un))) {
10249 10242 /* if cmlb_partinfo fails, nblks remains 0 */
10250 10243 mutex_exit(SD_MUTEX(un));
10251 10244 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
10252 10245 NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
10253 10246 mutex_enter(SD_MUTEX(un));
10254 10247 }
10255 10248
10256 10249 if ((rval != SD_READY_VALID) ||
10257 10250 (!ISCD(un) && nblks <= 0)) {
10258 10251 rval = un->un_f_has_removable_media ? ENXIO : EIO;
10259 10252 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10260 10253 "device not ready or invalid disk block value\n");
10261 10254 goto open_fail;
10262 10255 }
10263 10256 #if defined(__i386) || defined(__amd64)
10264 10257 } else {
10265 10258 uchar_t *cp;
10266 10259 /*
10267 10260 * x86 requires special nodelay handling, so that p0 is
10268 10261 * always defined and accessible.
10269 10262 * Invalidate geometry only if device is not already open.
10270 10263 */
10271 10264 cp = &un->un_ocmap.chkd[0];
10272 10265 while (cp < &un->un_ocmap.chkd[OCSIZE]) {
10273 10266 if (*cp != (uchar_t)0) {
10274 10267 break;
10275 10268 }
10276 10269 cp++;
10277 10270 }
10278 10271 if (cp == &un->un_ocmap.chkd[OCSIZE]) {
10279 10272 mutex_exit(SD_MUTEX(un));
10280 10273 cmlb_invalidate(un->un_cmlbhandle,
10281 10274 (void *)SD_PATH_DIRECT);
10282 10275 mutex_enter(SD_MUTEX(un));
10283 10276 }
10284 10277
10285 10278 #endif
10286 10279 }
10287 10280
10288 10281 if (otyp == OTYP_LYR) {
10289 10282 un->un_ocmap.lyropen[part]++;
10290 10283 } else {
10291 10284 un->un_ocmap.regopen[otyp] |= partmask;
10292 10285 }
10293 10286
10294 10287 /* Set up open and exclusive open flags */
10295 10288 if (flag & FEXCL) {
10296 10289 un->un_exclopen |= (partmask);
10297 10290 }
10298 10291
10299 10292 /*
10300 10293 * If the lun is EFI labeled and lun capacity is greater than the
10301 10294 * capacity contained in the label, log a sys-event to notify the
10302 10295 * interested module.
10303 10296 * To avoid an infinite loop of logging sys-event, we only log the
10304 10297 * event when the lun is not opened in NDELAY mode. The event handler
10305 10298 * should open the lun in NDELAY mode.
10306 10299 */
10307 10300 if (!nodelay) {
10308 10301 mutex_exit(SD_MUTEX(un));
10309 10302 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
10310 10303 (void*)SD_PATH_DIRECT) == 0) {
10311 10304 mutex_enter(SD_MUTEX(un));
10312 10305 if (un->un_f_blockcount_is_valid &&
10313 10306 un->un_blockcount > label_cap &&
10314 10307 un->un_f_expnevent == B_FALSE) {
10315 10308 un->un_f_expnevent = B_TRUE;
10316 10309 mutex_exit(SD_MUTEX(un));
10317 10310 sd_log_lun_expansion_event(un,
10318 10311 (nodelay ? KM_NOSLEEP : KM_SLEEP));
10319 10312 mutex_enter(SD_MUTEX(un));
10320 10313 }
10321 10314 } else {
10322 10315 mutex_enter(SD_MUTEX(un));
10323 10316 }
10324 10317 }
10325 10318
10326 10319 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10327 10320 "open of part %d type %d\n", part, otyp);
10328 10321
10329 10322 mutex_exit(SD_MUTEX(un));
10330 10323 if (!nodelay) {
10331 10324 sd_pm_exit(un);
10332 10325 }
10333 10326
10334 10327 sema_v(&un->un_semoclose);
10335 10328
10336 10329 mutex_enter(&sd_detach_mutex);
10337 10330 un->un_opens_in_progress--;
10338 10331 mutex_exit(&sd_detach_mutex);
10339 10332
10340 10333 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n");
10341 10334 return (DDI_SUCCESS);
10342 10335
10343 10336 excl_open_fail:
10344 10337 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n");
10345 10338 rval = EBUSY;
10346 10339
10347 10340 open_fail:
10348 10341 mutex_exit(SD_MUTEX(un));
10349 10342
10350 10343 /*
10351 10344 * On a failed open we must exit the pm management.
10352 10345 */
10353 10346 if (!nodelay) {
10354 10347 sd_pm_exit(un);
10355 10348 }
10356 10349 open_failed_with_pm:
10357 10350 sema_v(&un->un_semoclose);
10358 10351
10359 10352 mutex_enter(&sd_detach_mutex);
10360 10353 un->un_opens_in_progress--;
10361 10354 if (otyp == OTYP_LYR) {
10362 10355 un->un_layer_count--;
10363 10356 }
10364 10357 mutex_exit(&sd_detach_mutex);
10365 10358
10366 10359 return (rval);
10367 10360 }
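
/*
 * Illustrative userland sketch (not part of this driver): requesting an
 * exclusive open. On illumos, O_EXCL on a device node is passed to the
 * driver as FEXCL, so a second opener of the same partition fails with
 * EBUSY via the excl_open_fail path above. The device path is
 * hypothetical.
 */
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int	fd;

	fd = open("/dev/rdsk/c0t0d0s0", O_RDONLY | O_EXCL | O_NDELAY);
	if (fd < 0) {
		if (errno == EBUSY)
			(void) printf("partition already open\n");
		return (1);
	}
	/* exclusive access held until close */
	(void) close(fd);
	return (0);
}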
10368 10361
10369 10362
10370 10363 /*
10371 10364 * Function: sdclose
10372 10365 *
10373 10366 * Description: Driver's close(9e) entry point function.
10374 10367 *
10375 10368 * Arguments: dev - device number
10376 10369 * flag - file status flag, informational only
10377 10370 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10378 10371 * cred_p - user credential pointer
10379 10372 *
10380 10373 * Return Code: ENXIO
10381 10374 *
10382 10375 * Context: Kernel thread context
10383 10376 */
10384 10377 /* ARGSUSED */
10385 10378 static int
10386 10379 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
10387 10380 {
10388 10381 struct sd_lun *un;
10389 10382 uchar_t *cp;
10390 10383 int part;
10391 10384 int nodelay;
10392 10385 int rval = 0;
10393 10386
10394 10387 /* Validate the open type */
10395 10388 if (otyp >= OTYPCNT) {
10396 10389 return (ENXIO);
10397 10390 }
10398 10391
10399 10392 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10400 10393 return (ENXIO);
10401 10394 }
10402 10395
10403 10396 part = SDPART(dev);
10404 10397 nodelay = flag & (FNDELAY | FNONBLOCK);
10405 10398
10406 10399 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10407 10400 "sdclose: close of part %d type %d\n", part, otyp);
10408 10401
10409 10402 /*
10410 10403 * We use a semaphore here in order to serialize
10411 10404 * open and close requests on the device.
10412 10405 */
10413 10406 sema_p(&un->un_semoclose);
10414 10407
10415 10408 mutex_enter(SD_MUTEX(un));
10416 10409
10417 10410 /* Don't proceed if power is being changed. */
10418 10411 while (un->un_state == SD_STATE_PM_CHANGING) {
10419 10412 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10420 10413 }
10421 10414
10422 10415 if (un->un_exclopen & (1 << part)) {
10423 10416 un->un_exclopen &= ~(1 << part);
10424 10417 }
10425 10418
10426 10419 /* Update the open partition map */
10427 10420 if (otyp == OTYP_LYR) {
10428 10421 un->un_ocmap.lyropen[part] -= 1;
10429 10422 } else {
10430 10423 un->un_ocmap.regopen[otyp] &= ~(1 << part);
10431 10424 }
10432 10425
10433 10426 cp = &un->un_ocmap.chkd[0];
10434 10427 while (cp < &un->un_ocmap.chkd[OCSIZE]) {
10435 10428 if (*cp != (uchar_t)0) {
10436 10429 break;
10437 10430 }
10438 10431 cp++;
10439 10432 }
10440 10433
10441 10434 if (cp == &un->un_ocmap.chkd[OCSIZE]) {
10442 10435 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");
10443 10436
10444 10437 /*
10445 10438	 * We avoid persistence upon the last close, and set
10446 10439 * the throttle back to the maximum.
10447 10440 */
10448 10441 un->un_throttle = un->un_saved_throttle;
10449 10442
10450 10443 if (un->un_state == SD_STATE_OFFLINE) {
10451 10444 if (un->un_f_is_fibre == FALSE) {
10452 10445 scsi_log(SD_DEVINFO(un), sd_label,
10453 10446 CE_WARN, "offline\n");
10454 10447 }
10455 10448 mutex_exit(SD_MUTEX(un));
10456 10449 cmlb_invalidate(un->un_cmlbhandle,
10457 10450 (void *)SD_PATH_DIRECT);
10458 10451 mutex_enter(SD_MUTEX(un));
10459 10452
10460 10453 } else {
10461 10454 /*
10462 10455 * Flush any outstanding writes in NVRAM cache.
10463 10456 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
10464 10457	 * cmd; it may not work for non-Pluto devices.
10465 10458 * SYNCHRONIZE CACHE is not required for removables,
10466 10459 * except DVD-RAM drives.
10467 10460 *
10468 10461 * Also note: because SYNCHRONIZE CACHE is currently
10469 10462 * the only command issued here that requires the
10470 10463 * drive be powered up, only do the power up before
10471 10464 * sending the Sync Cache command. If additional
10472 10465 * commands are added which require a powered up
10473 10466 * drive, the following sequence may have to change.
10474 10467 *
10475 10468 * And finally, note that parallel SCSI on SPARC
10476 10469 * only issues a Sync Cache to DVD-RAM, a newly
10477 10470 * supported device.
10478 10471 */
10479 10472 #if defined(__i386) || defined(__amd64)
10480 10473 if ((un->un_f_sync_cache_supported &&
10481 10474 un->un_f_sync_cache_required) ||
10482 10475 un->un_f_dvdram_writable_device == TRUE) {
10483 10476 #else
10484 10477 if (un->un_f_dvdram_writable_device == TRUE) {
10485 10478 #endif
10486 10479 mutex_exit(SD_MUTEX(un));
10487 10480 if (sd_pm_entry(un) == DDI_SUCCESS) {
10488 10481 rval =
10489 10482 sd_send_scsi_SYNCHRONIZE_CACHE(un,
10490 10483 NULL);
10491 10484 /* ignore error if not supported */
10492 10485 if (rval == ENOTSUP) {
10493 10486 rval = 0;
10494 10487 } else if (rval != 0) {
10495 10488 rval = EIO;
10496 10489 }
10497 10490 sd_pm_exit(un);
10498 10491 } else {
10499 10492 rval = EIO;
10500 10493 }
10501 10494 mutex_enter(SD_MUTEX(un));
10502 10495 }
10503 10496
10504 10497 /*
10505 10498			 * For devices which support DOOR_LOCK, send an ALLOW
10506 10499 * MEDIA REMOVAL command, but don't get upset if it
10507 10500 * fails. We need to raise the power of the drive before
10508 10501 * we can call sd_send_scsi_DOORLOCK()
10509 10502 */
10510 10503 if (un->un_f_doorlock_supported) {
10511 10504 mutex_exit(SD_MUTEX(un));
10512 10505 if (sd_pm_entry(un) == DDI_SUCCESS) {
10513 10506 sd_ssc_t *ssc;
10514 10507
10515 10508 ssc = sd_ssc_init(un);
10516 10509 rval = sd_send_scsi_DOORLOCK(ssc,
10517 10510 SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
10518 10511 if (rval != 0)
10519 10512 sd_ssc_assessment(ssc,
10520 10513 SD_FMT_IGNORE);
10521 10514 sd_ssc_fini(ssc);
10522 10515
10523 10516 sd_pm_exit(un);
10524 10517 if (ISCD(un) && (rval != 0) &&
10525 10518 (nodelay != 0)) {
10526 10519 rval = ENXIO;
10527 10520 }
10528 10521 } else {
10529 10522 rval = EIO;
10530 10523 }
10531 10524 mutex_enter(SD_MUTEX(un));
10532 10525 }
10533 10526
10534 10527 /*
10535 10528 * If a device has removable media, invalidate all
10536 10529 * parameters related to media, such as geometry,
10537 10530 * blocksize, and blockcount.
10538 10531 */
10539 10532 if (un->un_f_has_removable_media) {
10540 10533 sr_ejected(un);
10541 10534 }
10542 10535
10543 10536 /*
10544 10537 * Destroy the cache (if it exists) which was
10545 10538 * allocated for the write maps since this is
10546 10539 * the last close for this media.
10547 10540 */
10548 10541 if (un->un_wm_cache) {
10549 10542 /*
10550 10543				 * Check if there are pending commands;
10551 10544				 * if there are, give a warning and
10552 10545				 * do not destroy the cache.
10553 10546 */
10554 10547 if (un->un_ncmds_in_driver > 0) {
10555 10548 scsi_log(SD_DEVINFO(un),
10556 10549 sd_label, CE_WARN,
10557 10550 "Unable to clean up memory "
10558 10551 "because of pending I/O\n");
10559 10552 } else {
10560 10553 kmem_cache_destroy(
10561 10554 un->un_wm_cache);
10562 10555 un->un_wm_cache = NULL;
10563 10556 }
10564 10557 }
10565 10558 }
10566 10559 }
10567 10560
10568 10561 mutex_exit(SD_MUTEX(un));
10569 10562 sema_v(&un->un_semoclose);
10570 10563
10571 10564 if (otyp == OTYP_LYR) {
10572 10565 mutex_enter(&sd_detach_mutex);
10573 10566 /*
10574 10567 * The detach routine may run when the layer count
10575 10568 * drops to zero.
10576 10569 */
10577 10570 un->un_layer_count--;
10578 10571 mutex_exit(&sd_detach_mutex);
10579 10572 }
10580 10573
10581 10574 return (rval);
10582 10575 }
10583 10576
10584 10577
10585 10578 /*
10586 10579 * Function: sd_ready_and_valid
10587 10580 *
10588 10581 * Description: Test if device is ready and has a valid geometry.
10589 10582 *
10590 10583 * Arguments: ssc - sd_ssc_t will contain un
10591 10584 * un - driver soft state (unit) structure
10592 10585 *
10593 10586 * Return Code: SD_READY_VALID ready and valid label
10594 10587 * SD_NOT_READY_VALID not ready, no label
10595 10588 * SD_RESERVED_BY_OTHERS reservation conflict
10596 10589 *
10597 10590 * Context: Never called at interrupt context.
10598 10591 */
10599 10592
10600 10593 static int
10601 10594 sd_ready_and_valid(sd_ssc_t *ssc, int part)
10602 10595 {
10603 10596 struct sd_errstats *stp;
10604 10597 uint64_t capacity;
10605 10598 uint_t lbasize;
10606 10599 int rval = SD_READY_VALID;
10607 10600 char name_str[48];
10608 10601 boolean_t is_valid;
10609 10602 struct sd_lun *un;
10610 10603 int status;
10611 10604
10612 10605 ASSERT(ssc != NULL);
10613 10606 un = ssc->ssc_un;
10614 10607 ASSERT(un != NULL);
10615 10608 ASSERT(!mutex_owned(SD_MUTEX(un)));
10616 10609
10617 10610 mutex_enter(SD_MUTEX(un));
10618 10611 /*
10619 10612 * If a device has removable media, we must check if media is
10620 10613 * ready when checking if this device is ready and valid.
10621 10614 */
10622 10615 if (un->un_f_has_removable_media) {
10623 10616 mutex_exit(SD_MUTEX(un));
10624 10617 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10625 10618
10626 10619 if (status != 0) {
10627 10620 rval = SD_NOT_READY_VALID;
10628 10621 mutex_enter(SD_MUTEX(un));
10629 10622
10630 10623			/* Ignore all failed status for removable media */
10631 10624 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10632 10625
10633 10626 goto done;
10634 10627 }
10635 10628
10636 10629 is_valid = SD_IS_VALID_LABEL(un);
10637 10630 mutex_enter(SD_MUTEX(un));
10638 10631 if (!is_valid ||
10639 10632 (un->un_f_blockcount_is_valid == FALSE) ||
10640 10633 (un->un_f_tgt_blocksize_is_valid == FALSE)) {
10641 10634
10642 10635			/* capacity has to be read on every open. */
10643 10636 mutex_exit(SD_MUTEX(un));
10644 10637 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
10645 10638 &lbasize, SD_PATH_DIRECT);
10646 10639
10647 10640 if (status != 0) {
10648 10641 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10649 10642
10650 10643 cmlb_invalidate(un->un_cmlbhandle,
10651 10644 (void *)SD_PATH_DIRECT);
10652 10645 mutex_enter(SD_MUTEX(un));
10653 10646 rval = SD_NOT_READY_VALID;
10654 10647
10655 10648 goto done;
10656 10649 } else {
10657 10650 mutex_enter(SD_MUTEX(un));
10658 10651 sd_update_block_info(un, lbasize, capacity);
10659 10652 }
10660 10653 }
10661 10654
10662 10655 /*
10663 10656 * Check if the media in the device is writable or not.
10664 10657 */
10665 10658 if (!is_valid && ISCD(un)) {
10666 10659 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
10667 10660 }
10668 10661
10669 10662 } else {
10670 10663 /*
10671 10664 * Do a test unit ready to clear any unit attention from non-cd
10672 10665 * devices.
10673 10666 */
10674 10667 mutex_exit(SD_MUTEX(un));
10675 10668
10676 10669 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10677 10670 if (status != 0) {
10678 10671 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10679 10672 }
10680 10673
10681 10674 mutex_enter(SD_MUTEX(un));
10682 10675 }
10683 10676
10684 10677
10685 10678 /*
10686 10679	 * If this is a non-512-byte-block device, allocate space for
10687 10680	 * the wmap cache. This is done here since this routine is
10688 10681	 * called every time the media is changed, and the block
10689 10682	 * size is a function of the media rather than the device.
10690 10683 */
10691 10684 if (((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR ||
10692 10685 un->un_f_non_devbsize_supported) &&
10693 10686 un->un_tgt_blocksize != DEV_BSIZE) ||
10694 10687 un->un_f_enable_rmw) {
10695 10688 if (!(un->un_wm_cache)) {
10696 10689 (void) snprintf(name_str, sizeof (name_str),
10697 10690 "%s%d_cache",
10698 10691 ddi_driver_name(SD_DEVINFO(un)),
10699 10692 ddi_get_instance(SD_DEVINFO(un)));
10700 10693 un->un_wm_cache = kmem_cache_create(
10701 10694 name_str, sizeof (struct sd_w_map),
10702 10695 8, sd_wm_cache_constructor,
10703 10696 sd_wm_cache_destructor, NULL,
10704 10697 (void *)un, NULL, 0);
10705 10698 if (!(un->un_wm_cache)) {
10706 10699 rval = ENOMEM;
10707 10700 goto done;
10708 10701 }
10709 10702 }
10710 10703 }
10711 10704
10712 10705 if (un->un_state == SD_STATE_NORMAL) {
10713 10706 /*
10714 10707 * If the target is not yet ready here (defined by a TUR
10715 10708 * failure), invalidate the geometry and print an 'offline'
10716 10709 * message. This is a legacy message, as the state of the
10717 10710 * target is not actually changed to SD_STATE_OFFLINE.
10718 10711 *
10719 10712 * If the TUR fails for EACCES (Reservation Conflict),
10720 10713 * SD_RESERVED_BY_OTHERS will be returned to indicate
10721 10714 * reservation conflict. If the TUR fails for other
10722 10715 * reasons, SD_NOT_READY_VALID will be returned.
10723 10716 */
10724 10717 int err;
10725 10718
10726 10719 mutex_exit(SD_MUTEX(un));
10727 10720 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10728 10721 mutex_enter(SD_MUTEX(un));
10729 10722
10730 10723 if (err != 0) {
10731 10724 mutex_exit(SD_MUTEX(un));
10732 10725 cmlb_invalidate(un->un_cmlbhandle,
10733 10726 (void *)SD_PATH_DIRECT);
10734 10727 mutex_enter(SD_MUTEX(un));
10735 10728 if (err == EACCES) {
10736 10729 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
10737 10730 "reservation conflict\n");
10738 10731 rval = SD_RESERVED_BY_OTHERS;
10739 10732 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10740 10733 } else {
10741 10734 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
10742 10735 "drive offline\n");
10743 10736 rval = SD_NOT_READY_VALID;
10744 10737 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
10745 10738 }
10746 10739 goto done;
10747 10740 }
10748 10741 }
10749 10742
10750 10743 if (un->un_f_format_in_progress == FALSE) {
10751 10744 mutex_exit(SD_MUTEX(un));
10752 10745
10753 10746 (void) cmlb_validate(un->un_cmlbhandle, 0,
10754 10747 (void *)SD_PATH_DIRECT);
10755 10748 if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL,
10756 10749 NULL, (void *) SD_PATH_DIRECT) != 0) {
10757 10750 rval = SD_NOT_READY_VALID;
10758 10751 mutex_enter(SD_MUTEX(un));
10759 10752
10760 10753 goto done;
10761 10754 }
10762 10755 if (un->un_f_pkstats_enabled) {
10763 10756 sd_set_pstats(un);
10764 10757 SD_TRACE(SD_LOG_IO_PARTITION, un,
10765 10758 "sd_ready_and_valid: un:0x%p pstats created and "
10766 10759 "set\n", un);
10767 10760 }
10768 10761 mutex_enter(SD_MUTEX(un));
10769 10762 }
10770 10763
10771 10764 /*
10772 10765	 * If this device supports the DOOR_LOCK command, try to send
10773 10766 * this command to PREVENT MEDIA REMOVAL, but don't get upset
10774 10767 * if it fails. For a CD, however, it is an error
10775 10768 */
10776 10769 if (un->un_f_doorlock_supported) {
10777 10770 mutex_exit(SD_MUTEX(un));
10778 10771 status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
10779 10772 SD_PATH_DIRECT);
10780 10773
10781 10774 if ((status != 0) && ISCD(un)) {
10782 10775 rval = SD_NOT_READY_VALID;
10783 10776 mutex_enter(SD_MUTEX(un));
10784 10777
10785 10778 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10786 10779
10787 10780 goto done;
10788 10781 } else if (status != 0)
10789 10782 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10790 10783 mutex_enter(SD_MUTEX(un));
10791 10784 }
10792 10785
10793 10786 /* The state has changed, inform the media watch routines */
10794 10787 un->un_mediastate = DKIO_INSERTED;
10795 10788 cv_broadcast(&un->un_state_cv);
10796 10789 rval = SD_READY_VALID;
10797 10790
10798 10791 done:
10799 10792
10800 10793 /*
10801 10794	 * Initialize the capacity kstat value if there was no media
10802 10795	 * previously (capacity kstat is 0) and media has been inserted
10803 10796	 * (un_blockcount > 0).
10804 10797 */
10805 10798 if (un->un_errstats != NULL) {
10806 10799 stp = (struct sd_errstats *)un->un_errstats->ks_data;
10807 10800 if ((stp->sd_capacity.value.ui64 == 0) &&
10808 10801 (un->un_f_blockcount_is_valid == TRUE)) {
10809 10802 stp->sd_capacity.value.ui64 =
10810 10803 (uint64_t)((uint64_t)un->un_blockcount *
10811 10804 un->un_sys_blocksize);
10812 10805 }
10813 10806 }
10814 10807
10815 10808 mutex_exit(SD_MUTEX(un));
10816 10809 return (rval);
10817 10810 }
10818 10811
10819 10812
10820 10813 /*
10821 10814 * Function: sdmin
10822 10815 *
10823 10816 * Description: Routine to limit the size of a data transfer. Used in
10824 10817 * conjunction with physio(9F).
10825 10818 *
10826 10819 * Arguments: bp - pointer to the indicated buf(9S) struct.
10827 10820 *
10828 10821 * Context: Kernel thread context.
10829 10822 */
10830 10823
10831 10824 static void
10832 10825 sdmin(struct buf *bp)
10833 10826 {
10834 10827 struct sd_lun *un;
10835 10828 int instance;
10836 10829
10837 10830 instance = SDUNIT(bp->b_edev);
10838 10831
10839 10832 un = ddi_get_soft_state(sd_state, instance);
10840 10833 ASSERT(un != NULL);
10841 10834
10842 10835 /*
10843 10836 * We depend on buf breakup to restrict
10844 10837 * IO size if it is enabled.
10845 10838 */
10846 10839 if (un->un_buf_breakup_supported) {
10847 10840 return;
10848 10841 }
10849 10842
10850 10843 if (bp->b_bcount > un->un_max_xfer_size) {
10851 10844 bp->b_bcount = un->un_max_xfer_size;
10852 10845 }
10853 10846 }
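/*
 * Note: physio(9F) calls the minphys routine (sdmin here) to clamp
 * b_bcount before each transfer it issues, so a request larger than
 * un_max_xfer_size is simply carried out as a series of smaller
 * transfers. For example (illustrative values only), with an
 * un_max_xfer_size of 1 MB, a 4 MB read becomes four 1 MB transfers.
 */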
10854 10847
10855 10848
10856 10849 /*
10857 10850 * Function: sdread
10858 10851 *
10859 10852 * Description: Driver's read(9e) entry point function.
10860 10853 *
10861 10854 * Arguments: dev - device number
10862 10855 * uio - structure pointer describing where data is to be stored
10863 10856 * in user's space
10864 10857 * cred_p - user credential pointer
10865 10858 *
10866 10859 * Return Code: ENXIO
10867 10860 * EIO
10868 10861 * EINVAL
10869 10862 * value returned by physio
10870 10863 *
10871 10864 * Context: Kernel thread context.
10872 10865 */
10873 10866 /* ARGSUSED */
10874 10867 static int
10875 10868 sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
10876 10869 {
10877 10870 struct sd_lun *un = NULL;
10878 10871 int secmask;
10879 10872 int err = 0;
10880 10873 sd_ssc_t *ssc;
10881 10874
10882 10875 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10883 10876 return (ENXIO);
10884 10877 }
10885 10878
10886 10879 ASSERT(!mutex_owned(SD_MUTEX(un)));
10887 10880
10888 10881
10889 10882 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10890 10883 mutex_enter(SD_MUTEX(un));
10891 10884 /*
10892 10885		 * Because the call to sd_ready_and_valid will issue I/O, we
10893 10886		 * must wait here if either the device is suspended or
10894 10887		 * its power level is changing.
10895 10888 */
10896 10889 while ((un->un_state == SD_STATE_SUSPENDED) ||
10897 10890 (un->un_state == SD_STATE_PM_CHANGING)) {
10898 10891 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10899 10892 }
10900 10893 un->un_ncmds_in_driver++;
10901 10894 mutex_exit(SD_MUTEX(un));
10902 10895
10903 10896 /* Initialize sd_ssc_t for internal uscsi commands */
10904 10897 ssc = sd_ssc_init(un);
10905 10898 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10906 10899 err = EIO;
10907 10900 } else {
10908 10901 err = 0;
10909 10902 }
10910 10903 sd_ssc_fini(ssc);
10911 10904
10912 10905 mutex_enter(SD_MUTEX(un));
10913 10906 un->un_ncmds_in_driver--;
10914 10907 ASSERT(un->un_ncmds_in_driver >= 0);
10915 10908 mutex_exit(SD_MUTEX(un));
10916 10909 if (err != 0)
10917 10910 return (err);
10918 10911 }
10919 10912
10920 10913 /*
10921 10914 * Read requests are restricted to multiples of the system block size.
10922 10915 */
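	/*
	 * Worked example (illustrative values): with a 512-byte block
	 * size, secmask is 0x1ff; an offset of 1024 passes the check
	 * below (1024 & 0x1ff == 0), while an offset of 1030 fails it
	 * (1030 & 0x1ff == 6) and returns EINVAL.
	 */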
10923 10916 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
10924 10917 !un->un_f_enable_rmw)
10925 10918 secmask = un->un_tgt_blocksize - 1;
10926 10919 else
10927 10920 secmask = DEV_BSIZE - 1;
10928 10921
10929 10922 if (uio->uio_loffset & ((offset_t)(secmask))) {
10930 10923 SD_ERROR(SD_LOG_READ_WRITE, un,
10931 10924 "sdread: file offset not modulo %d\n",
10932 10925 secmask + 1);
10933 10926 err = EINVAL;
10934 10927 } else if (uio->uio_iov->iov_len & (secmask)) {
10935 10928 SD_ERROR(SD_LOG_READ_WRITE, un,
10936 10929 "sdread: transfer length not modulo %d\n",
10937 10930 secmask + 1);
10938 10931 err = EINVAL;
10939 10932 } else {
10940 10933 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
10941 10934 }
10942 10935
10943 10936 return (err);
10944 10937 }
10945 10938
10946 10939
10947 10940 /*
10948 10941 * Function: sdwrite
10949 10942 *
10950 10943 * Description: Driver's write(9e) entry point function.
10951 10944 *
10952 10945 * Arguments: dev - device number
10953 10946 * uio - structure pointer describing where data is stored in
10954 10947 * user's space
10955 10948 * cred_p - user credential pointer
10956 10949 *
10957 10950 * Return Code: ENXIO
10958 10951 * EIO
10959 10952 * EINVAL
10960 10953 * value returned by physio
10961 10954 *
10962 10955 * Context: Kernel thread context.
10963 10956 */
10964 10957 /* ARGSUSED */
10965 10958 static int
10966 10959 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
10967 10960 {
10968 10961 struct sd_lun *un = NULL;
10969 10962 int secmask;
10970 10963 int err = 0;
10971 10964 sd_ssc_t *ssc;
10972 10965
10973 10966 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10974 10967 return (ENXIO);
10975 10968 }
10976 10969
10977 10970 ASSERT(!mutex_owned(SD_MUTEX(un)));
10978 10971
10979 10972 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10980 10973 mutex_enter(SD_MUTEX(un));
10981 10974 /*
10982 10975		 * Because the call to sd_ready_and_valid will issue I/O, we
10983 10976		 * must wait here if either the device is suspended or
10984 10977		 * its power level is changing.
10985 10978 */
10986 10979 while ((un->un_state == SD_STATE_SUSPENDED) ||
10987 10980 (un->un_state == SD_STATE_PM_CHANGING)) {
10988 10981 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10989 10982 }
10990 10983 un->un_ncmds_in_driver++;
10991 10984 mutex_exit(SD_MUTEX(un));
10992 10985
10993 10986 /* Initialize sd_ssc_t for internal uscsi commands */
10994 10987 ssc = sd_ssc_init(un);
10995 10988 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10996 10989 err = EIO;
10997 10990 } else {
10998 10991 err = 0;
10999 10992 }
11000 10993 sd_ssc_fini(ssc);
11001 10994
11002 10995 mutex_enter(SD_MUTEX(un));
11003 10996 un->un_ncmds_in_driver--;
11004 10997 ASSERT(un->un_ncmds_in_driver >= 0);
11005 10998 mutex_exit(SD_MUTEX(un));
11006 10999 if (err != 0)
11007 11000 return (err);
11008 11001 }
11009 11002
11010 11003 /*
11011 11004 * Write requests are restricted to multiples of the system block size.
11012 11005 */
11013 11006 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11014 11007 !un->un_f_enable_rmw)
11015 11008 secmask = un->un_tgt_blocksize - 1;
11016 11009 else
11017 11010 secmask = DEV_BSIZE - 1;
11018 11011
11019 11012 if (uio->uio_loffset & ((offset_t)(secmask))) {
11020 11013 SD_ERROR(SD_LOG_READ_WRITE, un,
11021 11014 "sdwrite: file offset not modulo %d\n",
11022 11015 secmask + 1);
11023 11016 err = EINVAL;
11024 11017 } else if (uio->uio_iov->iov_len & (secmask)) {
11025 11018 SD_ERROR(SD_LOG_READ_WRITE, un,
11026 11019 "sdwrite: transfer length not modulo %d\n",
11027 11020 secmask + 1);
11028 11021 err = EINVAL;
11029 11022 } else {
11030 11023 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio);
11031 11024 }
11032 11025
11033 11026 return (err);
11034 11027 }
11035 11028
11036 11029
11037 11030 /*
11038 11031 * Function: sdaread
11039 11032 *
11040 11033 * Description: Driver's aread(9e) entry point function.
11041 11034 *
11042 11035 * Arguments: dev - device number
11043 11036 * aio - structure pointer describing where data is to be stored
11044 11037 * cred_p - user credential pointer
11045 11038 *
11046 11039 * Return Code: ENXIO
11047 11040 * EIO
11048 11041 * EINVAL
11049 11042 * value returned by aphysio
11050 11043 *
11051 11044 * Context: Kernel thread context.
11052 11045 */
11053 11046 /* ARGSUSED */
11054 11047 static int
11055 11048 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
11056 11049 {
11057 11050 struct sd_lun *un = NULL;
11058 11051 struct uio *uio = aio->aio_uio;
11059 11052 int secmask;
11060 11053 int err = 0;
11061 11054 sd_ssc_t *ssc;
11062 11055
11063 11056 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
11064 11057 return (ENXIO);
11065 11058 }
11066 11059
11067 11060 ASSERT(!mutex_owned(SD_MUTEX(un)));
11068 11061
11069 11062 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11070 11063 mutex_enter(SD_MUTEX(un));
11071 11064 /*
11072 11065		 * Because the call to sd_ready_and_valid will issue I/O, we
11073 11066		 * must wait here if either the device is suspended or
11074 11067		 * its power level is changing.
11075 11068 */
11076 11069 while ((un->un_state == SD_STATE_SUSPENDED) ||
11077 11070 (un->un_state == SD_STATE_PM_CHANGING)) {
11078 11071 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11079 11072 }
11080 11073 un->un_ncmds_in_driver++;
11081 11074 mutex_exit(SD_MUTEX(un));
11082 11075
11083 11076 /* Initialize sd_ssc_t for internal uscsi commands */
11084 11077 ssc = sd_ssc_init(un);
11085 11078 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11086 11079 err = EIO;
11087 11080 } else {
11088 11081 err = 0;
11089 11082 }
11090 11083 sd_ssc_fini(ssc);
11091 11084
11092 11085 mutex_enter(SD_MUTEX(un));
11093 11086 un->un_ncmds_in_driver--;
11094 11087 ASSERT(un->un_ncmds_in_driver >= 0);
11095 11088 mutex_exit(SD_MUTEX(un));
11096 11089 if (err != 0)
11097 11090 return (err);
11098 11091 }
11099 11092
11100 11093 /*
11101 11094 * Read requests are restricted to multiples of the system block size.
11102 11095 */
11103 11096 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11104 11097 !un->un_f_enable_rmw)
11105 11098 secmask = un->un_tgt_blocksize - 1;
11106 11099 else
11107 11100 secmask = DEV_BSIZE - 1;
11108 11101
11109 11102 if (uio->uio_loffset & ((offset_t)(secmask))) {
11110 11103 SD_ERROR(SD_LOG_READ_WRITE, un,
11111 11104 "sdaread: file offset not modulo %d\n",
11112 11105 secmask + 1);
11113 11106 err = EINVAL;
11114 11107 } else if (uio->uio_iov->iov_len & (secmask)) {
11115 11108 SD_ERROR(SD_LOG_READ_WRITE, un,
11116 11109 "sdaread: transfer length not modulo %d\n",
11117 11110 secmask + 1);
11118 11111 err = EINVAL;
11119 11112 } else {
11120 11113 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio);
11121 11114 }
11122 11115
11123 11116 return (err);
11124 11117 }
11125 11118
11126 11119
11127 11120 /*
11128 11121 * Function: sdawrite
11129 11122 *
11130 11123 * Description: Driver's awrite(9e) entry point function.
11131 11124 *
11132 11125 * Arguments: dev - device number
11133 11126 * aio - structure pointer describing where data is stored
11134 11127 * cred_p - user credential pointer
11135 11128 *
11136 11129 * Return Code: ENXIO
11137 11130 * EIO
11138 11131 * EINVAL
11139 11132 * value returned by aphysio
11140 11133 *
11141 11134 * Context: Kernel thread context.
11142 11135 */
11143 11136 /* ARGSUSED */
11144 11137 static int
11145 11138 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
11146 11139 {
11147 11140 struct sd_lun *un = NULL;
11148 11141 struct uio *uio = aio->aio_uio;
11149 11142 int secmask;
11150 11143 int err = 0;
11151 11144 sd_ssc_t *ssc;
11152 11145
11153 11146 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
11154 11147 return (ENXIO);
11155 11148 }
11156 11149
11157 11150 ASSERT(!mutex_owned(SD_MUTEX(un)));
11158 11151
11159 11152 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11160 11153 mutex_enter(SD_MUTEX(un));
11161 11154 /*
11162 11155		 * Because the call to sd_ready_and_valid will issue I/O, we
11163 11156		 * must wait here if either the device is suspended or
11164 11157		 * its power level is changing.
11165 11158 */
11166 11159 while ((un->un_state == SD_STATE_SUSPENDED) ||
11167 11160 (un->un_state == SD_STATE_PM_CHANGING)) {
11168 11161 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11169 11162 }
11170 11163 un->un_ncmds_in_driver++;
11171 11164 mutex_exit(SD_MUTEX(un));
11172 11165
11173 11166 /* Initialize sd_ssc_t for internal uscsi commands */
11174 11167 ssc = sd_ssc_init(un);
11175 11168 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11176 11169 err = EIO;
11177 11170 } else {
11178 11171 err = 0;
11179 11172 }
11180 11173 sd_ssc_fini(ssc);
11181 11174
11182 11175 mutex_enter(SD_MUTEX(un));
11183 11176 un->un_ncmds_in_driver--;
11184 11177 ASSERT(un->un_ncmds_in_driver >= 0);
11185 11178 mutex_exit(SD_MUTEX(un));
11186 11179 if (err != 0)
11187 11180 return (err);
11188 11181 }
11189 11182
11190 11183 /*
11191 11184 * Write requests are restricted to multiples of the system block size.
11192 11185 */
11193 11186 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11194 11187 !un->un_f_enable_rmw)
11195 11188 secmask = un->un_tgt_blocksize - 1;
11196 11189 else
11197 11190 secmask = DEV_BSIZE - 1;
11198 11191
11199 11192 if (uio->uio_loffset & ((offset_t)(secmask))) {
11200 11193 SD_ERROR(SD_LOG_READ_WRITE, un,
11201 11194 "sdawrite: file offset not modulo %d\n",
11202 11195 secmask + 1);
11203 11196 err = EINVAL;
11204 11197 } else if (uio->uio_iov->iov_len & (secmask)) {
11205 11198 SD_ERROR(SD_LOG_READ_WRITE, un,
11206 11199 "sdawrite: transfer length not modulo %d\n",
11207 11200 secmask + 1);
11208 11201 err = EINVAL;
11209 11202 } else {
11210 11203 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio);
11211 11204 }
11212 11205
11213 11206 return (err);
11214 11207 }
11215 11208
11216 11209
11217 11210
11218 11211
11219 11212
11220 11213 /*
11221 11214 * Driver IO processing follows the following sequence:
11222 11215 *
11223 11216 * sdioctl(9E) sdstrategy(9E) biodone(9F)
11224 11217 * | | ^
11225 11218 * v v |
11226 11219 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+
11227 11220 * | | | |
11228 11221 * v | | |
11229 11222 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone()
11230 11223 * | | ^ ^
11231 11224 * v v | |
11232 11225 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | |
11233 11226 * | | | |
11234 11227 * +---+ | +------------+ +-------+
11235 11228 * | | | |
11236 11229 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| |
11237 11230 * | v | |
11238 11231 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() |
11239 11232 * | | ^ |
11240 11233 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| |
11241 11234 * | v | |
11242 11235 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() |
11243 11236 * | | ^ |
11244 11237 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| |
11245 11238 * | v | |
11246 11239 * | sd_checksum_iostart() sd_checksum_iodone() |
11247 11240 * | | ^ |
11248 11241 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+
11249 11242 * | v | |
11250 11243 * | sd_pm_iostart() sd_pm_iodone() |
11251 11244 * | | ^ |
11252 11245 * | | | |
11253 11246 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+
11254 11247 * | ^
11255 11248 * v |
11256 11249 * sd_core_iostart() |
11257 11250 * | |
11258 11251 * | +------>(*destroypkt)()
11259 11252 * +-> sd_start_cmds() <-+ | |
11260 11253 * | | | v
11261 11254 * | | | scsi_destroy_pkt(9F)
11262 11255 * | | |
11263 11256 * +->(*initpkt)() +- sdintr()
11264 11257 * | | | |
11265 11258 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx()
11266 11259 * | +-> scsi_setup_cdb(9F) |
11267 11260 * | |
11268 11261 * +--> scsi_transport(9F) |
11269 11262 * | |
11270 11263 * +----> SCSA ---->+
11271 11264 *
11272 11265 *
11273 11266 * This code is based upon the following presumptions:
11274 11267 *
11275 11268 * - iostart and iodone functions operate on buf(9S) structures. These
11276 11269 * functions perform the necessary operations on the buf(9S) and pass
11277 11270 * them along to the next function in the chain by using the macros
11278 11271 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE()
11279 11272 * (for iodone side functions).
11280 11273 *
11281 11274 * - The iostart side functions may sleep. The iodone side functions
11282 11275 * are called under interrupt context and may NOT sleep. Therefore
11283 11276 * iodone side functions also may not call iostart side functions.
11284 11277 * (NOTE: iostart side functions should NOT sleep for memory, as
11285 11278 * this could result in deadlock.)
11286 11279 *
11287 11280 * - An iostart side function may call its corresponding iodone side
11288 11281 * function directly (if necessary).
11289 11282 *
11290 11283 * - In the event of an error, an iostart side function can return a buf(9S)
11291 11284 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and
11292 11285 * b_error in the usual way of course).
11293 11286 *
11294 11287 * - The taskq mechanism may be used by the iodone side functions to dispatch
11295 11288 * requests to the iostart side functions. The iostart side functions in
11296 11289  *    this case would be called under the context of a taskq thread, so it
11297 11290  *    is OK for them to block/sleep/spin.
11298 11291 *
11299 11292 * - iostart side functions may allocate "shadow" buf(9S) structs and
11300 11293 * pass them along to the next function in the chain. The corresponding
11301 11294 * iodone side functions must coalesce the "shadow" bufs and return
11302 11295 * the "original" buf to the next higher layer.
11303 11296 *
11304 11297 * - The b_private field of the buf(9S) struct holds a pointer to
11305 11298 * an sd_xbuf struct, which contains information needed to
11306 11299 * construct the scsi_pkt for the command.
11307 11300 *
11308 11301 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
11309 11302 * layer must acquire & release the SD_MUTEX(un) as needed.
11310 11303 */
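/*
 * Illustrative sketch (not part of the driver) of the iostart-side
 * layer pattern described above; the function name is hypothetical,
 * while SD_NEXT_IOSTART()/SD_BEGIN_IODONE() are the real chain macros:
 *
 *	static void
 *	sd_example_layer_iostart(int index, struct sd_lun *un,
 *	    struct buf *bp)
 *	{
 *		int err = 0;
 *
 *		... per-layer work on bp sets err ...
 *
 *		if (err != 0) {
 *			bioerror(bp, err);
 *			bp->b_resid = bp->b_bcount;
 *			SD_BEGIN_IODONE(index, un, bp);
 *			return;
 *		}
 *		SD_NEXT_IOSTART(index, un, bp);
 *	}
 */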
11311 11304
11312 11305
11313 11306 /*
11314 11307 * Create taskq for all targets in the system. This is created at
11315 11308 * _init(9E) and destroyed at _fini(9E).
11316 11309 *
11317 11310 * Note: here we set the minalloc to a reasonably high number to ensure that
11318 11311 * we will have an adequate supply of task entries available at interrupt time.
11319 11312 * This is used in conjunction with the TASKQ_PREPOPULATE flag in
11320 11313 * sd_create_taskq(). Since we do not want to sleep for allocations at
11321 11314 * interrupt time, set maxalloc equal to minalloc. That way we will just fail
11322 11315 * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
11323 11316  * requests at any one instant in time.
11324 11317 */
11325 11318 #define SD_TASKQ_NUMTHREADS 8
11326 11319 #define SD_TASKQ_MINALLOC 256
11327 11320 #define SD_TASKQ_MAXALLOC 256
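/*
 * Illustrative only: a dispatch at interrupt time must not sleep for
 * an entry, and must tolerate failure once all of the prepopulated
 * entries are in flight, e.g. (sd_some_func and arg are hypothetical):
 *
 *	if (taskq_dispatch(sd_tq, sd_some_func, arg, KM_NOSLEEP) == 0)
 *		... fail the command rather than sleep ...
 */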
11328 11321
11329 11322 static taskq_t *sd_tq = NULL;
11330 11323 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))
11331 11324
11332 11325 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC;
11333 11326 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
11334 11327
11335 11328 /*
11336 11329 * The following task queue is being created for the write part of
11337 11330 * read-modify-write of non-512 block size devices.
11338 11331  * Limit the number of threads to 1 for now, since the queue currently
11339 11332  * applies only to DVD-RAM and MO drives, for which performance is not
11340 11333  * the main criterion at this stage.
11341 11334  * Note: whether a single taskq can be used should be explored in future.
11342 11335 */
11343 11336 #define SD_WMR_TASKQ_NUMTHREADS 1
11344 11337 static taskq_t *sd_wmr_tq = NULL;
11345 11338 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))
11346 11339
11347 11340 /*
11348 11341 * Function: sd_taskq_create
11349 11342 *
11350 11343 * Description: Create taskq thread(s) and preallocate task entries
11351 11344 *
11352 11345 * Return Code: Returns a pointer to the allocated taskq_t.
11353 11346 *
11354 11347 * Context: Can sleep. Requires blockable context.
11355 11348 *
11356 11349 * Notes: - The taskq() facility currently is NOT part of the DDI.
11357 11350  *		  (definitely NOT recommended for 3rd-party drivers!) :-)
11358 11351 * - taskq_create() will block for memory, also it will panic
11359 11352 * if it cannot create the requested number of threads.
11360 11353 * - Currently taskq_create() creates threads that cannot be
11361 11354 * swapped.
11362 11355 * - We use TASKQ_PREPOPULATE to ensure we have an adequate
11363 11356 * supply of taskq entries at interrupt time (ie, so that we
11364 11357 * do not have to sleep for memory)
11365 11358 */
11366 11359
11367 11360 static void
11368 11361 sd_taskq_create(void)
11369 11362 {
11370 11363 char taskq_name[TASKQ_NAMELEN];
11371 11364
11372 11365 ASSERT(sd_tq == NULL);
11373 11366 ASSERT(sd_wmr_tq == NULL);
11374 11367
11375 11368 (void) snprintf(taskq_name, sizeof (taskq_name),
11376 11369 "%s_drv_taskq", sd_label);
11377 11370 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS,
11378 11371 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
11379 11372 TASKQ_PREPOPULATE));
11380 11373
11381 11374 (void) snprintf(taskq_name, sizeof (taskq_name),
11382 11375 "%s_rmw_taskq", sd_label);
11383 11376 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS,
11384 11377 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
11385 11378 TASKQ_PREPOPULATE));
11386 11379 }
11387 11380
11388 11381
11389 11382 /*
11390 11383 * Function: sd_taskq_delete
11391 11384 *
11392 11385 * Description: Complementary cleanup routine for sd_taskq_create().
11393 11386 *
11394 11387 * Context: Kernel thread context.
11395 11388 */
11396 11389
11397 11390 static void
11398 11391 sd_taskq_delete(void)
11399 11392 {
11400 11393 ASSERT(sd_tq != NULL);
11401 11394 ASSERT(sd_wmr_tq != NULL);
11402 11395 taskq_destroy(sd_tq);
11403 11396 taskq_destroy(sd_wmr_tq);
11404 11397 sd_tq = NULL;
11405 11398 sd_wmr_tq = NULL;
11406 11399 }
11407 11400
11408 11401
11409 11402 /*
11410 11403 * Function: sdstrategy
11411 11404 *
11412 11405 * Description: Driver's strategy (9E) entry point function.
11413 11406 *
11414 11407 * Arguments: bp - pointer to buf(9S)
11415 11408 *
11416 11409 * Return Code: Always returns zero
11417 11410 *
11418 11411 * Context: Kernel thread context.
11419 11412 */
11420 11413
11421 11414 static int
11422 11415 sdstrategy(struct buf *bp)
11423 11416 {
11424 11417 struct sd_lun *un;
11425 11418
11426 11419 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
11427 11420 if (un == NULL) {
11428 11421 bioerror(bp, EIO);
11429 11422 bp->b_resid = bp->b_bcount;
11430 11423 biodone(bp);
11431 11424 return (0);
11432 11425 }
11433 11426
11434 11427	/* As was done in the past, fail new cmds if the state is dumping. */
11435 11428 if (un->un_state == SD_STATE_DUMPING) {
11436 11429 bioerror(bp, ENXIO);
11437 11430 bp->b_resid = bp->b_bcount;
11438 11431 biodone(bp);
11439 11432 return (0);
11440 11433 }
11441 11434
11442 11435 ASSERT(!mutex_owned(SD_MUTEX(un)));
11443 11436
11444 11437 /*
11445 11438 * Commands may sneak in while we released the mutex in
11446 11439	 * DDI_SUSPEND, so we should block new commands. However, old
11447 11440 * commands that are still in the driver at this point should
11448 11441 * still be allowed to drain.
11449 11442 */
11450 11443 mutex_enter(SD_MUTEX(un));
11451 11444 /*
11452 11445 * Must wait here if either the device is suspended or
11453 11446	 * its power level is changing.
11454 11447 */
11455 11448 while ((un->un_state == SD_STATE_SUSPENDED) ||
11456 11449 (un->un_state == SD_STATE_PM_CHANGING)) {
11457 11450 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11458 11451 }
11459 11452
11460 11453 un->un_ncmds_in_driver++;
11461 11454
11462 11455 /*
11463 11456 * atapi: Since we are running the CD for now in PIO mode we need to
11464 11457	 * call bp_mapin here to avoid it being called in interrupt context under
11465 11458 * the HBA's init_pkt routine.
11466 11459 */
11467 11460 if (un->un_f_cfg_is_atapi == TRUE) {
11468 11461 mutex_exit(SD_MUTEX(un));
11469 11462 bp_mapin(bp);
11470 11463 mutex_enter(SD_MUTEX(un));
11471 11464 }
11472 11465 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n",
11473 11466 un->un_ncmds_in_driver);
11474 11467
11475 11468 if (bp->b_flags & B_WRITE)
11476 11469 un->un_f_sync_cache_required = TRUE;
11477 11470
11478 11471 mutex_exit(SD_MUTEX(un));
11479 11472
11480 11473 /*
11481 11474 * This will (eventually) allocate the sd_xbuf area and
11482 11475 * call sd_xbuf_strategy(). We just want to return the
11483 11476	 * result of ddi_xbuf_qstrategy so that we have an
11484 11477	 * optimized tail call which saves us a stack frame.
11485 11478 */
11486 11479 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr));
11487 11480 }
11488 11481
11489 11482
11490 11483 /*
11491 11484 * Function: sd_xbuf_strategy
11492 11485 *
11493 11486 * Description: Function for initiating IO operations via the
11494 11487 * ddi_xbuf_qstrategy() mechanism.
11495 11488 *
11496 11489 * Context: Kernel thread context.
11497 11490 */
11498 11491
11499 11492 static void
11500 11493 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg)
11501 11494 {
11502 11495 struct sd_lun *un = arg;
11503 11496
11504 11497 ASSERT(bp != NULL);
11505 11498 ASSERT(xp != NULL);
11506 11499 ASSERT(un != NULL);
11507 11500 ASSERT(!mutex_owned(SD_MUTEX(un)));
11508 11501
11509 11502 /*
11510 11503 * Initialize the fields in the xbuf and save a pointer to the
11511 11504 * xbuf in bp->b_private.
11512 11505 */
11513 11506 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL);
11514 11507
11515 11508 /* Send the buf down the iostart chain */
11516 11509 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp);
11517 11510 }
11518 11511
11519 11512
11520 11513 /*
11521 11514 * Function: sd_xbuf_init
11522 11515 *
11523 11516 * Description: Prepare the given sd_xbuf struct for use.
11524 11517 *
11525 11518 * Arguments: un - ptr to softstate
11526 11519 * bp - ptr to associated buf(9S)
11527 11520 * xp - ptr to associated sd_xbuf
11528 11521 * chain_type - IO chain type to use:
11529 11522 * SD_CHAIN_NULL
11530 11523 * SD_CHAIN_BUFIO
11531 11524 * SD_CHAIN_USCSI
11532 11525 * SD_CHAIN_DIRECT
11533 11526 * SD_CHAIN_DIRECT_PRIORITY
11534 11527 * pktinfop - ptr to private data struct for scsi_pkt(9S)
11535 11528 * initialization; may be NULL if none.
11536 11529 *
11537 11530 * Context: Kernel thread context
11538 11531 */
11539 11532
11540 11533 static void
11541 11534 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
11542 11535 uchar_t chain_type, void *pktinfop)
11543 11536 {
11544 11537 int index;
11545 11538
11546 11539 ASSERT(un != NULL);
11547 11540 ASSERT(bp != NULL);
11548 11541 ASSERT(xp != NULL);
11549 11542
11550 11543 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n",
11551 11544 bp, chain_type);
11552 11545
11553 11546 xp->xb_un = un;
11554 11547 xp->xb_pktp = NULL;
11555 11548 xp->xb_pktinfo = pktinfop;
11556 11549 xp->xb_private = bp->b_private;
11557 11550 xp->xb_blkno = (daddr_t)bp->b_blkno;
11558 11551
11559 11552 /*
11560 11553 * Set up the iostart and iodone chain indexes in the xbuf, based
11561 11554 * upon the specified chain type to use.
11562 11555 */
11563 11556 switch (chain_type) {
11564 11557 case SD_CHAIN_NULL:
11565 11558 /*
11566 11559 * Fall thru to just use the values for the buf type, even
11567 11560		 * though for the NULL chain these values will never be used.
11568 11561 */
11569 11562 /* FALLTHRU */
11570 11563 case SD_CHAIN_BUFIO:
11571 11564 index = un->un_buf_chain_type;
11572 11565 if ((!un->un_f_has_removable_media) &&
11573 11566 (un->un_tgt_blocksize != 0) &&
11574 11567 (un->un_tgt_blocksize != DEV_BSIZE ||
11575 11568 un->un_f_enable_rmw)) {
11576 11569 int secmask = 0, blknomask = 0;
11577 11570 if (un->un_f_enable_rmw) {
11578 11571 blknomask =
11579 11572 (un->un_phy_blocksize / DEV_BSIZE) - 1;
11580 11573 secmask = un->un_phy_blocksize - 1;
11581 11574 } else {
11582 11575 blknomask =
11583 11576 (un->un_tgt_blocksize / DEV_BSIZE) - 1;
11584 11577 secmask = un->un_tgt_blocksize - 1;
11585 11578 }
11586 11579
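			/*
			 * The request is not aligned to the target (or
			 * emulated physical) block size, so route it to
			 * the multi-sector (read-modify-write) chain,
			 * unless the device is configured to just
			 * return an error for such requests.
			 */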
11587 11580 if ((bp->b_lblkno & (blknomask)) ||
11588 11581 (bp->b_bcount & (secmask))) {
11589 11582 if ((un->un_f_rmw_type !=
11590 11583 SD_RMW_TYPE_RETURN_ERROR) ||
11591 11584 un->un_f_enable_rmw) {
11592 11585 if (un->un_f_pm_is_enabled == FALSE)
11593 11586 index =
11594 11587 SD_CHAIN_INFO_MSS_DSK_NO_PM;
11595 11588 else
11596 11589 index =
11597 11590 SD_CHAIN_INFO_MSS_DISK;
11598 11591 }
11599 11592 }
11600 11593 }
11601 11594 break;
11602 11595 case SD_CHAIN_USCSI:
11603 11596 index = un->un_uscsi_chain_type;
11604 11597 break;
11605 11598 case SD_CHAIN_DIRECT:
11606 11599 index = un->un_direct_chain_type;
11607 11600 break;
11608 11601 case SD_CHAIN_DIRECT_PRIORITY:
11609 11602 index = un->un_priority_chain_type;
11610 11603 break;
11611 11604 default:
11612 11605 /* We're really broken if we ever get here... */
11613 11606 panic("sd_xbuf_init: illegal chain type!");
11614 11607 /*NOTREACHED*/
11615 11608 }
11616 11609
11617 11610 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index;
11618 11611 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index;
11619 11612
11620 11613 /*
11621 11614 * It might be a bit easier to simply bzero the entire xbuf above,
11622 11615 * but it turns out that since we init a fair number of members anyway,
11623 11616	 * we save a fair number of cycles by doing explicit assignment of zero.
11624 11617 */
11625 11618 xp->xb_pkt_flags = 0;
11626 11619 xp->xb_dma_resid = 0;
11627 11620 xp->xb_retry_count = 0;
11628 11621 xp->xb_victim_retry_count = 0;
11629 11622 xp->xb_ua_retry_count = 0;
11630 11623 xp->xb_nr_retry_count = 0;
11631 11624 xp->xb_sense_bp = NULL;
11632 11625 xp->xb_sense_status = 0;
11633 11626 xp->xb_sense_state = 0;
11634 11627 xp->xb_sense_resid = 0;
11635 11628 xp->xb_ena = 0;
11636 11629
11637 11630 bp->b_private = xp;
11638 11631 bp->b_flags &= ~(B_DONE | B_ERROR);
11639 11632 bp->b_resid = 0;
11640 11633 bp->av_forw = NULL;
11641 11634 bp->av_back = NULL;
11642 11635 bioerror(bp, 0);
11643 11636
11644 11637 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n");
11645 11638 }
11646 11639
11647 11640
11648 11641 /*
11649 11642 * Function: sd_uscsi_strategy
11650 11643 *
11651 11644 * Description: Wrapper for calling into the USCSI chain via physio(9F)
11652 11645 *
11653 11646 * Arguments: bp - buf struct ptr
11654 11647 *
11655 11648 * Return Code: Always returns 0
11656 11649 *
11657 11650 * Context: Kernel thread context
11658 11651 */
11659 11652
11660 11653 static int
11661 11654 sd_uscsi_strategy(struct buf *bp)
11662 11655 {
11663 11656 struct sd_lun *un;
11664 11657 struct sd_uscsi_info *uip;
11665 11658 struct sd_xbuf *xp;
11666 11659 uchar_t chain_type;
11667 11660 uchar_t cmd;
11668 11661
11669 11662 ASSERT(bp != NULL);
11670 11663
11671 11664 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
11672 11665 if (un == NULL) {
11673 11666 bioerror(bp, EIO);
11674 11667 bp->b_resid = bp->b_bcount;
11675 11668 biodone(bp);
11676 11669 return (0);
11677 11670 }
11678 11671
11679 11672 ASSERT(!mutex_owned(SD_MUTEX(un)));
11680 11673
11681 11674 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp);
11682 11675
11683 11676 /*
11684 11677 * A pointer to a struct sd_uscsi_info is expected in bp->b_private
11685 11678 */
11686 11679 ASSERT(bp->b_private != NULL);
11687 11680 uip = (struct sd_uscsi_info *)bp->b_private;
11688 11681 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0];
11689 11682
11690 11683 mutex_enter(SD_MUTEX(un));
11691 11684 /*
11692 11685 * atapi: Since we are running the CD for now in PIO mode we need to
11693 11686	 * call bp_mapin here to avoid it being called in interrupt context under
11694 11687 * the HBA's init_pkt routine.
11695 11688 */
11696 11689 if (un->un_f_cfg_is_atapi == TRUE) {
11697 11690 mutex_exit(SD_MUTEX(un));
11698 11691 bp_mapin(bp);
11699 11692 mutex_enter(SD_MUTEX(un));
11700 11693 }
11701 11694 un->un_ncmds_in_driver++;
11702 11695 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n",
11703 11696 un->un_ncmds_in_driver);
11704 11697
11705 11698 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) &&
11706 11699 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1))
11707 11700 un->un_f_sync_cache_required = TRUE;
11708 11701
11709 11702 mutex_exit(SD_MUTEX(un));
11710 11703
11711 11704 switch (uip->ui_flags) {
11712 11705 case SD_PATH_DIRECT:
11713 11706 chain_type = SD_CHAIN_DIRECT;
11714 11707 break;
11715 11708 case SD_PATH_DIRECT_PRIORITY:
11716 11709 chain_type = SD_CHAIN_DIRECT_PRIORITY;
11717 11710 break;
11718 11711 default:
11719 11712 chain_type = SD_CHAIN_USCSI;
11720 11713 break;
11721 11714 }
11722 11715
11723 11716 /*
11724 11717 * We may allocate extra buf for external USCSI commands. If the
11725 11718	 * application asks for more than 20 bytes of sense data via USCSI,
11726 11719	 * the SCSA layer will allocate a 252-byte sense buffer for that command.
11727 11720 */
11728 11721 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen >
11729 11722 SENSE_LENGTH) {
11730 11723 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH +
11731 11724 MAX_SENSE_LENGTH, KM_SLEEP);
11732 11725 } else {
11733 11726 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP);
11734 11727 }
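	/*
	 * Size note: struct sd_xbuf embeds a SENSE_LENGTH-byte sense
	 * area at its tail, so the oversize allocation above effectively
	 * swaps that tail for a MAX_SENSE_LENGTH-byte one (e.g. for an
	 * application requesting 64 bytes of sense data).
	 */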
11735 11728
11736 11729 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp);
11737 11730
11738 11731 /* Use the index obtained within xbuf_init */
11739 11732 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);
11740 11733
11741 11734 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp);
11742 11735
11743 11736 return (0);
11744 11737 }
11745 11738
11746 11739 /*
11747 11740 * Function: sd_send_scsi_cmd
11748 11741 *
11749 11742  * Description: Runs a USCSI command for the user (when called through
11750 11743  *		sdioctl), or for the driver.
11751 11744 *
11752 11745 * Arguments: dev - the dev_t for the device
11753 11746 * incmd - ptr to a valid uscsi_cmd struct
11754 11747 * flag - bit flag, indicating open settings, 32/64 bit type
11755 11748 * dataspace - UIO_USERSPACE or UIO_SYSSPACE
11756 11749 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11757 11750 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11758 11751 * to use the USCSI "direct" chain and bypass the normal
11759 11752 * command waitq.
11760 11753 *
11761 11754 * Return Code: 0 - successful completion of the given command
11762 11755 * EIO - scsi_uscsi_handle_command() failed
11763 11756 * ENXIO - soft state not found for specified dev
11764 11757 * EINVAL
11765 11758 * EFAULT - copyin/copyout error
11766 11759 * return code of scsi_uscsi_handle_command():
11767 11760 * EIO
11768 11761 * ENXIO
11769 11762 * EACCES
11770 11763 *
11771 11764 * Context: Waits for command to complete. Can sleep.
11772 11765 */
11773 11766
11774 11767 static int
11775 11768 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
11776 11769 enum uio_seg dataspace, int path_flag)
11777 11770 {
11778 11771 struct sd_lun *un;
11779 11772 sd_ssc_t *ssc;
11780 11773 int rval;
11781 11774
11782 11775 un = ddi_get_soft_state(sd_state, SDUNIT(dev));
11783 11776 if (un == NULL) {
11784 11777 return (ENXIO);
11785 11778 }
11786 11779
11787 11780 /*
11788 11781	 * Use sd_ssc_send to handle the uscsi command.
11789 11782 */
11790 11783 ssc = sd_ssc_init(un);
11791 11784 rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag);
11792 11785 sd_ssc_fini(ssc);
11793 11786
11794 11787 return (rval);
11795 11788 }
11796 11789
11797 11790 /*
11798 11791 * Function: sd_ssc_init
11799 11792 *
11800 11793  * Description: Uscsi end-users call this function to initialize necessary
11801 11794  *              fields, such as the uscsi_cmd and sd_uscsi_info structs.
11802 11795 *
11803 11796 * The return value of sd_send_scsi_cmd will be treated as a
11804 11797  *              fault in various conditions. Even if it is not zero, some
11805 11798  *              callers may ignore the return value. That is to say, we
11806 11799  *              cannot make an accurate assessment in sdintr, since a
11807 11800  *              command failing in sdintr does not mean the caller of
11808 11801  *              sd_send_scsi_cmd will treat it as a real failure.
11809 11802 *
11810 11803 * To avoid printing too many error logs for a failed uscsi
11811 11804  *              packet that the caller may not treat as a failure, the
11812 11805  *              sd driver keeps silent when handling all uscsi commands.
11813 11806 *
11814 11807 * During detach->attach and attach-open, for some types of
11815 11808 * problems, the driver should be providing information about
11816 11809  *              the problem encountered. Use of USCSI_SILENT, however,
11817 11810 * suppresses all driver information. The result is that no
11818 11811 * information about the problem is available. Being
11819 11812 * completely silent during this time is inappropriate. The
11820 11813 * driver needs a more selective filter than USCSI_SILENT, so
11821 11814 * that information related to faults is provided.
11822 11815 *
11823 11816  *              To make an accurate assessment, the caller of
11824 11817 * sd_send_scsi_USCSI_CMD should take the ownership and
11825 11818 * get necessary information to print error messages.
11826 11819 *
11827 11820 * If we want to print necessary info of uscsi command, we need to
11828 11821 * keep the uscsi_cmd and sd_uscsi_info till we can make the
11829 11822  *              assessment. We use sd_ssc_init to allocate the necessary
11830 11823  *              structs for sending a uscsi command, and we are also
11831 11824  *              responsible for freeing the memory by calling
11832 11825  *              sd_ssc_fini.
11833 11826 *
11834 11827  *              The calling sequence will look like:
11835 11828 * sd_ssc_init->
11836 11829 *
11837 11830 * ...
11838 11831 *
11839 11832 * sd_send_scsi_USCSI_CMD->
11840 11833 * sd_ssc_send-> - - - sdintr
11841 11834 * ...
11842 11835 *
11843 11836  *              if we think the return value should be treated as a
11844 11837  *              failure, we make the assessment here and print out the
11845 11838  *              necessary info by retrieving uscsi_cmd and sd_uscsi_info
11846 11839 *
11847 11840 * ...
11848 11841 *
11849 11842 * sd_ssc_fini
11850 11843 *
11851 11844 *
11852 11845 * Arguments: un - pointer to driver soft state (unit) structure for this
11853 11846 * target.
11854 11847 *
11855 11848  * Return code: sd_ssc_t - pointer to the allocated sd_ssc_t struct; it
11856 11849  *              contains uscsi_cmd and sd_uscsi_info.
11857 11850  *              NULL - if memory for the sd_ssc_t struct cannot be allocated
11858 11851 *
11859 11852 * Context: Kernel Thread.
11860 11853 */
11861 11854 static sd_ssc_t *
11862 11855 sd_ssc_init(struct sd_lun *un)
11863 11856 {
11864 11857 sd_ssc_t *ssc;
11865 11858 struct uscsi_cmd *ucmdp;
11866 11859 struct sd_uscsi_info *uip;
11867 11860
11868 11861 ASSERT(un != NULL);
11869 11862 ASSERT(!mutex_owned(SD_MUTEX(un)));
11870 11863
11871 11864 /*
11872 11865 * Allocate sd_ssc_t structure
11873 11866 */
11874 11867 ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP);
11875 11868
11876 11869 /*
11877 11870 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine
11878 11871 */
11879 11872 ucmdp = scsi_uscsi_alloc();
11880 11873
11881 11874 /*
11882 11875 * Allocate sd_uscsi_info structure
11883 11876 */
11884 11877 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
11885 11878
11886 11879 ssc->ssc_uscsi_cmd = ucmdp;
11887 11880 ssc->ssc_uscsi_info = uip;
11888 11881 ssc->ssc_un = un;
11889 11882
11890 11883 return (ssc);
11891 11884 }
11892 11885
11893 11886 /*
11894 11887 * Function: sd_ssc_fini
11895 11888 *
11896 11889  * Description: Free the sd_ssc_t struct and the memory hanging off it.
11897 11890 *
11898 11891 * Arguments: ssc - struct pointer of sd_ssc_t.
11899 11892 */
11900 11893 static void
11901 11894 sd_ssc_fini(sd_ssc_t *ssc)
11902 11895 {
11903 11896 scsi_uscsi_free(ssc->ssc_uscsi_cmd);
11904 11897
11905 11898 if (ssc->ssc_uscsi_info != NULL) {
11906 11899 kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info));
11907 11900 ssc->ssc_uscsi_info = NULL;
11908 11901 }
11909 11902
11910 11903 kmem_free(ssc, sizeof (sd_ssc_t));
11911 11904 ssc = NULL;
11912 11905 }
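/*
 * Illustrative lifecycle (not part of the driver): internal callers
 * pair the routines above as sketched below. The sequence mirrors,
 * e.g., the DOORLOCK path in sdclose():
 *
 *	sd_ssc_t *ssc;
 *	int rval;
 *
 *	ssc = sd_ssc_init(un);
 *	rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
 *	if (rval != 0)
 *		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
 *	sd_ssc_fini(ssc);
 */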
11913 11906
11914 11907 /*
11915 11908 * Function: sd_ssc_send
11916 11909 *
11917 11910  * Description: Runs a USCSI command for the user when called through
11918 11911 * or for the driver.
11919 11912 *
11920 11913  * Arguments:	ssc - the sd_ssc_t struct that brings uscsi_cmd and
11921 11914  *		sd_uscsi_info in.
11922 11915 * incmd - ptr to a valid uscsi_cmd struct
11923 11916 * flag - bit flag, indicating open settings, 32/64 bit type
11924 11917 * dataspace - UIO_USERSPACE or UIO_SYSSPACE
11925 11918 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11926 11919 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11927 11920 * to use the USCSI "direct" chain and bypass the normal
11928 11921 * command waitq.
11929 11922 *
11930 11923 * Return Code: 0 - successful completion of the given command
11931 11924 * EIO - scsi_uscsi_handle_command() failed
11932 11925 * ENXIO - soft state not found for specified dev
11933 11926 * ECANCELED - command cancelled due to low power
11934 11927 * EINVAL
11935 11928 * EFAULT - copyin/copyout error
11936 11929 * return code of scsi_uscsi_handle_command():
11937 11930 * EIO
11938 11931 * ENXIO
11939 11932 * EACCES
11940 11933 *
11941 11934 * Context: Kernel Thread;
11942 11935 * Waits for command to complete. Can sleep.
11943 11936 */
11944 11937 static int
11945 11938 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag,
11946 11939 enum uio_seg dataspace, int path_flag)
11947 11940 {
11948 11941 struct sd_uscsi_info *uip;
11949 11942 struct uscsi_cmd *uscmd;
11950 11943 struct sd_lun *un;
11951 11944 dev_t dev;
11952 11945
11953 11946 int format = 0;
11954 11947 int rval;
11955 11948
11956 11949 ASSERT(ssc != NULL);
11957 11950 un = ssc->ssc_un;
11958 11951 ASSERT(un != NULL);
11959 11952 uscmd = ssc->ssc_uscsi_cmd;
11960 11953 ASSERT(uscmd != NULL);
11961 11954 ASSERT(!mutex_owned(SD_MUTEX(un)));
11962 11955 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
11963 11956 /*
11964 11957		 * If we enter here, it indicates that the previous uscsi
11965 11958 * command has not been processed by sd_ssc_assessment.
11966 11959 * This is violating our rules of FMA telemetry processing.
11967 11960 * We should print out this message and the last undisposed
11968 11961 * uscsi command.
11969 11962 */
11970 11963 if (uscmd->uscsi_cdb != NULL) {
11971 11964 SD_INFO(SD_LOG_SDTEST, un,
11972 11965 "sd_ssc_send is missing the alternative "
11973 11966 "sd_ssc_assessment when running command 0x%x.\n",
11974 11967 uscmd->uscsi_cdb[0]);
11975 11968 }
11976 11969 /*
11977 11970 * Set the ssc_flags to SSC_FLAGS_UNKNOWN, which should be
11978 11971 * the initial status.
11979 11972 */
11980 11973 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
11981 11974 }
11982 11975
11983 11976 /*
11984 11977	 * We need to make sure each sd_ssc_send is followed by an
11985 11978	 * sd_ssc_assessment, to avoid missing FMA telemetry.
11986 11979 */
11987 11980 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT;
11988 11981
11989 11982 /*
11990 11983 * if USCSI_PMFAILFAST is set and un is in low power, fail the
11991 11984 * command immediately.
11992 11985 */
11993 11986 mutex_enter(SD_MUTEX(un));
11994 11987 mutex_enter(&un->un_pm_mutex);
11995 11988 if ((uscmd->uscsi_flags & USCSI_PMFAILFAST) &&
11996 11989 SD_DEVICE_IS_IN_LOW_POWER(un)) {
11997 11990		SD_TRACE(SD_LOG_IO, un, "sd_ssc_send: "
11998 11991 "un:0x%p is in low power\n", un);
11999 11992 mutex_exit(&un->un_pm_mutex);
12000 11993 mutex_exit(SD_MUTEX(un));
12001 11994 return (ECANCELED);
12002 11995 }
12003 11996 mutex_exit(&un->un_pm_mutex);
12004 11997 mutex_exit(SD_MUTEX(un));
12005 11998
12006 11999 #ifdef SDDEBUG
12007 12000 switch (dataspace) {
12008 12001 case UIO_USERSPACE:
12009 12002 SD_TRACE(SD_LOG_IO, un,
12010 12003 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un);
12011 12004 break;
12012 12005 case UIO_SYSSPACE:
12013 12006 SD_TRACE(SD_LOG_IO, un,
12014 12007 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un);
12015 12008 break;
12016 12009 default:
12017 12010 SD_TRACE(SD_LOG_IO, un,
12018 12011 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un);
12019 12012 break;
12020 12013 }
12021 12014 #endif
12022 12015
12023 12016 rval = scsi_uscsi_copyin((intptr_t)incmd, flag,
12024 12017 SD_ADDRESS(un), &uscmd);
12025 12018 if (rval != 0) {
12026 12019		SD_TRACE(SD_LOG_IO, un, "sd_ssc_send: "
12027 12020		    "scsi_uscsi_copyin failed\n");
12028 12021 return (rval);
12029 12022 }
12030 12023
12031 12024 if ((uscmd->uscsi_cdb != NULL) &&
12032 12025 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) {
12033 12026 mutex_enter(SD_MUTEX(un));
12034 12027 un->un_f_format_in_progress = TRUE;
12035 12028 mutex_exit(SD_MUTEX(un));
12036 12029 format = 1;
12037 12030 }
12038 12031
12039 12032 /*
12040 12033	 * Fill the sd_uscsi_info struct (allocated in sd_ssc_init) with the info
12041 12034 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
12042 12035 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
12043 12036 * since we allocate the buf here in this function, we do not
12044 12037 * need to preserve the prior contents of b_private.
12045 12038 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
12046 12039 */
12047 12040 uip = ssc->ssc_uscsi_info;
12048 12041 uip->ui_flags = path_flag;
12049 12042 uip->ui_cmdp = uscmd;
12050 12043
12051 12044 /*
12052 12045 * Commands sent with priority are intended for error recovery
12053 12046 * situations, and do not have retries performed.
12054 12047 */
12055 12048 if (path_flag == SD_PATH_DIRECT_PRIORITY) {
12056 12049 uscmd->uscsi_flags |= USCSI_DIAGNOSE;
12057 12050 }
12058 12051 uscmd->uscsi_flags &= ~USCSI_NOINTR;
12059 12052
12060 12053 dev = SD_GET_DEV(un);
12061 12054 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd,
12062 12055 sd_uscsi_strategy, NULL, uip);
12063 12056
12064 12057 /*
12065 12058 * mark ssc_flags right after handle_cmd to make sure
12066 12059 * the uscsi has been sent
12067 12060 */
12068 12061 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED;
12069 12062
12070 12063 #ifdef SDDEBUG
12071 12064 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: "
12072 12065 "uscsi_status: 0x%02x uscsi_resid:0x%x\n",
12073 12066 uscmd->uscsi_status, uscmd->uscsi_resid);
12074 12067 if (uscmd->uscsi_bufaddr != NULL) {
12075 12068 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: "
12076 12069 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n",
12077 12070 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen);
12078 12071 if (dataspace == UIO_SYSSPACE) {
12079 12072 SD_DUMP_MEMORY(un, SD_LOG_IO,
12080 12073 "data", (uchar_t *)uscmd->uscsi_bufaddr,
12081 12074 uscmd->uscsi_buflen, SD_LOG_HEX);
12082 12075 }
12083 12076 }
12084 12077 #endif
12085 12078
12086 12079 if (format == 1) {
12087 12080 mutex_enter(SD_MUTEX(un));
12088 12081 un->un_f_format_in_progress = FALSE;
12089 12082 mutex_exit(SD_MUTEX(un));
12090 12083 }
12091 12084
12092 12085 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd);
12093 12086
12094 12087 return (rval);
12095 12088 }
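/*
 * Editor's note: a minimal sketch of the sd_ssc_t life cycle implied by
 * the pairing rules above, assuming an attached un and the driver's
 * sd_ssc_init/sd_ssc_fini entry points. The TEST UNIT READY payload is
 * illustrative only and is not taken from this file; error handling is
 * reduced to the two assessment calls.
 */
	sd_ssc_t	*ssc;
	struct uscsi_cmd ucmd;
	union scsi_cdb	cdb;
	int		rval;

	bzero(&ucmd, sizeof (ucmd));
	bzero(&cdb, sizeof (cdb));
	cdb.scc_cmd = SCMD_TEST_UNIT_READY;	/* hypothetical command */
	ucmd.uscsi_cdb = (caddr_t)&cdb;
	ucmd.uscsi_cdblen = CDB_GROUP0;
	ucmd.uscsi_timeout = sd_io_time;

	ssc = sd_ssc_init(un);		/* allocates uscsi_cmd + uscsi_info */
	rval = sd_ssc_send(ssc, &ucmd, FKIOCTL, UIO_SYSSPACE, SD_PATH_DIRECT);
	if (rval == 0)
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
	else
		sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
	sd_ssc_fini(ssc);		/* releases the sd_ssc_t */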
12096 12089
12097 12090 /*
12098 12091 * Function: sd_ssc_print
12099 12092 *
12100 12093 * Description: Print information available to the console.
12101 12094 *
12102 12095 	 * Arguments: ssc - the sd_ssc_t struct that carries the uscsi_cmd
12103 12096 	 *            and sd_uscsi_info.
12104 12097 * sd_severity - log level.
12105 12098 * Context: Kernel thread or interrupt context.
12106 12099 */
12107 12100 static void
12108 12101 sd_ssc_print(sd_ssc_t *ssc, int sd_severity)
12109 12102 {
12110 12103 struct uscsi_cmd *ucmdp;
12111 12104 struct scsi_device *devp;
12112 12105 dev_info_t *devinfo;
12113 12106 uchar_t *sensep;
12114 12107 int senlen;
12115 12108 union scsi_cdb *cdbp;
12116 12109 uchar_t com;
12117 12110 extern struct scsi_key_strings scsi_cmds[];
12118 12111
12119 12112 ASSERT(ssc != NULL);
12120 12113 ASSERT(ssc->ssc_un != NULL);
12121 12114
12122 12115 if (SD_FM_LOG(ssc->ssc_un) != SD_FM_LOG_EREPORT)
12123 12116 return;
12124 12117 ucmdp = ssc->ssc_uscsi_cmd;
12125 12118 devp = SD_SCSI_DEVP(ssc->ssc_un);
12126 12119 devinfo = SD_DEVINFO(ssc->ssc_un);
12127 12120 ASSERT(ucmdp != NULL);
12128 12121 ASSERT(devp != NULL);
12129 12122 ASSERT(devinfo != NULL);
12130 12123 sensep = (uint8_t *)ucmdp->uscsi_rqbuf;
12131 12124 senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid;
12132 12125 cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb;
12133 12126
12134 12127 	/* In certain cases (like DOORLOCK), the cdb could be NULL. */
12135 12128 if (cdbp == NULL)
12136 12129 return;
12137 12130 	/* We don't print a log message if no sense data is available. */
12138 12131 if (senlen == 0)
12139 12132 sensep = NULL;
12140 12133 com = cdbp->scc_cmd;
12141 12134 scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com,
12142 12135 scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL);
12143 12136 }
12144 12137
12145 12138 /*
12146 12139 * Function: sd_ssc_assessment
12147 12140 *
12148 12141 * Description: We use this function to make an assessment at the point
12149 12142 	 *              where the SD driver may encounter a potential error.
12150 12143 *
12151 12144 	 * Arguments: ssc - the sd_ssc_t struct that carries the uscsi_cmd
12152 12145 	 *            and sd_uscsi_info.
12153 12146 * tp_assess - a hint of strategy for ereport posting.
12154 12147 * Possible values of tp_assess include:
12155 12148 * SD_FMT_IGNORE - we don't post any ereport because we're
12156 12149 * sure that it is ok to ignore the underlying problems.
12157 12150 	 *                  SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now,
12158 12151 	 *                  but it might not be correct to ignore the underlying hardware
12159 12152 	 *                  error.
12160 12153 	 *                  SD_FMT_STATUS_CHECK - we will post an ereport with the
12161 12154 	 *                  payload driver-assessment of value "fail" or
12162 12155 	 *                  "fatal" (depending on what information we have here). This
12163 12156 	 *                  assessment value is usually set when the SD driver thinks a
12164 12157 	 *                  potential error has occurred (typically, when the SCSI
12165 12158 	 *                  command returns EIO).
12166 12159 	 *                  SD_FMT_STANDARD - we will post an ereport with the payload
12167 12160 	 *                  driver-assessment of value "info". This assessment value is
12168 12161 	 *                  set when the SCSI command completes successfully with sense
12169 12162 	 *                  data sent back.
12170 12163 *
12171 12164 * Context: Kernel thread.
12172 12165 */
12173 12166 static void
12174 12167 sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess)
12175 12168 {
12176 12169 int senlen = 0;
12177 12170 struct uscsi_cmd *ucmdp = NULL;
12178 12171 struct sd_lun *un;
12179 12172
12180 12173 ASSERT(ssc != NULL);
12181 12174 un = ssc->ssc_un;
12182 12175 ASSERT(un != NULL);
12183 12176 ucmdp = ssc->ssc_uscsi_cmd;
12184 12177 ASSERT(ucmdp != NULL);
12185 12178
12186 12179 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
12187 12180 ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT;
12188 12181 } else {
12189 12182 /*
12190 12183 		 * If we enter here, it indicates a wrong calling
12191 12184 		 * sequence of sd_ssc_send and sd_ssc_assessment;
12192 12185 		 * the two should always be called in pairs, otherwise
12193 12186 		 * FMA telemetry may be lost.
12194 12187 */
12195 12188 if (ucmdp->uscsi_cdb != NULL) {
12196 12189 SD_INFO(SD_LOG_SDTEST, un,
12197 12190 "sd_ssc_assessment is missing the "
12198 12191 			    "matching sd_ssc_send when running 0x%x, "
12199 12192 			    "or there are superfluous sd_ssc_assessment calls "
12200 12193 			    "for the same sd_ssc_send.\n",
12201 12194 ucmdp->uscsi_cdb[0]);
12202 12195 }
12203 12196 /*
12204 12197 * Set the ssc_flags to the initial value to avoid passing
12205 12198 * down dirty flags to the following sd_ssc_send function.
12206 12199 */
12207 12200 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12208 12201 return;
12209 12202 }
12210 12203
12211 12204 /*
12212 12205 * Only handle an issued command which is waiting for assessment.
12213 12206 * A command which is not issued will not have
12214 12207 	 * SSC_FLAGS_INVALID_DATA set, so it's OK to just return here.
12215 12208 */
12216 12209 if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) {
12217 12210 sd_ssc_print(ssc, SCSI_ERR_INFO);
12218 12211 return;
12219 12212 } else {
12220 12213 /*
12221 12214 		 * For an issued command, we should clear this flag so
12222 12215 		 * that the sd_ssc_t structure can be reused across
12223 12216 		 * multiple uscsi commands.
12224 12217 */
12225 12218 ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED;
12226 12219 }
12227 12220
12228 12221 /*
12229 12222 	 * We will not deal with non-retryable (USCSI_DIAGNOSE flag set)
12230 12223 	 * commands here, and we should clear ssc_flags before returning.
12231 12224 */
12232 12225 if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) {
12233 12226 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12234 12227 return;
12235 12228 }
12236 12229
12237 12230 switch (tp_assess) {
12238 12231 case SD_FMT_IGNORE:
12239 12232 case SD_FMT_IGNORE_COMPROMISE:
12240 12233 break;
12241 12234 case SD_FMT_STATUS_CHECK:
12242 12235 /*
12243 12236 		 * For a failed command (including a command that succeeded
12244 12237 		 * but returned invalid data).
12245 12238 */
12246 12239 sd_ssc_post(ssc, SD_FM_DRV_FATAL);
12247 12240 break;
12248 12241 case SD_FMT_STANDARD:
12249 12242 /*
12250 12243 		 * Always for commands that succeeded, possibly with sense
12251 12244 		 * data sent back.
12252 12245 * Limitation:
12253 12246 * We can only handle a succeeded command with sense
12254 12247 * data sent back when auto-request-sense is enabled.
12255 12248 */
12256 12249 senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen -
12257 12250 ssc->ssc_uscsi_cmd->uscsi_rqresid;
12258 12251 if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) &&
12259 12252 (un->un_f_arq_enabled == TRUE) &&
12260 12253 senlen > 0 &&
12261 12254 ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) {
12262 12255 sd_ssc_post(ssc, SD_FM_DRV_NOTICE);
12263 12256 }
12264 12257 break;
12265 12258 default:
12266 12259 /*
12267 12260 		 * There should be no other type of assessment.
12268 12261 */
12269 12262 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
12270 12263 "sd_ssc_assessment got wrong "
12271 12264 "sd_type_assessment %d.\n", tp_assess);
12272 12265 break;
12273 12266 }
12274 12267 /*
12275 12268 	 * Clear ssc_flags before returning.
12276 12269 */
12277 12270 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12278 12271 }
12279 12272
12280 12273 /*
12281 12274 * Function: sd_ssc_post
12282 12275 *
12283 12276 	 * Description: 1. Read the driver property to get the fm-scsi-log flag.
12284 12277 	 *              2. Print a log message if fm_log_capable is non-zero.
12285 12278 	 *              3. Call sd_ssc_ereport_post to post an ereport if possible.
12286 12279 *
12287 12280 * Context: May be called from kernel thread or interrupt context.
12288 12281 */
12289 12282 static void
12290 12283 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess)
12291 12284 {
12292 12285 struct sd_lun *un;
12293 12286 int sd_severity;
12294 12287
12295 12288 ASSERT(ssc != NULL);
12296 12289 un = ssc->ssc_un;
12297 12290 ASSERT(un != NULL);
12298 12291
12299 12292 /*
12300 12293 	 * We may enter here from sd_ssc_assessment (for a USCSI command)
12301 12294 	 * or directly from sdintr context.
12302 12295 	 * We don't handle non-disk drives (CD-ROM, removable media).
12303 12296 	 * Clear ssc_flags before returning in case we've set
12304 12297 	 * SSC_FLAGS_INVALID_XXX, which should be skipped for a non-disk
12305 12298 	 * device.
12306 12299 */
12307 12300 if (ISCD(un) || un->un_f_has_removable_media) {
12308 12301 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12309 12302 return;
12310 12303 }
12311 12304
12312 12305 switch (sd_assess) {
12313 12306 case SD_FM_DRV_FATAL:
12314 12307 sd_severity = SCSI_ERR_FATAL;
12315 12308 break;
12316 12309 case SD_FM_DRV_RECOVERY:
12317 12310 sd_severity = SCSI_ERR_RECOVERED;
12318 12311 break;
12319 12312 case SD_FM_DRV_RETRY:
12320 12313 sd_severity = SCSI_ERR_RETRYABLE;
12321 12314 break;
12322 12315 case SD_FM_DRV_NOTICE:
12323 12316 sd_severity = SCSI_ERR_INFO;
12324 12317 break;
12325 12318 default:
12326 12319 sd_severity = SCSI_ERR_UNKNOWN;
12327 12320 }
12328 12321 /* print log */
12329 12322 sd_ssc_print(ssc, sd_severity);
12330 12323
12331 12324 /* always post ereport */
12332 12325 sd_ssc_ereport_post(ssc, sd_assess);
12333 12326 }
12334 12327
12335 12328 /*
12336 12329 * Function: sd_ssc_set_info
12337 12330 *
12338 12331 	 * Description: Mark ssc_flags and set ssc_info, which will be the
12339 12332 	 *              payload of a uderr ereport. This function causes
12340 12333 	 *              sd_ssc_ereport_post to post a uderr ereport only.
12341 12334 * Besides, when ssc_flags == SSC_FLAGS_INVALID_DATA(USCSI),
12342 12335 * the function will also call SD_ERROR or scsi_log for a
12343 12336 * CDROM/removable-media/DDI_FM_NOT_CAPABLE device.
12344 12337 *
12345 12338 	 * Arguments: ssc - the sd_ssc_t struct that carries the uscsi_cmd
12346 12339 	 *            and sd_uscsi_info.
12347 12340 * ssc_flags - indicate the sub-category of a uderr.
12348 12341 * comp - this argument is meaningful only when
12349 12342 * ssc_flags == SSC_FLAGS_INVALID_DATA, and its possible
12350 12343 * values include:
12351 12344 * > 0, SD_ERROR is used with comp as the driver logging
12352 12345 * component;
12353 12346 	 *            = 0, scsi_log is used to log error telemetry;
12354 12347 * < 0, no log available for this telemetry.
12355 12348 *
12356 12349 * Context: Kernel thread or interrupt context
12357 12350 */
12358 12351 static void
12359 12352 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp, const char *fmt, ...)
12360 12353 {
12361 12354 va_list ap;
12362 12355
12363 12356 ASSERT(ssc != NULL);
12364 12357 ASSERT(ssc->ssc_un != NULL);
12365 12358
12366 12359 ssc->ssc_flags |= ssc_flags;
12367 12360 va_start(ap, fmt);
12368 12361 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap);
12369 12362 va_end(ap);
12370 12363
12371 12364 /*
12372 12365 * If SSC_FLAGS_INVALID_DATA is set, it should be a uscsi command
12373 12366 * with invalid data sent back. For non-uscsi command, the
12374 12367 * following code will be bypassed.
12375 12368 */
12376 12369 if (ssc_flags & SSC_FLAGS_INVALID_DATA) {
12377 12370 if (SD_FM_LOG(ssc->ssc_un) == SD_FM_LOG_NSUP) {
12378 12371 /*
12379 12372 			 * If the error belongs to a certain component and we
12380 12373 * do not want it to show up on the console, we
12381 12374 * will use SD_ERROR, otherwise scsi_log is
12382 12375 * preferred.
12383 12376 */
12384 12377 if (comp > 0) {
12385 12378 SD_ERROR(comp, ssc->ssc_un, ssc->ssc_info);
12386 12379 } else if (comp == 0) {
12387 12380 scsi_log(SD_DEVINFO(ssc->ssc_un), sd_label,
12388 12381 CE_WARN, ssc->ssc_info);
12389 12382 }
12390 12383 }
12391 12384 }
12392 12385 }
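/*
 * Editor's note: a hedged illustration of the sd_ssc_set_info() usage
 * described above; the mode-page length check and both length variables
 * are hypothetical, not code from this file. A comp of 0 routes the
 * message through scsi_log, per the argument description.
 */
	if (page_length != expected_length) {
		sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0,
		    "mode page length 0x%x does not match expected 0x%x",
		    page_length, expected_length);
	}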
12393 12386
12394 12387 /*
12395 12388 * Function: sd_buf_iodone
12396 12389 *
12397 12390 * Description: Frees the sd_xbuf & returns the buf to its originator.
12398 12391 *
12399 12392 * Context: May be called from interrupt context.
12400 12393 */
12401 12394 /* ARGSUSED */
12402 12395 static void
12403 12396 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp)
12404 12397 {
12405 12398 struct sd_xbuf *xp;
12406 12399
12407 12400 ASSERT(un != NULL);
12408 12401 ASSERT(bp != NULL);
12409 12402 ASSERT(!mutex_owned(SD_MUTEX(un)));
12410 12403
12411 12404 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n");
12412 12405
12413 12406 xp = SD_GET_XBUF(bp);
12414 12407 ASSERT(xp != NULL);
12415 12408
12416 12409 /* xbuf is gone after this */
12417 12410 if (ddi_xbuf_done(bp, un->un_xbuf_attr)) {
12418 12411 mutex_enter(SD_MUTEX(un));
12419 12412
12420 12413 /*
12421 12414 		 * Grab the time when the cmd completed.
12422 12415 		 * This is used for determining if the system has been
12423 12416 		 * idle long enough to report it as idle to the PM framework.
12424 12417 		 * This lowers the overhead, and therefore improves
12425 12418 		 * performance per I/O operation.
12426 12419 */
12427 12420 un->un_pm_idle_time = gethrtime();
12428 12421
12429 12422 un->un_ncmds_in_driver--;
12430 12423 ASSERT(un->un_ncmds_in_driver >= 0);
12431 12424 SD_INFO(SD_LOG_IO, un,
12432 12425 "sd_buf_iodone: un_ncmds_in_driver = %ld\n",
12433 12426 un->un_ncmds_in_driver);
12434 12427
12435 12428 mutex_exit(SD_MUTEX(un));
12436 12429 }
12437 12430
12438 12431 biodone(bp); /* bp is gone after this */
12439 12432
12440 12433 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n");
12441 12434 }
12442 12435
12443 12436
12444 12437 /*
12445 12438 * Function: sd_uscsi_iodone
12446 12439 *
12447 12440 * Description: Frees the sd_xbuf & returns the buf to its originator.
12448 12441 *
12449 12442 * Context: May be called from interrupt context.
12450 12443 */
12451 12444 /* ARGSUSED */
12452 12445 static void
12453 12446 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
12454 12447 {
12455 12448 struct sd_xbuf *xp;
12456 12449
12457 12450 ASSERT(un != NULL);
12458 12451 ASSERT(bp != NULL);
12459 12452
12460 12453 xp = SD_GET_XBUF(bp);
12461 12454 ASSERT(xp != NULL);
12462 12455 ASSERT(!mutex_owned(SD_MUTEX(un)));
12463 12456
12464 12457 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n");
12465 12458
12466 12459 bp->b_private = xp->xb_private;
12467 12460
12468 12461 mutex_enter(SD_MUTEX(un));
12469 12462
12470 12463 /*
12471 12464 	 * Grab the time when the cmd completed.
12472 12465 	 * This is used for determining if the system has been
12473 12466 	 * idle long enough to report it as idle to the PM framework.
12474 12467 	 * This lowers the overhead, and therefore improves
12475 12468 	 * performance per I/O operation.
12476 12469 */
12477 12470 un->un_pm_idle_time = gethrtime();
12478 12471
12479 12472 un->un_ncmds_in_driver--;
12480 12473 ASSERT(un->un_ncmds_in_driver >= 0);
12481 12474 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
12482 12475 un->un_ncmds_in_driver);
12483 12476
12484 12477 mutex_exit(SD_MUTEX(un));
12485 12478
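	/*
	 * Editor's note (assumption about the matching allocation): the
	 * uscsi xbuf is sized at allocation time for the caller's
	 * request-sense buffer, so when uscsi_rqlen exceeds the default
	 * SENSE_LENGTH the trailing sense area was grown to
	 * MAX_SENSE_LENGTH, and the free below must mirror that same
	 * size arithmetic.
	 */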
12486 12479 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen >
12487 12480 SENSE_LENGTH) {
12488 12481 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH +
12489 12482 MAX_SENSE_LENGTH);
12490 12483 } else {
12491 12484 kmem_free(xp, sizeof (struct sd_xbuf));
12492 12485 }
12493 12486
12494 12487 biodone(bp);
12495 12488
12496 12489 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n");
12497 12490 }
12498 12491
12499 12492
12500 12493 /*
12501 12494 * Function: sd_mapblockaddr_iostart
12502 12495 *
12503 12496 * Description: Verify request lies within the partition limits for
12504 12497 * the indicated minor device. Issue "overrun" buf if
12505 12498 * request would exceed partition range. Converts
12506 12499 * partition-relative block address to absolute.
12507 12500 *
12508 12501 * Upon exit of this function:
12509 12502 	 *              1. I/O is aligned:
12510 12503 	 *                 xp->xb_blkno represents the absolute sector address.
12511 12504 	 *              2. I/O is misaligned:
12512 12505 	 *                 xp->xb_blkno represents the absolute logical block address
12513 12506 	 *                 based on DEV_BSIZE. The logical block address will be
12514 12507 	 *                 converted to the physical sector address in
12515 12508 	 *                 sd_mapblocksize_iostart.
12516 12509 	 *              3. I/O is misaligned but is aligned in the "overrun" buf:
12517 12510 	 *                 xp->xb_blkno represents the absolute logical block address
12518 12511 	 *                 based on DEV_BSIZE. The logical block address will be
12519 12512 	 *                 converted to the physical sector address in
12520 12513 	 *                 sd_mapblocksize_iostart, but no RMW will be issued in this case.
12521 12514 *
12522 12515 * Context: Can sleep
12523 12516 *
12524 12517 * Issues: This follows what the old code did, in terms of accessing
12525 12518 * some of the partition info in the unit struct without holding
12526 12519 	 *              the mutex. This is a general issue: if the partition info
12527 12520 * can be altered while IO is in progress... as soon as we send
12528 12521 * a buf, its partitioning can be invalid before it gets to the
12529 12522 * device. Probably the right fix is to move partitioning out
12530 12523 * of the driver entirely.
12531 12524 */
12532 12525
12533 12526 static void
12534 12527 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
12535 12528 {
12536 12529 diskaddr_t nblocks; /* #blocks in the given partition */
12537 12530 daddr_t blocknum; /* Block number specified by the buf */
12538 12531 size_t requested_nblocks;
12539 12532 size_t available_nblocks;
12540 12533 int partition;
12541 12534 diskaddr_t partition_offset;
12542 12535 struct sd_xbuf *xp;
12543 12536 int secmask = 0, blknomask = 0;
12544 12537 ushort_t is_aligned = TRUE;
12545 12538
12546 12539 ASSERT(un != NULL);
12547 12540 ASSERT(bp != NULL);
12548 12541 ASSERT(!mutex_owned(SD_MUTEX(un)));
12549 12542
12550 12543 SD_TRACE(SD_LOG_IO_PARTITION, un,
12551 12544 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);
12552 12545
12553 12546 xp = SD_GET_XBUF(bp);
12554 12547 ASSERT(xp != NULL);
12555 12548
12556 12549 /*
12557 12550 * If the geometry is not indicated as valid, attempt to access
12558 12551 * the unit & verify the geometry/label. This can be the case for
12559 12552 	 * removable-media devices, or if the device was opened in
12560 12553 * NDELAY/NONBLOCK mode.
12561 12554 */
12562 12555 partition = SDPART(bp->b_edev);
12563 12556
12564 12557 if (!SD_IS_VALID_LABEL(un)) {
12565 12558 sd_ssc_t *ssc;
12566 12559 /*
12567 12560 		 * Initialize sd_ssc_t for internal uscsi commands.
12568 12561 		 * To avoid a potential performance issue, we
12569 12562 		 * allocate memory only if the label is invalid.
12570 12563 */
12571 12564 ssc = sd_ssc_init(un);
12572 12565
12573 12566 if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) {
12574 12567 /*
12575 12568 * For removable devices it is possible to start an
12576 12569 * I/O without a media by opening the device in nodelay
12577 12570 * mode. Also for writable CDs there can be many
12578 12571 			 * scenarios where there is no geometry yet but the volume
12579 12572 			 * manager is trying to issue a read() just because
12580 12573 			 * it can see the TOC on the CD. So do not print a message
12581 12574 * for removables.
12582 12575 */
12583 12576 if (!un->un_f_has_removable_media) {
12584 12577 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
12585 12578 "i/o to invalid geometry\n");
12586 12579 }
12587 12580 bioerror(bp, EIO);
12588 12581 bp->b_resid = bp->b_bcount;
12589 12582 SD_BEGIN_IODONE(index, un, bp);
12590 12583
12591 12584 sd_ssc_fini(ssc);
12592 12585 return;
12593 12586 }
12594 12587 sd_ssc_fini(ssc);
12595 12588 }
12596 12589
12597 12590 nblocks = 0;
12598 12591 (void) cmlb_partinfo(un->un_cmlbhandle, partition,
12599 12592 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT);
12600 12593
12601 12594 if (un->un_f_enable_rmw) {
12602 12595 blknomask = (un->un_phy_blocksize / DEV_BSIZE) - 1;
12603 12596 secmask = un->un_phy_blocksize - 1;
12604 12597 } else {
12605 12598 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
12606 12599 secmask = un->un_tgt_blocksize - 1;
12607 12600 }
12608 12601
12609 12602 if ((bp->b_lblkno & (blknomask)) || (bp->b_bcount & (secmask))) {
12610 12603 is_aligned = FALSE;
12611 12604 }
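	/*
	 * Editor's note: a worked example of the mask arithmetic above,
	 * assuming RMW is enabled with a 4096-byte physical block size
	 * and DEV_BSIZE of 512: blknomask = (4096 / 512) - 1 = 7 and
	 * secmask = 4095, so a request with b_lblkno = 10 (10 & 7 != 0)
	 * or b_bcount = 2048 (2048 & 4095 != 0) is treated as misaligned.
	 */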
12612 12605
12613 12606 if (!(NOT_DEVBSIZE(un)) || un->un_f_enable_rmw) {
12614 12607 /*
12615 12608 * If I/O is aligned, no need to involve RMW(Read Modify Write)
12616 12609 * Convert the logical block number to target's physical sector
12617 12610 * number.
12618 12611 */
12619 12612 if (is_aligned) {
12620 12613 xp->xb_blkno = SD_SYS2TGTBLOCK(un, xp->xb_blkno);
12621 12614 } else {
12622 12615 /*
12623 12616 * There is no RMW if we're just reading, so don't
12624 12617 * warn or error out because of it.
12625 12618 */
12626 12619 if (bp->b_flags & B_READ) {
12627 12620 /*EMPTY*/
12628 12621 } else if (!un->un_f_enable_rmw &&
12629 12622 un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) {
12630 12623 bp->b_flags |= B_ERROR;
12631 12624 goto error_exit;
12632 12625 } else if (un->un_f_rmw_type == SD_RMW_TYPE_DEFAULT) {
12633 12626 mutex_enter(SD_MUTEX(un));
12634 12627 if (!un->un_f_enable_rmw &&
12635 12628 un->un_rmw_msg_timeid == NULL) {
12636 12629 scsi_log(SD_DEVINFO(un), sd_label,
12637 12630 CE_WARN, "I/O request is not "
12638 12631 					    "aligned with the %d-byte disk sector size. "
12639 12632 					    "It is handled through Read Modify "
12640 12633 					    "Write, but the performance is "
12641 12634 					    "very low.\n",
12642 12635 un->un_tgt_blocksize);
12643 12636 un->un_rmw_msg_timeid =
12644 12637 timeout(sd_rmw_msg_print_handler,
12645 12638 un, SD_RMW_MSG_PRINT_TIMEOUT);
12646 12639 } else {
12647 12640 				un->un_rmw_incre_count++;
12648 12641 }
12649 12642 mutex_exit(SD_MUTEX(un));
12650 12643 }
12651 12644
12652 12645 nblocks = SD_TGT2SYSBLOCK(un, nblocks);
12653 12646 partition_offset = SD_TGT2SYSBLOCK(un,
12654 12647 partition_offset);
12655 12648 }
12656 12649 }
12657 12650
12658 12651 /*
12659 12652 * blocknum is the starting block number of the request. At this
12660 12653 * point it is still relative to the start of the minor device.
12661 12654 */
12662 12655 blocknum = xp->xb_blkno;
12663 12656
12664 12657 /*
12665 12658 * Legacy: If the starting block number is one past the last block
12666 12659 * in the partition, do not set B_ERROR in the buf.
12667 12660 */
12668 12661 if (blocknum == nblocks) {
12669 12662 goto error_exit;
12670 12663 }
12671 12664
12672 12665 /*
12673 12666 * Confirm that the first block of the request lies within the
12674 12667 * partition limits. Also the requested number of bytes must be
12675 12668 * a multiple of the system block size.
12676 12669 */
12677 12670 if ((blocknum < 0) || (blocknum >= nblocks) ||
12678 12671 ((bp->b_bcount & (DEV_BSIZE - 1)) != 0)) {
12679 12672 bp->b_flags |= B_ERROR;
12680 12673 goto error_exit;
12681 12674 }
12682 12675
12683 12676 /*
12684 12677 	 * If the requested # blocks exceeds the available # blocks, that
12685 12678 * is an overrun of the partition.
12686 12679 */
12687 12680 if ((!NOT_DEVBSIZE(un)) && is_aligned) {
12688 12681 requested_nblocks = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
12689 12682 } else {
12690 12683 requested_nblocks = SD_BYTES2SYSBLOCKS(bp->b_bcount);
12691 12684 }
12692 12685
12693 12686 available_nblocks = (size_t)(nblocks - blocknum);
12694 12687 ASSERT(nblocks >= blocknum);
12695 12688
12696 12689 if (requested_nblocks > available_nblocks) {
12697 12690 size_t resid;
12698 12691
12699 12692 /*
12700 12693 * Allocate an "overrun" buf to allow the request to proceed
12701 12694 * for the amount of space available in the partition. The
12702 12695 * amount not transferred will be added into the b_resid
12703 12696 * when the operation is complete. The overrun buf
12704 12697 * replaces the original buf here, and the original buf
12705 12698 * is saved inside the overrun buf, for later use.
12706 12699 */
12707 12700 if ((!NOT_DEVBSIZE(un)) && is_aligned) {
12708 12701 resid = SD_TGTBLOCKS2BYTES(un,
12709 12702 (offset_t)(requested_nblocks - available_nblocks));
12710 12703 } else {
12711 12704 resid = SD_SYSBLOCKS2BYTES(
12712 12705 (offset_t)(requested_nblocks - available_nblocks));
12713 12706 }
12714 12707
12715 12708 size_t count = bp->b_bcount - resid;
12716 12709 /*
12717 12710 		 * Note: count is an unsigned entity, thus it can NEVER
12718 12711 		 * be less than 0, so we ASSERT that the original values
12719 12712 		 * are correct.
12720 12713 */
12721 12714 ASSERT(bp->b_bcount >= resid);
12722 12715
12723 12716 bp = sd_bioclone_alloc(bp, count, blocknum,
12724 12717 (int (*)(struct buf *)) sd_mapblockaddr_iodone);
12725 12718 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */
12726 12719 ASSERT(xp != NULL);
12727 12720 }
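	/*
	 * Editor's note: a worked overrun example, assuming an aligned
	 * request: with nblocks = 1000, blocknum = 990 and
	 * requested_nblocks = 20, available_nblocks = 10, so an overrun
	 * buf is cloned for the first 10 blocks and the bytes of the
	 * remaining 10 blocks are reported back through b_resid when
	 * the transfer completes.
	 */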
12728 12721
12729 12722 /* At this point there should be no residual for this buf. */
12730 12723 ASSERT(bp->b_resid == 0);
12731 12724
12732 12725 /* Convert the block number to an absolute address. */
12733 12726 xp->xb_blkno += partition_offset;
12734 12727
12735 12728 SD_NEXT_IOSTART(index, un, bp);
12736 12729
12737 12730 SD_TRACE(SD_LOG_IO_PARTITION, un,
12738 12731 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp);
12739 12732
12740 12733 return;
12741 12734
12742 12735 error_exit:
12743 12736 bp->b_resid = bp->b_bcount;
12744 12737 SD_BEGIN_IODONE(index, un, bp);
12745 12738 SD_TRACE(SD_LOG_IO_PARTITION, un,
12746 12739 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp);
12747 12740 }
12748 12741
12749 12742
12750 12743 /*
12751 12744 * Function: sd_mapblockaddr_iodone
12752 12745 *
12753 12746 * Description: Completion-side processing for partition management.
12754 12747 *
12755 12748 * Context: May be called under interrupt context
12756 12749 */
12757 12750
12758 12751 static void
12759 12752 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp)
12760 12753 {
12761 12754 /* int partition; */ /* Not used, see below. */
12762 12755 ASSERT(un != NULL);
12763 12756 ASSERT(bp != NULL);
12764 12757 ASSERT(!mutex_owned(SD_MUTEX(un)));
12765 12758
12766 12759 SD_TRACE(SD_LOG_IO_PARTITION, un,
12767 12760 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp);
12768 12761
12769 12762 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) {
12770 12763 /*
12771 12764 * We have an "overrun" buf to deal with...
12772 12765 */
12773 12766 struct sd_xbuf *xp;
12774 12767 struct buf *obp; /* ptr to the original buf */
12775 12768
12776 12769 xp = SD_GET_XBUF(bp);
12777 12770 ASSERT(xp != NULL);
12778 12771
12779 12772 /* Retrieve the pointer to the original buf */
12780 12773 obp = (struct buf *)xp->xb_private;
12781 12774 ASSERT(obp != NULL);
12782 12775
12783 12776 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid);
12784 12777 bioerror(obp, bp->b_error);
12785 12778
12786 12779 sd_bioclone_free(bp);
12787 12780
12788 12781 /*
12789 12782 * Get back the original buf.
12790 12783 * Note that since the restoration of xb_blkno below
12791 12784 * was removed, the sd_xbuf is not needed.
12792 12785 */
12793 12786 bp = obp;
12794 12787 /*
12795 12788 * xp = SD_GET_XBUF(bp);
12796 12789 * ASSERT(xp != NULL);
12797 12790 */
12798 12791 }
12799 12792
12800 12793 /*
12801 12794 	 * Convert xp->xb_blkno back to a minor-device relative value.
12802 12795 * Note: this has been commented out, as it is not needed in the
12803 12796 * current implementation of the driver (ie, since this function
12804 12797 * is at the top of the layering chains, so the info will be
12805 12798 * discarded) and it is in the "hot" IO path.
12806 12799 *
12807 12800 * partition = getminor(bp->b_edev) & SDPART_MASK;
12808 12801 * xp->xb_blkno -= un->un_offset[partition];
12809 12802 */
12810 12803
12811 12804 SD_NEXT_IODONE(index, un, bp);
12812 12805
12813 12806 SD_TRACE(SD_LOG_IO_PARTITION, un,
12814 12807 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp);
12815 12808 }
12816 12809
12817 12810
12818 12811 /*
12819 12812 * Function: sd_mapblocksize_iostart
12820 12813 *
12821 12814 * Description: Convert between system block size (un->un_sys_blocksize)
12822 12815 * and target block size (un->un_tgt_blocksize).
12823 12816 *
12824 12817 * Context: Can sleep to allocate resources.
12825 12818 *
12826 12819 * Assumptions: A higher layer has already performed any partition validation,
12827 12820 * and converted the xp->xb_blkno to an absolute value relative
12828 12821 * to the start of the device.
12829 12822 *
12830 12823 * It is also assumed that the higher layer has implemented
12831 12824 * an "overrun" mechanism for the case where the request would
12832 12825 * read/write beyond the end of a partition. In this case we
12833 12826 * assume (and ASSERT) that bp->b_resid == 0.
12834 12827 *
12835 12828 * Note: The implementation for this routine assumes the target
12836 12829 * block size remains constant between allocation and transport.
12837 12830 */
12838 12831
12839 12832 static void
12840 12833 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp)
12841 12834 {
12842 12835 struct sd_mapblocksize_info *bsp;
12843 12836 struct sd_xbuf *xp;
12844 12837 offset_t first_byte;
12845 12838 daddr_t start_block, end_block;
12846 12839 daddr_t request_bytes;
12847 12840 ushort_t is_aligned = FALSE;
12848 12841
12849 12842 ASSERT(un != NULL);
12850 12843 ASSERT(bp != NULL);
12851 12844 ASSERT(!mutex_owned(SD_MUTEX(un)));
12852 12845 ASSERT(bp->b_resid == 0);
12853 12846
12854 12847 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
12855 12848 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp);
12856 12849
12857 12850 /*
12858 12851 * For a non-writable CD, a write request is an error
12859 12852 */
12860 12853 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) &&
12861 12854 (un->un_f_mmc_writable_media == FALSE)) {
12862 12855 bioerror(bp, EIO);
12863 12856 bp->b_resid = bp->b_bcount;
12864 12857 SD_BEGIN_IODONE(index, un, bp);
12865 12858 return;
12866 12859 }
12867 12860
12868 12861 /*
12869 12862 * We do not need a shadow buf if the device is using
12870 12863 * un->un_sys_blocksize as its block size or if bcount == 0.
12871 12864 * In this case there is no layer-private data block allocated.
12872 12865 */
12873 12866 if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) ||
12874 12867 (bp->b_bcount == 0)) {
12875 12868 goto done;
12876 12869 }
12877 12870
12878 12871 #if defined(__i386) || defined(__amd64)
12879 12872 /* We do not support non-block-aligned transfers for ROD devices */
12880 12873 ASSERT(!ISROD(un));
12881 12874 #endif
12882 12875
12883 12876 xp = SD_GET_XBUF(bp);
12884 12877 ASSERT(xp != NULL);
12885 12878
12886 12879 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12887 12880 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n",
12888 12881 un->un_tgt_blocksize, DEV_BSIZE);
12889 12882 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12890 12883 "request start block:0x%x\n", xp->xb_blkno);
12891 12884 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12892 12885 "request len:0x%x\n", bp->b_bcount);
12893 12886
12894 12887 /*
12895 12888 * Allocate the layer-private data area for the mapblocksize layer.
12896 12889 	 * Layers are allowed to use the xb_private member of the sd_xbuf
12897 12890 * struct to store the pointer to their layer-private data block, but
12898 12891 * each layer also has the responsibility of restoring the prior
12899 12892 * contents of xb_private before returning the buf/xbuf to the
12900 12893 * higher layer that sent it.
12901 12894 *
12902 12895 * Here we save the prior contents of xp->xb_private into the
12903 12896 * bsp->mbs_oprivate field of our layer-private data area. This value
12904 12897 * is restored by sd_mapblocksize_iodone() just prior to freeing up
12905 12898 * the layer-private area and returning the buf/xbuf to the layer
12906 12899 * that sent it.
12907 12900 *
12908 12901 * Note that here we use kmem_zalloc for the allocation as there are
12909 12902 * parts of the mapblocksize code that expect certain fields to be
12910 12903 * zero unless explicitly set to a required value.
12911 12904 */
12912 12905 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
12913 12906 bsp->mbs_oprivate = xp->xb_private;
12914 12907 xp->xb_private = bsp;
12915 12908
12916 12909 /*
12917 12910 * This treats the data on the disk (target) as an array of bytes.
12918 12911 * first_byte is the byte offset, from the beginning of the device,
12919 12912 * to the location of the request. This is converted from a
12920 12913 * un->un_sys_blocksize block address to a byte offset, and then back
12921 12914 * to a block address based upon a un->un_tgt_blocksize block size.
12922 12915 *
12923 12916 * xp->xb_blkno should be absolute upon entry into this function,
12924 12917 	 * but it is based upon partitions that use the "system"
12925 12918 * block size. It must be adjusted to reflect the block size of
12926 12919 * the target.
12927 12920 *
12928 12921 * Note that end_block is actually the block that follows the last
12929 12922 * block of the request, but that's what is needed for the computation.
12930 12923 */
12931 12924 first_byte = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
12932 12925 if (un->un_f_enable_rmw) {
12933 12926 start_block = xp->xb_blkno =
12934 12927 (first_byte / un->un_phy_blocksize) *
12935 12928 (un->un_phy_blocksize / DEV_BSIZE);
12936 12929 end_block = ((first_byte + bp->b_bcount +
12937 12930 un->un_phy_blocksize - 1) / un->un_phy_blocksize) *
12938 12931 (un->un_phy_blocksize / DEV_BSIZE);
12939 12932 } else {
12940 12933 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize;
12941 12934 end_block = (first_byte + bp->b_bcount +
12942 12935 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;
12943 12936 }
12944 12937
12945 12938 /* request_bytes is rounded up to a multiple of the target block size */
12946 12939 request_bytes = (end_block - start_block) * un->un_tgt_blocksize;
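	/*
	 * Editor's note: a worked example of the conversion above,
	 * assuming un_tgt_blocksize = 2048 and DEV_BSIZE = 512 without
	 * RMW: for xb_blkno = 5 and b_bcount = 1024, first_byte = 2560,
	 * so start_block = 2560 / 2048 = 1, end_block =
	 * (2560 + 1024 + 2047) / 2048 = 2, and request_bytes =
	 * (2 - 1) * 2048 = 2048.
	 */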
12947 12940
12948 12941 /*
12949 12942 * See if the starting address of the request and the request
12950 12943 * length are aligned on a un->un_tgt_blocksize boundary. If aligned
12951 12944 * then we do not need to allocate a shadow buf to handle the request.
12952 12945 */
12953 12946 if (un->un_f_enable_rmw) {
12954 12947 if (((first_byte % un->un_phy_blocksize) == 0) &&
12955 12948 ((bp->b_bcount % un->un_phy_blocksize) == 0)) {
12956 12949 is_aligned = TRUE;
12957 12950 }
12958 12951 } else {
12959 12952 if (((first_byte % un->un_tgt_blocksize) == 0) &&
12960 12953 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) {
12961 12954 is_aligned = TRUE;
12962 12955 }
12963 12956 }
12964 12957
12965 12958 if ((bp->b_flags & B_READ) == 0) {
12966 12959 /*
12967 12960 * Lock the range for a write operation. An aligned request is
12968 12961 * considered a simple write; otherwise the request must be a
12969 12962 * read-modify-write.
12970 12963 */
12971 12964 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1,
12972 12965 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW);
12973 12966 }
12974 12967
12975 12968 /*
12976 12969 * Alloc a shadow buf if the request is not aligned. Also, this is
12977 12970 * where the READ command is generated for a read-modify-write. (The
12978 12971 * write phase is deferred until after the read completes.)
12979 12972 */
12980 12973 if (is_aligned == FALSE) {
12981 12974
12982 12975 struct sd_mapblocksize_info *shadow_bsp;
12983 12976 struct sd_xbuf *shadow_xp;
12984 12977 struct buf *shadow_bp;
12985 12978
12986 12979 /*
12987 12980 		 * Allocate the shadow buf and its associated xbuf. Note that
12988 12981 * after this call the xb_blkno value in both the original
12989 12982 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the
12990 12983 * same: absolute relative to the start of the device, and
12991 12984 * adjusted for the target block size. The b_blkno in the
12992 12985 * shadow buf will also be set to this value. We should never
12993 12986 * change b_blkno in the original bp however.
12994 12987 *
12995 12988 * Note also that the shadow buf will always need to be a
12996 12989 * READ command, regardless of whether the incoming command
12997 12990 * is a READ or a WRITE.
12998 12991 */
12999 12992 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ,
13000 12993 xp->xb_blkno,
13001 12994 (int (*)(struct buf *)) sd_mapblocksize_iodone);
13002 12995
13003 12996 shadow_xp = SD_GET_XBUF(shadow_bp);
13004 12997
13005 12998 /*
13006 12999 * Allocate the layer-private data for the shadow buf.
13007 13000 * (No need to preserve xb_private in the shadow xbuf.)
13008 13001 */
13009 13002 shadow_xp->xb_private = shadow_bsp =
13010 13003 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
13011 13004
13012 13005 /*
13013 13006 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone
13014 13007 * to figure out where the start of the user data is (based upon
13015 13008 * the system block size) in the data returned by the READ
13016 13009 * command (which will be based upon the target blocksize). Note
13017 13010 * that this is only really used if the request is unaligned.
13018 13011 */
13019 13012 if (un->un_f_enable_rmw) {
13020 13013 bsp->mbs_copy_offset = (ssize_t)(first_byte -
13021 13014 ((offset_t)xp->xb_blkno * un->un_sys_blocksize));
13022 13015 ASSERT((bsp->mbs_copy_offset >= 0) &&
13023 13016 (bsp->mbs_copy_offset < un->un_phy_blocksize));
13024 13017 } else {
13025 13018 bsp->mbs_copy_offset = (ssize_t)(first_byte -
13026 13019 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize));
13027 13020 ASSERT((bsp->mbs_copy_offset >= 0) &&
13028 13021 (bsp->mbs_copy_offset < un->un_tgt_blocksize));
13029 13022 }
13030 13023
13031 13024 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset;
13032 13025
13033 13026 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index;
13034 13027
13035 13028 /* Transfer the wmap (if any) to the shadow buf */
13036 13029 shadow_bsp->mbs_wmp = bsp->mbs_wmp;
13037 13030 bsp->mbs_wmp = NULL;
13038 13031
13039 13032 /*
13040 13033 * The shadow buf goes on from here in place of the
13041 13034 * original buf.
13042 13035 */
13043 13036 shadow_bsp->mbs_orig_bp = bp;
13044 13037 bp = shadow_bp;
13045 13038 }
13046 13039
13047 13040 SD_INFO(SD_LOG_IO_RMMEDIA, un,
13048 13041 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno);
13049 13042 SD_INFO(SD_LOG_IO_RMMEDIA, un,
13050 13043 "sd_mapblocksize_iostart: tgt request len:0x%x\n",
13051 13044 request_bytes);
13052 13045 SD_INFO(SD_LOG_IO_RMMEDIA, un,
13053 13046 	    "sd_mapblocksize_iostart: shadow buf:0x%p\n", bp);
13054 13047
13055 13048 done:
13056 13049 SD_NEXT_IOSTART(index, un, bp);
13057 13050
13058 13051 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
13059 13052 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp);
13060 13053 }
13061 13054
13062 13055
13063 13056 /*
13064 13057 * Function: sd_mapblocksize_iodone
13065 13058 *
13066 13059 * Description: Completion side processing for block-size mapping.
13067 13060 *
13068 13061 * Context: May be called under interrupt context
13069 13062 */
13070 13063
13071 13064 static void
13072 13065 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp)
13073 13066 {
13074 13067 struct sd_mapblocksize_info *bsp;
13075 13068 struct sd_xbuf *xp;
13076 13069 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */
13077 13070 struct buf *orig_bp; /* ptr to the original buf */
13078 13071 offset_t shadow_end;
13079 13072 offset_t request_end;
13080 13073 offset_t shadow_start;
13081 13074 ssize_t copy_offset;
13082 13075 size_t copy_length;
13083 13076 size_t shortfall;
13084 13077 uint_t is_write; /* TRUE if this bp is a WRITE */
13085 13078 	uint_t		has_wmap;	/* TRUE if this bp has a wmap */
13086 13079
13087 13080 ASSERT(un != NULL);
13088 13081 ASSERT(bp != NULL);
13089 13082
13090 13083 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
13091 13084 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp);
13092 13085
13093 13086 /*
13094 13087 * There is no shadow buf or layer-private data if the target is
13095 13088 * using un->un_sys_blocksize as its block size or if bcount == 0.
13096 13089 */
13097 13090 if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) ||
13098 13091 (bp->b_bcount == 0)) {
13099 13092 goto exit;
13100 13093 }
13101 13094
13102 13095 xp = SD_GET_XBUF(bp);
13103 13096 ASSERT(xp != NULL);
13104 13097
13105 13098 /* Retrieve the pointer to the layer-private data area from the xbuf. */
13106 13099 bsp = xp->xb_private;
13107 13100
13108 13101 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE;
13109 13102 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE;
13110 13103
13111 13104 if (is_write) {
13112 13105 /*
13113 13106 * For a WRITE request we must free up the block range that
13114 13107 * we have locked up. This holds regardless of whether this is
13115 13108 * an aligned write request or a read-modify-write request.
13116 13109 */
13117 13110 sd_range_unlock(un, bsp->mbs_wmp);
13118 13111 bsp->mbs_wmp = NULL;
13119 13112 }
13120 13113
13121 13114 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) {
13122 13115 /*
13123 13116 * An aligned read or write command will have no shadow buf;
13124 13117 * there is not much else to do with it.
13125 13118 */
13126 13119 goto done;
13127 13120 }
13128 13121
13129 13122 orig_bp = bsp->mbs_orig_bp;
13130 13123 ASSERT(orig_bp != NULL);
13131 13124 orig_xp = SD_GET_XBUF(orig_bp);
13132 13125 ASSERT(orig_xp != NULL);
13133 13126 ASSERT(!mutex_owned(SD_MUTEX(un)));
13134 13127
13135 13128 if (!is_write && has_wmap) {
13136 13129 /*
13137 13130 * A READ with a wmap means this is the READ phase of a
13138 13131 * read-modify-write. If an error occurred on the READ then
13139 13132 * we do not proceed with the WRITE phase or copy any data.
13140 13133 * Just release the write maps and return with an error.
13141 13134 */
13142 13135 if ((bp->b_resid != 0) || (bp->b_error != 0)) {
13143 13136 orig_bp->b_resid = orig_bp->b_bcount;
13144 13137 bioerror(orig_bp, bp->b_error);
13145 13138 sd_range_unlock(un, bsp->mbs_wmp);
13146 13139 goto freebuf_done;
13147 13140 }
13148 13141 }
13149 13142
13150 13143 /*
13151 13144 * Here is where we set up to copy the data from the shadow buf
13152 13145 * into the space associated with the original buf.
13153 13146 *
13154 13147 * To deal with the conversion between block sizes, these
13155 13148 * computations treat the data as an array of bytes, with the
13156 13149 * first byte (byte 0) corresponding to the first byte in the
13157 13150 * first block on the disk.
13158 13151 */
13159 13152
13160 13153 /*
13161 13154 * shadow_start and shadow_len indicate the location and size of
13162 13155 * the data returned with the shadow IO request.
13163 13156 */
13164 13157 if (un->un_f_enable_rmw) {
13165 13158 shadow_start = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
13166 13159 } else {
13167 13160 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
13168 13161 }
13169 13162 shadow_end = shadow_start + bp->b_bcount - bp->b_resid;
13170 13163
13171 13164 /*
13172 13165 * copy_offset gives the offset (in bytes) from the start of the first
13173 13166 * block of the READ request to the beginning of the data. We retrieve
13174 13167 	 * this value from the layer-private data area, where it was saved
13175 13168 	 * by sd_mapblocksize_iostart(). copy_length gives the amount of
13176 13169 * data to be copied (in bytes).
13177 13170 */
13178 13171 copy_offset = bsp->mbs_copy_offset;
13179 13172 if (un->un_f_enable_rmw) {
13180 13173 ASSERT((copy_offset >= 0) &&
13181 13174 (copy_offset < un->un_phy_blocksize));
13182 13175 } else {
13183 13176 ASSERT((copy_offset >= 0) &&
13184 13177 (copy_offset < un->un_tgt_blocksize));
13185 13178 }
13186 13179
13187 13180 copy_length = orig_bp->b_bcount;
13188 13181 request_end = shadow_start + copy_offset + orig_bp->b_bcount;
13189 13182
13190 13183 /*
13191 13184 * Set up the resid and error fields of orig_bp as appropriate.
13192 13185 */
13193 13186 if (shadow_end >= request_end) {
13194 13187 /* We got all the requested data; set resid to zero */
13195 13188 orig_bp->b_resid = 0;
13196 13189 } else {
13197 13190 /*
13198 13191 * We failed to get enough data to fully satisfy the original
13199 13192 * request. Just copy back whatever data we got and set
13200 13193 * up the residual and error code as required.
13201 13194 *
13202 13195 * 'shortfall' is the amount by which the data received with the
13203 13196 * shadow buf has "fallen short" of the requested amount.
13204 13197 */
13205 13198 shortfall = (size_t)(request_end - shadow_end);
13206 13199
13207 13200 if (shortfall > orig_bp->b_bcount) {
13208 13201 /*
13209 13202 * We did not get enough data to even partially
13210 13203 * fulfill the original request. The residual is
13211 13204 * equal to the amount requested.
13212 13205 */
13213 13206 orig_bp->b_resid = orig_bp->b_bcount;
13214 13207 } else {
13215 13208 /*
13216 13209 * We did not get all the data that we requested
13217 13210 * from the device, but we will try to return what
13218 13211 * portion we did get.
13219 13212 */
13220 13213 orig_bp->b_resid = shortfall;
13221 13214 }
13222 13215 ASSERT(copy_length >= orig_bp->b_resid);
13223 13216 copy_length -= orig_bp->b_resid;
13224 13217 }
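	/*
	 * Editor's note: continuing the 2048-byte-block example from
	 * sd_mapblocksize_iostart, suppose the shadow READ transfers
	 * nothing: shadow_start = 2048 and b_resid = b_bcount = 2048
	 * give shadow_end = 2048, while copy_offset = 512 and an
	 * original b_bcount of 1024 give request_end = 3584; the
	 * shortfall of 1536 exceeds the original count, so b_resid is
	 * set to the full 1024.
	 */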
13225 13218
13226 13219 /* Propagate the error code from the shadow buf to the original buf */
13227 13220 bioerror(orig_bp, bp->b_error);
13228 13221
13229 13222 if (is_write) {
13230 13223 goto freebuf_done; /* No data copying for a WRITE */
13231 13224 }
13232 13225
13233 13226 if (has_wmap) {
13234 13227 /*
13235 13228 * This is a READ command from the READ phase of a
13236 13229 * read-modify-write request. We have to copy the data given
13237 13230 * by the user OVER the data returned by the READ command,
13238 13231 * then convert the command from a READ to a WRITE and send
13239 13232 * it back to the target.
13240 13233 */
13241 13234 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset,
13242 13235 copy_length);
13243 13236
13244 13237 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */
13245 13238
13246 13239 /*
13247 13240 * Dispatch the WRITE command to the taskq thread, which
13248 13241 * will in turn send the command to the target. When the
13249 13242 * WRITE command completes, we (sd_mapblocksize_iodone())
13250 13243 * will get called again as part of the iodone chain
13251 13244 * processing for it. Note that we will still be dealing
13252 13245 * with the shadow buf at that point.
13253 13246 */
13254 13247 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp,
13255 13248 KM_NOSLEEP) != 0) {
13256 13249 /*
13257 13250 * Dispatch was successful so we are done. Return
13258 13251 * without going any higher up the iodone chain. Do
13259 13252 * not free up any layer-private data until after the
13260 13253 * WRITE completes.
13261 13254 */
13262 13255 return;
13263 13256 }
13264 13257
13265 13258 /*
13266 13259 * Dispatch of the WRITE command failed; set up the error
13267 13260 * condition and send this IO back up the iodone chain.
13268 13261 */
13269 13262 bioerror(orig_bp, EIO);
13270 13263 orig_bp->b_resid = orig_bp->b_bcount;
13271 13264
13272 13265 } else {
13273 13266 /*
13274 13267 * This is a regular READ request (ie, not a RMW). Copy the
13275 13268 * data from the shadow buf into the original buf. The
13276 13269 * copy_offset compensates for any "misalignment" between the
13277 13270 * shadow buf (with its un->un_tgt_blocksize blocks) and the
13278 13271 * original buf (with its un->un_sys_blocksize blocks).
13279 13272 */
13280 13273 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr,
13281 13274 copy_length);
13282 13275 }
13283 13276
13284 13277 freebuf_done:
13285 13278
13286 13279 /*
13287 13280 * At this point we still have both the shadow buf AND the original
13288 13281 * buf to deal with, as well as the layer-private data area in each.
13289 13282 * Local variables are as follows:
13290 13283 *
13291 13284 * bp -- points to shadow buf
13292 13285 * xp -- points to xbuf of shadow buf
13293 13286 * bsp -- points to layer-private data area of shadow buf
13294 13287 * orig_bp -- points to original buf
13295 13288 *
13296 13289 * First free the shadow buf and its associated xbuf, then free the
13297 13290 * layer-private data area from the shadow buf. There is no need to
13298 13291 * restore xb_private in the shadow xbuf.
13299 13292 */
13300 13293 sd_shadow_buf_free(bp);
13301 13294 kmem_free(bsp, sizeof (struct sd_mapblocksize_info));
13302 13295
13303 13296 /*
13304 13297 * Now update the local variables to point to the original buf, xbuf,
13305 13298 * and layer-private area.
13306 13299 */
13307 13300 bp = orig_bp;
13308 13301 xp = SD_GET_XBUF(bp);
13309 13302 ASSERT(xp != NULL);
13310 13303 ASSERT(xp == orig_xp);
13311 13304 bsp = xp->xb_private;
13312 13305 ASSERT(bsp != NULL);
13313 13306
13314 13307 done:
13315 13308 /*
13316 13309 * Restore xb_private to whatever it was set to by the next higher
13317 13310 * layer in the chain, then free the layer-private data area.
13318 13311 */
13319 13312 xp->xb_private = bsp->mbs_oprivate;
13320 13313 kmem_free(bsp, sizeof (struct sd_mapblocksize_info));
13321 13314
13322 13315 exit:
13323 13316 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp),
13324 13317 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp);
13325 13318
13326 13319 SD_NEXT_IODONE(index, un, bp);
13327 13320 }
13328 13321
13329 13322
13330 13323 /*
13331 13324 * Function: sd_checksum_iostart
13332 13325 *
13333 13326 * Description: A stub function for a layer that's currently not used.
13334 13327 * For now just a placeholder.
13335 13328 *
13336 13329 * Context: Kernel thread context
13337 13330 */
13338 13331
13339 13332 static void
13340 13333 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp)
13341 13334 {
13342 13335 ASSERT(un != NULL);
13343 13336 ASSERT(bp != NULL);
13344 13337 ASSERT(!mutex_owned(SD_MUTEX(un)));
13345 13338 SD_NEXT_IOSTART(index, un, bp);
13346 13339 }
13347 13340
13348 13341
13349 13342 /*
13350 13343 * Function: sd_checksum_iodone
13351 13344 *
13352 13345 * Description: A stub function for a layer that's currently not used.
13353 13346 * For now just a placeholder.
13354 13347 *
13355 13348 * Context: May be called under interrupt context
13356 13349 */
13357 13350
13358 13351 static void
13359 13352 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp)
13360 13353 {
13361 13354 ASSERT(un != NULL);
13362 13355 ASSERT(bp != NULL);
13363 13356 ASSERT(!mutex_owned(SD_MUTEX(un)));
13364 13357 SD_NEXT_IODONE(index, un, bp);
13365 13358 }
13366 13359
13367 13360
13368 13361 /*
13369 13362 * Function: sd_checksum_uscsi_iostart
13370 13363 *
13371 13364 * Description: A stub function for a layer that's currently not used.
13372 13365 * For now just a placeholder.
13373 13366 *
13374 13367 * Context: Kernel thread context
13375 13368 */
13376 13369
13377 13370 static void
13378 13371 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp)
13379 13372 {
13380 13373 ASSERT(un != NULL);
13381 13374 ASSERT(bp != NULL);
13382 13375 ASSERT(!mutex_owned(SD_MUTEX(un)));
13383 13376 SD_NEXT_IOSTART(index, un, bp);
13384 13377 }
13385 13378
13386 13379
13387 13380 /*
13388 13381 * Function: sd_checksum_uscsi_iodone
13389 13382 *
13390 13383 * Description: A stub function for a layer that's currently not used.
13391 13384 * For now just a placeholder.
13392 13385 *
13393 13386 * Context: May be called under interrupt context
13394 13387 */
13395 13388
13396 13389 static void
13397 13390 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
13398 13391 {
13399 13392 ASSERT(un != NULL);
13400 13393 ASSERT(bp != NULL);
13401 13394 ASSERT(!mutex_owned(SD_MUTEX(un)));
13402 13395 SD_NEXT_IODONE(index, un, bp);
13403 13396 }
13404 13397
13405 13398
13406 13399 /*
13407 13400 * Function: sd_pm_iostart
13408 13401 *
13409 13402 	 * Description: iostart-side routine for power management.
13410 13403 *
13411 13404 * Context: Kernel thread context
13412 13405 */
13413 13406
13414 13407 static void
13415 13408 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp)
13416 13409 {
13417 13410 ASSERT(un != NULL);
13418 13411 ASSERT(bp != NULL);
13419 13412 ASSERT(!mutex_owned(SD_MUTEX(un)));
13420 13413 ASSERT(!mutex_owned(&un->un_pm_mutex));
13421 13414
13422 13415 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n");
13423 13416
13424 13417 if (sd_pm_entry(un) != DDI_SUCCESS) {
13425 13418 /*
13426 13419 * Set up to return the failed buf back up the 'iodone'
13427 13420 * side of the calling chain.
13428 13421 */
13429 13422 bioerror(bp, EIO);
13430 13423 bp->b_resid = bp->b_bcount;
13431 13424
13432 13425 SD_BEGIN_IODONE(index, un, bp);
13433 13426
13434 13427 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
13435 13428 return;
13436 13429 }
13437 13430
13438 13431 SD_NEXT_IOSTART(index, un, bp);
13439 13432
13440 13433 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
13441 13434 }
13442 13435
13443 13436
13444 13437 /*
13445 13438 * Function: sd_pm_iodone
13446 13439 *
13447 13440 	 * Description: iodone-side routine for power management.
13448 13441 *
13449 13442 * Context: may be called from interrupt context
13450 13443 */
13451 13444
13452 13445 static void
13453 13446 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
13454 13447 {
13455 13448 ASSERT(un != NULL);
13456 13449 ASSERT(bp != NULL);
13457 13450 ASSERT(!mutex_owned(&un->un_pm_mutex));
13458 13451
13459 13452 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");
13460 13453
13461 13454 /*
13462 13455 * After attach the following flag is only read, so don't
13463 13456 * take the penalty of acquiring a mutex for it.
13464 13457 */
13465 13458 if (un->un_f_pm_is_enabled == TRUE) {
13466 13459 sd_pm_exit(un);
13467 13460 }
13468 13461
13469 13462 SD_NEXT_IODONE(index, un, bp);
13470 13463
13471 13464 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n");
13472 13465 }
13473 13466
13474 13467
13475 13468 /*
13476 13469 * Function: sd_core_iostart
13477 13470 *
13478 13471 * Description: Primary driver function for enqueuing buf(9S) structs from
13479 13472 * the system and initiating IO to the target device
13480 13473 *
13481 13474 * Context: Kernel thread context. Can sleep.
13482 13475 *
13483 13476 * Assumptions: - The given xp->xb_blkno is absolute
13484 13477 * (ie, relative to the start of the device).
13485 13478 * - The IO is to be done using the native blocksize of
13486 13479 * the device, as specified in un->un_tgt_blocksize.
13487 13480 */
13488 13481 /* ARGSUSED */
13489 13482 static void
13490 13483 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp)
13491 13484 {
13492 13485 struct sd_xbuf *xp;
13493 13486
13494 13487 ASSERT(un != NULL);
13495 13488 ASSERT(bp != NULL);
13496 13489 ASSERT(!mutex_owned(SD_MUTEX(un)));
13497 13490 ASSERT(bp->b_resid == 0);
13498 13491
13499 13492 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp);
13500 13493
13501 13494 xp = SD_GET_XBUF(bp);
13502 13495 ASSERT(xp != NULL);
13503 13496
13504 13497 mutex_enter(SD_MUTEX(un));
13505 13498
13506 13499 /*
13507 13500 * If we are currently in the failfast state, fail any new IO
13508 13501 * that has B_FAILFAST set, then return.
13509 13502 */
13510 13503 if ((bp->b_flags & B_FAILFAST) &&
13511 13504 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) {
13512 13505 mutex_exit(SD_MUTEX(un));
13513 13506 bioerror(bp, EIO);
13514 13507 bp->b_resid = bp->b_bcount;
13515 13508 SD_BEGIN_IODONE(index, un, bp);
13516 13509 return;
13517 13510 }
13518 13511
13519 13512 if (SD_IS_DIRECT_PRIORITY(xp)) {
13520 13513 /*
13521 13514 * Priority command -- transport it immediately.
13522 13515 *
13523 13516 * Note: We may want to assert that USCSI_DIAGNOSE is set,
13524 13517 * because all direct priority commands should be associated
13525 13518 * with error recovery actions which we don't want to retry.
13526 13519 */
13527 13520 sd_start_cmds(un, bp);
13528 13521 } else {
13529 13522 /*
13530 13523 * Normal command -- add it to the wait queue, then start
13531 13524 * transporting commands from the wait queue.
13532 13525 */
13533 13526 sd_add_buf_to_waitq(un, bp);
13534 13527 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
13535 13528 sd_start_cmds(un, NULL);
13536 13529 }
13537 13530
13538 13531 mutex_exit(SD_MUTEX(un));
13539 13532
13540 13533 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp);
13541 13534 }
13542 13535
13543 13536
13544 13537 /*
13545 13538 * Function: sd_init_cdb_limits
13546 13539 *
13547 13540 * Description: This is to handle scsi_pkt initialization differences
13548 13541 * between the driver platforms.
13549 13542 *
13550 13543 * Legacy behaviors:
13551 13544 *
13552 13545 * If the block number or the sector count exceeds the
13553 13546 * capabilities of a Group 0 command, shift over to a
13554 13547 * Group 1 command. We don't blindly use Group 1
13555 13548 * commands because a) some drives (CDC Wren IVs) get a
13556 13549 * bit confused, and b) there is probably a fair amount
13557 13550 * of speed difference for a target to receive and decode
13558 13551 * a 10 byte command instead of a 6 byte command.
13559 13552 *
13560 13553 * The xfer time difference of 6 vs 10 byte CDBs is
13561 13554 * still significant so this code is still worthwhile.
13562 13555 * 10 byte CDBs are very inefficient with the fas HBA driver
13563 13556 * and older disks. Each CDB byte took 1 usec with some
13564 13557 * popular disks.
13565 13558 *
13566 13559 * Context: Must be called at attach time
13567 13560 */
13568 13561
13569 13562 static void
13570 13563 sd_init_cdb_limits(struct sd_lun *un)
13571 13564 {
13572 13565 int hba_cdb_limit;
13573 13566
13574 13567 /*
13575 13568 * Use CDB_GROUP1 commands for most devices except for
13576 13569 * parallel SCSI fixed drives in which case we get better
13577 13570 * performance using CDB_GROUP0 commands (where applicable).
13578 13571 */
13579 13572 un->un_mincdb = SD_CDB_GROUP1;
13580 13573 #if !defined(__fibre)
13581 13574 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) &&
13582 13575 !un->un_f_has_removable_media) {
13583 13576 un->un_mincdb = SD_CDB_GROUP0;
13584 13577 }
13585 13578 #endif
13586 13579
13587 13580 /*
13588 13581 * Try to read the max-cdb-length supported by HBA.
13589 13582 */
13590 13583 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1);
13591 13584 if (0 >= un->un_max_hba_cdb) {
13592 13585 un->un_max_hba_cdb = CDB_GROUP4;
13593 13586 hba_cdb_limit = SD_CDB_GROUP4;
13594 13587 } else if (0 < un->un_max_hba_cdb &&
13595 13588 un->un_max_hba_cdb < CDB_GROUP1) {
13596 13589 hba_cdb_limit = SD_CDB_GROUP0;
13597 13590 } else if (CDB_GROUP1 <= un->un_max_hba_cdb &&
13598 13591 un->un_max_hba_cdb < CDB_GROUP5) {
13599 13592 hba_cdb_limit = SD_CDB_GROUP1;
13600 13593 } else if (CDB_GROUP5 <= un->un_max_hba_cdb &&
13601 13594 un->un_max_hba_cdb < CDB_GROUP4) {
13602 13595 hba_cdb_limit = SD_CDB_GROUP5;
13603 13596 } else {
13604 13597 hba_cdb_limit = SD_CDB_GROUP4;
13605 13598 }
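
	/*
	 * For reference (per the standard SCSI CDB group sizes): a
	 * Group 0 CDB is 6 bytes, Group 1 is 10, Group 5 is 12, and
	 * Group 4 is 16. The chain above therefore maps the HBA's
	 * reported max-cdb-length to the largest CDB group that fits
	 * within it.
	 */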
13606 13599
13607 13600 /*
13608 13601 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4
13609 13602 * commands for fixed disks unless we are building for a 32 bit
13610 13603 * kernel.
13611 13604 */
13612 13605 #ifdef _LP64
13613 13606 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
13614 13607 min(hba_cdb_limit, SD_CDB_GROUP4);
13615 13608 #else
13616 13609 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
13617 13610 min(hba_cdb_limit, SD_CDB_GROUP1);
13618 13611 #endif
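
	/*
	 * Note: on a 32-bit kernel the driver refuses to bind to a device
	 * larger than 2TB (see the comment at the end of sd_setup_rw_pkt),
	 * so a 32-bit LBA, and hence a Group 1 CDB, is always sufficient
	 * for fixed disks there.
	 */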
13619 13612
13620 13613 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE)
13621 13614 ? sizeof (struct scsi_arq_status) : 1);
13622 13615 if (!ISCD(un))
13623 13616 un->un_cmd_timeout = (ushort_t)sd_io_time;
13624 13617 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout;
13625 13618 }
13626 13619
13627 13620
13628 13621 /*
13629 13622 * Function: sd_initpkt_for_buf
13630 13623 *
13631 13624 * Description: Allocate and initialize for transport a scsi_pkt struct,
13632 13625 * based upon the info specified in the given buf struct.
13633 13626 *
13634 13627  * Assumes the xb_blkno in the request is absolute (i.e.,
13635 13628  * relative to the start of the device, NOT the partition).
13636 13629 * Also assumes that the request is using the native block
13637 13630 * size of the device (as returned by the READ CAPACITY
13638 13631 * command).
13639 13632 *
13640 13633 * Return Code: SD_PKT_ALLOC_SUCCESS
13641 13634 * SD_PKT_ALLOC_FAILURE
13642 13635 * SD_PKT_ALLOC_FAILURE_NO_DMA
13643 13636 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13644 13637 *
13645 13638 * Context: Kernel thread and may be called from software interrupt context
13646 13639 * as part of a sdrunout callback. This function may not block or
13647 13640 * call routines that block
13648 13641 */
13649 13642
13650 13643 static int
13651 13644 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
13652 13645 {
13653 13646 struct sd_xbuf *xp;
13654 13647 struct scsi_pkt *pktp = NULL;
13655 13648 struct sd_lun *un;
13656 13649 size_t blockcount;
13657 13650 daddr_t startblock;
13658 13651 int rval;
13659 13652 int cmd_flags;
13660 13653
13661 13654 ASSERT(bp != NULL);
13662 13655 ASSERT(pktpp != NULL);
13663 13656 xp = SD_GET_XBUF(bp);
13664 13657 ASSERT(xp != NULL);
13665 13658 un = SD_GET_UN(bp);
13666 13659 ASSERT(un != NULL);
13667 13660 ASSERT(mutex_owned(SD_MUTEX(un)));
13668 13661 ASSERT(bp->b_resid == 0);
13669 13662
13670 13663 SD_TRACE(SD_LOG_IO_CORE, un,
13671 13664 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);
13672 13665
13673 13666 mutex_exit(SD_MUTEX(un));
13674 13667
13675 13668 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13676 13669 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
13677 13670 /*
13678 13671 * Already have a scsi_pkt -- just need DMA resources.
13679 13672 * We must recompute the CDB in case the mapping returns
13680 13673 * a nonzero pkt_resid.
13681 13674 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
13682 13675  * that is being retried, the unmap/remap of the DMA resources
13683 13676 * will result in the entire transfer starting over again
13684 13677 * from the very first block.
13685 13678 */
13686 13679 ASSERT(xp->xb_pktp != NULL);
13687 13680 pktp = xp->xb_pktp;
13688 13681 } else {
13689 13682 pktp = NULL;
13690 13683 }
13691 13684 #endif /* __i386 || __amd64 */
13692 13685
13693 13686 startblock = xp->xb_blkno; /* Absolute block num. */
13694 13687 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
13695 13688
13696 13689 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);
13697 13690
13698 13691 /*
13699 13692 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
13700 13693 * call scsi_init_pkt, and build the CDB.
13701 13694 */
13702 13695 rval = sd_setup_rw_pkt(un, &pktp, bp,
13703 13696 cmd_flags, sdrunout, (caddr_t)un,
13704 13697 startblock, blockcount);
13705 13698
13706 13699 if (rval == 0) {
13707 13700 /*
13708 13701 * Success.
13709 13702 *
13710 13703  * If partial DMA is being used and is required for this
13711 13704  * transfer, set it up here.
13712 13705 */
13713 13706 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
13714 13707 (pktp->pkt_resid != 0)) {
13715 13708
13716 13709 /*
13717 13710  * Save the pkt_resid for the
13718 13711  * next xfer
13719 13712 */
13720 13713 xp->xb_dma_resid = pktp->pkt_resid;
13721 13714
13722 13715 /* rezero resid */
13723 13716 pktp->pkt_resid = 0;
13724 13717
13725 13718 } else {
13726 13719 xp->xb_dma_resid = 0;
13727 13720 }
13728 13721
13729 13722 pktp->pkt_flags = un->un_tagflags;
13730 13723 pktp->pkt_time = un->un_cmd_timeout;
13731 13724 pktp->pkt_comp = sdintr;
13732 13725
13733 13726 pktp->pkt_private = bp;
13734 13727 *pktpp = pktp;
13735 13728
13736 13729 SD_TRACE(SD_LOG_IO_CORE, un,
13737 13730 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);
13738 13731
13739 13732 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13740 13733 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
13741 13734 #endif
13742 13735
13743 13736 mutex_enter(SD_MUTEX(un));
13744 13737 return (SD_PKT_ALLOC_SUCCESS);
13745 13738
13746 13739 }
13747 13740
13748 13741 /*
13749 13742 * SD_PKT_ALLOC_FAILURE is the only expected failure code
13750 13743 * from sd_setup_rw_pkt.
13751 13744 */
13752 13745 ASSERT(rval == SD_PKT_ALLOC_FAILURE);
13753 13746
13754 13747 if (rval == SD_PKT_ALLOC_FAILURE) {
13755 13748 *pktpp = NULL;
13756 13749 /*
13757 13750 * Set the driver state to RWAIT to indicate the driver
13758 13751 * is waiting on resource allocations. The driver will not
13759 13752  * suspend, pm_suspend, or detach while the state is RWAIT.
13760 13753 */
13761 13754 mutex_enter(SD_MUTEX(un));
13762 13755 New_state(un, SD_STATE_RWAIT);
13763 13756
13764 13757 SD_ERROR(SD_LOG_IO_CORE, un,
13765 13758 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp);
13766 13759
13767 13760 if ((bp->b_flags & B_ERROR) != 0) {
13768 13761 return (SD_PKT_ALLOC_FAILURE_NO_DMA);
13769 13762 }
13770 13763 return (SD_PKT_ALLOC_FAILURE);
13771 13764 } else {
13772 13765 /*
13773 13766  * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13774 13767 *
13775 13768 * This should never happen. Maybe someone messed with the
13776 13769 * kernel's minphys?
13777 13770 */
13778 13771 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13779 13772 "Request rejected: too large for CDB: "
13780 13773 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount);
13781 13774 SD_ERROR(SD_LOG_IO_CORE, un,
13782 13775 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
13783 13776 mutex_enter(SD_MUTEX(un));
13784 13777 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13785 13778
13786 13779 }
13787 13780 }
13788 13781
13789 13782
13790 13783 /*
13791 13784 * Function: sd_destroypkt_for_buf
13792 13785 *
13793 13786 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
13794 13787 *
13795 13788 * Context: Kernel thread or interrupt context
13796 13789 */
13797 13790
13798 13791 static void
13799 13792 sd_destroypkt_for_buf(struct buf *bp)
13800 13793 {
13801 13794 ASSERT(bp != NULL);
13802 13795 ASSERT(SD_GET_UN(bp) != NULL);
13803 13796
13804 13797 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13805 13798 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp);
13806 13799
13807 13800 ASSERT(SD_GET_PKTP(bp) != NULL);
13808 13801 scsi_destroy_pkt(SD_GET_PKTP(bp));
13809 13802
13810 13803 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13811 13804 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp);
13812 13805 }
13813 13806
13814 13807 /*
13815 13808 * Function: sd_setup_rw_pkt
13816 13809 *
13817 13810 * Description: Determines appropriate CDB group for the requested LBA
13818 13811 * and transfer length, calls scsi_init_pkt, and builds
13819 13812 * the CDB. Do not use for partial DMA transfers except
13820 13813 * for the initial transfer since the CDB size must
13821 13814 * remain constant.
13822 13815 *
13823 13816 * Context: Kernel thread and may be called from software interrupt
13824 13817 * context as part of a sdrunout callback. This function may not
13825 13818 * block or call routines that block
13826 13819 */
13827 13820
13828 13821
13829 13822 int
13830 13823 sd_setup_rw_pkt(struct sd_lun *un,
13831 13824 struct scsi_pkt **pktpp, struct buf *bp, int flags,
13832 13825 int (*callback)(caddr_t), caddr_t callback_arg,
13833 13826 diskaddr_t lba, uint32_t blockcount)
13834 13827 {
13835 13828 struct scsi_pkt *return_pktp;
13836 13829 union scsi_cdb *cdbp;
13837 13830 struct sd_cdbinfo *cp = NULL;
13838 13831 int i;
13839 13832
13840 13833 /*
13841 13834 * See which size CDB to use, based upon the request.
13842 13835 */
13843 13836 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {
13844 13837
13845 13838 /*
13846 13839 * Check lba and block count against sd_cdbtab limits.
13847 13840 * In the partial DMA case, we have to use the same size
13848 13841 * CDB for all the transfers. Check lba + blockcount
13849 13842 * against the max LBA so we know that segment of the
13850 13843 * transfer can use the CDB we select.
13851 13844 */
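		/*
		 * For reference (per the standard READ/WRITE CDB layouts,
		 * which sd_cdbtab is expected to encode): Group 0 carries
		 * a 21-bit LBA and an 8-bit count, Group 1 a 32-bit LBA
		 * and a 16-bit count, Group 5 a 32-bit LBA and a 32-bit
		 * count, and Group 4 a 64-bit LBA and a 32-bit count.
		 */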
13852 13845 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) &&
13853 13846 (blockcount <= sd_cdbtab[i].sc_maxlen)) {
13854 13847
13855 13848 /*
13856 13849 * The command will fit into the CDB type
13857 13850 * specified by sd_cdbtab[i].
13858 13851 */
13859 13852 cp = sd_cdbtab + i;
13860 13853
13861 13854 /*
13862 13855 * Call scsi_init_pkt so we can fill in the
13863 13856 * CDB.
13864 13857 */
13865 13858 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp,
13866 13859 bp, cp->sc_grpcode, un->un_status_len, 0,
13867 13860 flags, callback, callback_arg);
13868 13861
13869 13862 if (return_pktp != NULL) {
13870 13863
13871 13864 /*
13872 13865 * Return new value of pkt
13873 13866 */
13874 13867 *pktpp = return_pktp;
13875 13868
13876 13869 /*
13877 13870  * To be safe, zero the CDB ensuring there is
13878 13871 * no leftover data from a previous command.
13879 13872 */
13880 13873 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode);
13881 13874
13882 13875 /*
13883 13876 * Handle partial DMA mapping
13884 13877 */
13885 13878 if (return_pktp->pkt_resid != 0) {
13886 13879
13887 13880 /*
13888 13881 * Not going to xfer as many blocks as
13889 13882 * originally expected
13890 13883 */
13891 13884 blockcount -=
13892 13885 SD_BYTES2TGTBLOCKS(un,
13893 13886 return_pktp->pkt_resid);
13894 13887 }
13895 13888
13896 13889 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp;
13897 13890
13898 13891 /*
13899 13892 * Set command byte based on the CDB
13900 13893 * type we matched.
13901 13894 */
13902 13895 cdbp->scc_cmd = cp->sc_grpmask |
13903 13896 ((bp->b_flags & B_READ) ?
13904 13897 SCMD_READ : SCMD_WRITE);
13905 13898
13906 13899 SD_FILL_SCSI1_LUN(un, return_pktp);
13907 13900
13908 13901 /*
13909 13902 * Fill in LBA and length
13910 13903 */
13911 13904 ASSERT((cp->sc_grpcode == CDB_GROUP1) ||
13912 13905 (cp->sc_grpcode == CDB_GROUP4) ||
13913 13906 (cp->sc_grpcode == CDB_GROUP0) ||
13914 13907 (cp->sc_grpcode == CDB_GROUP5));
13915 13908
13916 13909 if (cp->sc_grpcode == CDB_GROUP1) {
13917 13910 FORMG1ADDR(cdbp, lba);
13918 13911 FORMG1COUNT(cdbp, blockcount);
13919 13912 return (0);
13920 13913 } else if (cp->sc_grpcode == CDB_GROUP4) {
13921 13914 FORMG4LONGADDR(cdbp, lba);
13922 13915 FORMG4COUNT(cdbp, blockcount);
13923 13916 return (0);
13924 13917 } else if (cp->sc_grpcode == CDB_GROUP0) {
13925 13918 FORMG0ADDR(cdbp, lba);
13926 13919 FORMG0COUNT(cdbp, blockcount);
13927 13920 return (0);
13928 13921 } else if (cp->sc_grpcode == CDB_GROUP5) {
13929 13922 FORMG5ADDR(cdbp, lba);
13930 13923 FORMG5COUNT(cdbp, blockcount);
13931 13924 return (0);
13932 13925 }
13933 13926
13934 13927 /*
13935 13928 * It should be impossible to not match one
13936 13929 * of the CDB types above, so we should never
13937 13930 * reach this point. Set the CDB command byte
13938 13931 * to test-unit-ready to avoid writing
13939 13932 * to somewhere we don't intend.
13940 13933 */
13941 13934 cdbp->scc_cmd = SCMD_TEST_UNIT_READY;
13942 13935 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13943 13936 } else {
13944 13937 /*
13945 13938 * Couldn't get scsi_pkt
13946 13939 */
13947 13940 return (SD_PKT_ALLOC_FAILURE);
13948 13941 }
13949 13942 }
13950 13943 }
13951 13944
13952 13945 /*
13953 13946 * None of the available CDB types were suitable. This really
13954 13947 * should never happen: on a 64 bit system we support
13955 13948 * READ16/WRITE16 which will hold an entire 64 bit disk address
13956 13949 * and on a 32 bit system we will refuse to bind to a device
13957 13950 * larger than 2TB so addresses will never be larger than 32 bits.
13958 13951 */
13959 13952 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13960 13953 }
13961 13954
13962 13955 /*
13963 13956 * Function: sd_setup_next_rw_pkt
13964 13957 *
13965 13958  * Description: Set up the packet for partial DMA transfers, except for the
13966 13959 * initial transfer. sd_setup_rw_pkt should be used for
13967 13960 * the initial transfer.
13968 13961 *
13969 13962 * Context: Kernel thread and may be called from interrupt context.
13970 13963 */
13971 13964
13972 13965 int
13973 13966 sd_setup_next_rw_pkt(struct sd_lun *un,
13974 13967 struct scsi_pkt *pktp, struct buf *bp,
13975 13968 diskaddr_t lba, uint32_t blockcount)
13976 13969 {
13977 13970 uchar_t com;
13978 13971 union scsi_cdb *cdbp;
13979 13972 uchar_t cdb_group_id;
13980 13973
13981 13974 ASSERT(pktp != NULL);
13982 13975 ASSERT(pktp->pkt_cdbp != NULL);
13983 13976
13984 13977 cdbp = (union scsi_cdb *)pktp->pkt_cdbp;
13985 13978 com = cdbp->scc_cmd;
13986 13979 cdb_group_id = CDB_GROUPID(com);
13987 13980
13988 13981 ASSERT((cdb_group_id == CDB_GROUPID_0) ||
13989 13982 (cdb_group_id == CDB_GROUPID_1) ||
13990 13983 (cdb_group_id == CDB_GROUPID_4) ||
13991 13984 (cdb_group_id == CDB_GROUPID_5));
13992 13985
13993 13986 /*
13994 13987 * Move pkt to the next portion of the xfer.
13995 13988 * func is NULL_FUNC so we do not have to release
13996 13989 * the disk mutex here.
13997 13990 */
13998 13991 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0,
13999 13992 NULL_FUNC, NULL) == pktp) {
14000 13993 /* Success. Handle partial DMA */
14001 13994 if (pktp->pkt_resid != 0) {
14002 13995 blockcount -=
14003 13996 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid);
14004 13997 }
14005 13998
14006 13999 cdbp->scc_cmd = com;
14007 14000 SD_FILL_SCSI1_LUN(un, pktp);
14008 14001 if (cdb_group_id == CDB_GROUPID_1) {
14009 14002 FORMG1ADDR(cdbp, lba);
14010 14003 FORMG1COUNT(cdbp, blockcount);
14011 14004 return (0);
14012 14005 } else if (cdb_group_id == CDB_GROUPID_4) {
14013 14006 FORMG4LONGADDR(cdbp, lba);
14014 14007 FORMG4COUNT(cdbp, blockcount);
14015 14008 return (0);
14016 14009 } else if (cdb_group_id == CDB_GROUPID_0) {
14017 14010 FORMG0ADDR(cdbp, lba);
14018 14011 FORMG0COUNT(cdbp, blockcount);
14019 14012 return (0);
14020 14013 } else if (cdb_group_id == CDB_GROUPID_5) {
14021 14014 FORMG5ADDR(cdbp, lba);
14022 14015 FORMG5COUNT(cdbp, blockcount);
14023 14016 return (0);
14024 14017 }
14025 14018
14026 14019 /* Unreachable */
14027 14020 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
14028 14021 }
14029 14022
14030 14023 /*
14031 14024 * Error setting up next portion of cmd transfer.
14032 14025 * Something is definitely very wrong and this
14033 14026 * should not happen.
14034 14027 */
14035 14028 return (SD_PKT_ALLOC_FAILURE);
14036 14029 }
14037 14030
14038 14031 /*
14039 14032 * Function: sd_initpkt_for_uscsi
14040 14033 *
14041 14034 * Description: Allocate and initialize for transport a scsi_pkt struct,
14042 14035 * based upon the info specified in the given uscsi_cmd struct.
14043 14036 *
14044 14037 * Return Code: SD_PKT_ALLOC_SUCCESS
14045 14038 * SD_PKT_ALLOC_FAILURE
14046 14039 * SD_PKT_ALLOC_FAILURE_NO_DMA
14047 14040  * SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL
14048 14041 *
14049 14042 * Context: Kernel thread and may be called from software interrupt context
14050 14043 * as part of a sdrunout callback. This function may not block or
14051 14044 * call routines that block
14052 14045 */
14053 14046
14054 14047 static int
14055 14048 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
14056 14049 {
14057 14050 struct uscsi_cmd *uscmd;
14058 14051 struct sd_xbuf *xp;
14059 14052 struct scsi_pkt *pktp;
14060 14053 struct sd_lun *un;
14061 14054 uint32_t flags = 0;
14062 14055
14063 14056 ASSERT(bp != NULL);
14064 14057 ASSERT(pktpp != NULL);
14065 14058 xp = SD_GET_XBUF(bp);
14066 14059 ASSERT(xp != NULL);
14067 14060 un = SD_GET_UN(bp);
14068 14061 ASSERT(un != NULL);
14069 14062 ASSERT(mutex_owned(SD_MUTEX(un)));
14070 14063
14071 14064 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
14072 14065 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
14073 14066 ASSERT(uscmd != NULL);
14074 14067
14075 14068 SD_TRACE(SD_LOG_IO_CORE, un,
14076 14069 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);
14077 14070
14078 14071 /*
14079 14072 * Allocate the scsi_pkt for the command.
14080 14073 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
14081 14074 * during scsi_init_pkt time and will continue to use the
14082 14075 * same path as long as the same scsi_pkt is used without
14083 14076  * intervening scsi_dma_free(). Since a uscsi command does
14084 14077  * not call scsi_dmafree() before retrying a failed command, it
14085 14078  * is necessary to make sure the PKT_DMA_PARTIAL flag is NOT
14086 14079  * set, so that scsi_vhci can use another available path for the
14087 14080  * retry. Besides, uscsi commands do not allow DMA breakup,
14088 14081  * so there is no need to set the PKT_DMA_PARTIAL flag.
14089 14082 */
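	/*
	 * If the caller asked for more sense data than the default
	 * SENSE_LENGTH, allocate extra status space for the larger
	 * auto-request-sense payload and set PKT_XARQ to request the
	 * extended sense data from the HBA.
	 */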
14090 14083 if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
14091 14084 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
14092 14085 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
14093 14086 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
14094 14087 - sizeof (struct scsi_extended_sense)), 0,
14095 14088 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
14096 14089 sdrunout, (caddr_t)un);
14097 14090 } else {
14098 14091 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
14099 14092 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
14100 14093 sizeof (struct scsi_arq_status), 0,
14101 14094 (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
14102 14095 sdrunout, (caddr_t)un);
14103 14096 }
14104 14097
14105 14098 if (pktp == NULL) {
14106 14099 *pktpp = NULL;
14107 14100 /*
14108 14101 * Set the driver state to RWAIT to indicate the driver
14109 14102 * is waiting on resource allocations. The driver will not
14110 14103  * suspend, pm_suspend, or detach while the state is RWAIT.
14111 14104 */
14112 14105 New_state(un, SD_STATE_RWAIT);
14113 14106
14114 14107 SD_ERROR(SD_LOG_IO_CORE, un,
14115 14108 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);
14116 14109
14117 14110 if ((bp->b_flags & B_ERROR) != 0) {
14118 14111 return (SD_PKT_ALLOC_FAILURE_NO_DMA);
14119 14112 }
14120 14113 return (SD_PKT_ALLOC_FAILURE);
14121 14114 }
14122 14115
14123 14116 /*
14124 14117 * We do not do DMA breakup for USCSI commands, so return failure
14125 14118 * here if all the needed DMA resources were not allocated.
14126 14119 */
14127 14120 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
14128 14121 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
14129 14122 scsi_destroy_pkt(pktp);
14130 14123 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
14131 14124 "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
14132 14125 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
14133 14126 }
14134 14127
14135 14128 /* Init the cdb from the given uscsi struct */
14136 14129 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
14137 14130 uscmd->uscsi_cdb[0], 0, 0, 0);
14138 14131
14139 14132 SD_FILL_SCSI1_LUN(un, pktp);
14140 14133
14141 14134 /*
14142 14135  * Set up the optional USCSI flags. See the uscsi(7I) man page
14143 14136 * for listing of the supported flags.
14144 14137 */
14145 14138
14146 14139 if (uscmd->uscsi_flags & USCSI_SILENT) {
14147 14140 flags |= FLAG_SILENT;
14148 14141 }
14149 14142
14150 14143 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
14151 14144 flags |= FLAG_DIAGNOSE;
14152 14145 }
14153 14146
14154 14147 if (uscmd->uscsi_flags & USCSI_ISOLATE) {
14155 14148 flags |= FLAG_ISOLATE;
14156 14149 }
14157 14150
14158 14151 if (un->un_f_is_fibre == FALSE) {
14159 14152 if (uscmd->uscsi_flags & USCSI_RENEGOT) {
14160 14153 flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
14161 14154 }
14162 14155 }
14163 14156
14164 14157 /*
14165 14158 * Set the pkt flags here so we save time later.
14166 14159 * Note: These flags are NOT in the uscsi man page!!!
14167 14160 */
14168 14161 if (uscmd->uscsi_flags & USCSI_HEAD) {
14169 14162 flags |= FLAG_HEAD;
14170 14163 }
14171 14164
14172 14165 if (uscmd->uscsi_flags & USCSI_NOINTR) {
14173 14166 flags |= FLAG_NOINTR;
14174 14167 }
14175 14168
14176 14169 /*
14177 14170 * For tagged queueing, things get a bit complicated.
14178 14171 * Check first for head of queue and last for ordered queue.
14179 14172  * If neither head nor ordered, use the default driver tag flags.
14180 14173 */
14181 14174 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
14182 14175 if (uscmd->uscsi_flags & USCSI_HTAG) {
14183 14176 flags |= FLAG_HTAG;
14184 14177 } else if (uscmd->uscsi_flags & USCSI_OTAG) {
14185 14178 flags |= FLAG_OTAG;
14186 14179 } else {
14187 14180 flags |= un->un_tagflags & FLAG_TAGMASK;
14188 14181 }
14189 14182 }
14190 14183
14191 14184 if (uscmd->uscsi_flags & USCSI_NODISCON) {
14192 14185 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
14193 14186 }
14194 14187
14195 14188 pktp->pkt_flags = flags;
14196 14189
14197 14190 /* Transfer uscsi information to scsi_pkt */
14198 14191 (void) scsi_uscsi_pktinit(uscmd, pktp);
14199 14192
14200 14193 /* Copy the caller's CDB into the pkt... */
14201 14194 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);
14202 14195
14203 14196 if (uscmd->uscsi_timeout == 0) {
14204 14197 pktp->pkt_time = un->un_uscsi_timeout;
14205 14198 } else {
14206 14199 pktp->pkt_time = uscmd->uscsi_timeout;
14207 14200 }
14208 14201
14209 14202 /* need it later to identify USCSI request in sdintr */
14210 14203 xp->xb_pkt_flags |= SD_XB_USCSICMD;
14211 14204
14212 14205 xp->xb_sense_resid = uscmd->uscsi_rqresid;
14213 14206
14214 14207 pktp->pkt_private = bp;
14215 14208 pktp->pkt_comp = sdintr;
14216 14209 *pktpp = pktp;
14217 14210
14218 14211 SD_TRACE(SD_LOG_IO_CORE, un,
14219 14212 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);
14220 14213
14221 14214 return (SD_PKT_ALLOC_SUCCESS);
14222 14215 }
14223 14216
14224 14217
14225 14218 /*
14226 14219 * Function: sd_destroypkt_for_uscsi
14227 14220 *
14228 14221 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
14229 14222  * IOs. Also saves relevant info into the associated uscsi_cmd
14230 14223 * struct.
14231 14224 *
14232 14225 * Context: May be called under interrupt context
14233 14226 */
14234 14227
14235 14228 static void
14236 14229 sd_destroypkt_for_uscsi(struct buf *bp)
14237 14230 {
14238 14231 struct uscsi_cmd *uscmd;
14239 14232 struct sd_xbuf *xp;
14240 14233 struct scsi_pkt *pktp;
14241 14234 struct sd_lun *un;
14242 14235 struct sd_uscsi_info *suip;
14243 14236
14244 14237 ASSERT(bp != NULL);
14245 14238 xp = SD_GET_XBUF(bp);
14246 14239 ASSERT(xp != NULL);
14247 14240 un = SD_GET_UN(bp);
14248 14241 ASSERT(un != NULL);
14249 14242 ASSERT(!mutex_owned(SD_MUTEX(un)));
14250 14243 pktp = SD_GET_PKTP(bp);
14251 14244 ASSERT(pktp != NULL);
14252 14245
14253 14246 SD_TRACE(SD_LOG_IO_CORE, un,
14254 14247 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);
14255 14248
14256 14249 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
14257 14250 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
14258 14251 ASSERT(uscmd != NULL);
14259 14252
14260 14253 /* Save the status and the residual into the uscsi_cmd struct */
14261 14254 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
14262 14255 uscmd->uscsi_resid = bp->b_resid;
14263 14256
14264 14257 /* Transfer scsi_pkt information to uscsi */
14265 14258 (void) scsi_uscsi_pktfini(pktp, uscmd);
14266 14259
14267 14260 /*
14268 14261 * If enabled, copy any saved sense data into the area specified
14269 14262 * by the uscsi command.
14270 14263 */
14271 14264 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
14272 14265 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
14273 14266 /*
14274 14267 * Note: uscmd->uscsi_rqbuf should always point to a buffer
14275 14268 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
14276 14269 */
14277 14270 uscmd->uscsi_rqstatus = xp->xb_sense_status;
14278 14271 uscmd->uscsi_rqresid = xp->xb_sense_resid;
14279 14272 if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
14280 14273 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
14281 14274 MAX_SENSE_LENGTH);
14282 14275 } else {
14283 14276 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
14284 14277 SENSE_LENGTH);
14285 14278 }
14286 14279 }
14287 14280 /*
14288 14281 * The following assignments are for SCSI FMA.
14289 14282 */
14290 14283 ASSERT(xp->xb_private != NULL);
14291 14284 suip = (struct sd_uscsi_info *)xp->xb_private;
14292 14285 suip->ui_pkt_reason = pktp->pkt_reason;
14293 14286 suip->ui_pkt_state = pktp->pkt_state;
14294 14287 suip->ui_pkt_statistics = pktp->pkt_statistics;
14295 14288 suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp);
14296 14289
14297 14290 /* We are done with the scsi_pkt; free it now */
14298 14291 ASSERT(SD_GET_PKTP(bp) != NULL);
14299 14292 scsi_destroy_pkt(SD_GET_PKTP(bp));
14300 14293
14301 14294 SD_TRACE(SD_LOG_IO_CORE, un,
14302 14295 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
14303 14296 }
14304 14297
14305 14298
14306 14299 /*
14307 14300 * Function: sd_bioclone_alloc
14308 14301 *
14309 14302 * Description: Allocate a buf(9S) and init it as per the given buf
14310 14303 * and the various arguments. The associated sd_xbuf
14311 14304 * struct is (nearly) duplicated. The struct buf *bp
14312 14305 * argument is saved in new_xp->xb_private.
14313 14306 *
14314 14307  * Arguments: bp - ptr to the buf(9S) to be "shadowed"
14315 14308 * datalen - size of data area for the shadow bp
14316 14309 * blkno - starting LBA
14317 14310 * func - function pointer for b_iodone in the shadow buf. (May
14318 14311 * be NULL if none.)
14319 14312 *
14320 14313  * Return Code: Pointer to the allocated buf(9S) struct
14321 14314 *
14322 14315 * Context: Can sleep.
14323 14316 */
14324 14317
14325 14318 static struct buf *
14326 14319 sd_bioclone_alloc(struct buf *bp, size_t datalen, daddr_t blkno,
14327 14320 int (*func)(struct buf *))
14328 14321 {
14329 14322 struct sd_lun *un;
14330 14323 struct sd_xbuf *xp;
14331 14324 struct sd_xbuf *new_xp;
14332 14325 struct buf *new_bp;
14333 14326
14334 14327 ASSERT(bp != NULL);
14335 14328 xp = SD_GET_XBUF(bp);
14336 14329 ASSERT(xp != NULL);
14337 14330 un = SD_GET_UN(bp);
14338 14331 ASSERT(un != NULL);
14339 14332 ASSERT(!mutex_owned(SD_MUTEX(un)));
14340 14333
14341 14334 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func,
14342 14335 NULL, KM_SLEEP);
14343 14336
14344 14337 new_bp->b_lblkno = blkno;
14345 14338
14346 14339 /*
14347 14340 * Allocate an xbuf for the shadow bp and copy the contents of the
14348 14341 * original xbuf into it.
14349 14342 */
14350 14343 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
14351 14344 bcopy(xp, new_xp, sizeof (struct sd_xbuf));
14352 14345
14353 14346 /*
14354 14347 * The given bp is automatically saved in the xb_private member
14355 14348 * of the new xbuf. Callers are allowed to depend on this.
14356 14349 */
14357 14350 new_xp->xb_private = bp;
14358 14351
14359 14352 new_bp->b_private = new_xp;
14360 14353
14361 14354 return (new_bp);
14362 14355 }
14363 14356
14364 14357 /*
14365 14358 * Function: sd_shadow_buf_alloc
14366 14359 *
14367 14360 * Description: Allocate a buf(9S) and init it as per the given buf
14368 14361 * and the various arguments. The associated sd_xbuf
14369 14362 * struct is (nearly) duplicated. The struct buf *bp
14370 14363 * argument is saved in new_xp->xb_private.
14371 14364 *
14372 14365  * Arguments: bp - ptr to the buf(9S) to be "shadowed"
14373 14366 * datalen - size of data area for the shadow bp
14374 14367 * bflags - B_READ or B_WRITE (pseudo flag)
14375 14368 * blkno - starting LBA
14376 14369 * func - function pointer for b_iodone in the shadow buf. (May
14377 14370 * be NULL if none.)
14378 14371 *
14379 14372  * Return Code: Pointer to the allocated buf(9S) struct
14380 14373 *
14381 14374 * Context: Can sleep.
14382 14375 */
14383 14376
14384 14377 static struct buf *
14385 14378 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags,
14386 14379 daddr_t blkno, int (*func)(struct buf *))
14387 14380 {
14388 14381 struct sd_lun *un;
14389 14382 struct sd_xbuf *xp;
14390 14383 struct sd_xbuf *new_xp;
14391 14384 struct buf *new_bp;
14392 14385
14393 14386 ASSERT(bp != NULL);
14394 14387 xp = SD_GET_XBUF(bp);
14395 14388 ASSERT(xp != NULL);
14396 14389 un = SD_GET_UN(bp);
14397 14390 ASSERT(un != NULL);
14398 14391 ASSERT(!mutex_owned(SD_MUTEX(un)));
14399 14392
14400 14393 if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
14401 14394 bp_mapin(bp);
14402 14395 }
14403 14396
14404 14397 bflags &= (B_READ | B_WRITE);
14405 14398 #if defined(__i386) || defined(__amd64)
14406 14399 new_bp = getrbuf(KM_SLEEP);
14407 14400 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP);
14408 14401 new_bp->b_bcount = datalen;
14409 14402 new_bp->b_flags = bflags |
14410 14403 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW));
14411 14404 #else
14412 14405 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL,
14413 14406 datalen, bflags, SLEEP_FUNC, NULL);
14414 14407 #endif
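
	/*
	 * Note: on x86 a plain kernel buffer obtained via getrbuf() and
	 * kmem_zalloc() is used above, while other platforms use
	 * scsi_alloc_consistent_buf() to obtain DMA-consistent memory
	 * for the shadow transfer.
	 */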
14415 14408 new_bp->av_forw = NULL;
14416 14409 new_bp->av_back = NULL;
14417 14410 new_bp->b_dev = bp->b_dev;
14418 14411 new_bp->b_blkno = blkno;
14419 14412 new_bp->b_iodone = func;
14420 14413 new_bp->b_edev = bp->b_edev;
14421 14414 new_bp->b_resid = 0;
14422 14415
14423 14416 /* We need to preserve the B_FAILFAST flag */
14424 14417 if (bp->b_flags & B_FAILFAST) {
14425 14418 new_bp->b_flags |= B_FAILFAST;
14426 14419 }
14427 14420
14428 14421 /*
14429 14422 * Allocate an xbuf for the shadow bp and copy the contents of the
14430 14423 * original xbuf into it.
14431 14424 */
14432 14425 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
14433 14426 bcopy(xp, new_xp, sizeof (struct sd_xbuf));
14434 14427
14435 14428  /* Needed later to copy data between the shadow buf & original buf */
14436 14429 new_xp->xb_pkt_flags |= PKT_CONSISTENT;
14437 14430
14438 14431 /*
14439 14432 * The given bp is automatically saved in the xb_private member
14440 14433 * of the new xbuf. Callers are allowed to depend on this.
14441 14434 */
14442 14435 new_xp->xb_private = bp;
14443 14436
14444 14437 new_bp->b_private = new_xp;
14445 14438
14446 14439 return (new_bp);
14447 14440 }
14448 14441
14449 14442 /*
14450 14443 * Function: sd_bioclone_free
14451 14444 *
14452 14445 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations
14453 14446  * in a larger-than-partition operation.
14454 14447 *
14455 14448 * Context: May be called under interrupt context
14456 14449 */
14457 14450
14458 14451 static void
14459 14452 sd_bioclone_free(struct buf *bp)
14460 14453 {
14461 14454 struct sd_xbuf *xp;
14462 14455
14463 14456 ASSERT(bp != NULL);
14464 14457 xp = SD_GET_XBUF(bp);
14465 14458 ASSERT(xp != NULL);
14466 14459
14467 14460 /*
14468 14461 * Call bp_mapout() before freeing the buf, in case a lower
14469 14462  * layer or HBA had done a bp_mapin(); we must do this here
14470 14463 * as we are the "originator" of the shadow buf.
14471 14464 */
14472 14465 bp_mapout(bp);
14473 14466
14474 14467 /*
14475 14468 * Null out b_iodone before freeing the bp, to ensure that the driver
14476 14469 * never gets confused by a stale value in this field. (Just a little
14477 14470 * extra defensiveness here.)
14478 14471 */
14479 14472 bp->b_iodone = NULL;
14480 14473
14481 14474 freerbuf(bp);
14482 14475
14483 14476 kmem_free(xp, sizeof (struct sd_xbuf));
14484 14477 }
14485 14478
14486 14479 /*
14487 14480 * Function: sd_shadow_buf_free
14488 14481 *
14489 14482 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations.
14490 14483 *
14491 14484 * Context: May be called under interrupt context
14492 14485 */
14493 14486
14494 14487 static void
14495 14488 sd_shadow_buf_free(struct buf *bp)
14496 14489 {
14497 14490 struct sd_xbuf *xp;
14498 14491
14499 14492 ASSERT(bp != NULL);
14500 14493 xp = SD_GET_XBUF(bp);
14501 14494 ASSERT(xp != NULL);
14502 14495
14503 14496 #if defined(__sparc)
14504 14497 /*
14505 14498 * Call bp_mapout() before freeing the buf, in case a lower
14506 14499  * layer or HBA had done a bp_mapin(); we must do this here
14507 14500 * as we are the "originator" of the shadow buf.
14508 14501 */
14509 14502 bp_mapout(bp);
14510 14503 #endif
14511 14504
14512 14505 /*
14513 14506 * Null out b_iodone before freeing the bp, to ensure that the driver
14514 14507 * never gets confused by a stale value in this field. (Just a little
14515 14508 * extra defensiveness here.)
14516 14509 */
14517 14510 bp->b_iodone = NULL;
14518 14511
14519 14512 #if defined(__i386) || defined(__amd64)
14520 14513 kmem_free(bp->b_un.b_addr, bp->b_bcount);
14521 14514 freerbuf(bp);
14522 14515 #else
14523 14516 scsi_free_consistent_buf(bp);
14524 14517 #endif
14525 14518
14526 14519 kmem_free(xp, sizeof (struct sd_xbuf));
14527 14520 }
14528 14521
14529 14522
14530 14523 /*
14531 14524 * Function: sd_print_transport_rejected_message
14532 14525 *
14533 14526 * Description: This implements the ludicrously complex rules for printing
14534 14527 * a "transport rejected" message. This is to address the
14535 14528 * specific problem of having a flood of this error message
14536 14529 * produced when a failover occurs.
14537 14530 *
14538 14531 * Context: Any.
14539 14532 */
14540 14533
14541 14534 static void
14542 14535 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp,
14543 14536 int code)
14544 14537 {
14545 14538 ASSERT(un != NULL);
14546 14539 ASSERT(mutex_owned(SD_MUTEX(un)));
14547 14540 ASSERT(xp != NULL);
14548 14541
14549 14542 /*
14550 14543 * Print the "transport rejected" message under the following
14551 14544 * conditions:
14552 14545 *
14553 14546 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set
14554 14547 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR.
14555 14548 * - If the error code IS a TRAN_FATAL_ERROR, then the message is
14556 14549 * printed the FIRST time a TRAN_FATAL_ERROR is returned from
14557 14550 * scsi_transport(9F) (which indicates that the target might have
14558 14551  * gone off-line). This uses un->un_tran_fatal_count,
14559 14552  * which is incremented whenever a TRAN_FATAL_ERROR is
14560 14553  * received, and reset to zero whenever a TRAN_ACCEPT is returned
14561 14554 * from scsi_transport().
14562 14555 *
14563 14556 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
14564 14557  * the preceding cases in order for the message to be printed.
14565 14558 */
14566 14559 if (((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) &&
14567 14560 (SD_FM_LOG(un) == SD_FM_LOG_NSUP)) {
14568 14561 if ((sd_level_mask & SD_LOGMASK_DIAG) ||
14569 14562 (code != TRAN_FATAL_ERROR) ||
14570 14563 (un->un_tran_fatal_count == 1)) {
14571 14564 switch (code) {
14572 14565 case TRAN_BADPKT:
14573 14566 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14574 14567 "transport rejected bad packet\n");
14575 14568 break;
14576 14569 case TRAN_FATAL_ERROR:
14577 14570 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14578 14571 "transport rejected fatal error\n");
14579 14572 break;
14580 14573 default:
14581 14574 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14582 14575 "transport rejected (%d)\n", code);
14583 14576 break;
14584 14577 }
14585 14578 }
14586 14579 }
14587 14580 }
14588 14581
14589 14582
14590 14583 /*
14591 14584 * Function: sd_add_buf_to_waitq
14592 14585 *
14593 14586 * Description: Add the given buf(9S) struct to the wait queue for the
14594 14587 * instance. If sorting is enabled, then the buf is added
14595 14588 * to the queue via an elevator sort algorithm (a la
14596 14589 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key.
14597 14590 * If sorting is not enabled, then the buf is just added
14598 14591 * to the end of the wait queue.
14599 14592 *
14600 14593 * Return Code: void
14601 14594 *
14602 14595 * Context: Does not sleep/block, therefore technically can be called
14603 14596  * from any context. However, if sorting is enabled, the
14604 14597  * execution time is indeterminate and may be long if
14605 14598 * the wait queue grows large.
14606 14599 */
14607 14600
14608 14601 static void
14609 14602 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp)
14610 14603 {
14611 14604 struct buf *ap;
14612 14605
14613 14606 ASSERT(bp != NULL);
14614 14607 ASSERT(un != NULL);
14615 14608 ASSERT(mutex_owned(SD_MUTEX(un)));
14616 14609
14617 14610 /* If the queue is empty, add the buf as the only entry & return. */
14618 14611 if (un->un_waitq_headp == NULL) {
14619 14612 ASSERT(un->un_waitq_tailp == NULL);
14620 14613 un->un_waitq_headp = un->un_waitq_tailp = bp;
14621 14614 bp->av_forw = NULL;
14622 14615 return;
14623 14616 }
14624 14617
14625 14618 ASSERT(un->un_waitq_tailp != NULL);
14626 14619
14627 14620 /*
14628 14621 * If sorting is disabled, just add the buf to the tail end of
14629 14622 * the wait queue and return.
14630 14623 */
14631 14624 if (un->un_f_disksort_disabled || un->un_f_enable_rmw) {
14632 14625 un->un_waitq_tailp->av_forw = bp;
14633 14626 un->un_waitq_tailp = bp;
14634 14627 bp->av_forw = NULL;
14635 14628 return;
14636 14629 }
14637 14630
14638 14631 /*
14639 14632  * Sort through the list of requests currently on the wait queue
14640 14633 * and add the new buf request at the appropriate position.
14641 14634 *
14642 14635 * The un->un_waitq_headp is an activity chain pointer on which
14643 14636 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The
14644 14637 * first queue holds those requests which are positioned after
14645 14638 * the current SD_GET_BLKNO() (in the first request); the second holds
14646 14639 * requests which came in after their SD_GET_BLKNO() number was passed.
14647 14640 * Thus we implement a one way scan, retracting after reaching
14648 14641 * the end of the drive to the first request on the second
14649 14642 * queue, at which time it becomes the first queue.
14650 14643 * A one-way scan is natural because of the way UNIX read-ahead
14651 14644 * blocks are allocated.
14652 14645 *
14653 14646  * If we lie after the first request in service order (i.e., our
14654 14647  * block number is below it), locate the second list and add ourselves to it.
14655 14648 */
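	/*
	 * A hypothetical example: with the head request at block 100 and
	 * a queue of 120, 300, 10, 40 (the drop from 300 to 10 marks the
	 * start of the second list), a new request for block 20 is
	 * inserted between 10 and 40, while a new request for block 200
	 * goes between 120 and 300.
	 */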
14656 14649 ap = un->un_waitq_headp;
14657 14650 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) {
14658 14651 while (ap->av_forw != NULL) {
14659 14652 /*
14660 14653 * Look for an "inversion" in the (normally
14661 14654 * ascending) block numbers. This indicates
14662 14655 * the start of the second request list.
14663 14656 */
14664 14657 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) {
14665 14658 /*
14666 14659 * Search the second request list for the
14667 14660 * first request at a larger block number.
14668 14661 * We go before that; however if there is
14669 14662 * no such request, we go at the end.
14670 14663 */
14671 14664 do {
14672 14665 if (SD_GET_BLKNO(bp) <
14673 14666 SD_GET_BLKNO(ap->av_forw)) {
14674 14667 goto insert;
14675 14668 }
14676 14669 ap = ap->av_forw;
14677 14670 } while (ap->av_forw != NULL);
14678 14671 goto insert; /* after last */
14679 14672 }
14680 14673 ap = ap->av_forw;
14681 14674 }
14682 14675
14683 14676 /*
14684 14677 * No inversions... we will go after the last, and
14685 14678 * be the first request in the second request list.
14686 14679 */
14687 14680 goto insert;
14688 14681 }
14689 14682
14690 14683 /*
14691 14684 * Request is at/after the current request...
14692 14685 * sort in the first request list.
14693 14686 */
14694 14687 while (ap->av_forw != NULL) {
14695 14688 /*
14696 14689 * We want to go after the current request (1) if
14697 14690 * there is an inversion after it (i.e. it is the end
14698 14691 * of the first request list), or (2) if the next
14699 14692 * request is a larger block no. than our request.
14700 14693 */
14701 14694 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) ||
14702 14695 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) {
14703 14696 goto insert;
14704 14697 }
14705 14698 ap = ap->av_forw;
14706 14699 }
14707 14700
14708 14701 /*
14709 14702 * Neither a second list nor a larger request, therefore
14710 14703 * we go at the end of the first list (which is the same
14711 14704  * as the end of the whole shebang).
14712 14705 */
14713 14706 insert:
14714 14707 bp->av_forw = ap->av_forw;
14715 14708 ap->av_forw = bp;
14716 14709
14717 14710 /*
14718 14711 * If we inserted onto the tail end of the waitq, make sure the
14719 14712 * tail pointer is updated.
14720 14713 */
14721 14714 if (ap == un->un_waitq_tailp) {
14722 14715 un->un_waitq_tailp = bp;
14723 14716 }
14724 14717 }
14725 14718
14726 14719
14727 14720 /*
14728 14721 * Function: sd_start_cmds
14729 14722 *
14730 14723 * Description: Remove and transport cmds from the driver queues.
14731 14724 *
14732 14725 * Arguments: un - pointer to the unit (soft state) struct for the target.
14733 14726 *
14734 14727 * immed_bp - ptr to a buf to be transported immediately. Only
14735 14728 * the immed_bp is transported; bufs on the waitq are not
14736 14729 * processed and the un_retry_bp is not checked. If immed_bp is
14737 14730 * NULL, then normal queue processing is performed.
14738 14731 *
14739 14732 * Context: May be called from kernel thread context, interrupt context,
14740 14733 * or runout callback context. This function may not block or
14741 14734 * call routines that block.
14742 14735 */
14743 14736
14744 14737 static void
14745 14738 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp)
14746 14739 {
14747 14740 struct sd_xbuf *xp;
14748 14741 struct buf *bp;
14749 14742 void (*statp)(kstat_io_t *);
14750 14743 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14751 14744 void (*saved_statp)(kstat_io_t *);
14752 14745 #endif
14753 14746 int rval;
14754 14747 struct sd_fm_internal *sfip = NULL;
14755 14748
14756 14749 ASSERT(un != NULL);
14757 14750 ASSERT(mutex_owned(SD_MUTEX(un)));
14758 14751 ASSERT(un->un_ncmds_in_transport >= 0);
14759 14752 ASSERT(un->un_throttle >= 0);
14760 14753
14761 14754 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n");
14762 14755
14763 14756 do {
14764 14757 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14765 14758 saved_statp = NULL;
14766 14759 #endif
14767 14760
14768 14761 /*
14769 14762 * If we are syncing or dumping, fail the command to
14770 14763 * avoid recursively calling back into scsi_transport().
14771 14764 * The dump I/O itself uses a separate code path so this
14772 14765 * only prevents non-dump I/O from being sent while dumping.
14773 14766 * File system sync takes place before dumping begins.
14774 14767 * During panic, filesystem I/O is allowed provided
14775 14768 * un_in_callback is <= 1. This is to prevent recursion
14776 14769 * such as sd_start_cmds -> scsi_transport -> sdintr ->
14777 14770 * sd_start_cmds and so on. See panic.c for more information
14778 14771 * about the states the system can be in during panic.
14779 14772 */
14780 14773 if ((un->un_state == SD_STATE_DUMPING) ||
14781 14774 (ddi_in_panic() && (un->un_in_callback > 1))) {
14782 14775 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14783 14776 "sd_start_cmds: panicking\n");
14784 14777 goto exit;
14785 14778 }
14786 14779
14787 14780 if ((bp = immed_bp) != NULL) {
14788 14781 /*
14789 14782 * We have a bp that must be transported immediately.
14790 14783 * It's OK to transport the immed_bp here without doing
14791 14784 * the throttle limit check because the immed_bp is
14792 14785 * always used in a retry/recovery case. This means
14793 14786 * that we know we are not at the throttle limit by
14794 14787 * virtue of the fact that to get here we must have
14795 14788 * already gotten a command back via sdintr(). This also
14796 14789 * relies on (1) the command on un_retry_bp preventing
14797 14790 * further commands from the waitq from being issued;
14798 14791 * and (2) the code in sd_retry_command checking the
14799 14792 * throttle limit before issuing a delayed or immediate
14800 14793 * retry. This holds even if the throttle limit is
14801 14794 * currently ratcheted down from its maximum value.
14802 14795 */
14803 14796 statp = kstat_runq_enter;
14804 14797 if (bp == un->un_retry_bp) {
14805 14798 ASSERT((un->un_retry_statp == NULL) ||
14806 14799 (un->un_retry_statp == kstat_waitq_enter) ||
14807 14800 (un->un_retry_statp ==
14808 14801 kstat_runq_back_to_waitq));
14809 14802 /*
14810 14803 * If the waitq kstat was incremented when
14811 14804 * sd_set_retry_bp() queued this bp for a retry,
14812 14805 * then we must set up statp so that the waitq
14813 14806 * count will get decremented correctly below.
14814 14807 * Also we must clear un->un_retry_statp to
14815 14808 * ensure that we do not act on a stale value
14816 14809 * in this field.
14817 14810 */
14818 14811 if ((un->un_retry_statp == kstat_waitq_enter) ||
14819 14812 (un->un_retry_statp ==
14820 14813 kstat_runq_back_to_waitq)) {
14821 14814 statp = kstat_waitq_to_runq;
14822 14815 }
14823 14816 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14824 14817 saved_statp = un->un_retry_statp;
14825 14818 #endif
14826 14819 un->un_retry_statp = NULL;
14827 14820
14828 14821 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14829 14822 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p "
14830 14823 "un_throttle:%d un_ncmds_in_transport:%d\n",
14831 14824 un, un->un_retry_bp, un->un_throttle,
14832 14825 un->un_ncmds_in_transport);
14833 14826 } else {
14834 14827 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: "
14835 14828 "processing priority bp:0x%p\n", bp);
14836 14829 }
14837 14830
14838 14831 } else if ((bp = un->un_waitq_headp) != NULL) {
14839 14832 /*
14840 14833 * A command on the waitq is ready to go, but do not
14841 14834 * send it if:
14842 14835 *
14843 14836 * (1) the throttle limit has been reached, or
14844 14837 * (2) a retry is pending, or
14845 14838  * (3) a START_STOP_UNIT callback is pending, or
14846 14839 * (4) a callback for a SD_PATH_DIRECT_PRIORITY
14847 14840 * command is pending.
14848 14841 *
14849 14842 * For all of these conditions, IO processing will
14850 14843 * restart after the condition is cleared.
14851 14844 */
14852 14845 if (un->un_ncmds_in_transport >= un->un_throttle) {
14853 14846 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14854 14847 "sd_start_cmds: exiting, "
14855 14848 "throttle limit reached!\n");
14856 14849 goto exit;
14857 14850 }
14858 14851 if (un->un_retry_bp != NULL) {
14859 14852 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14860 14853 "sd_start_cmds: exiting, retry pending!\n");
14861 14854 goto exit;
14862 14855 }
14863 14856 if (un->un_startstop_timeid != NULL) {
14864 14857 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14865 14858 "sd_start_cmds: exiting, "
14866 14859 "START_STOP pending!\n");
14867 14860 goto exit;
14868 14861 }
14869 14862 if (un->un_direct_priority_timeid != NULL) {
14870 14863 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14871 14864 "sd_start_cmds: exiting, "
14872 14865 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n");
14873 14866 goto exit;
14874 14867 }
14875 14868
14876 14869 /* Dequeue the command */
14877 14870 un->un_waitq_headp = bp->av_forw;
14878 14871 if (un->un_waitq_headp == NULL) {
14879 14872 un->un_waitq_tailp = NULL;
14880 14873 }
14881 14874 bp->av_forw = NULL;
14882 14875 statp = kstat_waitq_to_runq;
14883 14876 SD_TRACE(SD_LOG_IO_CORE, un,
14884 14877 "sd_start_cmds: processing waitq bp:0x%p\n", bp);
14885 14878
14886 14879 } else {
14887 14880 /* No work to do so bail out now */
14888 14881 SD_TRACE(SD_LOG_IO_CORE, un,
14889 14882 "sd_start_cmds: no more work, exiting!\n");
14890 14883 goto exit;
14891 14884 }
14892 14885
14893 14886 /*
14894 14887 * Reset the state to normal. This is the mechanism by which
14895 14888 * the state transitions from either SD_STATE_RWAIT or
14896 14889 * SD_STATE_OFFLINE to SD_STATE_NORMAL.
14897 14890 * If state is SD_STATE_PM_CHANGING then this command is
14898 14891 * part of the device power control and the state must
14899 14892  * not be put back to normal. Doing so would
14900 14893  * allow new commands to proceed when they shouldn't,
14901 14894  * as the device may be powering off.
14902 14895 */
14903 14896 if ((un->un_state != SD_STATE_SUSPENDED) &&
14904 14897 (un->un_state != SD_STATE_PM_CHANGING)) {
14905 14898 New_state(un, SD_STATE_NORMAL);
14906 14899 }
14907 14900
14908 14901 xp = SD_GET_XBUF(bp);
14909 14902 ASSERT(xp != NULL);
14910 14903
14911 14904 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14912 14905 /*
14913 14906 * Allocate the scsi_pkt if we need one, or attach DMA
14914 14907 * resources if we have a scsi_pkt that needs them. The
14915 14908 * latter should only occur for commands that are being
14916 14909 * retried.
14917 14910 */
14918 14911 if ((xp->xb_pktp == NULL) ||
14919 14912 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) {
14920 14913 #else
14921 14914 if (xp->xb_pktp == NULL) {
14922 14915 #endif
14923 14916 /*
14924 14917 * There is no scsi_pkt allocated for this buf. Call
14925 14918 * the initpkt function to allocate & init one.
14926 14919 *
14927 14920 * The scsi_init_pkt runout callback functionality is
14928 14921 * implemented as follows:
14929 14922 *
14930 14923 * 1) The initpkt function always calls
14931 14924 * scsi_init_pkt(9F) with sdrunout specified as the
14932 14925 * callback routine.
14933 14926 * 2) A successful packet allocation is initialized and
14934 14927 * the I/O is transported.
14935 14928 * 3) The I/O associated with an allocation resource
14936 14929 * failure is left on its queue to be retried via
14937 14930 * runout or the next I/O.
14938 14931 * 4) The I/O associated with a DMA error is removed
14939 14932 * from the queue and failed with EIO. Processing of
14940 14933 * the transport queues is also halted to be
14941 14934 * restarted via runout or the next I/O.
14942 14935 * 5) The I/O associated with a CDB size or packet
14943 14936 * size error is removed from the queue and failed
14944 14937 * with EIO. Processing of the transport queues is
14945 14938 * continued.
14946 14939 *
14947 14940 * Note: there is no interface for canceling a runout
14948 14941 * callback. To prevent the driver from detaching or
14949 14942 * suspending while a runout is pending the driver
14950 14943 * state is set to SD_STATE_RWAIT
14951 14944 *
14952 14945 * Note: using the scsi_init_pkt callback facility can
14953 14946 * result in an I/O request persisting at the head of
14954 14947 * the list which cannot be satisfied even after
14955 14948 * multiple retries. In the future the driver may
14956 14949 * implement some kind of maximum runout count before
14957 14950 * failing an I/O.
14958 14951 *
14959 14952 * Note: the use of funcp below may seem superfluous,
14960 14953 * but it helps warlock figure out the correct
14961 14954 * initpkt function calls (see [s]sd.wlcmd).
14962 14955 */
14963 14956 struct scsi_pkt *pktp;
14964 14957 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp);
14965 14958
14966 14959 ASSERT(bp != un->un_rqs_bp);
14967 14960
14968 14961 funcp = sd_initpkt_map[xp->xb_chain_iostart];
14969 14962 switch ((*funcp)(bp, &pktp)) {
14970 14963 case SD_PKT_ALLOC_SUCCESS:
14971 14964 xp->xb_pktp = pktp;
14972 14965 SD_TRACE(SD_LOG_IO_CORE, un,
14973 14966 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n",
14974 14967 pktp);
14975 14968 goto got_pkt;
14976 14969
14977 14970 case SD_PKT_ALLOC_FAILURE:
14978 14971 /*
14979 14972 * Temporary (hopefully) resource depletion.
14980 14973 * Since retries and RQS commands always have a
14981 14974 * scsi_pkt allocated, these cases should never
14982 14975 * get here. So the only cases this needs to
14983 14976 * handle is a bp from the waitq (which we put
14984 14977 * back onto the waitq for sdrunout), or a bp
14985 14978 * sent as an immed_bp (which we just fail).
14986 14979 */
14987 14980 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14988 14981 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n");
14989 14982
14990 14983 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14991 14984
14992 14985 if (bp == immed_bp) {
14993 14986 /*
14994 14987 * If SD_XB_DMA_FREED is clear, then
14995 14988 * this is a failure to allocate a
14996 14989 * scsi_pkt, and we must fail the
14997 14990 * command.
14998 14991 */
14999 14992 if ((xp->xb_pkt_flags &
15000 14993 SD_XB_DMA_FREED) == 0) {
15001 14994 break;
15002 14995 }
15003 14996
15004 14997 /*
15005 14998 * If this immediate command is NOT our
15006 14999 * un_retry_bp, then we must fail it.
15007 15000 */
15008 15001 if (bp != un->un_retry_bp) {
15009 15002 break;
15010 15003 }
15011 15004
15012 15005 /*
15013 15006 * We get here if this cmd is our
15014 15007 * un_retry_bp that was DMAFREED, but
15015 15008 * scsi_init_pkt() failed to reallocate
15016 15009 * DMA resources when we attempted to
15017 15010 * retry it. This can happen when an
15018 15011 * mpxio failover is in progress, but
15019 15012 * we don't want to just fail the
15020 15013 * command in this case.
15021 15014 *
15022 15015 * Use timeout(9F) to restart it after
15023 15016 * a 100ms delay. We don't want to
15024 15017 * let sdrunout() restart it, because
15025 15018 * sdrunout() is just supposed to start
15026 15019 * commands that are sitting on the
15027 15020 * wait queue. The un_retry_bp stays
15028 15021 * set until the command completes, but
15029 15022 * sdrunout can be called many times
15030 15023 * before that happens. Since sdrunout
15031 15024 * cannot tell if the un_retry_bp is
15032 15025 * already in the transport, it could
15033 15026 * end up calling scsi_transport() for
15034 15027 * the un_retry_bp multiple times.
15035 15028 *
15036 15029 * Also: don't schedule the callback
15037 15030 * if some other callback is already
15038 15031 * pending.
15039 15032 */
15040 15033 if (un->un_retry_statp == NULL) {
15041 15034 /*
15042 15035 * restore the kstat pointer to
15043 15036 * keep kstat counts coherent
15044 15037 * when we do retry the command.
15045 15038 */
15046 15039 un->un_retry_statp =
15047 15040 saved_statp;
15048 15041 }
15049 15042
15050 15043 if ((un->un_startstop_timeid == NULL) &&
15051 15044 (un->un_retry_timeid == NULL) &&
15052 15045 (un->un_direct_priority_timeid ==
15053 15046 NULL)) {
15054 15047
15055 15048 un->un_retry_timeid =
15056 15049 timeout(
15057 15050 sd_start_retry_command,
15058 15051 un, SD_RESTART_TIMEOUT);
15059 15052 }
15060 15053 goto exit;
15061 15054 }
15062 15055
15063 15056 #else
15064 15057 if (bp == immed_bp) {
15065 15058 break; /* Just fail the command */
15066 15059 }
15067 15060 #endif
15068 15061
15069 15062 /* Add the buf back to the head of the waitq */
15070 15063 bp->av_forw = un->un_waitq_headp;
15071 15064 un->un_waitq_headp = bp;
15072 15065 if (un->un_waitq_tailp == NULL) {
15073 15066 un->un_waitq_tailp = bp;
15074 15067 }
15075 15068 goto exit;
15076 15069
15077 15070 case SD_PKT_ALLOC_FAILURE_NO_DMA:
15078 15071 /*
15079 15072 * HBA DMA resource failure. Fail the command
15080 15073 * and continue processing of the queues.
15081 15074 */
15082 15075 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15083 15076 "sd_start_cmds: "
15084 15077 "SD_PKT_ALLOC_FAILURE_NO_DMA\n");
15085 15078 break;
15086 15079
15087 15080 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL:
15088 15081 /*
15089 15082 * Note:x86: Partial DMA mapping not supported
15090 15083 * for USCSI commands, and not all of the needed
15091 15084 * DMA resources could be allocated.
15092 15085 */
15093 15086 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15094 15087 "sd_start_cmds: "
15095 15088 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n");
15096 15089 break;
15097 15090
15098 15091 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL:
15099 15092 /*
15100 15093 * Note:x86: Request cannot fit into CDB based
15101 15094 * on lba and len.
15102 15095 */
15103 15096 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15104 15097 "sd_start_cmds: "
15105 15098 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n");
15106 15099 break;
15107 15100
15108 15101 default:
15109 15102 /* Should NEVER get here! */
15110 15103 panic("scsi_initpkt error");
15111 15104 /*NOTREACHED*/
15112 15105 }
15113 15106
15114 15107 /*
15115 15108 * Fatal error in allocating a scsi_pkt for this buf.
15116 15109 * Update kstats & return the buf with an error code.
15117 15110 * We must use sd_return_failed_command_no_restart() to
15118 15111 * avoid a recursive call back into sd_start_cmds().
15119 15112 * However this also means that we must keep processing
15120 15113 * the waitq here in order to avoid stalling.
15121 15114 */
15122 15115 if (statp == kstat_waitq_to_runq) {
15123 15116 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
15124 15117 }
15125 15118 sd_return_failed_command_no_restart(un, bp, EIO);
15126 15119 if (bp == immed_bp) {
15127 15120 /* immed_bp is gone by now, so clear this */
15128 15121 immed_bp = NULL;
15129 15122 }
15130 15123 continue;
15131 15124 }
15132 15125 got_pkt:
15133 15126 if (bp == immed_bp) {
15134 15127 /* goto the head of the class.... */
15135 15128 xp->xb_pktp->pkt_flags |= FLAG_HEAD;
15136 15129 }
15137 15130
15138 15131 un->un_ncmds_in_transport++;
15139 15132 SD_UPDATE_KSTATS(un, statp, bp);
15140 15133
15141 15134 /*
15142 15135 * Call scsi_transport() to send the command to the target.
15143 15136 * According to SCSA architecture, we must drop the mutex here
15144 15137 * before calling scsi_transport() in order to avoid deadlock.
15145 15138 * Note that the scsi_pkt's completion routine can be executed
15146 15139 * (from interrupt context) even before the call to
15147 15140 * scsi_transport() returns.
15148 15141 */
15149 15142 SD_TRACE(SD_LOG_IO_CORE, un,
15150 15143 "sd_start_cmds: calling scsi_transport()\n");
15151 15144 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp);
15152 15145
15153 15146 mutex_exit(SD_MUTEX(un));
15154 15147 rval = scsi_transport(xp->xb_pktp);
15155 15148 mutex_enter(SD_MUTEX(un));
15156 15149
15157 15150 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15158 15151 "sd_start_cmds: scsi_transport() returned %d\n", rval);
15159 15152
15160 15153 switch (rval) {
15161 15154 case TRAN_ACCEPT:
15162 15155 /* Clear this with every pkt accepted by the HBA */
15163 15156 un->un_tran_fatal_count = 0;
15164 15157 break; /* Success; try the next cmd (if any) */
15165 15158
15166 15159 case TRAN_BUSY:
15167 15160 un->un_ncmds_in_transport--;
15168 15161 ASSERT(un->un_ncmds_in_transport >= 0);
15169 15162
15170 15163 /*
15171 15164 * Don't retry request sense, the sense data
15172 15165 * is lost when another request is sent.
15173 15166 * Free up the rqs buf and retry
15174 15167 * the original failed cmd. Update kstat.
15175 15168 */
15176 15169 if (bp == un->un_rqs_bp) {
15177 15170 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
15178 15171 bp = sd_mark_rqs_idle(un, xp);
15179 15172 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
15180 15173 NULL, NULL, EIO, un->un_busy_timeout / 500,
15181 15174 kstat_waitq_enter);
15182 15175 goto exit;
15183 15176 }
15184 15177
15185 15178 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
15186 15179 /*
15187 15180 * Free the DMA resources for the scsi_pkt. This will
15188 15181 * allow mpxio to select another path the next time
15189 15182 * we call scsi_transport() with this scsi_pkt.
15190 15183 * See sdintr() for the rationale behind this.
15191 15184 */
15192 15185 if ((un->un_f_is_fibre == TRUE) &&
15193 15186 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
15194 15187 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) {
15195 15188 scsi_dmafree(xp->xb_pktp);
15196 15189 xp->xb_pkt_flags |= SD_XB_DMA_FREED;
15197 15190 }
15198 15191 #endif
15199 15192
15200 15193 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) {
15201 15194 /*
15202 15195 * Commands that are SD_PATH_DIRECT_PRIORITY
15203 15196 * are for error recovery situations. These do
15204 15197 * not use the normal command waitq, so if they
15205 15198 * get a TRAN_BUSY we cannot put them back onto
15206 15199 * the waitq for later retry. One possible
15207 15200 * problem is that there could already be some
15208 15201 * other command on un_retry_bp that is waiting
15209 15202 * for this one to complete, so we would be
15210 15203 * deadlocked if we put this command back onto
15211 15204 * the waitq for later retry (since un_retry_bp
15212 15205 * must complete before the driver gets back to
15213 15206 * commands on the waitq).
15214 15207 *
15215 15208 * To avoid deadlock we must schedule a callback
15216 15209 * that will restart this command after a set
15217 15210 * interval. This should keep retrying for as
15218 15211 * long as the underlying transport keeps
15219 15212 * returning TRAN_BUSY (just like for other
15220 15213 * commands). Use the same timeout interval as
15221 15214 * for the ordinary TRAN_BUSY retry.
15222 15215 */
15223 15216 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15224 15217 "sd_start_cmds: scsi_transport() returned "
15225 15218 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n");
15226 15219
15227 15220 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
15228 15221 un->un_direct_priority_timeid =
15229 15222 timeout(sd_start_direct_priority_command,
15230 15223 bp, un->un_busy_timeout / 500);
15231 15224
15232 15225 goto exit;
15233 15226 }
15234 15227
15235 15228 /*
15236 15229 * For TRAN_BUSY, we want to reduce the throttle value,
15237 15230 * unless we are retrying a command.
15238 15231 */
15239 15232 if (bp != un->un_retry_bp) {
15240 15233 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY);
15241 15234 }
15242 15235
15243 15236 /*
15244 15237 * Set up the bp to be tried again after un_busy_timeout / 500.
15245 15238 * Note:x86: Is there a timeout value in the sd_lun
15246 15239 * for this condition?
15247 15240 */
15248 15241 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500,
15249 15242 kstat_runq_back_to_waitq);
15250 15243 goto exit;
15251 15244
15252 15245 case TRAN_FATAL_ERROR:
15253 15246 un->un_tran_fatal_count++;
15254 15247 /* FALLTHRU */
15255 15248
15256 15249 case TRAN_BADPKT:
15257 15250 default:
15258 15251 un->un_ncmds_in_transport--;
15259 15252 ASSERT(un->un_ncmds_in_transport >= 0);
15260 15253
15261 15254 /*
15262 15255 * If this is our REQUEST SENSE command with a
15263 15256 * transport error, we must get back the pointers
15264 15257 * to the original buf, and mark the REQUEST
15265 15258 * SENSE command as "available".
15266 15259 */
15267 15260 if (bp == un->un_rqs_bp) {
15268 15261 bp = sd_mark_rqs_idle(un, xp);
15269 15262 xp = SD_GET_XBUF(bp);
15270 15263 } else {
15271 15264 /*
15272 15265 * Legacy behavior: do not update transport
15273 15266 * error count for request sense commands.
15274 15267 */
15275 15268 SD_UPDATE_ERRSTATS(un, sd_transerrs);
15276 15269 }
15277 15270
15278 15271 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
15279 15272 sd_print_transport_rejected_message(un, xp, rval);
15280 15273
15281 15274 /*
15282 15275 * This command will be terminated by SD driver due
15283 15276 * to a fatal transport error. We should post
15284 15277 * ereport.io.scsi.cmd.disk.tran with driver-assessment
15285 15278 * of "fail" for any command to indicate this
15286 15279 * situation.
15287 15280 */
15288 15281 if (xp->xb_ena > 0) {
15289 15282 ASSERT(un->un_fm_private != NULL);
15290 15283 sfip = un->un_fm_private;
15291 15284 sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT;
15292 15285 sd_ssc_extract_info(&sfip->fm_ssc, un,
15293 15286 xp->xb_pktp, bp, xp);
15294 15287 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
15295 15288 }
15296 15289
15297 15290 /*
15298 15291 * We must use sd_return_failed_command_no_restart() to
15299 15292 * avoid a recursive call back into sd_start_cmds().
15300 15293 * However this also means that we must keep processing
15301 15294 * the waitq here in order to avoid stalling.
15302 15295 */
15303 15296 sd_return_failed_command_no_restart(un, bp, EIO);
15304 15297
15305 15298 /*
15306 15299 * Notify any threads waiting in sd_ddi_suspend() that
15307 15300 * a command completion has occurred.
15308 15301 */
15309 15302 if (un->un_state == SD_STATE_SUSPENDED) {
15310 15303 cv_broadcast(&un->un_disk_busy_cv);
15311 15304 }
15312 15305
15313 15306 if (bp == immed_bp) {
15314 15307 /* immed_bp is gone by now, so clear this */
15315 15308 immed_bp = NULL;
15316 15309 }
15317 15310 break;
15318 15311 }
15319 15312
15320 15313 } while (immed_bp == NULL);
15321 15314
15322 15315 exit:
15323 15316 ASSERT(mutex_owned(SD_MUTEX(un)));
15324 15317 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n");
15325 15318 }
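/*
 * Informational summary of the scsi_transport() return-code handling
 * in sd_start_cmds() above (derived from that code, not a new
 * contract):
 *
 *	TRAN_ACCEPT		pkt is in flight; un_tran_fatal_count
 *				is reset to 0.
 *	TRAN_BUSY		un_ncmds_in_transport is decremented and
 *				the command is requeued or rescheduled
 *				via timeout(9F) after un_busy_timeout /
 *				500 ticks.
 *	TRAN_FATAL_ERROR	un_tran_fatal_count is incremented, then
 *				handled as for TRAN_BADPKT.
 *	TRAN_BADPKT, default	the command is failed with EIO via
 *				sd_return_failed_command_no_restart().
 */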
15326 15319
15327 15320
15328 15321 /*
15329 15322 * Function: sd_return_command
15330 15323 *
15331 15324 * Description: Returns a command to its originator (with or without an
15332 15325 * error). Also starts commands waiting to be transported
15333 15326 * to the target.
15334 15327 *
15335 15328 * Context: May be called from interrupt, kernel, or timeout context
15336 15329 */
15337 15330
15338 15331 static void
15339 15332 sd_return_command(struct sd_lun *un, struct buf *bp)
15340 15333 {
15341 15334 struct sd_xbuf *xp;
15342 15335 struct scsi_pkt *pktp;
15343 15336 struct sd_fm_internal *sfip;
15344 15337
15345 15338 ASSERT(bp != NULL);
15346 15339 ASSERT(un != NULL);
15347 15340 ASSERT(mutex_owned(SD_MUTEX(un)));
15348 15341 ASSERT(bp != un->un_rqs_bp);
15349 15342 xp = SD_GET_XBUF(bp);
15350 15343 ASSERT(xp != NULL);
15351 15344
15352 15345 pktp = SD_GET_PKTP(bp);
15353 15346 sfip = (struct sd_fm_internal *)un->un_fm_private;
15354 15347 ASSERT(sfip != NULL);
15355 15348
15356 15349 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n");
15357 15350
15358 15351 /*
15359 15352 * Note: check for the "sdrestart failed" case.
15360 15353 */
15361 15354 if ((un->un_partial_dma_supported == 1) &&
15362 15355 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) &&
15363 15356 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) &&
15364 15357 (xp->xb_pktp->pkt_resid == 0)) {
15365 15358
15366 15359 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) {
15367 15360 /*
15368 15361 * Successfully set up next portion of cmd
15369 15362 * transfer, try sending it
15370 15363 */
15371 15364 sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
15372 15365 NULL, NULL, 0, (clock_t)0, NULL);
15373 15366 sd_start_cmds(un, NULL);
15374 15367 return; /* Note:x86: need a return here? */
15375 15368 }
15376 15369 }
15377 15370
15378 15371 /*
15379 15372 * If this is the failfast bp, clear it from un_failfast_bp. This
15380 15373 * can happen if, upon being retried, the failfast bp either
15381 15374 * succeeded or encountered another error (possibly even a different
15382 15375 * error than the one that precipitated the failfast state, but in
15383 15376 * that case it would have had to exhaust retries as well). Regardless,
15384 15377 * this should never occur while the instance is in the active
15385 15378 * failfast state.
15386 15379 */
15387 15380 if (bp == un->un_failfast_bp) {
15388 15381 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
15389 15382 un->un_failfast_bp = NULL;
15390 15383 }
15391 15384
15392 15385 /*
15393 15386 * Clear the failfast state upon successful completion of ANY cmd.
15394 15387 */
15395 15388 if (bp->b_error == 0) {
15396 15389 un->un_failfast_state = SD_FAILFAST_INACTIVE;
15397 15390 /*
15398 15391 * If this is a successful command, but used to be retried,
15399 15392 * we will take it as a recovered command and post an
15400 15393 * ereport with driver-assessment of "recovered".
15401 15394 */
15402 15395 if (xp->xb_ena > 0) {
15403 15396 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
15404 15397 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY);
15405 15398 }
15406 15399 } else {
15407 15400 /*
15408 15401 * If this is a failed non-USCSI command we will post an
15409 15402 * ereport with driver-assessment set accordingly("fail" or
15410 15403 * "fatal").
15411 15404 */
15412 15405 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
15413 15406 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
15414 15407 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
15415 15408 }
15416 15409 }
15417 15410
15418 15411 /*
15419 15412 * If the command was retried one or more times, show that we
15420 15413 * are done with it, and allow processing of the waitq to resume.
15421 15414 */
15422 15415 if (bp == un->un_retry_bp) {
15423 15416 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15424 15417 "sd_return_command: un:0x%p: "
15425 15418 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
15426 15419 un->un_retry_bp = NULL;
15427 15420 un->un_retry_statp = NULL;
15428 15421 }
15429 15422
15430 15423 SD_UPDATE_RDWR_STATS(un, bp);
15431 15424 SD_UPDATE_PARTITION_STATS(un, bp);
15432 15425
15433 15426 switch (un->un_state) {
15434 15427 case SD_STATE_SUSPENDED:
15435 15428 /*
15436 15429 * Notify any threads waiting in sd_ddi_suspend() that
15437 15430 * a command completion has occurred.
15438 15431 */
15439 15432 cv_broadcast(&un->un_disk_busy_cv);
15440 15433 break;
15441 15434 default:
15442 15435 sd_start_cmds(un, NULL);
15443 15436 break;
15444 15437 }
15445 15438
15446 15439 /* Return this command up the iodone chain to its originator. */
15447 15440 mutex_exit(SD_MUTEX(un));
15448 15441
15449 15442 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
15450 15443 xp->xb_pktp = NULL;
15451 15444
15452 15445 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
15453 15446
15454 15447 ASSERT(!mutex_owned(SD_MUTEX(un)));
15455 15448 mutex_enter(SD_MUTEX(un));
15456 15449
15457 15450 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
15458 15451 }
15459 15452
15460 15453
15461 15454 /*
15462 15455 * Function: sd_return_failed_command
15463 15456 *
15464 15457 * Description: Command completion when an error occurred.
15465 15458 *
15466 15459 * Context: May be called from interrupt context
15467 15460 */
15468 15461
15469 15462 static void
15470 15463 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
15471 15464 {
15472 15465 ASSERT(bp != NULL);
15473 15466 ASSERT(un != NULL);
15474 15467 ASSERT(mutex_owned(SD_MUTEX(un)));
15475 15468
15476 15469 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15477 15470 "sd_return_failed_command: entry\n");
15478 15471
15479 15472 /*
15480 15473 * b_resid could already be nonzero due to a partial data
15481 15474 * transfer, so do not change it here.
15482 15475 */
15483 15476 SD_BIOERROR(bp, errcode);
15484 15477
15485 15478 sd_return_command(un, bp);
15486 15479 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15487 15480 "sd_return_failed_command: exit\n");
15488 15481 }
15489 15482
15490 15483
15491 15484 /*
15492 15485 * Function: sd_return_failed_command_no_restart
15493 15486 *
15494 15487 * Description: Same as sd_return_failed_command, but ensures that no
15495 15488 * call back into sd_start_cmds will be issued.
15496 15489 *
15497 15490 * Context: May be called from interrupt context
15498 15491 */
15499 15492
15500 15493 static void
15501 15494 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
15502 15495 int errcode)
15503 15496 {
15504 15497 struct sd_xbuf *xp;
15505 15498
15506 15499 ASSERT(bp != NULL);
15507 15500 ASSERT(un != NULL);
15508 15501 ASSERT(mutex_owned(SD_MUTEX(un)));
15509 15502 xp = SD_GET_XBUF(bp);
15510 15503 ASSERT(xp != NULL);
15511 15504 ASSERT(errcode != 0);
15512 15505
15513 15506 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15514 15507 "sd_return_failed_command_no_restart: entry\n");
15515 15508
15516 15509 /*
15517 15510 * b_resid could already be nonzero due to a partial data
15518 15511 * transfer, so do not change it here.
15519 15512 */
15520 15513 SD_BIOERROR(bp, errcode);
15521 15514
15522 15515 /*
15523 15516 * If this is the failfast bp, clear it. This can happen if the
15524 15517 * failfast bp encountered a fatal error when we attempted to
15525 15518 * re-try it (such as a scsi_transport(9F) failure). However
15526 15519 * we should NOT be in an active failfast state if the failfast
15527 15520 * bp is not NULL.
15528 15521 */
15529 15522 if (bp == un->un_failfast_bp) {
15530 15523 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
15531 15524 un->un_failfast_bp = NULL;
15532 15525 }
15533 15526
15534 15527 if (bp == un->un_retry_bp) {
15535 15528 /*
15536 15529 * This command was retried one or more times. Show that we are
15537 15530 * done with it, and allow processing of the waitq to resume.
15538 15531 */
15539 15532 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15540 15533 "sd_return_failed_command_no_restart: "
15541 15534 "un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
15542 15535 un->un_retry_bp = NULL;
15543 15536 un->un_retry_statp = NULL;
15544 15537 }
15545 15538
15546 15539 SD_UPDATE_RDWR_STATS(un, bp);
15547 15540 SD_UPDATE_PARTITION_STATS(un, bp);
15548 15541
15549 15542 mutex_exit(SD_MUTEX(un));
15550 15543
15551 15544 if (xp->xb_pktp != NULL) {
15552 15545 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
15553 15546 xp->xb_pktp = NULL;
15554 15547 }
15555 15548
15556 15549 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
15557 15550
15558 15551 mutex_enter(SD_MUTEX(un));
15559 15552
15560 15553 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15561 15554 "sd_return_failed_command_no_restart: exit\n");
15562 15555 }
15563 15556
15564 15557
15565 15558 /*
15566 15559 * Function: sd_retry_command
15567 15560 *
15568 15561 * Description: queue up a command for retry, or (optionally) fail it
15569 15562 * if retry counts are exhausted.
15570 15563 *
15571 15564 * Arguments: un - Pointer to the sd_lun struct for the target.
15572 15565 *
15573 15566 * bp - Pointer to the buf for the command to be retried.
15574 15567 *
15575 15568 * retry_check_flag - Flag to see which (if any) of the retry
15576 15569 * counts should be decremented/checked. If the indicated
15577 15570 * retry count is exhausted, then the command will not be
15578 15571 * retried; it will be failed instead. This should use a
15579 15572 * value equal to one of the following:
15580 15573 *
15581 15574 * SD_RETRIES_NOCHECK
15582 15575 * SD_RETRIES_STANDARD
15583 15576 * SD_RETRIES_VICTIM
15584 15577 *
15585 15578 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
15586 15579 * if the check should be made to see if FLAG_ISOLATE is set
15587 15580 * in the pkt. If FLAG_ISOLATE is set, then the command is
15588 15581 * not retried, it is simply failed.
15589 15582 *
15590 15583 * user_funcp - Ptr to function to call before dispatching the
15591 15584 * command. May be NULL if no action needs to be performed.
15592 15585 * (Primarily intended for printing messages.)
15593 15586 *
15594 15587 * user_arg - Optional argument to be passed along to
15595 15588 * the user_funcp call.
15596 15589 *
15597 15590 * failure_code - errno return code to set in the bp if the
15598 15591 * command is going to be failed.
15599 15592 *
15600 15593 * retry_delay - Retry delay interval in (clock_t) units. May
15601 15594 * be zero, which indicates that the command should be retried
15602 15595 * immediately (ie, without an intervening delay).
15603 15596 *
15604 15597 * statp - Ptr to kstat function to be updated if the command
15605 15598 * is queued for a delayed retry. May be NULL if no kstat
15606 15599 * update is desired.
15607 15600 *
15608 15601 * Context: May be called from interrupt context.
15609 15602 */
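/*
 * Example usage (illustrative; it mirrors the TRAN_BUSY request-sense
 * caller in sd_start_cmds() above): retry against the standard retry
 * count, failing with EIO when exhausted, after a busy-delay interval,
 * while updating the waitq kstat:
 *
 *	sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
 *	    EIO, un->un_busy_timeout / 500, kstat_waitq_enter);
 */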
15610 15603
15611 15604 static void
15612 15605 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
15613 15606 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int code),
15614 15607 void *user_arg, int failure_code, clock_t retry_delay,
15615 15608 void (*statp)(kstat_io_t *))
15616 15609 {
15617 15610 struct sd_xbuf *xp;
15618 15611 struct scsi_pkt *pktp;
15619 15612 struct sd_fm_internal *sfip;
15620 15613
15621 15614 ASSERT(un != NULL);
15622 15615 ASSERT(mutex_owned(SD_MUTEX(un)));
15623 15616 ASSERT(bp != NULL);
15624 15617 xp = SD_GET_XBUF(bp);
15625 15618 ASSERT(xp != NULL);
15626 15619 pktp = SD_GET_PKTP(bp);
15627 15620 ASSERT(pktp != NULL);
15628 15621
15629 15622 sfip = (struct sd_fm_internal *)un->un_fm_private;
15630 15623 ASSERT(sfip != NULL);
15631 15624
15632 15625 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
15633 15626 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);
15634 15627
15635 15628 /*
15636 15629 * If we are syncing or dumping, fail the command to avoid
15637 15630 * recursively calling back into scsi_transport().
15638 15631 */
15639 15632 if (ddi_in_panic()) {
15640 15633 goto fail_command_no_log;
15641 15634 }
15642 15635
15643 15636 /*
15644 15637 * We should never be retrying a command with FLAG_DIAGNOSE set, so
15645 15638 * log an error and fail the command.
15646 15639 */
15647 15640 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
15648 15641 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
15649 15642 "ERROR, retrying FLAG_DIAGNOSE command.\n");
15650 15643 sd_dump_memory(un, SD_LOG_IO, "CDB",
15651 15644 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
15652 15645 sd_dump_memory(un, SD_LOG_IO, "Sense Data",
15653 15646 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
15654 15647 goto fail_command;
15655 15648 }
15656 15649
15657 15650 /*
15658 15651 * If we are suspended, then put the command onto the head of
15659 15652 * the wait queue since we don't want to start more commands,
15660 15653 * and clear un_retry_bp. When we are resumed, the command will
15661 15654 * be handled from the wait queue.
15662 15655 */
15663 15656 switch (un->un_state) {
15664 15657 case SD_STATE_SUSPENDED:
15665 15658 case SD_STATE_DUMPING:
15666 15659 bp->av_forw = un->un_waitq_headp;
15667 15660 un->un_waitq_headp = bp;
15668 15661 if (un->un_waitq_tailp == NULL) {
15669 15662 un->un_waitq_tailp = bp;
15670 15663 }
15671 15664 if (bp == un->un_retry_bp) {
15672 15665 un->un_retry_bp = NULL;
15673 15666 un->un_retry_statp = NULL;
15674 15667 }
15675 15668 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
15676 15669 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
15677 15670 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
15678 15671 return;
15679 15672 default:
15680 15673 break;
15681 15674 }
15682 15675
15683 15676 /*
15684 15677 * If the caller wants us to check FLAG_ISOLATE, then see if that
15685 15678 * is set; if it is then we do not want to retry the command.
15686 15679 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
15687 15680 */
15688 15681 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
15689 15682 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
15690 15683 goto fail_command;
15691 15684 }
15692 15685 }
15693 15686
15694 15687
15695 15688 /*
15696 15689 * If SD_RETRIES_FAILFAST is set, it indicates that either a
15697 15690 * command timeout or a selection timeout has occurred. This means
15698 15691 * that we were unable to establish any kind of communication with
15699 15692 * the target, and subsequent retries and/or commands are likely
15700 15693 * to encounter similar results and take a long time to complete.
15701 15694 *
15702 15695 * If this is a failfast error condition, we need to update the
15703 15696 * failfast state, even if this bp does not have B_FAILFAST set.
15704 15697 */
15705 15698 if (retry_check_flag & SD_RETRIES_FAILFAST) {
15706 15699 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
15707 15700 ASSERT(un->un_failfast_bp == NULL);
15708 15701 /*
15709 15702 * If we are already in the active failfast state, and
15710 15703 * another failfast error condition has been detected,
15711 15704 * then fail this command if it has B_FAILFAST set.
15712 15705 * If B_FAILFAST is clear, then maintain the legacy
15713 15706 * behavior of retrying heroically, even though this will
15714 15707 * take a lot more time to fail the command.
15715 15708 */
15716 15709 if (bp->b_flags & B_FAILFAST) {
15717 15710 goto fail_command;
15718 15711 }
15719 15712 } else {
15720 15713 /*
15721 15714 * We're not in the active failfast state, but we
15722 15715 * have a failfast error condition, so we must begin
15723 15716 * transition to the next state. We do this regardless
15724 15717 * of whether or not this bp has B_FAILFAST set.
15725 15718 */
15726 15719 if (un->un_failfast_bp == NULL) {
15727 15720 /*
15728 15721 * This is the first bp to meet a failfast
15729 15722 * condition so save it on un_failfast_bp &
15730 15723 * do normal retry processing. Do not enter
15731 15724 * active failfast state yet. This marks
15732 15725 * entry into the "failfast pending" state.
15733 15726 */
15734 15727 un->un_failfast_bp = bp;
15735 15728
15736 15729 } else if (un->un_failfast_bp == bp) {
15737 15730 /*
15738 15731 * This is the second time *this* bp has
15739 15732 * encountered a failfast error condition,
15740 15733 * so enter active failfast state & flush
15741 15734 * queues as appropriate.
15742 15735 */
15743 15736 un->un_failfast_state = SD_FAILFAST_ACTIVE;
15744 15737 un->un_failfast_bp = NULL;
15745 15738 sd_failfast_flushq(un);
15746 15739
15747 15740 /*
15748 15741 * Fail this bp now if B_FAILFAST set;
15749 15742 * otherwise continue with retries. (It would
15750 15743 * be pretty ironic if this bp succeeded on a
15751 15744 * subsequent retry after we just flushed all
15752 15745 * the queues).
15753 15746 */
15754 15747 if (bp->b_flags & B_FAILFAST) {
15755 15748 goto fail_command;
15756 15749 }
15757 15750
15758 15751 #if !defined(lint) && !defined(__lint)
15759 15752 } else {
15760 15753 /*
15761 15754 * If neither of the preceding conditionals
15762 15755 * was true, it means that there is some
15763 15756 * *other* bp that has met an initial failfast
15764 15757 * condition and is currently either being
15765 15758 * retried or is waiting to be retried. In
15766 15759 * that case we should perform normal retry
15767 15760 * processing on *this* bp, since there is a
15768 15761 * chance that the current failfast condition
15769 15762 * is transient and recoverable. If that does
15770 15763 * not turn out to be the case, then retries
15771 15764 * will be cleared when the wait queue is
15772 15765 * flushed anyway.
15773 15766 */
15774 15767 #endif
15775 15768 }
15776 15769 }
15777 15770 } else {
15778 15771 /*
15779 15772 * SD_RETRIES_FAILFAST is clear, which indicates that we
15780 15773 * likely were able to at least establish some level of
15781 15774 * communication with the target and subsequent commands
15782 15775 * and/or retries are likely to get through to the target.
15783 15776 * In this case we want to be aggressive about clearing
15784 15777 * the failfast state. Note that this does not affect
15785 15778 * the "failfast pending" condition.
15786 15779 */
15787 15780 un->un_failfast_state = SD_FAILFAST_INACTIVE;
15788 15781 }
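	/*
	 * Informational summary of the failfast state machine implemented
	 * above:
	 *
	 *   SD_FAILFAST_INACTIVE
	 *	--(first failfast error; bp saved in un_failfast_bp)-->
	 *   "failfast pending" (un_failfast_bp != NULL)
	 *	--(same bp hits a second failfast error)-->
	 *   SD_FAILFAST_ACTIVE (queues flushed via sd_failfast_flushq())
	 *
	 * A non-failfast error condition drops the state back to
	 * SD_FAILFAST_INACTIVE, though it does not clear a pending
	 * un_failfast_bp.
	 */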
15789 15782
15790 15783
15791 15784 /*
15792 15785 * Check the specified retry count to see if we can still do
15793 15786 * any retries with this pkt before we should fail it.
15794 15787 */
15795 15788 switch (retry_check_flag & SD_RETRIES_MASK) {
15796 15789 case SD_RETRIES_VICTIM:
15797 15790 /*
15798 15791 * Check the victim retry count. If exhausted, then fall
15799 15792 * thru & check against the standard retry count.
15800 15793 */
15801 15794 if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
15802 15795 /* Increment count & proceed with the retry */
15803 15796 xp->xb_victim_retry_count++;
15804 15797 break;
15805 15798 }
15806 15799 /* Victim retries exhausted, fall back to std. retries... */
15807 15800 /* FALLTHRU */
15808 15801
15809 15802 case SD_RETRIES_STANDARD:
15810 15803 if (xp->xb_retry_count >= un->un_retry_count) {
15811 15804 /* Retries exhausted, fail the command */
15812 15805 SD_TRACE(SD_LOG_IO_CORE, un,
15813 15806 "sd_retry_command: retries exhausted!\n");
15814 15807 /*
15815 15808 * update b_resid for failed SCMD_READ & SCMD_WRITE
15816 15809 * commands with nonzero pkt_resid.
15817 15810 */
15818 15811 if ((pktp->pkt_reason == CMD_CMPLT) &&
15819 15812 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
15820 15813 (pktp->pkt_resid != 0)) {
15821 15814 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
15822 15815 if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
15823 15816 SD_UPDATE_B_RESID(bp, pktp);
15824 15817 }
15825 15818 }
15826 15819 goto fail_command;
15827 15820 }
15828 15821 xp->xb_retry_count++;
15829 15822 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15830 15823 "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
15831 15824 break;
15832 15825
15833 15826 case SD_RETRIES_UA:
15834 15827 if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
15835 15828 /* Retries exhausted, fail the command */
15836 15829 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
15837 15830 "Unit Attention retries exhausted. "
15838 15831 "Check the target.\n");
15839 15832 goto fail_command;
15840 15833 }
15841 15834 xp->xb_ua_retry_count++;
15842 15835 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15843 15836 "sd_retry_command: retry count:%d\n",
15844 15837 xp->xb_ua_retry_count);
15845 15838 break;
15846 15839
15847 15840 case SD_RETRIES_BUSY:
15848 15841 if (xp->xb_retry_count >= un->un_busy_retry_count) {
15849 15842 /* Retries exhausted, fail the command */
15850 15843 SD_TRACE(SD_LOG_IO_CORE, un,
15851 15844 "sd_retry_command: retries exhausted!\n");
15852 15845 goto fail_command;
15853 15846 }
15854 15847 xp->xb_retry_count++;
15855 15848 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15856 15849 "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
15857 15850 break;
15858 15851
15859 15852 case SD_RETRIES_NOCHECK:
15860 15853 default:
15861 15854 /* No retry count to check. Just proceed with the retry */
15862 15855 break;
15863 15856 }
15864 15857
15865 15858 xp->xb_pktp->pkt_flags |= FLAG_HEAD;
15866 15859
15867 15860 /*
15868 15861 * If this is a non-USCSI command that failed during its last
15869 15862 * execution and is now being retried, post an ereport with a
15870 15863 * driver-assessment of "retry".
15871 15864 * Partial DMA, request sense, and STATUS_QFULL retries involve
15872 15865 * no hardware error, so ereport posting is bypassed for them.
15873 15866 */
15874 15867 if (failure_code != 0) {
15875 15868 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
15876 15869 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
15877 15870 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY);
15878 15871 }
15879 15872 }
15880 15873
15881 15874 /*
15882 15875 * If we were given a zero timeout, we must attempt to retry the
15883 15876 * command immediately (ie, without a delay).
15884 15877 */
15885 15878 if (retry_delay == 0) {
15886 15879 /*
15887 15880 * Check some limiting conditions to see if we can actually
15888 15881 * do the immediate retry. If we cannot, then we must
15889 15882 * fall back to queueing up a delayed retry.
15890 15883 */
15891 15884 if (un->un_ncmds_in_transport >= un->un_throttle) {
15892 15885 /*
15893 15886 * We are at the throttle limit for the target,
15894 15887 * fall back to delayed retry.
15895 15888 */
15896 15889 retry_delay = un->un_busy_timeout;
15897 15890 statp = kstat_waitq_enter;
15898 15891 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15899 15892 "sd_retry_command: immed. retry hit "
15900 15893 "throttle!\n");
15901 15894 } else {
15902 15895 /*
15903 15896 * We're clear to proceed with the immediate retry.
15904 15897 * First call the user-provided function (if any)
15905 15898 */
15906 15899 if (user_funcp != NULL) {
15907 15900 (*user_funcp)(un, bp, user_arg,
15908 15901 SD_IMMEDIATE_RETRY_ISSUED);
15909 15902 #ifdef __lock_lint
15910 15903 sd_print_incomplete_msg(un, bp, user_arg,
15911 15904 SD_IMMEDIATE_RETRY_ISSUED);
15912 15905 sd_print_cmd_incomplete_msg(un, bp, user_arg,
15913 15906 SD_IMMEDIATE_RETRY_ISSUED);
15914 15907 sd_print_sense_failed_msg(un, bp, user_arg,
15915 15908 SD_IMMEDIATE_RETRY_ISSUED);
15916 15909 #endif
15917 15910 }
15918 15911
15919 15912 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15920 15913 "sd_retry_command: issuing immediate retry\n");
15921 15914
15922 15915 /*
15923 15916 * Call sd_start_cmds() to transport the command to
15924 15917 * the target.
15925 15918 */
15926 15919 sd_start_cmds(un, bp);
15927 15920
15928 15921 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15929 15922 "sd_retry_command exit\n");
15930 15923 return;
15931 15924 }
15932 15925 }
15933 15926
15934 15927 /*
15935 15928 * Set up to retry the command after a delay.
15936 15929 * First call the user-provided function (if any)
15937 15930 */
15938 15931 if (user_funcp != NULL) {
15939 15932 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED);
15940 15933 }
15941 15934
15942 15935 sd_set_retry_bp(un, bp, retry_delay, statp);
15943 15936
15944 15937 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
15945 15938 return;
15946 15939
15947 15940 fail_command:
15948 15941
15949 15942 if (user_funcp != NULL) {
15950 15943 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED);
15951 15944 }
15952 15945
15953 15946 fail_command_no_log:
15954 15947
15955 15948 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15956 15949 "sd_retry_command: returning failed command\n");
15957 15950
15958 15951 sd_return_failed_command(un, bp, failure_code);
15959 15952
15960 15953 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
15961 15954 }
15962 15955
15963 15956
15964 15957 /*
15965 15958 * Function: sd_set_retry_bp
15966 15959 *
15967 15960 * Description: Set up the given bp for retry.
15968 15961 *
15969 15962 * Arguments: un - ptr to associated softstate
15970 15963 * bp - ptr to buf(9S) for the command
15971 15964 * retry_delay - time interval before issuing retry (may be 0)
15972 15965 * statp - optional pointer to kstat function
15973 15966 *
15974 15967 * Context: May be called under interrupt context
15975 15968 */
15976 15969
15977 15970 static void
15978 15971 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay,
15979 15972 void (*statp)(kstat_io_t *))
15980 15973 {
15981 15974 ASSERT(un != NULL);
15982 15975 ASSERT(mutex_owned(SD_MUTEX(un)));
15983 15976 ASSERT(bp != NULL);
15984 15977
15985 15978 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
15986 15979 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp);
15987 15980
15988 15981 /*
15989 15982 * Indicate that the command is being retried. This will not allow any
15990 15983 * other commands on the wait queue to be transported to the target
15991 15984 * until this command has been completed (success or failure). The
15992 15985 * "retry command" is not transported to the target until the given
15993 15986 * time delay expires, unless the user specified a 0 retry_delay.
15994 15987 *
15995 15988 * Note: the timeout(9F) callback routine is what actually calls
15996 15989 * sd_start_cmds() to transport the command, with the exception of a
15997 15990 * zero retry_delay. The only current implementor of a zero retry delay
15998 15991 * is the case where a START_STOP_UNIT is sent to spin-up a device.
15999 15992 */
16000 15993 if (un->un_retry_bp == NULL) {
16001 15994 ASSERT(un->un_retry_statp == NULL);
16002 15995 un->un_retry_bp = bp;
16003 15996
16004 15997 /*
16005 15998 * If the user has not specified a delay, the command should
16006 15999 * be queued and no timeout should be scheduled.
16007 16000 */
16008 16001 if (retry_delay == 0) {
16009 16002 /*
16010 16003 * Save the kstat pointer that will be used in the
16011 16004 * call to SD_UPDATE_KSTATS() below, so that
16012 16005 * sd_start_cmds() can correctly decrement the waitq
16013 16006 * count when it is time to transport this command.
16014 16007 */
16015 16008 un->un_retry_statp = statp;
16016 16009 goto done;
16017 16010 }
16018 16011 }
16019 16012
16020 16013 if (un->un_retry_bp == bp) {
16021 16014 /*
16022 16015 * Save the kstat pointer that will be used in the call to
16023 16016 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can
16024 16017 * correctly decrement the waitq count when it is time to
16025 16018 * transport this command.
16026 16019 */
16027 16020 un->un_retry_statp = statp;
16028 16021
16029 16022 /*
16030 16023 * Schedule a timeout if:
16031 16024 * 1) The user has specified a delay.
16032 16025 * 2) There is not a START_STOP_UNIT callback pending.
16033 16026 *
16034 16027 * If no delay has been specified, then it is up to the caller
16035 16028 * to ensure that IO processing continues without stalling.
16036 16029 * Effectively, this means that the caller will issue the
16037 16030 * required call to sd_start_cmds(). The START_STOP_UNIT
16038 16031 * callback does this after the START STOP UNIT command has
16039 16032 * completed. In either of these cases we should not schedule
16040 16033 * a timeout callback here. Also don't schedule the timeout if
16041 16034 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart.
16042 16035 */
16043 16036 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) &&
16044 16037 (un->un_direct_priority_timeid == NULL)) {
16045 16038 un->un_retry_timeid =
16046 16039 timeout(sd_start_retry_command, un, retry_delay);
16047 16040 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16048 16041 "sd_set_retry_bp: setting timeout: un: 0x%p"
16049 16042 " bp:0x%p un_retry_timeid:0x%p\n",
16050 16043 un, bp, un->un_retry_timeid);
16051 16044 }
16052 16045 } else {
16053 16046 /*
16054 16047 * We only get in here if there is already another command
16055 16048 * waiting to be retried. In this case, we just put the
16056 16049 * given command onto the wait queue, so it can be transported
16057 16050 * after the current retry command has completed.
16058 16051 *
16059 16052 * Also we have to make sure that if the command at the head
16060 16053 * of the wait queue is the un_failfast_bp, that we do not
16061 16054 * put ahead of it any other commands that are to be retried.
16062 16055 */
16063 16056 if ((un->un_failfast_bp != NULL) &&
16064 16057 (un->un_failfast_bp == un->un_waitq_headp)) {
16065 16058 /*
16066 16059 * Enqueue this command AFTER the first command on
16067 16060 * the wait queue (which is also un_failfast_bp).
16068 16061 */
16069 16062 bp->av_forw = un->un_waitq_headp->av_forw;
16070 16063 un->un_waitq_headp->av_forw = bp;
16071 16064 if (un->un_waitq_headp == un->un_waitq_tailp) {
16072 16065 un->un_waitq_tailp = bp;
16073 16066 }
16074 16067 } else {
16075 16068 /* Enqueue this command at the head of the waitq. */
16076 16069 bp->av_forw = un->un_waitq_headp;
16077 16070 un->un_waitq_headp = bp;
16078 16071 if (un->un_waitq_tailp == NULL) {
16079 16072 un->un_waitq_tailp = bp;
16080 16073 }
16081 16074 }
16082 16075
16083 16076 if (statp == NULL) {
16084 16077 statp = kstat_waitq_enter;
16085 16078 }
16086 16079 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16087 16080 "sd_set_retry_bp: un:0x%p already delayed retry\n", un);
16088 16081 }
16089 16082
16090 16083 done:
16091 16084 if (statp != NULL) {
16092 16085 SD_UPDATE_KSTATS(un, statp, bp);
16093 16086 }
16094 16087
16095 16088 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16096 16089 "sd_set_retry_bp: exit un:0x%p\n", un);
16097 16090 }
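/*
 * Informational note on the statp convention: statp is one of the
 * kstat_io(9S) queue-transition helpers, e.g. kstat_waitq_enter(9F) or
 * kstat_runq_back_to_waitq(9F). Saving it in un_retry_statp and
 * deferring the SD_UPDATE_KSTATS() call until the retry is actually
 * queued or transported keeps the waitq/runq counts coherent across
 * retries.
 */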
16098 16091
16099 16092
16100 16093 /*
16101 16094 * Function: sd_start_retry_command
16102 16095 *
16103 16096 * Description: Start the command that has been waiting on the target's
16104 16097 * retry queue. Called from timeout(9F) context after the
16105 16098 * retry delay interval has expired.
16106 16099 *
16107 16100 * Arguments: arg - pointer to associated softstate for the device.
16108 16101 *
16109 16102 * Context: timeout(9F) thread context. May not sleep.
16110 16103 */
16111 16104
16112 16105 static void
16113 16106 sd_start_retry_command(void *arg)
16114 16107 {
16115 16108 struct sd_lun *un = arg;
16116 16109
16117 16110 ASSERT(un != NULL);
16118 16111 ASSERT(!mutex_owned(SD_MUTEX(un)));
16119 16112
16120 16113 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16121 16114 "sd_start_retry_command: entry\n");
16122 16115
16123 16116 mutex_enter(SD_MUTEX(un));
16124 16117
16125 16118 un->un_retry_timeid = NULL;
16126 16119
16127 16120 if (un->un_retry_bp != NULL) {
16128 16121 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16129 16122 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n",
16130 16123 un, un->un_retry_bp);
16131 16124 sd_start_cmds(un, un->un_retry_bp);
16132 16125 }
16133 16126
16134 16127 mutex_exit(SD_MUTEX(un));
16135 16128
16136 16129 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16137 16130 "sd_start_retry_command: exit\n");
16138 16131 }
16139 16132
16140 16133 /*
16141 16134 * Function: sd_rmw_msg_print_handler
16142 16135 *
16143 16136 * Description: If RMW mode is enabled and the warning message has
16144 16137 * been triggered, print the I/O count for the fixed interval.
16145 16138 *
16146 16139 * Arguments: arg - pointer to associated softstate for the device.
16147 16140 *
16148 16141 * Context: timeout(9F) thread context. May not sleep.
16149 16142 */
16150 16143 static void
16151 16144 sd_rmw_msg_print_handler(void *arg)
16152 16145 {
16153 16146 struct sd_lun *un = arg;
16154 16147
16155 16148 ASSERT(un != NULL);
16156 16149 ASSERT(!mutex_owned(SD_MUTEX(un)));
16157 16150
16158 16151 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16159 16152 "sd_rmw_msg_print_handler: entry\n");
16160 16153
16161 16154 mutex_enter(SD_MUTEX(un));
16162 16155
16163 16156 if (un->un_rmw_incre_count > 0) {
16164 16157 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16165 16158 "%"PRIu64" I/O requests were not aligned to the %d-byte disk "
16166 16159 "sector size in the past %ld seconds. They were handled via "
16167 16160 "Read-Modify-Write, which severely degrades performance!\n",
16168 16161 un->un_rmw_incre_count, un->un_tgt_blocksize,
16169 16162 drv_hztousec(SD_RMW_MSG_PRINT_TIMEOUT) / 1000000);
16170 16163 un->un_rmw_incre_count = 0;
16171 16164 un->un_rmw_msg_timeid = timeout(sd_rmw_msg_print_handler,
16172 16165 un, SD_RMW_MSG_PRINT_TIMEOUT);
16173 16166 } else {
16174 16167 un->un_rmw_msg_timeid = NULL;
16175 16168 }
16176 16169
16177 16170 mutex_exit(SD_MUTEX(un));
16178 16171
16179 16172 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16180 16173 "sd_rmw_msg_print_handler: exit\n");
16181 16174 }
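/*
 * Informational note: drv_hztousec(9F) converts the tick-based
 * SD_RMW_MSG_PRINT_TIMEOUT interval to microseconds, so dividing by
 * 1000000 in the message above yields the interval length in seconds.
 */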
16182 16175
16183 16176 /*
16184 16177 * Function: sd_start_direct_priority_command
16185 16178 *
16186 16179 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had
16187 16180 * received TRAN_BUSY when we called scsi_transport() to send it
16188 16181 * to the underlying HBA. This function is called from timeout(9F)
16189 16182 * context after the delay interval has expired.
16190 16183 *
16191 16184 * Arguments: arg - pointer to associated buf(9S) to be restarted.
16192 16185 *
16193 16186 * Context: timeout(9F) thread context. May not sleep.
16194 16187 */
16195 16188
16196 16189 static void
16197 16190 sd_start_direct_priority_command(void *arg)
16198 16191 {
16199 16192 struct buf *priority_bp = arg;
16200 16193 struct sd_lun *un;
16201 16194
16202 16195 ASSERT(priority_bp != NULL);
16203 16196 un = SD_GET_UN(priority_bp);
16204 16197 ASSERT(un != NULL);
16205 16198 ASSERT(!mutex_owned(SD_MUTEX(un)));
16206 16199
16207 16200 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16208 16201 "sd_start_direct_priority_command: entry\n");
16209 16202
16210 16203 mutex_enter(SD_MUTEX(un));
16211 16204 un->un_direct_priority_timeid = NULL;
16212 16205 sd_start_cmds(un, priority_bp);
16213 16206 mutex_exit(SD_MUTEX(un));
16214 16207
16215 16208 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16216 16209 "sd_start_direct_priority_command: exit\n");
16217 16210 }
16218 16211
16219 16212
16220 16213 /*
16221 16214 * Function: sd_send_request_sense_command
16222 16215 *
16223 16216 * Description: Sends a REQUEST SENSE command to the target
16224 16217 *
16225 16218 * Context: May be called from interrupt context.
16226 16219 */
16227 16220
16228 16221 static void
16229 16222 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
16230 16223 struct scsi_pkt *pktp)
16231 16224 {
16232 16225 ASSERT(bp != NULL);
16233 16226 ASSERT(un != NULL);
16234 16227 ASSERT(mutex_owned(SD_MUTEX(un)));
16235 16228
16236 16229 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: "
16237 16230 "entry: buf:0x%p\n", bp);
16238 16231
16239 16232 /*
16240 16233 * If we are syncing or dumping, then fail the command to avoid a
16241 16234 * recursive callback into scsi_transport(). Also fail the command
16242 16235 * if we are suspended (legacy behavior).
16243 16236 */
16244 16237 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
16245 16238 (un->un_state == SD_STATE_DUMPING)) {
16246 16239 sd_return_failed_command(un, bp, EIO);
16247 16240 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16248 16241 "sd_send_request_sense_command: syncing/dumping, exit\n");
16249 16242 return;
16250 16243 }
16251 16244
16252 16245 /*
16253 16246 * Retry the failed command and don't issue the request sense if:
16254 16247 * 1) the sense buf is busy
16255 16248 * 2) we have 1 or more outstanding commands on the target
16256 16249 * (the sense data will be cleared or invalidated anyway)
16257 16250 *
16258 16251 * Note: There could be an issue with not checking a retry limit
16259 16252 * here; the problem is determining which retry limit to check.
16260 16253 */
16261 16254 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) {
16262 16255 /* Don't retry if the command is flagged as non-retryable */
16263 16256 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
16264 16257 sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
16265 16258 NULL, NULL, 0, un->un_busy_timeout,
16266 16259 kstat_waitq_enter);
16267 16260 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16268 16261 "sd_send_request_sense_command: "
16269 16262 "at full throttle, retrying exit\n");
16270 16263 } else {
16271 16264 sd_return_failed_command(un, bp, EIO);
16272 16265 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16273 16266 "sd_send_request_sense_command: "
16274 16267 "at full throttle, non-retryable exit\n");
16275 16268 }
16276 16269 return;
16277 16270 }
16278 16271
16279 16272 sd_mark_rqs_busy(un, bp);
16280 16273 sd_start_cmds(un, un->un_rqs_bp);
16281 16274
16282 16275 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16283 16276 "sd_send_request_sense_command: exit\n");
16284 16277 }
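/*
 * Informational note: the driver pre-allocates a single REQUEST SENSE
 * buf/pkt pair per LUN in sd_alloc_rqs() at attach(9E) time (see
 * below); un_sense_isbusy serializes its use, which is why a busy
 * sense buf above forces the failed command to be retried rather than
 * sensed immediately.
 */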
16285 16278
16286 16279
16287 16280 /*
16288 16281 * Function: sd_mark_rqs_busy
16289 16282 *
16290 16283 * Description: Indicate that the request sense bp for this instance is
16291 16284 * in use.
16292 16285 *
16293 16286 * Context: May be called under interrupt context
16294 16287 */
16295 16288
16296 16289 static void
16297 16290 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp)
16298 16291 {
16299 16292 struct sd_xbuf *sense_xp;
16300 16293
16301 16294 ASSERT(un != NULL);
16302 16295 ASSERT(bp != NULL);
16303 16296 ASSERT(mutex_owned(SD_MUTEX(un)));
16304 16297 ASSERT(un->un_sense_isbusy == 0);
16305 16298
16306 16299 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: "
16307 16300 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un);
16308 16301
16309 16302 sense_xp = SD_GET_XBUF(un->un_rqs_bp);
16310 16303 ASSERT(sense_xp != NULL);
16311 16304
16312 16305 SD_INFO(SD_LOG_IO, un,
16313 16306 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp);
16314 16307
16315 16308 ASSERT(sense_xp->xb_pktp != NULL);
16316 16309 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD))
16317 16310 == (FLAG_SENSING | FLAG_HEAD));
16318 16311
16319 16312 un->un_sense_isbusy = 1;
16320 16313 un->un_rqs_bp->b_resid = 0;
16321 16314 sense_xp->xb_pktp->pkt_resid = 0;
16322 16315 sense_xp->xb_pktp->pkt_reason = 0;
16323 16316
16324 16317 /* So we can get back the bp at interrupt time! */
16325 16318 sense_xp->xb_sense_bp = bp;
16326 16319
16327 16320 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH);
16328 16321
16329 16322 /*
16330 16323 * Mark this buf as awaiting sense data. (This is already set in
16331 16324 * the pkt_flags for the RQS packet.)
16332 16325 */
16333 16326 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING;
16334 16327
16335 16328 /* Request sense down same path */
16336 16329 if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) &&
16337 16330 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance)
16338 16331 sense_xp->xb_pktp->pkt_path_instance =
16339 16332 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance;
16340 16333
16341 16334 sense_xp->xb_retry_count = 0;
16342 16335 sense_xp->xb_victim_retry_count = 0;
16343 16336 sense_xp->xb_ua_retry_count = 0;
16344 16337 sense_xp->xb_nr_retry_count = 0;
16345 16338 sense_xp->xb_dma_resid = 0;
16346 16339
16347 16340 /* Clean up the fields for auto-request sense */
16348 16341 sense_xp->xb_sense_status = 0;
16349 16342 sense_xp->xb_sense_state = 0;
16350 16343 sense_xp->xb_sense_resid = 0;
16351 16344 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data));
16352 16345
16353 16346 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n");
16354 16347 }
16355 16348
16356 16349
16357 16350 /*
16358 16351 * Function: sd_mark_rqs_idle
16359 16352 *
16360 16353 * Description: SD_MUTEX must be held continuously through this routine
16361 16354 * to prevent reuse of the rqs struct before the caller can
16362 16355 * complete its processing.
16363 16356 *
16364 16357 * Return Code: Pointer to the RQS buf
16365 16358 *
16366 16359 * Context: May be called under interrupt context
16367 16360 */
16368 16361
16369 16362 static struct buf *
16370 16363 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp)
16371 16364 {
16372 16365 struct buf *bp;
16373 16366 ASSERT(un != NULL);
16374 16367 ASSERT(sense_xp != NULL);
16375 16368 ASSERT(mutex_owned(SD_MUTEX(un)));
16376 16369 ASSERT(un->un_sense_isbusy != 0);
16377 16370
16378 16371 un->un_sense_isbusy = 0;
16379 16372 bp = sense_xp->xb_sense_bp;
16380 16373 sense_xp->xb_sense_bp = NULL;
16381 16374
16382 16375 /* This pkt is no longer interested in getting sense data */
16383 16376 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING;
16384 16377
16385 16378 return (bp);
16386 16379 }
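/*
 * Informational note: sd_mark_rqs_busy() and sd_mark_rqs_idle() are
 * used as a strict pair under SD_MUTEX: busy stashes the originating
 * bp in xb_sense_bp and sets FLAG_SENSING on that bp's pkt; idle
 * reverses both and hands the original bp back so completion or retry
 * processing can continue (see the TRAN_BUSY and transport-error
 * paths in sd_start_cmds() above).
 */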
16387 16380
16388 16381
16389 16382
16390 16383 /*
16391 16384 * Function: sd_alloc_rqs
16392 16385 *
16393 16386 * Description: Set up the unit to receive auto request sense data
16394 16387 *
16395 16388 * Return Code: DDI_SUCCESS or DDI_FAILURE
16396 16389 *
16397 16390 * Context: Called under attach(9E) context
16398 16391 */
16399 16392
16400 16393 static int
16401 16394 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un)
16402 16395 {
16403 16396 struct sd_xbuf *xp;
16404 16397
16405 16398 ASSERT(un != NULL);
16406 16399 ASSERT(!mutex_owned(SD_MUTEX(un)));
16407 16400 ASSERT(un->un_rqs_bp == NULL);
16408 16401 ASSERT(un->un_rqs_pktp == NULL);
16409 16402
16410 16403 /*
16411 16404 * First allocate the required buf and scsi_pkt structs, then set up
16412 16405 * the CDB in the scsi_pkt for a REQUEST SENSE command.
16413 16406 */
16414 16407 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
16415 16408 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
16416 16409 if (un->un_rqs_bp == NULL) {
16417 16410 return (DDI_FAILURE);
16418 16411 }
16419 16412
16420 16413 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp,
16421 16414 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);
16422 16415
16423 16416 if (un->un_rqs_pktp == NULL) {
16424 16417 sd_free_rqs(un);
16425 16418 return (DDI_FAILURE);
16426 16419 }
16427 16420
16428 16421 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */
16429 16422 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp,
16430 16423 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0);
16431 16424
16432 16425 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp);
16433 16426
16434 16427 /* Set up the other needed members in the ARQ scsi_pkt. */
16435 16428 un->un_rqs_pktp->pkt_comp = sdintr;
16436 16429 un->un_rqs_pktp->pkt_time = sd_io_time;
16437 16430 un->un_rqs_pktp->pkt_flags |=
16438 16431 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */
16439 16432
16440 16433 /*
16441 16434 * Allocate & init the sd_xbuf struct for the RQS command. Do not
16442 16435 * provide any initpkt or destroypkt routines, as we take care of
16443 16436 * scsi_pkt allocation/freeing here and in sd_free_rqs().
16444 16437 */
16445 16438 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
16446 16439 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
16447 16440 xp->xb_pktp = un->un_rqs_pktp;
16448 16441 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16449 16442 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
16450 16443 un, xp, un->un_rqs_pktp, un->un_rqs_bp);
16451 16444
16452 16445 /*
16453 16446 * Save the pointer to the request sense private bp so it can
16454 16447 * be retrieved in sdintr.
16455 16448 */
16456 16449 un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
16457 16450 ASSERT(un->un_rqs_bp->b_private == xp);
16458 16451
16459 16452 /*
16460 16453 * See if the HBA supports auto-request sense for the specified
16461 16454 * target/lun. If it does, then try to enable it (if not already
16462 16455 * enabled).
16463 16456 *
16464 16457 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
16465 16458 * failure, while for other HBAs (pln) scsi_ifsetcap will always
16466 16459 * return success. However, in both of these cases ARQ is always
16467 16460 * enabled and scsi_ifgetcap will always return true. The best approach
16468 16461 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap().
16469 16462 *
16470 16463 * The 3rd case is an HBA (adp) that always reports ARQ as
16471 16464 * enabled via scsi_ifgetcap() even when it is not. The best
16472 16465 * approach there is to issue scsi_ifsetcap() then scsi_ifgetcap().
16473 16466 * Note: this case circumvents the Adaptec bug. (x86 only)
16474 16467 */
16475 16468
16476 16469 if (un->un_f_is_fibre == TRUE) {
16477 16470 un->un_f_arq_enabled = TRUE;
16478 16471 } else {
16479 16472 #if defined(__i386) || defined(__amd64)
16480 16473 /*
16481 16474 * Circumvent the Adaptec bug, remove this code when
16482 16475 * the bug is fixed
16483 16476 */
16484 16477 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
16485 16478 #endif
16486 16479 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
16487 16480 case 0:
16488 16481 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16489 16482 "sd_alloc_rqs: HBA supports ARQ\n");
16490 16483 /*
16491 16484 * ARQ is supported by this HBA but currently is not
16492 16485 * enabled. Attempt to enable it and if successful then
16493 16486 * mark this instance as ARQ enabled.
16494 16487 */
16495 16488 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
16496 16489 == 1) {
16497 16490 /* Successfully enabled ARQ in the HBA */
16498 16491 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16499 16492 "sd_alloc_rqs: ARQ enabled\n");
16500 16493 un->un_f_arq_enabled = TRUE;
16501 16494 } else {
16502 16495 /* Could not enable ARQ in the HBA */
16503 16496 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16504 16497 "sd_alloc_rqs: failed ARQ enable\n");
16505 16498 un->un_f_arq_enabled = FALSE;
16506 16499 }
16507 16500 break;
16508 16501 case 1:
16509 16502 /*
16510 16503 * ARQ is supported by this HBA and is already enabled.
16511 16504 * Just mark ARQ as enabled for this instance.
16512 16505 */
16513 16506 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16514 16507 "sd_alloc_rqs: ARQ already enabled\n");
16515 16508 un->un_f_arq_enabled = TRUE;
16516 16509 break;
16517 16510 default:
16518 16511 /*
16519 16512 * ARQ is not supported by this HBA; disable it for this
16520 16513 * instance.
16521 16514 */
16522 16515 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16523 16516 "sd_alloc_rqs: HBA does not support ARQ\n");
16524 16517 un->un_f_arq_enabled = FALSE;
16525 16518 break;
16526 16519 }
16527 16520 }
16528 16521
16529 16522 return (DDI_SUCCESS);
16530 16523 }
16531 16524
16532 16525
16533 16526 /*
16534 16527 * Function: sd_free_rqs
16535 16528 *
16536 16529 * Description: Cleanup for the pre-instance RQS command.
16537 16530 *
16538 16531 * Context: Kernel thread context
16539 16532 */
16540 16533
16541 16534 static void
16542 16535 sd_free_rqs(struct sd_lun *un)
16543 16536 {
16544 16537 ASSERT(un != NULL);
16545 16538
16546 16539 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");
16547 16540
16548 16541 /*
16549 16542 * If consistent memory is bound to a scsi_pkt, the pkt
16550 16543 * has to be destroyed *before* freeing the consistent memory.
16551 16544 	 * Don't change the sequence of these operations.
16552 16545 	 * scsi_destroy_pkt() might access memory that was already
16553 16546 	 * freed by scsi_free_consistent_buf(), which isn't allowed.
16554 16547 */
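	/*
	 * Concretely: the scsi_destroy_pkt(un->un_rqs_pktp) call below must
	 * precede the scsi_free_consistent_buf(un->un_rqs_bp) call, which is
	 * the order the two blocks that follow preserve.
	 */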
16555 16548 if (un->un_rqs_pktp != NULL) {
16556 16549 scsi_destroy_pkt(un->un_rqs_pktp);
16557 16550 un->un_rqs_pktp = NULL;
16558 16551 }
16559 16552
16560 16553 if (un->un_rqs_bp != NULL) {
16561 16554 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp);
16562 16555 if (xp != NULL) {
16563 16556 kmem_free(xp, sizeof (struct sd_xbuf));
16564 16557 }
16565 16558 scsi_free_consistent_buf(un->un_rqs_bp);
16566 16559 un->un_rqs_bp = NULL;
16567 16560 }
16568 16561 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
16569 16562 }
16570 16563
16571 16564
16572 16565
16573 16566 /*
16574 16567 * Function: sd_reduce_throttle
16575 16568 *
16576 16569 * Description: Reduces the maximum # of outstanding commands on a
16577 16570 * target to the current number of outstanding commands.
16578 16571 	 *		Queues a timeout(9F) callback to restore the limit
16579 16572 * after a specified interval has elapsed.
16580 16573 * Typically used when we get a TRAN_BUSY return code
16581 16574 * back from scsi_transport().
16582 16575 *
16583 16576 * Arguments: un - ptr to the sd_lun softstate struct
16584 16577 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
16585 16578 *
16586 16579 * Context: May be called from interrupt context
16587 16580 */
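/*
 * Illustrative example (hypothetical values): with adaptive throttling
 * enabled, if un_throttle is 32 and a TRAN_BUSY arrives while 10 commands
 * are in transport, un_busy_throttle remembers 32, un_throttle drops to 10,
 * and a timeout(9F) callback to sd_restore_throttle() is scheduled to run
 * SD_THROTTLE_RESET_INTERVAL later.
 */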
16588 16581
16589 16582 static void
16590 16583 sd_reduce_throttle(struct sd_lun *un, int throttle_type)
16591 16584 {
16592 16585 ASSERT(un != NULL);
16593 16586 ASSERT(mutex_owned(SD_MUTEX(un)));
16594 16587 ASSERT(un->un_ncmds_in_transport >= 0);
16595 16588
16596 16589 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
16597 16590 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
16598 16591 un, un->un_throttle, un->un_ncmds_in_transport);
16599 16592
16600 16593 if (un->un_throttle > 1) {
16601 16594 if (un->un_f_use_adaptive_throttle == TRUE) {
16602 16595 switch (throttle_type) {
16603 16596 case SD_THROTTLE_TRAN_BUSY:
16604 16597 if (un->un_busy_throttle == 0) {
16605 16598 un->un_busy_throttle = un->un_throttle;
16606 16599 }
16607 16600 break;
16608 16601 case SD_THROTTLE_QFULL:
16609 16602 un->un_busy_throttle = 0;
16610 16603 break;
16611 16604 default:
16612 16605 ASSERT(FALSE);
16613 16606 }
16614 16607
16615 16608 if (un->un_ncmds_in_transport > 0) {
16616 16609 un->un_throttle = un->un_ncmds_in_transport;
16617 16610 }
16618 16611
16619 16612 } else {
16620 16613 if (un->un_ncmds_in_transport == 0) {
16621 16614 un->un_throttle = 1;
16622 16615 } else {
16623 16616 un->un_throttle = un->un_ncmds_in_transport;
16624 16617 }
16625 16618 }
16626 16619 }
16627 16620
16628 16621 /* Reschedule the timeout if none is currently active */
16629 16622 if (un->un_reset_throttle_timeid == NULL) {
16630 16623 un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
16631 16624 un, SD_THROTTLE_RESET_INTERVAL);
16632 16625 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16633 16626 "sd_reduce_throttle: timeout scheduled!\n");
16634 16627 }
16635 16628
16636 16629 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
16637 16630 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
16638 16631 }
16639 16632
16640 16633
16641 16634
16642 16635 /*
16643 16636 * Function: sd_restore_throttle
16644 16637 *
16645 16638 * Description: Callback function for timeout(9F). Resets the current
16646 16639 * value of un->un_throttle to its default.
16647 16640 *
16648 16641 * Arguments: arg - pointer to associated softstate for the device.
16649 16642 *
16650 16643 * Context: May be called from interrupt context
16651 16644 */
16652 16645
16653 16646 static void
16654 16647 sd_restore_throttle(void *arg)
16655 16648 {
16656 16649 struct sd_lun *un = arg;
16657 16650
16658 16651 ASSERT(un != NULL);
16659 16652 ASSERT(!mutex_owned(SD_MUTEX(un)));
16660 16653
16661 16654 mutex_enter(SD_MUTEX(un));
16662 16655
16663 16656 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
16664 16657 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);
16665 16658
16666 16659 un->un_reset_throttle_timeid = NULL;
16667 16660
16668 16661 if (un->un_f_use_adaptive_throttle == TRUE) {
16669 16662 /*
16670 16663 * If un_busy_throttle is nonzero, then it contains the
16671 16664 * value that un_throttle was when we got a TRAN_BUSY back
16672 16665 	 * from scsi_transport(). We want to revert to this
16673 16666 * value.
16674 16667 *
16675 16668 * In the QFULL case, the throttle limit will incrementally
16676 16669 * increase until it reaches max throttle.
16677 16670 */
16678 16671 if (un->un_busy_throttle > 0) {
16679 16672 un->un_throttle = un->un_busy_throttle;
16680 16673 un->un_busy_throttle = 0;
16681 16674 } else {
16682 16675 /*
16683 16676 				 * Increase the throttle by 10% to open the
16684 16677 				 * gate slowly; schedule another restore if
16685 16678 				 * the saved throttle has not been reached.
16686 16679 */
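				/*
				 * Worked example (hypothetical values):
				 * with un_throttle = 15, the next step is
				 * 15 + max(15 / 10, 1) = 16, ratcheting up
				 * one interval at a time toward
				 * un_saved_throttle.
				 */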
16687 16680 short throttle;
16688 16681 if (sd_qfull_throttle_enable) {
16689 16682 throttle = un->un_throttle +
16690 16683 max((un->un_throttle / 10), 1);
16691 16684 un->un_throttle =
16692 16685 (throttle < un->un_saved_throttle) ?
16693 16686 throttle : un->un_saved_throttle;
16694 16687 if (un->un_throttle < un->un_saved_throttle) {
16695 16688 un->un_reset_throttle_timeid =
16696 16689 timeout(sd_restore_throttle,
16697 16690 un,
16698 16691 SD_QFULL_THROTTLE_RESET_INTERVAL);
16699 16692 }
16700 16693 }
16701 16694 }
16702 16695
16703 16696 /*
16704 16697 * If un_throttle has fallen below the low-water mark, we
16705 16698 * restore the maximum value here (and allow it to ratchet
16706 16699 * down again if necessary).
16707 16700 */
16708 16701 if (un->un_throttle < un->un_min_throttle) {
16709 16702 un->un_throttle = un->un_saved_throttle;
16710 16703 }
16711 16704 } else {
16712 16705 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
16713 16706 "restoring limit from 0x%x to 0x%x\n",
16714 16707 un->un_throttle, un->un_saved_throttle);
16715 16708 un->un_throttle = un->un_saved_throttle;
16716 16709 }
16717 16710
16718 16711 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
16719 16712 "sd_restore_throttle: calling sd_start_cmds!\n");
16720 16713
16721 16714 sd_start_cmds(un, NULL);
16722 16715
16723 16716 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
16724 16717 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
16725 16718 un, un->un_throttle);
16726 16719
16727 16720 mutex_exit(SD_MUTEX(un));
16728 16721
16729 16722 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
16730 16723 }
16731 16724
16732 16725 /*
16733 16726 * Function: sdrunout
16734 16727 *
16735 16728 * Description: Callback routine for scsi_init_pkt when a resource allocation
16736 16729 * fails.
16737 16730 *
16738 16731 * Arguments: arg - a pointer to the sd_lun unit struct for the particular
16739 16732 * soft state instance.
16740 16733 *
16741 16734 * Return Code: The scsi_init_pkt routine allows for the callback function to
16742 16735 * return a 0 indicating the callback should be rescheduled or a 1
16743 16736 * indicating not to reschedule. This routine always returns 1
16744 16737 * because the driver always provides a callback function to
16745 16738 * scsi_init_pkt. This results in a callback always being scheduled
16746 16739 * (via the scsi_init_pkt callback implementation) if a resource
16747 16740 * failure occurs.
16748 16741 *
16749 16742 * Context: This callback function may not block or call routines that block
16750 16743 *
16751 16744 * Note: Using the scsi_init_pkt callback facility can result in an I/O
16752 16745 * request persisting at the head of the list which cannot be
16753 16746 * satisfied even after multiple retries. In the future the driver
16754 16747 	 *		may implement some type of maximum runout count before failing
16755 16748 * an I/O.
16756 16749 */
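/*
 * For reference, sd_start_cmds() supplies this routine as the
 * resource-allocation callback when building packets, along the lines of
 * this illustrative sketch (other arguments elided):
 *
 *	pktp = scsi_init_pkt(SD_ADDRESS(un), ..., sdrunout, (caddr_t)un);
 *
 * so a NULL return from scsi_init_pkt() implies sdrunout will later be
 * invoked when resources become available.
 */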
16757 16750
16758 16751 static int
16759 16752 sdrunout(caddr_t arg)
16760 16753 {
16761 16754 struct sd_lun *un = (struct sd_lun *)arg;
16762 16755
16763 16756 ASSERT(un != NULL);
16764 16757 ASSERT(!mutex_owned(SD_MUTEX(un)));
16765 16758
16766 16759 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");
16767 16760
16768 16761 mutex_enter(SD_MUTEX(un));
16769 16762 sd_start_cmds(un, NULL);
16770 16763 mutex_exit(SD_MUTEX(un));
16771 16764 /*
16772 16765 * This callback routine always returns 1 (i.e. do not reschedule)
16773 16766 * because we always specify sdrunout as the callback handler for
16774 16767 * scsi_init_pkt inside the call to sd_start_cmds.
16775 16768 */
16776 16769 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
16777 16770 return (1);
16778 16771 }
16779 16772
16780 16773
16781 16774 /*
16782 16775 * Function: sdintr
16783 16776 *
16784 16777 * Description: Completion callback routine for scsi_pkt(9S) structs
16785 16778 * sent to the HBA driver via scsi_transport(9F).
16786 16779 *
16787 16780 * Context: Interrupt context
16788 16781 */
16789 16782
16790 16783 static void
16791 16784 sdintr(struct scsi_pkt *pktp)
16792 16785 {
16793 16786 struct buf *bp;
16794 16787 struct sd_xbuf *xp;
16795 16788 struct sd_lun *un;
16796 16789 size_t actual_len;
16797 16790 sd_ssc_t *sscp;
16798 16791
16799 16792 ASSERT(pktp != NULL);
16800 16793 bp = (struct buf *)pktp->pkt_private;
16801 16794 ASSERT(bp != NULL);
16802 16795 xp = SD_GET_XBUF(bp);
16803 16796 ASSERT(xp != NULL);
16804 16797 ASSERT(xp->xb_pktp != NULL);
16805 16798 un = SD_GET_UN(bp);
16806 16799 ASSERT(un != NULL);
16807 16800 ASSERT(!mutex_owned(SD_MUTEX(un)));
16808 16801
16809 16802 #ifdef SD_FAULT_INJECTION
16810 16803
16811 16804 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n");
16812 16805 /* SD FaultInjection */
16813 16806 sd_faultinjection(pktp);
16814 16807
16815 16808 #endif /* SD_FAULT_INJECTION */
16816 16809
16817 16810 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p,"
16818 16811 " xp:0x%p, un:0x%p\n", bp, xp, un);
16819 16812
16820 16813 mutex_enter(SD_MUTEX(un));
16821 16814
16822 16815 ASSERT(un->un_fm_private != NULL);
16823 16816 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
16824 16817 ASSERT(sscp != NULL);
16825 16818
16826 16819 /* Reduce the count of the #commands currently in transport */
16827 16820 un->un_ncmds_in_transport--;
16828 16821 ASSERT(un->un_ncmds_in_transport >= 0);
16829 16822
16830 16823 /* Increment counter to indicate that the callback routine is active */
16831 16824 un->un_in_callback++;
16832 16825
16833 16826 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
16834 16827
16835 16828 #ifdef SDDEBUG
16836 16829 if (bp == un->un_retry_bp) {
16837 16830 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: "
16838 16831 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n",
16839 16832 un, un->un_retry_bp, un->un_ncmds_in_transport);
16840 16833 }
16841 16834 #endif
16842 16835
16843 16836 /*
16844 16837 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media
16845 16838 * state if needed.
16846 16839 */
16847 16840 if (pktp->pkt_reason == CMD_DEV_GONE) {
16848 16841 /* Prevent multiple console messages for the same failure. */
16849 16842 if (un->un_last_pkt_reason != CMD_DEV_GONE) {
16850 16843 un->un_last_pkt_reason = CMD_DEV_GONE;
16851 16844 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16852 16845 "Command failed to complete...Device is gone\n");
16853 16846 }
16854 16847 if (un->un_mediastate != DKIO_DEV_GONE) {
16855 16848 un->un_mediastate = DKIO_DEV_GONE;
16856 16849 cv_broadcast(&un->un_state_cv);
16857 16850 }
16858 16851 /*
16859 16852 * If the command happens to be the REQUEST SENSE command,
16860 16853 * free up the rqs buf and fail the original command.
16861 16854 */
16862 16855 if (bp == un->un_rqs_bp) {
16863 16856 bp = sd_mark_rqs_idle(un, xp);
16864 16857 }
16865 16858 sd_return_failed_command(un, bp, EIO);
16866 16859 goto exit;
16867 16860 }
16868 16861
16869 16862 if (pktp->pkt_state & STATE_XARQ_DONE) {
16870 16863 SD_TRACE(SD_LOG_COMMON, un,
16871 16864 "sdintr: extra sense data received. pkt=%p\n", pktp);
16872 16865 }
16873 16866
16874 16867 /*
16875 16868 * First see if the pkt has auto-request sense data with it....
16876 16869 * Look at the packet state first so we don't take a performance
16877 16870 * hit looking at the arq enabled flag unless absolutely necessary.
16878 16871 */
16879 16872 if ((pktp->pkt_state & STATE_ARQ_DONE) &&
16880 16873 (un->un_f_arq_enabled == TRUE)) {
16881 16874 /*
16882 16875 * The HBA did an auto request sense for this command so check
16883 16876 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
16884 16877 * driver command that should not be retried.
16885 16878 */
16886 16879 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
16887 16880 /*
16888 16881 * Save the relevant sense info into the xp for the
16889 16882 * original cmd.
16890 16883 */
16891 16884 struct scsi_arq_status *asp;
16892 16885 asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
16893 16886 xp->xb_sense_status =
16894 16887 *((uchar_t *)(&(asp->sts_rqpkt_status)));
16895 16888 xp->xb_sense_state = asp->sts_rqpkt_state;
16896 16889 xp->xb_sense_resid = asp->sts_rqpkt_resid;
16897 16890 if (pktp->pkt_state & STATE_XARQ_DONE) {
16898 16891 actual_len = MAX_SENSE_LENGTH -
16899 16892 xp->xb_sense_resid;
16900 16893 bcopy(&asp->sts_sensedata, xp->xb_sense_data,
16901 16894 MAX_SENSE_LENGTH);
16902 16895 } else {
16903 16896 if (xp->xb_sense_resid > SENSE_LENGTH) {
16904 16897 actual_len = MAX_SENSE_LENGTH -
16905 16898 xp->xb_sense_resid;
16906 16899 } else {
16907 16900 actual_len = SENSE_LENGTH -
16908 16901 xp->xb_sense_resid;
16909 16902 }
16910 16903 if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
16911 16904 if ((((struct uscsi_cmd *)
16912 16905 (xp->xb_pktinfo))->uscsi_rqlen) >
16913 16906 actual_len) {
16914 16907 xp->xb_sense_resid =
16915 16908 (((struct uscsi_cmd *)
16916 16909 (xp->xb_pktinfo))->
16917 16910 uscsi_rqlen) - actual_len;
16918 16911 } else {
16919 16912 xp->xb_sense_resid = 0;
16920 16913 }
16921 16914 }
16922 16915 bcopy(&asp->sts_sensedata, xp->xb_sense_data,
16923 16916 SENSE_LENGTH);
16924 16917 }
16925 16918
16926 16919 /* fail the command */
16927 16920 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16928 16921 "sdintr: arq done and FLAG_DIAGNOSE set\n");
16929 16922 sd_return_failed_command(un, bp, EIO);
16930 16923 goto exit;
16931 16924 }
16932 16925
16933 16926 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */
16934 16927 /*
16935 16928 * We want to either retry or fail this command, so free
16936 16929 * the DMA resources here. If we retry the command then
16937 16930 * the DMA resources will be reallocated in sd_start_cmds().
16938 16931 * Note that when PKT_DMA_PARTIAL is used, this reallocation
16939 16932 * causes the *entire* transfer to start over again from the
16940 16933 * beginning of the request, even for PARTIAL chunks that
16941 16934 * have already transferred successfully.
16942 16935 */
16943 16936 if ((un->un_f_is_fibre == TRUE) &&
16944 16937 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
16945 16938 ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
16946 16939 scsi_dmafree(pktp);
16947 16940 xp->xb_pkt_flags |= SD_XB_DMA_FREED;
16948 16941 }
16949 16942 #endif
16950 16943
16951 16944 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16952 16945 "sdintr: arq done, sd_handle_auto_request_sense\n");
16953 16946
16954 16947 sd_handle_auto_request_sense(un, bp, xp, pktp);
16955 16948 goto exit;
16956 16949 }
16957 16950
16958 16951 /* Next see if this is the REQUEST SENSE pkt for the instance */
16959 16952 if (pktp->pkt_flags & FLAG_SENSING) {
16960 16953 /* This pktp is from the unit's REQUEST_SENSE command */
16961 16954 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16962 16955 "sdintr: sd_handle_request_sense\n");
16963 16956 sd_handle_request_sense(un, bp, xp, pktp);
16964 16957 goto exit;
16965 16958 }
16966 16959
16967 16960 /*
16968 16961 * Check to see if the command successfully completed as requested;
16969 16962 * this is the most common case (and also the hot performance path).
16970 16963 *
16971 16964 * Requirements for successful completion are:
16972 16965 * pkt_reason is CMD_CMPLT and packet status is status good.
16973 16966 * In addition:
16974 16967 * - A residual of zero indicates successful completion no matter what
16975 16968 * the command is.
16976 16969 * - If the residual is not zero and the command is not a read or
16977 16970 * write, then it's still defined as successful completion. In other
16978 16971 * words, if the command is a read or write the residual must be
16979 16972 * zero for successful completion.
16980 16973 * - If the residual is not zero and the command is a read or
16981 16974 * write, and it's a USCSICMD, then it's still defined as
16982 16975 * successful completion.
16983 16976 */
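	/*
	 * For example (illustrative): a non-USCSI SCMD_READ that completes
	 * with CMD_CMPLT and STATUS_GOOD but a non-zero pkt_resid fails all
	 * three residual tests below and falls through to not_successful.
	 */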
16984 16977 if ((pktp->pkt_reason == CMD_CMPLT) &&
16985 16978 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) {
16986 16979
16987 16980 /*
16988 16981 * Since this command is returned with a good status, we
16989 16982 * can reset the count for Sonoma failover.
16990 16983 */
16991 16984 un->un_sonoma_failure_count = 0;
16992 16985
16993 16986 /*
16994 16987 * Return all USCSI commands on good status
16995 16988 */
16996 16989 if (pktp->pkt_resid == 0) {
16997 16990 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16998 16991 "sdintr: returning command for resid == 0\n");
16999 16992 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) &&
17000 16993 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) {
17001 16994 SD_UPDATE_B_RESID(bp, pktp);
17002 16995 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17003 16996 "sdintr: returning command for resid != 0\n");
17004 16997 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
17005 16998 SD_UPDATE_B_RESID(bp, pktp);
17006 16999 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17007 17000 "sdintr: returning uscsi command\n");
17008 17001 } else {
17009 17002 goto not_successful;
17010 17003 }
17011 17004 sd_return_command(un, bp);
17012 17005
17013 17006 /*
17014 17007 * Decrement counter to indicate that the callback routine
17015 17008 * is done.
17016 17009 */
17017 17010 un->un_in_callback--;
17018 17011 ASSERT(un->un_in_callback >= 0);
17019 17012 mutex_exit(SD_MUTEX(un));
17020 17013
17021 17014 return;
17022 17015 }
17023 17016
17024 17017 not_successful:
17025 17018
17026 17019 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */
17027 17020 /*
17028 17021 * The following is based upon knowledge of the underlying transport
17029 17022 * and its use of DMA resources. This code should be removed when
17030 17023 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor
17031 17024 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf()
17032 17025 * and sd_start_cmds().
17033 17026 *
17034 17027 * Free any DMA resources associated with this command if there
17035 17028 * is a chance it could be retried or enqueued for later retry.
17036 17029 * If we keep the DMA binding then mpxio cannot reissue the
17037 17030 * command on another path whenever a path failure occurs.
17038 17031 *
17039 17032 * Note that when PKT_DMA_PARTIAL is used, free/reallocation
17040 17033 * causes the *entire* transfer to start over again from the
17041 17034 * beginning of the request, even for PARTIAL chunks that
17042 17035 * have already transferred successfully.
17043 17036 *
17044 17037 	 * This is only done for non-USCSI commands (and is also skipped for
17045 17038 	 * the driver's internal RQS command), and only for Fibre Channel
17046 17039 	 * devices, as these are the only ones that support mpxio.
17047 17040 */
17048 17041 if ((un->un_f_is_fibre == TRUE) &&
17049 17042 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
17050 17043 ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
17051 17044 scsi_dmafree(pktp);
17052 17045 xp->xb_pkt_flags |= SD_XB_DMA_FREED;
17053 17046 }
17054 17047 #endif
17055 17048
17056 17049 /*
17057 17050 * The command did not successfully complete as requested so check
17058 17051 * for FLAG_DIAGNOSE. If set this indicates a uscsi or internal
17059 17052 * driver command that should not be retried so just return. If
17060 17053 * FLAG_DIAGNOSE is not set the error will be processed below.
17061 17054 */
17062 17055 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
17063 17056 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17064 17057 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n");
17065 17058 /*
17066 17059 * Issue a request sense if a check condition caused the error
17067 17060 * (we handle the auto request sense case above), otherwise
17068 17061 * just fail the command.
17069 17062 */
17070 17063 if ((pktp->pkt_reason == CMD_CMPLT) &&
17071 17064 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) {
17072 17065 sd_send_request_sense_command(un, bp, pktp);
17073 17066 } else {
17074 17067 sd_return_failed_command(un, bp, EIO);
17075 17068 }
17076 17069 goto exit;
17077 17070 }
17078 17071
17079 17072 /*
17080 17073 * The command did not successfully complete as requested so process
17081 17074 * the error, retry, and/or attempt recovery.
17082 17075 */
17083 17076 switch (pktp->pkt_reason) {
17084 17077 case CMD_CMPLT:
17085 17078 switch (SD_GET_PKT_STATUS(pktp)) {
17086 17079 case STATUS_GOOD:
17087 17080 /*
17088 17081 * The command completed successfully with a non-zero
17089 17082 * residual
17090 17083 */
17091 17084 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17092 17085 "sdintr: STATUS_GOOD \n");
17093 17086 sd_pkt_status_good(un, bp, xp, pktp);
17094 17087 break;
17095 17088
17096 17089 case STATUS_CHECK:
17097 17090 case STATUS_TERMINATED:
17098 17091 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17099 17092 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n");
17100 17093 sd_pkt_status_check_condition(un, bp, xp, pktp);
17101 17094 break;
17102 17095
17103 17096 case STATUS_BUSY:
17104 17097 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17105 17098 "sdintr: STATUS_BUSY\n");
17106 17099 sd_pkt_status_busy(un, bp, xp, pktp);
17107 17100 break;
17108 17101
17109 17102 case STATUS_RESERVATION_CONFLICT:
17110 17103 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17111 17104 "sdintr: STATUS_RESERVATION_CONFLICT\n");
17112 17105 sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
17113 17106 break;
17114 17107
17115 17108 case STATUS_QFULL:
17116 17109 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17117 17110 "sdintr: STATUS_QFULL\n");
17118 17111 sd_pkt_status_qfull(un, bp, xp, pktp);
17119 17112 break;
17120 17113
17121 17114 case STATUS_MET:
17122 17115 case STATUS_INTERMEDIATE:
17123 17116 case STATUS_SCSI2:
17124 17117 case STATUS_INTERMEDIATE_MET:
17125 17118 case STATUS_ACA_ACTIVE:
17126 17119 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17127 17120 "Unexpected SCSI status received: 0x%x\n",
17128 17121 SD_GET_PKT_STATUS(pktp));
17129 17122 /*
17130 17123 * Mark the ssc_flags when detected invalid status
17131 17124 			 * Mark the ssc_flags when an invalid status
17132 17125 			 * code is detected for a non-USCSI command.
17133 17126 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17134 17127 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
17135 17128 0, "stat-code");
17136 17129 }
17137 17130 sd_return_failed_command(un, bp, EIO);
17138 17131 break;
17139 17132
17140 17133 default:
17141 17134 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17142 17135 "Invalid SCSI status received: 0x%x\n",
17143 17136 SD_GET_PKT_STATUS(pktp));
17144 17137 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17145 17138 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
17146 17139 0, "stat-code");
17147 17140 }
17148 17141 sd_return_failed_command(un, bp, EIO);
17149 17142 break;
17150 17143
17151 17144 }
17152 17145 break;
17153 17146
17154 17147 case CMD_INCOMPLETE:
17155 17148 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17156 17149 "sdintr: CMD_INCOMPLETE\n");
17157 17150 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp);
17158 17151 break;
17159 17152 case CMD_TRAN_ERR:
17160 17153 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17161 17154 "sdintr: CMD_TRAN_ERR\n");
17162 17155 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp);
17163 17156 break;
17164 17157 case CMD_RESET:
17165 17158 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17166 17159 "sdintr: CMD_RESET \n");
17167 17160 sd_pkt_reason_cmd_reset(un, bp, xp, pktp);
17168 17161 break;
17169 17162 case CMD_ABORTED:
17170 17163 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17171 17164 "sdintr: CMD_ABORTED \n");
17172 17165 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp);
17173 17166 break;
17174 17167 case CMD_TIMEOUT:
17175 17168 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17176 17169 "sdintr: CMD_TIMEOUT\n");
17177 17170 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp);
17178 17171 break;
17179 17172 case CMD_UNX_BUS_FREE:
17180 17173 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17181 17174 "sdintr: CMD_UNX_BUS_FREE \n");
17182 17175 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp);
17183 17176 break;
17184 17177 case CMD_TAG_REJECT:
17185 17178 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17186 17179 "sdintr: CMD_TAG_REJECT\n");
17187 17180 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp);
17188 17181 break;
17189 17182 default:
17190 17183 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17191 17184 "sdintr: default\n");
17192 17185 /*
17193 17186 		 * Mark the ssc_flags for detecting an invalid pkt_reason.
17194 17187 */
17195 17188 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17196 17189 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON,
17197 17190 0, "pkt-reason");
17198 17191 }
17199 17192 sd_pkt_reason_default(un, bp, xp, pktp);
17200 17193 break;
17201 17194 }
17202 17195
17203 17196 exit:
17204 17197 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n");
17205 17198
17206 17199 /* Decrement counter to indicate that the callback routine is done. */
17207 17200 un->un_in_callback--;
17208 17201 ASSERT(un->un_in_callback >= 0);
17209 17202
17210 17203 /*
17211 17204 * At this point, the pkt has been dispatched, ie, it is either
17212 17205 * being re-tried or has been returned to its caller and should
17213 17206 * not be referenced.
17214 17207 */
17215 17208
17216 17209 mutex_exit(SD_MUTEX(un));
17217 17210 }
17218 17211
17219 17212
17220 17213 /*
17221 17214 * Function: sd_print_incomplete_msg
17222 17215 *
17223 17216 * Description: Prints the error message for a CMD_INCOMPLETE error.
17224 17217 *
17225 17218 * Arguments: un - ptr to associated softstate for the device.
17226 17219 * bp - ptr to the buf(9S) for the command.
17227 17220 * arg - message string ptr
17228 17221 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
17229 17222 * or SD_NO_RETRY_ISSUED.
17230 17223 *
17231 17224 * Context: May be called under interrupt context
17232 17225 */
17233 17226
17234 17227 static void
17235 17228 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
17236 17229 {
17237 17230 struct scsi_pkt *pktp;
17238 17231 char *msgp;
17239 17232 char *cmdp = arg;
17240 17233
17241 17234 ASSERT(un != NULL);
17242 17235 ASSERT(mutex_owned(SD_MUTEX(un)));
17243 17236 ASSERT(bp != NULL);
17244 17237 ASSERT(arg != NULL);
17245 17238 pktp = SD_GET_PKTP(bp);
17246 17239 ASSERT(pktp != NULL);
17247 17240
17248 17241 switch (code) {
17249 17242 case SD_DELAYED_RETRY_ISSUED:
17250 17243 case SD_IMMEDIATE_RETRY_ISSUED:
17251 17244 msgp = "retrying";
17252 17245 break;
17253 17246 case SD_NO_RETRY_ISSUED:
17254 17247 default:
17255 17248 msgp = "giving up";
17256 17249 break;
17257 17250 }
17258 17251
17259 17252 if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
17260 17253 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17261 17254 "incomplete %s- %s\n", cmdp, msgp);
17262 17255 }
17263 17256 }
17264 17257
17265 17258
17266 17259
17267 17260 /*
17268 17261 * Function: sd_pkt_status_good
17269 17262 *
17270 17263 * Description: Processing for a STATUS_GOOD code in pkt_status.
17271 17264 *
17272 17265 * Context: May be called under interrupt context
17273 17266 */
17274 17267
17275 17268 static void
17276 17269 sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
17277 17270 struct sd_xbuf *xp, struct scsi_pkt *pktp)
17278 17271 {
17279 17272 char *cmdp;
17280 17273
17281 17274 ASSERT(un != NULL);
17282 17275 ASSERT(mutex_owned(SD_MUTEX(un)));
17283 17276 ASSERT(bp != NULL);
17284 17277 ASSERT(xp != NULL);
17285 17278 ASSERT(pktp != NULL);
17286 17279 ASSERT(pktp->pkt_reason == CMD_CMPLT);
17287 17280 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
17288 17281 ASSERT(pktp->pkt_resid != 0);
17289 17282
17290 17283 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");
17291 17284
17292 17285 SD_UPDATE_ERRSTATS(un, sd_harderrs);
17293 17286 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
17294 17287 case SCMD_READ:
17295 17288 cmdp = "read";
17296 17289 break;
17297 17290 case SCMD_WRITE:
17298 17291 cmdp = "write";
17299 17292 break;
17300 17293 default:
17301 17294 SD_UPDATE_B_RESID(bp, pktp);
17302 17295 sd_return_command(un, bp);
17303 17296 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
17304 17297 return;
17305 17298 }
17306 17299
17307 17300 /*
17308 17301 	 * See if we can retry the read/write, preferably immediately.
17309 17302 	 * If retries are exhausted, then sd_retry_command() will update
17310 17303 * the b_resid count.
17311 17304 */
17312 17305 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
17313 17306 cmdp, EIO, (clock_t)0, NULL);
17314 17307
17315 17308 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
17316 17309 }
17317 17310
17318 17311
17319 17312
17320 17313
17321 17314
17322 17315 /*
17323 17316 * Function: sd_handle_request_sense
17324 17317 *
17325 17318 * Description: Processing for non-auto Request Sense command.
17326 17319 *
17327 17320 * Arguments: un - ptr to associated softstate
17328 17321 * sense_bp - ptr to buf(9S) for the RQS command
17329 17322 * sense_xp - ptr to the sd_xbuf for the RQS command
17330 17323 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command
17331 17324 *
17332 17325 * Context: May be called under interrupt context
17333 17326 */
17334 17327
17335 17328 static void
17336 17329 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp,
17337 17330 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp)
17338 17331 {
17339 17332 struct buf *cmd_bp; /* buf for the original command */
17340 17333 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */
17341 17334 struct scsi_pkt *cmd_pktp; /* pkt for the original command */
17342 17335 size_t actual_len; /* actual sense data length */
17343 17336
17344 17337 ASSERT(un != NULL);
17345 17338 ASSERT(mutex_owned(SD_MUTEX(un)));
17346 17339 ASSERT(sense_bp != NULL);
17347 17340 ASSERT(sense_xp != NULL);
17348 17341 ASSERT(sense_pktp != NULL);
17349 17342
17350 17343 /*
17351 17344 * Note the sense_bp, sense_xp, and sense_pktp here are for the
17352 17345 * RQS command and not the original command.
17353 17346 */
17354 17347 ASSERT(sense_pktp == un->un_rqs_pktp);
17355 17348 ASSERT(sense_bp == un->un_rqs_bp);
17356 17349 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) ==
17357 17350 (FLAG_SENSING | FLAG_HEAD));
17358 17351 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) &
17359 17352 FLAG_SENSING) == FLAG_SENSING);
17360 17353
17361 17354 /* These are the bp, xp, and pktp for the original command */
17362 17355 cmd_bp = sense_xp->xb_sense_bp;
17363 17356 cmd_xp = SD_GET_XBUF(cmd_bp);
17364 17357 cmd_pktp = SD_GET_PKTP(cmd_bp);
17365 17358
17366 17359 if (sense_pktp->pkt_reason != CMD_CMPLT) {
17367 17360 /*
17368 17361 * The REQUEST SENSE command failed. Release the REQUEST
17369 17362 * SENSE command for re-use, get back the bp for the original
17370 17363 * command, and attempt to re-try the original command if
17371 17364 * FLAG_DIAGNOSE is not set in the original packet.
17372 17365 */
17373 17366 SD_UPDATE_ERRSTATS(un, sd_harderrs);
17374 17367 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
17375 17368 cmd_bp = sd_mark_rqs_idle(un, sense_xp);
17376 17369 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD,
17377 17370 NULL, NULL, EIO, (clock_t)0, NULL);
17378 17371 return;
17379 17372 }
17380 17373 }
17381 17374
17382 17375 /*
17383 17376 * Save the relevant sense info into the xp for the original cmd.
17384 17377 *
17385 17378 * Note: if the request sense failed the state info will be zero
17386 17379 * as set in sd_mark_rqs_busy()
17387 17380 */
17388 17381 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp);
17389 17382 cmd_xp->xb_sense_state = sense_pktp->pkt_state;
17390 17383 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid;
17391 17384 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) &&
17392 17385 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen >
17393 17386 SENSE_LENGTH)) {
17394 17387 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
17395 17388 MAX_SENSE_LENGTH);
17396 17389 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid;
17397 17390 } else {
17398 17391 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
17399 17392 SENSE_LENGTH);
17400 17393 if (actual_len < SENSE_LENGTH) {
17401 17394 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len;
17402 17395 } else {
17403 17396 cmd_xp->xb_sense_resid = 0;
17404 17397 }
17405 17398 }
17406 17399
17407 17400 /*
17408 17401 * Free up the RQS command....
17409 17402 * NOTE:
17410 17403 * Must do this BEFORE calling sd_validate_sense_data!
17411 17404 * sd_validate_sense_data may return the original command in
17412 17405 * which case the pkt will be freed and the flags can no
17413 17406 * longer be touched.
17414 17407 * SD_MUTEX is held through this process until the command
17415 17408 * is dispatched based upon the sense data, so there are
17416 17409 * no race conditions.
17417 17410 */
17418 17411 (void) sd_mark_rqs_idle(un, sense_xp);
17419 17412
17420 17413 /*
17421 17414 * For a retryable command see if we have valid sense data, if so then
17422 17415 * turn it over to sd_decode_sense() to figure out the right course of
17423 17416 * action. Just fail a non-retryable command.
17424 17417 */
17425 17418 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
17426 17419 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) ==
17427 17420 SD_SENSE_DATA_IS_VALID) {
17428 17421 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp);
17429 17422 }
17430 17423 } else {
17431 17424 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB",
17432 17425 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
17433 17426 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data",
17434 17427 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
17435 17428 sd_return_failed_command(un, cmd_bp, EIO);
17436 17429 }
17437 17430 }
17438 17431
17439 17432
17440 17433
17441 17434
17442 17435 /*
17443 17436 * Function: sd_handle_auto_request_sense
17444 17437 *
17445 17438 * Description: Processing for auto-request sense information.
17446 17439 *
17447 17440 * Arguments: un - ptr to associated softstate
17448 17441 * bp - ptr to buf(9S) for the command
17449 17442 * xp - ptr to the sd_xbuf for the command
17450 17443 * pktp - ptr to the scsi_pkt(9S) for the command
17451 17444 *
17452 17445 * Context: May be called under interrupt context
17453 17446 */
17454 17447
17455 17448 static void
17456 17449 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
17457 17450 struct sd_xbuf *xp, struct scsi_pkt *pktp)
17458 17451 {
17459 17452 struct scsi_arq_status *asp;
17460 17453 size_t actual_len;
17461 17454
17462 17455 ASSERT(un != NULL);
17463 17456 ASSERT(mutex_owned(SD_MUTEX(un)));
17464 17457 ASSERT(bp != NULL);
17465 17458 ASSERT(xp != NULL);
17466 17459 ASSERT(pktp != NULL);
17467 17460 ASSERT(pktp != un->un_rqs_pktp);
17468 17461 ASSERT(bp != un->un_rqs_bp);
17469 17462
17470 17463 /*
17471 17464 * For auto-request sense, we get a scsi_arq_status back from
17472 17465 * the HBA, with the sense data in the sts_sensedata member.
17473 17466 * The pkt_scbp of the packet points to this scsi_arq_status.
17474 17467 */
17475 17468 asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
17476 17469
17477 17470 if (asp->sts_rqpkt_reason != CMD_CMPLT) {
17478 17471 /*
17479 17472 * The auto REQUEST SENSE failed; see if we can re-try
17480 17473 * the original command.
17481 17474 */
17482 17475 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17483 17476 "auto request sense failed (reason=%s)\n",
17484 17477 scsi_rname(asp->sts_rqpkt_reason));
17485 17478
17486 17479 sd_reset_target(un, pktp);
17487 17480
17488 17481 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17489 17482 NULL, NULL, EIO, (clock_t)0, NULL);
17490 17483 return;
17491 17484 }
17492 17485
17493 17486 /* Save the relevant sense info into the xp for the original cmd. */
17494 17487 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status)));
17495 17488 xp->xb_sense_state = asp->sts_rqpkt_state;
17496 17489 xp->xb_sense_resid = asp->sts_rqpkt_resid;
17497 17490 if (xp->xb_sense_state & STATE_XARQ_DONE) {
17498 17491 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
17499 17492 bcopy(&asp->sts_sensedata, xp->xb_sense_data,
17500 17493 MAX_SENSE_LENGTH);
17501 17494 } else {
17502 17495 if (xp->xb_sense_resid > SENSE_LENGTH) {
17503 17496 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
17504 17497 } else {
17505 17498 actual_len = SENSE_LENGTH - xp->xb_sense_resid;
17506 17499 }
17507 17500 if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
17508 17501 if ((((struct uscsi_cmd *)
17509 17502 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) {
17510 17503 xp->xb_sense_resid = (((struct uscsi_cmd *)
17511 17504 (xp->xb_pktinfo))->uscsi_rqlen) -
17512 17505 actual_len;
17513 17506 } else {
17514 17507 xp->xb_sense_resid = 0;
17515 17508 }
17516 17509 }
17517 17510 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH);
17518 17511 }
17519 17512
17520 17513 /*
17521 17514 * See if we have valid sense data, if so then turn it over to
17522 17515 * sd_decode_sense() to figure out the right course of action.
17523 17516 */
17524 17517 if (sd_validate_sense_data(un, bp, xp, actual_len) ==
17525 17518 SD_SENSE_DATA_IS_VALID) {
17526 17519 sd_decode_sense(un, bp, xp, pktp);
17527 17520 }
17528 17521 }
17529 17522
17530 17523
17531 17524 /*
17532 17525 * Function: sd_print_sense_failed_msg
17533 17526 *
17534 17527 * Description: Print log message when RQS has failed.
17535 17528 *
17536 17529 * Arguments: un - ptr to associated softstate
17537 17530 * bp - ptr to buf(9S) for the command
17538 17531 * arg - generic message string ptr
17539 17532 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
17540 17533 * or SD_NO_RETRY_ISSUED
17541 17534 *
17542 17535 * Context: May be called from interrupt context
17543 17536 */
17544 17537
17545 17538 static void
17546 17539 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg,
17547 17540 int code)
17548 17541 {
17549 17542 char *msgp = arg;
17550 17543
17551 17544 ASSERT(un != NULL);
17552 17545 ASSERT(mutex_owned(SD_MUTEX(un)));
17553 17546 ASSERT(bp != NULL);
17554 17547
17555 17548 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) {
17556 17549 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp);
17557 17550 }
17558 17551 }
17559 17552
17560 17553
17561 17554 /*
17562 17555 * Function: sd_validate_sense_data
17563 17556 *
17564 17557 * Description: Check the given sense data for validity.
17565 17558 * If the sense data is not valid, the command will
17566 17559 * be either failed or retried!
17567 17560 *
17568 17561 * Return Code: SD_SENSE_DATA_IS_INVALID
17569 17562 * SD_SENSE_DATA_IS_VALID
17570 17563 *
17571 17564 * Context: May be called from interrupt context
17572 17565 */
17573 17566
17574 17567 static int
17575 17568 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
17576 17569 size_t actual_len)
17577 17570 {
17578 17571 struct scsi_extended_sense *esp;
17579 17572 struct scsi_pkt *pktp;
17580 17573 char *msgp = NULL;
17581 17574 sd_ssc_t *sscp;
17582 17575
17583 17576 ASSERT(un != NULL);
17584 17577 ASSERT(mutex_owned(SD_MUTEX(un)));
17585 17578 ASSERT(bp != NULL);
17586 17579 ASSERT(bp != un->un_rqs_bp);
17587 17580 ASSERT(xp != NULL);
17588 17581 ASSERT(un->un_fm_private != NULL);
17589 17582
17590 17583 pktp = SD_GET_PKTP(bp);
17591 17584 ASSERT(pktp != NULL);
17592 17585
17593 17586 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
17594 17587 ASSERT(sscp != NULL);
17595 17588
17596 17589 /*
17597 17590 * Check the status of the RQS command (auto or manual).
17598 17591 */
17599 17592 switch (xp->xb_sense_status & STATUS_MASK) {
17600 17593 case STATUS_GOOD:
17601 17594 break;
17602 17595
17603 17596 case STATUS_RESERVATION_CONFLICT:
17604 17597 sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
17605 17598 return (SD_SENSE_DATA_IS_INVALID);
17606 17599
17607 17600 case STATUS_BUSY:
17608 17601 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17609 17602 "Busy Status on REQUEST SENSE\n");
17610 17603 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL,
17611 17604 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter);
17612 17605 return (SD_SENSE_DATA_IS_INVALID);
17613 17606
17614 17607 case STATUS_QFULL:
17615 17608 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17616 17609 "QFULL Status on REQUEST SENSE\n");
17617 17610 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL,
17618 17611 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter);
17619 17612 return (SD_SENSE_DATA_IS_INVALID);
17620 17613
17621 17614 case STATUS_CHECK:
17622 17615 case STATUS_TERMINATED:
17623 17616 msgp = "Check Condition on REQUEST SENSE\n";
17624 17617 goto sense_failed;
17625 17618
17626 17619 default:
17627 17620 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n";
17628 17621 goto sense_failed;
17629 17622 }
17630 17623
17631 17624 /*
17632 17625 * See if we got the minimum required amount of sense data.
17633 17626 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes
17634 17627 * or less.
17635 17628 */
17636 17629 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) ||
17637 17630 (actual_len == 0)) {
17638 17631 msgp = "Request Sense couldn't get sense data\n";
17639 17632 goto sense_failed;
17640 17633 }
17641 17634
17642 17635 if (actual_len < SUN_MIN_SENSE_LENGTH) {
17643 17636 msgp = "Not enough sense information\n";
17644 17637 /* Mark the ssc_flags for detecting invalid sense data */
17645 17638 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17646 17639 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17647 17640 "sense-data");
17648 17641 }
17649 17642 goto sense_failed;
17650 17643 }
17651 17644
17652 17645 /*
17653 17646 * We require the extended sense data
17654 17647 */
17655 17648 esp = (struct scsi_extended_sense *)xp->xb_sense_data;
17656 17649 if (esp->es_class != CLASS_EXTENDED_SENSE) {
17657 17650 if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
17658 17651 static char tmp[8];
17659 17652 static char buf[148];
17660 17653 char *p = (char *)(xp->xb_sense_data);
17661 17654 int i;
17662 17655
17663 17656 mutex_enter(&sd_sense_mutex);
17664 17657 (void) strcpy(buf, "undecodable sense information:");
17665 17658 for (i = 0; i < actual_len; i++) {
17666 17659 (void) sprintf(tmp, " 0x%x", *(p++)&0xff);
17667 17660 (void) strcpy(&buf[strlen(buf)], tmp);
17668 17661 }
17669 17662 i = strlen(buf);
17670 17663 (void) strcpy(&buf[i], "-(assumed fatal)\n");
17671 17664
17672 17665 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
17673 17666 scsi_log(SD_DEVINFO(un), sd_label,
17674 17667 CE_WARN, buf);
17675 17668 }
17676 17669 mutex_exit(&sd_sense_mutex);
17677 17670 }
17678 17671
17679 17672 /* Mark the ssc_flags for detecting invalid sense data */
17680 17673 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17681 17674 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17682 17675 "sense-data");
17683 17676 }
17684 17677
17685 17678 /* Note: Legacy behavior, fail the command with no retry */
17686 17679 sd_return_failed_command(un, bp, EIO);
17687 17680 return (SD_SENSE_DATA_IS_INVALID);
17688 17681 }
17689 17682
17690 17683 /*
17691 17684 	 * Check that es_code is valid (es_class concatenated with es_code
17692 17685 	 * makes up the "response code" field). es_class will always be 7, so
17693 17686 	 * make sure es_code is 0, 1, 2, 3 or 0xf. es_code indicates the
17694 17687 	 * sense data format.
17695 17688 */
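	/*
	 * For reference, the combined response codes accepted here are
	 * 0x70 (fixed, current), 0x71 (fixed, deferred), 0x72 (descriptor,
	 * current), 0x73 (descriptor, deferred), and 0x7F (vendor specific).
	 */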
17696 17689 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) &&
17697 17690 (esp->es_code != CODE_FMT_FIXED_DEFERRED) &&
17698 17691 (esp->es_code != CODE_FMT_DESCR_CURRENT) &&
17699 17692 (esp->es_code != CODE_FMT_DESCR_DEFERRED) &&
17700 17693 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) {
17701 17694 /* Mark the ssc_flags for detecting invalid sense data */
17702 17695 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17703 17696 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17704 17697 "sense-data");
17705 17698 }
17706 17699 goto sense_failed;
17707 17700 }
17708 17701
17709 17702 return (SD_SENSE_DATA_IS_VALID);
17710 17703
17711 17704 sense_failed:
17712 17705 /*
17713 17706 * If the request sense failed (for whatever reason), attempt
17714 17707 * to retry the original command.
17715 17708 */
17716 17709 #if defined(__i386) || defined(__amd64)
17717 17710 /*
17718 17711 	 * SD_RETRY_DELAY is conditionally compiled (#if fibre) in
17719 17712 	 * sddef.h for the SPARC platform, while x86 uses one binary
17720 17713 	 * for both SCSI and FC.
17721 17714 	 * The SD_RETRY_DELAY value here needs to be adjusted
17722 17715 	 * whenever SD_RETRY_DELAY changes in sddef.h.
17723 17716 */
17724 17717 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17725 17718 sd_print_sense_failed_msg, msgp, EIO,
17726 17719 un->un_f_is_fibre?drv_usectohz(100000):(clock_t)0, NULL);
17727 17720 #else
17728 17721 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17729 17722 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL);
17730 17723 #endif
17731 17724
17732 17725 return (SD_SENSE_DATA_IS_INVALID);
17733 17726 }
17734 17727
17735 17728 /*
17736 17729 * Function: sd_decode_sense
17737 17730 *
17738 17731 * Description: Take recovery action(s) when SCSI Sense Data is received.
17739 17732 *
17740 17733 * Context: Interrupt context.
17741 17734 */
17742 17735
17743 17736 static void
17744 17737 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
17745 17738 struct scsi_pkt *pktp)
17746 17739 {
17747 17740 uint8_t sense_key;
17748 17741
17749 17742 ASSERT(un != NULL);
17750 17743 ASSERT(mutex_owned(SD_MUTEX(un)));
17751 17744 ASSERT(bp != NULL);
17752 17745 ASSERT(bp != un->un_rqs_bp);
17753 17746 ASSERT(xp != NULL);
17754 17747 ASSERT(pktp != NULL);
17755 17748
17756 17749 sense_key = scsi_sense_key(xp->xb_sense_data);
17757 17750
17758 17751 switch (sense_key) {
17759 17752 case KEY_NO_SENSE:
17760 17753 sd_sense_key_no_sense(un, bp, xp, pktp);
17761 17754 break;
17762 17755 case KEY_RECOVERABLE_ERROR:
17763 17756 sd_sense_key_recoverable_error(un, xp->xb_sense_data,
17764 17757 bp, xp, pktp);
17765 17758 break;
17766 17759 case KEY_NOT_READY:
17767 17760 sd_sense_key_not_ready(un, xp->xb_sense_data,
17768 17761 bp, xp, pktp);
17769 17762 break;
17770 17763 case KEY_MEDIUM_ERROR:
17771 17764 case KEY_HARDWARE_ERROR:
17772 17765 sd_sense_key_medium_or_hardware_error(un,
17773 17766 xp->xb_sense_data, bp, xp, pktp);
17774 17767 break;
17775 17768 case KEY_ILLEGAL_REQUEST:
17776 17769 sd_sense_key_illegal_request(un, bp, xp, pktp);
17777 17770 break;
17778 17771 case KEY_UNIT_ATTENTION:
17779 17772 sd_sense_key_unit_attention(un, xp->xb_sense_data,
17780 17773 bp, xp, pktp);
17781 17774 break;
17782 17775 case KEY_WRITE_PROTECT:
17783 17776 case KEY_VOLUME_OVERFLOW:
17784 17777 case KEY_MISCOMPARE:
17785 17778 sd_sense_key_fail_command(un, bp, xp, pktp);
17786 17779 break;
17787 17780 case KEY_BLANK_CHECK:
17788 17781 sd_sense_key_blank_check(un, bp, xp, pktp);
17789 17782 break;
17790 17783 case KEY_ABORTED_COMMAND:
17791 17784 sd_sense_key_aborted_command(un, bp, xp, pktp);
17792 17785 break;
17793 17786 case KEY_VENDOR_UNIQUE:
17794 17787 case KEY_COPY_ABORTED:
17795 17788 case KEY_EQUAL:
17796 17789 case KEY_RESERVED:
17797 17790 default:
17798 17791 sd_sense_key_default(un, xp->xb_sense_data,
17799 17792 bp, xp, pktp);
17800 17793 break;
17801 17794 }
17802 17795 }
17803 17796
17804 17797
17805 17798 /*
17806 17799 * Function: sd_dump_memory
17807 17800 *
17808 17801 * Description: Debug logging routine to print the contents of a user provided
17809 17802 * buffer. The output of the buffer is broken up into 256 byte
17810 17803 	 *		segments due to a size constraint of the scsi_log
17811 17804 	 *		implementation.
17812 17805 *
17813 17806 * Arguments: un - ptr to softstate
17814 17807 * comp - component mask
17815 17808 	 *		title	- "title" string to precede data when printed
17816 17809 * data - ptr to data block to be printed
17817 17810 * len - size of data block to be printed
17818 17811 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
17819 17812 *
17820 17813 * Context: May be called from interrupt context
17821 17814 */
17822 17815
17823 17816 #define SD_DUMP_MEMORY_BUF_SIZE 256
17824 17817
17825 17818 static char *sd_dump_format_string[] = {
17826 17819 " 0x%02x",
17827 17820 " %c"
17828 17821 };
17829 17822
17830 17823 static void
17831 17824 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
17832 17825 int len, int fmt)
17833 17826 {
17834 17827 int i, j;
17835 17828 int avail_count;
17836 17829 int start_offset;
17837 17830 int end_offset;
17838 17831 size_t entry_len;
17839 17832 char *bufp;
17840 17833 char *local_buf;
17841 17834 char *format_string;
17842 17835
17843 17836 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));
17844 17837
17845 17838 /*
17846 17839 * In the debug version of the driver, this function is called from a
17847 17840 * number of places which are NOPs in the release driver.
17848 17841 * The debug driver therefore has additional methods of filtering
17849 17842 * debug output.
17850 17843 */
17851 17844 #ifdef SDDEBUG
17852 17845 /*
17853 17846 * In the debug version of the driver we can reduce the amount of debug
17854 17847 * messages by setting sd_error_level to something other than
17855 17848 * SCSI_ERR_ALL and clearing bits in sd_level_mask and
17856 17849 * sd_component_mask.
17857 17850 */
17858 17851 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
17859 17852 (sd_error_level != SCSI_ERR_ALL)) {
17860 17853 return;
17861 17854 }
17862 17855 if (((sd_component_mask & comp) == 0) ||
17863 17856 (sd_error_level != SCSI_ERR_ALL)) {
17864 17857 return;
17865 17858 }
17866 17859 #else
17867 17860 if (sd_error_level != SCSI_ERR_ALL) {
17868 17861 return;
17869 17862 }
17870 17863 #endif
17871 17864
17872 17865 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
17873 17866 bufp = local_buf;
17874 17867 /*
17875 17868 * Available length is the length of local_buf[], minus the
17876 17869 * length of the title string, minus one for the ":", minus
17877 17870 * one for the newline, minus one for the NULL terminator.
17878 17871 * This gives the #bytes available for holding the printed
17879 17872 * values from the given data buffer.
17880 17873 */
17881 17874 if (fmt == SD_LOG_HEX) {
17882 17875 format_string = sd_dump_format_string[0];
17883 17876 } else /* SD_LOG_CHAR */ {
17884 17877 format_string = sd_dump_format_string[1];
17885 17878 }
17886 17879 /*
17887 17880 * Available count is the number of elements from the given
17888 17881 * data buffer that we can fit into the available length.
17889 17882 * This is based upon the size of the format string used.
17890 17883 * Make one entry and find it's size.
17891 17884 */
17892 17885 (void) sprintf(bufp, format_string, data[0]);
17893 17886 entry_len = strlen(bufp);
17894 17887 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;
17895 17888
17896 17889 j = 0;
17897 17890 while (j < len) {
17898 17891 bufp = local_buf;
17899 17892 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
17900 17893 start_offset = j;
17901 17894
17902 17895 end_offset = start_offset + avail_count;
17903 17896
17904 17897 (void) sprintf(bufp, "%s:", title);
17905 17898 bufp += strlen(bufp);
17906 17899 for (i = start_offset; ((i < end_offset) && (j < len));
17907 17900 i++, j++) {
17908 17901 (void) sprintf(bufp, format_string, data[i]);
17909 17902 bufp += entry_len;
17910 17903 }
17911 17904 (void) sprintf(bufp, "\n");
17912 17905
17913 17906 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
17914 17907 }
17915 17908 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
17916 17909 }
17917 17910
17918 17911 /*
17919 17912 * Function: sd_print_sense_msg
17920 17913 *
17921 17914 * Description: Log a message based upon the given sense data.
17922 17915 *
17923 17916 * Arguments: un - ptr to associated softstate
17924 17917 * bp - ptr to buf(9S) for the command
17925 17918 	 *		arg	- ptr to the associated sd_sense_info struct
17926 17919 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
17927 17920 * or SD_NO_RETRY_ISSUED
17928 17921 *
17929 17922 * Context: May be called from interrupt context
17930 17923 */
17931 17924
17932 17925 static void
17933 17926 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
17934 17927 {
17935 17928 struct sd_xbuf *xp;
17936 17929 struct scsi_pkt *pktp;
17937 17930 uint8_t *sensep;
17938 17931 daddr_t request_blkno;
17939 17932 diskaddr_t err_blkno;
17940 17933 int severity;
17941 17934 int pfa_flag;
17942 17935 extern struct scsi_key_strings scsi_cmds[];
17943 17936
17944 17937 ASSERT(un != NULL);
17945 17938 ASSERT(mutex_owned(SD_MUTEX(un)));
17946 17939 ASSERT(bp != NULL);
17947 17940 xp = SD_GET_XBUF(bp);
17948 17941 ASSERT(xp != NULL);
17949 17942 pktp = SD_GET_PKTP(bp);
17950 17943 ASSERT(pktp != NULL);
17951 17944 ASSERT(arg != NULL);
17952 17945
17953 17946 severity = ((struct sd_sense_info *)(arg))->ssi_severity;
17954 17947 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;
17955 17948
17956 17949 if ((code == SD_DELAYED_RETRY_ISSUED) ||
17957 17950 (code == SD_IMMEDIATE_RETRY_ISSUED)) {
17958 17951 severity = SCSI_ERR_RETRYABLE;
17959 17952 }
17960 17953
17961 17954 /* Use absolute block number for the request block number */
17962 17955 request_blkno = xp->xb_blkno;
17963 17956
17964 17957 /*
17965 17958 * Now try to get the error block number from the sense data
17966 17959 */
17967 17960 sensep = xp->xb_sense_data;
17968 17961
17969 17962 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
17970 17963 (uint64_t *)&err_blkno)) {
17971 17964 /*
17972 17965 * We retrieved the error block number from the information
17973 17966 * portion of the sense data.
17974 17967 *
17975 17968 * For USCSI commands we are better off using the error
17976 17969 * block no. as the requested block no. (This is the best
17977 17970 * we can estimate.)
17978 17971 */
17979 17972 if ((SD_IS_BUFIO(xp) == FALSE) &&
17980 17973 ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
17981 17974 request_blkno = err_blkno;
17982 17975 }
17983 17976 } else {
17984 17977 /*
17985 17978 * Without the es_valid bit set (for fixed format) or an
17986 17979 * information descriptor (for descriptor format) we cannot
17987 17980 * be certain of the error blkno, so just use the
17988 17981 * request_blkno.
17989 17982 */
17990 17983 err_blkno = (diskaddr_t)request_blkno;
17991 17984 }
17992 17985
17993 17986 /*
17994 17987 * The following will log the buffer contents for the release
17995 17988 * (non-DEBUG) driver if the SD_LOGMASK_DIAG bit of sd_level_mask
17996 17989 * is set, or the error level is set to verbose.
17997 17990 */
17998 17991 sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
17999 17992 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
18000 17993 sd_dump_memory(un, SD_LOG_IO, "Sense Data",
18001 17994 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX);
18002 17995
18003 17996 if (pfa_flag == FALSE) {
18004 17997 /* This is normally only set for USCSI */
18005 17998 if ((pktp->pkt_flags & FLAG_SILENT) != 0) {
18006 17999 return;
18007 18000 }
18008 18001
18009 18002 if ((SD_IS_BUFIO(xp) == TRUE) &&
18010 18003 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) &&
18011 18004 (severity < sd_error_level))) {
18012 18005 return;
18013 18006 }
18014 18007 }
18015 18008 /*
18016 18009 * Check for Sonoma Failover and keep a count of how many I/Os have failed
18017 18010 */
18018 18011 if ((SD_IS_LSI(un)) &&
18019 18012 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) &&
18020 18013 (scsi_sense_asc(sensep) == 0x94) &&
18021 18014 (scsi_sense_ascq(sensep) == 0x01)) {
18022 18015 un->un_sonoma_failure_count++;
18023 18016 if (un->un_sonoma_failure_count > 1) {
18024 18017 return;
18025 18018 }
18026 18019 }
18027 18020
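	/*
	 * Log through scsi_vu_errmsg() only when FMA ereport logging is
	 * not supported for this device (SD_FM_LOG_NSUP), or for a fully
	 * transferred RECOVERED ERROR, which presumably does not warrant
	 * an ereport of its own.
	 */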
18028 18021 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP ||
18029 18022 ((scsi_sense_key(sensep) == KEY_RECOVERABLE_ERROR) &&
18030 18023 (pktp->pkt_resid == 0))) {
18031 18024 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity,
18032 18025 request_blkno, err_blkno, scsi_cmds,
18033 18026 (struct scsi_extended_sense *)sensep,
18034 18027 un->un_additional_codes, NULL);
18035 18028 }
18036 18029 }
18037 18030
18038 18031 /*
18039 18032 * Function: sd_sense_key_no_sense
18040 18033 *
18041 18034 * Description: Recovery action when sense data was not received.
18042 18035 *
18043 18036 * Context: May be called from interrupt context
18044 18037 */
18045 18038
18046 18039 static void
18047 18040 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
18048 18041 struct scsi_pkt *pktp)
18049 18042 {
18050 18043 struct sd_sense_info si;
18051 18044
18052 18045 ASSERT(un != NULL);
18053 18046 ASSERT(mutex_owned(SD_MUTEX(un)));
18054 18047 ASSERT(bp != NULL);
18055 18048 ASSERT(xp != NULL);
18056 18049 ASSERT(pktp != NULL);
18057 18050
18058 18051 si.ssi_severity = SCSI_ERR_FATAL;
18059 18052 si.ssi_pfa_flag = FALSE;
18060 18053
18061 18054 SD_UPDATE_ERRSTATS(un, sd_softerrs);
18062 18055
18063 18056 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18064 18057 &si, EIO, (clock_t)0, NULL);
18065 18058 }
18066 18059
18067 18060
18068 18061 /*
18069 18062 * Function: sd_sense_key_recoverable_error
18070 18063 *
18071 18064 * Description: Recovery actions for a SCSI "Recovered Error" sense key.
18072 18065 *
18073 18066 * Context: May be called from interrupt context
18074 18067 */
18075 18068
18076 18069 static void
18077 18070 sd_sense_key_recoverable_error(struct sd_lun *un, uint8_t *sense_datap,
18078 18071 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18079 18072 {
18080 18073 struct sd_sense_info si;
18081 18074 uint8_t asc = scsi_sense_asc(sense_datap);
18082 18075 uint8_t ascq = scsi_sense_ascq(sense_datap);
18083 18076
18084 18077 ASSERT(un != NULL);
18085 18078 ASSERT(mutex_owned(SD_MUTEX(un)));
18086 18079 ASSERT(bp != NULL);
18087 18080 ASSERT(xp != NULL);
18088 18081 ASSERT(pktp != NULL);
18089 18082
18090 18083 /*
18091 18084 * 0x00, 0x1D: ATA PASSTHROUGH INFORMATION AVAILABLE
18092 18085 */
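	/*
	 * This combination indicates that the command completed and the
	 * sense data simply carries the ATA return registers for an ATA
	 * PASS-THROUGH command (per SAT), so complete the command
	 * normally rather than treating it as an error.
	 */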
18093 18086 if (asc == 0x00 && ascq == 0x1D) {
18094 18087 sd_return_command(un, bp);
18095 18088 return;
18096 18089 }
18097 18090
18098 18091 /*
18099 18092 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED
18100 18093 */
18101 18094 if ((asc == 0x5D) && (sd_report_pfa != 0)) {
18102 18095 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
18103 18096 si.ssi_severity = SCSI_ERR_INFO;
18104 18097 si.ssi_pfa_flag = TRUE;
18105 18098 } else {
18106 18099 SD_UPDATE_ERRSTATS(un, sd_softerrs);
18107 18100 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err);
18108 18101 si.ssi_severity = SCSI_ERR_RECOVERED;
18109 18102 si.ssi_pfa_flag = FALSE;
18110 18103 }
18111 18104
18112 18105 if (pktp->pkt_resid == 0) {
18113 18106 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18114 18107 sd_return_command(un, bp);
18115 18108 return;
18116 18109 }
18117 18110
18118 18111 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18119 18112 &si, EIO, (clock_t)0, NULL);
18120 18113 }
18121 18114
18122 18115
18123 18116
18124 18117
18125 18118 /*
18126 18119 * Function: sd_sense_key_not_ready
18127 18120 *
18128 18121 * Description: Recovery actions for a SCSI "Not Ready" sense key.
18129 18122 *
18130 18123 * Context: May be called from interrupt context
18131 18124 */
18132 18125
18133 18126 static void
18134 18127 sd_sense_key_not_ready(struct sd_lun *un, uint8_t *sense_datap, struct buf *bp,
18135 18128 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18136 18129 {
18137 18130 struct sd_sense_info si;
18138 18131 uint8_t asc = scsi_sense_asc(sense_datap);
18139 18132 uint8_t ascq = scsi_sense_ascq(sense_datap);
18140 18133
18141 18134 ASSERT(un != NULL);
18142 18135 ASSERT(mutex_owned(SD_MUTEX(un)));
18143 18136 ASSERT(bp != NULL);
18144 18137 ASSERT(xp != NULL);
18145 18138 ASSERT(pktp != NULL);
18146 18139
18147 18140 si.ssi_severity = SCSI_ERR_FATAL;
18148 18141 si.ssi_pfa_flag = FALSE;
18149 18142
18150 18143 /*
18151 18144 * Update error stats after first NOT READY error. Disks may have
18152 18145 * been powered down and may need to be restarted. For CDROMs,
18153 18146 * report NOT READY errors only if media is present.
18154 18147 */
18155 18148 if ((ISCD(un) && (asc == 0x3A)) ||
18156 18149 (xp->xb_nr_retry_count > 0)) {
18157 18150 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18158 18151 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
18159 18152 }
18160 18153
18161 18154 /*
18162 18155 * Just fail if the "not ready" retry limit has been reached.
18163 18156 */
18164 18157 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) {
18165 18158 /* Special check for error message printing for removables. */
18166 18159 if (un->un_f_has_removable_media && (asc == 0x04) &&
18167 18160 (ascq >= 0x04)) {
18168 18161 si.ssi_severity = SCSI_ERR_ALL;
18169 18162 }
18170 18163 goto fail_command;
18171 18164 }
18172 18165
18173 18166 /*
18174 18167 * Check the ASC and ASCQ in the sense data as needed, to determine
18175 18168 * what to do.
18176 18169 */
18177 18170 switch (asc) {
18178 18171 case 0x04: /* LOGICAL UNIT NOT READY */
18179 18172 /*
18180 18173 * Disk drives that fail to spin up cause a very long delay
18181 18174 * in format without warning messages. We will log a message
18182 18175 * if the error level is set to verbose.
18183 18176 */
18184 18177 if (sd_error_level < SCSI_ERR_RETRYABLE) {
18185 18178 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18186 18179 "logical unit not ready, resetting disk\n");
18187 18180 }
18188 18181
18189 18182 /*
18190 18183 * There are different requirements for CDROMs and disks for
18191 18184 * the number of retries. If a CD-ROM is giving this, it is
18192 18185 * probably reading TOC and is in the process of getting
18193 18186 * ready, so we should keep on trying for a long time to make
18194 18187 * sure that all types of media are taken into account (for
18195 18188 * some media the drive takes a long time to read TOC). For
18196 18189 * disks we do not want to retry this too many times as this
18197 18190 * can cause a long hang in format when the drive refuses to
18198 18191 * spin up (a very common failure).
18199 18192 */
18200 18193 switch (ascq) {
18201 18194 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */
18202 18195 /*
18203 18196 * Disk drives frequently refuse to spin up which
18204 18197 * results in a very long hang in format without
18205 18198 * warning messages.
18206 18199 *
18207 18200 * Note: This code preserves the legacy behavior of
18208 18201 * comparing xb_nr_retry_count against zero for fibre
18209 18202 * channel targets instead of comparing against the
18210 18203 * un_reset_retry_count value. The reason for this
18211 18204 * discrepancy has been so utterly lost beneath the
18212 18205 * Sands of Time that even Indiana Jones could not
18213 18206 * find it.
18214 18207 */
18215 18208 if (un->un_f_is_fibre == TRUE) {
18216 18209 if (((sd_level_mask & SD_LOGMASK_DIAG) ||
18217 18210 (xp->xb_nr_retry_count > 0)) &&
18218 18211 (un->un_startstop_timeid == NULL)) {
18219 18212 scsi_log(SD_DEVINFO(un), sd_label,
18220 18213 CE_WARN, "logical unit not ready, "
18221 18214 "resetting disk\n");
18222 18215 sd_reset_target(un, pktp);
18223 18216 }
18224 18217 } else {
18225 18218 if (((sd_level_mask & SD_LOGMASK_DIAG) ||
18226 18219 (xp->xb_nr_retry_count >
18227 18220 un->un_reset_retry_count)) &&
18228 18221 (un->un_startstop_timeid == NULL)) {
18229 18222 scsi_log(SD_DEVINFO(un), sd_label,
18230 18223 CE_WARN, "logical unit not ready, "
18231 18224 "resetting disk\n");
18232 18225 sd_reset_target(un, pktp);
18233 18226 }
18234 18227 }
18235 18228 break;
18236 18229
18237 18230 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */
18238 18231 /*
18239 18232 * If the target is in the process of becoming
18240 18233 * ready, just proceed with the retry. This can
18241 18234 * happen with CD-ROMs that take a long time to
18242 18235 * read TOC after a power cycle or reset.
18243 18236 */
18244 18237 goto do_retry;
18245 18238
18246 18239 case 0x02: /* LUN NOT READY, INITIALIZING CMD REQUIRED */
18247 18240 break;
18248 18241
18249 18242 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
18250 18243 /*
18251 18244 * Retries cannot help here so just fail right away.
18252 18245 */
18253 18246 goto fail_command;
18254 18247
18255 18248 case 0x88:
18256 18249 /*
18257 18250 * Vendor-unique code for T3/T4: it indicates a
18258 18251 * path problem in a multipathed config, but as far as
18259 18252 * the target driver is concerned it equates to a fatal
18260 18253 * error, so we should just fail the command right away
18261 18254 * (without printing anything to the console). If this
18262 18255 * is not a T3/T4, fall thru to the default recovery
18263 18256 * action.
18264 18257 * T3/T4 is FC only, so there is no need to check is_fibre.
18265 18258 */
18266 18259 if (SD_IS_T3(un) || SD_IS_T4(un)) {
18267 18260 sd_return_failed_command(un, bp, EIO);
18268 18261 return;
18269 18262 }
18270 18263 /* FALLTHRU */
18271 18264
18272 18265 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */
18273 18266 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */
18274 18267 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */
18275 18268 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */
18276 18269 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */
18277 18270 default: /* Possible future codes in SCSI spec? */
18278 18271 /*
18279 18272 * For removable-media devices, do not retry if
18280 18273 * ASCQ > 2 as these result mostly from USCSI commands
18281 18274 * on MMC devices issued to check status of an
18282 18275 * operation initiated in immediate mode. Also for
18283 18276 * ASCQ >= 4 do not print console messages as these
18284 18277 * mainly represent a user-initiated operation
18285 18278 * instead of a system failure.
18286 18279 */
18287 18280 if (un->un_f_has_removable_media) {
18288 18281 si.ssi_severity = SCSI_ERR_ALL;
18289 18282 goto fail_command;
18290 18283 }
18291 18284 break;
18292 18285 }
18293 18286
18294 18287 /*
18295 18288 * As part of our recovery attempt for the NOT READY
18296 18289 * condition, we issue a START STOP UNIT command. However
18297 18290 * we want to wait for a short delay before attempting this
18298 18291 * as there may still be more commands coming back from the
18299 18292 * target with the check condition. To do this we use
18300 18293 * timeout(9F) to call sd_start_stop_unit_callback() after
18301 18294 * the delay interval expires. (sd_start_stop_unit_callback()
18302 18295 * dispatches sd_start_stop_unit_task(), which will issue
18303 18296 * dispatches sd_start_stop_unit_task(), which will issue
18304 18297 * the actual START STOP UNIT command.) The delay interval
18305 18298 * command that generated the NOT READY condition.
18306 18299 *
18307 18300 * Note that we could just dispatch sd_start_stop_unit_task()
18308 18301 * from here and allow it to sleep for the delay interval,
18309 18302 * but then we would be tying up the taskq thread
18310 18303 * unnecessarily for the duration of the delay.
18311 18304 *
18312 18305 * Do not issue the START STOP UNIT if the current command
18313 18306 * is already a START STOP UNIT.
18314 18307 */
18315 18308 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
18316 18309 break;
18317 18310 }
18318 18311
18319 18312 /*
18320 18313 * Do not schedule the timeout if one is already pending.
18321 18314 */
18322 18315 if (un->un_startstop_timeid != NULL) {
18323 18316 SD_INFO(SD_LOG_ERROR, un,
18324 18317 "sd_sense_key_not_ready: restart already issued to"
18325 18318 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
18326 18319 ddi_get_instance(SD_DEVINFO(un)));
18327 18320 break;
18328 18321 }
18329 18322
18330 18323 /*
18331 18324 * Schedule the START STOP UNIT command, then queue the command
18332 18325 * for a retry.
18333 18326 *
18334 18327 * Note: A timeout is not scheduled for this retry because we
18335 18328 * want the retry to be serial with the START_STOP_UNIT. The
18336 18329 * retry will be started when the START_STOP_UNIT is completed
18337 18330 * in sd_start_stop_unit_task.
18338 18331 */
18339 18332 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
18340 18333 un, un->un_busy_timeout / 2);
18341 18334 xp->xb_nr_retry_count++;
18342 18335 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
18343 18336 return;
18344 18337
18345 18338 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
18346 18339 if (sd_error_level < SCSI_ERR_RETRYABLE) {
18347 18340 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18348 18341 "unit does not respond to selection\n");
18349 18342 }
18350 18343 break;
18351 18344
18352 18345 case 0x3A: /* MEDIUM NOT PRESENT */
18353 18346 if (sd_error_level >= SCSI_ERR_FATAL) {
18354 18347 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18355 18348 "Caddy not inserted in drive\n");
18356 18349 }
18357 18350
18358 18351 sr_ejected(un);
18359 18352 un->un_mediastate = DKIO_EJECTED;
18360 18353 /* The state has changed, inform the media watch routines */
18361 18354 cv_broadcast(&un->un_state_cv);
18362 18355 /* Just fail if no media is present in the drive. */
18363 18356 goto fail_command;
18364 18357
18365 18358 default:
18366 18359 if (sd_error_level < SCSI_ERR_RETRYABLE) {
18367 18360 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
18368 18361 "Unit not Ready. Additional sense code 0x%x\n",
18369 18362 asc);
18370 18363 }
18371 18364 break;
18372 18365 }
18373 18366
18374 18367 do_retry:
18375 18368
18376 18369 /*
18377 18370 * Retry the command, as some targets may report NOT READY for
18378 18371 * several seconds after being reset.
18379 18372 */
18380 18373 xp->xb_nr_retry_count++;
18381 18374 si.ssi_severity = SCSI_ERR_RETRYABLE;
18382 18375 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
18383 18376 &si, EIO, un->un_busy_timeout, NULL);
18384 18377
18385 18378 return;
18386 18379
18387 18380 fail_command:
18388 18381 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18389 18382 sd_return_failed_command(un, bp, EIO);
18390 18383 }
18391 18384
18392 18385
18393 18386
18394 18387 /*
18395 18388 * Function: sd_sense_key_medium_or_hardware_error
18396 18389 *
18397 18390 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
18398 18391 * sense key.
18399 18392 *
18400 18393 * Context: May be called from interrupt context
18401 18394 */
18402 18395
18403 18396 static void
18404 18397 sd_sense_key_medium_or_hardware_error(struct sd_lun *un, uint8_t *sense_datap,
18405 18398 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18406 18399 {
18407 18400 struct sd_sense_info si;
18408 18401 uint8_t sense_key = scsi_sense_key(sense_datap);
18409 18402 uint8_t asc = scsi_sense_asc(sense_datap);
18410 18403
18411 18404 ASSERT(un != NULL);
18412 18405 ASSERT(mutex_owned(SD_MUTEX(un)));
18413 18406 ASSERT(bp != NULL);
18414 18407 ASSERT(xp != NULL);
18415 18408 ASSERT(pktp != NULL);
18416 18409
18417 18410 si.ssi_severity = SCSI_ERR_FATAL;
18418 18411 si.ssi_pfa_flag = FALSE;
18419 18412
18420 18413 if (sense_key == KEY_MEDIUM_ERROR) {
18421 18414 SD_UPDATE_ERRSTATS(un, sd_rq_media_err);
18422 18415 }
18423 18416
18424 18417 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18425 18418
18426 18419 if ((un->un_reset_retry_count != 0) &&
18427 18420 (xp->xb_retry_count == un->un_reset_retry_count)) {
18428 18421 mutex_exit(SD_MUTEX(un));
18429 18422 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */
18430 18423 if (un->un_f_allow_bus_device_reset == TRUE) {
18431 18424
18432 18425 boolean_t try_resetting_target = B_TRUE;
18433 18426
18434 18427 /*
18435 18428 * We need to be able to handle specific ASCs when we are
18436 18429 * handling a KEY_HARDWARE_ERROR. In particular,
18437 18430 * taking the default action of resetting the target may
18438 18431 * not be the appropriate way to attempt recovery.
18439 18432 * Resetting a target because of a single LUN failure
18440 18433 * victimizes all LUNs on that target.
18441 18434 *
18442 18435 * This is true for the LSI arrays: if an LSI
18443 18436 * array controller returns an ASC of 0x84 (LUN Dead), we
18444 18437 * should trust it.
18445 18438 */
18446 18439
18447 18440 if (sense_key == KEY_HARDWARE_ERROR) {
18448 18441 switch (asc) {
18449 18442 case 0x84:
18450 18443 if (SD_IS_LSI(un)) {
18451 18444 try_resetting_target = B_FALSE;
18452 18445 }
18453 18446 break;
18454 18447 default:
18455 18448 break;
18456 18449 }
18457 18450 }
18458 18451
18459 18452 if (try_resetting_target == B_TRUE) {
18460 18453 int reset_retval = 0;
18461 18454 if (un->un_f_lun_reset_enabled == TRUE) {
18462 18455 SD_TRACE(SD_LOG_IO_CORE, un,
18463 18456 "sd_sense_key_medium_or_hardware_"
18464 18457 "error: issuing RESET_LUN\n");
18465 18458 reset_retval =
18466 18459 scsi_reset(SD_ADDRESS(un),
18467 18460 RESET_LUN);
18468 18461 }
18469 18462 if (reset_retval == 0) {
18470 18463 SD_TRACE(SD_LOG_IO_CORE, un,
18471 18464 "sd_sense_key_medium_or_hardware_"
18472 18465 "error: issuing RESET_TARGET\n");
18473 18466 (void) scsi_reset(SD_ADDRESS(un),
18474 18467 RESET_TARGET);
18475 18468 }
18476 18469 }
18477 18470 }
18478 18471 mutex_enter(SD_MUTEX(un));
18479 18472 }
18480 18473
18481 18474 /*
18482 18475 * This really ought to be a fatal error, but we will retry anyway
18483 18476 * as some drives report this error spuriously.
18484 18477 */
18485 18478 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18486 18479 &si, EIO, (clock_t)0, NULL);
18487 18480 }
18488 18481
18489 18482
18490 18483
18491 18484 /*
18492 18485 * Function: sd_sense_key_illegal_request
18493 18486 *
18494 18487 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
18495 18488 *
18496 18489 * Context: May be called from interrupt context
18497 18490 */
18498 18491
18499 18492 static void
18500 18493 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
18501 18494 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18502 18495 {
18503 18496 struct sd_sense_info si;
18504 18497
18505 18498 ASSERT(un != NULL);
18506 18499 ASSERT(mutex_owned(SD_MUTEX(un)));
18507 18500 ASSERT(bp != NULL);
18508 18501 ASSERT(xp != NULL);
18509 18502 ASSERT(pktp != NULL);
18510 18503
18511 18504 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err);
18512 18505
18513 18506 si.ssi_severity = SCSI_ERR_INFO;
18514 18507 si.ssi_pfa_flag = FALSE;
18515 18508
18516 18509 /* Pointless to retry if the target thinks it's an illegal request */
18517 18510 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18518 18511 sd_return_failed_command(un, bp, EIO);
18519 18512 }
18520 18513
18521 18514
18522 18515
18523 18516
18524 18517 /*
18525 18518 * Function: sd_sense_key_unit_attention
18526 18519 *
18527 18520 * Description: Recovery actions for a SCSI "Unit Attention" sense key.
18528 18521 *
18529 18522 * Context: May be called from interrupt context
18530 18523 */
18531 18524
18532 18525 static void
18533 18526 sd_sense_key_unit_attention(struct sd_lun *un, uint8_t *sense_datap,
18534 18527 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18535 18528 {
18536 18529 /*
18537 18530 * For UNIT ATTENTION we allow retries for one minute. Devices
18538 18531 * like Sonoma can keep returning UNIT ATTENTION for close to a
18539 18532 * minute under certain conditions.
18540 18533 */
18541 18534 int retry_check_flag = SD_RETRIES_UA;
18542 18535 boolean_t kstat_updated = B_FALSE;
18543 18536 struct sd_sense_info si;
18544 18537 uint8_t asc = scsi_sense_asc(sense_datap);
18545 18538 uint8_t ascq = scsi_sense_ascq(sense_datap);
18546 18539
18547 18540 ASSERT(un != NULL);
18548 18541 ASSERT(mutex_owned(SD_MUTEX(un)));
18549 18542 ASSERT(bp != NULL);
18550 18543 ASSERT(xp != NULL);
18551 18544 ASSERT(pktp != NULL);
18552 18545
18553 18546 si.ssi_severity = SCSI_ERR_INFO;
18554 18547 si.ssi_pfa_flag = FALSE;
18555 18548
18556 18549
18557 18550 switch (asc) {
18558 18551 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */
18559 18552 if (sd_report_pfa != 0) {
18560 18553 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
18561 18554 si.ssi_pfa_flag = TRUE;
18562 18555 retry_check_flag = SD_RETRIES_STANDARD;
18563 18556 goto do_retry;
18564 18557 }
18565 18558
18566 18559 break;
18567 18560
18568 18561 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
18569 18562 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
18570 18563 un->un_resvd_status |=
18571 18564 (SD_LOST_RESERVE | SD_WANT_RESERVE);
18572 18565 }
18573 18566 #ifdef _LP64
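		/*
		 * Disks with more than SD_GROUP1_MAX_ADDRESS blocks need
		 * descriptor-format sense data to report large LBAs; a
		 * power-on or reset may have reverted the device to fixed
		 * format, so re-enable descriptor sense from taskq context.
		 */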
18574 18567 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) {
18575 18568 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task,
18576 18569 un, KM_NOSLEEP) == 0) {
18577 18570 /*
18578 18571 * If we can't dispatch the task we'll just
18579 18572 * live without descriptor sense. We can
18580 18573 * try again on the next "unit attention"
18581 18574 */
18582 18575 SD_ERROR(SD_LOG_ERROR, un,
18583 18576 "sd_sense_key_unit_attention: "
18584 18577 "Could not dispatch "
18585 18578 "sd_reenable_dsense_task\n");
18586 18579 }
18587 18580 }
18588 18581 #endif /* _LP64 */
18589 18582 /* FALLTHRU */
18590 18583
18591 18584 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */
18592 18585 if (!un->un_f_has_removable_media) {
18593 18586 break;
18594 18587 }
18595 18588
18596 18589 /*
18597 18590 * When we get a unit attention from a removable-media device,
18598 18591 * it may be in a state that will take a long time to recover
18599 18592 * (e.g., from a reset). Since we are executing in interrupt
18600 18593 * context here, we cannot wait around for the device to come
18601 18594 * back. So hand this command off to sd_media_change_task()
18602 18595 * for deferred processing under taskq thread context. (Note
18603 18596 * that the command still may be failed if a problem is
18604 18597 * encountered at a later time.)
18605 18598 */
18606 18599 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
18607 18600 KM_NOSLEEP) == 0) {
18608 18601 /*
18609 18602 * Cannot dispatch the request so fail the command.
18610 18603 */
18611 18604 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18612 18605 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
18613 18606 si.ssi_severity = SCSI_ERR_FATAL;
18614 18607 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18615 18608 sd_return_failed_command(un, bp, EIO);
18616 18609 }
18617 18610
18618 18611 /*
18619 18612 * If we failed to dispatch sd_media_change_task(), the kstats
18620 18613 * were already updated above. If the dispatch succeeded, the
18621 18614 * kstats will be updated later if the task encounters an error.
18622 18615 * Either way, set the kstat_updated flag here.
18623 18616 */
18624 18617 kstat_updated = B_TRUE;
18625 18618
18626 18619 /*
18627 18620 * Either the command has been successfully dispatched to a
18628 18621 * taskq for retrying, or the dispatch failed. In either case
18629 18622 * do NOT retry again by calling sd_retry_command(); that would
18630 18623 * set up two retries of the same command, and when one completes
18631 18624 * and frees its resources the other would access freed memory,
18632 18625 * a bad thing.
18633 18626 */
18634 18627 return;
18635 18628
18636 18629 default:
18637 18630 break;
18638 18631 }
18639 18632
18640 18633 /*
18641 18634 * ASC ASCQ
18642 18635 * 2A 09 Capacity data has changed
18643 18636 * 2A 01 Mode parameters changed
18644 18637 * 3F 0E Reported luns data has changed
18645 18638 * Arrays that support logical unit expansion should report
18646 18639 * capacity changes (2Ah/09h); "Mode parameters changed" and
18647 18640 * "Reported LUNs data has changed" approximate the same event.
18648 18641 */
18649 18642 if (((asc == 0x2a) && (ascq == 0x09)) ||
18650 18643 ((asc == 0x2a) && (ascq == 0x01)) ||
18651 18644 ((asc == 0x3f) && (ascq == 0x0e))) {
18652 18645 if (taskq_dispatch(sd_tq, sd_target_change_task, un,
18653 18646 KM_NOSLEEP) == 0) {
18654 18647 SD_ERROR(SD_LOG_ERROR, un,
18655 18648 "sd_sense_key_unit_attention: "
18656 18649 "Could not dispatch sd_target_change_task\n");
18657 18650 }
18658 18651 }
18659 18652
18660 18653 /*
18661 18654 * Update kstat if we haven't done that.
18662 18655 */
18663 18656 if (!kstat_updated) {
18664 18657 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18665 18658 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
18666 18659 }
18667 18660
18668 18661 do_retry:
18669 18662 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
18670 18663 EIO, SD_UA_RETRY_DELAY, NULL);
18671 18664 }
18672 18665
18673 18666
18674 18667
18675 18668 /*
18676 18669 * Function: sd_sense_key_fail_command
18677 18670 *
18678 18671 * Description: Used to fail a command when we don't like the sense key that
18679 18672 * was returned.
18680 18673 *
18681 18674 * Context: May be called from interrupt context
18682 18675 */
18683 18676
18684 18677 static void
18685 18678 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
18686 18679 struct scsi_pkt *pktp)
18687 18680 {
18688 18681 struct sd_sense_info si;
18689 18682
18690 18683 ASSERT(un != NULL);
18691 18684 ASSERT(mutex_owned(SD_MUTEX(un)));
18692 18685 ASSERT(bp != NULL);
18693 18686 ASSERT(xp != NULL);
18694 18687 ASSERT(pktp != NULL);
18695 18688
18696 18689 si.ssi_severity = SCSI_ERR_FATAL;
18697 18690 si.ssi_pfa_flag = FALSE;
18698 18691
18699 18692 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18700 18693 sd_return_failed_command(un, bp, EIO);
18701 18694 }
18702 18695
18703 18696
18704 18697
18705 18698 /*
18706 18699 * Function: sd_sense_key_blank_check
18707 18700 *
18708 18701 * Description: Recovery actions for a SCSI "Blank Check" sense key.
18709 18702 * Has no monetary connotation.
18710 18703 *
18711 18704 * Context: May be called from interrupt context
18712 18705 */
18713 18706
18714 18707 static void
18715 18708 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
18716 18709 struct scsi_pkt *pktp)
18717 18710 {
18718 18711 struct sd_sense_info si;
18719 18712
18720 18713 ASSERT(un != NULL);
18721 18714 ASSERT(mutex_owned(SD_MUTEX(un)));
18722 18715 ASSERT(bp != NULL);
18723 18716 ASSERT(xp != NULL);
18724 18717 ASSERT(pktp != NULL);
18725 18718
18726 18719 /*
18727 18720 * Blank check is not fatal for removable devices, therefore
18728 18721 * it does not require a console message.
18729 18722 */
18730 18723 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL :
18731 18724 SCSI_ERR_FATAL;
18732 18725 si.ssi_pfa_flag = FALSE;
18733 18726
18734 18727 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18735 18728 sd_return_failed_command(un, bp, EIO);
18736 18729 }
18737 18730
18738 18731
18739 18732
18740 18733
18741 18734 /*
18742 18735 * Function: sd_sense_key_aborted_command
18743 18736 *
18744 18737 * Description: Recovery actions for a SCSI "Aborted Command" sense key.
18745 18738 *
18746 18739 * Context: May be called from interrupt context
18747 18740 */
18748 18741
18749 18742 static void
18750 18743 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
18751 18744 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18752 18745 {
18753 18746 struct sd_sense_info si;
18754 18747
18755 18748 ASSERT(un != NULL);
18756 18749 ASSERT(mutex_owned(SD_MUTEX(un)));
18757 18750 ASSERT(bp != NULL);
18758 18751 ASSERT(xp != NULL);
18759 18752 ASSERT(pktp != NULL);
18760 18753
18761 18754 si.ssi_severity = SCSI_ERR_FATAL;
18762 18755 si.ssi_pfa_flag = FALSE;
18763 18756
18764 18757 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18765 18758
18766 18759 /*
18767 18760 * This really ought to be a fatal error, but we will retry anyway
18768 18761 * as some drives report this error spuriously.
18769 18762 */
18770 18763 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18771 18764 &si, EIO, drv_usectohz(100000), NULL);
18772 18765 }
18773 18766
18774 18767
18775 18768
18776 18769 /*
18777 18770 * Function: sd_sense_key_default
18778 18771 *
18779 18772 * Description: Default recovery action for several SCSI sense keys (basically
18780 18773 * attempts a retry).
18781 18774 *
18782 18775 * Context: May be called from interrupt context
18783 18776 */
18784 18777
18785 18778 static void
18786 18779 sd_sense_key_default(struct sd_lun *un, uint8_t *sense_datap, struct buf *bp,
18787 18780 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18788 18781 {
18789 18782 struct sd_sense_info si;
18790 18783 uint8_t sense_key = scsi_sense_key(sense_datap);
18791 18784
18792 18785 ASSERT(un != NULL);
18793 18786 ASSERT(mutex_owned(SD_MUTEX(un)));
18794 18787 ASSERT(bp != NULL);
18795 18788 ASSERT(xp != NULL);
18796 18789 ASSERT(pktp != NULL);
18797 18790
18798 18791 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18799 18792
18800 18793 /*
18801 18794 * Undecoded sense key. Attempt retries and hope that will fix
18802 18795 * the problem. Otherwise, we're dead.
18803 18796 */
18804 18797 if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
18805 18798 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18806 18799 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]);
18807 18800 }
18808 18801
18809 18802 si.ssi_severity = SCSI_ERR_FATAL;
18810 18803 si.ssi_pfa_flag = FALSE;
18811 18804
18812 18805 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18813 18806 &si, EIO, (clock_t)0, NULL);
18814 18807 }
18815 18808
18816 18809
18817 18810
18818 18811 /*
18819 18812 * Function: sd_print_retry_msg
18820 18813 *
18821 18814 * Description: Print a message indicating the retry action being taken.
18822 18815 *
18823 18816 * Arguments: un - ptr to associated softstate
18824 18817 * bp - ptr to buf(9S) for the command
18825 18818 * arg - not used.
18826 18819 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
18827 18820 * or SD_NO_RETRY_ISSUED
18828 18821 *
18829 18822 * Context: May be called from interrupt context
18830 18823 */
18831 18824 /* ARGSUSED */
18832 18825 static void
18833 18826 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag)
18834 18827 {
18835 18828 struct sd_xbuf *xp;
18836 18829 struct scsi_pkt *pktp;
18837 18830 char *reasonp;
18838 18831 char *msgp;
18839 18832
18840 18833 ASSERT(un != NULL);
18841 18834 ASSERT(mutex_owned(SD_MUTEX(un)));
18842 18835 ASSERT(bp != NULL);
18843 18836 pktp = SD_GET_PKTP(bp);
18844 18837 ASSERT(pktp != NULL);
18845 18838 xp = SD_GET_XBUF(bp);
18846 18839 ASSERT(xp != NULL);
18847 18840
18848 18841 ASSERT(!mutex_owned(&un->un_pm_mutex));
18849 18842 mutex_enter(&un->un_pm_mutex);
18850 18843 if ((un->un_state == SD_STATE_SUSPENDED) ||
18851 18844 (SD_DEVICE_IS_IN_LOW_POWER(un)) ||
18852 18845 (pktp->pkt_flags & FLAG_SILENT)) {
18853 18846 mutex_exit(&un->un_pm_mutex);
18854 18847 goto update_pkt_reason;
18855 18848 }
18856 18849 mutex_exit(&un->un_pm_mutex);
18857 18850
18858 18851 /*
18859 18852 * Suppress messages if they are all the same pkt_reason; with
18860 18853 * tagged queueing (TQ), many (up to 256) can come back with the
18861 18854 * same pkt_reason. If we are in panic, suppress retry messages.
18862 18855 */
18863 18856 switch (flag) {
18864 18857 case SD_NO_RETRY_ISSUED:
18865 18858 msgp = "giving up";
18866 18859 break;
18867 18860 case SD_IMMEDIATE_RETRY_ISSUED:
18868 18861 case SD_DELAYED_RETRY_ISSUED:
18869 18862 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) ||
18870 18863 ((pktp->pkt_reason == un->un_last_pkt_reason) &&
18871 18864 (sd_error_level != SCSI_ERR_ALL))) {
18872 18865 return;
18873 18866 }
18874 18867 msgp = "retrying command";
18875 18868 break;
18876 18869 default:
18877 18870 goto update_pkt_reason;
18878 18871 }
18879 18872
18880 18873 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" :
18881 18874 scsi_rname(pktp->pkt_reason));
18882 18875
18883 18876 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
18884 18877 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18885 18878 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp);
18886 18879 }
18887 18880
18888 18881 update_pkt_reason:
18889 18882 /*
18890 18883 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason.
18891 18884 * This is to prevent multiple console messages for the same failure
18892 18885 * condition. Note that un->un_last_pkt_reason is NOT restored if and
18893 18886 * when the command is retried successfully, because there still may be
18894 18887 * more commands coming back with the same value of pktp->pkt_reason.
18895 18888 */
18896 18889 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) {
18897 18890 un->un_last_pkt_reason = pktp->pkt_reason;
18898 18891 }
18899 18892 }
18900 18893
18901 18894
18902 18895 /*
18903 18896 * Function: sd_print_cmd_incomplete_msg
18904 18897 *
18905 18898 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason.
18906 18899 *
18907 18900 * Arguments: un - ptr to associated softstate
18908 18901 * bp - ptr to buf(9S) for the command
18909 18902 * arg - passed to sd_print_retry_msg()
18910 18903 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
18911 18904 * or SD_NO_RETRY_ISSUED
18912 18905 *
18913 18906 * Context: May be called from interrupt context
18914 18907 */
18915 18908
18916 18909 static void
18917 18910 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg,
18918 18911 int code)
18919 18912 {
18920 18913 dev_info_t *dip;
18921 18914
18922 18915 ASSERT(un != NULL);
18923 18916 ASSERT(mutex_owned(SD_MUTEX(un)));
18924 18917 ASSERT(bp != NULL);
18925 18918
18926 18919 switch (code) {
18927 18920 case SD_NO_RETRY_ISSUED:
18928 18921 /* Command was failed. Someone turned off this target? */
18929 18922 if (un->un_state != SD_STATE_OFFLINE) {
18930 18923 /*
18931 18924 * Suppress message if we are detaching and
18932 18925 * device has been disconnected
18933 18926 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation
18934 18927 * private interface and not part of the DDI
18935 18928 */
18936 18929 dip = un->un_sd->sd_dev;
18937 18930 if (!(DEVI_IS_DETACHING(dip) &&
18938 18931 DEVI_IS_DEVICE_REMOVED(dip))) {
18939 18932 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18940 18933 "disk not responding to selection\n");
18941 18934 }
18942 18935 New_state(un, SD_STATE_OFFLINE);
18943 18936 }
18944 18937 break;
18945 18938
18946 18939 case SD_DELAYED_RETRY_ISSUED:
18947 18940 case SD_IMMEDIATE_RETRY_ISSUED:
18948 18941 default:
18949 18942 /* Command was successfully queued for retry */
18950 18943 sd_print_retry_msg(un, bp, arg, code);
18951 18944 break;
18952 18945 }
18953 18946 }
18954 18947
18955 18948
18956 18949 /*
18957 18950 * Function: sd_pkt_reason_cmd_incomplete
18958 18951 *
18959 18952 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason.
18960 18953 *
18961 18954 * Context: May be called from interrupt context
18962 18955 */
18963 18956
18964 18957 static void
18965 18958 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
18966 18959 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18967 18960 {
18968 18961 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE;
18969 18962
18970 18963 ASSERT(un != NULL);
18971 18964 ASSERT(mutex_owned(SD_MUTEX(un)));
18972 18965 ASSERT(bp != NULL);
18973 18966 ASSERT(xp != NULL);
18974 18967 ASSERT(pktp != NULL);
18975 18968
18976 18969 /* Do not do a reset if selection did not complete */
18977 18970 /* Note: Should this not just check the bit? */
18978 18971 if (pktp->pkt_state != STATE_GOT_BUS) {
18979 18972 SD_UPDATE_ERRSTATS(un, sd_transerrs);
18980 18973 sd_reset_target(un, pktp);
18981 18974 }
18982 18975
18983 18976 /*
18984 18977 * If the target was not successfully selected, then set
18985 18978 * SD_RETRIES_FAILFAST to indicate that we lost communication
18986 18979 * with the target, and further retries and/or commands are
18987 18980 * likely to take a long time.
18988 18981 */
18989 18982 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) {
18990 18983 flag |= SD_RETRIES_FAILFAST;
18991 18984 }
18992 18985
18993 18986 SD_UPDATE_RESERVATION_STATUS(un, pktp);
18994 18987
18995 18988 sd_retry_command(un, bp, flag,
18996 18989 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
18997 18990 }
18998 18991
18999 18992
19000 18993
19001 18994 /*
19002 18995 * Function: sd_pkt_reason_cmd_tran_err
19003 18996 *
19004 18997 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason.
19005 18998 *
19006 18999 * Context: May be called from interrupt context
19007 19000 */
19008 19001
19009 19002 static void
19010 19003 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
19011 19004 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19012 19005 {
19013 19006 ASSERT(un != NULL);
19014 19007 ASSERT(mutex_owned(SD_MUTEX(un)));
19015 19008 ASSERT(bp != NULL);
19016 19009 ASSERT(xp != NULL);
19017 19010 ASSERT(pktp != NULL);
19018 19011
19019 19012 /*
19020 19013 * Do not reset if we got a parity error, or if
19021 19014 * selection did not complete.
19022 19015 */
19023 19016 SD_UPDATE_ERRSTATS(un, sd_harderrs);
19024 19017 /* Note: Should this not just check the bit for pkt_state? */
19025 19018 if (((pktp->pkt_statistics & STAT_PERR) == 0) &&
19026 19019 (pktp->pkt_state != STATE_GOT_BUS)) {
19027 19020 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19028 19021 sd_reset_target(un, pktp);
19029 19022 }
19030 19023
19031 19024 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19032 19025
19033 19026 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
19034 19027 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19035 19028 }
19036 19029
19037 19030
19038 19031
19039 19032 /*
19040 19033 * Function: sd_pkt_reason_cmd_reset
19041 19034 *
19042 19035 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason.
19043 19036 *
19044 19037 * Context: May be called from interrupt context
19045 19038 */
19046 19039
19047 19040 static void
19048 19041 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
19049 19042 struct scsi_pkt *pktp)
19050 19043 {
19051 19044 ASSERT(un != NULL);
19052 19045 ASSERT(mutex_owned(SD_MUTEX(un)));
19053 19046 ASSERT(bp != NULL);
19054 19047 ASSERT(xp != NULL);
19055 19048 ASSERT(pktp != NULL);
19056 19049
19057 19050 /* The target may still be running the command, so try to reset. */
19058 19051 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19059 19052 sd_reset_target(un, pktp);
19060 19053
19061 19054 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19062 19055
19063 19056 /*
19064 19057 * If pkt_reason is CMD_RESET chances are that this pkt got
19065 19058 * reset because another target on this bus caused it. The target
19066 19059 * that caused it should get CMD_TIMEOUT with pkt_statistics
19067 19060 * of STAT_TIMEOUT/STAT_DEV_RESET.
19068 19061 */
19069 19062
19070 19063 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
19071 19064 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19072 19065 }
19073 19066
19074 19067
19075 19068
19076 19069
19077 19070 /*
19078 19071 * Function: sd_pkt_reason_cmd_aborted
19079 19072 *
19080 19073 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason.
19081 19074 *
19082 19075 * Context: May be called from interrupt context
19083 19076 */
19084 19077
19085 19078 static void
19086 19079 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
19087 19080 struct scsi_pkt *pktp)
19088 19081 {
19089 19082 ASSERT(un != NULL);
19090 19083 ASSERT(mutex_owned(SD_MUTEX(un)));
19091 19084 ASSERT(bp != NULL);
19092 19085 ASSERT(xp != NULL);
19093 19086 ASSERT(pktp != NULL);
19094 19087
19095 19088 /* The target may still be running the command, so try to reset. */
19096 19089 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19097 19090 sd_reset_target(un, pktp);
19098 19091
19099 19092 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19100 19093
19101 19094 /*
19102 19095 * If pkt_reason is CMD_ABORTED chances are that this pkt got
19103 19096 * aborted because another target on this bus caused it. The target
19104 19097 * that caused it should get CMD_TIMEOUT with pkt_statistics
19105 19098 * of STAT_TIMEOUT/STAT_DEV_RESET.
19106 19099 */
19107 19100
19108 19101 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
19109 19102 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19110 19103 }
19111 19104
19112 19105
19113 19106
19114 19107 /*
19115 19108 * Function: sd_pkt_reason_cmd_timeout
19116 19109 *
19117 19110 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason.
19118 19111 *
19119 19112 * Context: May be called from interrupt context
19120 19113 */
19121 19114
19122 19115 static void
19123 19116 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
19124 19117 struct scsi_pkt *pktp)
19125 19118 {
19126 19119 ASSERT(un != NULL);
19127 19120 ASSERT(mutex_owned(SD_MUTEX(un)));
19128 19121 ASSERT(bp != NULL);
19129 19122 ASSERT(xp != NULL);
19130 19123 ASSERT(pktp != NULL);
19131 19124
19132 19125
19133 19126 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19134 19127 sd_reset_target(un, pktp);
19135 19128
19136 19129 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19137 19130
19138 19131 /*
19139 19132 * A command timeout indicates that we could not establish
19140 19133 * communication with the target, so set SD_RETRIES_FAILFAST
19141 19134 * as further retries/commands are likely to take a long time.
19142 19135 */
19143 19136 sd_retry_command(un, bp,
19144 19137 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST),
19145 19138 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19146 19139 }
19147 19140
19148 19141
19149 19142
19150 19143 /*
19151 19144 * Function: sd_pkt_reason_cmd_unx_bus_free
19152 19145 *
19153 19146 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason.
19154 19147 *
19155 19148 * Context: May be called from interrupt context
19156 19149 */
19157 19150
19158 19151 static void
19159 19152 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
19160 19153 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19161 19154 {
19162 19155 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code);
19163 19156
19164 19157 ASSERT(un != NULL);
19165 19158 ASSERT(mutex_owned(SD_MUTEX(un)));
19166 19159 ASSERT(bp != NULL);
19167 19160 ASSERT(xp != NULL);
19168 19161 ASSERT(pktp != NULL);
19169 19162
19170 19163 SD_UPDATE_ERRSTATS(un, sd_harderrs);
19171 19164 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19172 19165
19173 19166 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ?
19174 19167 sd_print_retry_msg : NULL;
19175 19168
19176 19169 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
19177 19170 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19178 19171 }
19179 19172
19180 19173
19181 19174 /*
19182 19175 * Function: sd_pkt_reason_cmd_tag_reject
19183 19176 *
19184 19177 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason.
19185 19178 *
19186 19179 * Context: May be called from interrupt context
19187 19180 */
19188 19181
19189 19182 static void
19190 19183 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
19191 19184 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19192 19185 {
19193 19186 ASSERT(un != NULL);
19194 19187 ASSERT(mutex_owned(SD_MUTEX(un)));
19195 19188 ASSERT(bp != NULL);
19196 19189 ASSERT(xp != NULL);
19197 19190 ASSERT(pktp != NULL);
19198 19191
19199 19192 SD_UPDATE_ERRSTATS(un, sd_harderrs);
19200 19193 pktp->pkt_flags = 0;
19201 19194 un->un_tagflags = 0;
19202 19195 if (un->un_f_opt_queueing == TRUE) {
19203 19196 un->un_throttle = min(un->un_throttle, 3);
19204 19197 } else {
19205 19198 un->un_throttle = 1;
19206 19199 }
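	/*
	 * The target rejected a tagged command, so turn off tagged
	 * queueing for this device (clear the "tagged-qing" capability)
	 * and continue with the reduced throttle set above.
	 */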
19207 19200 mutex_exit(SD_MUTEX(un));
19208 19201 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
19209 19202 mutex_enter(SD_MUTEX(un));
19210 19203
19211 19204 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19212 19205
19213 19206 /* Legacy behavior not to check retry counts here. */
19214 19207 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE),
19215 19208 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19216 19209 }
19217 19210
19218 19211
19219 19212 /*
19220 19213 * Function: sd_pkt_reason_default
19221 19214 *
19222 19215 * Description: Default recovery actions for SCSA pkt_reason values that
19223 19216 * do not have more explicit recovery actions.
19224 19217 *
19225 19218 * Context: May be called from interrupt context
19226 19219 */
19227 19220
19228 19221 static void
19229 19222 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
19230 19223 struct scsi_pkt *pktp)
19231 19224 {
19232 19225 ASSERT(un != NULL);
19233 19226 ASSERT(mutex_owned(SD_MUTEX(un)));
19234 19227 ASSERT(bp != NULL);
19235 19228 ASSERT(xp != NULL);
19236 19229 ASSERT(pktp != NULL);
19237 19230
19238 19231 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19239 19232 sd_reset_target(un, pktp);
19240 19233
19241 19234 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19242 19235
19243 19236 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
19244 19237 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19245 19238 }
19246 19239
19247 19240
19248 19241
19249 19242 /*
19250 19243 * Function: sd_pkt_status_check_condition
19251 19244 *
19252 19245 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status.
19253 19246 *
19254 19247 * Context: May be called from interrupt context
19255 19248 */
19256 19249
19257 19250 static void
19258 19251 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
19259 19252 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19260 19253 {
19261 19254 ASSERT(un != NULL);
19262 19255 ASSERT(mutex_owned(SD_MUTEX(un)));
19263 19256 ASSERT(bp != NULL);
19264 19257 ASSERT(xp != NULL);
19265 19258 ASSERT(pktp != NULL);
19266 19259
19267 19260 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
19268 19261 "entry: buf:0x%p xp:0x%p\n", bp, xp);
19269 19262
19270 19263 /*
19271 19264 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
19272 19265 * command will be retried after the request sense). Otherwise, retry
19273 19266 * the command. Note: we are issuing the request sense even though the
19274 19267 * retry limit may have been reached for the failed command.
19275 19268 */
19276 19269 if (un->un_f_arq_enabled == FALSE) {
19277 19270 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
19278 19271 "no ARQ, sending request sense command\n");
19279 19272 sd_send_request_sense_command(un, bp, pktp);
19280 19273 } else {
19281 19274 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
19282 19275 "ARQ, retrying request sense command\n");
19283 19276 #if defined(__i386) || defined(__amd64)
19284 19277 /*
19285 19278 * The SD_RETRY_DELAY value needs to be adjusted here
19286 19279 * whenever SD_RETRY_DELAY changes in sddef.h.
19287 19280 */
19288 19281 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
19289 19282 un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
19290 19283 NULL);
19291 19284 #else
19292 19285 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
19293 19286 EIO, SD_RETRY_DELAY, NULL);
19294 19287 #endif
19295 19288 }
19296 19289
19297 19290 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
19298 19291 }
19299 19292
19300 19293
19301 19294 /*
19302 19295 * Function: sd_pkt_status_busy
19303 19296 *
19304 19297 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
19305 19298 *
19306 19299 * Context: May be called from interrupt context
19307 19300 */
19308 19301
19309 19302 static void
19310 19303 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
19311 19304 struct scsi_pkt *pktp)
19312 19305 {
19313 19306 ASSERT(un != NULL);
19314 19307 ASSERT(mutex_owned(SD_MUTEX(un)));
19315 19308 ASSERT(bp != NULL);
19316 19309 ASSERT(xp != NULL);
19317 19310 ASSERT(pktp != NULL);
19318 19311
19319 19312 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19320 19313 "sd_pkt_status_busy: entry\n");
19321 19314
19322 19315 /* If retries are exhausted, just fail the command. */
19323 19316 if (xp->xb_retry_count >= un->un_busy_retry_count) {
19324 19317 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
19325 19318 "device busy too long\n");
19326 19319 sd_return_failed_command(un, bp, EIO);
19327 19320 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19328 19321 "sd_pkt_status_busy: exit\n");
19329 19322 return;
19330 19323 }
19331 19324 xp->xb_retry_count++;
19332 19325
19333 19326 /*
19334 19327 * Try to reset the target. However, we do not want to perform
19335 19328 * more than one reset if the device continues to fail. The reset
19336 19329 * will be performed when the retry count reaches the reset
19337 19330 * threshold. This threshold should be set such that at least
19338 19331 * one retry is issued before the reset is performed.
19339 19332 */
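	/*
	 * The threshold below is clamped to a minimum of 2 so that at
	 * least one retry precedes the reset even when
	 * un_reset_retry_count is 0 or 1.
	 */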
19340 19333 if (xp->xb_retry_count ==
19341 19334 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
19342 19335 int rval = 0;
19343 19336 mutex_exit(SD_MUTEX(un));
19344 19337 if (un->un_f_allow_bus_device_reset == TRUE) {
19345 19338 /*
19346 19339 * First try to reset the LUN; if we cannot then
19347 19340 * try to reset the target.
19348 19341 */
19349 19342 if (un->un_f_lun_reset_enabled == TRUE) {
19350 19343 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19351 19344 "sd_pkt_status_busy: RESET_LUN\n");
19352 19345 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
19353 19346 }
19354 19347 if (rval == 0) {
19355 19348 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19356 19349 "sd_pkt_status_busy: RESET_TARGET\n");
19357 19350 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
19358 19351 }
19359 19352 }
19360 19353 if (rval == 0) {
19361 19354 /*
19362 19355 * If the RESET_LUN and/or RESET_TARGET failed,
19363 19356 * try RESET_ALL
19364 19357 */
19365 19358 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19366 19359 "sd_pkt_status_busy: RESET_ALL\n");
19367 19360 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);
19368 19361 }
19369 19362 mutex_enter(SD_MUTEX(un));
19370 19363 if (rval == 0) {
19371 19364 /*
19372 19365 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed.
19373 19366 * At this point we give up & fail the command.
19374 19367 */
19375 19368 sd_return_failed_command(un, bp, EIO);
19376 19369 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19377 19370 "sd_pkt_status_busy: exit (failed cmd)\n");
19378 19371 return;
19379 19372 }
19380 19373 }
19381 19374
19382 19375 /*
19383 19376 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as
19384 19377 * we have already checked the retry counts above.
19385 19378 */
19386 19379 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL,
19387 19380 EIO, un->un_busy_timeout, NULL);
19388 19381
19389 19382 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19390 19383 "sd_pkt_status_busy: exit\n");
19391 19384 }
19392 19385
19393 19386
19394 19387 /*
19395 19388 * Function: sd_pkt_status_reservation_conflict
19396 19389 *
19397 19390 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI
19398 19391 * command status.
19399 19392 *
19400 19393 * Context: May be called from interrupt context
19401 19394 */
19402 19395
19403 19396 static void
19404 19397 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp,
19405 19398 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19406 19399 {
19407 19400 ASSERT(un != NULL);
19408 19401 ASSERT(mutex_owned(SD_MUTEX(un)));
19409 19402 ASSERT(bp != NULL);
19410 19403 ASSERT(xp != NULL);
19411 19404 ASSERT(pktp != NULL);
19412 19405
19413 19406 /*
19414 19407 * If the command was PERSISTENT_RESERVATION_[IN|OUT], the reservation
19415 19408 * conflict could be due to various reasons: incorrect keys, not
19416 19409 * registered, not reserved, etc. So we return EACCES to the caller.
19417 19410 */
19418 19411 if (un->un_reservation_type == SD_SCSI3_RESERVATION) {
19419 19412 int cmd = SD_GET_PKT_OPCODE(pktp);
19420 19413 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) ||
19421 19414 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) {
19422 19415 sd_return_failed_command(un, bp, EACCES);
19423 19416 return;
19424 19417 }
19425 19418 }
19426 19419
19427 19420 un->un_resvd_status |= SD_RESERVATION_CONFLICT;
19428 19421
19429 19422 if ((un->un_resvd_status & SD_FAILFAST) != 0) {
19430 19423 if (sd_failfast_enable != 0) {
19431 19424 /* By definition, we must panic here.... */
19432 19425 sd_panic_for_res_conflict(un);
19433 19426 /*NOTREACHED*/
19434 19427 }
19435 19428 SD_ERROR(SD_LOG_IO, un,
19436 19429 "sd_handle_resv_conflict: Disk Reserved\n");
19437 19430 sd_return_failed_command(un, bp, EACCES);
19438 19431 return;
19439 19432 }
19440 19433
19441 19434 /*
19442 19435 * 1147670: retry only if sd_retry_on_reservation_conflict
19443 19436 * property is set (default is 1). Retries will not succeed
19444 19437 * on a disk reserved by another initiator. HA systems
19445 19438 * may set this to 0 via sd.conf to avoid these retries.
19446 19439 *
19447 19440 * Note: The legacy return code for this failure is EIO, however EACCES
19448 19441 * seems more appropriate for a reservation conflict.
19449 19442 */
19450 19443 if (sd_retry_on_reservation_conflict == 0) {
19451 19444 SD_ERROR(SD_LOG_IO, un,
19452 19445 "sd_handle_resv_conflict: Device Reserved\n");
19453 19446 sd_return_failed_command(un, bp, EIO);
19454 19447 return;
19455 19448 }
19456 19449
19457 19450 /*
19458 19451 * Retry the command if we can.
19459 19452 *
19460 19453 * Note: The legacy return code for this failure is EIO, however EACCES
19461 19454 * seems more appropriate for a reservation conflict.
19462 19455 */
19463 19456 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
19464 19457 (clock_t)2, NULL);
19465 19458 }
19466 19459
19467 19460
19468 19461
19469 19462 /*
19470 19463 * Function: sd_pkt_status_qfull
19471 19464 *
19472 19465 * Description: Handle a QUEUE FULL condition from the target. This can
19473 19466 * occur if the HBA does not handle the queue full condition.
19474 19467 * (Basically this means third-party HBAs, as Sun HBAs will
19475 19468 * handle the queue full condition.) Note that if there are
19476 19469 * some commands already in the transport, then the queue full
19477 19470 * has occurred because the queue for this nexus is actually
19478 19471 * full. If there are no commands in the transport, then the
19479 19472 * queue full is resulting from some other initiator or lun
19480 19473 * queue full results from some other initiator or LUN
19481 19474 *
19482 19475 * Context: May be called from interrupt context
19483 19476 */
19484 19477
19485 19478 static void
19486 19479 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
19487 19480 struct scsi_pkt *pktp)
19488 19481 {
19489 19482 ASSERT(un != NULL);
19490 19483 ASSERT(mutex_owned(SD_MUTEX(un)));
19491 19484 ASSERT(bp != NULL);
19492 19485 ASSERT(xp != NULL);
19493 19486 ASSERT(pktp != NULL);
19494 19487
19495 19488 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19496 19489 "sd_pkt_status_qfull: entry\n");
19497 19490
19498 19491 /*
19499 19492 * Just lower the QFULL throttle and retry the command. Note that
19500 19493 * we do not limit the number of retries here.
19501 19494 */
19502 19495 sd_reduce_throttle(un, SD_THROTTLE_QFULL);
19503 19496 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0,
19504 19497 SD_RESTART_TIMEOUT, NULL);
19505 19498
19506 19499 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19507 19500 "sd_pkt_status_qfull: exit\n");
19508 19501 }
19509 19502
19510 19503
19511 19504 /*
19512 19505 * Function: sd_reset_target
19513 19506 *
19514 19507 * Description: Issue a scsi_reset(9F), with either RESET_LUN,
19515 19508 * RESET_TARGET, or RESET_ALL.
19516 19509 *
19517 19510 * Context: May be called under interrupt context.
19518 19511 */
19519 19512
19520 19513 static void
19521 19514 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp)
19522 19515 {
19523 19516 int rval = 0;
19524 19517
19525 19518 ASSERT(un != NULL);
19526 19519 ASSERT(mutex_owned(SD_MUTEX(un)));
19527 19520 ASSERT(pktp != NULL);
19528 19521
19529 19522 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n");
19530 19523
19531 19524 /*
19532 19525 * No need to reset if the transport layer has already done so.
19533 19526 */
19534 19527 if ((pktp->pkt_statistics &
19535 19528 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) {
19536 19529 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19537 19530 "sd_reset_target: no reset\n");
19538 19531 return;
19539 19532 }
19540 19533
19541 19534 mutex_exit(SD_MUTEX(un));
19542 19535
19543 19536 if (un->un_f_allow_bus_device_reset == TRUE) {
19544 19537 if (un->un_f_lun_reset_enabled == TRUE) {
19545 19538 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19546 19539 "sd_reset_target: RESET_LUN\n");
19547 19540 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
19548 19541 }
19549 19542 if (rval == 0) {
19550 19543 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19551 19544 "sd_reset_target: RESET_TARGET\n");
19552 19545 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
19553 19546 }
19554 19547 }
19555 19548
19556 19549 if (rval == 0) {
19557 19550 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19558 19551 "sd_reset_target: RESET_ALL\n");
19559 19552 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
19560 19553 }
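/*
 * At this point the reset has escalated as far as necessary: a LUN
 * reset (if enabled), then a target reset, and finally a bus reset
 * as the last resort. scsi_reset(9F) returns 0 on failure, which is
 * what drives each fallback above.
 */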
19561 19554
19562 19555 mutex_enter(SD_MUTEX(un));
19563 19556
19564 19557 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n");
19565 19558 }
19566 19559
19567 19560 /*
19568 19561 * Function: sd_target_change_task
19569 19562 *
19570 19563 * Description: Handle dynamic target change
19571 19564 *
19572 19565 * Context: Executes in a taskq() thread context
19573 19566 */
19574 19567 static void
19575 19568 sd_target_change_task(void *arg)
19576 19569 {
19577 19570 struct sd_lun *un = arg;
19578 19571 uint64_t capacity;
19579 19572 diskaddr_t label_cap;
19580 19573 uint_t lbasize;
19581 19574 sd_ssc_t *ssc;
19582 19575
19583 19576 ASSERT(un != NULL);
19584 19577 ASSERT(!mutex_owned(SD_MUTEX(un)));
19585 19578
19586 19579 if ((un->un_f_blockcount_is_valid == FALSE) ||
19587 19580 (un->un_f_tgt_blocksize_is_valid == FALSE)) {
19588 19581 return;
19589 19582 }
19590 19583
19591 19584 ssc = sd_ssc_init(un);
19592 19585
19593 19586 if (sd_send_scsi_READ_CAPACITY(ssc, &capacity,
19594 19587 &lbasize, SD_PATH_DIRECT) != 0) {
19595 19588 SD_ERROR(SD_LOG_ERROR, un,
19596 19589 "sd_target_change_task: fail to read capacity\n");
19597 19590 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19598 19591 goto task_exit;
19599 19592 }
19600 19593
19601 19594 mutex_enter(SD_MUTEX(un));
19602 19595 if (capacity <= un->un_blockcount) {
19603 19596 mutex_exit(SD_MUTEX(un));
19604 19597 goto task_exit;
19605 19598 }
19606 19599
19607 19600 sd_update_block_info(un, lbasize, capacity);
19608 19601 mutex_exit(SD_MUTEX(un));
19609 19602
19610 19603 /*
19611 19604 * If lun is EFI labeled and lun capacity is greater than the
19612 19605 * capacity contained in the label, log a sys event.
19613 19606 */
19614 19607 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
19615 19608 (void*)SD_PATH_DIRECT) == 0) {
19616 19609 mutex_enter(SD_MUTEX(un));
19617 19610 if (un->un_f_blockcount_is_valid &&
19618 19611 un->un_blockcount > label_cap) {
19619 19612 mutex_exit(SD_MUTEX(un));
19620 19613 sd_log_lun_expansion_event(un, KM_SLEEP);
19621 19614 } else {
19622 19615 mutex_exit(SD_MUTEX(un));
19623 19616 }
19624 19617 }
19625 19618
19626 19619 task_exit:
19627 19620 sd_ssc_fini(ssc);
19628 19621 }
19629 19622
19630 19623
19631 19624 /*
19632 19625 * Function: sd_log_dev_status_event
19633 19626 *
19634 19627 * Description: Log EC_dev_status sysevent
19635 19628 *
19636 19629 * Context: Never called from interrupt context
19637 19630 */
19638 19631 static void
19639 19632 sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag)
19640 19633 {
19641 19634 int err;
19642 19635 char *path;
19643 19636 nvlist_t *attr_list;
19644 19637
19645 19638 /* Allocate and build sysevent attribute list */
19646 19639 err = nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, km_flag);
19647 19640 if (err != 0) {
19648 19641 SD_ERROR(SD_LOG_ERROR, un,
19649 19642 "sd_log_dev_status_event: fail to allocate space\n");
19650 19643 return;
19651 19644 }
19652 19645
19653 19646 path = kmem_alloc(MAXPATHLEN, km_flag);
19654 19647 if (path == NULL) {
19655 19648 nvlist_free(attr_list);
19656 19649 SD_ERROR(SD_LOG_ERROR, un,
19657 19650 "sd_log_dev_status_event: fail to allocate space\n");
19658 19651 return;
19659 19652 }
19660 19653 /*
19661 19654 * Add path attribute to identify the lun.
19662 19655 * We are using minor node 'a' as the sysevent attribute.
19663 19656 */
19664 19657 (void) snprintf(path, MAXPATHLEN, "/devices");
19665 19658 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path));
19666 19659 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path),
19667 19660 ":a");
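/*
 * For illustration only (the device path below is hypothetical): for
 * a disk at target 1, lun 0 the resulting attribute value would look
 * something like:
 *
 *	/devices/pci@0,0/pci1000,3060@10/sd@1,0:a
 */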
19668 19661
19669 19662 err = nvlist_add_string(attr_list, DEV_PHYS_PATH, path);
19670 19663 if (err != 0) {
19671 19664 nvlist_free(attr_list);
19672 19665 kmem_free(path, MAXPATHLEN);
19673 19666 SD_ERROR(SD_LOG_ERROR, un,
19674 19667 "sd_log_dev_status_event: fail to add attribute\n");
19675 19668 return;
19676 19669 }
19677 19670
19678 19671 /* Log dynamic lun expansion sysevent */
19679 19672 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS,
19680 19673 esc, attr_list, NULL, km_flag);
19681 19674 if (err != DDI_SUCCESS) {
19682 19675 SD_ERROR(SD_LOG_ERROR, un,
19683 19676 "sd_log_dev_status_event: fail to log sysevent\n");
19684 19677 }
19685 19678
19686 19679 nvlist_free(attr_list);
19687 19680 kmem_free(path, MAXPATHLEN);
19688 19681 }
19689 19682
19690 19683
19691 19684 /*
19692 19685 * Function: sd_log_lun_expansion_event
19693 19686 *
19694 19687 * Description: Log lun expansion sys event
19695 19688 *
19696 19689 * Context: Never called from interrupt context
19697 19690 */
19698 19691 static void
19699 19692 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag)
19700 19693 {
19701 19694 sd_log_dev_status_event(un, ESC_DEV_DLE, km_flag);
19702 19695 }
19703 19696
19704 19697
19705 19698 /*
19706 19699 * Function: sd_log_eject_request_event
19707 19700 *
19708 19701 * Description: Log eject request sysevent
19709 19702 *
19710 19703 * Context: Never called from interrupt context
19711 19704 */
19712 19705 static void
19713 19706 sd_log_eject_request_event(struct sd_lun *un, int km_flag)
19714 19707 {
19715 19708 sd_log_dev_status_event(un, ESC_DEV_EJECT_REQUEST, km_flag);
19716 19709 }
19717 19710
19718 19711
19719 19712 /*
19720 19713 * Function: sd_media_change_task
19721 19714 *
19722 19715 * Description: Recovery action for CDROM to become available.
19723 19716 *
19724 19717 * Context: Executes in a taskq() thread context
19725 19718 */
19726 19719
19727 19720 static void
19728 19721 sd_media_change_task(void *arg)
19729 19722 {
19730 19723 struct scsi_pkt *pktp = arg;
19731 19724 struct sd_lun *un;
19732 19725 struct buf *bp;
19733 19726 struct sd_xbuf *xp;
19734 19727 int err = 0;
19735 19728 int retry_count = 0;
19736 19729 int retry_limit = SD_UNIT_ATTENTION_RETRY/10;
19737 19730 struct sd_sense_info si;
19738 19731
19739 19732 ASSERT(pktp != NULL);
19740 19733 bp = (struct buf *)pktp->pkt_private;
19741 19734 ASSERT(bp != NULL);
19742 19735 xp = SD_GET_XBUF(bp);
19743 19736 ASSERT(xp != NULL);
19744 19737 un = SD_GET_UN(bp);
19745 19738 ASSERT(un != NULL);
19746 19739 ASSERT(!mutex_owned(SD_MUTEX(un)));
19747 19740 ASSERT(un->un_f_monitor_media_state);
19748 19741
19749 19742 si.ssi_severity = SCSI_ERR_INFO;
19750 19743 si.ssi_pfa_flag = FALSE;
19751 19744
19752 19745 /*
19753 19746 * When a reset is issued on a CDROM, it takes a long time to
19754 19747 * recover. First few attempts to read capacity and other things
19755 19748 * related to handling unit attention fail (with an ASC 0x4 and
19756 19749 * ASCQ 0x1). In that case we want to do enough retries, but we want
19757 19750 * to limit the retries in other cases of genuine failure, such as
19758 19751 * no media in the drive.
19759 19752 */
19760 19753 while (retry_count++ < retry_limit) {
19761 19754 if ((err = sd_handle_mchange(un)) == 0) {
19762 19755 break;
19763 19756 }
19764 19757 if (err == EAGAIN) {
19765 19758 retry_limit = SD_UNIT_ATTENTION_RETRY;
19766 19759 }
19767 19760 /* Sleep for 0.5 sec. & try again */
19768 19761 delay(drv_usectohz(500000));
19769 19762 }
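/*
 * Rough timing sketch, assuming SD_UNIT_ATTENTION_RETRY is 40 (see
 * sddef.h for the actual value): the loop initially allows 4 attempts
 * (about 2 seconds at 0.5 sec per attempt) and is extended to 40
 * attempts (about 20 seconds) once EAGAIN indicates the unit is still
 * becoming ready after a reset.
 */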
19770 19763
19771 19764 /*
19772 19765 * Dispatch (retry or fail) the original command here,
19773 19766 * along with appropriate console messages....
19774 19767 *
19775 19768 * Must grab the mutex before calling sd_retry_command,
19776 19769 * sd_print_sense_msg and sd_return_failed_command.
19777 19770 */
19778 19771 mutex_enter(SD_MUTEX(un));
19779 19772 if (err != SD_CMD_SUCCESS) {
19780 19773 SD_UPDATE_ERRSTATS(un, sd_harderrs);
19781 19774 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
19782 19775 si.ssi_severity = SCSI_ERR_FATAL;
19783 19776 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
19784 19777 sd_return_failed_command(un, bp, EIO);
19785 19778 } else {
19786 19779 sd_retry_command(un, bp, SD_RETRIES_UA, sd_print_sense_msg,
19787 19780 &si, EIO, (clock_t)0, NULL);
19788 19781 }
19789 19782 mutex_exit(SD_MUTEX(un));
19790 19783 }
19791 19784
19792 19785
19793 19786
19794 19787 /*
19795 19788 * Function: sd_handle_mchange
19796 19789 *
19797 19790 * Description: Perform geometry validation & other recovery when a CDROM
19798 19791 * has been removed from the drive.
19799 19792 *
19800 19793 * Return Code: 0 for success
19801 19794 * errno-type return code of either sd_send_scsi_DOORLOCK() or
19802 19795 * sd_send_scsi_READ_CAPACITY()
19803 19796 *
19804 19797 * Context: Executes in a taskq() thread context
19805 19798 */
19806 19799
19807 19800 static int
19808 19801 sd_handle_mchange(struct sd_lun *un)
19809 19802 {
19810 19803 uint64_t capacity;
19811 19804 uint32_t lbasize;
19812 19805 int rval;
19813 19806 sd_ssc_t *ssc;
19814 19807
19815 19808 ASSERT(!mutex_owned(SD_MUTEX(un)));
19816 19809 ASSERT(un->un_f_monitor_media_state);
19817 19810
19818 19811 ssc = sd_ssc_init(un);
19819 19812 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
19820 19813 SD_PATH_DIRECT_PRIORITY);
19821 19814
19822 19815 if (rval != 0)
19823 19816 goto failed;
19824 19817
19825 19818 mutex_enter(SD_MUTEX(un));
19826 19819 sd_update_block_info(un, lbasize, capacity);
19827 19820
19828 19821 if (un->un_errstats != NULL) {
19829 19822 struct sd_errstats *stp =
19830 19823 (struct sd_errstats *)un->un_errstats->ks_data;
19831 19824 stp->sd_capacity.value.ui64 = (uint64_t)
19832 19825 ((uint64_t)un->un_blockcount *
19833 19826 (uint64_t)un->un_tgt_blocksize);
19834 19827 }
19835 19828
19836 19829 /*
19837 19830 * Check if the media in the device is writable or not
19838 19831 */
19839 19832 if (ISCD(un)) {
19840 19833 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY);
19841 19834 }
19842 19835
19843 19836 /*
19844 19837 * Note: Maybe let the strategy/partitioning chain worry about getting
19845 19838 * valid geometry.
19846 19839 */
19847 19840 mutex_exit(SD_MUTEX(un));
19848 19841 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
19849 19842
19850 19843
19851 19844 if (cmlb_validate(un->un_cmlbhandle, 0,
19852 19845 (void *)SD_PATH_DIRECT_PRIORITY) != 0) {
19853 19846 sd_ssc_fini(ssc);
19854 19847 return (EIO);
19855 19848 } else {
19856 19849 if (un->un_f_pkstats_enabled) {
19857 19850 sd_set_pstats(un);
19858 19851 SD_TRACE(SD_LOG_IO_PARTITION, un,
19859 19852 "sd_handle_mchange: un:0x%p pstats created and "
19860 19853 "set\n", un);
19861 19854 }
19862 19855 }
19863 19856
19864 19857 /*
19865 19858 * Try to lock the door
19866 19859 */
19867 19860 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
19868 19861 SD_PATH_DIRECT_PRIORITY);
19869 19862 failed:
19870 19863 if (rval != 0)
19871 19864 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19872 19865 sd_ssc_fini(ssc);
19873 19866 return (rval);
19874 19867 }
19875 19868
19876 19869
19877 19870 /*
19878 19871 * Function: sd_send_scsi_DOORLOCK
19879 19872 *
19880 19873 * Description: Issue the scsi DOOR LOCK command
19881 19874 *
19882 19875 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
19883 19876 * structure for this target.
19884 19877 * flag - SD_REMOVAL_ALLOW
19885 19878 * SD_REMOVAL_PREVENT
19886 19879 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19887 19880 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19888 19881 * to use the USCSI "direct" chain and bypass the normal
19889 19882 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
19890 19883 * command is issued as part of an error recovery action.
19891 19884 *
19892 19885 * Return Code: 0 - Success
19893 19886 * errno return code from sd_ssc_send()
19894 19887 *
19895 19888 * Context: Can sleep.
19896 19889 */
19897 19890
19898 19891 static int
19899 19892 sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag)
19900 19893 {
19901 19894 struct scsi_extended_sense sense_buf;
19902 19895 union scsi_cdb cdb;
19903 19896 struct uscsi_cmd ucmd_buf;
19904 19897 int status;
19905 19898 struct sd_lun *un;
19906 19899
19907 19900 ASSERT(ssc != NULL);
19908 19901 un = ssc->ssc_un;
19909 19902 ASSERT(un != NULL);
19910 19903 ASSERT(!mutex_owned(SD_MUTEX(un)));
19911 19904
19912 19905 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un);
19913 19906
19914 19907 /* already determined doorlock is not supported, fake success */
19915 19908 if (un->un_f_doorlock_supported == FALSE) {
19916 19909 return (0);
19917 19910 }
19918 19911
19919 19912 /*
19920 19913 * If we are ejecting and see an SD_REMOVAL_PREVENT
19921 19914 * ignore the command so we can complete the eject
19922 19915 * operation.
19923 19916 */
19924 19917 if (flag == SD_REMOVAL_PREVENT) {
19925 19918 mutex_enter(SD_MUTEX(un));
19926 19919 if (un->un_f_ejecting == TRUE) {
19927 19920 mutex_exit(SD_MUTEX(un));
19928 19921 return (EAGAIN);
19929 19922 }
19930 19923 mutex_exit(SD_MUTEX(un));
19931 19924 }
19932 19925
19933 19926 bzero(&cdb, sizeof (cdb));
19934 19927 bzero(&ucmd_buf, sizeof (ucmd_buf));
19935 19928
19936 19929 cdb.scc_cmd = SCMD_DOORLOCK;
19937 19930 cdb.cdb_opaque[4] = (uchar_t)flag;
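/*
 * For reference: byte 4 bit 0 of the PREVENT ALLOW MEDIUM REMOVAL CDB
 * is the Prevent bit, so SD_REMOVAL_PREVENT locks the door and
 * SD_REMOVAL_ALLOW unlocks it (assuming their usual definitions of 1
 * and 0 in sddef.h).
 */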
19938 19931
19939 19932 ucmd_buf.uscsi_cdb = (char *)&cdb;
19940 19933 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
19941 19934 ucmd_buf.uscsi_bufaddr = NULL;
19942 19935 ucmd_buf.uscsi_buflen = 0;
19943 19936 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19944 19937 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
19945 19938 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
19946 19939 ucmd_buf.uscsi_timeout = 15;
19947 19940
19948 19941 SD_TRACE(SD_LOG_IO, un,
19949 19942 "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n");
19950 19943
19951 19944 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19952 19945 UIO_SYSSPACE, path_flag);
19953 19946
19954 19947 if (status == 0)
19955 19948 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
19956 19949
19957 19950 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) &&
19958 19951 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19959 19952 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) {
19960 19953 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19961 19954
19962 19955 /* fake success and skip subsequent doorlock commands */
19963 19956 un->un_f_doorlock_supported = FALSE;
19964 19957 return (0);
19965 19958 }
19966 19959
19967 19960 return (status);
19968 19961 }
19969 19962
19970 19963 /*
19971 19964 * Function: sd_send_scsi_READ_CAPACITY
19972 19965 *
19973 19966 * Description: This routine uses the scsi READ CAPACITY command to determine
19974 19967 * the device capacity in number of blocks and the device native
19975 19968 * block size. If this function returns a failure, then the
19976 19969 * values in *capp and *lbap are undefined. If the capacity
19977 19970 * returned is 0xffffffff then the lun is too large for a
19978 19971 * normal READ CAPACITY command and the results of a
19979 19972 * READ CAPACITY 16 will be used instead.
19980 19973 *
19981 19974 * Arguments: ssc - ssc contains ptr to soft state struct for the target
19982 19975 * capp - ptr to unsigned 64-bit variable to receive the
19983 19976 * capacity value from the command.
19984 19977 * lbap - ptr to unsigned 32-bit variable to receive the
19985 19978 * block size value from the command
19986 19979 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19987 19980 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19988 19981 * to use the USCSI "direct" chain and bypass the normal
19989 19982 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
19990 19983 * command is issued as part of an error recovery action.
19991 19984 *
19992 19985 * Return Code: 0 - Success
19993 19986 * EIO - IO error
19994 19987 * EACCES - Reservation conflict detected
19995 19988 * EAGAIN - Device is becoming ready
19996 19989 * errno return code from sd_ssc_send()
19997 19990 *
19998 19991 * Context: Can sleep. Blocks until command completes.
19999 19992 */
20000 19993
20001 19994 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity)
20002 19995
20003 19996 static int
20004 19997 sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap,
20005 19998 int path_flag)
20006 19999 {
20007 20000 struct scsi_extended_sense sense_buf;
20008 20001 struct uscsi_cmd ucmd_buf;
20009 20002 union scsi_cdb cdb;
20010 20003 uint32_t *capacity_buf;
20011 20004 uint64_t capacity;
20012 20005 uint32_t lbasize;
20013 20006 uint32_t pbsize;
20014 20007 int status;
20015 20008 struct sd_lun *un;
20016 20009
20017 20010 ASSERT(ssc != NULL);
20018 20011
20019 20012 un = ssc->ssc_un;
20020 20013 ASSERT(un != NULL);
20021 20014 ASSERT(!mutex_owned(SD_MUTEX(un)));
20022 20015 ASSERT(capp != NULL);
20023 20016 ASSERT(lbap != NULL);
20024 20017
20025 20018 SD_TRACE(SD_LOG_IO, un,
20026 20019 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
20027 20020
20028 20021 /*
20029 20022 * First send a READ_CAPACITY command to the target.
20030 20023 * (This command is mandatory under SCSI-2.)
20031 20024 *
20032 20025 * Set up the CDB for the READ_CAPACITY command. The Partial
20033 20026 * Medium Indicator bit is cleared. The address field must be
20034 20027 * zero if the PMI bit is zero.
20035 20028 */
20036 20029 bzero(&cdb, sizeof (cdb));
20037 20030 bzero(&ucmd_buf, sizeof (ucmd_buf));
20038 20031
20039 20032 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP);
20040 20033
20041 20034 cdb.scc_cmd = SCMD_READ_CAPACITY;
20042 20035
20043 20036 ucmd_buf.uscsi_cdb = (char *)&cdb;
20044 20037 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
20045 20038 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf;
20046 20039 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE;
20047 20040 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20048 20041 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
20049 20042 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20050 20043 ucmd_buf.uscsi_timeout = 60;
20051 20044
20052 20045 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20053 20046 UIO_SYSSPACE, path_flag);
20054 20047
20055 20048 switch (status) {
20056 20049 case 0:
20057 20050 /* Return failure if we did not get valid capacity data. */
20058 20051 if (ucmd_buf.uscsi_resid != 0) {
20059 20052 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20060 20053 "sd_send_scsi_READ_CAPACITY received invalid "
20061 20054 "capacity data");
20062 20055 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
20063 20056 return (EIO);
20064 20057 }
20065 20058 /*
20066 20059 * Read capacity and block size from the READ CAPACITY 10 data.
20067 20060 * This data may be adjusted later due to device specific
20068 20061 * issues.
20069 20062 *
20070 20063 * According to the SCSI spec, the READ CAPACITY 10
20071 20064 * command returns the following:
20072 20065 *
20073 20066 * bytes 0-3: Maximum logical block address available.
20074 20067 * (MSB in byte:0 & LSB in byte:3)
20075 20068 *
20076 20069 * bytes 4-7: Block length in bytes
20077 20070 * (MSB in byte:4 & LSB in byte:7)
20078 20071 *
20079 20072 */
20080 20073 capacity = BE_32(capacity_buf[0]);
20081 20074 lbasize = BE_32(capacity_buf[1]);
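/*
 * Worked example with hypothetical data: if the device returned the
 * bytes 00 00 3f ff | 00 00 02 00, BE_32() yields a maximum LBA of
 * 0x3fff (16383) and a block length of 0x200 (512 bytes); the
 * capacity becomes 16384 blocks after the +1 adjustment below.
 */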
20082 20075
20083 20076 /*
20084 20077 * Done with capacity_buf
20085 20078 */
20086 20079 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
20087 20080
20088 20081 /*
20089 20082 * if the reported capacity is set to all 0xf's, then
20090 20083 * this disk is too large and requires SBC-2 commands.
20091 20084 * Reissue the request using READ CAPACITY 16.
20092 20085 */
20093 20086 if (capacity == 0xffffffff) {
20094 20087 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
20095 20088 status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity,
20096 20089 &lbasize, &pbsize, path_flag);
20097 20090 if (status != 0) {
20098 20091 return (status);
20099 20092 } else {
20100 20093 goto rc16_done;
20101 20094 }
20102 20095 }
20103 20096 break; /* Success! */
20104 20097 case EIO:
20105 20098 switch (ucmd_buf.uscsi_status) {
20106 20099 case STATUS_RESERVATION_CONFLICT:
20107 20100 status = EACCES;
20108 20101 break;
20109 20102 case STATUS_CHECK:
20110 20103 /*
20111 20104 * Check condition; look for ASC/ASCQ of 0x04/0x01
20112 20105 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
20113 20106 */
20114 20107 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20115 20108 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
20116 20109 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
20117 20110 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
20118 20111 return (EAGAIN);
20119 20112 }
20120 20113 break;
20121 20114 default:
20122 20115 break;
20123 20116 }
20124 20117 /* FALLTHRU */
20125 20118 default:
20126 20119 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
20127 20120 return (status);
20128 20121 }
20129 20122
20130 20123 /*
20131 20124 * Some ATAPI CD-ROM drives report inaccurate LBA size values
20132 20125 * (2352 and 0 are common) so for these devices always force the value
20133 20126 * to 2048 as required by the ATAPI specs.
20134 20127 */
20135 20128 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
20136 20129 lbasize = 2048;
20137 20130 }
20138 20131
20139 20132 /*
20140 20133 * Get the maximum LBA value from the READ CAPACITY data.
20141 20134 * Here we assume that the Partial Medium Indicator (PMI) bit
20142 20135 * was cleared when issuing the command. This means that the LBA
20143 20136 * returned from the device is the LBA of the last logical block
20144 20137 * on the logical unit. The actual logical block count will be
20145 20138 * this value plus one.
20146 20139 */
20147 20140 capacity += 1;
20148 20141
20149 20142 /*
20150 20143 * Currently, for removable media, the capacity is saved in terms
20151 20144 * of un->un_sys_blocksize, so scale the capacity value to reflect this.
20152 20145 */
20153 20146 if (un->un_f_has_removable_media)
20154 20147 capacity *= (lbasize / un->un_sys_blocksize);
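/*
 * Example of the scaling above, with assumed values: a CD-ROM
 * reporting a 2048-byte lbasize against a 512-byte un_sys_blocksize
 * has its capacity multiplied by 4, so the saved count is in system
 * blocks rather than media blocks.
 */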
20155 20148
20156 20149 rc16_done:
20157 20150
20158 20151 /*
20159 20152 * Copy the values from the READ CAPACITY command into the space
20160 20153 * provided by the caller.
20161 20154 */
20162 20155 *capp = capacity;
20163 20156 *lbap = lbasize;
20164 20157
20165 20158 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
20166 20159 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
20167 20160
20168 20161 /*
20169 20162 * Both the lbasize and capacity from the device must be nonzero,
20170 20163 * otherwise we assume that the values are not valid and return
20171 20164 * failure to the caller. (4203735)
20172 20165 */
20173 20166 if ((capacity == 0) || (lbasize == 0)) {
20174 20167 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20175 20168 "sd_send_scsi_READ_CAPACITY received invalid value "
20176 20169 "capacity %llu lbasize %d", capacity, lbasize);
20177 20170 return (EIO);
20178 20171 }
20179 20172 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20180 20173 return (0);
20181 20174 }
20182 20175
20183 20176 /*
20184 20177 * Function: sd_send_scsi_READ_CAPACITY_16
20185 20178 *
20186 20179 * Description: This routine uses the scsi READ CAPACITY 16 command to
20187 20180 * determine the device capacity in number of blocks and the
20188 20181 * device native block size. If this function returns a failure,
20189 20182 * then the values in *capp and *lbap are undefined.
20190 20183 * This routine should be called by sd_send_scsi_READ_CAPACITY
20191 20184 * which will apply any device specific adjustments to capacity
20192 20185 * and lbasize. One exception is it is also called by
20193 20186 * sd_get_media_info_ext. In that function, there is no need to
20194 20187 * adjust the capacity and lbasize.
20195 20188 *
20196 20189 * Arguments: ssc - ssc contains ptr to soft state struct for the target
20197 20190 * capp - ptr to unsigned 64-bit variable to receive the
20198 20191 * capacity value from the command.
20199 20192 * lbap - ptr to unsigned 32-bit variable to receive the
20200 20193 * block size value from the command
20201 20194 * psp - ptr to unsigned 32-bit variable to receive the
20202 20195 * physical block size value from the command
20203 20196 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20204 20197 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20205 20198 * to use the USCSI "direct" chain and bypass the normal
20206 20199 * command waitq. SD_PATH_DIRECT_PRIORITY is used when
20207 20200 * this command is issued as part of an error recovery
20208 20201 * action.
20209 20202 *
20210 20203 * Return Code: 0 - Success
20211 20204 * EIO - IO error
20212 20205 * EACCES - Reservation conflict detected
20213 20206 * EAGAIN - Device is becoming ready
20214 20207 * errno return code from sd_ssc_send()
20215 20208 *
20216 20209 * Context: Can sleep. Blocks until command completes.
20217 20210 */
20218 20211
20219 20212 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16)
20220 20213
20221 20214 static int
20222 20215 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap,
20223 20216 uint32_t *psp, int path_flag)
20224 20217 {
20225 20218 struct scsi_extended_sense sense_buf;
20226 20219 struct uscsi_cmd ucmd_buf;
20227 20220 union scsi_cdb cdb;
20228 20221 uint64_t *capacity16_buf;
20229 20222 uint64_t capacity;
20230 20223 uint32_t lbasize;
20231 20224 uint32_t pbsize;
20232 20225 uint32_t lbpb_exp;
20233 20226 int status;
20234 20227 struct sd_lun *un;
20235 20228
20236 20229 ASSERT(ssc != NULL);
20237 20230
20238 20231 un = ssc->ssc_un;
20239 20232 ASSERT(un != NULL);
20240 20233 ASSERT(!mutex_owned(SD_MUTEX(un)));
20241 20234 ASSERT(capp != NULL);
20242 20235 ASSERT(lbap != NULL);
20243 20236
20244 20237 SD_TRACE(SD_LOG_IO, un,
20245 20238 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
20246 20239
20247 20240 /*
20248 20241 * First send a READ_CAPACITY_16 command to the target.
20249 20242 *
20250 20243 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
20251 20244 * Medium Indicator bit is cleared. The address field must be
20252 20245 * zero if the PMI bit is zero.
20253 20246 */
20254 20247 bzero(&cdb, sizeof (cdb));
20255 20248 bzero(&ucmd_buf, sizeof (ucmd_buf));
20256 20249
20257 20250 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
20258 20251
20259 20252 ucmd_buf.uscsi_cdb = (char *)&cdb;
20260 20253 ucmd_buf.uscsi_cdblen = CDB_GROUP4;
20261 20254 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
20262 20255 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
20263 20256 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20264 20257 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
20265 20258 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20266 20259 ucmd_buf.uscsi_timeout = 60;
20267 20260
20268 20261 /*
20269 20262 * Read Capacity (16) is a Service Action In command. One
20270 20263 * command byte (0x9E) is overloaded for multiple operations,
20271 20264 * with the second CDB byte specifying the desired operation
20272 20265 */
20273 20266 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
20274 20267 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
20275 20268
20276 20269 /*
20277 20270 * Fill in allocation length field
20278 20271 */
20279 20272 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
20280 20273
20281 20274 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20282 20275 UIO_SYSSPACE, path_flag);
20283 20276
20284 20277 switch (status) {
20285 20278 case 0:
20286 20279 /* Return failure if we did not get valid capacity data. */
20287 20280 if (ucmd_buf.uscsi_resid > 20) {
20288 20281 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20289 20282 "sd_send_scsi_READ_CAPACITY_16 received invalid "
20290 20283 "capacity data");
20291 20284 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20292 20285 return (EIO);
20293 20286 }
20294 20287
20295 20288 /*
20296 20289 * Read capacity and block size from the READ CAPACITY 16 data.
20297 20290 * This data may be adjusted later due to device specific
20298 20291 * issues.
20299 20292 *
20300 20293 * According to the SCSI spec, the READ CAPACITY 16
20301 20294 * command returns the following:
20302 20295 *
20303 20296 * bytes 0-7: Maximum logical block address available.
20304 20297 * (MSB in byte:0 & LSB in byte:7)
20305 20298 *
20306 20299 * bytes 8-11: Block length in bytes
20307 20300 * (MSB in byte:8 & LSB in byte:11)
20308 20301 *
20309 20302 * byte 13: LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT
20310 20303 */
20311 20304 capacity = BE_64(capacity16_buf[0]);
20312 20305 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
20313 20306 lbpb_exp = (BE_64(capacity16_buf[1]) >> 16) & 0x0f;
20314 20307
20315 20308 pbsize = lbasize << lbpb_exp;
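/*
 * Worked example with assumed values: a 512e drive reports lbasize
 * 512 and a LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT of 3, giving
 * pbsize = 512 << 3 = 4096 bytes.
 */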
20316 20309
20317 20310 /*
20318 20311 * Done with capacity16_buf
20319 20312 */
20320 20313 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20321 20314
20322 20315 /*
20323 20316 * if the reported capacity is set to all 0xf's, then
20324 20317 * this disk is too large. This could only happen with
20325 20318 * a device that supports LBAs larger than 64 bits which
20326 20319 * are not defined by any current T10 standards.
20327 20320 */
20328 20321 if (capacity == 0xffffffffffffffff) {
20329 20322 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20330 20323 "disk is too large");
20331 20324 return (EIO);
20332 20325 }
20333 20326 break; /* Success! */
20334 20327 case EIO:
20335 20328 switch (ucmd_buf.uscsi_status) {
20336 20329 case STATUS_RESERVATION_CONFLICT:
20337 20330 status = EACCES;
20338 20331 break;
20339 20332 case STATUS_CHECK:
20340 20333 /*
20341 20334 * Check condition; look for ASC/ASCQ of 0x04/0x01
20342 20335 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
20343 20336 */
20344 20337 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20345 20338 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
20346 20339 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
20347 20340 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20348 20341 return (EAGAIN);
20349 20342 }
20350 20343 break;
20351 20344 default:
20352 20345 break;
20353 20346 }
20354 20347 /* FALLTHRU */
20355 20348 default:
20356 20349 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20357 20350 return (status);
20358 20351 }
20359 20352
20360 20353 /*
20361 20354 * Some ATAPI CD-ROM drives report inaccurate LBA size values
20362 20355 * (2352 and 0 are common) so for these devices always force the value
20363 20356 * to 2048 as required by the ATAPI specs.
20364 20357 */
20365 20358 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
20366 20359 lbasize = 2048;
20367 20360 }
20368 20361
20369 20362 /*
20370 20363 * Get the maximum LBA value from the READ CAPACITY 16 data.
20371 20364 * Here we assume that the Partial Medium Indicator (PMI) bit
20372 20365 * was cleared when issuing the command. This means that the LBA
20373 20366 * returned from the device is the LBA of the last logical block
20374 20367 * on the logical unit. The actual logical block count will be
20375 20368 * this value plus one.
20376 20369 */
20377 20370 capacity += 1;
20378 20371
20379 20372 /*
20380 20373 * Currently, for removable media, the capacity is saved in terms
20381 20374 * of un->un_sys_blocksize, so scale the capacity value to reflect this.
20382 20375 */
20383 20376 if (un->un_f_has_removable_media)
20384 20377 capacity *= (lbasize / un->un_sys_blocksize);
20385 20378
20386 20379 *capp = capacity;
20387 20380 *lbap = lbasize;
20388 20381 *psp = pbsize;
20389 20382
20390 20383 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
20391 20384 "capacity:0x%llx lbasize:0x%x, pbsize: 0x%x\n",
20392 20385 capacity, lbasize, pbsize);
20393 20386
20394 20387 if ((capacity == 0) || (lbasize == 0) || (pbsize == 0)) {
20395 20388 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20396 20389 "sd_send_scsi_READ_CAPACITY_16 received invalid value "
20397 20390 "capacity %llu lbasize %d pbsize %d", capacity, lbasize, pbsize);
20398 20391 return (EIO);
20399 20392 }
20400 20393
20401 20394 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20402 20395 return (0);
20403 20396 }
20404 20397
20405 20398
20406 20399 /*
20407 20400 * Function: sd_send_scsi_START_STOP_UNIT
20408 20401 *
20409 20402 * Description: Issue a scsi START STOP UNIT command to the target.
20410 20403 *
20411 20404 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20412 20405 * structure for this target.
20413 20406 * pc_flag - SD_POWER_CONDITION
20414 20407 * SD_START_STOP
20415 20408 * flag - SD_TARGET_START
20416 20409 * SD_TARGET_STOP
20417 20410 * SD_TARGET_EJECT
20418 20411 * SD_TARGET_CLOSE
20419 20412 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20420 20413 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20421 20414 * to use the USCSI "direct" chain and bypass the normal
20422 20415 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
20423 20416 * command is issued as part of an error recovery action.
20424 20417 *
20425 20418 * Return Code: 0 - Success
20426 20419 * EIO - IO error
20427 20420 * EACCES - Reservation conflict detected
20428 20421 * ENXIO - Not Ready, medium not present
20429 20422 * errno return code from sd_ssc_send()
20430 20423 *
20431 20424 * Context: Can sleep.
20432 20425 */
20433 20426
20434 20427 static int
20435 20428 sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, int flag,
20436 20429 int path_flag)
20437 20430 {
20438 20431 struct scsi_extended_sense sense_buf;
20439 20432 union scsi_cdb cdb;
20440 20433 struct uscsi_cmd ucmd_buf;
20441 20434 int status;
20442 20435 struct sd_lun *un;
20443 20436
20444 20437 ASSERT(ssc != NULL);
20445 20438 un = ssc->ssc_un;
20446 20439 ASSERT(un != NULL);
20447 20440 ASSERT(!mutex_owned(SD_MUTEX(un)));
20448 20441
20449 20442 SD_TRACE(SD_LOG_IO, un,
20450 20443 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un);
20451 20444
20452 20445 if (un->un_f_check_start_stop &&
20453 20446 (pc_flag == SD_START_STOP) &&
20454 20447 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) &&
20455 20448 (un->un_f_start_stop_supported != TRUE)) {
20456 20449 return (0);
20457 20450 }
20458 20451
20459 20452 /*
20460 20453 * If we are performing an eject operation and
20461 20454 * we receive any command other than SD_TARGET_EJECT
20462 20455 * we should immediately return.
20463 20456 */
20464 20457 if (flag != SD_TARGET_EJECT) {
20465 20458 mutex_enter(SD_MUTEX(un));
20466 20459 if (un->un_f_ejecting == TRUE) {
20467 20460 mutex_exit(SD_MUTEX(un));
20468 20461 return (EAGAIN);
20469 20462 }
20470 20463 mutex_exit(SD_MUTEX(un));
20471 20464 }
20472 20465
20473 20466 bzero(&cdb, sizeof (cdb));
20474 20467 bzero(&ucmd_buf, sizeof (ucmd_buf));
20475 20468 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20476 20469
20477 20470 cdb.scc_cmd = SCMD_START_STOP;
20478 20471 cdb.cdb_opaque[4] = (pc_flag == SD_POWER_CONDITION) ?
20479 20472 (uchar_t)(flag << 4) : (uchar_t)flag;
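/*
 * For reference: byte 4 of the START STOP UNIT CDB carries the POWER
 * CONDITION field in bits 7:4 and the START and LOEJ bits in bits 0
 * and 1, which is why a power condition value is shifted left by 4
 * above while a start/stop/eject flag is used as-is.
 */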
20480 20473
20481 20474 ucmd_buf.uscsi_cdb = (char *)&cdb;
20482 20475 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
20483 20476 ucmd_buf.uscsi_bufaddr = NULL;
20484 20477 ucmd_buf.uscsi_buflen = 0;
20485 20478 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20486 20479 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20487 20480 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
20488 20481 ucmd_buf.uscsi_timeout = 200;
20489 20482
20490 20483 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20491 20484 UIO_SYSSPACE, path_flag);
20492 20485
20493 20486 switch (status) {
20494 20487 case 0:
20495 20488 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20496 20489 break; /* Success! */
20497 20490 case EIO:
20498 20491 switch (ucmd_buf.uscsi_status) {
20499 20492 case STATUS_RESERVATION_CONFLICT:
20500 20493 status = EACCES;
20501 20494 break;
20502 20495 case STATUS_CHECK:
20503 20496 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) {
20504 20497 switch (scsi_sense_key(
20505 20498 (uint8_t *)&sense_buf)) {
20506 20499 case KEY_ILLEGAL_REQUEST:
20507 20500 status = ENOTSUP;
20508 20501 break;
20509 20502 case KEY_NOT_READY:
20510 20503 if (scsi_sense_asc(
20511 20504 (uint8_t *)&sense_buf)
20512 20505 == 0x3A) {
20513 20506 status = ENXIO;
20514 20507 }
20515 20508 break;
20516 20509 default:
20517 20510 break;
20518 20511 }
20519 20512 }
20520 20513 break;
20521 20514 default:
20522 20515 break;
20523 20516 }
20524 20517 break;
20525 20518 default:
20526 20519 break;
20527 20520 }
20528 20521
20529 20522 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n");
20530 20523
20531 20524 return (status);
20532 20525 }
20533 20526
20534 20527
20535 20528 /*
20536 20529 * Function: sd_start_stop_unit_callback
20537 20530 *
20538 20531 * Description: timeout(9F) callback to begin recovery process for a
20539 20532 * device that has spun down.
20540 20533 *
20541 20534 * Arguments: arg - pointer to associated softstate struct.
20542 20535 *
20543 20536 * Context: Executes in a timeout(9F) thread context
20544 20537 */
20545 20538
20546 20539 static void
20547 20540 sd_start_stop_unit_callback(void *arg)
20548 20541 {
20549 20542 struct sd_lun *un = arg;
20550 20543 ASSERT(un != NULL);
20551 20544 ASSERT(!mutex_owned(SD_MUTEX(un)));
20552 20545
20553 20546 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n");
20554 20547
20555 20548 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP);
20556 20549 }
20557 20550
20558 20551
20559 20552 /*
20560 20553 * Function: sd_start_stop_unit_task
20561 20554 *
20562 20555 * Description: Recovery procedure when a drive is spun down.
20563 20556 *
20564 20557 * Arguments: arg - pointer to associated softstate struct.
20565 20558 *
20566 20559 * Context: Executes in a taskq() thread context
20567 20560 */
20568 20561
20569 20562 static void
20570 20563 sd_start_stop_unit_task(void *arg)
20571 20564 {
20572 20565 struct sd_lun *un = arg;
20573 20566 sd_ssc_t *ssc;
20574 20567 int power_level;
20575 20568 int rval;
20576 20569
20577 20570 ASSERT(un != NULL);
20578 20571 ASSERT(!mutex_owned(SD_MUTEX(un)));
20579 20572
20580 20573 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n");
20581 20574
20582 20575 /*
20583 20576 * Some unformatted drives report a not-ready error; there is no
20584 20577 * need to restart if a format has been initiated.
20585 20578 */
20586 20579 mutex_enter(SD_MUTEX(un));
20587 20580 if (un->un_f_format_in_progress == TRUE) {
20588 20581 mutex_exit(SD_MUTEX(un));
20589 20582 return;
20590 20583 }
20591 20584 mutex_exit(SD_MUTEX(un));
20592 20585
20593 20586 ssc = sd_ssc_init(un);
20594 20587 /*
20595 20588 * When a START STOP command is issued from here, it is part of a
20596 20589 * failure recovery operation and must be issued before any other
20597 20590 * commands, including any pending retries. Thus it must be sent
20598 20591 * using SD_PATH_DIRECT_PRIORITY. Whether or not the spin up
20599 20592 * succeeds, we will start I/O after the attempt.
20600 20593 * If power condition is supported and the current power level
20601 20594 * is capable of performing I/O, we should set the power condition
20602 20595 * to that level. Otherwise, set the power condition to ACTIVE.
20603 20596 */
20604 20597 if (un->un_f_power_condition_supported) {
20605 20598 mutex_enter(SD_MUTEX(un));
20606 20599 ASSERT(SD_PM_IS_LEVEL_VALID(un, un->un_power_level));
20607 20600 power_level = sd_pwr_pc.ran_perf[un->un_power_level]
20608 20601 > 0 ? un->un_power_level : SD_SPINDLE_ACTIVE;
20609 20602 mutex_exit(SD_MUTEX(un));
20610 20603 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION,
20611 20604 sd_pl2pc[power_level], SD_PATH_DIRECT_PRIORITY);
20612 20605 } else {
20613 20606 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
20614 20607 SD_TARGET_START, SD_PATH_DIRECT_PRIORITY);
20615 20608 }
20616 20609
20617 20610 if (rval != 0)
20618 20611 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
20619 20612 sd_ssc_fini(ssc);
20620 20613 /*
20621 20614 * The above call blocks until the START_STOP_UNIT command completes.
20622 20615 * Now that it has completed, we must re-try the original IO that
20623 20616 * received the NOT READY condition in the first place. There are
20624 20617 * three possible conditions here:
20625 20618 *
20626 20619 * (1) The original IO is on un_retry_bp.
20627 20620 * (2) The original IO is on the regular wait queue, and un_retry_bp
20628 20621 * is NULL.
20629 20622 * (3) The original IO is on the regular wait queue, and un_retry_bp
20630 20623 * points to some other, unrelated bp.
20631 20624 *
20632 20625 * For each case, we must call sd_start_cmds() with un_retry_bp
20633 20626 * as the argument. If un_retry_bp is NULL, this will initiate
20634 20627 * processing of the regular wait queue. If un_retry_bp is not NULL,
20635 20628 * then this will process the bp on un_retry_bp. That may or may not
20636 20629 * be the original IO, but that does not matter: the important thing
20637 20630 * is to keep the IO processing going at this point.
20638 20631 *
20639 20632 * Note: This is a very specific error recovery sequence associated
20640 20633 * with a drive that is not spun up. We attempt a START_STOP_UNIT and
20641 20634 * serialize the I/O with completion of the spin-up.
20642 20635 */
20643 20636 mutex_enter(SD_MUTEX(un));
20644 20637 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
20645 20638 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n",
20646 20639 un, un->un_retry_bp);
20647 20640 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */
20648 20641 sd_start_cmds(un, un->un_retry_bp);
20649 20642 mutex_exit(SD_MUTEX(un));
20650 20643
20651 20644 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n");
20652 20645 }
20653 20646
20654 20647
20655 20648 /*
20656 20649 * Function: sd_send_scsi_INQUIRY
20657 20650 *
20658 20651 * Description: Issue the scsi INQUIRY command.
20659 20652 *
20660 20653 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20661 20654 * structure for this target.
20662 20655 * bufaddr
20663 20656 * buflen
20664 20657 * evpd
20665 20658 * page_code
20666 20659 * page_length
20667 20660 *
20668 20661 * Return Code: 0 - Success
20669 20662 * errno return code from sd_ssc_send()
20670 20663 *
20671 20664 * Context: Can sleep. Does not return until command is completed.
20672 20665 */
20673 20666
20674 20667 static int
20675 20668 sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen,
20676 20669 uchar_t evpd, uchar_t page_code, size_t *residp)
20677 20670 {
20678 20671 union scsi_cdb cdb;
20679 20672 struct uscsi_cmd ucmd_buf;
20680 20673 int status;
20681 20674 struct sd_lun *un;
20682 20675
20683 20676 ASSERT(ssc != NULL);
20684 20677 un = ssc->ssc_un;
20685 20678 ASSERT(un != NULL);
20686 20679 ASSERT(!mutex_owned(SD_MUTEX(un)));
20687 20680 ASSERT(bufaddr != NULL);
20688 20681
20689 20682 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);
20690 20683
20691 20684 bzero(&cdb, sizeof (cdb));
20692 20685 bzero(&ucmd_buf, sizeof (ucmd_buf));
20693 20686 bzero(bufaddr, buflen);
20694 20687
20695 20688 cdb.scc_cmd = SCMD_INQUIRY;
20696 20689 cdb.cdb_opaque[1] = evpd;
20697 20690 cdb.cdb_opaque[2] = page_code;
20698 20691 FORMG0COUNT(&cdb, buflen);
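/*
 * Sketch of a typical VPD request through this routine; the buffer
 * names here are placeholders: sd_send_scsi_INQUIRY(ssc, inqb,
 * inqlen, 0x01, 0x83, &resid) would set the EVPD bit in CDB byte 1
 * and request page 0x83, the Device Identification VPD page.
 */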
20699 20692
20700 20693 ucmd_buf.uscsi_cdb = (char *)&cdb;
20701 20694 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
20702 20695 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
20703 20696 ucmd_buf.uscsi_buflen = buflen;
20704 20697 ucmd_buf.uscsi_rqbuf = NULL;
20705 20698 ucmd_buf.uscsi_rqlen = 0;
20706 20699 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
20707 20700 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */
20708 20701
20709 20702 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20710 20703 UIO_SYSSPACE, SD_PATH_DIRECT);
20711 20704
20712 20705 /*
20713 20706 * Only handle status == 0 here; the upper-level caller will make
20714 20707 * a different assessment based on the context.
20715 20708 */
20716 20709 if (status == 0)
20717 20710 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20718 20711
20719 20712 if ((status == 0) && (residp != NULL)) {
20720 20713 *residp = ucmd_buf.uscsi_resid;
20721 20714 }
20722 20715
20723 20716 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");
20724 20717
20725 20718 return (status);
20726 20719 }
20727 20720
20728 20721
20729 20722 /*
20730 20723 * Function: sd_send_scsi_TEST_UNIT_READY
20731 20724 *
20732 20725 * Description: Issue the scsi TEST UNIT READY command.
20733 20726 * This routine can be told to set the flag USCSI_DIAGNOSE to
20734 20727 * prevent retrying failed commands. Use this when the intent
20735 20728 * is either to check for device readiness, to clear a Unit
20736 20729 * Attention, or to clear any outstanding sense data.
20737 20730 * However under specific conditions the expected behavior
20738 20731 * is for retries to bring a device ready, so use the flag
20739 20732 * with caution.
20740 20733 *
20741 20734 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20742 20735 * structure for this target.
20743 20736 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present
20744 20737 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
20745 20738 * 0: don't check for media present, do retries on cmd.
20746 20739 *
20747 20740 * Return Code: 0 - Success
20748 20741 * EIO - IO error
20749 20742 * EACCES - Reservation conflict detected
20750 20743 * ENXIO - Not Ready, medium not present
20751 20744 * errno return code from sd_ssc_send()
20752 20745 *
20753 20746 * Context: Can sleep. Does not return until command is completed.
20754 20747 */
20755 20748
20756 20749 static int
20757 20750 sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag)
20758 20751 {
20759 20752 struct scsi_extended_sense sense_buf;
20760 20753 union scsi_cdb cdb;
20761 20754 struct uscsi_cmd ucmd_buf;
20762 20755 int status;
20763 20756 struct sd_lun *un;
20764 20757
20765 20758 ASSERT(ssc != NULL);
20766 20759 un = ssc->ssc_un;
20767 20760 ASSERT(un != NULL);
20768 20761 ASSERT(!mutex_owned(SD_MUTEX(un)));
20769 20762
20770 20763 SD_TRACE(SD_LOG_IO, un,
20771 20764 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);
20772 20765
20773 20766 /*
20774 20767 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
20775 20768 * timeouts when they receive a TUR and the queue is not empty. Check
20776 20769 * the configuration flag set during attach (indicating the drive has
20777 20770 * this firmware bug) and un_ncmds_in_transport before issuing the
20778 20771 * TUR. If there are pending commands, return success; this is a bit
20779 20772 * arbitrary but is ok for non-removables (i.e. the eliteI disks) and
20780 20773 * non-clustering configurations.
20782 20775 */
20783 20776 if (un->un_f_cfg_tur_check == TRUE) {
20784 20777 mutex_enter(SD_MUTEX(un));
20785 20778 if (un->un_ncmds_in_transport != 0) {
20786 20779 mutex_exit(SD_MUTEX(un));
20787 20780 return (0);
20788 20781 }
20789 20782 mutex_exit(SD_MUTEX(un));
20790 20783 }
20791 20784
20792 20785 bzero(&cdb, sizeof (cdb));
20793 20786 bzero(&ucmd_buf, sizeof (ucmd_buf));
20794 20787 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20795 20788
20796 20789 cdb.scc_cmd = SCMD_TEST_UNIT_READY;
20797 20790
20798 20791 ucmd_buf.uscsi_cdb = (char *)&cdb;
20799 20792 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
20800 20793 ucmd_buf.uscsi_bufaddr = NULL;
20801 20794 ucmd_buf.uscsi_buflen = 0;
20802 20795 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20803 20796 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20804 20797 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
20805 20798
20806 20799 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */
20807 20800 if ((flag & SD_DONT_RETRY_TUR) != 0) {
20808 20801 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE;
20809 20802 }
20810 20803 ucmd_buf.uscsi_timeout = 60;
20811 20804
20812 20805 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20813 20806 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT :
20814 20807 SD_PATH_STANDARD));
20815 20808
20816 20809 switch (status) {
20817 20810 case 0:
20818 20811 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20819 20812 break; /* Success! */
20820 20813 case EIO:
20821 20814 switch (ucmd_buf.uscsi_status) {
20822 20815 case STATUS_RESERVATION_CONFLICT:
20823 20816 status = EACCES;
20824 20817 break;
20825 20818 case STATUS_CHECK:
20826 20819 if ((flag & SD_CHECK_FOR_MEDIA) == 0) {
20827 20820 break;
20828 20821 }
20829 20822 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20830 20823 (scsi_sense_key((uint8_t *)&sense_buf) ==
20831 20824 KEY_NOT_READY) &&
20832 20825 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) {
20833 20826 status = ENXIO;
20834 20827 }
20835 20828 break;
20836 20829 default:
20837 20830 break;
20838 20831 }
20839 20832 break;
20840 20833 default:
20841 20834 break;
20842 20835 }
20843 20836
20844 20837 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n");
20845 20838
20846 20839 return (status);
20847 20840 }
20848 20841
20849 20842 /*
20850 20843 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN
20851 20844 *
20852 20845 * Description: Issue the scsi PERSISTENT RESERVE IN command.
20853 20846 *
20854 20847 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20855 20848 * structure for this target.
20856 20849 *
20857 20850 * Return Code: 0 - Success
20858 20851 * EACCES
20859 20852 * ENOTSUP
20860 20853 * errno return code from sd_ssc_send()
20861 20854 *
20862 20855 * Context: Can sleep. Does not return until command is completed.
20863 20856 */
20864 20857
20865 20858 static int
20866 20859 sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd,
20867 20860 uint16_t data_len, uchar_t *data_bufp)
20868 20861 {
20869 20862 struct scsi_extended_sense sense_buf;
20870 20863 union scsi_cdb cdb;
20871 20864 struct uscsi_cmd ucmd_buf;
20872 20865 int status;
20873 20866 int no_caller_buf = FALSE;
20874 20867 struct sd_lun *un;
20875 20868
20876 20869 ASSERT(ssc != NULL);
20877 20870 un = ssc->ssc_un;
20878 20871 ASSERT(un != NULL);
20879 20872 ASSERT(!mutex_owned(SD_MUTEX(un)));
20880 20873 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV));
20881 20874
20882 20875 SD_TRACE(SD_LOG_IO, un,
20883 20876 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un);
20884 20877
20885 20878 bzero(&cdb, sizeof (cdb));
20886 20879 bzero(&ucmd_buf, sizeof (ucmd_buf));
20887 20880 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20888 20881 if (data_bufp == NULL) {
20889 20882 /* Allocate a default buf if the caller did not give one */
20890 20883 ASSERT(data_len == 0);
20891 20884 data_len = MHIOC_RESV_KEY_SIZE;
20892 20885 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP);
20893 20886 no_caller_buf = TRUE;
20894 20887 }
20895 20888
20896 20889 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN;
20897 20890 cdb.cdb_opaque[1] = usr_cmd;
20898 20891 FORMG1COUNT(&cdb, data_len);
20899 20892
20900 20893 ucmd_buf.uscsi_cdb = (char *)&cdb;
20901 20894 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
20902 20895 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp;
20903 20896 ucmd_buf.uscsi_buflen = data_len;
20904 20897 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20905 20898 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20906 20899 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20907 20900 ucmd_buf.uscsi_timeout = 60;
20908 20901
20909 20902 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20910 20903 UIO_SYSSPACE, SD_PATH_STANDARD);
20911 20904
20912 20905 switch (status) {
20913 20906 case 0:
20914 20907 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20915 20908
20916 20909 break; /* Success! */
20917 20910 case EIO:
20918 20911 switch (ucmd_buf.uscsi_status) {
20919 20912 case STATUS_RESERVATION_CONFLICT:
20920 20913 status = EACCES;
20921 20914 break;
20922 20915 case STATUS_CHECK:
20923 20916 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20924 20917 (scsi_sense_key((uint8_t *)&sense_buf) ==
20925 20918 KEY_ILLEGAL_REQUEST)) {
20926 20919 status = ENOTSUP;
20927 20920 }
20928 20921 break;
20929 20922 default:
20930 20923 break;
20931 20924 }
20932 20925 break;
20933 20926 default:
20934 20927 break;
20935 20928 }
20936 20929
20937 20930 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");
20938 20931
20939 20932 if (no_caller_buf == TRUE) {
20940 20933 kmem_free(data_bufp, data_len);
20941 20934 }
20942 20935
20943 20936 return (status);
20944 20937 }
20945 20938
20946 20939
20947 20940 /*
20948 20941 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT
20949 20942 *
20950 20943 * Description: This routine is the driver entry point for handling CD-ROM
20951 20944 * multi-host persistent reservation requests (MHIOCGRP_REGISTER,
20952 20945 * MHIOCGRP_RESERVE, MHIOCGRP_PREEMPTANDABORT, etc.) by sending
20953 20946 * the SCSI-3 PROUT commands to the device.
20954 20947 *
20955 20948 * Arguments: ssc - ssc contains un - pointer to soft state struct
20956 20949 * for the target.
20957 20950 * usr_cmd SCSI-3 reservation facility command (one of
20958 20951 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE,
20959 20952 * SD_SCSI3_PREEMPTANDABORT, SD_SCSI3_CLEAR)
20960 20953 * usr_bufp - user provided pointer register, reserve descriptor or
20961 20954 * preempt and abort structure (mhioc_register_t,
20962 20955 * mhioc_resv_desc_t, mhioc_preemptandabort_t)
20963 20956 *
20964 20957 * Return Code: 0 - Success
20965 20958 * EACCES
20966 20959 * ENOTSUP
20967 20960 * errno return code from sd_ssc_send()
20968 20961 *
20969 20962 * Context: Can sleep. Does not return until command is completed.
20970 20963 */
20971 20964
20972 20965 static int
20973 20966 sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd,
20974 20967 uchar_t *usr_bufp)
20975 20968 {
20976 20969 struct scsi_extended_sense sense_buf;
20977 20970 union scsi_cdb cdb;
20978 20971 struct uscsi_cmd ucmd_buf;
20979 20972 int status;
20980 20973 uchar_t data_len = sizeof (sd_prout_t);
20981 20974 sd_prout_t *prp;
20982 20975 struct sd_lun *un;
20983 20976
20984 20977 ASSERT(ssc != NULL);
20985 20978 un = ssc->ssc_un;
20986 20979 ASSERT(un != NULL);
20987 20980 ASSERT(!mutex_owned(SD_MUTEX(un)));
20988 20981 ASSERT(data_len == 24); /* required by scsi spec */
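/*
 * For reference, the SPC-3 PROUT parameter list that sd_prout_t is
 * expected to mirror: bytes 0-7 hold the reservation key, bytes 8-15
 * the service action reservation key, bytes 16-19 the scope-specific
 * address, and byte 20 carries the APTPL bit; the remaining bytes
 * are reserved/obsolete, for the required total of 24.
 */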
20989 20982
20990 20983 SD_TRACE(SD_LOG_IO, un,
20991 20984 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un);
20992 20985
20993 20986 if (usr_bufp == NULL) {
20994 20987 return (EINVAL);
20995 20988 }
20996 20989
20997 20990 bzero(&cdb, sizeof (cdb));
20998 20991 bzero(&ucmd_buf, sizeof (ucmd_buf));
20999 20992 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21000 20993 prp = kmem_zalloc(data_len, KM_SLEEP);
21001 20994
21002 20995 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT;
21003 20996 cdb.cdb_opaque[1] = usr_cmd;
21004 20997 FORMG1COUNT(&cdb, data_len);
21005 20998
21006 20999 ucmd_buf.uscsi_cdb = (char *)&cdb;
21007 21000 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
21008 21001 ucmd_buf.uscsi_bufaddr = (caddr_t)prp;
21009 21002 ucmd_buf.uscsi_buflen = data_len;
21010 21003 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21011 21004 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21012 21005 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
21013 21006 ucmd_buf.uscsi_timeout = 60;
21014 21007
21015 21008 switch (usr_cmd) {
21016 21009 case SD_SCSI3_REGISTER: {
21017 21010 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp;
21018 21011
21019 21012 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
21020 21013 bcopy(ptr->newkey.key, prp->service_key,
21021 21014 MHIOC_RESV_KEY_SIZE);
21022 21015 prp->aptpl = ptr->aptpl;
21023 21016 break;
21024 21017 }
21025 21018 case SD_SCSI3_CLEAR: {
21026 21019 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp;
21027 21020
21028 21021 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
21029 21022 break;
21030 21023 }
21031 21024 case SD_SCSI3_RESERVE:
21032 21025 case SD_SCSI3_RELEASE: {
21033 21026 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp;
21034 21027
21035 21028 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
21036 21029 prp->scope_address = BE_32(ptr->scope_specific_addr);
21037 21030 cdb.cdb_opaque[2] = ptr->type;
21038 21031 break;
21039 21032 }
21040 21033 case SD_SCSI3_PREEMPTANDABORT: {
21041 21034 mhioc_preemptandabort_t *ptr =
21042 21035 (mhioc_preemptandabort_t *)usr_bufp;
21043 21036
21044 21037 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
21045 21038 bcopy(ptr->victim_key.key, prp->service_key,
21046 21039 MHIOC_RESV_KEY_SIZE);
21047 21040 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr);
21048 21041 cdb.cdb_opaque[2] = ptr->resvdesc.type;
21049 21042 ucmd_buf.uscsi_flags |= USCSI_HEAD;
21050 21043 break;
21051 21044 }
21052 21045 case SD_SCSI3_REGISTERANDIGNOREKEY:
21053 21046 {
21054 21047 mhioc_registerandignorekey_t *ptr;
21055 21048 ptr = (mhioc_registerandignorekey_t *)usr_bufp;
21056 21049 bcopy(ptr->newkey.key,
21057 21050 prp->service_key, MHIOC_RESV_KEY_SIZE);
21058 21051 prp->aptpl = ptr->aptpl;
21059 21052 break;
21060 21053 }
21061 21054 default:
21062 21055 ASSERT(FALSE);
21063 21056 break;
21064 21057 }
21065 21058
21066 21059 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21067 21060 UIO_SYSSPACE, SD_PATH_STANDARD);
21068 21061
21069 21062 switch (status) {
21070 21063 case 0:
21071 21064 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21072 21065 break; /* Success! */
21073 21066 case EIO:
21074 21067 switch (ucmd_buf.uscsi_status) {
21075 21068 case STATUS_RESERVATION_CONFLICT:
21076 21069 status = EACCES;
21077 21070 break;
21078 21071 case STATUS_CHECK:
21079 21072 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
21080 21073 (scsi_sense_key((uint8_t *)&sense_buf) ==
21081 21074 KEY_ILLEGAL_REQUEST)) {
21082 21075 status = ENOTSUP;
21083 21076 }
21084 21077 break;
21085 21078 default:
21086 21079 break;
21087 21080 }
21088 21081 break;
21089 21082 default:
21090 21083 break;
21091 21084 }
21092 21085
21093 21086 kmem_free(prp, data_len);
21094 21087 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n");
21095 21088 return (status);
21096 21089 }
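
These PROUT subcommands are reached from userland through the MHIOCGRP_*
ioctls handled in sdioctl() later in this file. A minimal sketch of
registering a key and then taking a reservation with it, assuming an
already-open raw-device fd, the same headers as the PRIN sketch above plus
<string.h>, and using only the structure fields this routine reads; PR type 5
(Write Exclusive, Registrants Only) is shown as one plausible choice:

    /* Hedged sketch: register a PR key, then reserve the LUN with it. */
    static int
    take_scsi3_reservation(int fd)
    {
    	mhioc_register_t reg;
    	mhioc_resv_desc_t rd;

    	(void) memset(&reg, 0, sizeof (reg));		/* oldkey = zeros */
    	(void) memset(reg.newkey.key, 0xAB, MHIOC_RESV_KEY_SIZE);
    	if (ioctl(fd, MHIOCGRP_REGISTER, &reg) != 0)
    		return (-1);

    	(void) memset(&rd, 0, sizeof (rd));
    	(void) memcpy(rd.key.key, reg.newkey.key, MHIOC_RESV_KEY_SIZE);
    	rd.type = 5;			/* Write Exclusive, Registrants Only */
    	rd.scope_specific_addr = 0;	/* LU scope */
    	return (ioctl(fd, MHIOCGRP_RESERVE, &rd));
    }

If another host holds the reservation, the STATUS_RESERVATION_CONFLICT branch
above surfaces as EACCES to this caller.
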
21097 21090
21098 21091
21099 21092 /*
21100 21093 * Function: sd_send_scsi_SYNCHRONIZE_CACHE
21101 21094 *
21102 21095 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target
21103 21096 *
21104 21097 * Arguments: un - pointer to the target's soft state struct
21105 21098 * dkc - pointer to the callback structure
21106 21099 *
21107 21100 * Return Code: 0 - success
21108 21101 * errno-type error code
21109 21102 *
21110 21103 * Context: kernel thread context only.
21111 21104 *
21112 21105 * _______________________________________________________________
21113 21106 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE |
21114 21107 * |FLUSH_VOLATILE| | operation |
21115 21108 * |______________|______________|_________________________________|
21116 21109 * | 0 | NULL | Synchronous flush on both |
21117 21110 * | | | volatile and non-volatile cache |
21118 21111 * |______________|______________|_________________________________|
21119 21112 * | 1 | NULL | Synchronous flush on volatile |
21120 21113 * | | | cache; disk drivers may suppress|
21121 21114 * | | | flush if disk table indicates |
21122 21115 * | | | non-volatile cache |
21123 21116 * |______________|______________|_________________________________|
21124 21117 * | 0 | !NULL | Asynchronous flush on both |
21125 21118 * | | | volatile and non-volatile cache;|
21126 21119 * |______________|______________|_________________________________|
21127 21120 * | 1 | !NULL | Asynchronous flush on volatile |
21128 21121 * | | | cache; disk drivers may suppress|
21129 21122 * | | | flush if disk table indicates |
21130 21123 * | | | non-volatile cache |
21131 21124 * |______________|______________|_________________________________|
21132 21125 *
21133 21126 */
21134 21127
21135 21128 static int
21136 21129 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc)
21137 21130 {
21138 21131 struct sd_uscsi_info *uip;
21139 21132 struct uscsi_cmd *uscmd;
21140 21133 union scsi_cdb *cdb;
21141 21134 struct buf *bp;
21142 21135 int rval = 0;
21143 21136 int is_async;
21144 21137
21145 21138 SD_TRACE(SD_LOG_IO, un,
21146 21139 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un);
21147 21140
21148 21141 ASSERT(un != NULL);
21149 21142 ASSERT(!mutex_owned(SD_MUTEX(un)));
21150 21143
21151 21144 if (dkc == NULL || dkc->dkc_callback == NULL) {
21152 21145 is_async = FALSE;
21153 21146 } else {
21154 21147 is_async = TRUE;
21155 21148 }
21156 21149
21157 21150 mutex_enter(SD_MUTEX(un));
21158 21151 /* check whether cache flush should be suppressed */
21159 21152 if (un->un_f_suppress_cache_flush == TRUE) {
21160 21153 mutex_exit(SD_MUTEX(un));
21161 21154 /*
21162 21155 * suppress the cache flush if the device is told to do
21163 21156 * so by sd.conf or disk table
21164 21157 */
21165 21158 		SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: "
21166 21159 		    "skip the cache flush since suppress_cache_flush is %d!\n",
21167 21160 		    un->un_f_suppress_cache_flush);
21168 21161
21169 21162 if (is_async == TRUE) {
21170 21163 /* invoke callback for asynchronous flush */
21171 21164 (*dkc->dkc_callback)(dkc->dkc_cookie, 0);
21172 21165 }
21173 21166 return (rval);
21174 21167 }
21175 21168 mutex_exit(SD_MUTEX(un));
21176 21169
21177 21170 /*
21178 21171 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be
21179 21172 * set properly
21180 21173 */
21181 21174 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP);
21182 21175 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE;
21183 21176
21184 21177 mutex_enter(SD_MUTEX(un));
21185 21178 if (dkc != NULL && un->un_f_sync_nv_supported &&
21186 21179 (dkc->dkc_flag & FLUSH_VOLATILE)) {
21187 21180 /*
21188 21181 * if the device supports SYNC_NV bit, turn on
21189 21182 * the SYNC_NV bit to only flush volatile cache
21190 21183 */
21191 21184 cdb->cdb_un.tag |= SD_SYNC_NV_BIT;
21192 21185 }
21193 21186 mutex_exit(SD_MUTEX(un));
21194 21187
21195 21188 /*
21196 21189 * First get some memory for the uscsi_cmd struct and cdb
21197 21190 * and initialize for SYNCHRONIZE_CACHE cmd.
21198 21191 */
21199 21192 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP);
21200 21193 uscmd->uscsi_cdblen = CDB_GROUP1;
21201 21194 uscmd->uscsi_cdb = (caddr_t)cdb;
21202 21195 uscmd->uscsi_bufaddr = NULL;
21203 21196 uscmd->uscsi_buflen = 0;
21204 21197 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
21205 21198 uscmd->uscsi_rqlen = SENSE_LENGTH;
21206 21199 uscmd->uscsi_rqresid = SENSE_LENGTH;
21207 21200 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
21208 21201 uscmd->uscsi_timeout = sd_io_time;
21209 21202
21210 21203 /*
21211 21204 * Allocate an sd_uscsi_info struct and fill it with the info
21212 21205 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
21213 21206 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
21214 21207 * since we allocate the buf here in this function, we do not
21215 21208 * need to preserve the prior contents of b_private.
21216 21209 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
21217 21210 */
21218 21211 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
21219 21212 uip->ui_flags = SD_PATH_DIRECT;
21220 21213 uip->ui_cmdp = uscmd;
21221 21214
21222 21215 bp = getrbuf(KM_SLEEP);
21223 21216 bp->b_private = uip;
21224 21217
21225 21218 /*
21226 21219 * Setup buffer to carry uscsi request.
21227 21220 */
21228 21221 bp->b_flags = B_BUSY;
21229 21222 bp->b_bcount = 0;
21230 21223 bp->b_blkno = 0;
21231 21224
21232 21225 if (is_async == TRUE) {
21233 21226 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone;
21234 21227 uip->ui_dkc = *dkc;
21235 21228 }
21236 21229
21237 21230 bp->b_edev = SD_GET_DEV(un);
21238 21231 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */
21239 21232
21240 21233 /*
21241 21234 * Unset un_f_sync_cache_required flag
21242 21235 */
21243 21236 mutex_enter(SD_MUTEX(un));
21244 21237 un->un_f_sync_cache_required = FALSE;
21245 21238 mutex_exit(SD_MUTEX(un));
21246 21239
21247 21240 (void) sd_uscsi_strategy(bp);
21248 21241
21249 21242 /*
21250 21243 	 * If this is a synchronous request, wait for completion.
21251 21244 	 * If async, just return and let the b_iodone callback
21252 21245 	 * clean up.
21253 21246 	 * NOTE: On return, un_ncmds_in_driver will be decremented,
21254 21247 	 * but it was also incremented in sd_uscsi_strategy(), so
21255 21248 	 * we should be ok.
21256 21249 */
21257 21250 if (is_async == FALSE) {
21258 21251 (void) biowait(bp);
21259 21252 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp);
21260 21253 }
21261 21254
21262 21255 return (rval);
21263 21256 }
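
The dkc table above translates into very little caller code. A minimal
kernel-side sketch of the asynchronous, volatile-only row, using only the
dk_callback fields this routine consumes; the completion handler and cookie
are hypothetical:

    /* Hedged sketch: asynchronous flush of the volatile cache only. */
    static void
    my_flush_done(void *cookie, int error)	/* hypothetical handler */
    {
    	/* error is the status computed later in the biodone routine */
    }

    static int
    flush_volatile(struct sd_lun *un, void *cookie)
    {
    	struct dk_callback dkc;

    	dkc.dkc_callback = my_flush_done;	/* non-NULL => async */
    	dkc.dkc_cookie = cookie;
    	dkc.dkc_flag = FLUSH_VOLATILE;	/* request SYNC_NV if supported */
    	return (sd_send_scsi_SYNCHRONIZE_CACHE(un, &dkc));
    }
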
21264 21257
21265 21258
21266 21259 static int
21267 21260 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp)
21268 21261 {
21269 21262 struct sd_uscsi_info *uip;
21270 21263 struct uscsi_cmd *uscmd;
21271 21264 uint8_t *sense_buf;
21272 21265 struct sd_lun *un;
21273 21266 int status;
21274 21267 union scsi_cdb *cdb;
21275 21268
21276 21269 uip = (struct sd_uscsi_info *)(bp->b_private);
21277 21270 ASSERT(uip != NULL);
21278 21271
21279 21272 uscmd = uip->ui_cmdp;
21280 21273 ASSERT(uscmd != NULL);
21281 21274
21282 21275 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf;
21283 21276 ASSERT(sense_buf != NULL);
21284 21277
21285 21278 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
21286 21279 ASSERT(un != NULL);
21287 21280
21288 21281 cdb = (union scsi_cdb *)uscmd->uscsi_cdb;
21289 21282
21290 21283 status = geterror(bp);
21291 21284 switch (status) {
21292 21285 case 0:
21293 21286 break; /* Success! */
21294 21287 case EIO:
21295 21288 switch (uscmd->uscsi_status) {
21296 21289 case STATUS_RESERVATION_CONFLICT:
21297 21290 /* Ignore reservation conflict */
21298 21291 status = 0;
21299 21292 goto done;
21300 21293
21301 21294 case STATUS_CHECK:
21302 21295 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
21303 21296 (scsi_sense_key(sense_buf) ==
21304 21297 KEY_ILLEGAL_REQUEST)) {
21305 21298 /* Ignore Illegal Request error */
21306 21299 				if (cdb->cdb_un.tag & SD_SYNC_NV_BIT) {
21307 21300 mutex_enter(SD_MUTEX(un));
21308 21301 un->un_f_sync_nv_supported = FALSE;
21309 21302 mutex_exit(SD_MUTEX(un));
21310 21303 status = 0;
21311 21304 					SD_TRACE(SD_LOG_IO, un,
21312 21305 					    "un_f_sync_nv_supported "
21313 21306 					    "is set to false.\n");
21314 21307 goto done;
21315 21308 }
21316 21309
21317 21310 mutex_enter(SD_MUTEX(un));
21318 21311 un->un_f_sync_cache_supported = FALSE;
21319 21312 mutex_exit(SD_MUTEX(un));
21320 21313 				SD_TRACE(SD_LOG_IO, un,
21321 21314 				    "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: "
21322 21315 				    "un_f_sync_cache_supported set to false "
21323 21316 				    "with asc = %x, ascq = %x\n",
21324 21317 				    scsi_sense_asc(sense_buf),
21325 21318 				    scsi_sense_ascq(sense_buf));
21326 21319 status = ENOTSUP;
21327 21320 goto done;
21328 21321 }
21329 21322 break;
21330 21323 default:
21331 21324 break;
21332 21325 }
21333 21326 /* FALLTHRU */
21334 21327 default:
21335 21328 /*
21336 21329 * Turn on the un_f_sync_cache_required flag
21337 21330 * since the SYNC CACHE command failed
21338 21331 */
21339 21332 mutex_enter(SD_MUTEX(un));
21340 21333 un->un_f_sync_cache_required = TRUE;
21341 21334 mutex_exit(SD_MUTEX(un));
21342 21335
21343 21336 /*
21344 21337 * Don't log an error message if this device
21345 21338 * has removable media.
21346 21339 */
21347 21340 if (!un->un_f_has_removable_media) {
21348 21341 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
21349 21342 "SYNCHRONIZE CACHE command failed (%d)\n", status);
21350 21343 }
21351 21344 break;
21352 21345 }
21353 21346
21354 21347 done:
21355 21348 if (uip->ui_dkc.dkc_callback != NULL) {
21356 21349 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
21357 21350 }
21358 21351
21359 21352 ASSERT((bp->b_flags & B_REMAPPED) == 0);
21360 21353 freerbuf(bp);
21361 21354 kmem_free(uip, sizeof (struct sd_uscsi_info));
21362 21355 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
21363 21356 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
21364 21357 kmem_free(uscmd, sizeof (struct uscsi_cmd));
21365 21358
21366 21359 return (status);
21367 21360 }
21368 21361
21369 21362
21370 21363 /*
21371 21364 * Function: sd_send_scsi_GET_CONFIGURATION
21372 21365 *
21373 21366 * Description: Issues the get configuration command to the device.
21374 21367  *		Called from sd_check_for_writable_cd & sd_get_media_info;
21375 21368  *		the caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN.
21376 21369  * Arguments:	ssc       - ssc contains un - pointer to soft state
21377 21370  *		ucmdbuf   - uscsi command buffer to fill in and issue
21378 21371  *		rqbuf     - request sense buffer
21379 21372  *		rqbuflen  - size of the request sense buffer
21380 21373  *		bufaddr   - buffer for the returned profile header data
21381 21374  *		buflen    - size of bufaddr (SD_PROFILE_HEADER_LEN)
21382 21375  *		path_flag - SD_PATH_* flag selecting the command chain
21383 21376 *
21384 21377 * Return Code: 0 - Success
21385 21378 * errno return code from sd_ssc_send()
21386 21379 *
21387 21380 * Context: Can sleep. Does not return until command is completed.
21388 21381 *
21389 21382 */
21390 21383
21391 21384 static int
21392 21385 sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf,
21393 21386 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
21394 21387 int path_flag)
21395 21388 {
21396 21389 char cdb[CDB_GROUP1];
21397 21390 int status;
21398 21391 struct sd_lun *un;
21399 21392
21400 21393 ASSERT(ssc != NULL);
21401 21394 un = ssc->ssc_un;
21402 21395 ASSERT(un != NULL);
21403 21396 ASSERT(!mutex_owned(SD_MUTEX(un)));
21404 21397 ASSERT(bufaddr != NULL);
21405 21398 ASSERT(ucmdbuf != NULL);
21406 21399 ASSERT(rqbuf != NULL);
21407 21400
21408 21401 SD_TRACE(SD_LOG_IO, un,
21409 21402 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);
21410 21403
21411 21404 bzero(cdb, sizeof (cdb));
21412 21405 bzero(ucmdbuf, sizeof (struct uscsi_cmd));
21413 21406 bzero(rqbuf, rqbuflen);
21414 21407 bzero(bufaddr, buflen);
21415 21408
21416 21409 /*
21417 21410 * Set up cdb field for the get configuration command.
21418 21411 */
21419 21412 cdb[0] = SCMD_GET_CONFIGURATION;
21420 21413 cdb[1] = 0x02; /* Requested Type */
21421 21414 cdb[8] = SD_PROFILE_HEADER_LEN;
21422 21415 ucmdbuf->uscsi_cdb = cdb;
21423 21416 ucmdbuf->uscsi_cdblen = CDB_GROUP1;
21424 21417 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
21425 21418 ucmdbuf->uscsi_buflen = buflen;
21426 21419 ucmdbuf->uscsi_timeout = sd_io_time;
21427 21420 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
21428 21421 ucmdbuf->uscsi_rqlen = rqbuflen;
21429 21422 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;
21430 21423
21431 21424 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
21432 21425 UIO_SYSSPACE, path_flag);
21433 21426
21434 21427 switch (status) {
21435 21428 case 0:
21436 21429 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21437 21430 break; /* Success! */
21438 21431 case EIO:
21439 21432 switch (ucmdbuf->uscsi_status) {
21440 21433 case STATUS_RESERVATION_CONFLICT:
21441 21434 status = EACCES;
21442 21435 break;
21443 21436 default:
21444 21437 break;
21445 21438 }
21446 21439 break;
21447 21440 default:
21448 21441 break;
21449 21442 }
21450 21443
21451 21444 if (status == 0) {
21452 21445 SD_DUMP_MEMORY(un, SD_LOG_IO,
21453 21446 "sd_send_scsi_GET_CONFIGURATION: data",
21454 21447 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
21455 21448 }
21456 21449
21457 21450 SD_TRACE(SD_LOG_IO, un,
21458 21451 "sd_send_scsi_GET_CONFIGURATION: exit\n");
21459 21452
21460 21453 return (status);
21461 21454 }
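
Per MMC, the SD_PROFILE_HEADER_LEN bytes returned here form the feature
header: a 4-byte big-endian data length, 2 reserved bytes, and the 2-byte
current profile. A minimal decoding sketch; the helper name is hypothetical:

    /* Hedged sketch: extract the current profile from the 8-byte header. */
    static uint16_t
    gc_current_profile(const uchar_t *hdr)
    {
    	/* hdr[0..3] = data length (big-endian); hdr[4..5] = reserved */
    	return ((uint16_t)((hdr[6] << 8) | hdr[7]));
    }
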
21462 21455
21463 21456 /*
21464 21457 * Function: sd_send_scsi_feature_GET_CONFIGURATION
21465 21458 *
21466 21459 * Description: Issues the get configuration command to the device to
21467 21460 * retrieve a specific feature. Called from
21468 21461 * sd_check_for_writable_cd & sd_set_mmc_caps.
21469 21462  * Arguments:	ssc       - ssc contains un - pointer to soft state
21470 21463  *		ucmdbuf   - uscsi command buffer to fill in and issue
21471 21464  *		rqbuf     - request sense buffer
21472 21465  *		rqbuflen  - size of the request sense buffer
21473 21466  *		bufaddr   - buffer for the returned header and feature data
21474 21467  *		buflen    - size of bufaddr
21475 21468  *		feature   - feature code; path_flag - SD_PATH_* chain flag
21476 21469 *
21477 21470 * Return Code: 0 - Success
21478 21471 * errno return code from sd_ssc_send()
21479 21472 *
21480 21473 * Context: Can sleep. Does not return until command is completed.
21481 21474 *
21482 21475 */
21483 21476 static int
21484 21477 sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf,
21485 21478 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
21486 21479 char feature, int path_flag)
21487 21480 {
21488 21481 char cdb[CDB_GROUP1];
21489 21482 int status;
21490 21483 struct sd_lun *un;
21491 21484
21492 21485 ASSERT(ssc != NULL);
21493 21486 un = ssc->ssc_un;
21494 21487 ASSERT(un != NULL);
21495 21488 ASSERT(!mutex_owned(SD_MUTEX(un)));
21496 21489 ASSERT(bufaddr != NULL);
21497 21490 ASSERT(ucmdbuf != NULL);
21498 21491 ASSERT(rqbuf != NULL);
21499 21492
21500 21493 SD_TRACE(SD_LOG_IO, un,
21501 21494 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un);
21502 21495
21503 21496 bzero(cdb, sizeof (cdb));
21504 21497 bzero(ucmdbuf, sizeof (struct uscsi_cmd));
21505 21498 bzero(rqbuf, rqbuflen);
21506 21499 bzero(bufaddr, buflen);
21507 21500
21508 21501 /*
21509 21502 * Set up cdb field for the get configuration command.
21510 21503 */
21511 21504 cdb[0] = SCMD_GET_CONFIGURATION;
21512 21505 cdb[1] = 0x02; /* Requested Type */
21513 21506 cdb[3] = feature;
21514 21507 cdb[8] = buflen;
21515 21508 ucmdbuf->uscsi_cdb = cdb;
21516 21509 ucmdbuf->uscsi_cdblen = CDB_GROUP1;
21517 21510 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
21518 21511 ucmdbuf->uscsi_buflen = buflen;
21519 21512 ucmdbuf->uscsi_timeout = sd_io_time;
21520 21513 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
21521 21514 ucmdbuf->uscsi_rqlen = rqbuflen;
21522 21515 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;
21523 21516
21524 21517 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
21525 21518 UIO_SYSSPACE, path_flag);
21526 21519
21527 21520 switch (status) {
21528 21521 case 0:
21529 21522
21530 21523 break; /* Success! */
21531 21524 case EIO:
21532 21525 switch (ucmdbuf->uscsi_status) {
21533 21526 case STATUS_RESERVATION_CONFLICT:
21534 21527 status = EACCES;
21535 21528 break;
21536 21529 default:
21537 21530 break;
21538 21531 }
21539 21532 break;
21540 21533 default:
21541 21534 break;
21542 21535 }
21543 21536
21544 21537 if (status == 0) {
21545 21538 SD_DUMP_MEMORY(un, SD_LOG_IO,
21546 21539 "sd_send_scsi_feature_GET_CONFIGURATION: data",
21547 21540 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
21548 21541 }
21549 21542
21550 21543 SD_TRACE(SD_LOG_IO, un,
21551 21544 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n");
21552 21545
21553 21546 return (status);
21554 21547 }
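
When a single feature is requested as above, the descriptor that follows the
8-byte header begins with a big-endian feature code, a flags byte whose bit 0
means "current", and an additional-length byte. A minimal decoding sketch;
the helper name is hypothetical:

    /* Hedged sketch: did the device report the requested feature as current? */
    static boolean_t
    gc_feature_is_current(const uchar_t *buf, size_t buflen)
    {
    	const uchar_t *desc = buf + SD_PROFILE_HEADER_LEN;

    	if (buflen < SD_PROFILE_HEADER_LEN + 4)
    		return (B_FALSE);	/* no descriptor in the buffer */
    	/* desc[0..1] = feature code (BE); desc[2] bit 0 = current */
    	return ((desc[2] & 0x01) != 0);
    }
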
21555 21548
21556 21549
21557 21550 /*
21558 21551 * Function: sd_send_scsi_MODE_SENSE
21559 21552 *
21560 21553 * Description: Utility function for issuing a scsi MODE SENSE command.
21561 21554 * Note: This routine uses a consistent implementation for Group0,
21562 21555 * Group1, and Group2 commands across all platforms. ATAPI devices
21563 21556  *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
21564 21557 *
21565 21558 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21566 21559 * structure for this target.
21567 21560  *		cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte) or
21568 21561  *		CDB_GROUP[1|2] (10 byte)).
21569 21562 * bufaddr - buffer for page data retrieved from the target.
21570 21563 * buflen - size of page to be retrieved.
21571 21564 * page_code - page code of data to be retrieved from the target.
21572 21565 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21573 21566 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21574 21567 * to use the USCSI "direct" chain and bypass the normal
21575 21568 * command waitq.
21576 21569 *
21577 21570 * Return Code: 0 - Success
21578 21571 * errno return code from sd_ssc_send()
21579 21572 *
21580 21573 * Context: Can sleep. Does not return until command is completed.
21581 21574 */
21582 21575
21583 21576 static int
21584 21577 sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
21585 21578 size_t buflen, uchar_t page_code, int path_flag)
21586 21579 {
21587 21580 struct scsi_extended_sense sense_buf;
21588 21581 union scsi_cdb cdb;
21589 21582 struct uscsi_cmd ucmd_buf;
21590 21583 int status;
21591 21584 int headlen;
21592 21585 struct sd_lun *un;
21593 21586
21594 21587 ASSERT(ssc != NULL);
21595 21588 un = ssc->ssc_un;
21596 21589 ASSERT(un != NULL);
21597 21590 ASSERT(!mutex_owned(SD_MUTEX(un)));
21598 21591 ASSERT(bufaddr != NULL);
21599 21592 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
21600 21593 (cdbsize == CDB_GROUP2));
21601 21594
21602 21595 SD_TRACE(SD_LOG_IO, un,
21603 21596 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);
21604 21597
21605 21598 bzero(&cdb, sizeof (cdb));
21606 21599 bzero(&ucmd_buf, sizeof (ucmd_buf));
21607 21600 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21608 21601 bzero(bufaddr, buflen);
21609 21602
21610 21603 if (cdbsize == CDB_GROUP0) {
21611 21604 cdb.scc_cmd = SCMD_MODE_SENSE;
21612 21605 cdb.cdb_opaque[2] = page_code;
21613 21606 FORMG0COUNT(&cdb, buflen);
21614 21607 headlen = MODE_HEADER_LENGTH;
21615 21608 } else {
21616 21609 cdb.scc_cmd = SCMD_MODE_SENSE_G1;
21617 21610 cdb.cdb_opaque[2] = page_code;
21618 21611 FORMG1COUNT(&cdb, buflen);
21619 21612 headlen = MODE_HEADER_LENGTH_GRP2;
21620 21613 }
21621 21614
21622 21615 ASSERT(headlen <= buflen);
21623 21616 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
21624 21617
21625 21618 ucmd_buf.uscsi_cdb = (char *)&cdb;
21626 21619 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
21627 21620 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
21628 21621 ucmd_buf.uscsi_buflen = buflen;
21629 21622 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21630 21623 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21631 21624 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
21632 21625 ucmd_buf.uscsi_timeout = 60;
21633 21626
21634 21627 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21635 21628 UIO_SYSSPACE, path_flag);
21636 21629
21637 21630 switch (status) {
21638 21631 case 0:
21639 21632 /*
21640 21633 		 * sr_check_wp() uses the 0x3f page code and checks the mode
21641 21634 		 * page header to determine if the target device is
21642 21635 		 * write-protected. But some USB devices return 0 bytes for
21643 21636 		 * the 0x3f page code. For this case, make sure that at
21644 21637 		 * least the mode page header is returned.
21645 21638 */
21646 21639 if (buflen - ucmd_buf.uscsi_resid < headlen) {
21647 21640 status = EIO;
21648 21641 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
21649 21642 "mode page header is not returned");
21650 21643 }
21651 21644 break; /* Success! */
21652 21645 case EIO:
21653 21646 switch (ucmd_buf.uscsi_status) {
21654 21647 case STATUS_RESERVATION_CONFLICT:
21655 21648 status = EACCES;
21656 21649 break;
21657 21650 default:
21658 21651 break;
21659 21652 }
21660 21653 break;
21661 21654 default:
21662 21655 break;
21663 21656 }
21664 21657
21665 21658 if (status == 0) {
21666 21659 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
21667 21660 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
21668 21661 }
21669 21662 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");
21670 21663
21671 21664 return (status);
21672 21665 }
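
As the USB workaround above implies, callers such as sr_check_wp() need only
the mode parameter header; for a Group 0 (6-byte) MODE SENSE the
write-protect flag is bit 7 of the device-specific parameter byte. A minimal
decoding sketch, with a hypothetical helper name:

    /* Hedged sketch: WP bit in a Group 0 (6-byte) mode parameter header. */
    static boolean_t
    mode_hdr_write_protected(const uchar_t *hdr)
    {
    	/*
    	 * hdr[0] = mode data length	hdr[1] = medium type
    	 * hdr[2] = device-specific	hdr[3] = block descriptor length
    	 */
    	return ((hdr[2] & 0x80) != 0);
    }
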
21673 21666
21674 21667
21675 21668 /*
21676 21669 * Function: sd_send_scsi_MODE_SELECT
21677 21670 *
21678 21671 * Description: Utility function for issuing a scsi MODE SELECT command.
21679 21672 * Note: This routine uses a consistent implementation for Group0,
21680 21673 * Group1, and Group2 commands across all platforms. ATAPI devices
21681 21674  *		use Group 1 Read/Write commands and Group 2 Mode Sense/Select.
21682 21675 *
21683 21676 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21684 21677 * structure for this target.
21685 21678  *		cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte) or
21686 21679  *		CDB_GROUP[1|2] (10 byte)).
21687 21680 * bufaddr - buffer for page data retrieved from the target.
21688 21681 * buflen - size of page to be retrieved.
21689 21682  *		save_page - boolean to determine if SP bit should be set.
21690 21683 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21691 21684 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21692 21685 * to use the USCSI "direct" chain and bypass the normal
21693 21686 * command waitq.
21694 21687 *
21695 21688 * Return Code: 0 - Success
21696 21689 * errno return code from sd_ssc_send()
21697 21690 *
21698 21691 * Context: Can sleep. Does not return until command is completed.
21699 21692 */
21700 21693
21701 21694 static int
21702 21695 sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
21703 21696 size_t buflen, uchar_t save_page, int path_flag)
21704 21697 {
21705 21698 struct scsi_extended_sense sense_buf;
21706 21699 union scsi_cdb cdb;
21707 21700 struct uscsi_cmd ucmd_buf;
21708 21701 int status;
21709 21702 struct sd_lun *un;
21710 21703
21711 21704 ASSERT(ssc != NULL);
21712 21705 un = ssc->ssc_un;
21713 21706 ASSERT(un != NULL);
21714 21707 ASSERT(!mutex_owned(SD_MUTEX(un)));
21715 21708 ASSERT(bufaddr != NULL);
21716 21709 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
21717 21710 (cdbsize == CDB_GROUP2));
21718 21711
21719 21712 SD_TRACE(SD_LOG_IO, un,
21720 21713 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);
21721 21714
21722 21715 bzero(&cdb, sizeof (cdb));
21723 21716 bzero(&ucmd_buf, sizeof (ucmd_buf));
21724 21717 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21725 21718
21726 21719 /* Set the PF bit for many third party drives */
21727 21720 cdb.cdb_opaque[1] = 0x10;
21728 21721
21729 21722 	/* Set the save page (SP) bit if requested */
21730 21723 if (save_page == SD_SAVE_PAGE) {
21731 21724 cdb.cdb_opaque[1] |= 0x01;
21732 21725 }
21733 21726
21734 21727 if (cdbsize == CDB_GROUP0) {
21735 21728 cdb.scc_cmd = SCMD_MODE_SELECT;
21736 21729 FORMG0COUNT(&cdb, buflen);
21737 21730 } else {
21738 21731 cdb.scc_cmd = SCMD_MODE_SELECT_G1;
21739 21732 FORMG1COUNT(&cdb, buflen);
21740 21733 }
21741 21734
21742 21735 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
21743 21736
21744 21737 ucmd_buf.uscsi_cdb = (char *)&cdb;
21745 21738 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
21746 21739 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
21747 21740 ucmd_buf.uscsi_buflen = buflen;
21748 21741 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21749 21742 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21750 21743 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
21751 21744 ucmd_buf.uscsi_timeout = 60;
21752 21745
21753 21746 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21754 21747 UIO_SYSSPACE, path_flag);
21755 21748
21756 21749 switch (status) {
21757 21750 case 0:
21758 21751 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21759 21752 break; /* Success! */
21760 21753 case EIO:
21761 21754 switch (ucmd_buf.uscsi_status) {
21762 21755 case STATUS_RESERVATION_CONFLICT:
21763 21756 status = EACCES;
21764 21757 break;
21765 21758 default:
21766 21759 break;
21767 21760 }
21768 21761 break;
21769 21762 default:
21770 21763 break;
21771 21764 }
21772 21765
21773 21766 if (status == 0) {
21774 21767 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
21775 21768 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
21776 21769 }
21777 21770 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");
21778 21771
21779 21772 return (status);
21780 21773 }
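
The usual pattern pairs this routine with sd_send_scsi_MODE_SENSE() in a
read-modify-write of a mode page. A minimal sketch, assuming a Group 1
transfer with no block descriptors; zeroing the mode data length before the
SELECT follows standard SPC practice, since that field is reserved on MODE
SELECT. The helper name and parameters are hypothetical:

    /* Hedged sketch: set one bit in a mode page and save it. */
    static int
    set_mode_page_bit(sd_ssc_t *ssc, uchar_t page, uchar_t *buf, size_t len,
        size_t byte_off, uchar_t bit)
    {
    	int rval;

    	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, buf, len, page,
    	    SD_PATH_STANDARD);
    	if (rval != 0)
    		return (rval);
    	buf[0] = buf[1] = 0;	/* mode data length: reserved on SELECT */
    	buf[MODE_HEADER_LENGTH_GRP2 + byte_off] |= bit;	/* no block descs */
    	return (sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, buf, len,
    	    SD_SAVE_PAGE, SD_PATH_STANDARD));
    }
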
21781 21774
21782 21775
21783 21776 /*
21784 21777 * Function: sd_send_scsi_RDWR
21785 21778 *
21786 21779 * Description: Issue a scsi READ or WRITE command with the given parameters.
21787 21780 *
21788 21781 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21789 21782 * structure for this target.
21790 21783 * cmd: SCMD_READ or SCMD_WRITE
21791 21784 * bufaddr: Address of caller's buffer to receive the RDWR data
21792 21785  *		buflen:  Length of caller's buffer to receive the RDWR data.
21793 21786 * start_block: Block number for the start of the RDWR operation.
21794 21787 * (Assumes target-native block size.)
21795 21788  *		residp: Pointer to variable to receive the residual of the
21796 21789  *		RDWR operation (may be NULL if no residual requested).
21797 21790 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21798 21791 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21799 21792 * to use the USCSI "direct" chain and bypass the normal
21800 21793 * command waitq.
21801 21794 *
21802 21795 * Return Code: 0 - Success
21803 21796 * errno return code from sd_ssc_send()
21804 21797 *
21805 21798 * Context: Can sleep. Does not return until command is completed.
21806 21799 */
21807 21800
21808 21801 static int
21809 21802 sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
21810 21803 size_t buflen, daddr_t start_block, int path_flag)
21811 21804 {
21812 21805 struct scsi_extended_sense sense_buf;
21813 21806 union scsi_cdb cdb;
21814 21807 struct uscsi_cmd ucmd_buf;
21815 21808 uint32_t block_count;
21816 21809 int status;
21817 21810 int cdbsize;
21818 21811 uchar_t flag;
21819 21812 struct sd_lun *un;
21820 21813
21821 21814 ASSERT(ssc != NULL);
21822 21815 un = ssc->ssc_un;
21823 21816 ASSERT(un != NULL);
21824 21817 ASSERT(!mutex_owned(SD_MUTEX(un)));
21825 21818 ASSERT(bufaddr != NULL);
21826 21819 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE));
21827 21820
21828 21821 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un);
21829 21822
21830 21823 if (un->un_f_tgt_blocksize_is_valid != TRUE) {
21831 21824 return (EINVAL);
21832 21825 }
21833 21826
21834 21827 mutex_enter(SD_MUTEX(un));
21835 21828 block_count = SD_BYTES2TGTBLOCKS(un, buflen);
21836 21829 mutex_exit(SD_MUTEX(un));
21837 21830
21838 21831 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE;
21839 21832
21840 21833 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: "
21841 21834 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n",
21842 21835 bufaddr, buflen, start_block, block_count);
21843 21836
21844 21837 bzero(&cdb, sizeof (cdb));
21845 21838 bzero(&ucmd_buf, sizeof (ucmd_buf));
21846 21839 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21847 21840
21848 21841 /* Compute CDB size to use */
21849 21842 if (start_block > 0xffffffff)
21850 21843 cdbsize = CDB_GROUP4;
21851 21844 else if ((start_block & 0xFFE00000) ||
21852 21845 (un->un_f_cfg_is_atapi == TRUE))
21853 21846 cdbsize = CDB_GROUP1;
21854 21847 else
21855 21848 cdbsize = CDB_GROUP0;
21856 21849
21857 21850 switch (cdbsize) {
21858 21851 case CDB_GROUP0: /* 6-byte CDBs */
21859 21852 cdb.scc_cmd = cmd;
21860 21853 FORMG0ADDR(&cdb, start_block);
21861 21854 FORMG0COUNT(&cdb, block_count);
21862 21855 break;
21863 21856 case CDB_GROUP1: /* 10-byte CDBs */
21864 21857 cdb.scc_cmd = cmd | SCMD_GROUP1;
21865 21858 FORMG1ADDR(&cdb, start_block);
21866 21859 FORMG1COUNT(&cdb, block_count);
21867 21860 break;
21868 21861 case CDB_GROUP4: /* 16-byte CDBs */
21869 21862 cdb.scc_cmd = cmd | SCMD_GROUP4;
21870 21863 FORMG4LONGADDR(&cdb, (uint64_t)start_block);
21871 21864 FORMG4COUNT(&cdb, block_count);
21872 21865 break;
21873 21866 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */
21874 21867 default:
21875 21868 /* All others reserved */
21876 21869 return (EINVAL);
21877 21870 }
21878 21871
21879 21872 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */
21880 21873 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
21881 21874
21882 21875 ucmd_buf.uscsi_cdb = (char *)&cdb;
21883 21876 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
21884 21877 ucmd_buf.uscsi_bufaddr = bufaddr;
21885 21878 ucmd_buf.uscsi_buflen = buflen;
21886 21879 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21887 21880 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21888 21881 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT;
21889 21882 ucmd_buf.uscsi_timeout = 60;
21890 21883 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21891 21884 UIO_SYSSPACE, path_flag);
21892 21885
21893 21886 switch (status) {
21894 21887 case 0:
21895 21888 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21896 21889 break; /* Success! */
21897 21890 case EIO:
21898 21891 switch (ucmd_buf.uscsi_status) {
21899 21892 case STATUS_RESERVATION_CONFLICT:
21900 21893 status = EACCES;
21901 21894 break;
21902 21895 default:
21903 21896 break;
21904 21897 }
21905 21898 break;
21906 21899 default:
21907 21900 break;
21908 21901 }
21909 21902
21910 21903 if (status == 0) {
21911 21904 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data",
21912 21905 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
21913 21906 }
21914 21907
21915 21908 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n");
21916 21909
21917 21910 return (status);
21918 21911 }
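
The CDB-size selection above follows directly from the addressing limits of
each command group: a 6-byte CDB carries a 21-bit LBA (hence the 0xFFE00000
mask, which tests bits 21 and up) and an 8-bit count; a 10-byte CDB a 32-bit
LBA and 16-bit count; a 16-byte CDB a 64-bit LBA and 32-bit count. ATAPI
devices always get at least 10-byte CDBs because Group 0 read/write is not
part of ATAPI. A standalone sketch of the same decision, with a hypothetical
function name:

    /* Hedged sketch: standalone mirror of the CDB group selection above. */
    static int
    pick_rdwr_cdb_group(uint64_t lba, boolean_t is_atapi)
    {
    	if (lba > 0xffffffffULL)
    		return (CDB_GROUP4);	/* 16-byte CDB: 64-bit LBA */
    	if ((lba & 0xFFE00000ULL) != 0 || is_atapi)
    		return (CDB_GROUP1);	/* 10-byte CDB: 32-bit LBA */
    	return (CDB_GROUP0);		/* 6-byte CDB: 21-bit LBA */
    }
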
21919 21912
21920 21913
21921 21914 /*
21922 21915 * Function: sd_send_scsi_LOG_SENSE
21923 21916 *
21924 21917 * Description: Issue a scsi LOG_SENSE command with the given parameters.
21925 21918 *
21926 21919 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21927 21920 * structure for this target.
21928 21921 *
21929 21922 * Return Code: 0 - Success
21930 21923 * errno return code from sd_ssc_send()
21931 21924 *
21932 21925 * Context: Can sleep. Does not return until command is completed.
21933 21926 */
21934 21927
21935 21928 static int
21936 21929 sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen,
21937 21930 uchar_t page_code, uchar_t page_control, uint16_t param_ptr, int path_flag)
21938 21931 {
21939 21932 struct scsi_extended_sense sense_buf;
21940 21933 union scsi_cdb cdb;
21941 21934 struct uscsi_cmd ucmd_buf;
21942 21935 int status;
21943 21936 struct sd_lun *un;
21944 21937
21945 21938 ASSERT(ssc != NULL);
21946 21939 un = ssc->ssc_un;
21947 21940 ASSERT(un != NULL);
21948 21941 ASSERT(!mutex_owned(SD_MUTEX(un)));
21949 21942
21950 21943 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un);
21951 21944
21952 21945 bzero(&cdb, sizeof (cdb));
21953 21946 bzero(&ucmd_buf, sizeof (ucmd_buf));
21954 21947 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21955 21948
21956 21949 cdb.scc_cmd = SCMD_LOG_SENSE_G1;
21957 21950 cdb.cdb_opaque[2] = (page_control << 6) | page_code;
21958 21951 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8);
21959 21952 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF);
21960 21953 FORMG1COUNT(&cdb, buflen);
21961 21954
21962 21955 ucmd_buf.uscsi_cdb = (char *)&cdb;
21963 21956 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
21964 21957 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
21965 21958 ucmd_buf.uscsi_buflen = buflen;
21966 21959 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21967 21960 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21968 21961 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
21969 21962 ucmd_buf.uscsi_timeout = 60;
21970 21963
21971 21964 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21972 21965 UIO_SYSSPACE, path_flag);
21973 21966
21974 21967 switch (status) {
21975 21968 case 0:
21976 21969 break;
21977 21970 case EIO:
21978 21971 switch (ucmd_buf.uscsi_status) {
21979 21972 case STATUS_RESERVATION_CONFLICT:
21980 21973 status = EACCES;
21981 21974 break;
21982 21975 case STATUS_CHECK:
21983 21976 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
21984 21977 (scsi_sense_key((uint8_t *)&sense_buf) ==
21985 21978 KEY_ILLEGAL_REQUEST) &&
21986 21979 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) {
21987 21980 /*
21988 21981 * ASC 0x24: INVALID FIELD IN CDB
21989 21982 */
21990 21983 switch (page_code) {
21991 21984 case START_STOP_CYCLE_PAGE:
21992 21985 /*
21993 21986 * The start stop cycle counter is
21994 21987 * implemented as page 0x31 in earlier
21995 21988 * generation disks. In new generation
21996 21989 * disks the start stop cycle counter is
21997 21990 * implemented as page 0xE. To properly
21998 21991 * handle this case if an attempt for
21999 21992 * log page 0xE is made and fails we
22000 21993 * will try again using page 0x31.
22001 21994 *
22002 21995 * Network storage BU committed to
22003 21996 * maintain the page 0x31 for this
22004 21997 * purpose and will not have any other
22005 21998 * page implemented with page code 0x31
22006 21999 * until all disks transition to the
22007 22000 * standard page.
22008 22001 */
22009 22002 mutex_enter(SD_MUTEX(un));
22010 22003 un->un_start_stop_cycle_page =
22011 22004 START_STOP_CYCLE_VU_PAGE;
22012 22005 cdb.cdb_opaque[2] =
22013 22006 (char)(page_control << 6) |
22014 22007 un->un_start_stop_cycle_page;
22015 22008 mutex_exit(SD_MUTEX(un));
22016 22009 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
22017 22010 status = sd_ssc_send(
22018 22011 ssc, &ucmd_buf, FKIOCTL,
22019 22012 UIO_SYSSPACE, path_flag);
22020 22013
22021 22014 break;
22022 22015 case TEMPERATURE_PAGE:
22023 22016 status = ENOTTY;
22024 22017 break;
22025 22018 default:
22026 22019 break;
22027 22020 }
22028 22021 }
22029 22022 break;
22030 22023 default:
22031 22024 break;
22032 22025 }
22033 22026 break;
22034 22027 default:
22035 22028 break;
22036 22029 }
22037 22030
22038 22031 if (status == 0) {
22039 22032 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
22040 22033 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data",
22041 22034 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
22042 22035 }
22043 22036
22044 22037 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n");
22045 22038
22046 22039 return (status);
22047 22040 }
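
For reference, the Group 1 LOG SENSE CDB assembled above packs the page
control into the top two bits of byte 2, the parameter pointer big-endian
into bytes 5-6, and the allocation length (via FORMG1COUNT) into bytes 7-8.
A standalone sketch of the same packing, with a hypothetical helper name:

    /* Hedged sketch: LOG SENSE(10) CDB layout used above. */
    static void
    fill_log_sense_cdb(uchar_t cdb[CDB_GROUP1], uchar_t page_code,
        uchar_t page_control, uint16_t param_ptr, uint16_t alloc_len)
    {
    	bzero(cdb, CDB_GROUP1);
    	cdb[0] = SCMD_LOG_SENSE_G1;
    	cdb[2] = (page_control << 6) | page_code;
    	cdb[5] = (uchar_t)(param_ptr >> 8);	/* parameter pointer, MSB */
    	cdb[6] = (uchar_t)(param_ptr & 0xFF);	/* parameter pointer, LSB */
    	cdb[7] = (uchar_t)(alloc_len >> 8);	/* allocation length, MSB */
    	cdb[8] = (uchar_t)(alloc_len & 0xFF);	/* allocation length, LSB */
    }
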
22048 22041
22049 22042
22050 22043 /*
22051 22044 * Function: sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
22052 22045 *
22053 22046 * Description: Issue the scsi GET EVENT STATUS NOTIFICATION command.
22054 22047 *
22055 22048 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
22056 22049 * structure for this target.
22057 22050  *		bufaddr   - buffer for the returned event header and data
22058 22051  *		buflen    - size of bufaddr
22059 22052  *		class_req - requested notification class mask (CDB byte 4)
22060 22053 *
22061 22054 * Return Code: 0 - Success
22062 22055 * errno return code from sd_ssc_send()
22063 22056 *
22064 22057 * Context: Can sleep. Does not return until command is completed.
22065 22058 */
22066 22059
22067 22060 static int
22068 22061 sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc, uchar_t *bufaddr,
22069 22062 size_t buflen, uchar_t class_req)
22070 22063 {
22071 22064 union scsi_cdb cdb;
22072 22065 struct uscsi_cmd ucmd_buf;
22073 22066 int status;
22074 22067 struct sd_lun *un;
22075 22068
22076 22069 ASSERT(ssc != NULL);
22077 22070 un = ssc->ssc_un;
22078 22071 ASSERT(un != NULL);
22079 22072 ASSERT(!mutex_owned(SD_MUTEX(un)));
22080 22073 ASSERT(bufaddr != NULL);
22081 22074
22082 22075 SD_TRACE(SD_LOG_IO, un,
22083 22076 "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: entry: un:0x%p\n", un);
22084 22077
22085 22078 bzero(&cdb, sizeof (cdb));
22086 22079 bzero(&ucmd_buf, sizeof (ucmd_buf));
22087 22080 bzero(bufaddr, buflen);
22088 22081
22089 22082 cdb.scc_cmd = SCMD_GET_EVENT_STATUS_NOTIFICATION;
22090 22083 cdb.cdb_opaque[1] = 1; /* polled */
22091 22084 cdb.cdb_opaque[4] = class_req;
22092 22085 FORMG1COUNT(&cdb, buflen);
22093 22086
22094 22087 ucmd_buf.uscsi_cdb = (char *)&cdb;
22095 22088 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
22096 22089 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
22097 22090 ucmd_buf.uscsi_buflen = buflen;
22098 22091 ucmd_buf.uscsi_rqbuf = NULL;
22099 22092 ucmd_buf.uscsi_rqlen = 0;
22100 22093 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
22101 22094 ucmd_buf.uscsi_timeout = 60;
22102 22095
22103 22096 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
22104 22097 UIO_SYSSPACE, SD_PATH_DIRECT);
22105 22098
22106 22099 /*
22107 22100 	 * Only handle status == 0; the upper-level caller
22108 22101 	 * will make a different assessment based on the context.
22109 22102 */
22110 22103 if (status == 0) {
22111 22104 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
22112 22105
22113 22106 if (ucmd_buf.uscsi_resid != 0) {
22114 22107 status = EIO;
22115 22108 }
22116 22109 }
22117 22110
22118 22111 SD_TRACE(SD_LOG_IO, un,
22119 22112 "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: exit\n");
22120 22113
22121 22114 return (status);
22122 22115 }
22123 22116
22124 22117
22125 22118 static boolean_t
22126 22119 sd_gesn_media_data_valid(uchar_t *data)
22127 22120 {
22128 22121 uint16_t len;
22129 22122
22130 22123 len = (data[1] << 8) | data[0];
22131 22124 return ((len >= 6) &&
22132 22125 ((data[2] & SD_GESN_HEADER_NEA) == 0) &&
22133 22126 ((data[2] & SD_GESN_HEADER_CLASS) == SD_GESN_MEDIA_CLASS) &&
22134 22127 ((data[3] & (1 << SD_GESN_MEDIA_CLASS)) != 0));
22135 22128 }
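
This check enforces four conditions: a plausible event data length (>= 6),
the NEA (no event available) bit clear, a media-class notification, and the
media class advertised as supported in byte 3. A minimal sketch of a header
that satisfies it, with the length bytes written in the order this helper
reads them; the wrapper function is hypothetical:

    /* Hedged sketch: the smallest header sd_gesn_media_data_valid() accepts. */
    static void
    gesn_header_example(void)
    {
    	uchar_t hdr[4];

    	hdr[0] = 6;				/* event data length */
    	hdr[1] = 0;
    	hdr[2] = SD_GESN_MEDIA_CLASS;		/* NEA clear, class == media */
    	hdr[3] = 1 << SD_GESN_MEDIA_CLASS;	/* media class supported */
    	ASSERT(sd_gesn_media_data_valid(hdr));
    }
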
22136 22129
22137 22130
22138 22131 /*
22139 22132 * Function: sdioctl
22140 22133 *
22141 22134 * Description: Driver's ioctl(9e) entry point function.
22142 22135 *
22143 22136 * Arguments: dev - device number
22144 22137 * cmd - ioctl operation to be performed
22145 22138  *		arg - user argument; contains data to be set or a reference
22146 22139  *		parameter for get
22147 22140 * flag - bit flag, indicating open settings, 32/64 bit type
22148 22141 * cred_p - user credential pointer
22149 22142 * rval_p - calling process return value (OPT)
22150 22143 *
22151 22144 * Return Code: EINVAL
22152 22145 * ENOTTY
22153 22146 * ENXIO
22154 22147 * EIO
22155 22148 * EFAULT
22156 22149 * ENOTSUP
22157 22150 * EPERM
22158 22151 *
22159 22152 * Context: Called from the device switch at normal priority.
22160 22153 */
22161 22154
22162 22155 static int
22163 22156 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p)
22164 22157 {
22165 22158 struct sd_lun *un = NULL;
22166 22159 int err = 0;
22167 22160 int i = 0;
22168 22161 cred_t *cr;
22169 22162 int tmprval = EINVAL;
22170 22163 boolean_t is_valid;
22171 22164 sd_ssc_t *ssc;
22172 22165
22173 22166 /*
22174 22167 	 * All device accesses go through sdstrategy, where we check the
22175 22168 	 * suspend status.
22176 22169 */
22177 22170 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
22178 22171 return (ENXIO);
22179 22172 }
22180 22173
22181 22174 ASSERT(!mutex_owned(SD_MUTEX(un)));
22182 22175
22183 22176 /* Initialize sd_ssc_t for internal uscsi commands */
22184 22177 ssc = sd_ssc_init(un);
22185 22178
22186 22179 is_valid = SD_IS_VALID_LABEL(un);
22187 22180
22188 22181 /*
22189 22182 * Moved this wait from sd_uscsi_strategy to here for
22190 22183 * reasons of deadlock prevention. Internal driver commands,
22191 22184 	 * specifically those to change a device's power level, result
22192 22185 * in a call to sd_uscsi_strategy.
22193 22186 */
22194 22187 mutex_enter(SD_MUTEX(un));
22195 22188 while ((un->un_state == SD_STATE_SUSPENDED) ||
22196 22189 (un->un_state == SD_STATE_PM_CHANGING)) {
22197 22190 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
22198 22191 }
22199 22192 /*
22200 22193 * Twiddling the counter here protects commands from now
22201 22194 	 * through to the top of sd_uscsi_strategy. Without the
22202 22195 	 * counter increment, a power down, for example, could get in
22203 22196 * after the above check for state is made and before
22204 22197 * execution gets to the top of sd_uscsi_strategy.
22205 22198 * That would cause problems.
22206 22199 */
22207 22200 un->un_ncmds_in_driver++;
22208 22201
22209 22202 if (!is_valid &&
22210 22203 (flag & (FNDELAY | FNONBLOCK))) {
22211 22204 switch (cmd) {
22212 22205 case DKIOCGGEOM: /* SD_PATH_DIRECT */
22213 22206 case DKIOCGVTOC:
22214 22207 case DKIOCGEXTVTOC:
22215 22208 case DKIOCGAPART:
22216 22209 case DKIOCPARTINFO:
22217 22210 case DKIOCEXTPARTINFO:
22218 22211 case DKIOCSGEOM:
22219 22212 case DKIOCSAPART:
22220 22213 case DKIOCGETEFI:
22221 22214 case DKIOCPARTITION:
22222 22215 case DKIOCSVTOC:
22223 22216 case DKIOCSEXTVTOC:
22224 22217 case DKIOCSETEFI:
22225 22218 case DKIOCGMBOOT:
22226 22219 case DKIOCSMBOOT:
22227 22220 case DKIOCG_PHYGEOM:
22228 22221 case DKIOCG_VIRTGEOM:
22229 22222 #if defined(__i386) || defined(__amd64)
22230 22223 case DKIOCSETEXTPART:
22231 22224 #endif
22232 22225 /* let cmlb handle it */
22233 22226 goto skip_ready_valid;
22234 22227
22235 22228 case CDROMPAUSE:
22236 22229 case CDROMRESUME:
22237 22230 case CDROMPLAYMSF:
22238 22231 case CDROMPLAYTRKIND:
22239 22232 case CDROMREADTOCHDR:
22240 22233 case CDROMREADTOCENTRY:
22241 22234 case CDROMSTOP:
22242 22235 case CDROMSTART:
22243 22236 case CDROMVOLCTRL:
22244 22237 case CDROMSUBCHNL:
22245 22238 case CDROMREADMODE2:
22246 22239 case CDROMREADMODE1:
22247 22240 case CDROMREADOFFSET:
22248 22241 case CDROMSBLKMODE:
22249 22242 case CDROMGBLKMODE:
22250 22243 case CDROMGDRVSPEED:
22251 22244 case CDROMSDRVSPEED:
22252 22245 case CDROMCDDA:
22253 22246 case CDROMCDXA:
22254 22247 case CDROMSUBCODE:
22255 22248 if (!ISCD(un)) {
22256 22249 un->un_ncmds_in_driver--;
22257 22250 ASSERT(un->un_ncmds_in_driver >= 0);
22258 22251 mutex_exit(SD_MUTEX(un));
22259 22252 err = ENOTTY;
22260 22253 goto done_without_assess;
22261 22254 }
22262 22255 break;
22263 22256 case FDEJECT:
22264 22257 case DKIOCEJECT:
22265 22258 case CDROMEJECT:
22266 22259 if (!un->un_f_eject_media_supported) {
22267 22260 un->un_ncmds_in_driver--;
22268 22261 ASSERT(un->un_ncmds_in_driver >= 0);
22269 22262 mutex_exit(SD_MUTEX(un));
22270 22263 err = ENOTTY;
22271 22264 goto done_without_assess;
22272 22265 }
22273 22266 break;
22274 22267 case DKIOCFLUSHWRITECACHE:
22275 22268 mutex_exit(SD_MUTEX(un));
22276 22269 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
22277 22270 if (err != 0) {
22278 22271 mutex_enter(SD_MUTEX(un));
22279 22272 un->un_ncmds_in_driver--;
22280 22273 ASSERT(un->un_ncmds_in_driver >= 0);
22281 22274 mutex_exit(SD_MUTEX(un));
22282 22275 err = EIO;
22283 22276 goto done_quick_assess;
22284 22277 }
22285 22278 mutex_enter(SD_MUTEX(un));
22286 22279 /* FALLTHROUGH */
22287 22280 case DKIOCREMOVABLE:
22288 22281 case DKIOCHOTPLUGGABLE:
22289 22282 case DKIOCINFO:
22290 22283 case DKIOCGMEDIAINFO:
22291 22284 case DKIOCGMEDIAINFOEXT:
22292 22285 case DKIOCSOLIDSTATE:
22293 22286 case MHIOCENFAILFAST:
22294 22287 case MHIOCSTATUS:
22295 22288 case MHIOCTKOWN:
22296 22289 case MHIOCRELEASE:
22297 22290 case MHIOCGRP_INKEYS:
22298 22291 case MHIOCGRP_INRESV:
22299 22292 case MHIOCGRP_REGISTER:
22300 22293 case MHIOCGRP_CLEAR:
22301 22294 case MHIOCGRP_RESERVE:
22302 22295 case MHIOCGRP_PREEMPTANDABORT:
22303 22296 case MHIOCGRP_REGISTERANDIGNOREKEY:
22304 22297 case CDROMCLOSETRAY:
22305 22298 case USCSICMD:
22306 22299 goto skip_ready_valid;
22307 22300 default:
22308 22301 break;
22309 22302 }
22310 22303
22311 22304 mutex_exit(SD_MUTEX(un));
22312 22305 err = sd_ready_and_valid(ssc, SDPART(dev));
22313 22306 mutex_enter(SD_MUTEX(un));
22314 22307
22315 22308 if (err != SD_READY_VALID) {
22316 22309 switch (cmd) {
22317 22310 case DKIOCSTATE:
22318 22311 case CDROMGDRVSPEED:
22319 22312 case CDROMSDRVSPEED:
22320 22313 case FDEJECT: /* for eject command */
22321 22314 case DKIOCEJECT:
22322 22315 case CDROMEJECT:
22323 22316 case DKIOCREMOVABLE:
22324 22317 case DKIOCHOTPLUGGABLE:
22325 22318 break;
22326 22319 default:
22327 22320 if (un->un_f_has_removable_media) {
22328 22321 err = ENXIO;
22329 22322 } else {
22330 22323 /* Do not map SD_RESERVED_BY_OTHERS to EIO */
22331 22324 if (err == SD_RESERVED_BY_OTHERS) {
22332 22325 err = EACCES;
22333 22326 } else {
22334 22327 err = EIO;
22335 22328 }
22336 22329 }
22337 22330 un->un_ncmds_in_driver--;
22338 22331 ASSERT(un->un_ncmds_in_driver >= 0);
22339 22332 mutex_exit(SD_MUTEX(un));
22340 22333
22341 22334 goto done_without_assess;
22342 22335 }
22343 22336 }
22344 22337 }
22345 22338
22346 22339 skip_ready_valid:
22347 22340 mutex_exit(SD_MUTEX(un));
22348 22341
22349 22342 switch (cmd) {
22350 22343 case DKIOCINFO:
22351 22344 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n");
22352 22345 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag);
22353 22346 break;
22354 22347
22355 22348 case DKIOCGMEDIAINFO:
22356 22349 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n");
22357 22350 err = sd_get_media_info(dev, (caddr_t)arg, flag);
22358 22351 break;
22359 22352
22360 22353 case DKIOCGMEDIAINFOEXT:
22361 22354 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFOEXT\n");
22362 22355 err = sd_get_media_info_ext(dev, (caddr_t)arg, flag);
22363 22356 break;
22364 22357
22365 22358 case DKIOCGGEOM:
22366 22359 case DKIOCGVTOC:
22367 22360 case DKIOCGEXTVTOC:
22368 22361 case DKIOCGAPART:
22369 22362 case DKIOCPARTINFO:
22370 22363 case DKIOCEXTPARTINFO:
22371 22364 case DKIOCSGEOM:
22372 22365 case DKIOCSAPART:
22373 22366 case DKIOCGETEFI:
22374 22367 case DKIOCPARTITION:
22375 22368 case DKIOCSVTOC:
22376 22369 case DKIOCSEXTVTOC:
22377 22370 case DKIOCSETEFI:
22378 22371 case DKIOCGMBOOT:
22379 22372 case DKIOCSMBOOT:
22380 22373 case DKIOCG_PHYGEOM:
22381 22374 case DKIOCG_VIRTGEOM:
22382 22375 #if defined(__i386) || defined(__amd64)
22383 22376 case DKIOCSETEXTPART:
22384 22377 #endif
22385 22378 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd);
22386 22379
22387 22380 /* TUR should spin up */
22388 22381
22389 22382 if (un->un_f_has_removable_media)
22390 22383 err = sd_send_scsi_TEST_UNIT_READY(ssc,
22391 22384 SD_CHECK_FOR_MEDIA);
22392 22385
22393 22386 else
22394 22387 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
22395 22388
22396 22389 if (err != 0)
22397 22390 goto done_with_assess;
22398 22391
22399 22392 err = cmlb_ioctl(un->un_cmlbhandle, dev,
22400 22393 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT);
22401 22394
22402 22395 if ((err == 0) &&
22403 22396 ((cmd == DKIOCSETEFI) ||
22404 22397 ((un->un_f_pkstats_enabled) &&
22405 22398 (cmd == DKIOCSAPART || cmd == DKIOCSVTOC ||
22406 22399 cmd == DKIOCSEXTVTOC)))) {
22407 22400
22408 22401 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT,
22409 22402 (void *)SD_PATH_DIRECT);
22410 22403 if ((tmprval == 0) && un->un_f_pkstats_enabled) {
22411 22404 sd_set_pstats(un);
22412 22405 SD_TRACE(SD_LOG_IO_PARTITION, un,
22413 22406 "sd_ioctl: un:0x%p pstats created and "
22414 22407 "set\n", un);
22415 22408 }
22416 22409 }
22417 22410
22418 22411 if ((cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) ||
22419 22412 ((cmd == DKIOCSETEFI) && (tmprval == 0))) {
22420 22413
22421 22414 mutex_enter(SD_MUTEX(un));
22422 22415 if (un->un_f_devid_supported &&
22423 22416 (un->un_f_opt_fab_devid == TRUE)) {
22424 22417 if (un->un_devid == NULL) {
22425 22418 sd_register_devid(ssc, SD_DEVINFO(un),
22426 22419 SD_TARGET_IS_UNRESERVED);
22427 22420 } else {
22428 22421 /*
22429 22422 * The device id for this disk
22430 22423 * has been fabricated. The
22431 22424 * device id must be preserved
22432 22425 * by writing it back out to
22433 22426 * disk.
22434 22427 */
22435 22428 if (sd_write_deviceid(ssc) != 0) {
22436 22429 ddi_devid_free(un->un_devid);
22437 22430 un->un_devid = NULL;
22438 22431 }
22439 22432 }
22440 22433 }
22441 22434 mutex_exit(SD_MUTEX(un));
22442 22435 }
22443 22436
22444 22437 break;
22445 22438
22446 22439 case DKIOCLOCK:
22447 22440 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n");
22448 22441 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
22449 22442 SD_PATH_STANDARD);
22450 22443 goto done_with_assess;
22451 22444
22452 22445 case DKIOCUNLOCK:
22453 22446 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n");
22454 22447 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
22455 22448 SD_PATH_STANDARD);
22456 22449 goto done_with_assess;
22457 22450
22458 22451 case DKIOCSTATE: {
22459 22452 enum dkio_state state;
22460 22453 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n");
22461 22454
22462 22455 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) {
22463 22456 err = EFAULT;
22464 22457 } else {
22465 22458 err = sd_check_media(dev, state);
22466 22459 if (err == 0) {
22467 22460 if (ddi_copyout(&un->un_mediastate, (void *)arg,
22468 22461 sizeof (int), flag) != 0)
22469 22462 err = EFAULT;
22470 22463 }
22471 22464 }
22472 22465 break;
22473 22466 }
22474 22467
22475 22468 case DKIOCREMOVABLE:
22476 22469 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n");
22477 22470 i = un->un_f_has_removable_media ? 1 : 0;
22478 22471 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
22479 22472 err = EFAULT;
22480 22473 } else {
22481 22474 err = 0;
22482 22475 }
22483 22476 break;
22484 22477
22485 22478 case DKIOCSOLIDSTATE:
22486 22479 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSOLIDSTATE\n");
22487 22480 i = un->un_f_is_solid_state ? 1 : 0;
22488 22481 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
22489 22482 err = EFAULT;
22490 22483 } else {
22491 22484 err = 0;
22492 22485 }
22493 22486 break;
22494 22487
22495 22488 case DKIOCHOTPLUGGABLE:
22496 22489 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n");
22497 22490 i = un->un_f_is_hotpluggable ? 1 : 0;
22498 22491 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
22499 22492 err = EFAULT;
22500 22493 } else {
22501 22494 err = 0;
22502 22495 }
22503 22496 break;
22504 22497
22505 22498 case DKIOCREADONLY:
22506 22499 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREADONLY\n");
22507 22500 i = 0;
22508 22501 if ((ISCD(un) && !un->un_f_mmc_writable_media) ||
22509 22502 (sr_check_wp(dev) != 0)) {
22510 22503 i = 1;
22511 22504 }
22512 22505 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
22513 22506 err = EFAULT;
22514 22507 } else {
22515 22508 err = 0;
22516 22509 }
22517 22510 break;
22518 22511
22519 22512 case DKIOCGTEMPERATURE:
22520 22513 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n");
22521 22514 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag);
22522 22515 break;
22523 22516
22524 22517 case MHIOCENFAILFAST:
22525 22518 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n");
22526 22519 if ((err = drv_priv(cred_p)) == 0) {
22527 22520 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag);
22528 22521 }
22529 22522 break;
22530 22523
22531 22524 case MHIOCTKOWN:
22532 22525 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n");
22533 22526 if ((err = drv_priv(cred_p)) == 0) {
22534 22527 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag);
22535 22528 }
22536 22529 break;
22537 22530
22538 22531 case MHIOCRELEASE:
22539 22532 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n");
22540 22533 if ((err = drv_priv(cred_p)) == 0) {
22541 22534 err = sd_mhdioc_release(dev);
22542 22535 }
22543 22536 break;
22544 22537
22545 22538 case MHIOCSTATUS:
22546 22539 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n");
22547 22540 if ((err = drv_priv(cred_p)) == 0) {
22548 22541 switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) {
22549 22542 case 0:
22550 22543 err = 0;
22551 22544 break;
22552 22545 case EACCES:
22553 22546 *rval_p = 1;
22554 22547 err = 0;
22555 22548 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
22556 22549 break;
22557 22550 default:
22558 22551 err = EIO;
22559 22552 goto done_with_assess;
22560 22553 }
22561 22554 }
22562 22555 break;
22563 22556
22564 22557 case MHIOCQRESERVE:
22565 22558 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n");
22566 22559 if ((err = drv_priv(cred_p)) == 0) {
22567 22560 err = sd_reserve_release(dev, SD_RESERVE);
22568 22561 }
22569 22562 break;
22570 22563
22571 22564 case MHIOCREREGISTERDEVID:
22572 22565 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n");
22573 22566 if (drv_priv(cred_p) == EPERM) {
22574 22567 err = EPERM;
22575 22568 } else if (!un->un_f_devid_supported) {
22576 22569 err = ENOTTY;
22577 22570 } else {
22578 22571 err = sd_mhdioc_register_devid(dev);
22579 22572 }
22580 22573 break;
22581 22574
22582 22575 case MHIOCGRP_INKEYS:
22583 22576 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n");
22584 22577 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
22585 22578 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22586 22579 err = ENOTSUP;
22587 22580 } else {
22588 22581 err = sd_mhdioc_inkeys(dev, (caddr_t)arg,
22589 22582 flag);
22590 22583 }
22591 22584 }
22592 22585 break;
22593 22586
22594 22587 case MHIOCGRP_INRESV:
22595 22588 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n");
22596 22589 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
22597 22590 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22598 22591 err = ENOTSUP;
22599 22592 } else {
22600 22593 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag);
22601 22594 }
22602 22595 }
22603 22596 break;
22604 22597
22605 22598 case MHIOCGRP_REGISTER:
22606 22599 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n");
22607 22600 if ((err = drv_priv(cred_p)) != EPERM) {
22608 22601 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22609 22602 err = ENOTSUP;
22610 22603 } else if (arg != NULL) {
22611 22604 mhioc_register_t reg;
22612 22605 if (ddi_copyin((void *)arg, &reg,
22613 22606 sizeof (mhioc_register_t), flag) != 0) {
22614 22607 err = EFAULT;
22615 22608 } else {
22616 22609 err =
22617 22610 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22618 22611 ssc, SD_SCSI3_REGISTER,
22619 22612 (uchar_t *)&reg);
22620 22613 if (err != 0)
22621 22614 goto done_with_assess;
22622 22615 }
22623 22616 }
22624 22617 }
22625 22618 break;
22626 22619
22627 22620 case MHIOCGRP_CLEAR:
22628 22621 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_CLEAR\n");
22629 22622 if ((err = drv_priv(cred_p)) != EPERM) {
22630 22623 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22631 22624 err = ENOTSUP;
22632 22625 } else if (arg != NULL) {
22633 22626 mhioc_register_t reg;
22634 22627 if (ddi_copyin((void *)arg, &reg,
22635 22628 sizeof (mhioc_register_t), flag) != 0) {
22636 22629 err = EFAULT;
22637 22630 } else {
22638 22631 err =
22639 22632 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22640 22633 ssc, SD_SCSI3_CLEAR,
22641 22634 (uchar_t *)&reg);
22642 22635 if (err != 0)
22643 22636 goto done_with_assess;
22644 22637 }
22645 22638 }
22646 22639 }
22647 22640 break;
22648 22641
22649 22642 case MHIOCGRP_RESERVE:
22650 22643 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n");
22651 22644 if ((err = drv_priv(cred_p)) != EPERM) {
22652 22645 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22653 22646 err = ENOTSUP;
22654 22647 } else if (arg != NULL) {
22655 22648 mhioc_resv_desc_t resv_desc;
22656 22649 if (ddi_copyin((void *)arg, &resv_desc,
22657 22650 sizeof (mhioc_resv_desc_t), flag) != 0) {
22658 22651 err = EFAULT;
22659 22652 } else {
22660 22653 err =
22661 22654 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22662 22655 ssc, SD_SCSI3_RESERVE,
22663 22656 (uchar_t *)&resv_desc);
22664 22657 if (err != 0)
22665 22658 goto done_with_assess;
22666 22659 }
22667 22660 }
22668 22661 }
22669 22662 break;
22670 22663
22671 22664 case MHIOCGRP_PREEMPTANDABORT:
22672 22665 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n");
22673 22666 if ((err = drv_priv(cred_p)) != EPERM) {
22674 22667 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22675 22668 err = ENOTSUP;
22676 22669 } else if (arg != NULL) {
22677 22670 mhioc_preemptandabort_t preempt_abort;
22678 22671 if (ddi_copyin((void *)arg, &preempt_abort,
22679 22672 sizeof (mhioc_preemptandabort_t),
22680 22673 flag) != 0) {
22681 22674 err = EFAULT;
22682 22675 } else {
22683 22676 err =
22684 22677 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22685 22678 ssc, SD_SCSI3_PREEMPTANDABORT,
22686 22679 (uchar_t *)&preempt_abort);
22687 22680 if (err != 0)
22688 22681 goto done_with_assess;
22689 22682 }
22690 22683 }
22691 22684 }
22692 22685 break;
22693 22686
22694 22687 case MHIOCGRP_REGISTERANDIGNOREKEY:
22695 22688 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n");
22696 22689 if ((err = drv_priv(cred_p)) != EPERM) {
22697 22690 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22698 22691 err = ENOTSUP;
22699 22692 } else if (arg != NULL) {
22700 22693 mhioc_registerandignorekey_t r_and_i;
22701 22694 if (ddi_copyin((void *)arg, (void *)&r_and_i,
22702 22695 sizeof (mhioc_registerandignorekey_t),
22703 22696 flag) != 0) {
22704 22697 err = EFAULT;
22705 22698 } else {
22706 22699 err =
22707 22700 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22708 22701 ssc, SD_SCSI3_REGISTERANDIGNOREKEY,
22709 22702 (uchar_t *)&r_and_i);
22710 22703 if (err != 0)
22711 22704 goto done_with_assess;
22712 22705 }
22713 22706 }
22714 22707 }
22715 22708 break;
22716 22709
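/*
 * The MHIOCGRP_* cases above all follow the same pattern: check
 * privilege, reject SCSI-2 reservation mode, copy the argument in,
 * then issue a PERSISTENT RESERVE OUT. A minimal user-space sketch of
 * driving MHIOCGRP_REGISTER follows; it assumes the usual <sys/mhd.h>
 * mhioc_register_t layout (oldkey/newkey/aptpl), and the key bytes and
 * device path are purely illustrative.
 */
#include <sys/types.h>
#include <sys/mhd.h>
#include <fcntl.h>
#include <string.h>
#include <stropts.h>
#include <unistd.h>

int
register_pgr_key(const char *path)	/* e.g. a /dev/rdsk/... raw device */
{
	mhioc_register_t reg;
	int fd, rv;

	if ((fd = open(path, O_RDWR)) < 0)
		return (-1);
	/* zeroed oldkey: this is a fresh registration */
	(void) memset(&reg, 0, sizeof (reg));
	(void) memcpy(reg.newkey.key, "KEY00001", MHIOC_RESV_KEY_SIZE);
	reg.aptpl = B_FALSE;
	/* fails with EPERM (no privilege) or ENOTSUP (SCSI-2 mode) */
	rv = ioctl(fd, MHIOCGRP_REGISTER, &reg);
	(void) close(fd);
	return (rv);
}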
22717 22710 case USCSICMD:
22718 22711 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n");
22719 22712 cr = ddi_get_cred();
22720 22713 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) {
22721 22714 err = EPERM;
22722 22715 } else {
22723 22716 enum uio_seg uioseg;
22724 22717
22725 22718 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE :
22726 22719 UIO_USERSPACE;
22727 22720 if (un->un_f_format_in_progress == TRUE) {
22728 22721 err = EAGAIN;
22729 22722 break;
22730 22723 }
22731 22724
22732 22725 err = sd_ssc_send(ssc,
22733 22726 (struct uscsi_cmd *)arg,
22734 22727 flag, uioseg, SD_PATH_STANDARD);
22735 22728 if (err != 0)
22736 22729 goto done_with_assess;
22737 22730 else
22738 22731 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
22739 22732 }
22740 22733 break;
22741 22734
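/*
 * A hedged user-space sketch of the USCSICMD path above, issuing a
 * 6-byte INQUIRY. Field names follow <sys/scsi/impl/uscsi.h>; the
 * timeout and buffer length are arbitrary example values.
 */
#include <sys/types.h>
#include <sys/scsi/impl/uscsi.h>
#include <string.h>
#include <stropts.h>

int
do_inquiry(int fd, uchar_t *buf, uchar_t len)
{
	struct uscsi_cmd ucmd;
	uchar_t cdb[6] = { 0x12, 0, 0, 0, len, 0 };	/* INQUIRY */

	(void) memset(&ucmd, 0, sizeof (ucmd));
	ucmd.uscsi_cdb = (caddr_t)cdb;
	ucmd.uscsi_cdblen = sizeof (cdb);
	ucmd.uscsi_bufaddr = (caddr_t)buf;
	ucmd.uscsi_buflen = len;
	ucmd.uscsi_flags = USCSI_READ | USCSI_SILENT;
	ucmd.uscsi_timeout = 30;
	/* EAGAIN while a format is in progress; EPERM without privilege */
	return (ioctl(fd, USCSICMD, &ucmd));
}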
22742 22735 case CDROMPAUSE:
22743 22736 case CDROMRESUME:
22744 22737 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n");
22745 22738 if (!ISCD(un)) {
22746 22739 err = ENOTTY;
22747 22740 } else {
22748 22741 err = sr_pause_resume(dev, cmd);
22749 22742 }
22750 22743 break;
22751 22744
22752 22745 case CDROMPLAYMSF:
22753 22746 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n");
22754 22747 if (!ISCD(un)) {
22755 22748 err = ENOTTY;
22756 22749 } else {
22757 22750 err = sr_play_msf(dev, (caddr_t)arg, flag);
22758 22751 }
22759 22752 break;
22760 22753
22761 22754 case CDROMPLAYTRKIND:
22762 22755 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n");
22763 22756 #if defined(__i386) || defined(__amd64)
22764 22757 /*
22765 22758 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead
22766 22759 */
22767 22760 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
22768 22761 #else
22769 22762 if (!ISCD(un)) {
22770 22763 #endif
22771 22764 err = ENOTTY;
22772 22765 } else {
22773 22766 err = sr_play_trkind(dev, (caddr_t)arg, flag);
22774 22767 }
22775 22768 break;
22776 22769
22777 22770 case CDROMREADTOCHDR:
22778 22771 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n");
22779 22772 if (!ISCD(un)) {
22780 22773 err = ENOTTY;
22781 22774 } else {
22782 22775 err = sr_read_tochdr(dev, (caddr_t)arg, flag);
22783 22776 }
22784 22777 break;
22785 22778
22786 22779 case CDROMREADTOCENTRY:
22787 22780 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n");
22788 22781 if (!ISCD(un)) {
22789 22782 err = ENOTTY;
22790 22783 } else {
22791 22784 err = sr_read_tocentry(dev, (caddr_t)arg, flag);
22792 22785 }
22793 22786 break;
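/*
 * The CDROM* cases are thin wrappers around the sr_* helpers; from
 * user space they take the structures in <sys/cdio.h>. A small sketch
 * reading the TOC header (fd assumed open on a CD device):
 */
#include <sys/cdio.h>
#include <stdio.h>
#include <stropts.h>

void
print_track_range(int fd)
{
	struct cdrom_tochdr th;

	/* ENOTTY if the target is not a CD-ROM (see the ISCD() checks) */
	if (ioctl(fd, CDROMREADTOCHDR, &th) == 0)
		(void) printf("tracks %d-%d\n", th.cdth_trk0, th.cdth_trk1);
}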
22794 22787
22795 22788 case CDROMSTOP:
22796 22789 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n");
22797 22790 if (!ISCD(un)) {
22798 22791 err = ENOTTY;
22799 22792 } else {
22800 22793 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
22801 22794 SD_TARGET_STOP, SD_PATH_STANDARD);
22802 22795 goto done_with_assess;
22803 22796 }
22804 22797 break;
22805 22798
22806 22799 case CDROMSTART:
22807 22800 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n");
22808 22801 if (!ISCD(un)) {
22809 22802 err = ENOTTY;
22810 22803 } else {
22811 22804 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
22812 22805 SD_TARGET_START, SD_PATH_STANDARD);
22813 22806 goto done_with_assess;
22814 22807 }
22815 22808 break;
22816 22809
22817 22810 case CDROMCLOSETRAY:
22818 22811 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n");
22819 22812 if (!ISCD(un)) {
22820 22813 err = ENOTTY;
22821 22814 } else {
22822 22815 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
22823 22816 SD_TARGET_CLOSE, SD_PATH_STANDARD);
22824 22817 goto done_with_assess;
22825 22818 }
22826 22819 break;
22827 22820
22828 22821 case FDEJECT: /* for eject command */
22829 22822 case DKIOCEJECT:
22830 22823 case CDROMEJECT:
22831 22824 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n");
22832 22825 if (!un->un_f_eject_media_supported) {
22833 22826 err = ENOTTY;
22834 22827 } else {
22835 22828 err = sr_eject(dev);
22836 22829 }
22837 22830 break;
22838 22831
22839 22832 case CDROMVOLCTRL:
22840 22833 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n");
22841 22834 if (!ISCD(un)) {
22842 22835 err = ENOTTY;
22843 22836 } else {
22844 22837 err = sr_volume_ctrl(dev, (caddr_t)arg, flag);
22845 22838 }
22846 22839 break;
22847 22840
22848 22841 case CDROMSUBCHNL:
22849 22842 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n");
22850 22843 if (!ISCD(un)) {
22851 22844 err = ENOTTY;
22852 22845 } else {
22853 22846 err = sr_read_subchannel(dev, (caddr_t)arg, flag);
22854 22847 }
22855 22848 break;
22856 22849
22857 22850 case CDROMREADMODE2:
22858 22851 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n");
22859 22852 if (!ISCD(un)) {
22860 22853 err = ENOTTY;
22861 22854 } else if (un->un_f_cfg_is_atapi == TRUE) {
22862 22855 /*
22863 22856 * If the drive supports READ CD, use that instead of
22864 22857 * switching the LBA size via a MODE SELECT
22865 22858 * Block Descriptor
22866 22859 */
22867 22860 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
22868 22861 } else {
22869 22862 err = sr_read_mode2(dev, (caddr_t)arg, flag);
22870 22863 }
22871 22864 break;
22872 22865
22873 22866 case CDROMREADMODE1:
22874 22867 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
22875 22868 if (!ISCD(un)) {
22876 22869 err = ENOTTY;
22877 22870 } else {
22878 22871 err = sr_read_mode1(dev, (caddr_t)arg, flag);
22879 22872 }
22880 22873 break;
22881 22874
22882 22875 case CDROMREADOFFSET:
22883 22876 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
22884 22877 if (!ISCD(un)) {
22885 22878 err = ENOTTY;
22886 22879 } else {
22887 22880 err = sr_read_sony_session_offset(dev, (caddr_t)arg,
22888 22881 flag);
22889 22882 }
22890 22883 break;
22891 22884
22892 22885 case CDROMSBLKMODE:
22893 22886 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
22894 22887 /*
22895 22888 * There is no means of changing the block size on ATAPI
22896 22889 * drives, thus return ENOTTY if the drive type is ATAPI
22897 22890 */
22898 22891 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
22899 22892 err = ENOTTY;
22900 22893 } else if (un->un_f_mmc_cap == TRUE) {
22901 22894
22902 22895 /*
22903 22896 * MMC Devices do not support changing the
22904 22897 * logical block size
22905 22898 *
22906 22899 * Note: EINVAL is being returned instead of ENOTTY to
22907 22900 * maintain consistency with the original mmc
22908 22901 * driver update.
22909 22902 */
22910 22903 err = EINVAL;
22911 22904 } else {
22912 22905 mutex_enter(SD_MUTEX(un));
22913 22906 if ((!(un->un_exclopen & (1<<SDPART(dev)))) ||
22914 22907 (un->un_ncmds_in_transport > 0)) {
22915 22908 mutex_exit(SD_MUTEX(un));
22916 22909 err = EINVAL;
22917 22910 } else {
22918 22911 mutex_exit(SD_MUTEX(un));
22919 22912 err = sr_change_blkmode(dev, cmd, arg, flag);
22920 22913 }
22921 22914 }
22922 22915 break;
22923 22916
22924 22917 case CDROMGBLKMODE:
22925 22918 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
22926 22919 if (!ISCD(un)) {
22927 22920 err = ENOTTY;
22928 22921 } else if ((un->un_f_cfg_is_atapi != FALSE) &&
22929 22922 (un->un_f_blockcount_is_valid != FALSE)) {
22930 22923 /*
22931 22924 * Drive is an ATAPI drive so return target block
22932 22925 * size for ATAPI drives since we cannot change the
22933 22926 * blocksize on ATAPI drives. Used primarily to detect
22934 22927 * if an ATAPI cdrom is present.
22935 22928 */
22936 22929 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
22937 22930 sizeof (int), flag) != 0) {
22938 22931 err = EFAULT;
22939 22932 } else {
22940 22933 err = 0;
22941 22934 }
22942 22935
22943 22936 } else {
22944 22937 /*
22945 22938 * Drive supports changing block sizes via a Mode
22946 22939 * Select.
22947 22940 */
22948 22941 err = sr_change_blkmode(dev, cmd, arg, flag);
22949 22942 }
22950 22943 break;
22951 22944
22952 22945 case CDROMGDRVSPEED:
22953 22946 case CDROMSDRVSPEED:
22954 22947 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
22955 22948 if (!ISCD(un)) {
22956 22949 err = ENOTTY;
22957 22950 } else if (un->un_f_mmc_cap == TRUE) {
22958 22951 /*
22959 22952 * Note: In the future the driver implementation
22960 22953 * for getting and
22961 22954 * setting cd speed should entail:
22962 22955 * 1) If non-mmc try the Toshiba mode page
22963 22956 * (sr_change_speed)
22964 22957 * 2) If mmc but no support for Real Time Streaming try
22965 22958 * the SET CD SPEED (0xBB) command
22966 22959 * (sr_atapi_change_speed)
22967 22960 * 3) If mmc and support for Real Time Streaming
22968 22961 * try the GET PERFORMANCE and SET STREAMING
22969 22962 * commands (not yet implemented, 4380808)
22970 22963 */
22971 22964 /*
22972 22965 * As per recent MMC spec, CD-ROM speed is variable
22973 22966 * and changes with LBA. Since there is no such
22974 22967 * thing as drive speed now, fail this ioctl.
22975 22968 *
22976 22969 * Note: EINVAL is returned for consistency with the original
22977 22970 * implementation which included support for getting
22978 22971 * the drive speed of mmc devices but not setting
22979 22972 * the drive speed. Thus EINVAL would be returned
22980 22973 * if a set request was made for an mmc device.
22981 22974 * We no longer support get or set speed for
22982 22975 * mmc but need to remain consistent with regard
22983 22976 * to the error code returned.
22984 22977 */
22985 22978 err = EINVAL;
22986 22979 } else if (un->un_f_cfg_is_atapi == TRUE) {
22987 22980 err = sr_atapi_change_speed(dev, cmd, arg, flag);
22988 22981 } else {
22989 22982 err = sr_change_speed(dev, cmd, arg, flag);
22990 22983 }
22991 22984 break;
22992 22985
22993 22986 case CDROMCDDA:
22994 22987 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n");
22995 22988 if (!ISCD(un)) {
22996 22989 err = ENOTTY;
22997 22990 } else {
22998 22991 err = sr_read_cdda(dev, (void *)arg, flag);
22999 22992 }
23000 22993 break;
23001 22994
23002 22995 case CDROMCDXA:
23003 22996 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n");
23004 22997 if (!ISCD(un)) {
23005 22998 err = ENOTTY;
23006 22999 } else {
23007 23000 err = sr_read_cdxa(dev, (caddr_t)arg, flag);
23008 23001 }
23009 23002 break;
23010 23003
23011 23004 case CDROMSUBCODE:
23012 23005 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n");
23013 23006 if (!ISCD(un)) {
23014 23007 err = ENOTTY;
23015 23008 } else {
23016 23009 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag);
23017 23010 }
23018 23011 break;
23019 23012
23020 23013
23021 23014 #ifdef SDDEBUG
23022 23015 /* RESET/ABORTS testing ioctls */
23023 23016 case DKIOCRESET: {
23024 23017 int reset_level;
23025 23018
23026 23019 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) {
23027 23020 err = EFAULT;
23028 23021 } else {
23029 23022 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: "
23030 23023 "reset_level = 0x%lx\n", reset_level);
23031 23024 if (scsi_reset(SD_ADDRESS(un), reset_level)) {
23032 23025 err = 0;
23033 23026 } else {
23034 23027 err = EIO;
23035 23028 }
23036 23029 }
23037 23030 break;
23038 23031 }
23039 23032
23040 23033 case DKIOCABORT:
23041 23034 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n");
23042 23035 if (scsi_abort(SD_ADDRESS(un), NULL)) {
23043 23036 err = 0;
23044 23037 } else {
23045 23038 err = EIO;
23046 23039 }
23047 23040 break;
23048 23041 #endif
23049 23042
23050 23043 #ifdef SD_FAULT_INJECTION
23051 23044 /* SDIOC FaultInjection testing ioctls */
23052 23045 case SDIOCSTART:
23053 23046 case SDIOCSTOP:
23054 23047 case SDIOCINSERTPKT:
23055 23048 case SDIOCINSERTXB:
23056 23049 case SDIOCINSERTUN:
23057 23050 case SDIOCINSERTARQ:
23058 23051 case SDIOCPUSH:
23059 23052 case SDIOCRETRIEVE:
23060 23053 case SDIOCRUN:
23061 23054 SD_INFO(SD_LOG_SDTEST, un, "sdioctl:"
23062 23055 "SDIOC detected cmd:0x%X:\n", cmd);
23063 23056 /* call error generator */
23064 23057 sd_faultinjection_ioctl(cmd, arg, un);
23065 23058 err = 0;
23066 23059 break;
23067 23060
23068 23061 #endif /* SD_FAULT_INJECTION */
23069 23062
23070 23063 case DKIOCFLUSHWRITECACHE:
23071 23064 {
23072 23065 struct dk_callback *dkc = (struct dk_callback *)arg;
23073 23066
23074 23067 mutex_enter(SD_MUTEX(un));
23075 23068 if (!un->un_f_sync_cache_supported ||
23076 23069 !un->un_f_write_cache_enabled) {
23077 23070 err = un->un_f_sync_cache_supported ?
23078 23071 0 : ENOTSUP;
23079 23072 mutex_exit(SD_MUTEX(un));
23080 23073 if ((flag & FKIOCTL) && dkc != NULL &&
23081 23074 dkc->dkc_callback != NULL) {
23082 23075 (*dkc->dkc_callback)(dkc->dkc_cookie,
23083 23076 err);
23084 23077 /*
23085 23078 * Did callback and reported error.
23086 23079 * Since we did a callback, ioctl
23087 23080 * should return 0.
23088 23081 */
23089 23082 err = 0;
23090 23083 }
23091 23084 break;
23092 23085 }
23093 23086 mutex_exit(SD_MUTEX(un));
23094 23087
23095 23088 if ((flag & FKIOCTL) && dkc != NULL &&
23096 23089 dkc->dkc_callback != NULL) {
23097 23090 /* async SYNC CACHE request */
23098 23091 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
23099 23092 } else {
23100 23093 /* synchronous SYNC CACHE request */
23101 23094 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL);
23102 23095 }
23103 23096 }
23104 23097 break;
23105 23098
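/*
 * In-kernel callers can get the asynchronous form of the flush above
 * by passing FKIOCTL plus a dk_callback, for example through LDI. A
 * hedged sketch; my_flush_done and the ldi_handle_t are assumptions
 * made for illustration, not part of this driver.
 */
#include <sys/dkio.h>
#include <sys/file.h>
#include <sys/sunldi.h>
#include <sys/cred.h>

static void
my_flush_done(void *cookie, int error)	/* hypothetical completion hook */
{
	/* runs when the SYNCHRONIZE CACHE command completes */
}

static int
flush_write_cache_async(ldi_handle_t lh, void *cookie)
{
	struct dk_callback dkc;

	dkc.dkc_callback = my_flush_done;
	dkc.dkc_cookie = cookie;
	dkc.dkc_flag = 0;
	/* FKIOCTL marks arg as a kernel address, enabling the callback */
	return (ldi_ioctl(lh, DKIOCFLUSHWRITECACHE, (intptr_t)&dkc,
	    FKIOCTL, kcred, NULL));
}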
23106 23099 case DKIOCGETWCE: {
23107 23100
23108 23101 int wce;
23109 23102
23110 23103 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) {
23111 23104 break;
23112 23105 }
23113 23106
23114 23107 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) {
23115 23108 err = EFAULT;
23116 23109 }
23117 23110 break;
23118 23111 }
23119 23112
23120 23113 case DKIOCSETWCE: {
23121 23114
23122 23115 int wce, sync_supported;
23123 23116 int cur_wce = 0;
23124 23117
23125 23118 if (!un->un_f_cache_mode_changeable) {
23126 23119 err = EINVAL;
23127 23120 break;
23128 23121 }
23129 23122
23130 23123 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) {
23131 23124 err = EFAULT;
23132 23125 break;
23133 23126 }
23134 23127
23135 23128 /*
23136 23129 * Synchronize multiple threads trying to enable
23137 23130 * or disable the cache via the un_wcc_cv
23138 23131 * condition variable.
23139 23132 */
23140 23133 mutex_enter(SD_MUTEX(un));
23141 23134
23142 23135 /*
23143 23136 * Don't allow the cache to be enabled if the
23144 23137 * config file has it disabled.
23145 23138 */
23146 23139 if (un->un_f_opt_disable_cache && wce) {
23147 23140 mutex_exit(SD_MUTEX(un));
23148 23141 err = EINVAL;
23149 23142 break;
23150 23143 }
23151 23144
23152 23145 /*
23153 23146 * Wait for write cache change in progress
23154 23147 * bit to be clear before proceeding.
23155 23148 */
23156 23149 while (un->un_f_wcc_inprog)
23157 23150 cv_wait(&un->un_wcc_cv, SD_MUTEX(un));
23158 23151
23159 23152 un->un_f_wcc_inprog = 1;
23160 23153
23161 23154 mutex_exit(SD_MUTEX(un));
23162 23155
23163 23156 /*
23164 23157 * Get the current write cache state
23165 23158 */
23166 23159 if ((err = sd_get_write_cache_enabled(ssc, &cur_wce)) != 0) {
23167 23160 mutex_enter(SD_MUTEX(un));
23168 23161 un->un_f_wcc_inprog = 0;
23169 23162 cv_broadcast(&un->un_wcc_cv);
23170 23163 mutex_exit(SD_MUTEX(un));
23171 23164 break;
23172 23165 }
23173 23166
23174 23167 mutex_enter(SD_MUTEX(un));
23175 23168 un->un_f_write_cache_enabled = (cur_wce != 0);
23176 23169
23177 23170 if (un->un_f_write_cache_enabled && wce == 0) {
23178 23171 /*
23179 23172 * Disable the write cache. Don't clear
23180 23173 * un_f_write_cache_enabled until after
23181 23174 * the mode select and flush are complete.
23182 23175 */
23183 23176 sync_supported = un->un_f_sync_cache_supported;
23184 23177
23185 23178 /*
23186 23179 * If cache flush is suppressed, we assume that the
23187 23180 * controller firmware will take care of managing the
23188 23181 * write cache for us: no need to explicitly
23189 23182 * disable it.
23190 23183 */
23191 23184 if (!un->un_f_suppress_cache_flush) {
23192 23185 mutex_exit(SD_MUTEX(un));
23193 23186 if ((err = sd_cache_control(ssc,
23194 23187 SD_CACHE_NOCHANGE,
23195 23188 SD_CACHE_DISABLE)) == 0 &&
23196 23189 sync_supported) {
23197 23190 err = sd_send_scsi_SYNCHRONIZE_CACHE(un,
23198 23191 NULL);
23199 23192 }
23200 23193 } else {
23201 23194 mutex_exit(SD_MUTEX(un));
23202 23195 }
23203 23196
23204 23197 mutex_enter(SD_MUTEX(un));
23205 23198 if (err == 0) {
23206 23199 un->un_f_write_cache_enabled = 0;
23207 23200 }
23208 23201
23209 23202 } else if (!un->un_f_write_cache_enabled && wce != 0) {
23210 23203 /*
23211 23204 * Set un_f_write_cache_enabled first, so there is
23212 23205 * no window where the cache is enabled, but the
23213 23206 * bit says it isn't.
23214 23207 */
23215 23208 un->un_f_write_cache_enabled = 1;
23216 23209
23217 23210 /*
23218 23211 * If cache flush is suppressed, we assume that the
23219 23212 * controller firmware will take care of managing the
23220 23213 * write cache for us: no need to explicitly
23221 23214 * enable it.
23222 23215 */
23223 23216 if (!un->un_f_suppress_cache_flush) {
23224 23217 mutex_exit(SD_MUTEX(un));
23225 23218 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE,
23226 23219 SD_CACHE_ENABLE);
23227 23220 } else {
23228 23221 mutex_exit(SD_MUTEX(un));
23229 23222 }
23230 23223
23231 23224 mutex_enter(SD_MUTEX(un));
23232 23225
23233 23226 if (err) {
23234 23227 un->un_f_write_cache_enabled = 0;
23235 23228 }
23236 23229 }
23237 23230
23238 23231 un->un_f_wcc_inprog = 0;
23239 23232 cv_broadcast(&un->un_wcc_cv);
23240 23233 mutex_exit(SD_MUTEX(un));
23241 23234 break;
23242 23235 }
23243 23236
23244 23237 default:
23245 23238 err = ENOTTY;
23246 23239 break;
23247 23240 }
23248 23241 mutex_enter(SD_MUTEX(un));
23249 23242 un->un_ncmds_in_driver--;
23250 23243 ASSERT(un->un_ncmds_in_driver >= 0);
23251 23244 mutex_exit(SD_MUTEX(un));
23252 23245
23253 23246
23254 23247 done_without_assess:
23255 23248 sd_ssc_fini(ssc);
23256 23249
23257 23250 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err);
23258 23251 return (err);
23259 23252
23260 23253 done_with_assess:
23261 23254 mutex_enter(SD_MUTEX(un));
23262 23255 un->un_ncmds_in_driver--;
23263 23256 ASSERT(un->un_ncmds_in_driver >= 0);
23264 23257 mutex_exit(SD_MUTEX(un));
23265 23258
23266 23259 done_quick_assess:
23267 23260 if (err != 0)
23268 23261 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23269 23262 /* Uninitialize sd_ssc_t pointer */
23270 23263 sd_ssc_fini(ssc);
23271 23264
23272 23265 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err);
23273 23266 return (err);
23274 23267 }
23275 23268
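/*
 * A user-space sketch of the DKIOCGETWCE/DKIOCSETWCE pair handled
 * above; both take a pointer to int (0 = write cache off, nonzero =
 * on). Error handling is abbreviated for illustration.
 */
#include <sys/dkio.h>
#include <stropts.h>

int
enable_write_cache(int fd)
{
	int wce;

	if (ioctl(fd, DKIOCGETWCE, &wce) != 0)
		return (-1);
	if (wce != 0)
		return (0);		/* already enabled */
	wce = 1;
	/* EINVAL if the cache mode is not changeable or is disabled */
	return (ioctl(fd, DKIOCSETWCE, &wce));
}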
23276 23269
23277 23270 /*
23278 23271 * Function: sd_dkio_ctrl_info
23279 23272 *
23280 23273 * Description: This routine is the driver entry point for handling controller
23281 23274 * information ioctl requests (DKIOCINFO).
23282 23275 *
23283 23276 * Arguments: dev - the device number
23284 23277 * arg - pointer to user provided dk_cinfo structure
23285 23278 * specifying the controller type and attributes.
23286 23279 * flag - this argument is a pass through to ddi_copyxxx()
23287 23280 * directly from the mode argument of ioctl().
23288 23281 *
23289 23282 * Return Code: 0
23290 23283 * EFAULT
23291 23284 * ENXIO
23292 23285 */
23293 23286
23294 23287 static int
23295 23288 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag)
23296 23289 {
23297 23290 struct sd_lun *un = NULL;
23298 23291 struct dk_cinfo *info;
23299 23292 dev_info_t *pdip;
23300 23293 int lun, tgt;
23301 23294
23302 23295 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23303 23296 return (ENXIO);
23304 23297 }
23305 23298
23306 23299 info = (struct dk_cinfo *)
23307 23300 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP);
23308 23301
23309 23302 switch (un->un_ctype) {
23310 23303 case CTYPE_CDROM:
23311 23304 info->dki_ctype = DKC_CDROM;
23312 23305 break;
23313 23306 default:
23314 23307 info->dki_ctype = DKC_SCSI_CCS;
23315 23308 break;
23316 23309 }
23317 23310 pdip = ddi_get_parent(SD_DEVINFO(un));
23318 23311 info->dki_cnum = ddi_get_instance(pdip);
23319 23312 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) {
23320 23313 (void) strcpy(info->dki_cname, ddi_get_name(pdip));
23321 23314 } else {
23322 23315 (void) strncpy(info->dki_cname, ddi_node_name(pdip),
23323 23316 DK_DEVLEN - 1);
23324 23317 }
23325 23318
23326 23319 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
23327 23320 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0);
23328 23321 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
23329 23322 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0);
23330 23323
23331 23324 /* Unit Information */
23332 23325 info->dki_unit = ddi_get_instance(SD_DEVINFO(un));
23333 23326 info->dki_slave = ((tgt << 3) | lun);
23334 23327 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)),
23335 23328 DK_DEVLEN - 1);
23336 23329 info->dki_flags = DKI_FMTVOL;
23337 23330 info->dki_partition = SDPART(dev);
23338 23331
23339 23332 /* Max Transfer size of this device in blocks */
23340 23333 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize;
23341 23334 info->dki_addr = 0;
23342 23335 info->dki_space = 0;
23343 23336 info->dki_prio = 0;
23344 23337 info->dki_vec = 0;
23345 23338
23346 23339 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) {
23347 23340 kmem_free(info, sizeof (struct dk_cinfo));
23348 23341 return (EFAULT);
23349 23342 } else {
23350 23343 kmem_free(info, sizeof (struct dk_cinfo));
23351 23344 return (0);
23352 23345 }
23353 23346 }
23354 23347
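/*
 * A user-space sketch of consuming DKIOCINFO as filled in above; all
 * fields shown are populated by sd_dkio_ctrl_info().
 */
#include <sys/dkio.h>
#include <stdio.h>
#include <stropts.h>

void
print_ctrl_info(int fd)
{
	struct dk_cinfo ci;

	if (ioctl(fd, DKIOCINFO, &ci) != 0)
		return;
	(void) printf("%s%d on %s%d, partition %d, maxxfer %d blocks\n",
	    ci.dki_dname, (int)ci.dki_unit, ci.dki_cname, ci.dki_cnum,
	    ci.dki_partition, ci.dki_maxtransfer);
}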
23355 23348 /*
23356 23349 * Function: sd_get_media_info_com
23357 23350 *
23358 23351 * Description: This routine returns the information required to populate
23359 23352 * the fields for the dk_minfo/dk_minfo_ext structures.
23360 23353 *
23361 23354 * Arguments: dev - the device number
23362 23355 * dki_media_type - media_type
23363 23356 * dki_lbsize - logical block size
23364 23357 * dki_capacity - capacity in blocks
23365 23358 * dki_pbsize - physical block size (if requested)
23366 23359 *
23367 23360 * Return Code: 0
23368 23361 * EACCES
23369 23362 * EFAULT
23370 23363 * ENXIO
23371 23364 * EIO
23372 23365 */
23373 23366 static int
23374 23367 sd_get_media_info_com(dev_t dev, uint_t *dki_media_type, uint_t *dki_lbsize,
23375 23368 diskaddr_t *dki_capacity, uint_t *dki_pbsize)
23376 23369 {
23377 23370 struct sd_lun *un = NULL;
23378 23371 struct uscsi_cmd com;
23379 23372 struct scsi_inquiry *sinq;
23380 23373 u_longlong_t media_capacity;
23381 23374 uint64_t capacity;
23382 23375 uint_t lbasize;
23383 23376 uint_t pbsize;
23384 23377 uchar_t *out_data;
23385 23378 uchar_t *rqbuf;
23386 23379 int rval = 0;
23387 23380 int rtn;
23388 23381 sd_ssc_t *ssc;
23389 23382
23390 23383 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
23391 23384 (un->un_state == SD_STATE_OFFLINE)) {
23392 23385 return (ENXIO);
23393 23386 }
23394 23387
23395 23388 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info_com: entry\n");
23396 23389
23397 23390 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
23398 23391 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
23399 23392 ssc = sd_ssc_init(un);
23400 23393
23401 23394 /* Issue a TUR to determine if the drive is ready with media present */
23402 23395 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA);
23403 23396 if (rval == ENXIO) {
23404 23397 goto done;
23405 23398 } else if (rval != 0) {
23406 23399 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23407 23400 }
23408 23401
23409 23402 /* Now get configuration data */
23410 23403 if (ISCD(un)) {
23411 23404 *dki_media_type = DK_CDROM;
23412 23405
23413 23406 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */
23414 23407 if (un->un_f_mmc_cap == TRUE) {
23415 23408 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf,
23416 23409 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN,
23417 23410 SD_PATH_STANDARD);
23418 23411
23419 23412 if (rtn) {
23420 23413 /*
23421 23414 * We ignore all failures for CD devices, and the
23422 23415 * assessment must come before the processing code
23423 23416 * so that the FMA assessment is not missed.
23424 23417 */
23425 23418 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23426 23419 /*
23427 23420 * Failed for other than an illegal request
23428 23421 * or command not supported
23429 23422 */
23430 23423 if ((com.uscsi_status == STATUS_CHECK) &&
23431 23424 (com.uscsi_rqstatus == STATUS_GOOD)) {
23432 23425 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) ||
23433 23426 (rqbuf[12] != 0x20)) {
23434 23427 rval = EIO;
23435 23428 goto no_assessment;
23436 23429 }
23437 23430 }
23438 23431 } else {
23439 23432 /*
23440 23433 * The GET CONFIGURATION command succeeded
23441 23434 * so set the media type according to the
23442 23435 * returned data
23443 23436 */
23444 23437 *dki_media_type = out_data[6];
23445 23438 *dki_media_type <<= 8;
23446 23439 *dki_media_type |= out_data[7];
23447 23440 }
23448 23441 }
23449 23442 } else {
23450 23443 /*
23451 23444 * The profile list is not available, so we attempt to identify
23452 23445 * the media type based on the inquiry data
23453 23446 */
23454 23447 sinq = un->un_sd->sd_inq;
23455 23448 if ((sinq->inq_dtype == DTYPE_DIRECT) ||
23456 23449 (sinq->inq_dtype == DTYPE_OPTICAL)) {
23457 23450 /* This is a direct access device or optical disk */
23458 23451 *dki_media_type = DK_FIXED_DISK;
23459 23452
23460 23453 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) ||
23461 23454 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) {
23462 23455 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) {
23463 23456 *dki_media_type = DK_ZIP;
23464 23457 } else if (
23465 23458 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) {
23466 23459 *dki_media_type = DK_JAZ;
23467 23460 }
23468 23461 }
23469 23462 } else {
23470 23463 /*
23471 23464 * Not a CD, direct access or optical disk so return
23472 23465 * unknown media
23473 23466 */
23474 23467 *dki_media_type = DK_UNKNOWN;
23475 23468 }
23476 23469 }
23477 23470
23478 23471 /*
23479 23472 * Now read the capacity so we can provide the lbasize,
23480 23473 * pbsize and capacity.
23481 23474 */
23482 23475 if (dki_pbsize && un->un_f_descr_format_supported) {
23483 23476 rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize,
23484 23477 &pbsize, SD_PATH_DIRECT);
23485 23478
23486 23479 /*
23487 23480 * Override the physical blocksize if the instance already
23488 23481 * has a larger value.
23489 23482 */
23490 23483 pbsize = MAX(pbsize, un->un_phy_blocksize);
23491 23484 }
23492 23485
23493 23486 if (dki_pbsize == NULL || rval != 0 ||
23494 23487 !un->un_f_descr_format_supported) {
23495 23488 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
23496 23489 SD_PATH_DIRECT);
23497 23490
23498 23491 switch (rval) {
23499 23492 case 0:
23500 23493 if (un->un_f_enable_rmw &&
23501 23494 un->un_phy_blocksize != 0) {
23502 23495 pbsize = un->un_phy_blocksize;
23503 23496 } else {
23504 23497 pbsize = lbasize;
23505 23498 }
23506 23499 media_capacity = capacity;
23507 23500
23508 23501 /*
23509 23502 * sd_send_scsi_READ_CAPACITY() reports capacity in
23510 23503 * un->un_sys_blocksize chunks. So we need to convert
23511 23504 * it into cap.lbsize chunks.
23512 23505 */
23513 23506 if (un->un_f_has_removable_media) {
23514 23507 media_capacity *= un->un_sys_blocksize;
23515 23508 media_capacity /= lbasize;
23516 23509 }
23517 23510 break;
23518 23511 case EACCES:
23519 23512 rval = EACCES;
23520 23513 goto done;
23521 23514 default:
23522 23515 rval = EIO;
23523 23516 goto done;
23524 23517 }
23525 23518 } else {
23526 23519 if (un->un_f_enable_rmw &&
23527 23520 !ISP2(pbsize % DEV_BSIZE)) {
23528 23521 pbsize = SSD_SECSIZE;
23529 23522 } else if (!ISP2(lbasize % DEV_BSIZE) ||
23530 23523 !ISP2(pbsize % DEV_BSIZE)) {
23531 23524 pbsize = lbasize = DEV_BSIZE;
23532 23525 }
23533 23526 media_capacity = capacity;
23534 23527 }
23535 23528
23536 23529 /*
23537 23530 * If lun is expanded dynamically, update the un structure.
23538 23531 */
23539 23532 mutex_enter(SD_MUTEX(un));
23540 23533 if ((un->un_f_blockcount_is_valid == TRUE) &&
23541 23534 (un->un_f_tgt_blocksize_is_valid == TRUE) &&
23542 23535 (capacity > un->un_blockcount)) {
23543 23536 un->un_f_expnevent = B_FALSE;
23544 23537 sd_update_block_info(un, lbasize, capacity);
23545 23538 }
23546 23539 mutex_exit(SD_MUTEX(un));
23547 23540
23548 23541 *dki_lbsize = lbasize;
23549 23542 *dki_capacity = media_capacity;
23550 23543 if (dki_pbsize)
23551 23544 *dki_pbsize = pbsize;
23552 23545
23553 23546 done:
23554 23547 if (rval != 0) {
23555 23548 if (rval == EIO)
23556 23549 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
23557 23550 else
23558 23551 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23559 23552 }
23560 23553 no_assessment:
23561 23554 sd_ssc_fini(ssc);
23562 23555 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
23563 23556 kmem_free(rqbuf, SENSE_LENGTH);
23564 23557 return (rval);
23565 23558 }
23566 23559
23567 23560 /*
23568 23561 * Function: sd_get_media_info
23569 23562 *
23570 23563 * Description: This routine is the driver entry point for handling ioctl
23571 23564 * requests for the media type or command set profile used by the
23572 23565 * drive to operate on the media (DKIOCGMEDIAINFO).
23573 23566 *
23574 23567 * Arguments: dev - the device number
23575 23568 * arg - pointer to user provided dk_minfo structure
23576 23569 * specifying the media type, logical block size and
23577 23570 * drive capacity.
23578 23571 * flag - this argument is a pass through to ddi_copyxxx()
23579 23572 * directly from the mode argument of ioctl().
23580 23573 *
23581 23574 * Return Code: returns the value from sd_get_media_info_com
23582 23575 */
23583 23576 static int
23584 23577 sd_get_media_info(dev_t dev, caddr_t arg, int flag)
23585 23578 {
23586 23579 struct dk_minfo mi;
23587 23580 int rval;
23588 23581
23589 23582 rval = sd_get_media_info_com(dev, &mi.dki_media_type,
23590 23583 &mi.dki_lbsize, &mi.dki_capacity, NULL);
23591 23584
23592 23585 if (rval)
23593 23586 return (rval);
23594 23587 if (ddi_copyout(&mi, arg, sizeof (struct dk_minfo), flag))
23595 23588 rval = EFAULT;
23596 23589 return (rval);
23597 23590 }
23598 23591
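/*
 * A user-space sketch of DKIOCGMEDIAINFO as served above; the _EXT
 * variant that follows is identical except that dk_minfo_ext adds
 * dki_pbsize. The print format is illustrative.
 */
#include <sys/types.h>
#include <sys/dkio.h>
#include <stdio.h>
#include <stropts.h>

void
print_media_info(int fd)
{
	struct dk_minfo mi;

	if (ioctl(fd, DKIOCGMEDIAINFO, &mi) != 0)
		return;		/* e.g. ENXIO with no media present */
	(void) printf("type 0x%x, %llu blocks of %u bytes\n",
	    mi.dki_media_type, (u_longlong_t)mi.dki_capacity,
	    mi.dki_lbsize);
}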
23599 23592 /*
23600 23593 * Function: sd_get_media_info_ext
23601 23594 *
23602 23595 * Description: This routine is the driver entry point for handling ioctl
23603 23596 * requests for the media type or command set profile used by the
23604 23597 * drive to operate on the media (DKIOCGMEDIAINFOEXT). The
23605 23598 * difference between this ioctl and DKIOCGMEDIAINFO is that the
23606 23599 * return value of this ioctl contains both the logical block
23607 23600 * size and the physical block size.
23608 23601 *
23609 23602 *
23610 23603 * Arguments: dev - the device number
23611 23604 * arg - pointer to user provided dk_minfo_ext structure
23612 23605 * specifying the media type, logical block size,
23613 23606 * physical block size and disk capacity.
23614 23607 * flag - this argument is a pass through to ddi_copyxxx()
23615 23608 * directly from the mode argument of ioctl().
23616 23609 *
23617 23610 * Return Code: returns the value from sd_get_media_info_com
23618 23611 */
23619 23612 static int
23620 23613 sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag)
23621 23614 {
23622 23615 struct dk_minfo_ext mie;
23623 23616 int rval = 0;
23624 23617
23625 23618 rval = sd_get_media_info_com(dev, &mie.dki_media_type,
23626 23619 &mie.dki_lbsize, &mie.dki_capacity, &mie.dki_pbsize);
23627 23620
23628 23621 if (rval)
23629 23622 return (rval);
23630 23623 if (ddi_copyout(&mie, arg, sizeof (struct dk_minfo_ext), flag))
23631 23624 rval = EFAULT;
23632 23625 return (rval);
23633 23626
23634 23627 }
23635 23628
23636 23629 /*
23637 23630 * Function: sd_watch_request_submit
23638 23631 *
23639 23632 * Description: Call scsi_watch_request_submit or scsi_mmc_watch_request_submit
23640 23633 * depending on which is supported by device.
23641 23634 */
23642 23635 static opaque_t
23643 23636 sd_watch_request_submit(struct sd_lun *un)
23644 23637 {
23645 23638 dev_t dev;
23646 23639
23647 23640 /* All submissions are unified to use the same device number */
23648 23641 dev = sd_make_device(SD_DEVINFO(un));
23649 23642
23650 23643 if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) {
23651 23644 return (scsi_mmc_watch_request_submit(SD_SCSI_DEVP(un),
23652 23645 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
23653 23646 (caddr_t)dev));
23654 23647 } else {
23655 23648 return (scsi_watch_request_submit(SD_SCSI_DEVP(un),
23656 23649 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
23657 23650 (caddr_t)dev));
23658 23651 }
23659 23652 }
23660 23653
23661 23654
23662 23655 /*
23663 23656 * Function: sd_check_media
23664 23657 *
23665 23658 * Description: This utility routine implements the functionality for the
23666 23659 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the
23667 23660 * driver state changes from that specified by the user
23668 23661 * (inserted or ejected). For example, if the user specifies
23669 23662 * DKIO_EJECTED and the current media state is inserted this
23670 23663 * routine will immediately return DKIO_INSERTED. However, if the
23671 23664 * current media state is not inserted the user thread will be
23672 23665 * blocked until the drive state changes. If DKIO_NONE is specified
23673 23666 * the user thread will block until a drive state change occurs.
23674 23667 *
23675 23668 * Arguments: dev - the device number
23676 23669 * state - user pointer to a dkio_state, updated with the current
23677 23670 * drive state at return.
23678 23671 *
23679 23672 * Return Code: ENXIO
23680 23673 * EIO
23681 23674 * EAGAIN
23682 23675 * EINTR
23683 23676 */
23684 23677
23685 23678 static int
23686 23679 sd_check_media(dev_t dev, enum dkio_state state)
23687 23680 {
23688 23681 struct sd_lun *un = NULL;
23689 23682 enum dkio_state prev_state;
23690 23683 opaque_t token = NULL;
23691 23684 int rval = 0;
23692 23685 sd_ssc_t *ssc;
23693 23686
23694 23687 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23695 23688 return (ENXIO);
23696 23689 }
23697 23690
23698 23691 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n");
23699 23692
23700 23693 ssc = sd_ssc_init(un);
23701 23694
23702 23695 mutex_enter(SD_MUTEX(un));
23703 23696
23704 23697 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: "
23705 23698 "state=%x, mediastate=%x\n", state, un->un_mediastate);
23706 23699
23707 23700 prev_state = un->un_mediastate;
23708 23701
23709 23702 /* is there anything to do? */
23710 23703 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) {
23711 23704 /*
23712 23705 * submit the request to the scsi_watch service;
23713 23706 * scsi_media_watch_cb() does the real work
23714 23707 */
23715 23708 mutex_exit(SD_MUTEX(un));
23716 23709
23717 23710 /*
23718 23711 * This change handles the case where a scsi watch request is
23719 23712 * added to a device that is powered down. To accomplish this
23720 23713 * we power up the device before adding the scsi watch request,
23721 23714 * since the scsi watch sends a TUR directly to the device
23722 23715 * which the device cannot handle if it is powered down.
23723 23716 */
23724 23717 if (sd_pm_entry(un) != DDI_SUCCESS) {
23725 23718 mutex_enter(SD_MUTEX(un));
23726 23719 goto done;
23727 23720 }
23728 23721
23729 23722 token = sd_watch_request_submit(un);
23730 23723
23731 23724 sd_pm_exit(un);
23732 23725
23733 23726 mutex_enter(SD_MUTEX(un));
23734 23727 if (token == NULL) {
23735 23728 rval = EAGAIN;
23736 23729 goto done;
23737 23730 }
23738 23731
23739 23732 /*
23740 23733 * This is a special case IOCTL that doesn't return
23741 23734 * until the media state changes. Routine sdpower
23742 23735 * knows about and handles this so don't count it
23743 23736 * as an active cmd in the driver, which would
23744 23737 * keep the device busy to the pm framework.
23745 23738 * If the count isn't decremented the device can't
23746 23739 * be powered down.
23747 23740 */
23748 23741 un->un_ncmds_in_driver--;
23749 23742 ASSERT(un->un_ncmds_in_driver >= 0);
23750 23743
23751 23744 /*
23752 23745 * if a prior request had been made, this will be the same
23753 23746 * token, as scsi_watch was designed that way.
23754 23747 */
23755 23748 un->un_swr_token = token;
23756 23749 un->un_specified_mediastate = state;
23757 23750
23758 23751 /*
23759 23752 * now wait for media change
23760 23753 * we will not be signalled until mediastate != state, but it is
23761 23754 * still better to test for this condition, since there is a
23762 23755 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED
23763 23756 */
23764 23757 SD_TRACE(SD_LOG_COMMON, un,
23765 23758 "sd_check_media: waiting for media state change\n");
23766 23759 while (un->un_mediastate == state) {
23767 23760 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) {
23768 23761 SD_TRACE(SD_LOG_COMMON, un,
23769 23762 "sd_check_media: waiting for media state "
23770 23763 "was interrupted\n");
23771 23764 un->un_ncmds_in_driver++;
23772 23765 rval = EINTR;
23773 23766 goto done;
23774 23767 }
23775 23768 SD_TRACE(SD_LOG_COMMON, un,
23776 23769 "sd_check_media: received signal, state=%x\n",
23777 23770 un->un_mediastate);
23778 23771 }
23779 23772 /*
23780 23773 * Inc the counter to indicate the device once again
23781 23774 * has an active outstanding cmd.
23782 23775 */
23783 23776 un->un_ncmds_in_driver++;
23784 23777 }
23785 23778
23786 23779 /* invalidate geometry */
23787 23780 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) {
23788 23781 sr_ejected(un);
23789 23782 }
23790 23783
23791 23784 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) {
23792 23785 uint64_t capacity;
23793 23786 uint_t lbasize;
23794 23787
23795 23788 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n");
23796 23789 mutex_exit(SD_MUTEX(un));
23797 23790 /*
23798 23791 * Since the following routines use SD_PATH_DIRECT, we must
23799 23792 * call PM directly before the upcoming disk accesses. This
23800 23793 * may cause the disk to be powered up and spun up.
23801 23794 */
23802 23795
23803 23796 if (sd_pm_entry(un) == DDI_SUCCESS) {
23804 23797 rval = sd_send_scsi_READ_CAPACITY(ssc,
23805 23798 &capacity, &lbasize, SD_PATH_DIRECT);
23806 23799 if (rval != 0) {
23807 23800 sd_pm_exit(un);
23808 23801 if (rval == EIO)
23809 23802 sd_ssc_assessment(ssc,
23810 23803 SD_FMT_STATUS_CHECK);
23811 23804 else
23812 23805 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23813 23806 mutex_enter(SD_MUTEX(un));
23814 23807 goto done;
23815 23808 }
23816 23809 } else {
23817 23810 rval = EIO;
23818 23811 mutex_enter(SD_MUTEX(un));
23819 23812 goto done;
23820 23813 }
23821 23814 mutex_enter(SD_MUTEX(un));
23822 23815
23823 23816 sd_update_block_info(un, lbasize, capacity);
23824 23817
23825 23818 /*
23826 23819 * Check if the media in the device is writable or not
23827 23820 */
23828 23821 if (ISCD(un)) {
23829 23822 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
23830 23823 }
23831 23824
23832 23825 mutex_exit(SD_MUTEX(un));
23833 23826 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
23834 23827 if ((cmlb_validate(un->un_cmlbhandle, 0,
23835 23828 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) {
23836 23829 sd_set_pstats(un);
23837 23830 SD_TRACE(SD_LOG_IO_PARTITION, un,
23838 23831 "sd_check_media: un:0x%p pstats created and "
23839 23832 "set\n", un);
23840 23833 }
23841 23834
23842 23835 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
23843 23836 SD_PATH_DIRECT);
23844 23837
23845 23838 sd_pm_exit(un);
23846 23839
23847 23840 if (rval != 0) {
23848 23841 if (rval == EIO)
23849 23842 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
23850 23843 else
23851 23844 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23852 23845 }
23853 23846
23854 23847 mutex_enter(SD_MUTEX(un));
23855 23848 }
23856 23849 done:
23857 23850 sd_ssc_fini(ssc);
23858 23851 un->un_f_watcht_stopped = FALSE;
23859 23852 if (token != NULL && un->un_swr_token != NULL) {
23860 23853 /*
23861 23854 * Use of this local token and the mutex ensures that we avoid
23862 23855 * some race conditions associated with terminating the
23863 23856 * scsi watch.
23864 23857 */
23865 23858 token = un->un_swr_token;
23866 23859 mutex_exit(SD_MUTEX(un));
23867 23860 (void) scsi_watch_request_terminate(token,
23868 23861 SCSI_WATCH_TERMINATE_WAIT);
23869 23862 if (scsi_watch_get_ref_count(token) == 0) {
23870 23863 mutex_enter(SD_MUTEX(un));
23871 23864 un->un_swr_token = (opaque_t)NULL;
23872 23865 } else {
23873 23866 mutex_enter(SD_MUTEX(un));
23874 23867 }
23875 23868 }
23876 23869
23877 23870 /*
23878 23871 * Update the capacity kstat value, if there was no media
23879 23872 * previously (capacity kstat is 0) and media has now been
23880 23873 * inserted (un_f_blockcount_is_valid == TRUE).
23881 23874 */
23882 23875 if (un->un_errstats) {
23883 23876 struct sd_errstats *stp = NULL;
23884 23877
23885 23878 stp = (struct sd_errstats *)un->un_errstats->ks_data;
23886 23879 if ((stp->sd_capacity.value.ui64 == 0) &&
23887 23880 (un->un_f_blockcount_is_valid == TRUE)) {
23888 23881 stp->sd_capacity.value.ui64 =
23889 23882 (uint64_t)((uint64_t)un->un_blockcount *
23890 23883 un->un_sys_blocksize);
23891 23884 }
23892 23885 }
23893 23886 mutex_exit(SD_MUTEX(un));
23894 23887 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n");
23895 23888 return (rval);
23896 23889 }
23897 23890
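/*
 * A user-space sketch of the DKIOCSTATE contract implemented by
 * sd_check_media() above: pass in the last state you saw, and the
 * ioctl blocks until the state differs (volcheck-style loop).
 */
#include <sys/dkio.h>
#include <stropts.h>

void
watch_media(int fd)
{
	enum dkio_state st = DKIO_NONE;

	for (;;) {
		/* returns EINTR on signal, EAGAIN if no watch resources */
		if (ioctl(fd, DKIOCSTATE, &st) != 0)
			break;
		if (st == DKIO_INSERTED) {
			/* media arrived: (re)read the label, etc. */
		} else if (st == DKIO_EJECTED) {
			/* media left */
		}
	}
}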
23898 23891
23899 23892 /*
23900 23893 * Function: sd_delayed_cv_broadcast
23901 23894 *
23902 23895 * Description: Delayed cv_broadcast to allow for target to recover from media
23903 23896 * insertion.
23904 23897 *
23905 23898 * Arguments: arg - driver soft state (unit) structure
23906 23899 */
23907 23900
23908 23901 static void
23909 23902 sd_delayed_cv_broadcast(void *arg)
23910 23903 {
23911 23904 struct sd_lun *un = arg;
23912 23905
23913 23906 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n");
23914 23907
23915 23908 mutex_enter(SD_MUTEX(un));
23916 23909 un->un_dcvb_timeid = NULL;
23917 23910 cv_broadcast(&un->un_state_cv);
23918 23911 mutex_exit(SD_MUTEX(un));
23919 23912 }
23920 23913
23921 23914
23922 23915 /*
23923 23916 * Function: sd_media_watch_cb
23924 23917 *
23925 23918 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This
23926 23919 * routine processes the TUR sense data and updates the driver
23927 23920 * state if a transition has occurred. The user thread
23928 23921 * (sd_check_media) is then signalled.
23929 23922 *
23930 23923 * Arguments: arg - the device 'dev_t' is used for context to discriminate
23931 23924 * among multiple watches that share this callback function
23932 23925 * resultp - scsi watch facility result packet containing scsi
23933 23926 * packet, status byte and sense data
23934 23927 *
23935 23928 * Return Code: 0 for success, -1 for failure
23936 23929 */
23937 23930
23938 23931 static int
23939 23932 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
23940 23933 {
23941 23934 struct sd_lun *un;
23942 23935 struct scsi_status *statusp = resultp->statusp;
23943 23936 uint8_t *sensep = (uint8_t *)resultp->sensep;
23944 23937 enum dkio_state state = DKIO_NONE;
23945 23938 dev_t dev = (dev_t)arg;
23946 23939 uchar_t actual_sense_length;
23947 23940 uint8_t skey, asc, ascq;
23948 23941
23949 23942 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23950 23943 return (-1);
23951 23944 }
23952 23945 actual_sense_length = resultp->actual_sense_length;
23953 23946
23954 23947 mutex_enter(SD_MUTEX(un));
23955 23948 SD_TRACE(SD_LOG_COMMON, un,
23956 23949 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
23957 23950 *((char *)statusp), (void *)sensep, actual_sense_length);
23958 23951
23959 23952 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
23960 23953 un->un_mediastate = DKIO_DEV_GONE;
23961 23954 cv_broadcast(&un->un_state_cv);
23962 23955 mutex_exit(SD_MUTEX(un));
23963 23956
23964 23957 return (0);
23965 23958 }
23966 23959
23967 23960 if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) {
23968 23961 if (sd_gesn_media_data_valid(resultp->mmc_data)) {
23969 23962 if ((resultp->mmc_data[5] &
23970 23963 SD_GESN_MEDIA_EVENT_STATUS_PRESENT) != 0) {
23971 23964 state = DKIO_INSERTED;
23972 23965 } else {
23973 23966 state = DKIO_EJECTED;
23974 23967 }
23975 23968 if ((resultp->mmc_data[4] & SD_GESN_MEDIA_EVENT_CODE) ==
23976 23969 SD_GESN_MEDIA_EVENT_EJECTREQUEST) {
23977 23970 sd_log_eject_request_event(un, KM_NOSLEEP);
23978 23971 }
23979 23972 }
23980 23973 } else if (sensep != NULL) {
23981 23974 /*
23982 23975 * If there was a check condition then sensep points to valid
23983 23976 * sense data. If status was not a check condition but a
23984 23977 * reservation or busy status then the new state is DKIO_NONE.
23985 23978 */
23986 23979 skey = scsi_sense_key(sensep);
23987 23980 asc = scsi_sense_asc(sensep);
23988 23981 ascq = scsi_sense_ascq(sensep);
23989 23982
23990 23983 SD_INFO(SD_LOG_COMMON, un,
23991 23984 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
23992 23985 skey, asc, ascq);
23993 23986 /* This routine only uses up to 13 bytes of sense data. */
23994 23987 if (actual_sense_length >= 13) {
23995 23988 if (skey == KEY_UNIT_ATTENTION) {
23996 23989 if (asc == 0x28) {
23997 23990 state = DKIO_INSERTED;
23998 23991 }
23999 23992 } else if (skey == KEY_NOT_READY) {
24000 23993 /*
24001 23994 * Sense data of 02/06/00 means that the
24002 23995 * drive could not read the media (No
24003 23996 * reference position found). In this case
24004 23997 * to prevent a hang on the DKIOCSTATE IOCTL
24005 23998 * we set the media state to DKIO_INSERTED.
24006 23999 */
24007 24000 if (asc == 0x06 && ascq == 0x00)
24008 24001 state = DKIO_INSERTED;
24009 24002
24010 24003 /*
24011 24004 * Sense data of 02/04/02 means that the host
24012 24005 * should send a start command. Explicitly
24013 24006 * leave the media state as is
24014 24007 * (inserted), as the media is inserted
24015 24008 * and the host has stopped the device for
24016 24009 * PM reasons. The next true read/write
24017 24010 * to this media will bring the device
24018 24011 * to the right state for media
24019 24012 * access.
24020 24013 */
24021 24014 if (asc == 0x3a) {
24022 24015 state = DKIO_EJECTED;
24023 24016 } else {
24024 24017 /*
24025 24018 * If the drive is busy with an
24026 24019 * operation or long write, keep the
24027 24020 * media in an inserted state.
24028 24021 */
24029 24022
24030 24023 if ((asc == 0x04) &&
24031 24024 ((ascq == 0x02) ||
24032 24025 (ascq == 0x07) ||
24033 24026 (ascq == 0x08))) {
24034 24027 state = DKIO_INSERTED;
24035 24028 }
24036 24029 }
24037 24030 } else if (skey == KEY_NO_SENSE) {
24038 24031 if ((asc == 0x00) && (ascq == 0x00)) {
24039 24032 /*
24040 24033 * Sense Data 00/00/00 does not provide
24041 24034 * any information about the state of
24042 24035 * the media. Ignore it.
24043 24036 */
24044 24037 mutex_exit(SD_MUTEX(un));
24045 24038 return (0);
24046 24039 }
24047 24040 }
24048 24041 }
24049 24042 } else if ((*((char *)statusp) == STATUS_GOOD) &&
24050 24043 (resultp->pkt->pkt_reason == CMD_CMPLT)) {
24051 24044 state = DKIO_INSERTED;
24052 24045 }
24053 24046
24054 24047 SD_TRACE(SD_LOG_COMMON, un,
24055 24048 "sd_media_watch_cb: state=%x, specified=%x\n",
24056 24049 state, un->un_specified_mediastate);
24057 24050
24058 24051 /*
24059 24052 * now signal the waiting thread if this is *not* the specified state;
24060 24053 * delay the signal if the state is DKIO_INSERTED to allow the target
24061 24054 * to recover
24062 24055 */
24063 24056 if (state != un->un_specified_mediastate) {
24064 24057 un->un_mediastate = state;
24065 24058 if (state == DKIO_INSERTED) {
24066 24059 /*
24067 24060 * delay the signal to give the drive a chance
24068 24061 * to do what it apparently needs to do
24069 24062 */
24070 24063 SD_TRACE(SD_LOG_COMMON, un,
24071 24064 "sd_media_watch_cb: delayed cv_broadcast\n");
24072 24065 if (un->un_dcvb_timeid == NULL) {
24073 24066 un->un_dcvb_timeid =
24074 24067 timeout(sd_delayed_cv_broadcast, un,
24075 24068 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
24076 24069 }
24077 24070 } else {
24078 24071 SD_TRACE(SD_LOG_COMMON, un,
24079 24072 "sd_media_watch_cb: immediate cv_broadcast\n");
24080 24073 cv_broadcast(&un->un_state_cv);
24081 24074 }
24082 24075 }
24083 24076 mutex_exit(SD_MUTEX(un));
24084 24077 return (0);
24085 24078 }
24086 24079
24087 24080
24088 24081 /*
24089 24082 * Function: sd_dkio_get_temp
24090 24083 *
24091 24084 * Description: This routine is the driver entry point for handling ioctl
24092 24085 * requests to get the disk temperature.
24093 24086 *
24094 24087 * Arguments: dev - the device number
24095 24088 * arg - pointer to user provided dk_temperature structure.
24096 24089 * flag - this argument is a pass through to ddi_copyxxx()
24097 24090 * directly from the mode argument of ioctl().
24098 24091 *
24099 24092 * Return Code: 0
24100 24093 * EFAULT
24101 24094 * ENXIO
24102 24095 * EAGAIN
24103 24096 */
24104 24097
24105 24098 static int
24106 24099 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag)
24107 24100 {
24108 24101 struct sd_lun *un = NULL;
24109 24102 struct dk_temperature *dktemp = NULL;
24110 24103 uchar_t *temperature_page;
24111 24104 int rval = 0;
24112 24105 int path_flag = SD_PATH_STANDARD;
24113 24106 sd_ssc_t *ssc;
24114 24107
24115 24108 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24116 24109 return (ENXIO);
24117 24110 }
24118 24111
24119 24112 ssc = sd_ssc_init(un);
24120 24113 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP);
24121 24114
24122 24115 /* copyin the disk temp argument to get the user flags */
24123 24116 if (ddi_copyin((void *)arg, dktemp,
24124 24117 sizeof (struct dk_temperature), flag) != 0) {
24125 24118 rval = EFAULT;
24126 24119 goto done;
24127 24120 }
24128 24121
24129 24122 /* Initialize the temperature to invalid. */
24130 24123 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
24131 24124 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
24132 24125
24133 24126 /*
24134 24127 * Note: Investigate removing the "bypass pm" semantic.
24135 24128 * Can we just bypass PM always?
24136 24129 */
24137 24130 if (dktemp->dkt_flags & DKT_BYPASS_PM) {
24138 24131 path_flag = SD_PATH_DIRECT;
24139 24132 ASSERT(!mutex_owned(&un->un_pm_mutex));
24140 24133 mutex_enter(&un->un_pm_mutex);
24141 24134 if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
24142 24135 /*
24143 24136 * If DKT_BYPASS_PM is set, and the drive happens to be
24144 24137 * in low power mode, we cannot wake it up; we need to
24145 24138 * return EAGAIN.
24146 24139 */
24147 24140 mutex_exit(&un->un_pm_mutex);
24148 24141 rval = EAGAIN;
24149 24142 goto done;
24150 24143 } else {
24151 24144 /*
24152 24145 * Indicate to PM the device is busy. This is required
24153 24146 * to avoid a race - i.e. the ioctl is issuing a
24154 24147 * command and the pm framework brings down the device
24155 24148 * to low power mode (possible power cut-off on some
24156 24149 * platforms).
24157 24150 */
24158 24151 mutex_exit(&un->un_pm_mutex);
24159 24152 if (sd_pm_entry(un) != DDI_SUCCESS) {
24160 24153 rval = EAGAIN;
24161 24154 goto done;
24162 24155 }
24163 24156 }
24164 24157 }
24165 24158
24166 24159 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP);
24167 24160
24168 24161 rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page,
24169 24162 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag);
24170 24163 if (rval != 0)
24171 24164 goto done2;
24172 24165
24173 24166 /*
24174 24167 * For the current temperature verify that the parameter length is 0x02
24175 24168 * and the parameter code is 0x00
24176 24169 */
24177 24170 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) &&
24178 24171 (temperature_page[5] == 0x00)) {
24179 24172 if (temperature_page[9] == 0xFF) {
24180 24173 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
24181 24174 } else {
24182 24175 dktemp->dkt_cur_temp = (short)(temperature_page[9]);
24183 24176 }
24184 24177 }
24185 24178
24186 24179 /*
24187 24180 * For the reference temperature verify that the parameter
24188 24181 * length is 0x02 and the parameter code is 0x01
24189 24182 */
24190 24183 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) &&
24191 24184 (temperature_page[11] == 0x01)) {
24192 24185 if (temperature_page[15] == 0xFF) {
24193 24186 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
24194 24187 } else {
24195 24188 dktemp->dkt_ref_temp = (short)(temperature_page[15]);
24196 24189 }
24197 24190 }
24198 24191
24199 24192 /* Do the copyout regardless of the temperature commands status. */
24200 24193 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature),
24201 24194 flag) != 0) {
24202 24195 rval = EFAULT;
24203 24196 goto done1;
24204 24197 }
24205 24198
24206 24199 done2:
24207 24200 if (rval != 0) {
24208 24201 if (rval == EIO)
24209 24202 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
24210 24203 else
24211 24204 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
24212 24205 }
24213 24206 done1:
24214 24207 if (path_flag == SD_PATH_DIRECT) {
24215 24208 sd_pm_exit(un);
24216 24209 }
24217 24210
24218 24211 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE);
24219 24212 done:
24220 24213 sd_ssc_fini(ssc);
24221 24214 if (dktemp != NULL) {
24222 24215 kmem_free(dktemp, sizeof (struct dk_temperature));
24223 24216 }
24224 24217
24225 24218 return (rval);
24226 24219 }
24227 24220
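/*
 * A user-space sketch of DKIOCGTEMPERATURE as implemented above. The
 * DKT_BYPASS_PM flag avoids spinning up a sleeping drive, at the cost
 * of EAGAIN when the drive is in low power.
 */
#include <sys/dkio.h>
#include <stdio.h>
#include <string.h>
#include <stropts.h>

void
print_disk_temp(int fd)
{
	struct dk_temperature dkt;

	(void) memset(&dkt, 0, sizeof (dkt));
	dkt.dkt_flags = DKT_BYPASS_PM;
	if (ioctl(fd, DKIOCGTEMPERATURE, &dkt) != 0)
		return;		/* EAGAIN: drive asleep and left alone */
	if (dkt.dkt_cur_temp != DKT_INVALID_TEMP)
		(void) printf("current %d C\n", dkt.dkt_cur_temp);
	if (dkt.dkt_ref_temp != DKT_INVALID_TEMP)
		(void) printf("reference %d C\n", dkt.dkt_ref_temp);
}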
24228 24221
24229 24222 /*
24230 24223 * Function: sd_log_page_supported
24231 24224 *
24232 24225 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of
24233 24226 * supported log pages.
24234 24227 *
24235 24228 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
24236 24229 * structure for this target.
24237 24230 * log_page - the log page code to search for.
24238 24231 *
24239 24232 * Return Code: -1 - on error (log sense is optional and may not be supported).
24240 24233 * 0 - log page not found.
24241 24234 * 1 - log page found.
24242 24235 */
24243 24236
24244 24237 static int
24245 24238 sd_log_page_supported(sd_ssc_t *ssc, int log_page)
24246 24239 {
24247 24240 uchar_t *log_page_data;
24248 24241 int i;
24249 24242 int match = 0;
24250 24243 int log_size;
24251 24244 int status = 0;
24252 24245 struct sd_lun *un;
24253 24246
24254 24247 ASSERT(ssc != NULL);
24255 24248 un = ssc->ssc_un;
24256 24249 ASSERT(un != NULL);
24257 24250
24258 24251 log_page_data = kmem_zalloc(0xFF, KM_SLEEP);
24259 24252
24260 24253 status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0,
24261 24254 SD_PATH_DIRECT);
24262 24255
24263 24256 if (status != 0) {
24264 24257 if (status == EIO) {
24265 24258 /*
24266 24259 			 * Some disks do not support log sense; we
24267 24260 			 * should ignore this kind of error (sense key is
24268 24261 * 0x5 - illegal request).
24269 24262 */
24270 24263 uint8_t *sensep;
24271 24264 int senlen;
24272 24265
24273 24266 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
24274 24267 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
24275 24268 ssc->ssc_uscsi_cmd->uscsi_rqresid);
24276 24269
24277 24270 if (senlen > 0 &&
24278 24271 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
24279 24272 sd_ssc_assessment(ssc,
24280 24273 SD_FMT_IGNORE_COMPROMISE);
24281 24274 } else {
24282 24275 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
24283 24276 }
24284 24277 } else {
24285 24278 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
24286 24279 }
24287 24280
24288 24281 SD_ERROR(SD_LOG_COMMON, un,
24289 24282 "sd_log_page_supported: failed log page retrieval\n");
24290 24283 kmem_free(log_page_data, 0xFF);
24291 24284 return (-1);
24292 24285 }
24293 24286
24294 24287 log_size = log_page_data[3];
24295 24288
24296 24289 /*
24297 24290 	 * The list of supported log pages starts at the fourth byte. Check
24298 24291 * until we run out of log pages or a match is found.
24299 24292 */
24300 24293 for (i = 4; (i < (log_size + 4)) && !match; i++) {
24301 24294 if (log_page_data[i] == log_page) {
24302 24295 match++;
24303 24296 }
24304 24297 }
24305 24298 kmem_free(log_page_data, 0xFF);
24306 24299 return (match);
24307 24300 }
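
/*
 * An illustrative aside (not driver code): the page 0x00 response scanned
 * above is assumed to be just a 4-byte header, with byte 3 giving the
 * number of page codes that follow, and then one supported page code per
 * byte. In isolation, the scan amounts to this hypothetical helper:
 *
 *	// standalone version of the loop above
 *	static int
 *	log_page_listed(uchar_t *buf, uchar_t page)
 *	{
 *		int i, n = buf[3];		// count of codes following
 *
 *		for (i = 0; i < n; i++) {
 *			if (buf[4 + i] == page)
 *				return (1);	// supported
 *		}
 *		return (0);			// not listed
 *	}
 */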
24308 24301
24309 24302
24310 24303 /*
24311 24304 * Function: sd_mhdioc_failfast
24312 24305 *
24313 24306 * Description: This routine is the driver entry point for handling ioctl
24314 24307 * requests to enable/disable the multihost failfast option.
24315 24308 * (MHIOCENFAILFAST)
24316 24309 *
24317 24310 * Arguments: dev - the device number
24318 24311  *		arg - user specified probing interval, in milliseconds.
24319 24312 * flag - this argument is a pass through to ddi_copyxxx()
24320 24313 * directly from the mode argument of ioctl().
24321 24314 *
24322 24315 * Return Code: 0
24323 24316 * EFAULT
24324 24317 * ENXIO
24325 24318 */
24326 24319
24327 24320 static int
24328 24321 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag)
24329 24322 {
24330 24323 struct sd_lun *un = NULL;
24331 24324 int mh_time;
24332 24325 int rval = 0;
24333 24326
24334 24327 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24335 24328 return (ENXIO);
24336 24329 }
24337 24330
24338 24331 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag))
24339 24332 return (EFAULT);
24340 24333
24341 24334 if (mh_time) {
24342 24335 mutex_enter(SD_MUTEX(un));
24343 24336 un->un_resvd_status |= SD_FAILFAST;
24344 24337 mutex_exit(SD_MUTEX(un));
24345 24338 /*
24346 24339 * If mh_time is INT_MAX, then this ioctl is being used for
24347 24340 * SCSI-3 PGR purposes, and we don't need to spawn watch thread.
24348 24341 */
24349 24342 if (mh_time != INT_MAX) {
24350 24343 rval = sd_check_mhd(dev, mh_time);
24351 24344 }
24352 24345 } else {
24353 24346 (void) sd_check_mhd(dev, 0);
24354 24347 mutex_enter(SD_MUTEX(un));
24355 24348 un->un_resvd_status &= ~SD_FAILFAST;
24356 24349 mutex_exit(SD_MUTEX(un));
24357 24350 }
24358 24351 return (rval);
24359 24352 }
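
/*
 * An illustrative userland sketch (not part of the driver): the
 * MHIOCENFAILFAST argument is the probing interval in milliseconds,
 * passed by address. The descriptor 'fd' and the error handling are
 * hypothetical.
 *
 *	#include <sys/mhd.h>
 *	#include <limits.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int mh_time = 2000;		// probe every 2 seconds
 *
 *	if (ioctl(fd, MHIOCENFAILFAST, &mh_time) < 0)
 *		perror("MHIOCENFAILFAST");
 *	// mh_time == 0 disables failfast; mh_time == INT_MAX sets the
 *	// failfast flag without spawning the watch thread (the SCSI-3
 *	// PGR usage noted above)
 */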
24360 24353
24361 24354
24362 24355 /*
24363 24356 * Function: sd_mhdioc_takeown
24364 24357 *
24365 24358 * Description: This routine is the driver entry point for handling ioctl
24366 24359 * requests to forcefully acquire exclusive access rights to the
24367 24360 * multihost disk (MHIOCTKOWN).
24368 24361 *
24369 24362 * Arguments: dev - the device number
24370 24363 * arg - user provided structure specifying the delay
24371 24364 * parameters in milliseconds
24372 24365 * flag - this argument is a pass through to ddi_copyxxx()
24373 24366 * directly from the mode argument of ioctl().
24374 24367 *
24375 24368 * Return Code: 0
24376 24369 * EFAULT
24377 24370 * ENXIO
24378 24371 */
24379 24372
24380 24373 static int
24381 24374 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag)
24382 24375 {
24383 24376 struct sd_lun *un = NULL;
24384 24377 struct mhioctkown *tkown = NULL;
24385 24378 int rval = 0;
24386 24379
24387 24380 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24388 24381 return (ENXIO);
24389 24382 }
24390 24383
24391 24384 if (arg != NULL) {
24392 24385 tkown = (struct mhioctkown *)
24393 24386 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP);
24394 24387 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag);
24395 24388 if (rval != 0) {
24396 24389 rval = EFAULT;
24397 24390 goto error;
24398 24391 }
24399 24392 }
24400 24393
24401 24394 rval = sd_take_ownership(dev, tkown);
24402 24395 mutex_enter(SD_MUTEX(un));
24403 24396 if (rval == 0) {
24404 24397 un->un_resvd_status |= SD_RESERVE;
24405 24398 if (tkown != NULL && tkown->reinstate_resv_delay != 0) {
24406 24399 sd_reinstate_resv_delay =
24407 24400 tkown->reinstate_resv_delay * 1000;
24408 24401 } else {
24409 24402 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
24410 24403 }
24411 24404 /*
24412 24405 * Give the scsi_watch routine interval set by
24413 24406 * the MHIOCENFAILFAST ioctl precedence here.
24414 24407 */
24415 24408 if ((un->un_resvd_status & SD_FAILFAST) == 0) {
24416 24409 mutex_exit(SD_MUTEX(un));
24417 24410 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000);
24418 24411 SD_TRACE(SD_LOG_IOCTL_MHD, un,
24419 24412 "sd_mhdioc_takeown : %d\n",
24420 24413 sd_reinstate_resv_delay);
24421 24414 } else {
24422 24415 mutex_exit(SD_MUTEX(un));
24423 24416 }
24424 24417 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
24425 24418 sd_mhd_reset_notify_cb, (caddr_t)un);
24426 24419 } else {
24427 24420 un->un_resvd_status &= ~SD_RESERVE;
24428 24421 mutex_exit(SD_MUTEX(un));
24429 24422 }
24430 24423
24431 24424 error:
24432 24425 if (tkown != NULL) {
24433 24426 kmem_free(tkown, sizeof (struct mhioctkown));
24434 24427 }
24435 24428 return (rval);
24436 24429 }
24437 24430
24438 24431
24439 24432 /*
24440 24433 * Function: sd_mhdioc_release
24441 24434 *
24442 24435 * Description: This routine is the driver entry point for handling ioctl
24443 24436 * requests to release exclusive access rights to the multihost
24444 24437 * disk (MHIOCRELEASE).
24445 24438 *
24446 24439 * Arguments: dev - the device number
24447 24440 *
24448 24441 * Return Code: 0
24449 24442 * ENXIO
24450 24443 */
24451 24444
24452 24445 static int
24453 24446 sd_mhdioc_release(dev_t dev)
24454 24447 {
24455 24448 struct sd_lun *un = NULL;
24456 24449 timeout_id_t resvd_timeid_save;
24457 24450 int resvd_status_save;
24458 24451 int rval = 0;
24459 24452
24460 24453 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24461 24454 return (ENXIO);
24462 24455 }
24463 24456
24464 24457 mutex_enter(SD_MUTEX(un));
24465 24458 resvd_status_save = un->un_resvd_status;
24466 24459 un->un_resvd_status &=
24467 24460 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE);
24468 24461 if (un->un_resvd_timeid) {
24469 24462 resvd_timeid_save = un->un_resvd_timeid;
24470 24463 un->un_resvd_timeid = NULL;
24471 24464 mutex_exit(SD_MUTEX(un));
24472 24465 (void) untimeout(resvd_timeid_save);
24473 24466 } else {
24474 24467 mutex_exit(SD_MUTEX(un));
24475 24468 }
24476 24469
24477 24470 /*
24478 24471 * destroy any pending timeout thread that may be attempting to
24479 24472 * reinstate reservation on this device.
24480 24473 */
24481 24474 sd_rmv_resv_reclaim_req(dev);
24482 24475
24483 24476 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) {
24484 24477 mutex_enter(SD_MUTEX(un));
24485 24478 if ((un->un_mhd_token) &&
24486 24479 ((un->un_resvd_status & SD_FAILFAST) == 0)) {
24487 24480 mutex_exit(SD_MUTEX(un));
24488 24481 (void) sd_check_mhd(dev, 0);
24489 24482 } else {
24490 24483 mutex_exit(SD_MUTEX(un));
24491 24484 }
24492 24485 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
24493 24486 sd_mhd_reset_notify_cb, (caddr_t)un);
24494 24487 } else {
24495 24488 /*
24496 24489 * sd_mhd_watch_cb will restart the resvd recover timeout thread
24497 24490 */
24498 24491 mutex_enter(SD_MUTEX(un));
24499 24492 un->un_resvd_status = resvd_status_save;
24500 24493 mutex_exit(SD_MUTEX(un));
24501 24494 }
24502 24495 return (rval);
24503 24496 }
24504 24497
24505 24498
24506 24499 /*
24507 24500 * Function: sd_mhdioc_register_devid
24508 24501 *
24509 24502 * Description: This routine is the driver entry point for handling ioctl
24510 24503 * requests to register the device id (MHIOCREREGISTERDEVID).
24511 24504 *
24512 24505 * Note: The implementation for this ioctl has been updated to
24513 24506 * be consistent with the original PSARC case (1999/357)
24514 24507 * (4375899, 4241671, 4220005)
24515 24508 *
24516 24509 * Arguments: dev - the device number
24517 24510 *
24518 24511 * Return Code: 0
24519 24512 * ENXIO
24520 24513 */
24521 24514
24522 24515 static int
24523 24516 sd_mhdioc_register_devid(dev_t dev)
24524 24517 {
24525 24518 struct sd_lun *un = NULL;
24526 24519 int rval = 0;
24527 24520 sd_ssc_t *ssc;
24528 24521
24529 24522 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24530 24523 return (ENXIO);
24531 24524 }
24532 24525
24533 24526 ASSERT(!mutex_owned(SD_MUTEX(un)));
24534 24527
24535 24528 mutex_enter(SD_MUTEX(un));
24536 24529
24537 24530 /* If a devid already exists, de-register it */
24538 24531 if (un->un_devid != NULL) {
24539 24532 ddi_devid_unregister(SD_DEVINFO(un));
24540 24533 /*
24541 24534 		 * After unregistering the devid, we need to free the devid memory.
24542 24535 */
24543 24536 ddi_devid_free(un->un_devid);
24544 24537 un->un_devid = NULL;
24545 24538 }
24546 24539
24547 24540 /* Check for reservation conflict */
24548 24541 mutex_exit(SD_MUTEX(un));
24549 24542 ssc = sd_ssc_init(un);
24550 24543 rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
24551 24544 mutex_enter(SD_MUTEX(un));
24552 24545
24553 24546 switch (rval) {
24554 24547 case 0:
24555 24548 sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED);
24556 24549 break;
24557 24550 case EACCES:
24558 24551 break;
24559 24552 default:
24560 24553 rval = EIO;
24561 24554 }
24562 24555
24563 24556 mutex_exit(SD_MUTEX(un));
24564 24557 if (rval != 0) {
24565 24558 if (rval == EIO)
24566 24559 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
24567 24560 else
24568 24561 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
24569 24562 }
24570 24563 sd_ssc_fini(ssc);
24571 24564 return (rval);
24572 24565 }
24573 24566
24574 24567
24575 24568 /*
24576 24569 * Function: sd_mhdioc_inkeys
24577 24570 *
24578 24571 * Description: This routine is the driver entry point for handling ioctl
24579 24572 * requests to issue the SCSI-3 Persistent In Read Keys command
24580 24573 * to the device (MHIOCGRP_INKEYS).
24581 24574 *
24582 24575 * Arguments: dev - the device number
24583 24576 * arg - user provided in_keys structure
24584 24577 * flag - this argument is a pass through to ddi_copyxxx()
24585 24578 * directly from the mode argument of ioctl().
24586 24579 *
24587 24580 * Return Code: code returned by sd_persistent_reservation_in_read_keys()
24588 24581 * ENXIO
24589 24582 * EFAULT
24590 24583 */
24591 24584
24592 24585 static int
24593 24586 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
24594 24587 {
24595 24588 struct sd_lun *un;
24596 24589 mhioc_inkeys_t inkeys;
24597 24590 int rval = 0;
24598 24591
24599 24592 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24600 24593 return (ENXIO);
24601 24594 }
24602 24595
24603 24596 #ifdef _MULTI_DATAMODEL
24604 24597 switch (ddi_model_convert_from(flag & FMODELS)) {
24605 24598 case DDI_MODEL_ILP32: {
24606 24599 struct mhioc_inkeys32 inkeys32;
24607 24600
24608 24601 if (ddi_copyin(arg, &inkeys32,
24609 24602 sizeof (struct mhioc_inkeys32), flag) != 0) {
24610 24603 return (EFAULT);
24611 24604 }
24612 24605 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
24613 24606 if ((rval = sd_persistent_reservation_in_read_keys(un,
24614 24607 &inkeys, flag)) != 0) {
24615 24608 return (rval);
24616 24609 }
24617 24610 inkeys32.generation = inkeys.generation;
24618 24611 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
24619 24612 flag) != 0) {
24620 24613 return (EFAULT);
24621 24614 }
24622 24615 break;
24623 24616 }
24624 24617 case DDI_MODEL_NONE:
24625 24618 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
24626 24619 flag) != 0) {
24627 24620 return (EFAULT);
24628 24621 }
24629 24622 if ((rval = sd_persistent_reservation_in_read_keys(un,
24630 24623 &inkeys, flag)) != 0) {
24631 24624 return (rval);
24632 24625 }
24633 24626 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
24634 24627 flag) != 0) {
24635 24628 return (EFAULT);
24636 24629 }
24637 24630 break;
24638 24631 }
24639 24632
24640 24633 #else /* ! _MULTI_DATAMODEL */
24641 24634
24642 24635 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
24643 24636 return (EFAULT);
24644 24637 }
24645 24638 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
24646 24639 if (rval != 0) {
24647 24640 return (rval);
24648 24641 }
24649 24642 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
24650 24643 return (EFAULT);
24651 24644 }
24652 24645
24653 24646 #endif /* _MULTI_DATAMODEL */
24654 24647
24655 24648 return (rval);
24656 24649 }
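
/*
 * An illustrative userland sketch (not part of the driver), assuming the
 * mhioc_key_list_t field names (listsize, listlen, list) from
 * <sys/mhd.h>; the descriptor 'fd', the capacity of 16, and the
 * use_key() consumer are hypothetical:
 *
 *	#include <sys/mhd.h>
 *
 *	mhioc_resv_key_t keys[16];	// room for 16 registered keys
 *	mhioc_key_list_t kl;
 *	mhioc_inkeys_t ik;
 *	uint32_t i, n;
 *
 *	kl.listsize = 16;		// capacity we provide
 *	kl.listlen = 0;			// filled in by the driver
 *	kl.list = keys;
 *	ik.li = &kl;
 *
 *	if (ioctl(fd, MHIOCGRP_INKEYS, &ik) == 0) {
 *		// kl.listlen is the number of keys registered with the
 *		// device; at most kl.listsize of them were copied out
 *		n = (kl.listlen < kl.listsize) ? kl.listlen : kl.listsize;
 *		for (i = 0; i < n; i++)
 *			use_key(&keys[i]);	// hypothetical consumer
 *	}
 */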
24657 24650
24658 24651
24659 24652 /*
24660 24653 * Function: sd_mhdioc_inresv
24661 24654 *
24662 24655 * Description: This routine is the driver entry point for handling ioctl
24663 24656 * requests to issue the SCSI-3 Persistent In Read Reservations
24664 24657  *		command to the device (MHIOCGRP_INRESV).
24665 24658 *
24666 24659 * Arguments: dev - the device number
24667 24660 * arg - user provided in_resv structure
24668 24661 * flag - this argument is a pass through to ddi_copyxxx()
24669 24662 * directly from the mode argument of ioctl().
24670 24663 *
24671 24664 * Return Code: code returned by sd_persistent_reservation_in_read_resv()
24672 24665 * ENXIO
24673 24666 * EFAULT
24674 24667 */
24675 24668
24676 24669 static int
24677 24670 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag)
24678 24671 {
24679 24672 struct sd_lun *un;
24680 24673 mhioc_inresvs_t inresvs;
24681 24674 int rval = 0;
24682 24675
24683 24676 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24684 24677 return (ENXIO);
24685 24678 }
24686 24679
24687 24680 #ifdef _MULTI_DATAMODEL
24688 24681
24689 24682 switch (ddi_model_convert_from(flag & FMODELS)) {
24690 24683 case DDI_MODEL_ILP32: {
24691 24684 struct mhioc_inresvs32 inresvs32;
24692 24685
24693 24686 if (ddi_copyin(arg, &inresvs32,
24694 24687 sizeof (struct mhioc_inresvs32), flag) != 0) {
24695 24688 return (EFAULT);
24696 24689 }
24697 24690 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li;
24698 24691 if ((rval = sd_persistent_reservation_in_read_resv(un,
24699 24692 &inresvs, flag)) != 0) {
24700 24693 return (rval);
24701 24694 }
24702 24695 inresvs32.generation = inresvs.generation;
24703 24696 if (ddi_copyout(&inresvs32, arg,
24704 24697 sizeof (struct mhioc_inresvs32), flag) != 0) {
24705 24698 return (EFAULT);
24706 24699 }
24707 24700 break;
24708 24701 }
24709 24702 case DDI_MODEL_NONE:
24710 24703 if (ddi_copyin(arg, &inresvs,
24711 24704 sizeof (mhioc_inresvs_t), flag) != 0) {
24712 24705 return (EFAULT);
24713 24706 }
24714 24707 if ((rval = sd_persistent_reservation_in_read_resv(un,
24715 24708 &inresvs, flag)) != 0) {
24716 24709 return (rval);
24717 24710 }
24718 24711 if (ddi_copyout(&inresvs, arg,
24719 24712 sizeof (mhioc_inresvs_t), flag) != 0) {
24720 24713 return (EFAULT);
24721 24714 }
24722 24715 break;
24723 24716 }
24724 24717
24725 24718 #else /* ! _MULTI_DATAMODEL */
24726 24719
24727 24720 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) {
24728 24721 return (EFAULT);
24729 24722 }
24730 24723 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag);
24731 24724 if (rval != 0) {
24732 24725 return (rval);
24733 24726 }
24734 24727 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) {
24735 24728 return (EFAULT);
24736 24729 }
24737 24730
24738 24731 #endif /* ! _MULTI_DATAMODEL */
24739 24732
24740 24733 return (rval);
24741 24734 }
24742 24735
24743 24736
24744 24737 /*
24745 24738 * The following routines support the clustering functionality described below
24746 24739 * and implement lost reservation reclaim functionality.
24747 24740 *
24748 24741 * Clustering
24749 24742 * ----------
24750 24743 * The clustering code uses two different, independent forms of SCSI
24751 24744  * reservation: traditional SCSI-2 Reserve/Release and the newer SCSI-3
24752 24745 * Persistent Group Reservations. For any particular disk, it will use either
24753 24746 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk.
24754 24747 *
24755 24748 * SCSI-2
24756 24749 * The cluster software takes ownership of a multi-hosted disk by issuing the
24757 24750 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the
24758 24751  * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl: a
24759 24752  * cluster host, just after taking ownership of the disk with the MHIOCTKOWN
24760 24753  * ioctl, then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the
24761 24754 * driver. The meaning of failfast is that if the driver (on this host) ever
24762 24755 * encounters the scsi error return code RESERVATION_CONFLICT from the device,
24763 24756 * it should immediately panic the host. The motivation for this ioctl is that
24764 24757 * if this host does encounter reservation conflict, the underlying cause is
24765 24758 * that some other host of the cluster has decided that this host is no longer
24766 24759 * in the cluster and has seized control of the disks for itself. Since this
24767 24760 * host is no longer in the cluster, it ought to panic itself. The
24768 24761 * MHIOCENFAILFAST ioctl does two things:
24769 24762 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT
24770 24763 * error to panic the host
24771 24764 * (b) it sets up a periodic timer to test whether this host still has
24772 24765 * "access" (in that no other host has reserved the device): if the
24773 24766 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The
24774 24767 * purpose of that periodic timer is to handle scenarios where the host is
24775 24768 * otherwise temporarily quiescent, temporarily doing no real i/o.
24776 24769 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host,
24777 24770 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for
24778 24771 * the device itself.
24779 24772 *
24780 24773 * SCSI-3 PGR
24781 24774 * A direct semantic implementation of the SCSI-3 Persistent Reservation
24782 24775 * facility is supported through the shared multihost disk ioctls
24783 24776 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
24784 24777 * MHIOCGRP_PREEMPTANDABORT, MHIOCGRP_CLEAR)
24785 24778 *
24786 24779 * Reservation Reclaim:
24787 24780 * --------------------
24788 24781  * To support the lost reservation reclaim operations, this driver creates a
24789 24782  * single thread to handle reinstating reservations on all devices that have
24790 24783  * lost reservations. sd_resv_reclaim_requests are logged for all devices that
24791 24784  * have LOST RESERVATIONS when the scsi watch facility calls back sd_mhd_watch_cb,
24792 24785  * and the reservation reclaim thread loops through the requests to regain the
24793 24786  * lost reservations.
24794 24787 */
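
/*
 * An illustrative userland sketch of the SCSI-2 sequence described above
 * (not part of the driver); the device path, probe interval, and error
 * handling are invented for the example. Passing NULL to MHIOCTKOWN lets
 * the driver use its default ownership delays:
 *
 *	#include <sys/mhd.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/rdsk/c0t0d0s2", O_RDWR);	// hypothetical path
 *	int probe_ms = 2000;				// failfast probe
 *
 *	if (ioctl(fd, MHIOCTKOWN, NULL) != 0)		// seize the disk
 *		perror("MHIOCTKOWN");
 *	else if (ioctl(fd, MHIOCENFAILFAST, &probe_ms) != 0)
 *		perror("MHIOCENFAILFAST");
 *	// this host now owns the disk; any subsequent
 *	// RESERVATION_CONFLICT will panic it
 *	if (ioctl(fd, MHIOCRELEASE, 0) != 0)		// give it back
 *		perror("MHIOCRELEASE");
 */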
24795 24788
24796 24789 /*
24797 24790 * Function: sd_check_mhd()
24798 24791 *
24799 24792 * Description: This function sets up and submits a scsi watch request or
24800 24793 * terminates an existing watch request. This routine is used in
24801 24794 * support of reservation reclaim.
24802 24795 *
24803 24796 * Arguments: dev - the device 'dev_t' is used for context to discriminate
24804 24797 * among multiple watches that share the callback function
24805 24798  *		interval - the number of milliseconds specifying the watch
24806 24799 * interval for issuing TEST UNIT READY commands. If
24807 24800 * set to 0 the watch should be terminated. If the
24808 24801 * interval is set to 0 and if the device is required
24809 24802 * to hold reservation while disabling failfast, the
24810 24803 * watch is restarted with an interval of
24811 24804 * reinstate_resv_delay.
24812 24805 *
24813 24806 * Return Code: 0 - Successful submit/terminate of scsi watch request
24814 24807 * ENXIO - Indicates an invalid device was specified
24815 24808 * EAGAIN - Unable to submit the scsi watch request
24816 24809 */
24817 24810
24818 24811 static int
24819 24812 sd_check_mhd(dev_t dev, int interval)
24820 24813 {
24821 24814 struct sd_lun *un;
24822 24815 opaque_t token;
24823 24816
24824 24817 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24825 24818 return (ENXIO);
24826 24819 }
24827 24820
24828 24821 /* is this a watch termination request? */
24829 24822 if (interval == 0) {
24830 24823 mutex_enter(SD_MUTEX(un));
24831 24824 /* if there is an existing watch task then terminate it */
24832 24825 if (un->un_mhd_token) {
24833 24826 token = un->un_mhd_token;
24834 24827 un->un_mhd_token = NULL;
24835 24828 mutex_exit(SD_MUTEX(un));
24836 24829 (void) scsi_watch_request_terminate(token,
24837 24830 SCSI_WATCH_TERMINATE_ALL_WAIT);
24838 24831 mutex_enter(SD_MUTEX(un));
24839 24832 } else {
24840 24833 mutex_exit(SD_MUTEX(un));
24841 24834 /*
24842 24835 * Note: If we return here we don't check for the
24843 24836 * failfast case. This is the original legacy
24844 24837 * implementation but perhaps we should be checking
24845 24838 * the failfast case.
24846 24839 */
24847 24840 return (0);
24848 24841 }
24849 24842 /*
24850 24843 * If the device is required to hold reservation while
24851 24844 * disabling failfast, we need to restart the scsi_watch
24852 24845 * routine with an interval of reinstate_resv_delay.
24853 24846 */
24854 24847 if (un->un_resvd_status & SD_RESERVE) {
24855 24848 interval = sd_reinstate_resv_delay/1000;
24856 24849 } else {
24857 24850 /* no failfast so bail */
24858 24851 mutex_exit(SD_MUTEX(un));
24859 24852 return (0);
24860 24853 }
24861 24854 mutex_exit(SD_MUTEX(un));
24862 24855 }
24863 24856
24864 24857 /*
24865 24858 * adjust minimum time interval to 1 second,
24866 24859 * and convert from msecs to usecs
24867 24860 */
24868 24861 if (interval > 0 && interval < 1000) {
24869 24862 interval = 1000;
24870 24863 }
24871 24864 interval *= 1000;
24872 24865
24873 24866 /*
24874 24867 * submit the request to the scsi_watch service
24875 24868 */
24876 24869 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval,
24877 24870 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev);
24878 24871 if (token == NULL) {
24879 24872 return (EAGAIN);
24880 24873 }
24881 24874
24882 24875 /*
24883 24876 * save token for termination later on
24884 24877 */
24885 24878 mutex_enter(SD_MUTEX(un));
24886 24879 un->un_mhd_token = token;
24887 24880 mutex_exit(SD_MUTEX(un));
24888 24881 return (0);
24889 24882 }
24890 24883
24891 24884
24892 24885 /*
24893 24886 * Function: sd_mhd_watch_cb()
24894 24887 *
24895 24888 * Description: This function is the call back function used by the scsi watch
24896 24889 * facility. The scsi watch facility sends the "Test Unit Ready"
24897 24890 * and processes the status. If applicable (i.e. a "Unit Attention"
24898 24891 * status and automatic "Request Sense" not used) the scsi watch
24899 24892 * facility will send a "Request Sense" and retrieve the sense data
24900 24893  *		to be passed to this callback function. In either case,
24901 24894  *		whether the "Request Sense" was automatic or the facility
24902 24895  *		submitted one, this callback is passed the status and sense data.
24903 24896 *
24904 24897 * Arguments: arg - the device 'dev_t' is used for context to discriminate
24905 24898 * among multiple watches that share this callback function
24906 24899 * resultp - scsi watch facility result packet containing scsi
24907 24900 * packet, status byte and sense data
24908 24901 *
24909 24902 * Return Code: 0 - continue the watch task
24910 24903 * non-zero - terminate the watch task
24911 24904 */
24912 24905
24913 24906 static int
24914 24907 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
24915 24908 {
24916 24909 struct sd_lun *un;
24917 24910 struct scsi_status *statusp;
24918 24911 uint8_t *sensep;
24919 24912 struct scsi_pkt *pkt;
24920 24913 uchar_t actual_sense_length;
24921 24914 dev_t dev = (dev_t)arg;
24922 24915
24923 24916 ASSERT(resultp != NULL);
24924 24917 statusp = resultp->statusp;
24925 24918 sensep = (uint8_t *)resultp->sensep;
24926 24919 pkt = resultp->pkt;
24927 24920 actual_sense_length = resultp->actual_sense_length;
24928 24921
24929 24922 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24930 24923 return (ENXIO);
24931 24924 }
24932 24925
24933 24926 SD_TRACE(SD_LOG_IOCTL_MHD, un,
24934 24927 "sd_mhd_watch_cb: reason '%s', status '%s'\n",
24935 24928 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp)));
24936 24929
24937 24930 /* Begin processing of the status and/or sense data */
24938 24931 if (pkt->pkt_reason != CMD_CMPLT) {
24939 24932 /* Handle the incomplete packet */
24940 24933 sd_mhd_watch_incomplete(un, pkt);
24941 24934 return (0);
24942 24935 } else if (*((unsigned char *)statusp) != STATUS_GOOD) {
24943 24936 if (*((unsigned char *)statusp)
24944 24937 == STATUS_RESERVATION_CONFLICT) {
24945 24938 /*
24946 24939 * Handle a reservation conflict by panicking if
24947 24940 * configured for failfast or by logging the conflict
24948 24941 * and updating the reservation status
24949 24942 */
24950 24943 mutex_enter(SD_MUTEX(un));
24951 24944 if ((un->un_resvd_status & SD_FAILFAST) &&
24952 24945 (sd_failfast_enable)) {
24953 24946 sd_panic_for_res_conflict(un);
24954 24947 /*NOTREACHED*/
24955 24948 }
24956 24949 SD_INFO(SD_LOG_IOCTL_MHD, un,
24957 24950 "sd_mhd_watch_cb: Reservation Conflict\n");
24958 24951 un->un_resvd_status |= SD_RESERVATION_CONFLICT;
24959 24952 mutex_exit(SD_MUTEX(un));
24960 24953 }
24961 24954 }
24962 24955
24963 24956 if (sensep != NULL) {
24964 24957 if (actual_sense_length >= (SENSE_LENGTH - 2)) {
24965 24958 mutex_enter(SD_MUTEX(un));
24966 24959 if ((scsi_sense_asc(sensep) ==
24967 24960 SD_SCSI_RESET_SENSE_CODE) &&
24968 24961 (un->un_resvd_status & SD_RESERVE)) {
24969 24962 /*
24970 24963 * The additional sense code indicates a power
24971 24964 * on or bus device reset has occurred; update
24972 24965 * the reservation status.
24973 24966 */
24974 24967 un->un_resvd_status |=
24975 24968 (SD_LOST_RESERVE | SD_WANT_RESERVE);
24976 24969 SD_INFO(SD_LOG_IOCTL_MHD, un,
24977 24970 "sd_mhd_watch_cb: Lost Reservation\n");
24978 24971 }
24979 24972 } else {
24980 24973 return (0);
24981 24974 }
24982 24975 } else {
24983 24976 mutex_enter(SD_MUTEX(un));
24984 24977 }
24985 24978
24986 24979 if ((un->un_resvd_status & SD_RESERVE) &&
24987 24980 (un->un_resvd_status & SD_LOST_RESERVE)) {
24988 24981 if (un->un_resvd_status & SD_WANT_RESERVE) {
24989 24982 /*
24990 24983 * A reset occurred in between the last probe and this
24991 24984 * one so if a timeout is pending cancel it.
24992 24985 */
24993 24986 if (un->un_resvd_timeid) {
24994 24987 timeout_id_t temp_id = un->un_resvd_timeid;
24995 24988 un->un_resvd_timeid = NULL;
24996 24989 mutex_exit(SD_MUTEX(un));
24997 24990 (void) untimeout(temp_id);
24998 24991 mutex_enter(SD_MUTEX(un));
24999 24992 }
25000 24993 un->un_resvd_status &= ~SD_WANT_RESERVE;
25001 24994 }
25002 24995 if (un->un_resvd_timeid == 0) {
25003 24996 /* Schedule a timeout to handle the lost reservation */
25004 24997 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover,
25005 24998 (void *)dev,
25006 24999 drv_usectohz(sd_reinstate_resv_delay));
25007 25000 }
25008 25001 }
25009 25002 mutex_exit(SD_MUTEX(un));
25010 25003 return (0);
25011 25004 }
25012 25005
25013 25006
25014 25007 /*
25015 25008 * Function: sd_mhd_watch_incomplete()
25016 25009 *
25017 25010 * Description: This function is used to find out why a scsi pkt sent by the
25018 25011 * scsi watch facility was not completed. Under some scenarios this
25019 25012 * routine will return. Otherwise it will send a bus reset to see
25020 25013 * if the drive is still online.
25021 25014 *
25022 25015 * Arguments: un - driver soft state (unit) structure
25023 25016 * pkt - incomplete scsi pkt
25024 25017 */
25025 25018
25026 25019 static void
25027 25020 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt)
25028 25021 {
25029 25022 int be_chatty;
25030 25023 int perr;
25031 25024
25032 25025 ASSERT(pkt != NULL);
25033 25026 ASSERT(un != NULL);
25034 25027 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT));
25035 25028 perr = (pkt->pkt_statistics & STAT_PERR);
25036 25029
25037 25030 mutex_enter(SD_MUTEX(un));
25038 25031 if (un->un_state == SD_STATE_DUMPING) {
25039 25032 mutex_exit(SD_MUTEX(un));
25040 25033 return;
25041 25034 }
25042 25035
25043 25036 switch (pkt->pkt_reason) {
25044 25037 case CMD_UNX_BUS_FREE:
25045 25038 /*
25046 25039 * If we had a parity error that caused the target to drop BSY*,
25047 25040 * don't be chatty about it.
25048 25041 */
25049 25042 if (perr && be_chatty) {
25050 25043 be_chatty = 0;
25051 25044 }
25052 25045 break;
25053 25046 case CMD_TAG_REJECT:
25054 25047 /*
25055 25048 * The SCSI-2 spec states that a tag reject will be sent by the
25056 25049 * target if tagged queuing is not supported. A tag reject may
25057 25050 * also be sent during certain initialization periods or to
25058 25051 * control internal resources. For the latter case the target
25059 25052 * may also return Queue Full.
25060 25053 *
25061 25054 * If this driver receives a tag reject from a target that is
25062 25055 * going through an init period or controlling internal
25063 25056 		 * resources, tagged queuing will be disabled. This is less
25064 25057 		 * than optimal behavior, but the driver is unable to determine
25065 25058 		 * the target state and assumes tagged queuing is not supported.
25066 25059 */
25067 25060 pkt->pkt_flags = 0;
25068 25061 un->un_tagflags = 0;
25069 25062
25070 25063 if (un->un_f_opt_queueing == TRUE) {
25071 25064 un->un_throttle = min(un->un_throttle, 3);
25072 25065 } else {
25073 25066 un->un_throttle = 1;
25074 25067 }
25075 25068 mutex_exit(SD_MUTEX(un));
25076 25069 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
25077 25070 mutex_enter(SD_MUTEX(un));
25078 25071 break;
25079 25072 case CMD_INCOMPLETE:
25080 25073 /*
25081 25074 * The transport stopped with an abnormal state, fallthrough and
25082 25075 * reset the target and/or bus unless selection did not complete
25083 25076 * (indicated by STATE_GOT_BUS) in which case we don't want to
25084 25077 * go through a target/bus reset
25085 25078 */
25086 25079 if (pkt->pkt_state == STATE_GOT_BUS) {
25087 25080 break;
25088 25081 }
25089 25082 /*FALLTHROUGH*/
25090 25083
25091 25084 case CMD_TIMEOUT:
25092 25085 default:
25093 25086 /*
25094 25087 * The lun may still be running the command, so a lun reset
25095 25088 * should be attempted. If the lun reset fails or cannot be
25096 25089 		 * issued, then try a target reset. Lastly, try a bus reset.
25097 25090 */
25098 25091 if ((pkt->pkt_statistics &
25099 25092 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
25100 25093 int reset_retval = 0;
25101 25094 mutex_exit(SD_MUTEX(un));
25102 25095 if (un->un_f_allow_bus_device_reset == TRUE) {
25103 25096 if (un->un_f_lun_reset_enabled == TRUE) {
25104 25097 reset_retval =
25105 25098 scsi_reset(SD_ADDRESS(un),
25106 25099 RESET_LUN);
25107 25100 }
25108 25101 if (reset_retval == 0) {
25109 25102 reset_retval =
25110 25103 scsi_reset(SD_ADDRESS(un),
25111 25104 RESET_TARGET);
25112 25105 }
25113 25106 }
25114 25107 if (reset_retval == 0) {
25115 25108 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
25116 25109 }
25117 25110 mutex_enter(SD_MUTEX(un));
25118 25111 }
25119 25112 break;
25120 25113 }
25121 25114
25122 25115 /* A device/bus reset has occurred; update the reservation status. */
25123 25116 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
25124 25117 (STAT_BUS_RESET | STAT_DEV_RESET))) {
25125 25118 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25126 25119 un->un_resvd_status |=
25127 25120 (SD_LOST_RESERVE | SD_WANT_RESERVE);
25128 25121 SD_INFO(SD_LOG_IOCTL_MHD, un,
25129 25122 "sd_mhd_watch_incomplete: Lost Reservation\n");
25130 25123 }
25131 25124 }
25132 25125
25133 25126 /*
25134 25127 	 * The disk has been turned off; update the device state.
25135 25128 *
25136 25129 * Note: Should we be offlining the disk here?
25137 25130 */
25138 25131 if (pkt->pkt_state == STATE_GOT_BUS) {
25139 25132 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
25140 25133 "Disk not responding to selection\n");
25141 25134 if (un->un_state != SD_STATE_OFFLINE) {
25142 25135 New_state(un, SD_STATE_OFFLINE);
25143 25136 }
25144 25137 } else if (be_chatty) {
25145 25138 /*
25146 25139 * suppress messages if they are all the same pkt reason;
25147 25140 * with TQ, many (up to 256) are returned with the same
25148 25141 * pkt_reason
25149 25142 */
25150 25143 if (pkt->pkt_reason != un->un_last_pkt_reason) {
25151 25144 SD_ERROR(SD_LOG_IOCTL_MHD, un,
25152 25145 "sd_mhd_watch_incomplete: "
25153 25146 "SCSI transport failed: reason '%s'\n",
25154 25147 scsi_rname(pkt->pkt_reason));
25155 25148 }
25156 25149 }
25157 25150 un->un_last_pkt_reason = pkt->pkt_reason;
25158 25151 mutex_exit(SD_MUTEX(un));
25159 25152 }
25160 25153
25161 25154
25162 25155 /*
25163 25156 * Function: sd_sname()
25164 25157 *
25165 25158 * Description: This is a simple little routine to return a string containing
25166 25159 * a printable description of command status byte for use in
25167 25160 * logging.
25168 25161 *
25169 25162 * Arguments: status - pointer to a status byte
25170 25163 *
25171 25164 * Return Code: char * - string containing status description.
25172 25165 */
25173 25166
25174 25167 static char *
25175 25168 sd_sname(uchar_t status)
25176 25169 {
25177 25170 switch (status & STATUS_MASK) {
25178 25171 case STATUS_GOOD:
25179 25172 return ("good status");
25180 25173 case STATUS_CHECK:
25181 25174 return ("check condition");
25182 25175 case STATUS_MET:
25183 25176 return ("condition met");
25184 25177 case STATUS_BUSY:
25185 25178 return ("busy");
25186 25179 case STATUS_INTERMEDIATE:
25187 25180 return ("intermediate");
25188 25181 case STATUS_INTERMEDIATE_MET:
25189 25182 return ("intermediate - condition met");
25190 25183 case STATUS_RESERVATION_CONFLICT:
25191 25184 return ("reservation_conflict");
25192 25185 case STATUS_TERMINATED:
25193 25186 return ("command terminated");
25194 25187 case STATUS_QFULL:
25195 25188 return ("queue full");
25196 25189 default:
25197 25190 return ("<unknown status>");
25198 25191 }
25199 25192 }
25200 25193
25201 25194
25202 25195 /*
25203 25196 * Function: sd_mhd_resvd_recover()
25204 25197 *
25205 25198 * Description: This function adds a reservation entry to the
25206 25199 * sd_resv_reclaim_request list and signals the reservation
25207 25200 * reclaim thread that there is work pending. If the reservation
25208 25201 * reclaim thread has not been previously created this function
25209 25202 * will kick it off.
25210 25203 *
25211 25204 * Arguments: arg - the device 'dev_t' is used for context to discriminate
25212 25205 * among multiple watches that share this callback function
25213 25206 *
25214 25207 * Context: This routine is called by timeout() and is run in interrupt
25215 25208 * context. It must not sleep or call other functions which may
25216 25209 * sleep.
25217 25210 */
25218 25211
25219 25212 static void
25220 25213 sd_mhd_resvd_recover(void *arg)
25221 25214 {
25222 25215 dev_t dev = (dev_t)arg;
25223 25216 struct sd_lun *un;
25224 25217 struct sd_thr_request *sd_treq = NULL;
25225 25218 struct sd_thr_request *sd_cur = NULL;
25226 25219 struct sd_thr_request *sd_prev = NULL;
25227 25220 int already_there = 0;
25228 25221
25229 25222 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25230 25223 return;
25231 25224 }
25232 25225
25233 25226 mutex_enter(SD_MUTEX(un));
25234 25227 un->un_resvd_timeid = NULL;
25235 25228 if (un->un_resvd_status & SD_WANT_RESERVE) {
25236 25229 /*
25237 25230 		 * There was a reset, so don't issue the reserve; allow the
25238 25231 * sd_mhd_watch_cb callback function to notice this and
25239 25232 * reschedule the timeout for reservation.
25240 25233 */
25241 25234 mutex_exit(SD_MUTEX(un));
25242 25235 return;
25243 25236 }
25244 25237 mutex_exit(SD_MUTEX(un));
25245 25238
25246 25239 /*
25247 25240 * Add this device to the sd_resv_reclaim_request list and the
25248 25241 * sd_resv_reclaim_thread should take care of the rest.
25249 25242 *
25250 25243 	 * Note: We can't sleep in this context, so if the memory allocation
25251 25244 	 * fails, allow the sd_mhd_watch_cb callback function to notice this and
25252 25245 * reschedule the timeout for reservation. (4378460)
25253 25246 */
25254 25247 sd_treq = (struct sd_thr_request *)
25255 25248 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP);
25256 25249 if (sd_treq == NULL) {
25257 25250 return;
25258 25251 }
25259 25252
25260 25253 sd_treq->sd_thr_req_next = NULL;
25261 25254 sd_treq->dev = dev;
25262 25255 mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25263 25256 if (sd_tr.srq_thr_req_head == NULL) {
25264 25257 sd_tr.srq_thr_req_head = sd_treq;
25265 25258 } else {
25266 25259 sd_cur = sd_prev = sd_tr.srq_thr_req_head;
25267 25260 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) {
25268 25261 if (sd_cur->dev == dev) {
25269 25262 /*
25270 25263 				 * already in the queue, so don't log
25271 25264 * another request for the device
25272 25265 */
25273 25266 already_there = 1;
25274 25267 break;
25275 25268 }
25276 25269 sd_prev = sd_cur;
25277 25270 }
25278 25271 if (!already_there) {
25279 25272 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: "
25280 25273 "logging request for %lx\n", dev);
25281 25274 sd_prev->sd_thr_req_next = sd_treq;
25282 25275 } else {
25283 25276 kmem_free(sd_treq, sizeof (struct sd_thr_request));
25284 25277 }
25285 25278 }
25286 25279
25287 25280 /*
25288 25281 	 * Create a kernel thread to do the reservation reclaim, freeing up this
25289 25282 	 * thread. We cannot block this thread while we go away to do the
25290 25283 	 * reservation reclaim.
25291 25284 */
25292 25285 if (sd_tr.srq_resv_reclaim_thread == NULL)
25293 25286 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0,
25294 25287 sd_resv_reclaim_thread, NULL,
25295 25288 0, &p0, TS_RUN, v.v_maxsyspri - 2);
25296 25289
25297 25290 /* Tell the reservation reclaim thread that it has work to do */
25298 25291 cv_signal(&sd_tr.srq_resv_reclaim_cv);
25299 25292 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25300 25293 }
25301 25294
25302 25295 /*
25303 25296 * Function: sd_resv_reclaim_thread()
25304 25297 *
25305 25298  * Description: This function implements the reservation reclaim operations.
25306 25299  *
25307 25300  * Arguments: none. Work is taken from the global sd_resv_reclaim_request
25308 25301  *		list populated by sd_mhd_resvd_recover().
25309 25302 */
25310 25303
25311 25304 static void
25312 25305 sd_resv_reclaim_thread()
25313 25306 {
25314 25307 struct sd_lun *un;
25315 25308 struct sd_thr_request *sd_mhreq;
25316 25309
25317 25310 /* Wait for work */
25318 25311 mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25319 25312 if (sd_tr.srq_thr_req_head == NULL) {
25320 25313 cv_wait(&sd_tr.srq_resv_reclaim_cv,
25321 25314 &sd_tr.srq_resv_reclaim_mutex);
25322 25315 }
25323 25316
25324 25317 /* Loop while we have work */
25325 25318 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) {
25326 25319 un = ddi_get_soft_state(sd_state,
25327 25320 SDUNIT(sd_tr.srq_thr_cur_req->dev));
25328 25321 if (un == NULL) {
25329 25322 /*
25330 25323 * softstate structure is NULL so just
25331 25324 * dequeue the request and continue
25332 25325 */
25333 25326 sd_tr.srq_thr_req_head =
25334 25327 sd_tr.srq_thr_cur_req->sd_thr_req_next;
25335 25328 kmem_free(sd_tr.srq_thr_cur_req,
25336 25329 sizeof (struct sd_thr_request));
25337 25330 continue;
25338 25331 }
25339 25332
25340 25333 /* dequeue the request */
25341 25334 sd_mhreq = sd_tr.srq_thr_cur_req;
25342 25335 sd_tr.srq_thr_req_head =
25343 25336 sd_tr.srq_thr_cur_req->sd_thr_req_next;
25344 25337 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25345 25338
25346 25339 /*
25347 25340 * Reclaim reservation only if SD_RESERVE is still set. There
25348 25341 * may have been a call to MHIOCRELEASE before we got here.
25349 25342 */
25350 25343 mutex_enter(SD_MUTEX(un));
25351 25344 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25352 25345 /*
25353 25346 * Note: The SD_LOST_RESERVE flag is cleared before
25354 25347 * reclaiming the reservation. If this is done after the
25355 25348 			 * call to sd_reserve_release, a reservation loss in the
25356 25349 			 * window between pkt completion of the reserve cmd and the
25357 25350 			 * mutex_enter below may not be recognized.
25358 25351 */
25359 25352 un->un_resvd_status &= ~SD_LOST_RESERVE;
25360 25353 mutex_exit(SD_MUTEX(un));
25361 25354
25362 25355 if (sd_reserve_release(sd_mhreq->dev,
25363 25356 SD_RESERVE) == 0) {
25364 25357 mutex_enter(SD_MUTEX(un));
25365 25358 un->un_resvd_status |= SD_RESERVE;
25366 25359 mutex_exit(SD_MUTEX(un));
25367 25360 SD_INFO(SD_LOG_IOCTL_MHD, un,
25368 25361 "sd_resv_reclaim_thread: "
25369 25362 "Reservation Recovered\n");
25370 25363 } else {
25371 25364 mutex_enter(SD_MUTEX(un));
25372 25365 un->un_resvd_status |= SD_LOST_RESERVE;
25373 25366 mutex_exit(SD_MUTEX(un));
25374 25367 SD_INFO(SD_LOG_IOCTL_MHD, un,
25375 25368 "sd_resv_reclaim_thread: Failed "
25376 25369 "Reservation Recovery\n");
25377 25370 }
25378 25371 } else {
25379 25372 mutex_exit(SD_MUTEX(un));
25380 25373 }
25381 25374 mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25382 25375 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req);
25383 25376 kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
25384 25377 sd_mhreq = sd_tr.srq_thr_cur_req = NULL;
25385 25378 /*
25386 25379 		 * wake up the destroy thread if anyone is waiting on
25387 25380 * us to complete.
25388 25381 */
25389 25382 cv_signal(&sd_tr.srq_inprocess_cv);
25390 25383 SD_TRACE(SD_LOG_IOCTL_MHD, un,
25391 25384 "sd_resv_reclaim_thread: cv_signalling current request \n");
25392 25385 }
25393 25386
25394 25387 /*
25395 25388 	 * clean up the sd_tr structure now that this thread is exiting
25396 25389 */
25397 25390 ASSERT(sd_tr.srq_thr_req_head == NULL);
25398 25391 ASSERT(sd_tr.srq_thr_cur_req == NULL);
25399 25392 sd_tr.srq_resv_reclaim_thread = NULL;
25400 25393 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25401 25394 thread_exit();
25402 25395 }
25403 25396
25404 25397
25405 25398 /*
25406 25399 * Function: sd_rmv_resv_reclaim_req()
25407 25400 *
25408 25401 * Description: This function removes any pending reservation reclaim requests
25409 25402 * for the specified device.
25410 25403 *
25411 25404 * Arguments: dev - the device 'dev_t'
25412 25405 */
25413 25406
25414 25407 static void
25415 25408 sd_rmv_resv_reclaim_req(dev_t dev)
25416 25409 {
25417 25410 struct sd_thr_request *sd_mhreq;
25418 25411 struct sd_thr_request *sd_prev;
25419 25412
25420 25413 /* Remove a reservation reclaim request from the list */
25421 25414 mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25422 25415 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) {
25423 25416 /*
25424 25417 * We are attempting to reinstate reservation for
25425 25418 * this device. We wait for sd_reserve_release()
25426 25419 * to return before we return.
25427 25420 */
25428 25421 cv_wait(&sd_tr.srq_inprocess_cv,
25429 25422 &sd_tr.srq_resv_reclaim_mutex);
25430 25423 } else {
25431 25424 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head;
25432 25425 if (sd_mhreq && sd_mhreq->dev == dev) {
25433 25426 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next;
25434 25427 kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
25435 25428 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25436 25429 return;
25437 25430 }
25438 25431 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) {
25439 25432 if (sd_mhreq && sd_mhreq->dev == dev) {
25440 25433 break;
25441 25434 }
25442 25435 sd_prev = sd_mhreq;
25443 25436 }
25444 25437 if (sd_mhreq != NULL) {
25445 25438 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next;
25446 25439 kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
25447 25440 }
25448 25441 }
25449 25442 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25450 25443 }
25451 25444
25452 25445
25453 25446 /*
25454 25447 * Function: sd_mhd_reset_notify_cb()
25455 25448 *
25456 25449 * Description: This is a call back function for scsi_reset_notify. This
25457 25450 * function updates the softstate reserved status and logs the
25458 25451 * reset. The driver scsi watch facility callback function
25459 25452 * (sd_mhd_watch_cb) and reservation reclaim thread functionality
25460 25453 * will reclaim the reservation.
25461 25454 *
25462 25455 * Arguments: arg - driver soft state (unit) structure
25463 25456 */
25464 25457
25465 25458 static void
25466 25459 sd_mhd_reset_notify_cb(caddr_t arg)
25467 25460 {
25468 25461 struct sd_lun *un = (struct sd_lun *)arg;
25469 25462
25470 25463 mutex_enter(SD_MUTEX(un));
25471 25464 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25472 25465 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
25473 25466 SD_INFO(SD_LOG_IOCTL_MHD, un,
25474 25467 "sd_mhd_reset_notify_cb: Lost Reservation\n");
25475 25468 }
25476 25469 mutex_exit(SD_MUTEX(un));
25477 25470 }
25478 25471
25479 25472
25480 25473 /*
25481 25474 * Function: sd_take_ownership()
25482 25475 *
25483 25476 * Description: This routine implements an algorithm to achieve a stable
25484 25477 * reservation on disks which don't implement priority reserve,
25485 25478  *		and makes sure that other hosts lose re-reservation attempts.
25486 25479  *		The algorithm consists of a loop that keeps issuing the RESERVE
25487 25480  *		for some period of time (min_ownership_delay, default 6 seconds).
25488 25481 * During that loop, it looks to see if there has been a bus device
25489 25482 * reset or bus reset (both of which cause an existing reservation
25490 25483 * to be lost). If the reservation is lost issue RESERVE until a
25491 25484 * period of min_ownership_delay with no resets has gone by, or
25492 25485 * until max_ownership_delay has expired. This loop ensures that
25493 25486 * the host really did manage to reserve the device, in spite of
25494 25487 * resets. The looping for min_ownership_delay (default six
25495 25488 * seconds) is important to early generation clustering products,
25496 25489 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an
25497 25490 * MHIOCENFAILFAST periodic timer of two seconds. By having
25498 25491 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having
25499 25492 * MHIOCENFAILFAST poll every two seconds, the idea is that by the
25500 25493 * time the MHIOCTKOWN ioctl returns, the other host (if any) will
25501 25494 * have already noticed, via the MHIOCENFAILFAST polling, that it
25502 25495 * no longer "owns" the disk and will have panicked itself. Thus,
25503 25496 * the host issuing the MHIOCTKOWN is assured (with timing
25504 25497 * dependencies) that by the time it actually starts to use the
25505 25498 * disk for real work, the old owner is no longer accessing it.
25506 25499 *
25507 25500 * min_ownership_delay is the minimum amount of time for which the
25508 25501 * disk must be reserved continuously devoid of resets before the
25509 25502 * MHIOCTKOWN ioctl will return success.
25510 25503 *
25511 25504 * max_ownership_delay indicates the amount of time by which the
25512 25505 * take ownership should succeed or timeout with an error.
25513 25506 *
25514 25507 * Arguments: dev - the device 'dev_t'
25515 25508 * *p - struct containing timing info.
25516 25509 *
25517 25510 * Return Code: 0 for success or error code
25518 25511 */
25519 25512
25520 25513 static int
25521 25514 sd_take_ownership(dev_t dev, struct mhioctkown *p)
25522 25515 {
25523 25516 struct sd_lun *un;
25524 25517 int rval;
25525 25518 int err;
25526 25519 int reservation_count = 0;
25527 25520 int min_ownership_delay = 6000000; /* in usec */
25528 25521 int max_ownership_delay = 30000000; /* in usec */
25529 25522 clock_t start_time; /* starting time of this algorithm */
25530 25523 clock_t end_time; /* time limit for giving up */
25531 25524 clock_t ownership_time; /* time limit for stable ownership */
25532 25525 clock_t current_time;
25533 25526 clock_t previous_current_time;
25534 25527
25535 25528 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25536 25529 return (ENXIO);
25537 25530 }
25538 25531
25539 25532 /*
25540 25533 * Attempt a device reservation. A priority reservation is requested.
25541 25534 */
25542 25535 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE))
25543 25536 != SD_SUCCESS) {
25544 25537 SD_ERROR(SD_LOG_IOCTL_MHD, un,
25545 25538 "sd_take_ownership: return(1)=%d\n", rval);
25546 25539 return (rval);
25547 25540 }
25548 25541
25549 25542 /* Update the softstate reserved status to indicate the reservation */
25550 25543 mutex_enter(SD_MUTEX(un));
25551 25544 un->un_resvd_status |= SD_RESERVE;
25552 25545 un->un_resvd_status &=
25553 25546 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT);
25554 25547 mutex_exit(SD_MUTEX(un));
25555 25548
25556 25549 if (p != NULL) {
25557 25550 if (p->min_ownership_delay != 0) {
25558 25551 min_ownership_delay = p->min_ownership_delay * 1000;
25559 25552 }
25560 25553 if (p->max_ownership_delay != 0) {
25561 25554 max_ownership_delay = p->max_ownership_delay * 1000;
25562 25555 }
25563 25556 }
25564 25557 SD_INFO(SD_LOG_IOCTL_MHD, un,
25565 25558 "sd_take_ownership: min, max delays: %d, %d\n",
25566 25559 min_ownership_delay, max_ownership_delay);
25567 25560
25568 25561 start_time = ddi_get_lbolt();
25569 25562 current_time = start_time;
25570 25563 ownership_time = current_time + drv_usectohz(min_ownership_delay);
25571 25564 end_time = start_time + drv_usectohz(max_ownership_delay);
25572 25565
25573 25566 while (current_time - end_time < 0) {
25574 25567 delay(drv_usectohz(500000));
25575 25568
25576 25569 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) {
25577 25570 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) {
25578 25571 mutex_enter(SD_MUTEX(un));
25579 25572 rval = (un->un_resvd_status &
25580 25573 SD_RESERVATION_CONFLICT) ? EACCES : EIO;
25581 25574 mutex_exit(SD_MUTEX(un));
25582 25575 break;
25583 25576 }
25584 25577 }
25585 25578 previous_current_time = current_time;
25586 25579 current_time = ddi_get_lbolt();
25587 25580 mutex_enter(SD_MUTEX(un));
25588 25581 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) {
25589 25582 ownership_time = ddi_get_lbolt() +
25590 25583 drv_usectohz(min_ownership_delay);
25591 25584 reservation_count = 0;
25592 25585 } else {
25593 25586 reservation_count++;
25594 25587 }
25595 25588 un->un_resvd_status |= SD_RESERVE;
25596 25589 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE);
25597 25590 mutex_exit(SD_MUTEX(un));
25598 25591
25599 25592 SD_INFO(SD_LOG_IOCTL_MHD, un,
25600 25593 "sd_take_ownership: ticks for loop iteration=%ld, "
25601 25594 "reservation=%s\n", (current_time - previous_current_time),
25602 25595 reservation_count ? "ok" : "reclaimed");
25603 25596
25604 25597 if (current_time - ownership_time >= 0 &&
25605 25598 reservation_count >= 4) {
25606 25599 rval = 0; /* Achieved a stable ownership */
25607 25600 break;
25608 25601 }
25609 25602 if (current_time - end_time >= 0) {
25610 25603 rval = EACCES; /* No ownership in max possible time */
25611 25604 break;
25612 25605 }
25613 25606 }
25614 25607 SD_TRACE(SD_LOG_IOCTL_MHD, un,
25615 25608 "sd_take_ownership: return(2)=%d\n", rval);
25616 25609 return (rval);
25617 25610 }
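
/*
 * An illustrative sketch (not part of the driver): the delays above
 * arrive from userland in milliseconds via struct mhioctkown, whose field
 * names below are assumed from <sys/mhd.h>. The min/max values shown just
 * restate the defaults in sd_take_ownership() (6 and 30 seconds); the
 * reinstate delay, the descriptor 'fd', and the error handling are
 * hypothetical:
 *
 *	#include <sys/mhd.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	struct mhioctkown tkown;
 *
 *	tkown.reinstate_resv_delay = 1000;	// msecs between reinstates
 *	tkown.min_ownership_delay = 6000;	// msecs of reset-free hold
 *	tkown.max_ownership_delay = 30000;	// msecs before giving up
 *
 *	if (ioctl(fd, MHIOCTKOWN, &tkown) != 0)
 *		perror("MHIOCTKOWN");
 */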
25618 25611
25619 25612
25620 25613 /*
25621 25614 * Function: sd_reserve_release()
25622 25615 *
25623 25616 * Description: This function builds and sends scsi RESERVE, RELEASE, and
25624 25617 * PRIORITY RESERVE commands based on a user specified command type
25625 25618 *
25626 25619 * Arguments: dev - the device 'dev_t'
25627 25620 * cmd - user specified command type; one of SD_PRIORITY_RESERVE,
25628 25621 * SD_RESERVE, SD_RELEASE
25629 25622 *
25630 25623 * Return Code: 0 or Error Code
25631 25624 */
25632 25625
25633 25626 static int
25634 25627 sd_reserve_release(dev_t dev, int cmd)
25635 25628 {
25636 25629 struct uscsi_cmd *com = NULL;
25637 25630 struct sd_lun *un = NULL;
25638 25631 char cdb[CDB_GROUP0];
25639 25632 int rval;
25640 25633
25641 25634 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) ||
25642 25635 (cmd == SD_PRIORITY_RESERVE));
25643 25636
25644 25637 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25645 25638 return (ENXIO);
25646 25639 }
25647 25640
25648 25641 /* instantiate and initialize the command and cdb */
25649 25642 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
25650 25643 bzero(cdb, CDB_GROUP0);
25651 25644 com->uscsi_flags = USCSI_SILENT;
25652 25645 com->uscsi_timeout = un->un_reserve_release_time;
25653 25646 com->uscsi_cdblen = CDB_GROUP0;
25654 25647 com->uscsi_cdb = cdb;
25655 25648 if (cmd == SD_RELEASE) {
25656 25649 cdb[0] = SCMD_RELEASE;
25657 25650 } else {
25658 25651 cdb[0] = SCMD_RESERVE;
25659 25652 }
25660 25653
25661 25654 /* Send the command. */
25662 25655 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
25663 25656 SD_PATH_STANDARD);
25664 25657
25665 25658 /*
25666 25659 	 * If a priority reserve was requested and we could not get the
25667 25660 	 * device, "break" the reservation held by the other host by
25668 25661 	 * issuing a reset.
25669 25662 */
25670 25663 if ((cmd == SD_PRIORITY_RESERVE) &&
25671 25664 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
25672 25665 /*
25673 25666 * First try to reset the LUN. If we cannot, then try a target
25674 25667 * reset, followed by a bus reset if the target reset fails.
25675 25668 */
25676 25669 int reset_retval = 0;
25677 25670 if (un->un_f_lun_reset_enabled == TRUE) {
25678 25671 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
25679 25672 }
25680 25673 if (reset_retval == 0) {
25681 25674 /* The LUN reset either failed or was not issued */
25682 25675 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
25683 25676 }
25684 25677 if ((reset_retval == 0) &&
25685 25678 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) {
25686 25679 rval = EIO;
25687 25680 kmem_free(com, sizeof (*com));
25688 25681 return (rval);
25689 25682 }
25690 25683
25691 25684 bzero(com, sizeof (struct uscsi_cmd));
25692 25685 com->uscsi_flags = USCSI_SILENT;
25693 25686 com->uscsi_cdb = cdb;
25694 25687 com->uscsi_cdblen = CDB_GROUP0;
25695 25688 com->uscsi_timeout = 5;
25696 25689
25697 25690 /*
25698 25691 * Reissue the last reserve command, this time without request
25699 25692 * sense. Assume that it is just a regular reserve command.
25700 25693 */
25701 25694 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
25702 25695 SD_PATH_STANDARD);
25703 25696 }
25704 25697
25705 25698 /* Return an error if still getting a reservation conflict. */
25706 25699 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
25707 25700 rval = EACCES;
25708 25701 }
25709 25702
25710 25703 kmem_free(com, sizeof (*com));
25711 25704 return (rval);
25712 25705 }
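
/*
 * For reference (illustrative only): the commands built above are plain
 * 6-byte group 0 CDBs with every byte after the opcode left zero, i.e.
 * whole-unit SCSI-2 reservations with no extent or third-party fields:
 *
 *	uchar_t reserve_cdb[CDB_GROUP0] = { SCMD_RESERVE, 0, 0, 0, 0, 0 };
 *	uchar_t release_cdb[CDB_GROUP0] = { SCMD_RELEASE, 0, 0, 0, 0, 0 };
 */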
25713 25706
25714 25707
25715 25708 #define SD_NDUMP_RETRIES 12
25716 25709 /*
25717 25710 * System Crash Dump routine
25718 25711 */
25719 25712
25720 25713 static int
25721 25714 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
25722 25715 {
25723 25716 int instance;
25724 25717 int partition;
25725 25718 int i;
25726 25719 int err;
25727 25720 struct sd_lun *un;
25728 25721 struct scsi_pkt *wr_pktp;
25729 25722 struct buf *wr_bp;
25730 25723 struct buf wr_buf;
25731 25724 daddr_t tgt_byte_offset; /* rmw - byte offset for target */
25732 25725 daddr_t tgt_blkno; /* rmw - blkno for target */
25733 25726 size_t tgt_byte_count; /* rmw - # of bytes to xfer */
25734 25727 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */
25735 25728 size_t io_start_offset;
25736 25729 int doing_rmw = FALSE;
25737 25730 int rval;
25738 25731 ssize_t dma_resid;
25739 25732 daddr_t oblkno;
25740 25733 diskaddr_t nblks = 0;
25741 25734 diskaddr_t start_block;
25742 25735
25743 25736 instance = SDUNIT(dev);
25744 25737 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
25745 25738 !SD_IS_VALID_LABEL(un) || ISCD(un)) {
25746 25739 return (ENXIO);
25747 25740 }
25748 25741
25749 25742 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))
25750 25743
25751 25744 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");
25752 25745
25753 25746 partition = SDPART(dev);
25754 25747 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);
25755 25748
25756 25749 if (!(NOT_DEVBSIZE(un))) {
25757 25750 int secmask = 0;
25758 25751 int blknomask = 0;
25759 25752
25760 25753 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
25761 25754 secmask = un->un_tgt_blocksize - 1;
25762 25755
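		/*
		 * blkno is in DEV_BSIZE units; require both the start block
		 * and the byte count to be aligned to the target block size.
		 */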
25763 25756 if (blkno & blknomask) {
25764 25757 SD_TRACE(SD_LOG_DUMP, un,
25765 25758 "sddump: dump start block not modulo %d\n",
25766 25759 un->un_tgt_blocksize);
25767 25760 return (EINVAL);
25768 25761 }
25769 25762
25770 25763 if ((nblk * DEV_BSIZE) & secmask) {
25771 25764 SD_TRACE(SD_LOG_DUMP, un,
25772 25765 "sddump: dump length not modulo %d\n",
25773 25766 un->un_tgt_blocksize);
25774 25767 return (EINVAL);
25775 25768 }
25776 25769
25777 25770 }
25778 25771
25779 25772	/* Validate the blocks to dump against the partition size. */
25780 25773
25781 25774 (void) cmlb_partinfo(un->un_cmlbhandle, partition,
25782 25775 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT);
25783 25776
25784 25777 if (NOT_DEVBSIZE(un)) {
25785 25778 if ((blkno + nblk) > nblks) {
25786 25779 SD_TRACE(SD_LOG_DUMP, un,
25787 25780 "sddump: dump range larger than partition: "
25788 25781 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
25789 25782 blkno, nblk, nblks);
25790 25783 return (EINVAL);
25791 25784 }
25792 25785 } else {
25793 25786 if (((blkno / (un->un_tgt_blocksize / DEV_BSIZE)) +
25794 25787 (nblk / (un->un_tgt_blocksize / DEV_BSIZE))) > nblks) {
25795 25788 SD_TRACE(SD_LOG_DUMP, un,
25796 25789 "sddump: dump range larger than partition: "
25797 25790 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
25798 25791 blkno, nblk, nblks);
25799 25792 return (EINVAL);
25800 25793 }
25801 25794 }
25802 25795
25803 25796 mutex_enter(&un->un_pm_mutex);
25804 25797 if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
25805 25798 struct scsi_pkt *start_pktp;
25806 25799
25807 25800 mutex_exit(&un->un_pm_mutex);
25808 25801
25809 25802 /*
25810 25803		 * Use the PM framework to power on the HBA first.
25811 25804 */
25812 25805 (void) pm_raise_power(SD_DEVINFO(un), 0,
25813 25806 SD_PM_STATE_ACTIVE(un));
25814 25807
25815 25808 /*
25816 25809		 * Dump no longer uses sdpower to power on a device; the
25817 25810		 * power-up is done in-line here so it can run in polled mode.
25818 25811 */
25819 25812
25820 25813 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");
25821 25814
25822 25815 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
25823 25816 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);
25824 25817
25825 25818 if (start_pktp == NULL) {
25826 25819 /* We were not given a SCSI packet, fail. */
25827 25820 return (EIO);
25828 25821 }
25829 25822 bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
25830 25823 start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
25831 25824 start_pktp->pkt_cdbp[4] = SD_TARGET_START;
25832 25825 start_pktp->pkt_flags = FLAG_NOINTR;
25833 25826
25834 25827 mutex_enter(SD_MUTEX(un));
25835 25828 SD_FILL_SCSI1_LUN(un, start_pktp);
25836 25829 mutex_exit(SD_MUTEX(un));
25837 25830 /*
25838 25831 * Scsi_poll returns 0 (success) if the command completes and
25839 25832 * the status block is STATUS_GOOD.
25840 25833 */
25841 25834 if (sd_scsi_poll(un, start_pktp) != 0) {
25842 25835 scsi_destroy_pkt(start_pktp);
25843 25836 return (EIO);
25844 25837 }
25845 25838 scsi_destroy_pkt(start_pktp);
25846 25839 (void) sd_pm_state_change(un, SD_PM_STATE_ACTIVE(un),
25847 25840 SD_PM_STATE_CHANGE);
25848 25841 } else {
25849 25842 mutex_exit(&un->un_pm_mutex);
25850 25843 }
25851 25844
25852 25845 mutex_enter(SD_MUTEX(un));
25853 25846 un->un_throttle = 0;
25854 25847
25855 25848 /*
25856 25849 * The first time through, reset the specific target device.
25857 25850	 * However, when cpr calls sddump we know that sd is in a
25858 25851	 * good state, so no bus reset is required.
25859 25852	 * Clear sense data via a Request Sense cmd.
25860 25853	 * In sddump we don't care about allow_bus_device_reset anymore.
25861 25854 */
25862 25855
25863 25856 if ((un->un_state != SD_STATE_SUSPENDED) &&
25864 25857 (un->un_state != SD_STATE_DUMPING)) {
25865 25858
25866 25859 New_state(un, SD_STATE_DUMPING);
25867 25860
25868 25861 if (un->un_f_is_fibre == FALSE) {
25869 25862 mutex_exit(SD_MUTEX(un));
25870 25863 /*
25871 25864 * Attempt a bus reset for parallel scsi.
25872 25865 *
25873 25866 * Note: A bus reset is required because on some host
25874 25867			 * systems (e.g. the E420R) a bus device reset is
25875 25868 * insufficient to reset the state of the target.
25876 25869 *
25877 25870 * Note: Don't issue the reset for fibre-channel,
25878 25871 * because this tends to hang the bus (loop) for
25879 25872 * too long while everyone is logging out and in
25880 25873 * and the deadman timer for dumping will fire
25881 25874 * before the dump is complete.
25882 25875 */
25883 25876 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
25884 25877 mutex_enter(SD_MUTEX(un));
25885 25878 Restore_state(un);
25886 25879 mutex_exit(SD_MUTEX(un));
25887 25880 return (EIO);
25888 25881 }
25889 25882
25890 25883 /* Delay to give the device some recovery time. */
25891 25884 drv_usecwait(10000);
25892 25885
25893 25886 if (sd_send_polled_RQS(un) == SD_FAILURE) {
25894 25887 SD_INFO(SD_LOG_DUMP, un,
25895 25888 "sddump: sd_send_polled_RQS failed\n");
25896 25889 }
25897 25890 mutex_enter(SD_MUTEX(un));
25898 25891 }
25899 25892 }
25900 25893
25901 25894 /*
25902 25895 * Convert the partition-relative block number to a
25903 25896 * disk physical block number.
25904 25897 */
25905 25898 if (NOT_DEVBSIZE(un)) {
25906 25899 blkno += start_block;
25907 25900 } else {
25908 25901 blkno = blkno / (un->un_tgt_blocksize / DEV_BSIZE);
25909 25902 blkno += start_block;
25910 25903 }
25911 25904
25912 25905 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);
25913 25906
25914 25907
25915 25908 /*
25916 25909 * Check if the device has a non-512 block size.
25917 25910 */
25918 25911 wr_bp = NULL;
25919 25912 if (NOT_DEVBSIZE(un)) {
25920 25913 tgt_byte_offset = blkno * un->un_sys_blocksize;
25921 25914 tgt_byte_count = nblk * un->un_sys_blocksize;
25922 25915 if ((tgt_byte_offset % un->un_tgt_blocksize) ||
25923 25916 (tgt_byte_count % un->un_tgt_blocksize)) {
25924 25917 doing_rmw = TRUE;
25925 25918 /*
25926 25919			 * Calculate the starting block number and the number
25927 25920			 * of blocks in terms of the media block size.
25928 25921 */
25929 25922 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
25930 25923 tgt_nblk =
25931 25924 ((tgt_byte_offset + tgt_byte_count +
25932 25925 (un->un_tgt_blocksize - 1)) /
25933 25926 un->un_tgt_blocksize) - tgt_blkno;
25934 25927
25935 25928 /*
25936 25929			 * Invoke the routine that does the read part of the
25937 25930			 * read-modify-write.
25938 25931 * Note that this routine returns a pointer to
25939 25932 * a valid bp in wr_bp.
25940 25933 */
25941 25934 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk,
25942 25935 &wr_bp);
25943 25936 if (err) {
25944 25937 mutex_exit(SD_MUTEX(un));
25945 25938 return (err);
25946 25939 }
25947 25940 /*
25948 25941			 * The offset into the first target block is:
25949 25942			 * (original block # * system block size) -
25950 25943			 * (new block # * target block size)
25951 25944 */
25952 25945 io_start_offset =
25953 25946 ((uint64_t)(blkno * un->un_sys_blocksize)) -
25954 25947 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize));
25955 25948
25956 25949 ASSERT(io_start_offset < un->un_tgt_blocksize);
25957 25950 /*
25958 25951			 * Do the modify portion of the read-modify-write.
25959 25952 */
25960 25953 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset],
25961 25954 (size_t)nblk * un->un_sys_blocksize);
25962 25955 } else {
25963 25956 doing_rmw = FALSE;
25964 25957 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
25965 25958 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize;
25966 25959 }
25967 25960
25968 25961 /* Convert blkno and nblk to target blocks */
25969 25962 blkno = tgt_blkno;
25970 25963 nblk = tgt_nblk;
25971 25964 } else {
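		/*
		 * No read-modify-write is needed; fabricate a buf on the
		 * stack that points directly at the caller's dump buffer.
		 * b_bcount is in bytes: nblk DEV_BSIZE blocks << DEV_BSHIFT.
		 */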
25972 25965 wr_bp = &wr_buf;
25973 25966 bzero(wr_bp, sizeof (struct buf));
25974 25967 wr_bp->b_flags = B_BUSY;
25975 25968 wr_bp->b_un.b_addr = addr;
25976 25969 wr_bp->b_bcount = nblk << DEV_BSHIFT;
25977 25970 wr_bp->b_resid = 0;
25978 25971 }
25979 25972
25980 25973 mutex_exit(SD_MUTEX(un));
25981 25974
25982 25975 /*
25983 25976 * Obtain a SCSI packet for the write command.
25984 25977 * It should be safe to call the allocator here without
25985 25978 * worrying about being locked for DVMA mapping because
25986 25979	 * the address we're passed is already a DVMA mapping.
25987 25980	 *
25988 25981	 * We are also not going to worry about semaphore ownership
25989 25982	 * in the dump buffer. Dumping is single-threaded at present.
25990 25983 */
25991 25984
25992 25985 wr_pktp = NULL;
25993 25986
25994 25987 dma_resid = wr_bp->b_bcount;
25995 25988 oblkno = blkno;
25996 25989
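	/* Convert the DEV_BSIZE-based block count to target blocks. */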
25997 25990 if (!(NOT_DEVBSIZE(un))) {
25998 25991 nblk = nblk / (un->un_tgt_blocksize / DEV_BSIZE);
25999 25992 }
26000 25993
26001 25994 while (dma_resid != 0) {
26002 25995
26003 25996 for (i = 0; i < SD_NDUMP_RETRIES; i++) {
26004 25997 wr_bp->b_flags &= ~B_ERROR;
26005 25998
26006 25999 if (un->un_partial_dma_supported == 1) {
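				/*
				 * With partial DMA, resume where the previous
				 * transfer left off: advance blkno by the
				 * target blocks already moved and recompute
				 * nblk from the remaining residual.
				 */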
26007 26000 blkno = oblkno +
26008 26001 ((wr_bp->b_bcount - dma_resid) /
26009 26002 un->un_tgt_blocksize);
26010 26003 nblk = dma_resid / un->un_tgt_blocksize;
26011 26004
26012 26005 if (wr_pktp) {
26013 26006 /*
26014 26007 * Partial DMA transfers after initial transfer
26015 26008 */
26016 26009 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp,
26017 26010 blkno, nblk);
26018 26011 } else {
26019 26012 /* Initial transfer */
26020 26013 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp,
26021 26014 un->un_pkt_flags, NULL_FUNC, NULL,
26022 26015 blkno, nblk);
26023 26016 }
26024 26017 } else {
26025 26018 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp,
26026 26019 0, NULL_FUNC, NULL, blkno, nblk);
26027 26020 }
26028 26021
26029 26022 if (rval == 0) {
26030 26023 /* We were given a SCSI packet, continue. */
26031 26024 break;
26032 26025 }
26033 26026
26034 26027 if (i == 0) {
26035 26028 if (wr_bp->b_flags & B_ERROR) {
26036 26029 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26037 26030 "no resources for dumping; "
26038 26031 "error code: 0x%x, retrying",
26039 26032 geterror(wr_bp));
26040 26033 } else {
26041 26034 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26042 26035 "no resources for dumping; retrying");
26043 26036 }
26044 26037 } else if (i != (SD_NDUMP_RETRIES - 1)) {
26045 26038 if (wr_bp->b_flags & B_ERROR) {
26046 26039 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
26047 26040 "no resources for dumping; error code: "
26048 26041 "0x%x, retrying\n", geterror(wr_bp));
26049 26042 }
26050 26043 } else {
26051 26044 if (wr_bp->b_flags & B_ERROR) {
26052 26045 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
26053 26046 "no resources for dumping; "
26054 26047 "error code: 0x%x, retries failed, "
26055 26048 "giving up.\n", geterror(wr_bp));
26056 26049 } else {
26057 26050 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
26058 26051 "no resources for dumping; "
26059 26052 "retries failed, giving up.\n");
26060 26053 }
26061 26054 mutex_enter(SD_MUTEX(un));
26062 26055 Restore_state(un);
26063 26056 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) {
26064 26057 mutex_exit(SD_MUTEX(un));
26065 26058 scsi_free_consistent_buf(wr_bp);
26066 26059 } else {
26067 26060 mutex_exit(SD_MUTEX(un));
26068 26061 }
26069 26062 return (EIO);
26070 26063 }
26071 26064 drv_usecwait(10000);
26072 26065 }
26073 26066
26074 26067 if (un->un_partial_dma_supported == 1) {
26075 26068 /*
26076 26069			 * Save the residual byte count from the partial DMA.
26077 26070 */
26078 26071 dma_resid = wr_pktp->pkt_resid;
26079 26072 if (dma_resid != 0)
26080 26073 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid);
26081 26074 wr_pktp->pkt_resid = 0;
26082 26075 } else {
26083 26076 dma_resid = 0;
26084 26077 }
26085 26078
26086 26079 /* SunBug 1222170 */
26087 26080 wr_pktp->pkt_flags = FLAG_NOINTR;
26088 26081
26089 26082 err = EIO;
26090 26083 for (i = 0; i < SD_NDUMP_RETRIES; i++) {
26091 26084
26092 26085 /*
26093 26086 * Scsi_poll returns 0 (success) if the command completes and
26094 26087 * the status block is STATUS_GOOD. We should only check
26095 26088 * errors if this condition is not true. Even then we should
26096 26089 * send our own request sense packet only if we have a check
26097 26090 * condition and auto request sense has not been performed by
26098 26091 * the hba.
26099 26092 */
26100 26093 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n");
26101 26094
26102 26095 if ((sd_scsi_poll(un, wr_pktp) == 0) &&
26103 26096 (wr_pktp->pkt_resid == 0)) {
26104 26097 err = SD_SUCCESS;
26105 26098 break;
26106 26099 }
26107 26100
26108 26101 /*
26109 26102		 * Check CMD_DEV_GONE first; give up if the device is gone.
26110 26103 */
26111 26104 if (wr_pktp->pkt_reason == CMD_DEV_GONE) {
26112 26105 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26113 26106 "Error while dumping state...Device is gone\n");
26114 26107 break;
26115 26108 }
26116 26109
26117 26110 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) {
26118 26111 SD_INFO(SD_LOG_DUMP, un,
26119 26112 "sddump: write failed with CHECK, try # %d\n", i);
26120 26113			if ((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0) {
26121 26114 (void) sd_send_polled_RQS(un);
26122 26115 }
26123 26116
26124 26117 continue;
26125 26118 }
26126 26119
26127 26120 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) {
26128 26121 int reset_retval = 0;
26129 26122
26130 26123 SD_INFO(SD_LOG_DUMP, un,
26131 26124 "sddump: write failed with BUSY, try # %d\n", i);
26132 26125
26133 26126 if (un->un_f_lun_reset_enabled == TRUE) {
26134 26127 reset_retval = scsi_reset(SD_ADDRESS(un),
26135 26128 RESET_LUN);
26136 26129 }
26137 26130 if (reset_retval == 0) {
26138 26131 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
26139 26132 }
26140 26133 (void) sd_send_polled_RQS(un);
26141 26134
26142 26135 } else {
26143 26136 SD_INFO(SD_LOG_DUMP, un,
26144 26137 "sddump: write failed with 0x%x, try # %d\n",
26145 26138 SD_GET_PKT_STATUS(wr_pktp), i);
26146 26139 mutex_enter(SD_MUTEX(un));
26147 26140 sd_reset_target(un, wr_pktp);
26148 26141 mutex_exit(SD_MUTEX(un));
26149 26142 }
26150 26143
26151 26144 /*
26152 26145 * If we are not getting anywhere with lun/target resets,
26153 26146 * let's reset the bus.
26154 26147 */
26155 26148		if (i == SD_NDUMP_RETRIES / 2) {
26156 26149 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
26157 26150 (void) sd_send_polled_RQS(un);
26158 26151 }
26159 26152 }
26160 26153 }
26161 26154
26162 26155 scsi_destroy_pkt(wr_pktp);
26163 26156 mutex_enter(SD_MUTEX(un));
26164 26157 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) {
26165 26158 mutex_exit(SD_MUTEX(un));
26166 26159 scsi_free_consistent_buf(wr_bp);
26167 26160 } else {
26168 26161 mutex_exit(SD_MUTEX(un));
26169 26162 }
26170 26163 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err);
26171 26164 return (err);
26172 26165 }
26173 26166
26174 26167 /*
26175 26168 * Function: sd_scsi_poll()
26176 26169 *
26177 26170 * Description: This is a wrapper for the scsi_poll call.
26178 26171 *
26179 26172 * Arguments: sd_lun - The unit structure
26180 26173 * scsi_pkt - The scsi packet being sent to the device.
26181 26174 *
26182 26175 * Return Code: 0 - Command completed successfully with good status
26183 26176 * -1 - Command failed. This could indicate a check condition
26184 26177 * or other status value requiring recovery action.
26185 26178 *
26186 26179  * NOTE: This code is only called from sddump().
26187 26180 */
26188 26181
26189 26182 static int
26190 26183 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp)
26191 26184 {
26192 26185 int status;
26193 26186
26194 26187 ASSERT(un != NULL);
26195 26188 ASSERT(!mutex_owned(SD_MUTEX(un)));
26196 26189 ASSERT(pktp != NULL);
26197 26190
26198 26191 status = SD_SUCCESS;
26199 26192
26200 26193 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) {
26201 26194 pktp->pkt_flags |= un->un_tagflags;
26202 26195 pktp->pkt_flags &= ~FLAG_NODISCON;
26203 26196 }
26204 26197
26205 26198 status = sd_ddi_scsi_poll(pktp);
26206 26199 /*
26207 26200 * Scsi_poll returns 0 (success) if the command completes and the
26208 26201 * status block is STATUS_GOOD. We should only check errors if this
26209 26202 * condition is not true. Even then we should send our own request
26210 26203 * sense packet only if we have a check condition and auto
26211 26204 * request sense has not been performed by the hba.
26212 26205 * Don't get RQS data if pkt_reason is CMD_DEV_GONE.
26213 26206 */
26214 26207 if ((status != SD_SUCCESS) &&
26215 26208 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) &&
26216 26209 (pktp->pkt_state & STATE_ARQ_DONE) == 0 &&
26217 26210 (pktp->pkt_reason != CMD_DEV_GONE))
26218 26211 (void) sd_send_polled_RQS(un);
26219 26212
26220 26213 return (status);
26221 26214 }
26222 26215
26223 26216 /*
26224 26217 * Function: sd_send_polled_RQS()
26225 26218 *
26226 26219 * Description: This sends the request sense command to a device.
26227 26220 *
26228 26221 * Arguments: sd_lun - The unit structure
26229 26222 *
26230 26223 * Return Code: 0 - Command completed successfully with good status
26231 26224 * -1 - Command failed.
26232 26225 *
26233 26226 */
26234 26227
26235 26228 static int
26236 26229 sd_send_polled_RQS(struct sd_lun *un)
26237 26230 {
26238 26231 int ret_val;
26239 26232 struct scsi_pkt *rqs_pktp;
26240 26233 struct buf *rqs_bp;
26241 26234
26242 26235 ASSERT(un != NULL);
26243 26236 ASSERT(!mutex_owned(SD_MUTEX(un)));
26244 26237
26245 26238 ret_val = SD_SUCCESS;
26246 26239
26247 26240 rqs_pktp = un->un_rqs_pktp;
26248 26241 rqs_bp = un->un_rqs_bp;
26249 26242
26250 26243 mutex_enter(SD_MUTEX(un));
26251 26244
26252 26245 if (un->un_sense_isbusy) {
26253 26246 ret_val = SD_FAILURE;
26254 26247 mutex_exit(SD_MUTEX(un));
26255 26248 return (ret_val);
26256 26249 }
26257 26250
26258 26251 /*
26259 26252 * If the request sense buffer (and packet) is not in use,
26260 26253 * let's set the un_sense_isbusy and send our packet
26261 26254 */
26262 26255 un->un_sense_isbusy = 1;
26263 26256 rqs_pktp->pkt_resid = 0;
26264 26257 rqs_pktp->pkt_reason = 0;
26265 26258 rqs_pktp->pkt_flags |= FLAG_NOINTR;
26266 26259 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH);
26267 26260
26268 26261 mutex_exit(SD_MUTEX(un));
26269 26262
26270 26263 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at"
26271 26264 " 0x%p\n", rqs_bp->b_un.b_addr);
26272 26265
26273 26266 /*
26274 26267 * Can't send this to sd_scsi_poll, we wrap ourselves around the
26275 26268 * axle - it has a call into us!
26276 26269 */
26277 26270 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) {
26278 26271 SD_INFO(SD_LOG_COMMON, un,
26279 26272 "sd_send_polled_RQS: RQS failed\n");
26280 26273 }
26281 26274
26282 26275 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:",
26283 26276 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX);
26284 26277
26285 26278 mutex_enter(SD_MUTEX(un));
26286 26279 un->un_sense_isbusy = 0;
26287 26280 mutex_exit(SD_MUTEX(un));
26288 26281
26289 26282 return (ret_val);
26290 26283 }
26291 26284
26292 26285 /*
26293 26286 * Defines needed for localized version of the scsi_poll routine.
26294 26287 */
26295 26288 #define CSEC 10000 /* usecs */
26296 26289 #define SEC_TO_CSEC (1000000/CSEC)
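/*
 * The retry loop in sd_ddi_scsi_poll() below counts time in CSEC-sized
 * ticks (10 msec each); SEC_TO_CSEC is the number of such ticks per second.
 */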
26297 26290
26298 26291 /*
26299 26292 * Function: sd_ddi_scsi_poll()
26300 26293 *
26301 26294 * Description: Localized version of the scsi_poll routine. The purpose is to
26302 26295 * send a scsi_pkt to a device as a polled command. This version
26303 26296 * is to ensure more robust handling of transport errors.
26304 26297  *		Specifically, this routine handles the not-ready to
26305 26298  *		ready transition seen during power-up and reset of
26306 26299  *		Sonoma arrays; this can take up to 45 seconds for
26307 26300  *		power-on and 20 seconds for reset of a Sonoma LUN.
26308 26301 *
26309 26302 * Arguments: scsi_pkt - The scsi_pkt being sent to a device
26310 26303 *
26311 26304 * Return Code: 0 - Command completed successfully with good status
26312 26305 * -1 - Command failed.
26313 26306 *
26314 26307  * NOTE: This code is almost identical to scsi_poll; however, before 6668774 can
26315 26308 * be fixed (removing this code), we need to determine how to handle the
26316 26309 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump().
26317 26310 *
26318 26311  * NOTE: This code is only called from sddump().
26319 26312 */
26320 26313 static int
26321 26314 sd_ddi_scsi_poll(struct scsi_pkt *pkt)
26322 26315 {
26323 26316 int rval = -1;
26324 26317 int savef;
26325 26318 long savet;
26326 26319 void (*savec)();
26327 26320 int timeout;
26328 26321 int busy_count;
26329 26322 int poll_delay;
26330 26323 int rc;
26331 26324 uint8_t *sensep;
26332 26325 struct scsi_arq_status *arqstat;
26333 26326 extern int do_polled_io;
26334 26327
26335 26328 ASSERT(pkt->pkt_scbp);
26336 26329
26337 26330 /*
26338 26331	 * Save the old flags, completion callback, and timeout.
26339 26332 */
26340 26333 savef = pkt->pkt_flags;
26341 26334 savec = pkt->pkt_comp;
26342 26335 savet = pkt->pkt_time;
26343 26336
26344 26337 pkt->pkt_flags |= FLAG_NOINTR;
26345 26338
26346 26339 /*
26347 26340 * XXX there is nothing in the SCSA spec that states that we should not
26348 26341 * do a callback for polled cmds; however, removing this will break sd
26349 26342 * and probably other target drivers
26350 26343 */
26351 26344 pkt->pkt_comp = NULL;
26352 26345
26353 26346 /*
26354 26347 * we don't like a polled command without timeout.
26355 26348 * 60 seconds seems long enough.
26356 26349 */
26357 26350 if (pkt->pkt_time == 0)
26358 26351 pkt->pkt_time = SCSI_POLL_TIMEOUT;
26359 26352
26360 26353 /*
26361 26354 * Send polled cmd.
26362 26355 *
26363 26356 * We do some error recovery for various errors. Tran_busy,
26364 26357	 * queue full, and non-dispatched commands are retried every 10 msec,
26365 26358	 * as they are typically transient failures. Busy status and Not
26366 26359 * Ready are retried every second as this status takes a while to
26367 26360 * change.
26368 26361 */
26369 26362 timeout = pkt->pkt_time * SEC_TO_CSEC;
26370 26363
26371 26364 for (busy_count = 0; busy_count < timeout; busy_count++) {
26372 26365 /*
26373 26366 * Initialize pkt status variables.
26374 26367 */
26375 26368 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0;
26376 26369
26377 26370 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) {
26378 26371 if (rc != TRAN_BUSY) {
26379 26372 /* Transport failed - give up. */
26380 26373 break;
26381 26374 } else {
26382 26375 /* Transport busy - try again. */
26383 26376 poll_delay = 1 * CSEC; /* 10 msec. */
26384 26377 }
26385 26378 } else {
26386 26379 /*
26387 26380 * Transport accepted - check pkt status.
26388 26381 */
26389 26382 rc = (*pkt->pkt_scbp) & STATUS_MASK;
26390 26383 if ((pkt->pkt_reason == CMD_CMPLT) &&
26391 26384 (rc == STATUS_CHECK) &&
26392 26385 (pkt->pkt_state & STATE_ARQ_DONE)) {
26393 26386 arqstat =
26394 26387 (struct scsi_arq_status *)(pkt->pkt_scbp);
26395 26388 sensep = (uint8_t *)&arqstat->sts_sensedata;
26396 26389 } else {
26397 26390 sensep = NULL;
26398 26391 }
26399 26392
26400 26393 if ((pkt->pkt_reason == CMD_CMPLT) &&
26401 26394 (rc == STATUS_GOOD)) {
26402 26395 /* No error - we're done */
26403 26396 rval = 0;
26404 26397 break;
26405 26398
26406 26399 } else if (pkt->pkt_reason == CMD_DEV_GONE) {
26407 26400 /* Lost connection - give up */
26408 26401 break;
26409 26402
26410 26403 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) &&
26411 26404 (pkt->pkt_state == 0)) {
26412 26405 /* Pkt not dispatched - try again. */
26413 26406 poll_delay = 1 * CSEC; /* 10 msec. */
26414 26407
26415 26408 } else if ((pkt->pkt_reason == CMD_CMPLT) &&
26416 26409 (rc == STATUS_QFULL)) {
26417 26410 /* Queue full - try again. */
26418 26411 poll_delay = 1 * CSEC; /* 10 msec. */
26419 26412
26420 26413 } else if ((pkt->pkt_reason == CMD_CMPLT) &&
26421 26414 (rc == STATUS_BUSY)) {
26422 26415 /* Busy - try again. */
26423 26416 poll_delay = 100 * CSEC; /* 1 sec. */
26424 26417 busy_count += (SEC_TO_CSEC - 1);
26425 26418
26426 26419 } else if ((sensep != NULL) &&
26427 26420 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) {
26428 26421 /*
26429 26422 * Unit Attention - try again.
26430 26423 * Pretend it took 1 sec.
26431 26424 * NOTE: 'continue' avoids poll_delay
26432 26425 */
26433 26426 busy_count += (SEC_TO_CSEC - 1);
26434 26427 continue;
26435 26428
26436 26429 } else if ((sensep != NULL) &&
26437 26430 (scsi_sense_key(sensep) == KEY_NOT_READY) &&
26438 26431 (scsi_sense_asc(sensep) == 0x04) &&
26439 26432 (scsi_sense_ascq(sensep) == 0x01)) {
26440 26433 /*
26441 26434 * Not ready -> ready - try again.
26442 26435 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY
26443 26436 * ...same as STATUS_BUSY
26444 26437 */
26445 26438 poll_delay = 100 * CSEC; /* 1 sec. */
26446 26439 busy_count += (SEC_TO_CSEC - 1);
26447 26440
26448 26441 } else {
26449 26442 /* BAD status - give up. */
26450 26443 break;
26451 26444 }
26452 26445 }
26453 26446
26454 26447 if (((curthread->t_flag & T_INTR_THREAD) == 0) &&
26455 26448 !do_polled_io) {
26456 26449 delay(drv_usectohz(poll_delay));
26457 26450 } else {
26458 26451 /* we busy wait during cpr_dump or interrupt threads */
26459 26452 drv_usecwait(poll_delay);
26460 26453 }
26461 26454 }
26462 26455
26463 26456 pkt->pkt_flags = savef;
26464 26457 pkt->pkt_comp = savec;
26465 26458 pkt->pkt_time = savet;
26466 26459
26467 26460 /* return on error */
26468 26461 if (rval)
26469 26462 return (rval);
26470 26463
26471 26464 /*
26472 26465 * This is not a performance critical code path.
26473 26466 *
26474 26467 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync()
26475 26468 * issues associated with looking at DMA memory prior to
26476 26469 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return.
26477 26470 */
26478 26471 scsi_sync_pkt(pkt);
26479 26472 return (0);
26480 26473 }
26481 26474
26482 26475
26483 26476
26484 26477 /*
26485 26478 * Function: sd_persistent_reservation_in_read_keys
26486 26479 *
26487 26480 * Description: This routine is the driver entry point for handling CD-ROM
26488 26481 * multi-host persistent reservation requests (MHIOCGRP_INKEYS)
26489 26482 * by sending the SCSI-3 PRIN commands to the device.
26490 26483 * Processes the read keys command response by copying the
26491 26484 * reservation key information into the user provided buffer.
26492 26485 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented.
26493 26486 *
26494 26487 * Arguments: un - Pointer to soft state struct for the target.
26495 26488 * usrp - user provided pointer to multihost Persistent In Read
26496 26489 * Keys structure (mhioc_inkeys_t)
26497 26490 * flag - this argument is a pass through to ddi_copyxxx()
26498 26491 * directly from the mode argument of ioctl().
26499 26492 *
26500 26493 * Return Code: 0 - Success
26501 26494 * EACCES
26502 26495 * ENOTSUP
26503 26496 * errno return code from sd_send_scsi_cmd()
26504 26497 *
26505 26498 * Context: Can sleep. Does not return until command is completed.
26506 26499 */
26507 26500
26508 26501 static int
26509 26502 sd_persistent_reservation_in_read_keys(struct sd_lun *un,
26510 26503 mhioc_inkeys_t *usrp, int flag)
26511 26504 {
26512 26505 #ifdef _MULTI_DATAMODEL
26513 26506 struct mhioc_key_list32 li32;
26514 26507 #endif
26515 26508 sd_prin_readkeys_t *in;
26516 26509 mhioc_inkeys_t *ptr;
26517 26510 mhioc_key_list_t li;
26518 26511 uchar_t *data_bufp = NULL;
26519 26512 int data_len = 0;
26520 26513 int rval = 0;
26521 26514 size_t copysz = 0;
26522 26515 sd_ssc_t *ssc;
26523 26516
26524 26517 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) {
26525 26518 return (EINVAL);
26526 26519 }
26527 26520 bzero(&li, sizeof (mhioc_key_list_t));
26528 26521
26529 26522 ssc = sd_ssc_init(un);
26530 26523
26531 26524 /*
26532 26525 * Get the listsize from user
26533 26526 */
26534 26527 #ifdef _MULTI_DATAMODEL
26535 26528 switch (ddi_model_convert_from(flag & FMODELS)) {
26536 26529 case DDI_MODEL_ILP32:
26537 26530 copysz = sizeof (struct mhioc_key_list32);
26538 26531 if (ddi_copyin(ptr->li, &li32, copysz, flag)) {
26539 26532 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26540 26533 "sd_persistent_reservation_in_read_keys: "
26541 26534 "failed ddi_copyin: mhioc_key_list32_t\n");
26542 26535 rval = EFAULT;
26543 26536 goto done;
26544 26537 }
26545 26538 li.listsize = li32.listsize;
26546 26539 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list;
26547 26540 break;
26548 26541
26549 26542 case DDI_MODEL_NONE:
26550 26543 copysz = sizeof (mhioc_key_list_t);
26551 26544 if (ddi_copyin(ptr->li, &li, copysz, flag)) {
26552 26545 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26553 26546 "sd_persistent_reservation_in_read_keys: "
26554 26547 "failed ddi_copyin: mhioc_key_list_t\n");
26555 26548 rval = EFAULT;
26556 26549 goto done;
26557 26550 }
26558 26551 break;
26559 26552 }
26560 26553
26561 26554 #else /* ! _MULTI_DATAMODEL */
26562 26555 copysz = sizeof (mhioc_key_list_t);
26563 26556 if (ddi_copyin(ptr->li, &li, copysz, flag)) {
26564 26557 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26565 26558 "sd_persistent_reservation_in_read_keys: "
26566 26559 "failed ddi_copyin: mhioc_key_list_t\n");
26567 26560 rval = EFAULT;
26568 26561 goto done;
26569 26562 }
26570 26563 #endif
26571 26564
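	/*
	 * Size the PRIN data buffer: listsize keys plus the readkeys
	 * header. Subtracting sizeof (caddr_t) drops the struct's trailing
	 * placeholder member, which marks where the inline key data begins.
	 */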
26572 26565 data_len = li.listsize * MHIOC_RESV_KEY_SIZE;
26573 26566 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t));
26574 26567 data_bufp = kmem_zalloc(data_len, KM_SLEEP);
26575 26568
26576 26569 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS,
26577 26570 data_len, data_bufp);
26578 26571 if (rval != 0) {
26579 26572 if (rval == EIO)
26580 26573 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
26581 26574 else
26582 26575 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
26583 26576 goto done;
26584 26577 }
26585 26578 in = (sd_prin_readkeys_t *)data_bufp;
26586 26579 ptr->generation = BE_32(in->generation);
26587 26580 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE;
26588 26581
26589 26582 /*
26590 26583 * Return the min(listsize, listlen) keys
26591 26584 */
26592 26585 #ifdef _MULTI_DATAMODEL
26593 26586
26594 26587 switch (ddi_model_convert_from(flag & FMODELS)) {
26595 26588 case DDI_MODEL_ILP32:
26596 26589 li32.listlen = li.listlen;
26597 26590 if (ddi_copyout(&li32, ptr->li, copysz, flag)) {
26598 26591 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26599 26592 "sd_persistent_reservation_in_read_keys: "
26600 26593 "failed ddi_copyout: mhioc_key_list32_t\n");
26601 26594 rval = EFAULT;
26602 26595 goto done;
26603 26596 }
26604 26597 break;
26605 26598
26606 26599 case DDI_MODEL_NONE:
26607 26600 if (ddi_copyout(&li, ptr->li, copysz, flag)) {
26608 26601 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26609 26602 "sd_persistent_reservation_in_read_keys: "
26610 26603 "failed ddi_copyout: mhioc_key_list_t\n");
26611 26604 rval = EFAULT;
26612 26605 goto done;
26613 26606 }
26614 26607 break;
26615 26608 }
26616 26609
26617 26610 #else /* ! _MULTI_DATAMODEL */
26618 26611
26619 26612 if (ddi_copyout(&li, ptr->li, copysz, flag)) {
26620 26613 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26621 26614 "sd_persistent_reservation_in_read_keys: "
26622 26615 "failed ddi_copyout: mhioc_key_list_t\n");
26623 26616 rval = EFAULT;
26624 26617 goto done;
26625 26618 }
26626 26619
26627 26620 #endif /* _MULTI_DATAMODEL */
26628 26621
26629 26622 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE,
26630 26623 li.listsize * MHIOC_RESV_KEY_SIZE);
26631 26624 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) {
26632 26625 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26633 26626 "sd_persistent_reservation_in_read_keys: "
26634 26627 "failed ddi_copyout: keylist\n");
26635 26628 rval = EFAULT;
26636 26629 }
26637 26630 done:
26638 26631 sd_ssc_fini(ssc);
26639 26632 kmem_free(data_bufp, data_len);
26640 26633 return (rval);
26641 26634 }
26642 26635
26643 26636
26644 26637 /*
26645 26638 * Function: sd_persistent_reservation_in_read_resv
26646 26639 *
26647 26640 * Description: This routine is the driver entry point for handling CD-ROM
26648 26641 * multi-host persistent reservation requests (MHIOCGRP_INRESV)
26649 26642 * by sending the SCSI-3 PRIN commands to the device.
26650 26643 * Process the read persistent reservations command response by
26651 26644 * copying the reservation information into the user provided
26652 26645 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented.
26653 26646 *
26654 26647 * Arguments: un - Pointer to soft state struct for the target.
26655 26648  *		usrp - user provided pointer to multihost Persistent In Read
26656 26649  *			Reservations structure (mhioc_inresvs_t)
26657 26650 * flag - this argument is a pass through to ddi_copyxxx()
26658 26651 * directly from the mode argument of ioctl().
26659 26652 *
26660 26653 * Return Code: 0 - Success
26661 26654 * EACCES
26662 26655 * ENOTSUP
26663 26656 * errno return code from sd_send_scsi_cmd()
26664 26657 *
26665 26658 * Context: Can sleep. Does not return until command is completed.
26666 26659 */
26667 26660
26668 26661 static int
26669 26662 sd_persistent_reservation_in_read_resv(struct sd_lun *un,
26670 26663 mhioc_inresvs_t *usrp, int flag)
26671 26664 {
26672 26665 #ifdef _MULTI_DATAMODEL
26673 26666 struct mhioc_resv_desc_list32 resvlist32;
26674 26667 #endif
26675 26668 sd_prin_readresv_t *in;
26676 26669 mhioc_inresvs_t *ptr;
26677 26670 sd_readresv_desc_t *readresv_ptr;
26678 26671 mhioc_resv_desc_list_t resvlist;
26679 26672 mhioc_resv_desc_t resvdesc;
26680 26673 uchar_t *data_bufp = NULL;
26681 26674 int data_len;
26682 26675 int rval = 0;
26683 26676 int i;
26684 26677 size_t copysz = 0;
26685 26678 mhioc_resv_desc_t *bufp;
26686 26679 sd_ssc_t *ssc;
26687 26680
26688 26681 if ((ptr = usrp) == NULL) {
26689 26682 return (EINVAL);
26690 26683 }
26691 26684
26692 26685 ssc = sd_ssc_init(un);
26693 26686
26694 26687 /*
26695 26688 * Get the listsize from user
26696 26689 */
26697 26690 #ifdef _MULTI_DATAMODEL
26698 26691 switch (ddi_model_convert_from(flag & FMODELS)) {
26699 26692 case DDI_MODEL_ILP32:
26700 26693 copysz = sizeof (struct mhioc_resv_desc_list32);
26701 26694 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) {
26702 26695 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26703 26696 "sd_persistent_reservation_in_read_resv: "
26704 26697 "failed ddi_copyin: mhioc_resv_desc_list_t\n");
26705 26698 rval = EFAULT;
26706 26699 goto done;
26707 26700 }
26708 26701 resvlist.listsize = resvlist32.listsize;
26709 26702 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list;
26710 26703 break;
26711 26704
26712 26705 case DDI_MODEL_NONE:
26713 26706 copysz = sizeof (mhioc_resv_desc_list_t);
26714 26707 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
26715 26708 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26716 26709 "sd_persistent_reservation_in_read_resv: "
26717 26710 "failed ddi_copyin: mhioc_resv_desc_list_t\n");
26718 26711 rval = EFAULT;
26719 26712 goto done;
26720 26713 }
26721 26714 break;
26722 26715 }
26723 26716 #else /* ! _MULTI_DATAMODEL */
26724 26717 copysz = sizeof (mhioc_resv_desc_list_t);
26725 26718 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
26726 26719 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26727 26720 "sd_persistent_reservation_in_read_resv: "
26728 26721 "failed ddi_copyin: mhioc_resv_desc_list_t\n");
26729 26722 rval = EFAULT;
26730 26723 goto done;
26731 26724 }
26732 26725 #endif /* ! _MULTI_DATAMODEL */
26733 26726
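	/* As for read keys: listsize descriptors plus the readresv header. */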
26734 26727 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN;
26735 26728 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t));
26736 26729 data_bufp = kmem_zalloc(data_len, KM_SLEEP);
26737 26730
26738 26731 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV,
26739 26732 data_len, data_bufp);
26740 26733 if (rval != 0) {
26741 26734 if (rval == EIO)
26742 26735 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
26743 26736 else
26744 26737 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
26745 26738 goto done;
26746 26739 }
26747 26740 in = (sd_prin_readresv_t *)data_bufp;
26748 26741 ptr->generation = BE_32(in->generation);
26749 26742 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN;
26750 26743
26751 26744 /*
26752 26745	 * Return the min(listsize, listlen) reservation descriptors
26753 26746 */
26754 26747 #ifdef _MULTI_DATAMODEL
26755 26748
26756 26749 switch (ddi_model_convert_from(flag & FMODELS)) {
26757 26750 case DDI_MODEL_ILP32:
26758 26751 resvlist32.listlen = resvlist.listlen;
26759 26752 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) {
26760 26753 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26761 26754 "sd_persistent_reservation_in_read_resv: "
26762 26755 "failed ddi_copyout: mhioc_resv_desc_list_t\n");
26763 26756 rval = EFAULT;
26764 26757 goto done;
26765 26758 }
26766 26759 break;
26767 26760
26768 26761 case DDI_MODEL_NONE:
26769 26762 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
26770 26763 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26771 26764 "sd_persistent_reservation_in_read_resv: "
26772 26765 "failed ddi_copyout: mhioc_resv_desc_list_t\n");
26773 26766 rval = EFAULT;
26774 26767 goto done;
26775 26768 }
26776 26769 break;
26777 26770 }
26778 26771
26779 26772 #else /* ! _MULTI_DATAMODEL */
26780 26773
26781 26774 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
26782 26775 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26783 26776 "sd_persistent_reservation_in_read_resv: "
26784 26777 "failed ddi_copyout: mhioc_resv_desc_list_t\n");
26785 26778 rval = EFAULT;
26786 26779 goto done;
26787 26780 }
26788 26781
26789 26782 #endif /* ! _MULTI_DATAMODEL */
26790 26783
26791 26784 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc;
26792 26785 bufp = resvlist.list;
26793 26786 copysz = sizeof (mhioc_resv_desc_t);
26794 26787 for (i = 0; i < min(resvlist.listlen, resvlist.listsize);
26795 26788 i++, readresv_ptr++, bufp++) {
26796 26789
26797 26790 bcopy(&readresv_ptr->resvkey, &resvdesc.key,
26798 26791 MHIOC_RESV_KEY_SIZE);
26799 26792 resvdesc.type = readresv_ptr->type;
26800 26793 resvdesc.scope = readresv_ptr->scope;
26801 26794 resvdesc.scope_specific_addr =
26802 26795 BE_32(readresv_ptr->scope_specific_addr);
26803 26796
26804 26797 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) {
26805 26798 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26806 26799 "sd_persistent_reservation_in_read_resv: "
26807 26800 "failed ddi_copyout: resvlist\n");
26808 26801 rval = EFAULT;
26809 26802 goto done;
26810 26803 }
26811 26804 }
26812 26805 done:
26813 26806 sd_ssc_fini(ssc);
26814 26807	/* Free data_bufp only if it was actually allocated. */
26815 26808 if (data_bufp) {
26816 26809 kmem_free(data_bufp, data_len);
26817 26810 }
26818 26811 return (rval);
26819 26812 }
26820 26813
26821 26814
26822 26815 /*
26823 26816 * Function: sr_change_blkmode()
26824 26817 *
26825 26818 * Description: This routine is the driver entry point for handling CD-ROM
26826 26819 * block mode ioctl requests. Support for returning and changing
26827 26820 * the current block size in use by the device is implemented. The
26828 26821 * LBA size is changed via a MODE SELECT Block Descriptor.
26829 26822 *
26830 26823 * This routine issues a mode sense with an allocation length of
26831 26824 * 12 bytes for the mode page header and a single block descriptor.
26832 26825 *
26833 26826 * Arguments: dev - the device 'dev_t'
26834 26827 * cmd - the request type; one of CDROMGBLKMODE (get) or
26835 26828 * CDROMSBLKMODE (set)
26836 26829 * data - current block size or requested block size
26837 26830 * flag - this argument is a pass through to ddi_copyxxx() directly
26838 26831 * from the mode argument of ioctl().
26839 26832 *
26840 26833 * Return Code: the code returned by sd_send_scsi_cmd()
26841 26834 * EINVAL if invalid arguments are provided
26842 26835 * EFAULT if ddi_copyxxx() fails
26843 26836  *		ENXIO if ddi_get_soft_state fails
26844 26837 * EIO if invalid mode sense block descriptor length
26845 26838 *
26846 26839 */
26847 26840
26848 26841 static int
26849 26842 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag)
26850 26843 {
26851 26844 struct sd_lun *un = NULL;
26852 26845 struct mode_header *sense_mhp, *select_mhp;
26853 26846 struct block_descriptor *sense_desc, *select_desc;
26854 26847 int current_bsize;
26855 26848 int rval = EINVAL;
26856 26849 uchar_t *sense = NULL;
26857 26850 uchar_t *select = NULL;
26858 26851 sd_ssc_t *ssc;
26859 26852
26860 26853 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE));
26861 26854
26862 26855 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
26863 26856 return (ENXIO);
26864 26857 }
26865 26858
26866 26859 /*
26867 26860	 * The block length is changed via the Mode Select block descriptor;
26868 26861	 * the "Read/Write Error Recovery" mode page (0x1) contents are not
26869 26862	 * actually required by this routine. The mode sense allocation
26870 26863	 * length is therefore specified to be the length of a mode page
26871 26864	 * header and a block descriptor.
26872 26865 */
26873 26866 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
26874 26867
26875 26868 ssc = sd_ssc_init(un);
26876 26869 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
26877 26870 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD);
26878 26871 sd_ssc_fini(ssc);
26879 26872 if (rval != 0) {
26880 26873 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26881 26874 "sr_change_blkmode: Mode Sense Failed\n");
26882 26875 kmem_free(sense, BUFLEN_CHG_BLK_MODE);
26883 26876 return (rval);
26884 26877 }
26885 26878
26886 26879 /* Check the block descriptor len to handle only 1 block descriptor */
26887 26880 sense_mhp = (struct mode_header *)sense;
26888 26881 if ((sense_mhp->bdesc_length == 0) ||
26889 26882 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) {
26890 26883 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26891 26884 "sr_change_blkmode: Mode Sense returned invalid block"
26892 26885 " descriptor length\n");
26893 26886 kmem_free(sense, BUFLEN_CHG_BLK_MODE);
26894 26887 return (EIO);
26895 26888 }
26896 26889 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH);
26897 26890 current_bsize = ((sense_desc->blksize_hi << 16) |
26898 26891 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo);
26899 26892
26900 26893 /* Process command */
26901 26894 switch (cmd) {
26902 26895 case CDROMGBLKMODE:
26903 26896 /* Return the block size obtained during the mode sense */
26904 26897		if (ddi_copyout(&current_bsize, (void *)data,
26905 26898 sizeof (int), flag) != 0)
26906 26899 rval = EFAULT;
26907 26900 break;
26908 26901 case CDROMSBLKMODE:
26909 26902 /* Validate the requested block size */
26910 26903 switch (data) {
26911 26904 case CDROM_BLK_512:
26912 26905 case CDROM_BLK_1024:
26913 26906 case CDROM_BLK_2048:
26914 26907 case CDROM_BLK_2056:
26915 26908 case CDROM_BLK_2336:
26916 26909 case CDROM_BLK_2340:
26917 26910 case CDROM_BLK_2352:
26918 26911 case CDROM_BLK_2368:
26919 26912 case CDROM_BLK_2448:
26920 26913 case CDROM_BLK_2646:
26921 26914 case CDROM_BLK_2647:
26922 26915 break;
26923 26916 default:
26924 26917 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26925 26918 "sr_change_blkmode: "
26926 26919 "Block Size '%ld' Not Supported\n", data);
26927 26920 kmem_free(sense, BUFLEN_CHG_BLK_MODE);
26928 26921 return (EINVAL);
26929 26922 }
26930 26923
26931 26924 /*
26932 26925 * The current block size matches the requested block size so
26933 26926 * there is no need to send the mode select to change the size
26934 26927 */
26935 26928 if (current_bsize == data) {
26936 26929 break;
26937 26930 }
26938 26931
26939 26932 /* Build the select data for the requested block size */
26940 26933 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
26941 26934 select_mhp = (struct mode_header *)select;
26942 26935 select_desc =
26943 26936 (struct block_descriptor *)(select + MODE_HEADER_LENGTH);
26944 26937 /*
26945 26938 * The LBA size is changed via the block descriptor, so the
26946 26939 * descriptor is built according to the user data
26947 26940 */
26948 26941 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH;
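		/* The block length is a 24-bit, big-endian field. */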
26949 26942 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16);
26950 26943 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8);
26951 26944 select_desc->blksize_lo = (char)((data) & 0x000000ff);
26952 26945
26953 26946 /* Send the mode select for the requested block size */
26954 26947 ssc = sd_ssc_init(un);
26955 26948 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
26956 26949 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
26957 26950 SD_PATH_STANDARD);
26958 26951 sd_ssc_fini(ssc);
26959 26952 if (rval != 0) {
26960 26953 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26961 26954 "sr_change_blkmode: Mode Select Failed\n");
26962 26955 /*
26963 26956 * The mode select failed for the requested block size,
26964 26957 * so reset the data for the original block size and
26965 26958 * send it to the target. The error is indicated by the
26966 26959 * return value for the failed mode select.
26967 26960 */
26968 26961 select_desc->blksize_hi = sense_desc->blksize_hi;
26969 26962 select_desc->blksize_mid = sense_desc->blksize_mid;
26970 26963 select_desc->blksize_lo = sense_desc->blksize_lo;
26971 26964 ssc = sd_ssc_init(un);
26972 26965 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
26973 26966 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
26974 26967 SD_PATH_STANDARD);
26975 26968 sd_ssc_fini(ssc);
26976 26969 } else {
26977 26970 ASSERT(!mutex_owned(SD_MUTEX(un)));
26978 26971 mutex_enter(SD_MUTEX(un));
26979 26972 sd_update_block_info(un, (uint32_t)data, 0);
26980 26973 mutex_exit(SD_MUTEX(un));
26981 26974 }
26982 26975 break;
26983 26976 default:
26984 26977 /* should not reach here, but check anyway */
26985 26978 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26986 26979 "sr_change_blkmode: Command '%x' Not Supported\n", cmd);
26987 26980 rval = EINVAL;
26988 26981 break;
26989 26982 }
26990 26983
26991 26984 if (select) {
26992 26985 kmem_free(select, BUFLEN_CHG_BLK_MODE);
26993 26986 }
26994 26987 if (sense) {
26995 26988 kmem_free(sense, BUFLEN_CHG_BLK_MODE);
26996 26989 }
26997 26990 return (rval);
26998 26991 }
26999 26992
27000 26993
27001 26994 /*
27002 26995 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines
27003 26996 * implement driver support for getting and setting the CD speed. The command
27004 26997 * set used will be based on the device type. If the device has not been
27005 26998 * identified as MMC, the Toshiba vendor specific mode page will be used. If
27006 26999 * the device is MMC but does not support the Real Time Streaming feature
27007 27000 * the SET CD SPEED command will be used to set speed and mode page 0x2A will
27008 27001 * be used to read the speed.
27009 27002 */
27010 27003
27011 27004 /*
27012 27005 * Function: sr_change_speed()
27013 27006 *
27014 27007 * Description: This routine is the driver entry point for handling CD-ROM
27015 27008 * drive speed ioctl requests for devices supporting the Toshiba
27016 27009 * vendor specific drive speed mode page. Support for returning
27017 27010 * and changing the current drive speed in use by the device is
27018 27011 * implemented.
27019 27012 *
27020 27013 * Arguments: dev - the device 'dev_t'
27021 27014 * cmd - the request type; one of CDROMGDRVSPEED (get) or
27022 27015 * CDROMSDRVSPEED (set)
27023 27016 * data - current drive speed or requested drive speed
27024 27017 * flag - this argument is a pass through to ddi_copyxxx() directly
27025 27018 * from the mode argument of ioctl().
27026 27019 *
27027 27020 * Return Code: the code returned by sd_send_scsi_cmd()
27028 27021 * EINVAL if invalid arguments are provided
27029 27022 * EFAULT if ddi_copyxxx() fails
27030 27023  *		ENXIO if ddi_get_soft_state fails
27031 27024 * EIO if invalid mode sense block descriptor length
27032 27025 */
27033 27026
27034 27027 static int
27035 27028 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
27036 27029 {
27037 27030 struct sd_lun *un = NULL;
27038 27031 struct mode_header *sense_mhp, *select_mhp;
27039 27032 struct mode_speed *sense_page, *select_page;
27040 27033 int current_speed;
27041 27034 int rval = EINVAL;
27042 27035 int bd_len;
27043 27036 uchar_t *sense = NULL;
27044 27037 uchar_t *select = NULL;
27045 27038 sd_ssc_t *ssc;
27046 27039
27047 27040 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));
27048 27041 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27049 27042 return (ENXIO);
27050 27043 }
27051 27044
27052 27045 /*
27053 27046 * Note: The drive speed is being modified here according to a Toshiba
27054 27047 * vendor specific mode page (0x31).
27055 27048 */
27056 27049 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
27057 27050
27058 27051 ssc = sd_ssc_init(un);
27059 27052 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
27060 27053 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED,
27061 27054 SD_PATH_STANDARD);
27062 27055 sd_ssc_fini(ssc);
27063 27056 if (rval != 0) {
27064 27057 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27065 27058 "sr_change_speed: Mode Sense Failed\n");
27066 27059 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27067 27060 return (rval);
27068 27061 }
27069 27062 sense_mhp = (struct mode_header *)sense;
27070 27063
27071 27064 /* Check the block descriptor len to handle only 1 block descriptor */
27072 27065 bd_len = sense_mhp->bdesc_length;
27073 27066 if (bd_len > MODE_BLK_DESC_LENGTH) {
27074 27067 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27075 27068 "sr_change_speed: Mode Sense returned invalid block "
27076 27069 "descriptor length\n");
27077 27070 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27078 27071 return (EIO);
27079 27072 }
27080 27073
27081 27074 sense_page = (struct mode_speed *)
27082 27075 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
27083 27076 current_speed = sense_page->speed;
27084 27077
27085 27078 /* Process command */
27086 27079 switch (cmd) {
27087 27080 case CDROMGDRVSPEED:
27088 27081 /* Return the drive speed obtained during the mode sense */
27089 27082 if (current_speed == 0x2) {
27090 27083 current_speed = CDROM_TWELVE_SPEED;
27091 27084 }
27092 27085		if (ddi_copyout(&current_speed, (void *)data,
27093 27086 sizeof (int), flag) != 0) {
27094 27087 rval = EFAULT;
27095 27088 }
27096 27089 break;
27097 27090 case CDROMSDRVSPEED:
27098 27091 /* Validate the requested drive speed */
27099 27092 switch ((uchar_t)data) {
27100 27093 case CDROM_TWELVE_SPEED:
27101 27094 data = 0x2;
27102 27095 /*FALLTHROUGH*/
27103 27096 case CDROM_NORMAL_SPEED:
27104 27097 case CDROM_DOUBLE_SPEED:
27105 27098 case CDROM_QUAD_SPEED:
27106 27099 case CDROM_MAXIMUM_SPEED:
27107 27100 break;
27108 27101 default:
27109 27102 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27110 27103 "sr_change_speed: "
27111 27104 "Drive Speed '%d' Not Supported\n", (uchar_t)data);
27112 27105 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27113 27106 return (EINVAL);
27114 27107 }
27115 27108
27116 27109 /*
27117 27110 * The current drive speed matches the requested drive speed so
27118 27111 * there is no need to send the mode select to change the speed
27119 27112 */
27120 27113 if (current_speed == data) {
27121 27114 break;
27122 27115 }
27123 27116
27124 27117 /* Build the select data for the requested drive speed */
27125 27118 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
27126 27119 select_mhp = (struct mode_header *)select;
27127 27120 select_mhp->bdesc_length = 0;
27128 27121		select_page =
27129 27122		    (struct mode_speed *)(select + MODE_HEADER_LENGTH);
27132 27125 select_page->mode_page.code = CDROM_MODE_SPEED;
27133 27126 select_page->mode_page.length = 2;
27134 27127 select_page->speed = (uchar_t)data;
27135 27128
27136 27129		/* Send the mode select for the requested drive speed */
27137 27130 ssc = sd_ssc_init(un);
27138 27131 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
27139 27132 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
27140 27133 SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
27141 27134 sd_ssc_fini(ssc);
27142 27135 if (rval != 0) {
27143 27136 /*
27144 27137 * The mode select failed for the requested drive speed,
27145 27138 * so reset the data for the original drive speed and
27146 27139 * send it to the target. The error is indicated by the
27147 27140 * return value for the failed mode select.
27148 27141 */
27149 27142 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27150 27143			    "sr_change_speed: Mode Select Failed\n");
27151 27144 select_page->speed = sense_page->speed;
27152 27145 ssc = sd_ssc_init(un);
27153 27146 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
27154 27147 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
27155 27148 SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
27156 27149 sd_ssc_fini(ssc);
27157 27150 }
27158 27151 break;
27159 27152 default:
27160 27153 /* should not reach here, but check anyway */
27161 27154 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27162 27155 "sr_change_speed: Command '%x' Not Supported\n", cmd);
27163 27156 rval = EINVAL;
27164 27157 break;
27165 27158 }
27166 27159
27167 27160 if (select) {
27168 27161 kmem_free(select, BUFLEN_MODE_CDROM_SPEED);
27169 27162 }
27170 27163 if (sense) {
27171 27164 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27172 27165 }
27173 27166
27174 27167 return (rval);
27175 27168 }
27176 27169
27177 27170
27178 27171 /*
27179 27172 * Function: sr_atapi_change_speed()
27180 27173 *
27181 27174 * Description: This routine is the driver entry point for handling CD-ROM
27182 27175 * drive speed ioctl requests for MMC devices that do not support
27183 27176 * the Real Time Streaming feature (0x107).
27184 27177 *
27185 27178 * Note: This routine will use the SET SPEED command which may not
27186 27179 * be supported by all devices.
27187 27180 *
27188 27181  * Arguments:	dev - the device 'dev_t'
27189 27182  *		cmd - the request type; one of CDROMGDRVSPEED (get) or
27190 27183  *		CDROMSDRVSPEED (set)
27191 27184  *		data - current drive speed or requested drive speed
27192 27185  *		flag - this argument is a pass through to ddi_copyxxx() directly
27193 27186 * from the mode argument of ioctl().
27194 27187 *
27195 27188 * Return Code: the code returned by sd_send_scsi_cmd()
27196 27189 * EINVAL if invalid arguments are provided
27197 27190 * EFAULT if ddi_copyxxx() fails
27198 27191  *		ENXIO if ddi_get_soft_state fails
27199 27192 * EIO if invalid mode sense block descriptor length
27200 27193 */
27201 27194
27202 27195 static int
27203 27196 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
27204 27197 {
27205 27198 struct sd_lun *un;
27206 27199 struct uscsi_cmd *com = NULL;
27207 27200 struct mode_header_grp2 *sense_mhp;
27208 27201 uchar_t *sense_page;
27209 27202 uchar_t *sense = NULL;
27210 27203 char cdb[CDB_GROUP5];
27211 27204 int bd_len;
27212 27205 int current_speed = 0;
27213 27206 int max_speed = 0;
27214 27207 int rval;
27215 27208 sd_ssc_t *ssc;
27216 27209
27217 27210 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));
27218 27211
27219 27212 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27220 27213 return (ENXIO);
27221 27214 }
27222 27215
27223 27216 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
27224 27217
27225 27218 ssc = sd_ssc_init(un);
27226 27219 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
27227 27220 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP,
27228 27221 SD_PATH_STANDARD);
27229 27222 sd_ssc_fini(ssc);
27230 27223 if (rval != 0) {
27231 27224 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27232 27225 "sr_atapi_change_speed: Mode Sense Failed\n");
27233 27226 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27234 27227 return (rval);
27235 27228 }
27236 27229
27237 27230 /* Check the block descriptor len to handle only 1 block descriptor */
27238 27231 sense_mhp = (struct mode_header_grp2 *)sense;
27239 27232 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
27240 27233 if (bd_len > MODE_BLK_DESC_LENGTH) {
27241 27234 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27242 27235 "sr_atapi_change_speed: Mode Sense returned invalid "
27243 27236 "block descriptor length\n");
27244 27237 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27245 27238 return (EIO);
27246 27239 }
27247 27240
27248 27241 /* Calculate the current and maximum drive speeds */
27249 27242 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
27250 27243 current_speed = (sense_page[14] << 8) | sense_page[15];
27251 27244 max_speed = (sense_page[8] << 8) | sense_page[9];
27252 27245
27253 27246 /* Process the command */
27254 27247 switch (cmd) {
27255 27248 case CDROMGDRVSPEED:
27256 27249 current_speed /= SD_SPEED_1X;
27257 27250 		if (ddi_copyout(&current_speed, (void *)data,
27258 27251 sizeof (int), flag) != 0)
27259 27252 rval = EFAULT;
27260 27253 break;
27261 27254 case CDROMSDRVSPEED:
27262 27255 /* Convert the speed code to KB/sec */
27263 27256 switch ((uchar_t)data) {
27264 27257 case CDROM_NORMAL_SPEED:
27265 27258 current_speed = SD_SPEED_1X;
27266 27259 break;
27267 27260 case CDROM_DOUBLE_SPEED:
27268 27261 current_speed = 2 * SD_SPEED_1X;
27269 27262 break;
27270 27263 case CDROM_QUAD_SPEED:
27271 27264 current_speed = 4 * SD_SPEED_1X;
27272 27265 break;
27273 27266 case CDROM_TWELVE_SPEED:
27274 27267 current_speed = 12 * SD_SPEED_1X;
27275 27268 break;
27276 27269 case CDROM_MAXIMUM_SPEED:
27277 27270 current_speed = 0xffff;
27278 27271 break;
27279 27272 default:
27280 27273 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27281 27274 "sr_atapi_change_speed: invalid drive speed %d\n",
27282 27275 (uchar_t)data);
27283 27276 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27284 27277 return (EINVAL);
27285 27278 }
27286 27279
27287 27280 /* Check the request against the drive's max speed. */
27288 27281 if (current_speed != 0xffff) {
27289 27282 if (current_speed > max_speed) {
27290 27283 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27291 27284 return (EINVAL);
27292 27285 }
27293 27286 }
27294 27287
27295 27288 /*
27296 27289 * Build and send the SET SPEED command
27297 27290 *
27298 27291 * Note: The SET SPEED (0xBB) command used in this routine is
27299 27292 * obsolete per the SCSI MMC spec but still supported in the
27300 27293 		 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
27301 27294 * therefore the command is still implemented in this routine.
27302 27295 */
27303 27296 bzero(cdb, sizeof (cdb));
27304 27297 cdb[0] = (char)SCMD_SET_CDROM_SPEED;
27305 27298 cdb[2] = (uchar_t)(current_speed >> 8);
27306 27299 cdb[3] = (uchar_t)current_speed;
27307 27300 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27308 27301 com->uscsi_cdb = (caddr_t)cdb;
27309 27302 com->uscsi_cdblen = CDB_GROUP5;
27310 27303 com->uscsi_bufaddr = NULL;
27311 27304 com->uscsi_buflen = 0;
27312 27305 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27313 27306 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD);
27314 27307 break;
27315 27308 default:
27316 27309 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27317 27310 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd);
27318 27311 rval = EINVAL;
27319 27312 }
27320 27313
27321 27314 if (sense) {
27322 27315 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27323 27316 }
27324 27317 if (com) {
27325 27318 kmem_free(com, sizeof (*com));
27326 27319 }
27327 27320 return (rval);
27328 27321 }
27329 27322
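
Seen from user space, both CDROMSDRVSPEED and CDROMGDRVSPEED reach this entry point through ioctl(2) on the raw device node. A minimal sketch, assuming a hypothetical device path and using the CDROM_* speed codes from <sys/cdio.h> that the switch above decodes:

	#include <sys/cdio.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{
		int fd, speed;

		/* Hypothetical device node; substitute the real CD-ROM path. */
		if ((fd = open("/dev/rdsk/c1t0d0s2", O_RDONLY | O_NDELAY)) < 0) {
			perror("open");
			return (1);
		}

		/* Request 4x; the driver converts the code to KB/sec. */
		if (ioctl(fd, CDROMSDRVSPEED, CDROM_QUAD_SPEED) < 0)
			perror("CDROMSDRVSPEED");

		/* Read the current speed back as a multiple of 1x. */
		if (ioctl(fd, CDROMGDRVSPEED, &speed) == 0)
			(void) printf("drive speed: %dx\n", speed);

		(void) close(fd);
		return (0);
	}
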
27330 27323
27331 27324 /*
27332 27325 * Function: sr_pause_resume()
27333 27326 *
27334 27327 * Description: This routine is the driver entry point for handling CD-ROM
27335 27328 * pause/resume ioctl requests. This only affects the audio play
27336 27329 * operation.
27337 27330 *
27338 27331 * Arguments: dev - the device 'dev_t'
27339 27332 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
27340 27333 * for setting the resume bit of the cdb.
27341 27334 *
27342 27335 * Return Code: the code returned by sd_send_scsi_cmd()
27343 27336 * EINVAL if invalid mode specified
27344 27337 *
27345 27338 */
27346 27339
27347 27340 static int
27348 27341 sr_pause_resume(dev_t dev, int cmd)
27349 27342 {
27350 27343 struct sd_lun *un;
27351 27344 struct uscsi_cmd *com;
27352 27345 char cdb[CDB_GROUP1];
27353 27346 int rval;
27354 27347
27355 27348 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27356 27349 return (ENXIO);
27357 27350 }
27358 27351
27359 27352 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27360 27353 bzero(cdb, CDB_GROUP1);
27361 27354 cdb[0] = SCMD_PAUSE_RESUME;
27362 27355 switch (cmd) {
27363 27356 case CDROMRESUME:
27364 27357 cdb[8] = 1;
27365 27358 break;
27366 27359 case CDROMPAUSE:
27367 27360 cdb[8] = 0;
27368 27361 break;
27369 27362 default:
27370 27363 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:"
27371 27364 " Command '%x' Not Supported\n", cmd);
27372 27365 rval = EINVAL;
27373 27366 goto done;
27374 27367 }
27375 27368
27376 27369 com->uscsi_cdb = cdb;
27377 27370 com->uscsi_cdblen = CDB_GROUP1;
27378 27371 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27379 27372
27380 27373 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27381 27374 SD_PATH_STANDARD);
27382 27375
27383 27376 done:
27384 27377 kmem_free(com, sizeof (*com));
27385 27378 return (rval);
27386 27379 }
27387 27380
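
As the cdb[8] handling above shows, both requests reduce to the same PAUSE/RESUME command with only the resume bit differing, so a caller simply picks the matching ioctl. A minimal sketch, assuming fd is an already-open raw CD-ROM descriptor:

	#include <sys/cdio.h>
	#include <unistd.h>

	/* Pause the current audio play operation, then resume it. */
	static int
	pause_then_resume(int fd)
	{
		if (ioctl(fd, CDROMPAUSE, 0) < 0)	/* resume bit clear */
			return (-1);
		return (ioctl(fd, CDROMRESUME, 0));	/* resume bit set */
	}
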
27388 27381
27389 27382 /*
27390 27383 * Function: sr_play_msf()
27391 27384 *
27392 27385 * Description: This routine is the driver entry point for handling CD-ROM
27393 27386 * ioctl requests to output the audio signals at the specified
27394 27387 * starting address and continue the audio play until the specified
27395 27388 * ending address (CDROMPLAYMSF) The address is in Minute Second
27396 27389  *		ending address (CDROMPLAYMSF). The address is in Minute Second
27397 27390 *
27398 27391 * Arguments: dev - the device 'dev_t'
27399 27392 * data - pointer to user provided audio msf structure,
27400 27393 * specifying start/end addresses.
27401 27394 * flag - this argument is a pass through to ddi_copyxxx()
27402 27395 * directly from the mode argument of ioctl().
27403 27396 *
27404 27397 * Return Code: the code returned by sd_send_scsi_cmd()
27405 27398 * EFAULT if ddi_copyxxx() fails
27406 27399 * ENXIO if fail ddi_get_soft_state
27407 27400 * EINVAL if data pointer is NULL
27408 27401 */
27409 27402
27410 27403 static int
27411 27404 sr_play_msf(dev_t dev, caddr_t data, int flag)
27412 27405 {
27413 27406 struct sd_lun *un;
27414 27407 struct uscsi_cmd *com;
27415 27408 struct cdrom_msf msf_struct;
27416 27409 struct cdrom_msf *msf = &msf_struct;
27417 27410 char cdb[CDB_GROUP1];
27418 27411 int rval;
27419 27412
27420 27413 if (data == NULL) {
27421 27414 return (EINVAL);
27422 27415 }
27423 27416
27424 27417 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27425 27418 return (ENXIO);
27426 27419 }
27427 27420
27428 27421 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) {
27429 27422 return (EFAULT);
27430 27423 }
27431 27424
27432 27425 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27433 27426 bzero(cdb, CDB_GROUP1);
27434 27427 cdb[0] = SCMD_PLAYAUDIO_MSF;
27435 27428 if (un->un_f_cfg_playmsf_bcd == TRUE) {
27436 27429 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0);
27437 27430 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0);
27438 27431 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0);
27439 27432 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1);
27440 27433 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1);
27441 27434 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1);
27442 27435 } else {
27443 27436 cdb[3] = msf->cdmsf_min0;
27444 27437 cdb[4] = msf->cdmsf_sec0;
27445 27438 cdb[5] = msf->cdmsf_frame0;
27446 27439 cdb[6] = msf->cdmsf_min1;
27447 27440 cdb[7] = msf->cdmsf_sec1;
27448 27441 cdb[8] = msf->cdmsf_frame1;
27449 27442 }
27450 27443 com->uscsi_cdb = cdb;
27451 27444 com->uscsi_cdblen = CDB_GROUP1;
27452 27445 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27453 27446 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27454 27447 SD_PATH_STANDARD);
27455 27448 kmem_free(com, sizeof (*com));
27456 27449 return (rval);
27457 27450 }
27458 27451
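
An MSF address counts 75 frames per second and 60 seconds per minute; the caller fills in both endpoints and the drive plays the span between them. A minimal CDROMPLAYMSF sketch using the cdmsf_* fields the CDB construction above reads (fd assumed to be an open raw CD-ROM descriptor):

	#include <sys/cdio.h>
	#include <string.h>
	#include <unistd.h>

	/* Play from 00:02:00 (the first addressable frame) to 01:00:00. */
	static int
	play_first_minute(int fd)
	{
		struct cdrom_msf msf;

		(void) memset(&msf, 0, sizeof (msf));
		msf.cdmsf_min0 = 0;	/* start address */
		msf.cdmsf_sec0 = 2;
		msf.cdmsf_frame0 = 0;
		msf.cdmsf_min1 = 1;	/* end address */
		msf.cdmsf_sec1 = 0;
		msf.cdmsf_frame1 = 0;
		return (ioctl(fd, CDROMPLAYMSF, &msf));
	}
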
27459 27452
27460 27453 /*
27461 27454 * Function: sr_play_trkind()
27462 27455 *
27463 27456 * Description: This routine is the driver entry point for handling CD-ROM
27464 27457 * ioctl requests to output the audio signals at the specified
27465 27458 * starting address and continue the audio play until the specified
27466 27459 * ending address (CDROMPLAYTRKIND). The address is in Track Index
27467 27460 * format.
27468 27461 *
27469 27462 * Arguments: dev - the device 'dev_t'
27470 27463 * data - pointer to user provided audio track/index structure,
27471 27464 * specifying start/end addresses.
27472 27465 * flag - this argument is a pass through to ddi_copyxxx()
27473 27466 * directly from the mode argument of ioctl().
27474 27467 *
27475 27468 * Return Code: the code returned by sd_send_scsi_cmd()
27476 27469 * EFAULT if ddi_copyxxx() fails
27477 27470 * ENXIO if fail ddi_get_soft_state
27478 27471 * EINVAL if data pointer is NULL
27479 27472 */
27480 27473
27481 27474 static int
27482 27475 sr_play_trkind(dev_t dev, caddr_t data, int flag)
27483 27476 {
27484 27477 struct cdrom_ti ti_struct;
27485 27478 struct cdrom_ti *ti = &ti_struct;
27486 27479 struct uscsi_cmd *com = NULL;
27487 27480 char cdb[CDB_GROUP1];
27488 27481 int rval;
27489 27482
27490 27483 if (data == NULL) {
27491 27484 return (EINVAL);
27492 27485 }
27493 27486
27494 27487 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) {
27495 27488 return (EFAULT);
27496 27489 }
27497 27490
27498 27491 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27499 27492 bzero(cdb, CDB_GROUP1);
27500 27493 cdb[0] = SCMD_PLAYAUDIO_TI;
27501 27494 cdb[4] = ti->cdti_trk0;
27502 27495 cdb[5] = ti->cdti_ind0;
27503 27496 cdb[7] = ti->cdti_trk1;
27504 27497 cdb[8] = ti->cdti_ind1;
27505 27498 com->uscsi_cdb = cdb;
27506 27499 com->uscsi_cdblen = CDB_GROUP1;
27507 27500 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27508 27501 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27509 27502 SD_PATH_STANDARD);
27510 27503 kmem_free(com, sizeof (*com));
27511 27504 return (rval);
27512 27505 }
27513 27506
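
The Track/Index form names the play span by track and index numbers rather than absolute addresses. A minimal CDROMPLAYTRKIND sketch using the cdti_* fields loaded into the CDB above (fd assumed open):

	#include <sys/cdio.h>
	#include <string.h>
	#include <unistd.h>

	/* Play from the start of track 1 through the end of track 2. */
	static int
	play_tracks(int fd)
	{
		struct cdrom_ti ti;

		(void) memset(&ti, 0, sizeof (ti));
		ti.cdti_trk0 = 1;	/* starting track */
		ti.cdti_ind0 = 1;	/* starting index */
		ti.cdti_trk1 = 2;	/* ending track */
		ti.cdti_ind1 = 1;	/* ending index */
		return (ioctl(fd, CDROMPLAYTRKIND, &ti));
	}
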
27514 27507
27515 27508 /*
27516 27509 * Function: sr_read_all_subcodes()
27517 27510 *
27518 27511 * Description: This routine is the driver entry point for handling CD-ROM
27519 27512 * ioctl requests to return raw subcode data while the target is
27520 27513 * playing audio (CDROMSUBCODE).
27521 27514 *
27522 27515 * Arguments: dev - the device 'dev_t'
27523 27516 * data - pointer to user provided cdrom subcode structure,
27524 27517 * specifying the transfer length and address.
27525 27518 * flag - this argument is a pass through to ddi_copyxxx()
27526 27519 * directly from the mode argument of ioctl().
27527 27520 *
27528 27521 * Return Code: the code returned by sd_send_scsi_cmd()
27529 27522 * EFAULT if ddi_copyxxx() fails
27530 27523 * ENXIO if fail ddi_get_soft_state
27531 27524 * EINVAL if data pointer is NULL
27532 27525 */
27533 27526
27534 27527 static int
27535 27528 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag)
27536 27529 {
27537 27530 struct sd_lun *un = NULL;
27538 27531 struct uscsi_cmd *com = NULL;
27539 27532 struct cdrom_subcode *subcode = NULL;
27540 27533 int rval;
27541 27534 size_t buflen;
27542 27535 char cdb[CDB_GROUP5];
27543 27536
27544 27537 #ifdef _MULTI_DATAMODEL
27545 27538 /* To support ILP32 applications in an LP64 world */
27546 27539 struct cdrom_subcode32 cdrom_subcode32;
27547 27540 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32;
27548 27541 #endif
27549 27542 if (data == NULL) {
27550 27543 return (EINVAL);
27551 27544 }
27552 27545
27553 27546 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27554 27547 return (ENXIO);
27555 27548 }
27556 27549
27557 27550 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP);
27558 27551
27559 27552 #ifdef _MULTI_DATAMODEL
27560 27553 switch (ddi_model_convert_from(flag & FMODELS)) {
27561 27554 case DDI_MODEL_ILP32:
27562 27555 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) {
27563 27556 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27564 27557 "sr_read_all_subcodes: ddi_copyin Failed\n");
27565 27558 kmem_free(subcode, sizeof (struct cdrom_subcode));
27566 27559 return (EFAULT);
27567 27560 }
27568 27561 /* Convert the ILP32 uscsi data from the application to LP64 */
27569 27562 cdrom_subcode32tocdrom_subcode(cdsc32, subcode);
27570 27563 break;
27571 27564 case DDI_MODEL_NONE:
27572 27565 if (ddi_copyin(data, subcode,
27573 27566 sizeof (struct cdrom_subcode), flag)) {
27574 27567 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27575 27568 "sr_read_all_subcodes: ddi_copyin Failed\n");
27576 27569 kmem_free(subcode, sizeof (struct cdrom_subcode));
27577 27570 return (EFAULT);
27578 27571 }
27579 27572 break;
27580 27573 }
27581 27574 #else /* ! _MULTI_DATAMODEL */
27582 27575 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
27583 27576 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27584 27577 "sr_read_all_subcodes: ddi_copyin Failed\n");
27585 27578 kmem_free(subcode, sizeof (struct cdrom_subcode));
27586 27579 return (EFAULT);
27587 27580 }
27588 27581 #endif /* _MULTI_DATAMODEL */
27589 27582
27590 27583 /*
27591 27584 * Since MMC-2 expects max 3 bytes for length, check if the
27592 27585 * length input is greater than 3 bytes
27593 27586 */
27594 27587 if ((subcode->cdsc_length & 0xFF000000) != 0) {
27595 27588 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27596 27589 "sr_read_all_subcodes: "
27597 27590 "cdrom transfer length too large: %d (limit %d)\n",
27598 27591 subcode->cdsc_length, 0xFFFFFF);
27599 27592 kmem_free(subcode, sizeof (struct cdrom_subcode));
27600 27593 return (EINVAL);
27601 27594 }
27602 27595
27603 27596 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
27604 27597 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27605 27598 bzero(cdb, CDB_GROUP5);
27606 27599
27607 27600 if (un->un_f_mmc_cap == TRUE) {
27608 27601 cdb[0] = (char)SCMD_READ_CD;
27609 27602 cdb[2] = (char)0xff;
27610 27603 cdb[3] = (char)0xff;
27611 27604 cdb[4] = (char)0xff;
27612 27605 cdb[5] = (char)0xff;
27613 27606 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
27614 27607 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
27615 27608 cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
27616 27609 cdb[10] = 1;
27617 27610 } else {
27618 27611 /*
27619 27612 * Note: A vendor specific command (0xDF) is being used here to
27620 27613 * request a read of all subcodes.
27621 27614 */
27622 27615 cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
27623 27616 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
27624 27617 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
27625 27618 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
27626 27619 cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
27627 27620 }
27628 27621 com->uscsi_cdb = cdb;
27629 27622 com->uscsi_cdblen = CDB_GROUP5;
27630 27623 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
27631 27624 com->uscsi_buflen = buflen;
27632 27625 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27633 27626 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
27634 27627 SD_PATH_STANDARD);
27635 27628 kmem_free(subcode, sizeof (struct cdrom_subcode));
27636 27629 kmem_free(com, sizeof (*com));
27637 27630 return (rval);
27638 27631 }
27639 27632
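
The transfer length is a block count: the driver sizes the data buffer at CDROM_BLK_SUBCODE bytes per block and rejects counts that would overflow the 3-byte CDB length field. A user-space mirror of that sizing rule (a sketch; the CDROM_BLK_SUBCODE macro is assumed to come from <sys/cdio.h>, as in the driver):

	#include <sys/cdio.h>
	#include <sys/types.h>

	/*
	 * Return the buffer size a CDROMSUBCODE request needs, or 0 if the
	 * block count will not fit in the CDB's 3-byte length field.
	 */
	static size_t
	subcode_buflen(uint32_t nblocks)
	{
		if ((nblocks & 0xFF000000) != 0)
			return (0);
		return ((size_t)CDROM_BLK_SUBCODE * nblocks);
	}
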
27640 27633
27641 27634 /*
27642 27635 * Function: sr_read_subchannel()
27643 27636 *
27644 27637 * Description: This routine is the driver entry point for handling CD-ROM
27645 27638 * ioctl requests to return the Q sub-channel data of the CD
27646 27639  *		current position block (CDROMSUBCHNL). The data includes the
27647 27640  *		track number, index number, absolute CD-ROM address (LBA or MSF
27648 27641  *		format per the user), track relative CD-ROM address (LBA or MSF
27649 27642 * format per the user), control data and audio status.
27650 27643 *
27651 27644 * Arguments: dev - the device 'dev_t'
27652 27645 * data - pointer to user provided cdrom sub-channel structure
27653 27646 * flag - this argument is a pass through to ddi_copyxxx()
27654 27647 * directly from the mode argument of ioctl().
27655 27648 *
27656 27649 * Return Code: the code returned by sd_send_scsi_cmd()
27657 27650 * EFAULT if ddi_copyxxx() fails
27658 27651 * ENXIO if fail ddi_get_soft_state
27659 27652 * EINVAL if data pointer is NULL
27660 27653 */
27661 27654
27662 27655 static int
27663 27656 sr_read_subchannel(dev_t dev, caddr_t data, int flag)
27664 27657 {
27665 27658 struct sd_lun *un;
27666 27659 struct uscsi_cmd *com;
27667 27660 struct cdrom_subchnl subchanel;
27668 27661 struct cdrom_subchnl *subchnl = &subchanel;
27669 27662 char cdb[CDB_GROUP1];
27670 27663 caddr_t buffer;
27671 27664 int rval;
27672 27665
27673 27666 if (data == NULL) {
27674 27667 return (EINVAL);
27675 27668 }
27676 27669
27677 27670 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27678 27671 (un->un_state == SD_STATE_OFFLINE)) {
27679 27672 return (ENXIO);
27680 27673 }
27681 27674
27682 27675 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) {
27683 27676 return (EFAULT);
27684 27677 }
27685 27678
27686 27679 buffer = kmem_zalloc((size_t)16, KM_SLEEP);
27687 27680 bzero(cdb, CDB_GROUP1);
27688 27681 cdb[0] = SCMD_READ_SUBCHANNEL;
27689 27682 /* Set the MSF bit based on the user requested address format */
27690 27683 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02;
27691 27684 /*
27692 27685 * Set the Q bit in byte 2 to indicate that Q sub-channel data be
27693 27686 * returned
27694 27687 */
27695 27688 cdb[2] = 0x40;
27696 27689 /*
27697 27690 * Set byte 3 to specify the return data format. A value of 0x01
27698 27691 * indicates that the CD-ROM current position should be returned.
27699 27692 */
27700 27693 cdb[3] = 0x01;
27701 27694 cdb[8] = 0x10;
27702 27695 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27703 27696 com->uscsi_cdb = cdb;
27704 27697 com->uscsi_cdblen = CDB_GROUP1;
27705 27698 com->uscsi_bufaddr = buffer;
27706 27699 com->uscsi_buflen = 16;
27707 27700 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27708 27701 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27709 27702 SD_PATH_STANDARD);
27710 27703 if (rval != 0) {
27711 27704 kmem_free(buffer, 16);
27712 27705 kmem_free(com, sizeof (*com));
27713 27706 return (rval);
27714 27707 }
27715 27708
27716 27709 /* Process the returned Q sub-channel data */
27717 27710 subchnl->cdsc_audiostatus = buffer[1];
27718 27711 subchnl->cdsc_adr = (buffer[5] & 0xF0) >> 4;
27719 27712 subchnl->cdsc_ctrl = (buffer[5] & 0x0F);
27720 27713 subchnl->cdsc_trk = buffer[6];
27721 27714 subchnl->cdsc_ind = buffer[7];
27722 27715 if (subchnl->cdsc_format & CDROM_LBA) {
27723 27716 subchnl->cdsc_absaddr.lba =
27724 27717 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
27725 27718 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
27726 27719 subchnl->cdsc_reladdr.lba =
27727 27720 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) +
27728 27721 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]);
27729 27722 } else if (un->un_f_cfg_readsub_bcd == TRUE) {
27730 27723 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]);
27731 27724 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]);
27732 27725 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]);
27733 27726 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]);
27734 27727 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]);
27735 27728 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]);
27736 27729 } else {
27737 27730 subchnl->cdsc_absaddr.msf.minute = buffer[9];
27738 27731 subchnl->cdsc_absaddr.msf.second = buffer[10];
27739 27732 subchnl->cdsc_absaddr.msf.frame = buffer[11];
27740 27733 subchnl->cdsc_reladdr.msf.minute = buffer[13];
27741 27734 subchnl->cdsc_reladdr.msf.second = buffer[14];
27742 27735 subchnl->cdsc_reladdr.msf.frame = buffer[15];
27743 27736 }
27744 27737 kmem_free(buffer, 16);
27745 27738 kmem_free(com, sizeof (*com));
27746 27739 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag)
27747 27740 != 0) {
27748 27741 return (EFAULT);
27749 27742 }
27750 27743 return (rval);
27751 27744 }
27752 27745
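
The Q sub-channel carries the current play position, so polling CDROMSUBCHNL is how an audio player tracks progress. A minimal sketch, requesting MSF addresses and reading back the fields decoded above (fd assumed open):

	#include <sys/cdio.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static void
	show_position(int fd)
	{
		struct cdrom_subchnl sc;

		(void) memset(&sc, 0, sizeof (sc));
		sc.cdsc_format = CDROM_MSF;	/* MSF rather than LBA addresses */
		if (ioctl(fd, CDROMSUBCHNL, &sc) < 0) {
			perror("CDROMSUBCHNL");
			return;
		}
		(void) printf("track %d index %d at %02d:%02d.%02d\n",
		    sc.cdsc_trk, sc.cdsc_ind,
		    sc.cdsc_absaddr.msf.minute, sc.cdsc_absaddr.msf.second,
		    sc.cdsc_absaddr.msf.frame);
	}
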
27753 27746
27754 27747 /*
27755 27748 * Function: sr_read_tocentry()
27756 27749 *
27757 27750 * Description: This routine is the driver entry point for handling CD-ROM
27758 27751 * ioctl requests to read from the Table of Contents (TOC)
27759 27752 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL
27760 27753 * fields, the starting address (LBA or MSF format per the user)
27761 27754 * and the data mode if the user specified track is a data track.
27762 27755 *
27763 27756 * Note: The READ HEADER (0x44) command used in this routine is
27764 27757 * obsolete per the SCSI MMC spec but still supported in the
27765 27758  *		MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
27766 27759 * therefore the command is still implemented in this routine.
27767 27760 *
27768 27761 * Arguments: dev - the device 'dev_t'
27769 27762 * data - pointer to user provided toc entry structure,
27770 27763 * specifying the track # and the address format
27771 27764 * (LBA or MSF).
27772 27765 * flag - this argument is a pass through to ddi_copyxxx()
27773 27766 * directly from the mode argument of ioctl().
27774 27767 *
27775 27768 * Return Code: the code returned by sd_send_scsi_cmd()
27776 27769 * EFAULT if ddi_copyxxx() fails
27777 27770 * ENXIO if fail ddi_get_soft_state
27778 27771 * EINVAL if data pointer is NULL
27779 27772 */
27780 27773
27781 27774 static int
27782 27775 sr_read_tocentry(dev_t dev, caddr_t data, int flag)
27783 27776 {
27784 27777 struct sd_lun *un = NULL;
27785 27778 struct uscsi_cmd *com;
27786 27779 struct cdrom_tocentry toc_entry;
27787 27780 struct cdrom_tocentry *entry = &toc_entry;
27788 27781 caddr_t buffer;
27789 27782 int rval;
27790 27783 char cdb[CDB_GROUP1];
27791 27784
27792 27785 if (data == NULL) {
27793 27786 return (EINVAL);
27794 27787 }
27795 27788
27796 27789 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27797 27790 (un->un_state == SD_STATE_OFFLINE)) {
27798 27791 return (ENXIO);
27799 27792 }
27800 27793
27801 27794 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
27802 27795 return (EFAULT);
27803 27796 }
27804 27797
27805 27798 /* Validate the requested track and address format */
27806 27799 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
27807 27800 return (EINVAL);
27808 27801 }
27809 27802
27810 27803 if (entry->cdte_track == 0) {
27811 27804 return (EINVAL);
27812 27805 }
27813 27806
27814 27807 buffer = kmem_zalloc((size_t)12, KM_SLEEP);
27815 27808 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27816 27809 bzero(cdb, CDB_GROUP1);
27817 27810
27818 27811 cdb[0] = SCMD_READ_TOC;
27819 27812 /* Set the MSF bit based on the user requested address format */
27820 27813 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
27821 27814 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
27822 27815 cdb[6] = BYTE_TO_BCD(entry->cdte_track);
27823 27816 } else {
27824 27817 cdb[6] = entry->cdte_track;
27825 27818 }
27826 27819
27827 27820 /*
27828 27821 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
27829 27822 * (4 byte TOC response header + 8 byte track descriptor)
27830 27823 */
27831 27824 cdb[8] = 12;
27832 27825 com->uscsi_cdb = cdb;
27833 27826 com->uscsi_cdblen = CDB_GROUP1;
27834 27827 com->uscsi_bufaddr = buffer;
27835 27828 com->uscsi_buflen = 0x0C;
27836 27829 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
27837 27830 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27838 27831 SD_PATH_STANDARD);
27839 27832 if (rval != 0) {
27840 27833 kmem_free(buffer, 12);
27841 27834 kmem_free(com, sizeof (*com));
27842 27835 return (rval);
27843 27836 }
27844 27837
27845 27838 /* Process the toc entry */
27846 27839 entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
27847 27840 entry->cdte_ctrl = (buffer[5] & 0x0F);
27848 27841 if (entry->cdte_format & CDROM_LBA) {
27849 27842 entry->cdte_addr.lba =
27850 27843 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
27851 27844 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
27852 27845 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
27853 27846 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
27854 27847 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
27855 27848 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
27856 27849 /*
27857 27850 * Send a READ TOC command using the LBA address format to get
27858 27851 * the LBA for the track requested so it can be used in the
27859 27852 * READ HEADER request
27860 27853 *
27861 27854 * Note: The MSF bit of the READ HEADER command specifies the
27862 27855 * output format. The block address specified in that command
27863 27856 * must be in LBA format.
27864 27857 */
27865 27858 cdb[1] = 0;
27866 27859 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27867 27860 SD_PATH_STANDARD);
27868 27861 if (rval != 0) {
27869 27862 kmem_free(buffer, 12);
27870 27863 kmem_free(com, sizeof (*com));
27871 27864 return (rval);
27872 27865 }
27873 27866 } else {
27874 27867 entry->cdte_addr.msf.minute = buffer[9];
27875 27868 entry->cdte_addr.msf.second = buffer[10];
27876 27869 entry->cdte_addr.msf.frame = buffer[11];
27877 27870 /*
27878 27871 * Send a READ TOC command using the LBA address format to get
27879 27872 * the LBA for the track requested so it can be used in the
27880 27873 * READ HEADER request
27881 27874 *
27882 27875 * Note: The MSF bit of the READ HEADER command specifies the
27883 27876 * output format. The block address specified in that command
27884 27877 * must be in LBA format.
27885 27878 */
27886 27879 cdb[1] = 0;
27887 27880 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27888 27881 SD_PATH_STANDARD);
27889 27882 if (rval != 0) {
27890 27883 kmem_free(buffer, 12);
27891 27884 kmem_free(com, sizeof (*com));
27892 27885 return (rval);
27893 27886 }
27894 27887 }
27895 27888
27896 27889 /*
27897 27890 * Build and send the READ HEADER command to determine the data mode of
27898 27891 * the user specified track.
27899 27892 */
27900 27893 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
27901 27894 (entry->cdte_track != CDROM_LEADOUT)) {
27902 27895 bzero(cdb, CDB_GROUP1);
27903 27896 cdb[0] = SCMD_READ_HEADER;
27904 27897 cdb[2] = buffer[8];
27905 27898 cdb[3] = buffer[9];
27906 27899 cdb[4] = buffer[10];
27907 27900 cdb[5] = buffer[11];
27908 27901 cdb[8] = 0x08;
27909 27902 com->uscsi_buflen = 0x08;
27910 27903 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27911 27904 SD_PATH_STANDARD);
27912 27905 if (rval == 0) {
27913 27906 entry->cdte_datamode = buffer[0];
27914 27907 } else {
27915 27908 /*
27916 27909 			 * The READ HEADER command failed. Since it is
27917 27910 			 * obsoleted in one spec, it's better to return
27918 27911 			 * -1 for an invalid track so that we can still
27919 27912 			 * receive the rest of the TOC data.
27920 27913 */
27921 27914 entry->cdte_datamode = (uchar_t)-1;
27922 27915 }
27923 27916 } else {
27924 27917 entry->cdte_datamode = (uchar_t)-1;
27925 27918 }
27926 27919
27927 27920 kmem_free(buffer, 12);
27928 27921 kmem_free(com, sizeof (*com));
27929 27922 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
27930 27923 return (EFAULT);
27931 27924
27932 27925 return (rval);
27933 27926 }
27934 27927
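
A caller names a track and an address format and gets back its ADR/CTRL bits, start address, and (for data tracks) the data mode. A minimal CDROMREADTOCENTRY sketch using the cdte_* fields filled in above (fd assumed open):

	#include <sys/types.h>
	#include <sys/cdio.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static void
	show_track(int fd, uchar_t track)
	{
		struct cdrom_tocentry te;

		(void) memset(&te, 0, sizeof (te));
		te.cdte_track = track;
		te.cdte_format = CDROM_LBA;	/* ask for LBA addresses */
		if (ioctl(fd, CDROMREADTOCENTRY, &te) < 0) {
			perror("CDROMREADTOCENTRY");
			return;
		}
		(void) printf("track %d: LBA %d, %s\n", track,
		    te.cdte_addr.lba,
		    (te.cdte_ctrl & CDROM_DATA_TRACK) ? "data" : "audio");
	}
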
27935 27928
27936 27929 /*
27937 27930 * Function: sr_read_tochdr()
27938 27931 *
27939 27932 * Description: This routine is the driver entry point for handling CD-ROM
27940 27933 * ioctl requests to read the Table of Contents (TOC) header
27941 27934  *		(CDROMREADTOCHDR). The TOC header consists of the disk starting
27942 27935  *		and ending track numbers.
27943 27936 *
27944 27937 * Arguments: dev - the device 'dev_t'
27945 27938 * data - pointer to user provided toc header structure,
27946 27939 * specifying the starting and ending track numbers.
27947 27940 * flag - this argument is a pass through to ddi_copyxxx()
27948 27941 * directly from the mode argument of ioctl().
27949 27942 *
27950 27943 * Return Code: the code returned by sd_send_scsi_cmd()
27951 27944 * EFAULT if ddi_copyxxx() fails
27952 27945 * ENXIO if fail ddi_get_soft_state
27953 27946 * EINVAL if data pointer is NULL
27954 27947 */
27955 27948
27956 27949 static int
27957 27950 sr_read_tochdr(dev_t dev, caddr_t data, int flag)
27958 27951 {
27959 27952 struct sd_lun *un;
27960 27953 struct uscsi_cmd *com;
27961 27954 struct cdrom_tochdr toc_header;
27962 27955 struct cdrom_tochdr *hdr = &toc_header;
27963 27956 char cdb[CDB_GROUP1];
27964 27957 int rval;
27965 27958 caddr_t buffer;
27966 27959
27967 27960 if (data == NULL) {
27968 27961 return (EINVAL);
27969 27962 }
27970 27963
27971 27964 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27972 27965 (un->un_state == SD_STATE_OFFLINE)) {
27973 27966 return (ENXIO);
27974 27967 }
27975 27968
27976 27969 buffer = kmem_zalloc(4, KM_SLEEP);
27977 27970 bzero(cdb, CDB_GROUP1);
27978 27971 cdb[0] = SCMD_READ_TOC;
27979 27972 /*
27980 27973 * Specifying a track number of 0x00 in the READ TOC command indicates
27981 27974 * that the TOC header should be returned
27982 27975 */
27983 27976 cdb[6] = 0x00;
27984 27977 /*
27985 27978 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
27986 27979 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
27987 27980 */
27988 27981 cdb[8] = 0x04;
27989 27982 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27990 27983 com->uscsi_cdb = cdb;
27991 27984 com->uscsi_cdblen = CDB_GROUP1;
27992 27985 com->uscsi_bufaddr = buffer;
27993 27986 com->uscsi_buflen = 0x04;
27994 27987 com->uscsi_timeout = 300;
27995 27988 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27996 27989
27997 27990 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27998 27991 SD_PATH_STANDARD);
27999 27992 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
28000 27993 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
28001 27994 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
28002 27995 } else {
28003 27996 hdr->cdth_trk0 = buffer[2];
28004 27997 hdr->cdth_trk1 = buffer[3];
28005 27998 }
28006 27999 kmem_free(buffer, 4);
28007 28000 kmem_free(com, sizeof (*com));
28008 28001 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
28009 28002 return (EFAULT);
28010 28003 }
28011 28004 return (rval);
28012 28005 }
28013 28006
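
Paired with CDROMREADTOCENTRY, the header's first/last track numbers bound a walk of the whole TOC. A minimal sketch (fd assumed open; show_track() as sketched after sr_read_tocentry() above):

	#include <sys/types.h>
	#include <sys/cdio.h>
	#include <stdio.h>
	#include <unistd.h>

	static void
	walk_toc(int fd)
	{
		struct cdrom_tochdr th;
		int trk;

		if (ioctl(fd, CDROMREADTOCHDR, &th) < 0) {
			perror("CDROMREADTOCHDR");
			return;
		}
		/* Visit every track the disc reports. */
		for (trk = th.cdth_trk0; trk <= th.cdth_trk1; trk++)
			show_track(fd, (uchar_t)trk);
	}
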
28014 28007
28015 28008 /*
28016 28009 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
28017 28010  * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for
28018 28011 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
28019 28012 * digital audio and extended architecture digital audio. These modes are
28020 28013 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
28021 28014 * MMC specs.
28022 28015 *
28023 28016 * In addition to support for the various data formats these routines also
28024 28017 * include support for devices that implement only the direct access READ
28025 28018 * commands (0x08, 0x28), devices that implement the READ_CD commands
28026 28019 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and
28027 28020  * READ CDXA commands (0xD8, 0xDB).
28028 28021 */
28029 28022
28030 28023 /*
28031 28024 * Function: sr_read_mode1()
28032 28025 *
28033 28026 * Description: This routine is the driver entry point for handling CD-ROM
28034 28027 * ioctl read mode1 requests (CDROMREADMODE1).
28035 28028 *
28036 28029 * Arguments: dev - the device 'dev_t'
28037 28030 * data - pointer to user provided cd read structure specifying
28038 28031 * the lba buffer address and length.
28039 28032 * flag - this argument is a pass through to ddi_copyxxx()
28040 28033 * directly from the mode argument of ioctl().
28041 28034 *
28042 28035 * Return Code: the code returned by sd_send_scsi_cmd()
28043 28036 * EFAULT if ddi_copyxxx() fails
28044 28037 * ENXIO if fail ddi_get_soft_state
28045 28038 * EINVAL if data pointer is NULL
28046 28039 */
28047 28040
28048 28041 static int
28049 28042 sr_read_mode1(dev_t dev, caddr_t data, int flag)
28050 28043 {
28051 28044 struct sd_lun *un;
28052 28045 struct cdrom_read mode1_struct;
28053 28046 struct cdrom_read *mode1 = &mode1_struct;
28054 28047 int rval;
28055 28048 sd_ssc_t *ssc;
28056 28049
28057 28050 #ifdef _MULTI_DATAMODEL
28058 28051 /* To support ILP32 applications in an LP64 world */
28059 28052 struct cdrom_read32 cdrom_read32;
28060 28053 struct cdrom_read32 *cdrd32 = &cdrom_read32;
28061 28054 #endif /* _MULTI_DATAMODEL */
28062 28055
28063 28056 if (data == NULL) {
28064 28057 return (EINVAL);
28065 28058 }
28066 28059
28067 28060 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28068 28061 (un->un_state == SD_STATE_OFFLINE)) {
28069 28062 return (ENXIO);
28070 28063 }
28071 28064
28072 28065 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
28073 28066 "sd_read_mode1: entry: un:0x%p\n", un);
28074 28067
28075 28068 #ifdef _MULTI_DATAMODEL
28076 28069 switch (ddi_model_convert_from(flag & FMODELS)) {
28077 28070 case DDI_MODEL_ILP32:
28078 28071 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
28079 28072 return (EFAULT);
28080 28073 }
28081 28074 /* Convert the ILP32 uscsi data from the application to LP64 */
28082 28075 cdrom_read32tocdrom_read(cdrd32, mode1);
28083 28076 break;
28084 28077 case DDI_MODEL_NONE:
28085 28078 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) {
28086 28079 return (EFAULT);
28087 28080 }
28088 28081 }
28089 28082 #else /* ! _MULTI_DATAMODEL */
28090 28083 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) {
28091 28084 return (EFAULT);
28092 28085 }
28093 28086 #endif /* _MULTI_DATAMODEL */
28094 28087
28095 28088 ssc = sd_ssc_init(un);
28096 28089 rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr,
28097 28090 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD);
28098 28091 sd_ssc_fini(ssc);
28099 28092
28100 28093 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
28101 28094 "sd_read_mode1: exit: un:0x%p\n", un);
28102 28095
28103 28096 return (rval);
28104 28097 }
28105 28098
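
Mode 1 requests are plain reads of 2048-byte user-data sectors. A minimal CDROMREADMODE1 sketch reading the ISO 9660 primary volume descriptor, which sits at sector 16 on a data disc (fd assumed open; cdread_lba taken here to be in 2048-byte target blocks, since it is passed straight through to the READ):

	#include <sys/cdio.h>
	#include <string.h>
	#include <unistd.h>

	static int
	read_pvd(int fd, char buf[2048])
	{
		struct cdrom_read rd;

		(void) memset(&rd, 0, sizeof (rd));
		rd.cdread_lba = 16;		/* ISO 9660 PVD sector */
		rd.cdread_bufaddr = buf;
		rd.cdread_buflen = 2048;	/* one mode 1 data sector */
		return (ioctl(fd, CDROMREADMODE1, &rd));
	}
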
28106 28099
28107 28100 /*
28108 28101 * Function: sr_read_cd_mode2()
28109 28102 *
28110 28103 * Description: This routine is the driver entry point for handling CD-ROM
28111 28104 * ioctl read mode2 requests (CDROMREADMODE2) for devices that
28112 28105 * support the READ CD (0xBE) command or the 1st generation
28113 28106 * READ CD (0xD4) command.
28114 28107 *
28115 28108 * Arguments: dev - the device 'dev_t'
28116 28109 * data - pointer to user provided cd read structure specifying
28117 28110 * the lba buffer address and length.
28118 28111 * flag - this argument is a pass through to ddi_copyxxx()
28119 28112 * directly from the mode argument of ioctl().
28120 28113 *
28121 28114 * Return Code: the code returned by sd_send_scsi_cmd()
28122 28115 * EFAULT if ddi_copyxxx() fails
28123 28116 * ENXIO if fail ddi_get_soft_state
28124 28117 * EINVAL if data pointer is NULL
28125 28118 */
28126 28119
28127 28120 static int
28128 28121 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
28129 28122 {
28130 28123 struct sd_lun *un;
28131 28124 struct uscsi_cmd *com;
28132 28125 struct cdrom_read mode2_struct;
28133 28126 struct cdrom_read *mode2 = &mode2_struct;
28134 28127 uchar_t cdb[CDB_GROUP5];
28135 28128 int nblocks;
28136 28129 int rval;
28137 28130 #ifdef _MULTI_DATAMODEL
28138 28131 /* To support ILP32 applications in an LP64 world */
28139 28132 struct cdrom_read32 cdrom_read32;
28140 28133 struct cdrom_read32 *cdrd32 = &cdrom_read32;
28141 28134 #endif /* _MULTI_DATAMODEL */
28142 28135
28143 28136 if (data == NULL) {
28144 28137 return (EINVAL);
28145 28138 }
28146 28139
28147 28140 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28148 28141 (un->un_state == SD_STATE_OFFLINE)) {
28149 28142 return (ENXIO);
28150 28143 }
28151 28144
28152 28145 #ifdef _MULTI_DATAMODEL
28153 28146 switch (ddi_model_convert_from(flag & FMODELS)) {
28154 28147 case DDI_MODEL_ILP32:
28155 28148 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
28156 28149 return (EFAULT);
28157 28150 }
28158 28151 /* Convert the ILP32 uscsi data from the application to LP64 */
28159 28152 cdrom_read32tocdrom_read(cdrd32, mode2);
28160 28153 break;
28161 28154 case DDI_MODEL_NONE:
28162 28155 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
28163 28156 return (EFAULT);
28164 28157 }
28165 28158 break;
28166 28159 }
28167 28160
28168 28161 #else /* ! _MULTI_DATAMODEL */
28169 28162 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
28170 28163 return (EFAULT);
28171 28164 }
28172 28165 #endif /* _MULTI_DATAMODEL */
28173 28166
28174 28167 bzero(cdb, sizeof (cdb));
28175 28168 if (un->un_f_cfg_read_cd_xd4 == TRUE) {
28176 28169 /* Read command supported by 1st generation atapi drives */
28177 28170 cdb[0] = SCMD_READ_CDD4;
28178 28171 } else {
28179 28172 /* Universal CD Access Command */
28180 28173 cdb[0] = SCMD_READ_CD;
28181 28174 }
28182 28175
28183 28176 /*
28184 28177 	 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book
28185 28178 */
28186 28179 cdb[1] = CDROM_SECTOR_TYPE_MODE2;
28187 28180
28188 28181 /* set the start address */
28189 28182 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF);
28190 28183 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF);
28191 28184 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
28192 28185 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);
28193 28186
28194 28187 /* set the transfer length */
28195 28188 nblocks = mode2->cdread_buflen / 2336;
28196 28189 cdb[6] = (uchar_t)(nblocks >> 16);
28197 28190 cdb[7] = (uchar_t)(nblocks >> 8);
28198 28191 cdb[8] = (uchar_t)nblocks;
28199 28192
28200 28193 /* set the filter bits */
28201 28194 cdb[9] = CDROM_READ_CD_USERDATA;
28202 28195
28203 28196 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28204 28197 com->uscsi_cdb = (caddr_t)cdb;
28205 28198 com->uscsi_cdblen = sizeof (cdb);
28206 28199 com->uscsi_bufaddr = mode2->cdread_bufaddr;
28207 28200 com->uscsi_buflen = mode2->cdread_buflen;
28208 28201 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28209 28202
28210 28203 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
28211 28204 SD_PATH_STANDARD);
28212 28205 kmem_free(com, sizeof (*com));
28213 28206 return (rval);
28214 28207 }
28215 28208
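
The READ CD CDB packs a 32-bit starting LBA into bytes 2-5 and a 24-bit block count into bytes 6-8, big-endian, exactly as the assignments above spell out byte by byte. A small stand-alone sketch of that encoding:

	#include <sys/types.h>

	/* Encode LBA and block count into a Group 5 READ CD CDB (bytes 2-8). */
	static void
	readcd_encode(uchar_t cdb[12], uint32_t lba, uint32_t nblks)
	{
		cdb[2] = (lba >> 24) & 0xFF;
		cdb[3] = (lba >> 16) & 0xFF;
		cdb[4] = (lba >> 8) & 0xFF;
		cdb[5] = lba & 0xFF;
		cdb[6] = (nblks >> 16) & 0xFF;	/* count is at most 3 bytes */
		cdb[7] = (nblks >> 8) & 0xFF;
		cdb[8] = nblks & 0xFF;
	}
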
28216 28209
28217 28210 /*
28218 28211 * Function: sr_read_mode2()
28219 28212 *
28220 28213 * Description: This routine is the driver entry point for handling CD-ROM
28221 28214 * ioctl read mode2 requests (CDROMREADMODE2) for devices that
28222 28215 * do not support the READ CD (0xBE) command.
28223 28216 *
28224 28217 * Arguments: dev - the device 'dev_t'
28225 28218 * data - pointer to user provided cd read structure specifying
28226 28219 * the lba buffer address and length.
28227 28220 * flag - this argument is a pass through to ddi_copyxxx()
28228 28221 * directly from the mode argument of ioctl().
28229 28222 *
28230 28223 * Return Code: the code returned by sd_send_scsi_cmd()
28231 28224 * EFAULT if ddi_copyxxx() fails
28232 28225 * ENXIO if fail ddi_get_soft_state
28233 28226 * EINVAL if data pointer is NULL
28234 28227 * EIO if fail to reset block size
28235 28228 * EAGAIN if commands are in progress in the driver
28236 28229 */
28237 28230
28238 28231 static int
28239 28232 sr_read_mode2(dev_t dev, caddr_t data, int flag)
28240 28233 {
28241 28234 struct sd_lun *un;
28242 28235 struct cdrom_read mode2_struct;
28243 28236 struct cdrom_read *mode2 = &mode2_struct;
28244 28237 int rval;
28245 28238 uint32_t restore_blksize;
28246 28239 struct uscsi_cmd *com;
28247 28240 uchar_t cdb[CDB_GROUP0];
28248 28241 int nblocks;
28249 28242
28250 28243 #ifdef _MULTI_DATAMODEL
28251 28244 /* To support ILP32 applications in an LP64 world */
28252 28245 struct cdrom_read32 cdrom_read32;
28253 28246 struct cdrom_read32 *cdrd32 = &cdrom_read32;
28254 28247 #endif /* _MULTI_DATAMODEL */
28255 28248
28256 28249 if (data == NULL) {
28257 28250 return (EINVAL);
28258 28251 }
28259 28252
28260 28253 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28261 28254 (un->un_state == SD_STATE_OFFLINE)) {
28262 28255 return (ENXIO);
28263 28256 }
28264 28257
28265 28258 /*
28266 28259 * Because this routine will update the device and driver block size
28267 28260 	 * being used, we want to make sure there are no commands in progress.
28268 28261 * If commands are in progress the user will have to try again.
28269 28262 *
28270 28263 * We check for 1 instead of 0 because we increment un_ncmds_in_driver
28271 28264 * in sdioctl to protect commands from sdioctl through to the top of
28272 28265 * sd_uscsi_strategy. See sdioctl for details.
28273 28266 */
28274 28267 mutex_enter(SD_MUTEX(un));
28275 28268 if (un->un_ncmds_in_driver != 1) {
28276 28269 mutex_exit(SD_MUTEX(un));
28277 28270 return (EAGAIN);
28278 28271 }
28279 28272 mutex_exit(SD_MUTEX(un));
28280 28273
28281 28274 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
28282 28275 "sd_read_mode2: entry: un:0x%p\n", un);
28283 28276
28284 28277 #ifdef _MULTI_DATAMODEL
28285 28278 switch (ddi_model_convert_from(flag & FMODELS)) {
28286 28279 case DDI_MODEL_ILP32:
28287 28280 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
28288 28281 return (EFAULT);
28289 28282 }
28290 28283 /* Convert the ILP32 uscsi data from the application to LP64 */
28291 28284 cdrom_read32tocdrom_read(cdrd32, mode2);
28292 28285 break;
28293 28286 case DDI_MODEL_NONE:
28294 28287 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
28295 28288 return (EFAULT);
28296 28289 }
28297 28290 break;
28298 28291 }
28299 28292 #else /* ! _MULTI_DATAMODEL */
28300 28293 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
28301 28294 return (EFAULT);
28302 28295 }
28303 28296 #endif /* _MULTI_DATAMODEL */
28304 28297
28305 28298 /* Store the current target block size for restoration later */
28306 28299 restore_blksize = un->un_tgt_blocksize;
28307 28300
28308 28301 /* Change the device and soft state target block size to 2336 */
28309 28302 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
28310 28303 rval = EIO;
28311 28304 goto done;
28312 28305 }
28313 28306
28314 28307
28315 28308 bzero(cdb, sizeof (cdb));
28316 28309
28317 28310 /* set READ operation */
28318 28311 cdb[0] = SCMD_READ;
28319 28312
28320 28313 /* adjust lba for 2kbyte blocks from 512 byte blocks */
28321 28314 mode2->cdread_lba >>= 2;
28322 28315
28323 28316 /* set the start address */
28324 28317 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F);
28325 28318 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
28326 28319 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);
28327 28320
28328 28321 /* set the transfer length */
28329 28322 nblocks = mode2->cdread_buflen / 2336;
28330 28323 cdb[4] = (uchar_t)nblocks & 0xFF;
28331 28324
28332 28325 /* build command */
28333 28326 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28334 28327 com->uscsi_cdb = (caddr_t)cdb;
28335 28328 com->uscsi_cdblen = sizeof (cdb);
28336 28329 com->uscsi_bufaddr = mode2->cdread_bufaddr;
28337 28330 com->uscsi_buflen = mode2->cdread_buflen;
28338 28331 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28339 28332
28340 28333 /*
28341 28334 * Issue SCSI command with user space address for read buffer.
28342 28335 *
28343 28336 * This sends the command through main channel in the driver.
28344 28337 *
28345 28338 * Since this is accessed via an IOCTL call, we go through the
28346 28339 * standard path, so that if the device was powered down, then
28347 28340 * it would be 'awakened' to handle the command.
28348 28341 */
28349 28342 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
28350 28343 SD_PATH_STANDARD);
28351 28344
28352 28345 kmem_free(com, sizeof (*com));
28353 28346
28354 28347 /* Restore the device and soft state target block size */
28355 28348 if (sr_sector_mode(dev, restore_blksize) != 0) {
28356 28349 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28357 28350 "can't do switch back to mode 1\n");
28358 28351 /*
28359 28352 * If sd_send_scsi_READ succeeded we still need to report
28360 28353 * an error because we failed to reset the block size
28361 28354 */
28362 28355 if (rval == 0) {
28363 28356 rval = EIO;
28364 28357 }
28365 28358 }
28366 28359
28367 28360 done:
28368 28361 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
28369 28362 "sd_read_mode2: exit: un:0x%p\n", un);
28370 28363
28371 28364 return (rval);
28372 28365 }
28373 28366
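
The one subtle step above is the address conversion: cdread_lba arrives in 512-byte units, and once the drive has been switched to 2336-byte sectors the Group 0 READ addresses 2 KB blocks, hence the right-shift by two. A worked sketch of that conversion:

	#include <sys/types.h>

	/*
	 * CDROMREADMODE2 (non-READ CD path) takes cdread_lba in 512-byte
	 * units; the driver divides by four to address 2 KB CD sectors.
	 * E.g. 512-byte LBA 64 -> CD sector 16, the ISO 9660 PVD.
	 */
	static uint32_t
	lba512_to_cdsector(uint32_t lba512)
	{
		return (lba512 >> 2);
	}
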
28374 28367
28375 28368 /*
28376 28369 * Function: sr_sector_mode()
28377 28370 *
28378 28371 * Description: This utility function is used by sr_read_mode2 to set the target
28379 28372 * block size based on the user specified size. This is a legacy
28380 28373 * implementation based upon a vendor specific mode page
28381 28374 *
28382 28375 * Arguments: dev - the device 'dev_t'
28383 28376  *		blksize - the target block size being set; either
28384 28377  *		    2336 or 512.
28385 28378  *
28386 28379  * Return Code: the code returned by sd_send_scsi_MODE_SENSE() or
28387 28380  *		    sd_send_scsi_MODE_SELECT()
28388 28381  *		ENXIO if fail ddi_get_soft_state
28390 28383 */
28391 28384
28392 28385 static int
28393 28386 sr_sector_mode(dev_t dev, uint32_t blksize)
28394 28387 {
28395 28388 struct sd_lun *un;
28396 28389 uchar_t *sense;
28397 28390 uchar_t *select;
28398 28391 int rval;
28399 28392 sd_ssc_t *ssc;
28400 28393
28401 28394 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28402 28395 (un->un_state == SD_STATE_OFFLINE)) {
28403 28396 return (ENXIO);
28404 28397 }
28405 28398
28406 28399 sense = kmem_zalloc(20, KM_SLEEP);
28407 28400
28408 28401 /* Note: This is a vendor specific mode page (0x81) */
28409 28402 ssc = sd_ssc_init(un);
28410 28403 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81,
28411 28404 SD_PATH_STANDARD);
28412 28405 sd_ssc_fini(ssc);
28413 28406 if (rval != 0) {
28414 28407 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
28415 28408 "sr_sector_mode: Mode Sense failed\n");
28416 28409 kmem_free(sense, 20);
28417 28410 return (rval);
28418 28411 }
28419 28412 select = kmem_zalloc(20, KM_SLEEP);
28420 28413 select[3] = 0x08;
28421 28414 select[10] = ((blksize >> 8) & 0xff);
28422 28415 select[11] = (blksize & 0xff);
28423 28416 select[12] = 0x01;
28424 28417 select[13] = 0x06;
28425 28418 select[14] = sense[14];
28426 28419 select[15] = sense[15];
28427 28420 if (blksize == SD_MODE2_BLKSIZE) {
28428 28421 select[14] |= 0x01;
28429 28422 }
28430 28423
28431 28424 ssc = sd_ssc_init(un);
28432 28425 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20,
28433 28426 SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
28434 28427 sd_ssc_fini(ssc);
28435 28428 if (rval != 0) {
28436 28429 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
28437 28430 "sr_sector_mode: Mode Select failed\n");
28438 28431 } else {
28439 28432 /*
28440 28433 * Only update the softstate block size if we successfully
28441 28434 * changed the device block mode.
28442 28435 */
28443 28436 mutex_enter(SD_MUTEX(un));
28444 28437 sd_update_block_info(un, blksize, 0);
28445 28438 mutex_exit(SD_MUTEX(un));
28446 28439 }
28447 28440 kmem_free(sense, 20);
28448 28441 kmem_free(select, 20);
28449 28442 return (rval);
28450 28443 }
28451 28444
28452 28445
28453 28446 /*
28454 28447 * Function: sr_read_cdda()
28455 28448 *
28456 28449 * Description: This routine is the driver entry point for handling CD-ROM
28457 28450  *		ioctl requests to return CD-DA or subcode data (CDROMCDDA). If
28458 28451  *		the target supports CDDA these requests are handled via a vendor
28459 28452  *		specific command (0xD8). If the target does not support CDDA
28460 28453 * these requests are handled via the READ CD command (0xBE).
28461 28454 *
28462 28455 * Arguments: dev - the device 'dev_t'
28463 28456 * data - pointer to user provided CD-DA structure specifying
28464 28457 * the track starting address, transfer length, and
28465 28458 * subcode options.
28466 28459 * flag - this argument is a pass through to ddi_copyxxx()
28467 28460 * directly from the mode argument of ioctl().
28468 28461 *
28469 28462 * Return Code: the code returned by sd_send_scsi_cmd()
28470 28463 * EFAULT if ddi_copyxxx() fails
28471 28464 * ENXIO if fail ddi_get_soft_state
28472 28465 * EINVAL if invalid arguments are provided
28473 28466 * ENOTTY
28474 28467 */
28475 28468
28476 28469 static int
28477 28470 sr_read_cdda(dev_t dev, caddr_t data, int flag)
28478 28471 {
28479 28472 struct sd_lun *un;
28480 28473 struct uscsi_cmd *com;
28481 28474 struct cdrom_cdda *cdda;
28482 28475 int rval;
28483 28476 size_t buflen;
28484 28477 char cdb[CDB_GROUP5];
28485 28478
28486 28479 #ifdef _MULTI_DATAMODEL
28487 28480 /* To support ILP32 applications in an LP64 world */
28488 28481 struct cdrom_cdda32 cdrom_cdda32;
28489 28482 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32;
28490 28483 #endif /* _MULTI_DATAMODEL */
28491 28484
28492 28485 if (data == NULL) {
28493 28486 return (EINVAL);
28494 28487 }
28495 28488
28496 28489 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
28497 28490 return (ENXIO);
28498 28491 }
28499 28492
28500 28493 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP);
28501 28494
28502 28495 #ifdef _MULTI_DATAMODEL
28503 28496 switch (ddi_model_convert_from(flag & FMODELS)) {
28504 28497 case DDI_MODEL_ILP32:
28505 28498 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) {
28506 28499 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28507 28500 "sr_read_cdda: ddi_copyin Failed\n");
28508 28501 kmem_free(cdda, sizeof (struct cdrom_cdda));
28509 28502 return (EFAULT);
28510 28503 }
28511 28504 /* Convert the ILP32 uscsi data from the application to LP64 */
28512 28505 cdrom_cdda32tocdrom_cdda(cdda32, cdda);
28513 28506 break;
28514 28507 case DDI_MODEL_NONE:
28515 28508 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
28516 28509 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28517 28510 "sr_read_cdda: ddi_copyin Failed\n");
28518 28511 kmem_free(cdda, sizeof (struct cdrom_cdda));
28519 28512 return (EFAULT);
28520 28513 }
28521 28514 break;
28522 28515 }
28523 28516 #else /* ! _MULTI_DATAMODEL */
28524 28517 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
28525 28518 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28526 28519 "sr_read_cdda: ddi_copyin Failed\n");
28527 28520 kmem_free(cdda, sizeof (struct cdrom_cdda));
28528 28521 return (EFAULT);
28529 28522 }
28530 28523 #endif /* _MULTI_DATAMODEL */
28531 28524
28532 28525 /*
28533 28526 * Since MMC-2 expects max 3 bytes for length, check if the
28534 28527 * length input is greater than 3 bytes
28535 28528 */
28536 28529 if ((cdda->cdda_length & 0xFF000000) != 0) {
28537 28530 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: "
28538 28531 "cdrom transfer length too large: %d (limit %d)\n",
28539 28532 cdda->cdda_length, 0xFFFFFF);
28540 28533 kmem_free(cdda, sizeof (struct cdrom_cdda));
28541 28534 return (EINVAL);
28542 28535 }
28543 28536
28544 28537 switch (cdda->cdda_subcode) {
28545 28538 case CDROM_DA_NO_SUBCODE:
28546 28539 buflen = CDROM_BLK_2352 * cdda->cdda_length;
28547 28540 break;
28548 28541 case CDROM_DA_SUBQ:
28549 28542 buflen = CDROM_BLK_2368 * cdda->cdda_length;
28550 28543 break;
28551 28544 case CDROM_DA_ALL_SUBCODE:
28552 28545 buflen = CDROM_BLK_2448 * cdda->cdda_length;
28553 28546 break;
28554 28547 case CDROM_DA_SUBCODE_ONLY:
28555 28548 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length;
28556 28549 break;
28557 28550 default:
28558 28551 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28559 28552 "sr_read_cdda: Subcode '0x%x' Not Supported\n",
28560 28553 cdda->cdda_subcode);
28561 28554 kmem_free(cdda, sizeof (struct cdrom_cdda));
28562 28555 return (EINVAL);
28563 28556 }
28564 28557
28565 28558 /* Build and send the command */
28566 28559 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28567 28560 bzero(cdb, CDB_GROUP5);
28568 28561
28569 28562 if (un->un_f_cfg_cdda == TRUE) {
28570 28563 cdb[0] = (char)SCMD_READ_CD;
28571 28564 cdb[1] = 0x04;
28572 28565 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
28573 28566 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
28574 28567 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
28575 28568 cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
28576 28569 cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
28577 28570 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
28578 28571 cdb[8] = ((cdda->cdda_length) & 0x000000ff);
28579 28572 cdb[9] = 0x10;
28580 28573 switch (cdda->cdda_subcode) {
28581 28574 case CDROM_DA_NO_SUBCODE :
28582 28575 cdb[10] = 0x0;
28583 28576 break;
28584 28577 case CDROM_DA_SUBQ :
28585 28578 cdb[10] = 0x2;
28586 28579 break;
28587 28580 case CDROM_DA_ALL_SUBCODE :
28588 28581 cdb[10] = 0x1;
28589 28582 break;
28590 28583 case CDROM_DA_SUBCODE_ONLY :
28591 28584 /* FALLTHROUGH */
28592 28585 default :
28593 28586 kmem_free(cdda, sizeof (struct cdrom_cdda));
28594 28587 kmem_free(com, sizeof (*com));
28595 28588 return (ENOTTY);
28596 28589 }
28597 28590 } else {
28598 28591 cdb[0] = (char)SCMD_READ_CDDA;
28599 28592 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
28600 28593 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
28601 28594 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
28602 28595 cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
28603 28596 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24);
28604 28597 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
28605 28598 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
28606 28599 cdb[9] = ((cdda->cdda_length) & 0x000000ff);
28607 28600 cdb[10] = cdda->cdda_subcode;
28608 28601 }
28609 28602
28610 28603 com->uscsi_cdb = cdb;
28611 28604 com->uscsi_cdblen = CDB_GROUP5;
28612 28605 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data;
28613 28606 com->uscsi_buflen = buflen;
28614 28607 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28615 28608
28616 28609 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
28617 28610 SD_PATH_STANDARD);
28618 28611
28619 28612 kmem_free(cdda, sizeof (struct cdrom_cdda));
28620 28613 kmem_free(com, sizeof (*com));
28621 28614 return (rval);
28622 28615 }
28623 28616
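
The caller must size cdda_data to match the subcode option, since each option changes how many bytes come back per block. A user-space mirror of the driver's sizing switch (a sketch; the CDROM_BLK_* and CDROM_DA_* macros are assumed to come from <sys/cdio.h>, as in the driver):

	#include <sys/cdio.h>
	#include <sys/types.h>

	/* Bytes returned per block for each CDROMCDDA subcode option. */
	static size_t
	cdda_blksize(uchar_t subcode)
	{
		switch (subcode) {
		case CDROM_DA_NO_SUBCODE:
			return (CDROM_BLK_2352);	/* audio data only */
		case CDROM_DA_SUBQ:
			return (CDROM_BLK_2368);	/* audio + Q subcode */
		case CDROM_DA_ALL_SUBCODE:
			return (CDROM_BLK_2448);	/* audio + all subcodes */
		case CDROM_DA_SUBCODE_ONLY:
			return (CDROM_BLK_SUBCODE);	/* subcode only */
		default:
			return (0);			/* unsupported option */
		}
	}
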
28624 28617
28625 28618 /*
28626 28619 * Function: sr_read_cdxa()
28627 28620 *
28628 28621 * Description: This routine is the driver entry point for handling CD-ROM
28629 28622 * ioctl requests to return CD-XA (Extended Architecture) data.
28630 28623 * (CDROMCDXA).
28631 28624 *
28632 28625 * Arguments: dev - the device 'dev_t'
28633 28626 * data - pointer to user provided CD-XA structure specifying
28634 28627 * the data starting address, transfer length, and format
28635 28628 * flag - this argument is a pass through to ddi_copyxxx()
28636 28629 * directly from the mode argument of ioctl().
28637 28630 *
28638 28631 * Return Code: the code returned by sd_send_scsi_cmd()
28639 28632 * EFAULT if ddi_copyxxx() fails
28640 28633 * ENXIO if fail ddi_get_soft_state
28641 28634 * EINVAL if data pointer is NULL
28642 28635 */
28643 28636
28644 28637 static int
28645 28638 sr_read_cdxa(dev_t dev, caddr_t data, int flag)
28646 28639 {
28647 28640 struct sd_lun *un;
28648 28641 struct uscsi_cmd *com;
28649 28642 struct cdrom_cdxa *cdxa;
28650 28643 int rval;
28651 28644 size_t buflen;
28652 28645 char cdb[CDB_GROUP5];
28653 28646 uchar_t read_flags;
28654 28647
28655 28648 #ifdef _MULTI_DATAMODEL
28656 28649 /* To support ILP32 applications in an LP64 world */
28657 28650 struct cdrom_cdxa32 cdrom_cdxa32;
28658 28651 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32;
28659 28652 #endif /* _MULTI_DATAMODEL */
28660 28653
28661 28654 if (data == NULL) {
28662 28655 return (EINVAL);
28663 28656 }
28664 28657
28665 28658 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
28666 28659 return (ENXIO);
28667 28660 }
28668 28661
28669 28662 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP);
28670 28663
28671 28664 #ifdef _MULTI_DATAMODEL
28672 28665 switch (ddi_model_convert_from(flag & FMODELS)) {
28673 28666 case DDI_MODEL_ILP32:
28674 28667 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) {
28675 28668 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28676 28669 return (EFAULT);
28677 28670 }
28678 28671 /*
28679 28672 * Convert the ILP32 uscsi data from the
28680 28673 * application to LP64 for internal use.
28681 28674 */
28682 28675 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
28683 28676 break;
28684 28677 case DDI_MODEL_NONE:
28685 28678 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
28686 28679 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28687 28680 return (EFAULT);
28688 28681 }
28689 28682 break;
28690 28683 }
28691 28684 #else /* ! _MULTI_DATAMODEL */
28692 28685 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
28693 28686 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28694 28687 return (EFAULT);
28695 28688 }
28696 28689 #endif /* _MULTI_DATAMODEL */
28697 28690
28698 28691 /*
28699 28692 	 * Since MMC-2 expects at most a 3-byte transfer length, check
28700 28693 	 * whether the length input exceeds 3 bytes
28701 28694 */
28702 28695 if ((cdxa->cdxa_length & 0xFF000000) != 0) {
28703 28696 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
28704 28697 "cdrom transfer length too large: %d (limit %d)\n",
28705 28698 cdxa->cdxa_length, 0xFFFFFF);
28706 28699 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28707 28700 return (EINVAL);
28708 28701 }
28709 28702
28710 28703 switch (cdxa->cdxa_format) {
28711 28704 case CDROM_XA_DATA:
28712 28705 buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
28713 28706 read_flags = 0x10;
28714 28707 break;
28715 28708 case CDROM_XA_SECTOR_DATA:
28716 28709 buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
28717 28710 read_flags = 0xf8;
28718 28711 break;
28719 28712 case CDROM_XA_DATA_W_ERROR:
28720 28713 buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
28721 28714 read_flags = 0xfc;
28722 28715 break;
28723 28716 default:
28724 28717 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28725 28718 "sr_read_cdxa: Format '0x%x' Not Supported\n",
28726 28719 cdxa->cdxa_format);
28727 28720 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28728 28721 return (EINVAL);
28729 28722 }
28730 28723
28731 28724 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28732 28725 bzero(cdb, CDB_GROUP5);
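	/*
	 * Both CDB variants built below pack the starting address and the
	 * transfer length into the CDB in big-endian order (most
	 * significant byte first).
	 */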
28733 28726 if (un->un_f_mmc_cap == TRUE) {
28734 28727 cdb[0] = (char)SCMD_READ_CD;
28735 28728 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
28736 28729 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
28737 28730 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
28738 28731 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
28739 28732 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
28740 28733 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
28741 28734 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
28742 28735 cdb[9] = (char)read_flags;
28743 28736 } else {
28744 28737 /*
28745 28738 		 * Note: A vendor specific command (0xDB) is being used here to
28746 28739 * request a read of all subcodes.
28747 28740 */
28748 28741 cdb[0] = (char)SCMD_READ_CDXA;
28749 28742 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
28750 28743 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
28751 28744 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
28752 28745 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
28753 28746 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24);
28754 28747 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
28755 28748 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
28756 28749 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff);
28757 28750 cdb[10] = cdxa->cdxa_format;
28758 28751 }
28759 28752 com->uscsi_cdb = cdb;
28760 28753 com->uscsi_cdblen = CDB_GROUP5;
28761 28754 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data;
28762 28755 com->uscsi_buflen = buflen;
28763 28756 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28764 28757 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
28765 28758 SD_PATH_STANDARD);
28766 28759 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28767 28760 kmem_free(com, sizeof (*com));
28768 28761 return (rval);
28769 28762 }
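/*
 * For reference, a minimal userland sketch of driving the CDROMCDXA
 * ioctl handled above. The device path and error handling are
 * illustrative assumptions, not part of this driver:
 *
 *	struct cdrom_cdxa cdxa;
 *	char xabuf[2048 * 16];
 *	int fd = open("/dev/rdsk/c0t2d0s2", O_RDONLY);	(hypothetical path)
 *
 *	cdxa.cdxa_addr = 0;			(starting address)
 *	cdxa.cdxa_length = 16;			(sixteen blocks)
 *	cdxa.cdxa_format = CDROM_XA_DATA;	(2048-byte user data)
 *	cdxa.cdxa_data = xabuf;
 *	if (ioctl(fd, CDROMCDXA, &cdxa) < 0)
 *		perror("CDROMCDXA");
 */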
28770 28763
28771 28764
28772 28765 /*
28773 28766 * Function: sr_eject()
28774 28767 *
28775 28768 * Description: This routine is the driver entry point for handling CD-ROM
28776 28769 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT)
28777 28770 *
28778 28771 * Arguments: dev - the device 'dev_t'
28779 28772 *
28780 28773 * Return Code: the code returned by sd_send_scsi_cmd()
28781 28774 */
28782 28775
28783 28776 static int
28784 28777 sr_eject(dev_t dev)
28785 28778 {
28786 28779 struct sd_lun *un;
28787 28780 int rval;
28788 28781 sd_ssc_t *ssc;
28789 28782
28790 28783 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28791 28784 (un->un_state == SD_STATE_OFFLINE)) {
28792 28785 return (ENXIO);
28793 28786 }
28794 28787
28795 28788 /*
28796 28789 * To prevent race conditions with the eject
28797 28790 * command, keep track of an eject command as
28798 28791 * it progresses. If we are already handling
28799 28792 * an eject command in the driver for the given
28800 28793 	 * unit and another request to eject is received,
28801 28794 * immediately return EAGAIN so we don't lose
28802 28795 * the command if the current eject command fails.
28803 28796 */
28804 28797 mutex_enter(SD_MUTEX(un));
28805 28798 if (un->un_f_ejecting == TRUE) {
28806 28799 mutex_exit(SD_MUTEX(un));
28807 28800 return (EAGAIN);
28808 28801 }
28809 28802 un->un_f_ejecting = TRUE;
28810 28803 mutex_exit(SD_MUTEX(un));
28811 28804
28812 28805 ssc = sd_ssc_init(un);
28813 28806 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
28814 28807 SD_PATH_STANDARD);
28815 28808 sd_ssc_fini(ssc);
28816 28809
28817 28810 if (rval != 0) {
28818 28811 mutex_enter(SD_MUTEX(un));
28819 28812 un->un_f_ejecting = FALSE;
28820 28813 mutex_exit(SD_MUTEX(un));
28821 28814 return (rval);
28822 28815 }
28823 28816
28824 28817 ssc = sd_ssc_init(un);
28825 28818 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
28826 28819 SD_TARGET_EJECT, SD_PATH_STANDARD);
28827 28820 sd_ssc_fini(ssc);
28828 28821
28829 28822 if (rval == 0) {
28830 28823 mutex_enter(SD_MUTEX(un));
28831 28824 sr_ejected(un);
28832 28825 un->un_mediastate = DKIO_EJECTED;
28833 28826 un->un_f_ejecting = FALSE;
28834 28827 cv_broadcast(&un->un_state_cv);
28835 28828 mutex_exit(SD_MUTEX(un));
28836 28829 } else {
28837 28830 mutex_enter(SD_MUTEX(un));
28838 28831 un->un_f_ejecting = FALSE;
28839 28832 mutex_exit(SD_MUTEX(un));
28840 28833 }
28841 28834 return (rval);
28842 28835 }
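/*
 * For reference, a userland caller typically reaches this entry point
 * with a sketch like the following (the device path is an illustrative
 * assumption). Note that a concurrent eject on the same unit fails
 * with EAGAIN, per the race-prevention logic above:
 *
 *	int fd = open("/dev/rdsk/c0t2d0s2", O_RDONLY | O_NDELAY);
 *	if (ioctl(fd, CDROMEJECT, 0) < 0)
 *		perror("CDROMEJECT");
 */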
28843 28836
28844 28837
28845 28838 /*
28846 28839 * Function: sr_ejected()
28847 28840 *
28848 28841 * Description: This routine updates the soft state structure to invalidate the
28849 28842 * geometry information after the media has been ejected or a
28850 28843 * media eject has been detected.
28851 28844 *
28852 28845 * Arguments: un - driver soft state (unit) structure
28853 28846 */
28854 28847
28855 28848 static void
28856 28849 sr_ejected(struct sd_lun *un)
28857 28850 {
28858 28851 struct sd_errstats *stp;
28859 28852
28860 28853 ASSERT(un != NULL);
28861 28854 ASSERT(mutex_owned(SD_MUTEX(un)));
28862 28855
28863 28856 un->un_f_blockcount_is_valid = FALSE;
28864 28857 un->un_f_tgt_blocksize_is_valid = FALSE;
28865 28858 mutex_exit(SD_MUTEX(un));
28866 28859 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
28867 28860 mutex_enter(SD_MUTEX(un));
28868 28861
28869 28862 if (un->un_errstats != NULL) {
28870 28863 stp = (struct sd_errstats *)un->un_errstats->ks_data;
28871 28864 stp->sd_capacity.value.ui64 = 0;
28872 28865 }
28873 28866 }
28874 28867
28875 28868
28876 28869 /*
28877 28870 * Function: sr_check_wp()
28878 28871 *
28879 28872 * Description: This routine checks the write protection of a removable
28880 28873 * media disk and hotpluggable devices via the write protect bit of
28881 28874 * the Mode Page Header device specific field. Some devices choke
28882 28875  *		on an unsupported mode page. To work around this issue, this
28883 28876  *		routine uses the 0x3f mode page (request for all pages) for
28884 28877  *		all device types.
28885 28878 *
28886 28879 * Arguments: dev - the device 'dev_t'
28887 28880 *
28888 28881 * Return Code: int indicating if the device is write protected (1) or not (0)
28889 28882 *
28890 28883 * Context: Kernel thread.
28891 28884 *
28892 28885 */
28893 28886
28894 28887 static int
28895 28888 sr_check_wp(dev_t dev)
28896 28889 {
28897 28890 struct sd_lun *un;
28898 28891 uchar_t device_specific;
28899 28892 uchar_t *sense;
28900 28893 int hdrlen;
28901 28894 int rval = FALSE;
28902 28895 int status;
28903 28896 sd_ssc_t *ssc;
28904 28897
28905 28898 /*
28906 28899 * Note: The return codes for this routine should be reworked to
28907 28900 * properly handle the case of a NULL softstate.
28908 28901 */
28909 28902 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
28910 28903 return (FALSE);
28911 28904 }
28912 28905
28913 28906 if (un->un_f_cfg_is_atapi == TRUE) {
28914 28907 /*
28915 28908 * The mode page contents are not required; set the allocation
28916 28909 * length for the mode page header only
28917 28910 */
28918 28911 hdrlen = MODE_HEADER_LENGTH_GRP2;
28919 28912 sense = kmem_zalloc(hdrlen, KM_SLEEP);
28920 28913 ssc = sd_ssc_init(un);
28921 28914 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen,
28922 28915 MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
28923 28916 sd_ssc_fini(ssc);
28924 28917 if (status != 0)
28925 28918 goto err_exit;
28926 28919 device_specific =
28927 28920 ((struct mode_header_grp2 *)sense)->device_specific;
28928 28921 } else {
28929 28922 hdrlen = MODE_HEADER_LENGTH;
28930 28923 sense = kmem_zalloc(hdrlen, KM_SLEEP);
28931 28924 ssc = sd_ssc_init(un);
28932 28925 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen,
28933 28926 MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
28934 28927 sd_ssc_fini(ssc);
28935 28928 if (status != 0)
28936 28929 goto err_exit;
28937 28930 device_specific =
28938 28931 ((struct mode_header *)sense)->device_specific;
28939 28932 }
28940 28933
28941 28934
28942 28935 /*
28943 28936 	 * Check the write protect bit in the device specific field.
28944 28937 	 * If the mode sense above failed (not all disks understand that
28945 28938 	 * query), we return FALSE, treating the device as not write protected.
28946 28939 */
28947 28940 if (device_specific & WRITE_PROTECT) {
28948 28941 rval = TRUE;
28949 28942 }
28950 28943
28951 28944 err_exit:
28952 28945 kmem_free(sense, hdrlen);
28953 28946 return (rval);
28954 28947 }
28955 28948
28956 28949 /*
28957 28950 * Function: sr_volume_ctrl()
28958 28951 *
28959 28952 * Description: This routine is the driver entry point for handling CD-ROM
28960 28953 * audio output volume ioctl requests. (CDROMVOLCTRL)
28961 28954 *
28962 28955 * Arguments: dev - the device 'dev_t'
28963 28956 * data - pointer to user audio volume control structure
28964 28957 * flag - this argument is a pass through to ddi_copyxxx()
28965 28958 * directly from the mode argument of ioctl().
28966 28959 *
28967 28960 * Return Code: the code returned by sd_send_scsi_cmd()
28968 28961 * EFAULT if ddi_copyxxx() fails
28969 28962  *		ENXIO if ddi_get_soft_state fails
28970 28963 * EINVAL if data pointer is NULL
28971 28964 *
28972 28965 */
28973 28966
28974 28967 static int
28975 28968 sr_volume_ctrl(dev_t dev, caddr_t data, int flag)
28976 28969 {
28977 28970 struct sd_lun *un;
28978 28971 struct cdrom_volctrl volume;
28979 28972 struct cdrom_volctrl *vol = &volume;
28980 28973 uchar_t *sense_page;
28981 28974 uchar_t *select_page;
28982 28975 uchar_t *sense;
28983 28976 uchar_t *select;
28984 28977 int sense_buflen;
28985 28978 int select_buflen;
28986 28979 int rval;
28987 28980 sd_ssc_t *ssc;
28988 28981
28989 28982 if (data == NULL) {
28990 28983 return (EINVAL);
28991 28984 }
28992 28985
28993 28986 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28994 28987 (un->un_state == SD_STATE_OFFLINE)) {
28995 28988 return (ENXIO);
28996 28989 }
28997 28990
28998 28991 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) {
28999 28992 return (EFAULT);
29000 28993 }
29001 28994
29002 28995 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
29003 28996 struct mode_header_grp2 *sense_mhp;
29004 28997 struct mode_header_grp2 *select_mhp;
29005 28998 int bd_len;
29006 28999
29007 29000 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN;
29008 29001 select_buflen = MODE_HEADER_LENGTH_GRP2 +
29009 29002 MODEPAGE_AUDIO_CTRL_LEN;
29010 29003 sense = kmem_zalloc(sense_buflen, KM_SLEEP);
29011 29004 select = kmem_zalloc(select_buflen, KM_SLEEP);
29012 29005 ssc = sd_ssc_init(un);
29013 29006 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
29014 29007 sense_buflen, MODEPAGE_AUDIO_CTRL,
29015 29008 SD_PATH_STANDARD);
29016 29009 sd_ssc_fini(ssc);
29017 29010
29018 29011 if (rval != 0) {
29019 29012 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
29020 29013 "sr_volume_ctrl: Mode Sense Failed\n");
29021 29014 kmem_free(sense, sense_buflen);
29022 29015 kmem_free(select, select_buflen);
29023 29016 return (rval);
29024 29017 }
29025 29018 sense_mhp = (struct mode_header_grp2 *)sense;
29026 29019 select_mhp = (struct mode_header_grp2 *)select;
29027 29020 bd_len = (sense_mhp->bdesc_length_hi << 8) |
29028 29021 sense_mhp->bdesc_length_lo;
29029 29022 if (bd_len > MODE_BLK_DESC_LENGTH) {
29030 29023 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29031 29024 "sr_volume_ctrl: Mode Sense returned invalid "
29032 29025 "block descriptor length\n");
29033 29026 kmem_free(sense, sense_buflen);
29034 29027 kmem_free(select, select_buflen);
29035 29028 return (EIO);
29036 29029 }
29037 29030 sense_page = (uchar_t *)
29038 29031 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
29039 29032 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2);
29040 29033 select_mhp->length_msb = 0;
29041 29034 select_mhp->length_lsb = 0;
29042 29035 select_mhp->bdesc_length_hi = 0;
29043 29036 select_mhp->bdesc_length_lo = 0;
29044 29037 } else {
29045 29038 struct mode_header *sense_mhp, *select_mhp;
29046 29039
29047 29040 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
29048 29041 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
29049 29042 sense = kmem_zalloc(sense_buflen, KM_SLEEP);
29050 29043 select = kmem_zalloc(select_buflen, KM_SLEEP);
29051 29044 ssc = sd_ssc_init(un);
29052 29045 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
29053 29046 sense_buflen, MODEPAGE_AUDIO_CTRL,
29054 29047 SD_PATH_STANDARD);
29055 29048 sd_ssc_fini(ssc);
29056 29049
29057 29050 if (rval != 0) {
29058 29051 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29059 29052 "sr_volume_ctrl: Mode Sense Failed\n");
29060 29053 kmem_free(sense, sense_buflen);
29061 29054 kmem_free(select, select_buflen);
29062 29055 return (rval);
29063 29056 }
29064 29057 sense_mhp = (struct mode_header *)sense;
29065 29058 select_mhp = (struct mode_header *)select;
29066 29059 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) {
29067 29060 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29068 29061 "sr_volume_ctrl: Mode Sense returned invalid "
29069 29062 "block descriptor length\n");
29070 29063 kmem_free(sense, sense_buflen);
29071 29064 kmem_free(select, select_buflen);
29072 29065 return (EIO);
29073 29066 }
29074 29067 sense_page = (uchar_t *)
29075 29068 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
29076 29069 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH);
29077 29070 select_mhp->length = 0;
29078 29071 select_mhp->bdesc_length = 0;
29079 29072 }
29080 29073 /*
29081 29074 	 * Note: An audio control data structure could be created and overlaid
29082 29075 * on the following in place of the array indexing method implemented.
29083 29076 */
29084 29077
29085 29078 /* Build the select data for the user volume data */
29086 29079 select_page[0] = MODEPAGE_AUDIO_CTRL;
29087 29080 select_page[1] = 0xE;
29088 29081 /* Set the immediate bit */
29089 29082 select_page[2] = 0x04;
29090 29083 /* Zero out reserved fields */
29091 29084 select_page[3] = 0x00;
29092 29085 select_page[4] = 0x00;
29093 29086 /* Return sense data for fields not to be modified */
29094 29087 select_page[5] = sense_page[5];
29095 29088 select_page[6] = sense_page[6];
29096 29089 select_page[7] = sense_page[7];
29097 29090 /* Set the user specified volume levels for channel 0 and 1 */
29098 29091 select_page[8] = 0x01;
29099 29092 select_page[9] = vol->channel0;
29100 29093 select_page[10] = 0x02;
29101 29094 select_page[11] = vol->channel1;
29102 29095 	/* Channels 2 and 3 are currently unsupported, so return the sense data */
29103 29096 select_page[12] = sense_page[12];
29104 29097 select_page[13] = sense_page[13];
29105 29098 select_page[14] = sense_page[14];
29106 29099 select_page[15] = sense_page[15];
29107 29100
29108 29101 ssc = sd_ssc_init(un);
29109 29102 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
29110 29103 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select,
29111 29104 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
29112 29105 } else {
29113 29106 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
29114 29107 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
29115 29108 }
29116 29109 sd_ssc_fini(ssc);
29117 29110
29118 29111 kmem_free(sense, sense_buflen);
29119 29112 kmem_free(select, select_buflen);
29120 29113 return (rval);
29121 29114 }
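/*
 * For reference, a hedged userland sketch of the CDROMVOLCTRL request
 * handled above. Only channel0 and channel1 are honored; channels 2
 * and 3 are left at their sensed values:
 *
 *	struct cdrom_volctrl vol;
 *	vol.channel0 = 128;	(roughly half volume, port 0)
 *	vol.channel1 = 128;	(roughly half volume, port 1)
 *	if (ioctl(fd, CDROMVOLCTRL, &vol) < 0)
 *		perror("CDROMVOLCTRL");
 */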
29122 29115
29123 29116
29124 29117 /*
29125 29118 * Function: sr_read_sony_session_offset()
29126 29119 *
29127 29120 * Description: This routine is the driver entry point for handling CD-ROM
29128 29121 * ioctl requests for session offset information. (CDROMREADOFFSET)
29129 29122 * The address of the first track in the last session of a
29130 29123  *		multi-session CD-ROM is returned.
29131 29124 *
29132 29125 * Note: This routine uses a vendor specific key value in the
29133 29126 * command control field without implementing any vendor check here
29134 29127 * or in the ioctl routine.
29135 29128 *
29136 29129 * Arguments: dev - the device 'dev_t'
29137 29130 * data - pointer to an int to hold the requested address
29138 29131 * flag - this argument is a pass through to ddi_copyxxx()
29139 29132 * directly from the mode argument of ioctl().
29140 29133 *
29141 29134 * Return Code: the code returned by sd_send_scsi_cmd()
29142 29135 * EFAULT if ddi_copyxxx() fails
29143 29136  *		ENXIO if ddi_get_soft_state fails
29144 29137 * EINVAL if data pointer is NULL
29145 29138 */
29146 29139
29147 29140 static int
29148 29141 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag)
29149 29142 {
29150 29143 struct sd_lun *un;
29151 29144 struct uscsi_cmd *com;
29152 29145 caddr_t buffer;
29153 29146 char cdb[CDB_GROUP1];
29154 29147 int session_offset = 0;
29155 29148 int rval;
29156 29149
29157 29150 if (data == NULL) {
29158 29151 return (EINVAL);
29159 29152 }
29160 29153
29161 29154 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
29162 29155 (un->un_state == SD_STATE_OFFLINE)) {
29163 29156 return (ENXIO);
29164 29157 }
29165 29158
29166 29159 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP);
29167 29160 bzero(cdb, CDB_GROUP1);
29168 29161 cdb[0] = SCMD_READ_TOC;
29169 29162 /*
29170 29163 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
29171 29164 * (4 byte TOC response header + 8 byte response data)
29172 29165 */
29173 29166 cdb[8] = SONY_SESSION_OFFSET_LEN;
29174 29167 /* Byte 9 is the control byte. A vendor specific value is used */
29175 29168 cdb[9] = SONY_SESSION_OFFSET_KEY;
29176 29169 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
29177 29170 com->uscsi_cdb = cdb;
29178 29171 com->uscsi_cdblen = CDB_GROUP1;
29179 29172 com->uscsi_bufaddr = buffer;
29180 29173 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
29181 29174 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
29182 29175
29183 29176 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
29184 29177 SD_PATH_STANDARD);
29185 29178 if (rval != 0) {
29186 29179 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
29187 29180 kmem_free(com, sizeof (*com));
29188 29181 return (rval);
29189 29182 }
29190 29183 if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
29191 29184 session_offset =
29192 29185 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
29193 29186 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
29194 29187 /*
29195 29188 		 * The offset is returned in units of the current lbasize blocks.
29196 29189 		 * Convert to 2K blocks (e.g., >>2 for 512-byte blocks) for the user.
29197 29190 */
29198 29191 if (un->un_tgt_blocksize == CDROM_BLK_512) {
29199 29192 session_offset >>= 2;
29200 29193 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
29201 29194 session_offset >>= 1;
29202 29195 }
29203 29196 }
29204 29197
29205 29198 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
29206 29199 rval = EFAULT;
29207 29200 }
29208 29201
29209 29202 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
29210 29203 kmem_free(com, sizeof (*com));
29211 29204 return (rval);
29212 29205 }
29213 29206
29214 29207
29215 29208 /*
29216 29209 * Function: sd_wm_cache_constructor()
29217 29210 *
29218 29211 * Description: Cache Constructor for the wmap cache for the read/modify/write
29219 29212 * devices.
29220 29213 *
29221 29214 * Arguments: wm - A pointer to the sd_w_map to be initialized.
29222 29215 * un - sd_lun structure for the device.
29223 29216  *		flags - the KM flags passed to the constructor
29224 29217 *
29225 29218 * Return Code: 0 on success.
29226 29219 * -1 on failure.
29227 29220 */
29228 29221
29229 29222 /*ARGSUSED*/
29230 29223 static int
29231 29224 sd_wm_cache_constructor(void *wm, void *un, int flags)
29232 29225 {
29233 29226 bzero(wm, sizeof (struct sd_w_map));
29234 29227 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
29235 29228 return (0);
29236 29229 }
29237 29230
29238 29231
29239 29232 /*
29240 29233 * Function: sd_wm_cache_destructor()
29241 29234 *
29242 29235 * Description: Cache destructor for the wmap cache for the read/modify/write
29243 29236 * devices.
29244 29237 *
29245 29238  * Arguments: wm - A pointer to the sd_w_map to be destroyed.
29246 29239 * un - sd_lun structure for the device.
29247 29240 */
29248 29241 /*ARGSUSED*/
29249 29242 static void
29250 29243 sd_wm_cache_destructor(void *wm, void *un)
29251 29244 {
29252 29245 cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
29253 29246 }
29254 29247
29255 29248
29256 29249 /*
29257 29250 * Function: sd_range_lock()
29258 29251 *
29259 29252 * Description: Lock the range of blocks specified as parameter to ensure
29260 29253  *		that read-modify-write is atomic and no other I/O writes
29261 29254 * to the same location. The range is specified in terms
29262 29255 * of start and end blocks. Block numbers are the actual
29263 29256  *		media block numbers and not system block numbers.
29264 29257 *
29265 29258 * Arguments: un - sd_lun structure for the device.
29266 29259 * startb - The starting block number
29267 29260 * endb - The end block number
29268 29261 * typ - type of i/o - simple/read_modify_write
29269 29262 *
29270 29263 * Return Code: wm - pointer to the wmap structure.
29271 29264 *
29272 29265 * Context: This routine can sleep.
29273 29266 */
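/*
 * Informal sketch of the state machine implemented below:
 *
 *	SD_WM_CHK_LIST	 --(no overlap)-->   SD_WM_LOCK_RANGE --> SD_WM_DONE
 *	SD_WM_CHK_LIST	 --(overlap)----->   SD_WM_WAIT_MAP   --> SD_WM_CHK_LIST
 *	SD_WM_LOCK_RANGE --(alloc slept)-->  SD_WM_CHK_LIST   (recheck)
 */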
29274 29267
29275 29268 static struct sd_w_map *
29276 29269 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
29277 29270 {
29278 29271 struct sd_w_map *wmp = NULL;
29279 29272 struct sd_w_map *sl_wmp = NULL;
29280 29273 struct sd_w_map *tmp_wmp;
29281 29274 wm_state state = SD_WM_CHK_LIST;
29282 29275
29283 29276
29284 29277 ASSERT(un != NULL);
29285 29278 ASSERT(!mutex_owned(SD_MUTEX(un)));
29286 29279
29287 29280 mutex_enter(SD_MUTEX(un));
29288 29281
29289 29282 while (state != SD_WM_DONE) {
29290 29283
29291 29284 switch (state) {
29292 29285 case SD_WM_CHK_LIST:
29293 29286 /*
29294 29287 * This is the starting state. Check the wmap list
29295 29288 * to see if the range is currently available.
29296 29289 */
29297 29290 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
29298 29291 /*
29299 29292 * If this is a simple write and no rmw
29300 29293 * i/o is pending then try to lock the
29301 29294 * range as the range should be available.
29302 29295 */
29303 29296 state = SD_WM_LOCK_RANGE;
29304 29297 } else {
29305 29298 tmp_wmp = sd_get_range(un, startb, endb);
29306 29299 if (tmp_wmp != NULL) {
29307 29300 if ((wmp != NULL) && ONLIST(un, wmp)) {
29308 29301 /*
29309 29302 * Should not keep onlist wmps
29310 29303 					 * while waiting; this macro
29311 29304 					 * will also set wmp = NULL.
29312 29305 */
29313 29306 FREE_ONLIST_WMAP(un, wmp);
29314 29307 }
29315 29308 /*
29316 29309 				 * sl_wmp is the wmap on which the wait
29317 29310 				 * is done. Since tmp_wmp points
29318 29311 				 * to the in-use wmap, set sl_wmp to
29319 29312 				 * tmp_wmp and change the state to wait.
29320 29313 */
29321 29314 sl_wmp = tmp_wmp;
29322 29315 state = SD_WM_WAIT_MAP;
29323 29316 } else {
29324 29317 state = SD_WM_LOCK_RANGE;
29325 29318 }
29326 29319
29327 29320 }
29328 29321 break;
29329 29322
29330 29323 case SD_WM_LOCK_RANGE:
29331 29324 ASSERT(un->un_wm_cache);
29332 29325 /*
29333 29326 			 * The range needs to be locked; try to get a wmap.
29334 29327 			 * First attempt it with KM_NOSLEEP; we want to avoid a sleep
29335 29328 			 * if possible, as we will have to release the sd mutex
29336 29329 			 * if we have to sleep.
29337 29330 */
29338 29331 if (wmp == NULL)
29339 29332 wmp = kmem_cache_alloc(un->un_wm_cache,
29340 29333 KM_NOSLEEP);
29341 29334 if (wmp == NULL) {
29342 29335 mutex_exit(SD_MUTEX(un));
29343 29336 _NOTE(DATA_READABLE_WITHOUT_LOCK
29344 29337 (sd_lun::un_wm_cache))
29345 29338 wmp = kmem_cache_alloc(un->un_wm_cache,
29346 29339 KM_SLEEP);
29347 29340 mutex_enter(SD_MUTEX(un));
29348 29341 /*
29349 29342 				 * We released the mutex, so recheck and go to
29350 29343 				 * the check list state.
29351 29344 */
29352 29345 state = SD_WM_CHK_LIST;
29353 29346 } else {
29354 29347 /*
29355 29348 				 * We exit the state machine since we
29356 29349 				 * have the wmap. Do the housekeeping first:
29357 29350 				 * place the wmap on the wmap list if it is not
29358 29351 				 * on it already, and then set the state to done.
29359 29352 */
29360 29353 wmp->wm_start = startb;
29361 29354 wmp->wm_end = endb;
29362 29355 wmp->wm_flags = typ | SD_WM_BUSY;
29363 29356 if (typ & SD_WTYPE_RMW) {
29364 29357 un->un_rmw_count++;
29365 29358 }
29366 29359 /*
29367 29360 				 * If not already on the list, then link it.
29368 29361 */
29369 29362 if (!ONLIST(un, wmp)) {
29370 29363 wmp->wm_next = un->un_wm;
29371 29364 wmp->wm_prev = NULL;
29372 29365 if (wmp->wm_next)
29373 29366 wmp->wm_next->wm_prev = wmp;
29374 29367 un->un_wm = wmp;
29375 29368 }
29376 29369 state = SD_WM_DONE;
29377 29370 }
29378 29371 break;
29379 29372
29380 29373 case SD_WM_WAIT_MAP:
29381 29374 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
29382 29375 /*
29383 29376 * Wait is done on sl_wmp, which is set in the
29384 29377 * check_list state.
29385 29378 */
29386 29379 sl_wmp->wm_wanted_count++;
29387 29380 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
29388 29381 sl_wmp->wm_wanted_count--;
29389 29382 /*
29390 29383 * We can reuse the memory from the completed sl_wmp
29391 29384 			 * lock range for our new lock, but only if no one is
29392 29385 * waiting for it.
29393 29386 */
29394 29387 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
29395 29388 if (sl_wmp->wm_wanted_count == 0) {
29396 29389 if (wmp != NULL) {
29397 29390 CHK_N_FREEWMP(un, wmp);
29398 29391 }
29399 29392 wmp = sl_wmp;
29400 29393 }
29401 29394 sl_wmp = NULL;
29402 29395 /*
29403 29396 * After waking up, need to recheck for availability of
29404 29397 * range.
29405 29398 */
29406 29399 state = SD_WM_CHK_LIST;
29407 29400 break;
29408 29401
29409 29402 default:
29410 29403 panic("sd_range_lock: "
29411 29404 "Unknown state %d in sd_range_lock", state);
29412 29405 /*NOTREACHED*/
29413 29406 } /* switch(state) */
29414 29407
29415 29408 } /* while(state != SD_WM_DONE) */
29416 29409
29417 29410 mutex_exit(SD_MUTEX(un));
29418 29411
29419 29412 ASSERT(wmp != NULL);
29420 29413
29421 29414 return (wmp);
29422 29415 }
29423 29416
29424 29417
29425 29418 /*
29426 29419 * Function: sd_get_range()
29427 29420 *
29428 29421  * Description: Find whether there is any I/O overlapping this one.
29429 29422  *		Returns the write map of the first such I/O, NULL otherwise.
29430 29423 *
29431 29424 * Arguments: un - sd_lun structure for the device.
29432 29425 * startb - The starting block number
29433 29426 * endb - The end block number
29434 29427 *
29435 29428 * Return Code: wm - pointer to the wmap structure.
29436 29429 */
29437 29430
29438 29431 static struct sd_w_map *
29439 29432 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
29440 29433 {
29441 29434 struct sd_w_map *wmp;
29442 29435
29443 29436 ASSERT(un != NULL);
29444 29437
29445 29438 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
29446 29439 if (!(wmp->wm_flags & SD_WM_BUSY)) {
29447 29440 continue;
29448 29441 }
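		/*
		 * This map is busy; there is an overlap if either endpoint
		 * of the requested range falls within [wm_start, wm_end].
		 */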
29449 29442 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
29450 29443 break;
29451 29444 }
29452 29445 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
29453 29446 break;
29454 29447 }
29455 29448 }
29456 29449
29457 29450 return (wmp);
29458 29451 }
29459 29452
29460 29453
29461 29454 /*
29462 29455 * Function: sd_free_inlist_wmap()
29463 29456 *
29464 29457 * Description: Unlink and free a write map struct.
29465 29458 *
29466 29459 * Arguments: un - sd_lun structure for the device.
29467 29460 * wmp - sd_w_map which needs to be unlinked.
29468 29461 */
29469 29462
29470 29463 static void
29471 29464 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
29472 29465 {
29473 29466 ASSERT(un != NULL);
29474 29467
29475 29468 if (un->un_wm == wmp) {
29476 29469 un->un_wm = wmp->wm_next;
29477 29470 } else {
29478 29471 wmp->wm_prev->wm_next = wmp->wm_next;
29479 29472 }
29480 29473
29481 29474 if (wmp->wm_next) {
29482 29475 wmp->wm_next->wm_prev = wmp->wm_prev;
29483 29476 }
29484 29477
29485 29478 wmp->wm_next = wmp->wm_prev = NULL;
29486 29479
29487 29480 kmem_cache_free(un->un_wm_cache, wmp);
29488 29481 }
29489 29482
29490 29483
29491 29484 /*
29492 29485 * Function: sd_range_unlock()
29493 29486 *
29494 29487 * Description: Unlock the range locked by wm.
29495 29488 * Free write map if nobody else is waiting on it.
29496 29489 *
29497 29490 * Arguments: un - sd_lun structure for the device.
29498 29491  *		wm  - sd_w_map which needs to be unlocked.
29499 29492 */
29500 29493
29501 29494 static void
29502 29495 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
29503 29496 {
29504 29497 ASSERT(un != NULL);
29505 29498 ASSERT(wm != NULL);
29506 29499 ASSERT(!mutex_owned(SD_MUTEX(un)));
29507 29500
29508 29501 mutex_enter(SD_MUTEX(un));
29509 29502
29510 29503 if (wm->wm_flags & SD_WTYPE_RMW) {
29511 29504 un->un_rmw_count--;
29512 29505 }
29513 29506
29514 29507 if (wm->wm_wanted_count) {
29515 29508 wm->wm_flags = 0;
29516 29509 /*
29517 29510 * Broadcast that the wmap is available now.
29518 29511 */
29519 29512 cv_broadcast(&wm->wm_avail);
29520 29513 } else {
29521 29514 /*
29522 29515 		 * If no one is waiting on the map, it should be freed.
29523 29516 */
29524 29517 sd_free_inlist_wmap(un, wm);
29525 29518 }
29526 29519
29527 29520 mutex_exit(SD_MUTEX(un));
29528 29521 }
29529 29522
29530 29523
29531 29524 /*
29532 29525 * Function: sd_read_modify_write_task
29533 29526 *
29534 29527 * Description: Called from a taskq thread to initiate the write phase of
29535 29528 * a read-modify-write request. This is used for targets where
29536 29529 * un->un_sys_blocksize != un->un_tgt_blocksize.
29537 29530 *
29538 29531 * Arguments: arg - a pointer to the buf(9S) struct for the write command.
29539 29532 *
29540 29533 * Context: Called under taskq thread context.
29541 29534 */
29542 29535
29543 29536 static void
29544 29537 sd_read_modify_write_task(void *arg)
29545 29538 {
29546 29539 struct sd_mapblocksize_info *bsp;
29547 29540 struct buf *bp;
29548 29541 struct sd_xbuf *xp;
29549 29542 struct sd_lun *un;
29550 29543
29551 29544 bp = arg; /* The bp is given in arg */
29552 29545 ASSERT(bp != NULL);
29553 29546
29554 29547 /* Get the pointer to the layer-private data struct */
29555 29548 xp = SD_GET_XBUF(bp);
29556 29549 ASSERT(xp != NULL);
29557 29550 bsp = xp->xb_private;
29558 29551 ASSERT(bsp != NULL);
29559 29552
29560 29553 un = SD_GET_UN(bp);
29561 29554 ASSERT(un != NULL);
29562 29555 ASSERT(!mutex_owned(SD_MUTEX(un)));
29563 29556
29564 29557 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29565 29558 "sd_read_modify_write_task: entry: buf:0x%p\n", bp);
29566 29559
29567 29560 /*
29568 29561 * This is the write phase of a read-modify-write request, called
29569 29562 * under the context of a taskq thread in response to the completion
29570 29563 * of the read portion of the rmw request completing under interrupt
29571 29564 * context. The write request must be sent from here down the iostart
29572 29565 * chain as if it were being sent from sd_mapblocksize_iostart(), so
29573 29566 * we use the layer index saved in the layer-private data area.
29574 29567 */
29575 29568 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);
29576 29569
29577 29570 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29578 29571 "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
29579 29572 }
29580 29573
29581 29574
29582 29575 /*
29583 29576 * Function: sddump_do_read_of_rmw()
29584 29577 *
29585 29578  * Description: This routine will be called from sddump. If sddump is called
29586 29579  *		with an I/O which is not aligned on a device blocksize boundary,
29587 29580  *		then the write has to be converted to a read-modify-write.
29588 29581  *		Do the read part here in order to keep sddump simple.
29589 29582  *		Note that the sd_mutex is held across the call to this
29590 29583 * routine.
29591 29584 *
29592 29585 * Arguments: un - sd_lun
29593 29586 * blkno - block number in terms of media block size.
29594 29587 * nblk - number of blocks.
29595 29588 * bpp - pointer to pointer to the buf structure. On return
29596 29589 * from this function, *bpp points to the valid buffer
29597 29590 * to which the write has to be done.
29598 29591 *
29599 29592 * Return Code: 0 for success or errno-type return code
29600 29593 */
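/*
 * Worked example with illustrative numbers: with un_tgt_blocksize of
 * 2048 and a dump write of one 512-byte system block, the enclosing
 * 2048-byte target block is first read here in full; the caller then
 * modifies the affected 512 bytes in place and writes the whole
 * 2048-byte block back out.
 */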
29601 29594
29602 29595 static int
29603 29596 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
29604 29597 struct buf **bpp)
29605 29598 {
29606 29599 int err;
29607 29600 int i;
29608 29601 int rval;
29609 29602 struct buf *bp;
29610 29603 struct scsi_pkt *pkt = NULL;
29611 29604 uint32_t target_blocksize;
29612 29605
29613 29606 ASSERT(un != NULL);
29614 29607 ASSERT(mutex_owned(SD_MUTEX(un)));
29615 29608
29616 29609 target_blocksize = un->un_tgt_blocksize;
29617 29610
29618 29611 mutex_exit(SD_MUTEX(un));
29619 29612
29620 29613 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
29621 29614 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
29622 29615 if (bp == NULL) {
29623 29616 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29624 29617 "no resources for dumping; giving up");
29625 29618 err = ENOMEM;
29626 29619 goto done;
29627 29620 }
29628 29621
29629 29622 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
29630 29623 blkno, nblk);
29631 29624 if (rval != 0) {
29632 29625 scsi_free_consistent_buf(bp);
29633 29626 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29634 29627 "no resources for dumping; giving up");
29635 29628 err = ENOMEM;
29636 29629 goto done;
29637 29630 }
29638 29631
29639 29632 pkt->pkt_flags |= FLAG_NOINTR;
29640 29633
29641 29634 err = EIO;
29642 29635 for (i = 0; i < SD_NDUMP_RETRIES; i++) {
29643 29636
29644 29637 /*
29645 29638 * Scsi_poll returns 0 (success) if the command completes and
29646 29639 * the status block is STATUS_GOOD. We should only check
29647 29640 * errors if this condition is not true. Even then we should
29648 29641 * send our own request sense packet only if we have a check
29649 29642 * condition and auto request sense has not been performed by
29650 29643 * the hba.
29651 29644 */
29652 29645 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n");
29653 29646
29654 29647 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) {
29655 29648 err = 0;
29656 29649 break;
29657 29650 }
29658 29651
29659 29652 /*
29660 29653 		 * Check CMD_DEV_GONE first; give up if the device is gone,
29661 29654 * no need to read RQS data.
29662 29655 */
29663 29656 if (pkt->pkt_reason == CMD_DEV_GONE) {
29664 29657 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29665 29658 "Error while dumping state with rmw..."
29666 29659 "Device is gone\n");
29667 29660 break;
29668 29661 }
29669 29662
29670 29663 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) {
29671 29664 SD_INFO(SD_LOG_DUMP, un,
29672 29665 "sddump: read failed with CHECK, try # %d\n", i);
29673 29666 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) {
29674 29667 (void) sd_send_polled_RQS(un);
29675 29668 }
29676 29669
29677 29670 continue;
29678 29671 }
29679 29672
29680 29673 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) {
29681 29674 int reset_retval = 0;
29682 29675
29683 29676 SD_INFO(SD_LOG_DUMP, un,
29684 29677 "sddump: read failed with BUSY, try # %d\n", i);
29685 29678
29686 29679 if (un->un_f_lun_reset_enabled == TRUE) {
29687 29680 reset_retval = scsi_reset(SD_ADDRESS(un),
29688 29681 RESET_LUN);
29689 29682 }
29690 29683 if (reset_retval == 0) {
29691 29684 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
29692 29685 }
29693 29686 (void) sd_send_polled_RQS(un);
29694 29687
29695 29688 } else {
29696 29689 SD_INFO(SD_LOG_DUMP, un,
29697 29690 "sddump: read failed with 0x%x, try # %d\n",
29698 29691 SD_GET_PKT_STATUS(pkt), i);
29699 29692 mutex_enter(SD_MUTEX(un));
29700 29693 sd_reset_target(un, pkt);
29701 29694 mutex_exit(SD_MUTEX(un));
29702 29695 }
29703 29696
29704 29697 /*
29705 29698 * If we are not getting anywhere with lun/target resets,
29706 29699 * let's reset the bus.
29707 29700 */
29708 29701 if (i > SD_NDUMP_RETRIES/2) {
29709 29702 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
29710 29703 (void) sd_send_polled_RQS(un);
29711 29704 }
29712 29705
29713 29706 }
29714 29707 scsi_destroy_pkt(pkt);
29715 29708
29716 29709 if (err != 0) {
29717 29710 scsi_free_consistent_buf(bp);
29718 29711 *bpp = NULL;
29719 29712 } else {
29720 29713 *bpp = bp;
29721 29714 }
29722 29715
29723 29716 done:
29724 29717 mutex_enter(SD_MUTEX(un));
29725 29718 return (err);
29726 29719 }
29727 29720
29728 29721
29729 29722 /*
29730 29723 * Function: sd_failfast_flushq
29731 29724 *
29732 29725 * Description: Take all bp's on the wait queue that have B_FAILFAST set
29733 29726 * in b_flags and move them onto the failfast queue, then kick
29734 29727 * off a thread to return all bp's on the failfast queue to
29735 29728 * their owners with an error set.
29736 29729 *
29737 29730 * Arguments: un - pointer to the soft state struct for the instance.
29738 29731 *
29739 29732 * Context: may execute in interrupt context.
29740 29733 */
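/*
 * Flush policy summary, per the sd_failfast_flushctl checks below:
 * with SD_FAILFAST_FLUSH_ALL_BUFS set, every buf on the wait queue is
 * failed; otherwise only bufs with B_FAILFAST set are failed. With
 * SD_FAILFAST_FLUSH_ALL_QUEUES set, the xbuf queues are flushed as
 * well via ddi_xbuf_flushq().
 */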
29741 29734
29742 29735 static void
29743 29736 sd_failfast_flushq(struct sd_lun *un)
29744 29737 {
29745 29738 struct buf *bp;
29746 29739 struct buf *next_waitq_bp;
29747 29740 struct buf *prev_waitq_bp = NULL;
29748 29741
29749 29742 ASSERT(un != NULL);
29750 29743 ASSERT(mutex_owned(SD_MUTEX(un)));
29751 29744 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
29752 29745 ASSERT(un->un_failfast_bp == NULL);
29753 29746
29754 29747 SD_TRACE(SD_LOG_IO_FAILFAST, un,
29755 29748 "sd_failfast_flushq: entry: un:0x%p\n", un);
29756 29749
29757 29750 /*
29758 29751 * Check if we should flush all bufs when entering failfast state, or
29759 29752 * just those with B_FAILFAST set.
29760 29753 */
29761 29754 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
29762 29755 /*
29763 29756 * Move *all* bp's on the wait queue to the failfast flush
29764 29757 * queue, including those that do NOT have B_FAILFAST set.
29765 29758 */
29766 29759 if (un->un_failfast_headp == NULL) {
29767 29760 ASSERT(un->un_failfast_tailp == NULL);
29768 29761 un->un_failfast_headp = un->un_waitq_headp;
29769 29762 } else {
29770 29763 ASSERT(un->un_failfast_tailp != NULL);
29771 29764 un->un_failfast_tailp->av_forw = un->un_waitq_headp;
29772 29765 }
29773 29766
29774 29767 un->un_failfast_tailp = un->un_waitq_tailp;
29775 29768
29776 29769 /* update kstat for each bp moved out of the waitq */
29777 29770 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) {
29778 29771 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
29779 29772 }
29780 29773
29781 29774 /* empty the waitq */
29782 29775 un->un_waitq_headp = un->un_waitq_tailp = NULL;
29783 29776
29784 29777 } else {
29785 29778 /*
29786 29779 * Go thru the wait queue, pick off all entries with
29787 29780 * B_FAILFAST set, and move these onto the failfast queue.
29788 29781 */
29789 29782 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) {
29790 29783 /*
29791 29784 * Save the pointer to the next bp on the wait queue,
29792 29785 * so we get to it on the next iteration of this loop.
29793 29786 */
29794 29787 next_waitq_bp = bp->av_forw;
29795 29788
29796 29789 /*
29797 29790 * If this bp from the wait queue does NOT have
29798 29791 * B_FAILFAST set, just move on to the next element
29799 29792 * in the wait queue. Note, this is the only place
29800 29793 * where it is correct to set prev_waitq_bp.
29801 29794 */
29802 29795 if ((bp->b_flags & B_FAILFAST) == 0) {
29803 29796 prev_waitq_bp = bp;
29804 29797 continue;
29805 29798 }
29806 29799
29807 29800 /*
29808 29801 * Remove the bp from the wait queue.
29809 29802 */
29810 29803 if (bp == un->un_waitq_headp) {
29811 29804 /* The bp is the first element of the waitq. */
29812 29805 un->un_waitq_headp = next_waitq_bp;
29813 29806 if (un->un_waitq_headp == NULL) {
29814 29807 /* The wait queue is now empty */
29815 29808 un->un_waitq_tailp = NULL;
29816 29809 }
29817 29810 } else {
29818 29811 /*
29819 29812 * The bp is either somewhere in the middle
29820 29813 * or at the end of the wait queue.
29821 29814 */
29822 29815 ASSERT(un->un_waitq_headp != NULL);
29823 29816 ASSERT(prev_waitq_bp != NULL);
29824 29817 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST)
29825 29818 == 0);
29826 29819 if (bp == un->un_waitq_tailp) {
29827 29820 /* bp is the last entry on the waitq. */
29828 29821 ASSERT(next_waitq_bp == NULL);
29829 29822 un->un_waitq_tailp = prev_waitq_bp;
29830 29823 }
29831 29824 prev_waitq_bp->av_forw = next_waitq_bp;
29832 29825 }
29833 29826 bp->av_forw = NULL;
29834 29827
29835 29828 /*
29836 29829 * update kstat since the bp is moved out of
29837 29830 * the waitq
29838 29831 */
29839 29832 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
29840 29833
29841 29834 /*
29842 29835 * Now put the bp onto the failfast queue.
29843 29836 */
29844 29837 if (un->un_failfast_headp == NULL) {
29845 29838 /* failfast queue is currently empty */
29846 29839 ASSERT(un->un_failfast_tailp == NULL);
29847 29840 un->un_failfast_headp =
29848 29841 un->un_failfast_tailp = bp;
29849 29842 } else {
29850 29843 /* Add the bp to the end of the failfast q */
29851 29844 ASSERT(un->un_failfast_tailp != NULL);
29852 29845 ASSERT(un->un_failfast_tailp->b_flags &
29853 29846 B_FAILFAST);
29854 29847 un->un_failfast_tailp->av_forw = bp;
29855 29848 un->un_failfast_tailp = bp;
29856 29849 }
29857 29850 }
29858 29851 }
29859 29852
29860 29853 /*
29861 29854 * Now return all bp's on the failfast queue to their owners.
29862 29855 */
29863 29856 while ((bp = un->un_failfast_headp) != NULL) {
29864 29857
29865 29858 un->un_failfast_headp = bp->av_forw;
29866 29859 if (un->un_failfast_headp == NULL) {
29867 29860 un->un_failfast_tailp = NULL;
29868 29861 }
29869 29862
29870 29863 /*
29871 29864 * We want to return the bp with a failure error code, but
29872 29865 * we do not want a call to sd_start_cmds() to occur here,
29873 29866 * so use sd_return_failed_command_no_restart() instead of
29874 29867 * sd_return_failed_command().
29875 29868 */
29876 29869 sd_return_failed_command_no_restart(un, bp, EIO);
29877 29870 }
29878 29871
29879 29872 /* Flush the xbuf queues if required. */
29880 29873 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
29881 29874 ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
29882 29875 }
29883 29876
29884 29877 SD_TRACE(SD_LOG_IO_FAILFAST, un,
29885 29878 "sd_failfast_flushq: exit: un:0x%p\n", un);
29886 29879 }
29887 29880
29888 29881
29889 29882 /*
29890 29883 * Function: sd_failfast_flushq_callback
29891 29884 *
29892 29885 * Description: Return TRUE if the given bp meets the criteria for failfast
29893 29886 * flushing. Used with ddi_xbuf_flushq(9F).
29894 29887 *
29895 29888 * Arguments: bp - ptr to buf struct to be examined.
29896 29889 *
29897 29890 * Context: Any
29898 29891 */
29899 29892
29900 29893 static int
29901 29894 sd_failfast_flushq_callback(struct buf *bp)
29902 29895 {
29903 29896 /*
29904 29897 * Return TRUE if (1) we want to flush ALL bufs when the failfast
29905 29898 * state is entered; OR (2) the given bp has B_FAILFAST set.
29906 29899 */
29907 29900 return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
29908 29901 (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
29909 29902 }
29910 29903
29911 29904
29912 29905
29913 29906 /*
29914 29907 * Function: sd_setup_next_xfer
29915 29908 *
29916 29909 * Description: Prepare next I/O operation using DMA_PARTIAL
29917 29910 *
29918 29911 */
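/*
 * Worked example with illustrative numbers: for a 1 MB request of
 * which the HBA has taken 256 KB, xb_dma_resid is 768 KB; with a
 * 512-byte target block size the next transfer starts at
 * xb_blkno + (1 MB - 768 KB) / 512 = xb_blkno + 512 blocks, for the
 * remaining 1536 blocks.
 */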
29919 29912
29920 29913 static int
29921 29914 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
29922 29915 struct scsi_pkt *pkt, struct sd_xbuf *xp)
29923 29916 {
29924 29917 ssize_t num_blks_not_xfered;
29925 29918 daddr_t strt_blk_num;
29926 29919 ssize_t bytes_not_xfered;
29927 29920 int rval;
29928 29921
29929 29922 ASSERT(pkt->pkt_resid == 0);
29930 29923
29931 29924 /*
29932 29925 * Calculate next block number and amount to be transferred.
29933 29926 *
29934 29927 	 * How much data has NOT been transferred to the HBA yet.
29935 29928 */
29936 29929 bytes_not_xfered = xp->xb_dma_resid;
29937 29930
29938 29931 /*
29939 29932 	 * Figure how many blocks have NOT been transferred to the HBA yet.
29940 29933 */
29941 29934 num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);
29942 29935
29943 29936 /*
29944 29937 	 * Set the starting block number to the end of what WAS transferred.
29945 29938 */
29946 29939 strt_blk_num = xp->xb_blkno +
29947 29940 SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);
29948 29941
29949 29942 /*
29950 29943 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
29951 29944 	 * will call scsi_init_pkt with NULL_FUNC so we do not have to release
29952 29945 * the disk mutex here.
29953 29946 */
29954 29947 rval = sd_setup_next_rw_pkt(un, pkt, bp,
29955 29948 strt_blk_num, num_blks_not_xfered);
29956 29949
29957 29950 if (rval == 0) {
29958 29951
29959 29952 /*
29960 29953 * Success.
29961 29954 *
29962 29955 * Adjust things if there are still more blocks to be
29963 29956 		 * transferred.
29964 29957 */
29965 29958 xp->xb_dma_resid = pkt->pkt_resid;
29966 29959 pkt->pkt_resid = 0;
29967 29960
29968 29961 return (1);
29969 29962 }
29970 29963
29971 29964 /*
29972 29965 	 * There's really only one possible error return value from
29973 29966 	 * sd_setup_next_rw_pkt, which occurs when scsi_init_pkt
29974 29967 * returns NULL.
29975 29968 */
29976 29969 ASSERT(rval == SD_PKT_ALLOC_FAILURE);
29977 29970
29978 29971 bp->b_resid = bp->b_bcount;
29979 29972 bp->b_flags |= B_ERROR;
29980 29973
29981 29974 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29982 29975 "Error setting up next portion of DMA transfer\n");
29983 29976
29984 29977 return (0);
29985 29978 }
29986 29979
29987 29980 /*
29988 29981 * Function: sd_panic_for_res_conflict
29989 29982 *
29990 29983 * Description: Call panic with a string formatted with "Reservation Conflict"
29991 29984 * and a human readable identifier indicating the SD instance
29992 29985 * that experienced the reservation conflict.
29993 29986 *
29994 29987 * Arguments: un - pointer to the soft state struct for the instance.
29995 29988 *
29996 29989 * Context: may execute in interrupt context.
29997 29990 */
29998 29991
29999 29992 #define SD_RESV_CONFLICT_FMT_LEN 40
30000 29993 void
30001 29994 sd_panic_for_res_conflict(struct sd_lun *un)
30002 29995 {
30003 29996 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN];
30004 29997 char path_str[MAXPATHLEN];
30005 29998
30006 29999 (void) snprintf(panic_str, sizeof (panic_str),
30007 30000 "Reservation Conflict\nDisk: %s",
30008 30001 ddi_pathname(SD_DEVINFO(un), path_str));
30009 30002
30010 30003 panic(panic_str);
30011 30004 }
30012 30005
30013 30006 /*
30014 30007  * Note: The following sd_faultinjection_ioctl() routines implement
30015 30008  * driver support for handling fault injection for error analysis,
30016 30009  * causing faults in multiple layers of the driver.
30017 30010 *
30018 30011 */
30019 30012
30020 30013 #ifdef SD_FAULT_INJECTION
30021 30014 static uint_t sd_fault_injection_on = 0;
30022 30015
30023 30016 /*
30024 30017 * Function: sd_faultinjection_ioctl()
30025 30018 *
30026 30019 * Description: This routine is the driver entry point for handling
30027 30020 * faultinjection ioctls to inject errors into the
30028 30021 * layer model
30029 30022 *
30030 30023 * Arguments: cmd - the ioctl cmd received
30031 30024  *		arg - the argument from the user, also used to return data
30032 30025 */
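/*
 * Typical injection session, as driven by the ioctls handled below
 * (informal sketch): SDIOCSTART to reset the fifo and log, one or more
 * SDIOCINSERT{PKT,XB,UN,ARQ} calls to stage faults, SDIOCPUSH to queue
 * them, SDIOCRUN to arm injection, SDIOCRETRIEVE to copy out the log,
 * and SDIOCSTOP to tear the session down.
 */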
30033 30026
30034 30027 static void
30035 30028 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un)
30036 30029 {
30037 30030 uint_t i = 0;
30038 30031 uint_t rval;
30039 30032
30040 30033 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");
30041 30034
30042 30035 mutex_enter(SD_MUTEX(un));
30043 30036
30044 30037 switch (cmd) {
30045 30038 case SDIOCRUN:
30046 30039 /* Allow pushed faults to be injected */
30047 30040 SD_INFO(SD_LOG_SDTEST, un,
30048 30041 "sd_faultinjection_ioctl: Injecting Fault Run\n");
30049 30042
30050 30043 sd_fault_injection_on = 1;
30051 30044
30052 30045 SD_INFO(SD_LOG_IOERR, un,
30053 30046 "sd_faultinjection_ioctl: run finished\n");
30054 30047 break;
30055 30048
30056 30049 case SDIOCSTART:
30057 30050 /* Start Injection Session */
30058 30051 SD_INFO(SD_LOG_SDTEST, un,
30059 30052 "sd_faultinjection_ioctl: Injecting Fault Start\n");
30060 30053
30061 30054 sd_fault_injection_on = 0;
30062 30055 un->sd_injection_mask = 0xFFFFFFFF;
30063 30056 for (i = 0; i < SD_FI_MAX_ERROR; i++) {
30064 30057 un->sd_fi_fifo_pkt[i] = NULL;
30065 30058 un->sd_fi_fifo_xb[i] = NULL;
30066 30059 un->sd_fi_fifo_un[i] = NULL;
30067 30060 un->sd_fi_fifo_arq[i] = NULL;
30068 30061 }
30069 30062 un->sd_fi_fifo_start = 0;
30070 30063 un->sd_fi_fifo_end = 0;
30071 30064
30072 30065 mutex_enter(&(un->un_fi_mutex));
30073 30066 un->sd_fi_log[0] = '\0';
30074 30067 un->sd_fi_buf_len = 0;
30075 30068 mutex_exit(&(un->un_fi_mutex));
30076 30069
30077 30070 SD_INFO(SD_LOG_IOERR, un,
30078 30071 "sd_faultinjection_ioctl: start finished\n");
30079 30072 break;
30080 30073
30081 30074 case SDIOCSTOP:
30082 30075 /* Stop Injection Session */
30083 30076 SD_INFO(SD_LOG_SDTEST, un,
30084 30077 "sd_faultinjection_ioctl: Injecting Fault Stop\n");
30085 30078 sd_fault_injection_on = 0;
30086 30079 un->sd_injection_mask = 0x0;
30087 30080
30088 30081 		/* Empty stray or unused structs from fifo */
30089 30082 for (i = 0; i < SD_FI_MAX_ERROR; i++) {
30090 30083 if (un->sd_fi_fifo_pkt[i] != NULL) {
30091 30084 kmem_free(un->sd_fi_fifo_pkt[i],
30092 30085 sizeof (struct sd_fi_pkt));
30093 30086 }
30094 30087 if (un->sd_fi_fifo_xb[i] != NULL) {
30095 30088 kmem_free(un->sd_fi_fifo_xb[i],
30096 30089 sizeof (struct sd_fi_xb));
30097 30090 }
30098 30091 if (un->sd_fi_fifo_un[i] != NULL) {
30099 30092 kmem_free(un->sd_fi_fifo_un[i],
30100 30093 sizeof (struct sd_fi_un));
30101 30094 }
30102 30095 if (un->sd_fi_fifo_arq[i] != NULL) {
30103 30096 kmem_free(un->sd_fi_fifo_arq[i],
30104 30097 sizeof (struct sd_fi_arq));
30105 30098 }
30106 30099 un->sd_fi_fifo_pkt[i] = NULL;
30107 30100 un->sd_fi_fifo_un[i] = NULL;
30108 30101 un->sd_fi_fifo_xb[i] = NULL;
30109 30102 un->sd_fi_fifo_arq[i] = NULL;
30110 30103 }
30111 30104 un->sd_fi_fifo_start = 0;
30112 30105 un->sd_fi_fifo_end = 0;
30113 30106
30114 30107 SD_INFO(SD_LOG_IOERR, un,
30115 30108 "sd_faultinjection_ioctl: stop finished\n");
30116 30109 break;
30117 30110
30118 30111 case SDIOCINSERTPKT:
30119 30112 /* Store a packet struct to be pushed onto fifo */
30120 30113 SD_INFO(SD_LOG_SDTEST, un,
30121 30114 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");
30122 30115
30123 30116 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30124 30117
30125 30118 sd_fault_injection_on = 0;
30126 30119
30127 30120 		/* No more than SD_FI_MAX_ERROR allowed in the queue */
30128 30121 if (un->sd_fi_fifo_pkt[i] != NULL) {
30129 30122 kmem_free(un->sd_fi_fifo_pkt[i],
30130 30123 sizeof (struct sd_fi_pkt));
30131 30124 }
30132 30125 if (arg != NULL) {
30133 30126 un->sd_fi_fifo_pkt[i] =
30134 30127 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
30135 30128 if (un->sd_fi_fifo_pkt[i] == NULL) {
30136 30129 				/* Alloc failed; don't store anything */
30137 30130 break;
30138 30131 }
30139 30132 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
30140 30133 sizeof (struct sd_fi_pkt), 0);
30141 30134 if (rval == -1) {
30142 30135 kmem_free(un->sd_fi_fifo_pkt[i],
30143 30136 sizeof (struct sd_fi_pkt));
30144 30137 un->sd_fi_fifo_pkt[i] = NULL;
30145 30138 }
30146 30139 } else {
30147 30140 SD_INFO(SD_LOG_IOERR, un,
30148 30141 "sd_faultinjection_ioctl: pkt null\n");
30149 30142 }
30150 30143 break;
30151 30144
30152 30145 case SDIOCINSERTXB:
30153 30146 /* Store a xb struct to be pushed onto fifo */
30154 30147 SD_INFO(SD_LOG_SDTEST, un,
30155 30148 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");
30156 30149
30157 30150 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30158 30151
30159 30152 sd_fault_injection_on = 0;
30160 30153
30161 30154 if (un->sd_fi_fifo_xb[i] != NULL) {
30162 30155 kmem_free(un->sd_fi_fifo_xb[i],
30163 30156 sizeof (struct sd_fi_xb));
30164 30157 un->sd_fi_fifo_xb[i] = NULL;
30165 30158 }
30166 30159 if (arg != NULL) {
30167 30160 un->sd_fi_fifo_xb[i] =
30168 30161 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
30169 30162 if (un->sd_fi_fifo_xb[i] == NULL) {
30170 30163 				/* Alloc failed; don't store anything */
30171 30164 break;
30172 30165 }
30173 30166 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
30174 30167 sizeof (struct sd_fi_xb), 0);
30175 30168
30176 30169 if (rval == -1) {
30177 30170 kmem_free(un->sd_fi_fifo_xb[i],
30178 30171 sizeof (struct sd_fi_xb));
30179 30172 un->sd_fi_fifo_xb[i] = NULL;
30180 30173 }
30181 30174 } else {
30182 30175 SD_INFO(SD_LOG_IOERR, un,
30183 30176 "sd_faultinjection_ioctl: xb null\n");
30184 30177 }
30185 30178 break;
30186 30179
30187 30180 case SDIOCINSERTUN:
30188 30181 /* Store a un struct to be pushed onto fifo */
30189 30182 SD_INFO(SD_LOG_SDTEST, un,
30190 30183 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");
30191 30184
30192 30185 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30193 30186
30194 30187 sd_fault_injection_on = 0;
30195 30188
30196 30189 if (un->sd_fi_fifo_un[i] != NULL) {
30197 30190 kmem_free(un->sd_fi_fifo_un[i],
30198 30191 sizeof (struct sd_fi_un));
30199 30192 un->sd_fi_fifo_un[i] = NULL;
30200 30193 }
30201 30194 if (arg != NULL) {
30202 30195 un->sd_fi_fifo_un[i] =
30203 30196 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
30204 30197 if (un->sd_fi_fifo_un[i] == NULL) {
30205 30198 				/* Alloc failed; don't store anything */
30206 30199 break;
30207 30200 }
30208 30201 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
30209 30202 sizeof (struct sd_fi_un), 0);
30210 30203 if (rval == -1) {
30211 30204 kmem_free(un->sd_fi_fifo_un[i],
30212 30205 sizeof (struct sd_fi_un));
30213 30206 un->sd_fi_fifo_un[i] = NULL;
30214 30207 }
30215 30208
30216 30209 } else {
30217 30210 SD_INFO(SD_LOG_IOERR, un,
30218 30211 "sd_faultinjection_ioctl: un null\n");
30219 30212 }
30220 30213
30221 30214 break;
30222 30215
30223 30216 case SDIOCINSERTARQ:
30224 30217 /* Store a arq struct to be pushed onto fifo */
30225 30218 SD_INFO(SD_LOG_SDTEST, un,
30226 30219 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
30227 30220 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30228 30221
30229 30222 sd_fault_injection_on = 0;
30230 30223
30231 30224 if (un->sd_fi_fifo_arq[i] != NULL) {
30232 30225 kmem_free(un->sd_fi_fifo_arq[i],
30233 30226 sizeof (struct sd_fi_arq));
30234 30227 un->sd_fi_fifo_arq[i] = NULL;
30235 30228 }
30236 30229 if (arg != NULL) {
30237 30230 un->sd_fi_fifo_arq[i] =
30238 30231 kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
30239 30232 if (un->sd_fi_fifo_arq[i] == NULL) {
30240 30233 				/* Alloc failed; don't store anything */
30241 30234 break;
30242 30235 }
30243 30236 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
30244 30237 sizeof (struct sd_fi_arq), 0);
30245 30238 if (rval == -1) {
30246 30239 kmem_free(un->sd_fi_fifo_arq[i],
30247 30240 sizeof (struct sd_fi_arq));
30248 30241 un->sd_fi_fifo_arq[i] = NULL;
30249 30242 }
30250 30243
30251 30244 } else {
30252 30245 SD_INFO(SD_LOG_IOERR, un,
30253 30246 "sd_faultinjection_ioctl: arq null\n");
30254 30247 }
30255 30248
30256 30249 break;
30257 30250
30258 30251 case SDIOCPUSH:
30259 30252 /* Push stored xb, pkt, un, and arq onto fifo */
30260 30253 sd_fault_injection_on = 0;
30261 30254
30262 30255 if (arg != NULL) {
30263 30256 rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
30264 30257 if (rval != -1 &&
30265 30258 un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
30266 30259 un->sd_fi_fifo_end += i;
30267 30260 }
30268 30261 } else {
30269 30262 SD_INFO(SD_LOG_IOERR, un,
30270 30263 "sd_faultinjection_ioctl: push arg null\n");
30271 30264 if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
30272 30265 un->sd_fi_fifo_end++;
30273 30266 }
30274 30267 }
30275 30268 SD_INFO(SD_LOG_IOERR, un,
30276 30269 "sd_faultinjection_ioctl: push to end=%d\n",
30277 30270 un->sd_fi_fifo_end);
30278 30271 break;
30279 30272
30280 30273 case SDIOCRETRIEVE:
30281 30274 /* Return buffer of log from Injection session */
30282 30275 SD_INFO(SD_LOG_SDTEST, un,
30283 30276 "sd_faultinjection_ioctl: Injecting Fault Retreive");
30284 30277
30285 30278 sd_fault_injection_on = 0;
30286 30279
30287 30280 mutex_enter(&(un->un_fi_mutex));
30288 30281 rval = ddi_copyout(un->sd_fi_log, (void *)arg,
30289 30282 un->sd_fi_buf_len + 1, 0);
30290 30283 mutex_exit(&(un->un_fi_mutex));
30291 30284
30292 30285 if (rval == -1) {
30293 30286 /*
30294 30287 * arg is possibly invalid; set
30295 30288 * it to NULL for the return
30296 30289 */
30297 30290 arg = NULL;
30298 30291 }
30299 30292 break;
30300 30293 }
30301 30294
30302 30295 mutex_exit(SD_MUTEX(un));
30303 30296 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: exit\n");
30304 30297 }
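/*
 * Reviewer sketch (not part of the driver): a minimal userland consumer of
 * the SDIOC* fault-injection ioctls handled above. This assumes the SDIOC*
 * commands and struct sd_fi_xb come from <sys/scsi/targets/sddef.h>, that
 * the driver was built with SD_FAULT_INJECTION, and that 0xff-filled fields
 * mean "leave the original value alone" (mirroring the SD_CONDSET checks in
 * sd_faultinjection()). The device path is hypothetical.
 *
 *	#include <sys/scsi/targets/sddef.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	inject_one_xb(void)
 *	{
 *		struct sd_fi_xb xb;
 *		uint_t push = 1;
 *		int fd = open("/dev/rdsk/c0t0d0s2", O_RDWR);
 *
 *		if (fd < 0)
 *			return (-1);
 *		(void) memset(&xb, 0xff, sizeof (xb));	(all fields "as-is")
 *		xb.xb_sense_status = STATUS_CHECK;	(one field to inject)
 *		(void) ioctl(fd, SDIOCINSERTXB, &xb);	(stage the xb entry)
 *		(void) ioctl(fd, SDIOCPUSH, &push);	(push one fifo slot)
 *		(void) ioctl(fd, SDIOCSTART, NULL);	(arm injection)
 *		(void) close(fd);
 *		return (0);
 *	}
 */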
30305 30298
30306 30299
30307 30300 /*
30308 30301 * Function: sd_injection_log()
30309 30302 *
30310 30303 * Description: This routine adds buf to the already existing injection log
30311 30304 * for retrieval via faultinjection_ioctl for use in fault
30312 30305 * detection and recovery.
30313 30306 *
30314 30307 * Arguments: buf - the string to add to the log
30315 30308 */
30316 30309
30317 30310 static void
30318 30311 sd_injection_log(char *buf, struct sd_lun *un)
30319 30312 {
30320 30313 uint_t len;
30321 30314
30322 30315 ASSERT(un != NULL);
30323 30316 ASSERT(buf != NULL);
30324 30317
30325 30318 mutex_enter(&(un->un_fi_mutex));
30326 30319
30327 30320 len = min(strlen(buf), 255);
30328 30321 /* Add logged value to Injection log to be returned later */
30329 30322 if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
30330 30323 uint_t offset = strlen((char *)un->sd_fi_log);
30331 30324 char *destp = (char *)un->sd_fi_log + offset;
30332 30325 int i;
30333 30326 for (i = 0; i < len; i++) {
30334 30327 *destp++ = *buf++;
30335 30328 }
30336 30329 un->sd_fi_buf_len += len;
30337 30330 un->sd_fi_log[un->sd_fi_buf_len] = '\0';
30338 30331 }
30339 30332
30340 30333 mutex_exit(&(un->un_fi_mutex));
30341 30334 }
30342 30335
30343 30336
30344 30337 /*
30345 30338 * Function: sd_faultinjection()
30346 30339 *
30347 30340 * Description: This routine takes the pkt and changes its
30348 30341 * content based on the error injection scenario.
30349 30342 *
30350 30343 * Arguments: pktp - packet to be changed
30351 30344 */
30352 30345
30353 30346 static void
30354 30347 sd_faultinjection(struct scsi_pkt *pktp)
30355 30348 {
30356 30349 uint_t i;
30357 30350 struct sd_fi_pkt *fi_pkt;
30358 30351 struct sd_fi_xb *fi_xb;
30359 30352 struct sd_fi_un *fi_un;
30360 30353 struct sd_fi_arq *fi_arq;
30361 30354 struct buf *bp;
30362 30355 struct sd_xbuf *xb;
30363 30356 struct sd_lun *un;
30364 30357
30365 30358 ASSERT(pktp != NULL);
30366 30359
30367 30360 /* pull bp xb and un from pktp */
30368 30361 bp = (struct buf *)pktp->pkt_private;
30369 30362 xb = SD_GET_XBUF(bp);
30370 30363 un = SD_GET_UN(bp);
30371 30364
30372 30365 ASSERT(un != NULL);
30373 30366
30374 30367 mutex_enter(SD_MUTEX(un));
30375 30368
30376 30369 SD_TRACE(SD_LOG_SDTEST, un,
30377 30370 "sd_faultinjection: entry Injection from sdintr\n");
30378 30371
30379 30372 /* if injection is off return */
30380 30373 if (sd_fault_injection_on == 0 ||
30381 30374 un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
30382 30375 mutex_exit(SD_MUTEX(un));
30383 30376 return;
30384 30377 }
30385 30378
30386 30379 SD_INFO(SD_LOG_SDTEST, un,
30387 30380 "sd_faultinjection: is working for copying\n");
30388 30381
30389 30382 /* take next set off fifo */
30390 30383 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;
30391 30384
30392 30385 fi_pkt = un->sd_fi_fifo_pkt[i];
30393 30386 fi_xb = un->sd_fi_fifo_xb[i];
30394 30387 fi_un = un->sd_fi_fifo_un[i];
30395 30388 fi_arq = un->sd_fi_fifo_arq[i];
30396 30389
30397 30390
30398 30391 /* set variables accordingly */
30399 30392 /* set pkt if it was on fifo */
30400 30393 if (fi_pkt != NULL) {
30401 30394 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
30402 30395 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
30403 30396 if (fi_pkt->pkt_cdbp != 0xff)
30404 30397 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
30405 30398 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
30406 30399 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
30407 30400 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");
30408 30401
30409 30402 }
30410 30403 /* set xb if it was on fifo */
30411 30404 if (fi_xb != NULL) {
30412 30405 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
30413 30406 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
30414 30407 if (fi_xb->xb_retry_count != 0)
30415 30408 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
30416 30409 SD_CONDSET(xb, xb, xb_victim_retry_count,
30417 30410 "xb_victim_retry_count");
30418 30411 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
30419 30412 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
30420 30413 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");
30421 30414
30422 30415 /* copy in block data from sense */
30423 30416 /*
30424 30417 * if (fi_xb->xb_sense_data[0] != -1) {
30425 30418 * bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
30426 30419 * SENSE_LENGTH);
30427 30420 * }
30428 30421 */
30429 30422 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH);
30430 30423
30431 30424 /* copy in extended sense codes */
30432 30425 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
30433 30426 xb, es_code, "es_code");
30434 30427 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
30435 30428 xb, es_key, "es_key");
30436 30429 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
30437 30430 xb, es_add_code, "es_add_code");
30438 30431 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
30439 30432 xb, es_qual_code, "es_qual_code");
30440 30433 struct scsi_extended_sense *esp;
30441 30434 esp = (struct scsi_extended_sense *)xb->xb_sense_data;
30442 30435 esp->es_class = CLASS_EXTENDED_SENSE;
30443 30436 }
30444 30437
30445 30438 /* set un if it was on fifo */
30446 30439 if (fi_un != NULL) {
30447 30440 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
30448 30441 SD_CONDSET(un, un, un_ctype, "un_ctype");
30449 30442 SD_CONDSET(un, un, un_reset_retry_count,
30450 30443 "un_reset_retry_count");
30451 30444 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
30452 30445 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
30453 30446 SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
30454 30447 SD_CONDSET(un, un, un_f_allow_bus_device_reset,
30455 30448 "un_f_allow_bus_device_reset");
30456 30449 SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");
30457 30450
30458 30451 }
30459 30452
30460 30453 /* copy in auto request sense if it was on fifo */
30461 30454 if (fi_arq != NULL) {
30462 30455 bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
30463 30456 }
30464 30457
30465 30458 /* free structs */
30466 30459 if (un->sd_fi_fifo_pkt[i] != NULL) {
30467 30460 kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
30468 30461 }
30469 30462 if (un->sd_fi_fifo_xb[i] != NULL) {
30470 30463 kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
30471 30464 }
30472 30465 if (un->sd_fi_fifo_un[i] != NULL) {
30473 30466 kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
30474 30467 }
30475 30468 if (un->sd_fi_fifo_arq[i] != NULL) {
30476 30469 kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
30477 30470 }
30478 30471
30479 30472 /*
30480 30473 * kmem_free does not guarantee to set the pointer to NULL.
30481 30474 * Since we use these pointers to determine whether we set
30482 30475 * values or not, let's make sure they are always
30483 30476 * NULL after the free.
30484 30477 */
30485 30478 un->sd_fi_fifo_pkt[i] = NULL;
30486 30479 un->sd_fi_fifo_un[i] = NULL;
30487 30480 un->sd_fi_fifo_xb[i] = NULL;
30488 30481 un->sd_fi_fifo_arq[i] = NULL;
30489 30482
30490 30483 un->sd_fi_fifo_start++;
30491 30484
30492 30485 mutex_exit(SD_MUTEX(un));
30493 30486
30494 30487 SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
30495 30488 }
30496 30489
30497 30490 #endif /* SD_FAULT_INJECTION */
30498 30491
30499 30492 /*
30500 30493 * This routine is invoked in sd_unit_attach(). Before calling it, the
30501 30494 * properties in the conf file should already have been processed, including
30502 30495 * the "hotpluggable" property.
30503 30496 *
30504 30497 * The sd driver distinguishes 3 different types of devices: removable media,
30505 30498 * non-removable media, and hotpluggable. Below the differences are defined:
30506 30499 *
30507 30500 * 1. Device ID
30508 30501 *
30509 30502 * The device ID of a device is used to identify this device. Refer to
30510 30503 * ddi_devid_register(9F).
30511 30504 *
30512 30505 * For a non-removable media disk device which can provide 0x80 or 0x83
30513 30506 * VPD page (refer to INQUIRY command of SCSI SPC specification), a unique
30514 30507 * device ID is created to identify this device. For other non-removable
30515 30508 * media devices, a default device ID is created only if this device has
30516 30509 * at least 2 alternate cylinders. Otherwise, this device has no devid.
30517 30510 *
30518 30511 * -------------------------------------------------------
30519 30512 * removable media hotpluggable | Can Have Device ID
30520 30513 * -------------------------------------------------------
30521 30514 * false false | Yes
30522 30515 * false true | Yes
30523 30516 * true x | No
30524 30517 * ------------------------------------------------------
30525 30518 *
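 * For example, userland can retrieve a registered device ID through
 * libdevid (sketch; assumes an already-opened raw disk fd and the
 * devid_get(3DEVID)/devid_str_encode(3DEVID) interfaces):
 *
 *	ddi_devid_t devid;
 *	char *idstr;
 *
 *	if (devid_get(fd, &devid) == 0) {
 *		idstr = devid_str_encode(devid, NULL);
 *		(process idstr, then devid_str_free()/devid_free())
 *	}
 *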
30526 30519 *
30527 30520 * 2. SCSI group 4 commands
30528 30521 *
30529 30522 * In the SCSI specs, only some commands in the group 4 command set can use
30530 30523 * 8-byte addresses to access >2TB storage spaces (with 512-byte blocks, a
30531 30524 * 4-byte LBA tops out at 2^32 * 512 bytes = 2TB). Other commands have no
30532 30525 * such capability. Without group 4 support, it is impossible to make full
30533 30526 * use of the storage space of a disk with a capacity larger than 2TB.
30534 30527 *
30535 30528 * -----------------------------------------------
30536 30529 * removable media hotpluggable LP64 | Group
30537 30530 * -----------------------------------------------
30538 30531 * false false false | 1
30539 30532 * false false true | 4
30540 30533 * false true false | 1
30541 30534 * false true true | 4
30542 30535 * true x x | 5
30543 30536 * -----------------------------------------------
30544 30537 *
30545 30538 *
30546 30539 * 3. Check for VTOC Label
30547 30540 *
30548 30541 * If a direct-access disk has no EFI label, sd will check if it has a
30549 30542 * valid VTOC label. Now, sd also does that check for removable media
30550 30543 * and hotpluggable devices.
30551 30544 *
30552 30545 * --------------------------------------------------------------
30553 30546 * Direct-Access removable media hotpluggable | Check Label
30554 30547 * -------------------------------------------------------------
30555 30548 * false false false | No
30556 30549 * false false true | No
30557 30550 * false true false | Yes
30558 30551 * false true true | Yes
30559 30552 * true x x | Yes
30560 30553 * --------------------------------------------------------------
30561 30554 *
30562 30555 *
30563 30556 * 4. Building default VTOC label
30564 30557 *
30565 30558 * As section 3 says, sd checks whether some kinds of devices have a VTOC
30566 30559 * label. If those devices have no valid VTOC label, sd(7d) will attempt to
30567 30560 * create a default VTOC label for them. Currently sd creates a default VTOC
30568 30561 * label for all devices on the x86 platform (VTOC_16), but only for removable
30569 30562 * media devices on SPARC (VTOC_8).
30570 30563 *
30571 30564 * -----------------------------------------------------------
30572 30565 * removable media hotpluggable platform | Default Label
30573 30566 * -----------------------------------------------------------
30574 30567 * false false sparc | No
30575 30568 * false true x86 | Yes
* false false x86 | Yes
30576 30569 * false true sparc | Yes
30577 30570 * true x x | Yes
30578 30571 * ----------------------------------------------------------
30579 30572 *
30580 30573 *
30581 30574 * 5. Supported blocksizes of target devices
30582 30575 *
30583 30576 * Sd supports non-512-byte blocksize for removable media devices only.
30584 30577 * For other devices, only 512-byte blocksize is supported. This may be
30585 30578 * changed in the near future because some RAID devices require a
30586 30579 * non-512-byte blocksize.
30587 30580 *
30588 30581 * -----------------------------------------------------------
30589 30582 * removable media hotpluggable | non-512-byte blocksize
30590 30583 * -----------------------------------------------------------
30591 30584 * false false | No
30592 30585 * false true | No
30593 30586 * true x | Yes
30594 30587 * -----------------------------------------------------------
30595 30588 *
30596 30589 *
30597 30590 * 6. Automatic mount & unmount
30598 30591 *
30599 30592 * The sd(7d) driver provides the DKIOCREMOVABLE ioctl, which is used to
30600 30593 * query whether a device is a removable media device. It returns 1 for
30601 30594 * removable media devices, and 0 for others (see the sketch below).
30602 30595 *
30603 30596 * The automatic mounting subsystem should distinguish between the types
30604 30597 * of devices and apply automounting policies to each.
30605 30598 *
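 * For example, a userland consumer might query this as follows (sketch;
 * assumes an already-opened raw disk fd):
 *
 *	#include <sys/dkio.h>
 *
 *	int removable = 0;
 *
 *	if (ioctl(fd, DKIOCREMOVABLE, &removable) == 0 && removable != 0)
 *		apply_removable_automount_policy();	(hypothetical helper)
 *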
30606 30599 *
30607 30600 * 7. fdisk partition management
30608 30601 *
30609 30602 * Fdisk is the traditional partitioning method on the x86 platform. The
30610 30603 * sd(7d) driver supports fdisk partitions only on x86; on SPARC, sd
30611 30604 * doesn't support fdisk partitions at all. Note: pcfs(7fs) can recognize
30612 30605 * fdisk partitions on both the x86 and SPARC platforms.
30613 30606 *
30614 30607 * -----------------------------------------------------------
30615 30608 * platform removable media USB/1394 | fdisk supported
30616 30609 * -----------------------------------------------------------
30617 30610 * x86 X X | true
30618 30611 * ------------------------------------------------------------
30619 30612 * sparc X X | false
30620 30613 * ------------------------------------------------------------
30621 30614 *
30622 30615 *
30623 30616 * 8. MBOOT/MBR
30624 30617 *
30625 30618 * Although sd(7d) doesn't support fdisk on the SPARC platform, it does
30626 30619 * support reading/writing the mboot for removable media devices on SPARC.
30627 30620 *
30628 30621 * -----------------------------------------------------------
30629 30622 * platform removable media USB/1394 | mboot supported
30630 30623 * -----------------------------------------------------------
30631 30624 * x86 X X | true
30632 30625 * ------------------------------------------------------------
30633 30626 * sparc false false | false
30634 30627 * sparc false true | true
30635 30628 * sparc true false | true
30636 30629 * sparc true true | true
30637 30630 * ------------------------------------------------------------
30638 30631 *
30639 30632 *
30640 30633 * 9. error handling during opening device
30641 30634 *
30642 30635 * If opening a disk device fails, an errno is returned. For some kinds
30643 30636 * of errors, a different errno is returned depending on whether the device
30644 30637 * is a removable media device. This brings USB/1394 hard disks in line
30645 30638 * with expected hard disk behavior. It is not expected that this breaks
30646 30639 * any application.
30647 30640 *
30648 30641 * ------------------------------------------------------
30649 30642 * removable media hotpluggable | errno
30650 30643 * ------------------------------------------------------
30651 30644 * false false | EIO
30652 30645 * false true | EIO
30653 30646 * true x | ENXIO
30654 30647 * ------------------------------------------------------
30655 30648 *
30656 30649 *
30657 30650 * 11. ioctls: DKIOCEJECT, CDROMEJECT
30658 30651 *
30659 30652 * These IOCTLs are applicable only to removable media devices.
30660 30653 *
30661 30654 * -----------------------------------------------------------
30662 30655 * removable media hotpluggable |DKIOCEJECT, CDROMEJECT
30663 30656 * -----------------------------------------------------------
30664 30657 * false false | No
30665 30658 * false true | No
30666 30659 * true x | Yes
30667 30660 * -----------------------------------------------------------
30668 30661 *
30669 30662 *
30670 30663 * 12. Kstats for partitions
30671 30664 *
30672 30665 * sd creates partition kstats for non-removable media devices. USB and
30673 30666 * Firewire hard disks now have partition kstats.
30674 30667 *
30675 30668 * ------------------------------------------------------
30676 30669 * removable media hotpluggable | kstat
30677 30670 * ------------------------------------------------------
30678 30671 * false false | Yes
30679 30672 * false true | Yes
30680 30673 * true x | No
30681 30674 * ------------------------------------------------------
30682 30675 *
30683 30676 *
30684 30677 * 13. Removable media & hotpluggable properties
30685 30678 *
30686 30679 * The sd driver creates a "removable-media" property for removable media
30687 30680 * devices. A parent nexus driver creates a "hotpluggable" property if
30688 30681 * it supports hotplugging.
30689 30682 *
30690 30683 * ---------------------------------------------------------------------
30691 30684 * removable media hotpluggable | "removable-media" " hotpluggable"
30692 30685 * ---------------------------------------------------------------------
30693 30686 * false false | No No
30694 30687 * false true | No Yes
30695 30688 * true false | Yes No
30696 30689 * true true | Yes Yes
30697 30690 * ---------------------------------------------------------------------
30698 30691 *
30699 30692 *
30700 30693 * 14. Power Management
30701 30694 *
30702 30695 * sd only power manages removable media devices or devices that support
30703 30696 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250).
30704 30697 *
30705 30698 * A parent nexus that supports hotplugging can also set "pm-capable"
30706 30699 * if the disk can be power managed (see the sketch below).
30707 30700 *
30708 30701 * ------------------------------------------------------------
30709 30702 * removable media hotpluggable pm-capable | power manage
30710 30703 * ------------------------------------------------------------
30711 30704 * false false false | No
30712 30705 * false false true | Yes
30713 30706 * false true false | No
30714 30707 * false true true | Yes
30715 30708 * true x x | Yes
30716 30709 * ------------------------------------------------------------
30717 30710 *
30718 30711 * USB and firewire hard disks can now be power managed independently
30719 30712 * of the framebuffer.
30720 30713 *
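 * For example, a parent nexus might advertise this capability with
 * something like the following (sketch, using ddi_prop_update_int(9F)
 * on the child's dev_info node):
 *
 *	(void) ddi_prop_update_int(DDI_DEV_T_NONE, child_dip,
 *	    "pm-capable", 1);
 *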
30721 30714 *
30722 30715 * 15. Support for USB disks with capacity larger than 1TB
30723 30716 *
30724 30717 * Currently, sd doesn't permit a fixed disk device with capacity
30725 30718 * larger than 1TB to be used in a 32-bit operating system environment.
30726 30719 * However, sd doesn't do that for removable media devices. Instead, it
30727 30720 * assumes that removable media devices cannot have a capacity larger
30728 30721 * than 1TB. Therefore, using those devices on a 32-bit system is only
30729 30722 * partially supported, which can cause some unexpected results.
30730 30723 *
30731 30724 * ---------------------------------------------------------------------
30732 30725 * removable media USB/1394 | Capacity > 1TB | Used in 32-bit env
30733 30726 * ---------------------------------------------------------------------
30734 30727 * false false | true | no
30735 30728 * false true | true | no
30736 30729 * true false | true | Yes
30737 30730 * true true | true | Yes
30738 30731 * ---------------------------------------------------------------------
30739 30732 *
30740 30733 *
30741 30734 * 16. Check write-protection at open time
30742 30735 *
30743 30736 * When a removable media device is opened for writing without the NDELAY
30744 30737 * flag, sd will check whether the device is writable. An attempt to open
30745 30738 * a write-protected device without the NDELAY flag will abort.
30746 30739 *
30747 30740 * ------------------------------------------------------------
30748 30741 * removable media USB/1394 | WP Check
30749 30742 * ------------------------------------------------------------
30750 30743 * false false | No
30751 30744 * false true | No
30752 30745 * true false | Yes
30753 30746 * true true | Yes
30754 30747 * ------------------------------------------------------------
30755 30748 *
30756 30749 *
30757 30750 * 17. syslog when corrupted VTOC is encountered
30758 30751 *
30759 30752 * Currently, if an invalid VTOC is encountered, sd only prints a syslog
30760 30753 * message for fixed SCSI disks.
30761 30754 * ------------------------------------------------------------
30762 30755 * removable media USB/1394 | print syslog
30763 30756 * ------------------------------------------------------------
30764 30757 * false false | Yes
30765 30758 * false true | No
30766 30759 * true false | No
30767 30760 * true true | No
30768 30761 * ------------------------------------------------------------
30769 30762 */
30770 30763 static void
30771 30764 sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
30772 30765 {
30773 30766 int pm_cap;
30774 30767
30775 30768 ASSERT(un->un_sd);
30776 30769 ASSERT(un->un_sd->sd_inq);
30777 30770
30778 30771 /*
30779 30772 * Enable SYNC CACHE support for all devices.
30780 30773 */
30781 30774 un->un_f_sync_cache_supported = TRUE;
30782 30775
30783 30776 /*
30784 30777 * Set the sync cache required flag to false.
30785 30778 * This ensures that no SYNC CACHE is sent
30786 30779 * when there have been no writes.
30787 30780 */
30788 30781 un->un_f_sync_cache_required = FALSE;
30789 30782
30790 30783 if (un->un_sd->sd_inq->inq_rmb) {
30791 30784 /*
30792 30785 * The media of this device is removable, and for this kind
30793 30786 * of device the medium can be changed after the device is
30794 30787 * opened. Thus we should support this operation.
30795 30788 */
30796 30789 un->un_f_has_removable_media = TRUE;
30797 30790
30798 30791 /*
30799 30792 * support non-512-byte blocksize of removable media devices
30800 30793 */
30801 30794 un->un_f_non_devbsize_supported = TRUE;
30802 30795
30803 30796 /*
30804 30797 * Assume that all removable media devices support DOOR_LOCK
30805 30798 */
30806 30799 un->un_f_doorlock_supported = TRUE;
30807 30800
30808 30801 /*
30809 30802 * A removable media device may be opened with the NDELAY flag
30810 30803 * when there is no medium in the drive; in this case we don't
30811 30804 * care whether the device is writable. Without the NDELAY
30812 30805 * flag, we need to check whether the medium is write-protected.
30813 30806 */
30814 30807 un->un_f_chk_wp_open = TRUE;
30815 30808
30816 30809 /*
30817 30810 * Need to start a SCSI watch thread to monitor the media state;
30818 30811 * when media is inserted or ejected, notify syseventd.
30819 30812 */
30820 30813 un->un_f_monitor_media_state = TRUE;
30821 30814
30822 30815 /*
30823 30816 * Some devices don't support START_STOP_UNIT command.
30824 30817 * Therefore, we'd better check if a device supports it
30825 30818 * before sending it.
30826 30819 */
30827 30820 un->un_f_check_start_stop = TRUE;
30828 30821
30829 30822 /*
30830 30823 * support eject media ioctl:
30831 30824 * FDEJECT, DKIOCEJECT, CDROMEJECT
30832 30825 */
30833 30826 un->un_f_eject_media_supported = TRUE;
30834 30827
30835 30828 /*
30836 30829 * Because many removable-media devices don't support
30837 30830 * LOG_SENSE, we cannot use this command to check whether
30838 30831 * a removable media device supports power management.
30839 30832 * We assume that they support power management via the
30840 30833 * START_STOP_UNIT command and can be spun up and down
30841 30834 * without limitations.
30842 30835 */
30843 30836 un->un_f_pm_supported = TRUE;
30844 30837
30845 30838 /*
30846 30839 * Need to create a zero-length (Boolean) property
30847 30840 * "removable-media" for the removable media devices.
30848 30841 * Note that the return value of ddi_prop_create is not
30849 30842 * checked: if the property cannot be created, we do not
30850 30843 * want the attach to fail altogether. This is consistent
30851 30844 * with other property creation in attach.
30852 30845 */
30853 30846 (void) ddi_prop_create(DDI_DEV_T_NONE, devi,
30854 30847 DDI_PROP_CANSLEEP, "removable-media", NULL, 0);
30855 30848
30856 30849 } else {
30857 30850 /*
30858 30851 * create device ID for device
30859 30852 */
30860 30853 un->un_f_devid_supported = TRUE;
30861 30854
30862 30855 /*
30863 30856 * Spin up non-removable-media devices once it is attached
30864 30857 */
30865 30858 un->un_f_attach_spinup = TRUE;
30866 30859
30867 30860 /*
30868 30861 * According to the SCSI specification, sense data has two
30869 30862 * formats: fixed format and descriptor format. At present, we
30870 30863 * don't support descriptor format sense data for removable
30871 30864 * media.
30872 30865 */
30873 30866 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
30874 30867 un->un_f_descr_format_supported = TRUE;
30875 30868 }
30876 30869
30877 30870 /*
30878 30871 * kstats are created only for non-removable media devices.
30879 30872 *
30880 30873 * Set this in sd.conf to 0 in order to disable kstats. The
30881 30874 * default is 1, so they are enabled by default.
30882 30875 */
30883 30876 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
30884 30877 SD_DEVINFO(un), DDI_PROP_DONTPASS,
30885 30878 "enable-partition-kstats", 1));
30886 30879
30887 30880 /*
30888 30881 * Check if HBA has set the "pm-capable" property.
30889 30882 * If "pm-capable" exists and is non-zero then we can
30890 30883 * power manage the device without checking the start/stop
30891 30884 * cycle count log sense page.
30892 30885 *
30893 30886 * If "pm-capable" exists and is set to be false (0),
30894 30887 * then we should not power manage the device.
30895 30888 *
30896 30889 * If "pm-capable" doesn't exist then pm_cap will
30897 30890 * be set to SD_PM_CAPABLE_UNDEFINED (-1). In this case,
30898 30891 * sd will check the start/stop cycle count log sense page
30899 30892 * and power manage the device if the cycle count limit has
30900 30893 * not been exceeded.
30901 30894 */
30902 30895 pm_cap = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
30903 30896 DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
30904 30897 if (SD_PM_CAPABLE_IS_UNDEFINED(pm_cap)) {
30905 30898 un->un_f_log_sense_supported = TRUE;
30906 30899 if (!un->un_f_power_condition_disabled &&
30907 30900 SD_INQUIRY(un)->inq_ansi == 6) {
30908 30901 un->un_f_power_condition_supported = TRUE;
30909 30902 }
30910 30903 } else {
30911 30904 /*
30912 30905 * pm-capable property exists.
30913 30906 *
30914 30907 * Convert "TRUE" values for pm_cap to
30915 30908 * SD_PM_CAPABLE_IS_TRUE to make it easier to check
30916 30909 * later. "TRUE" values are any values defined in
30917 30910 * inquiry.h.
30918 30911 */
30919 30912 if (SD_PM_CAPABLE_IS_FALSE(pm_cap)) {
30920 30913 un->un_f_log_sense_supported = FALSE;
30921 30914 } else {
30922 30915 /* SD_PM_CAPABLE_IS_TRUE case */
30923 30916 un->un_f_pm_supported = TRUE;
30924 30917 if (!un->un_f_power_condition_disabled &&
30925 30918 SD_PM_CAPABLE_IS_SPC_4(pm_cap)) {
30926 30919 un->un_f_power_condition_supported =
30927 30920 TRUE;
30928 30921 }
30929 30922 if (SD_PM_CAP_LOG_SUPPORTED(pm_cap)) {
30930 30923 un->un_f_log_sense_supported = TRUE;
30931 30924 un->un_f_pm_log_sense_smart =
30932 30925 SD_PM_CAP_SMART_LOG(pm_cap);
30933 30926 }
30934 30927 }
30935 30928
30936 30929 SD_INFO(SD_LOG_ATTACH_DETACH, un,
30937 30930 "sd_unit_attach: un:0x%p pm-capable "
30938 30931 "property set to %d.\n", un, un->un_f_pm_supported);
30939 30932 }
30940 30933 }
30941 30934
30942 30935 if (un->un_f_is_hotpluggable) {
30943 30936
30944 30937 /*
30945 30938 * Have to watch hotpluggable devices as well, since
30946 30939 * that's the only way for userland applications to
30947 30940 * detect hot removal while device is busy/mounted.
30948 30941 */
30949 30942 un->un_f_monitor_media_state = TRUE;
30950 30943
30951 30944 un->un_f_check_start_stop = TRUE;
30952 30945
30953 30946 }
30954 30947 }
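/*
 * Example (sketch): per the "enable-partition-kstats" lookup above, the
 * partition kstats for non-removable devices can be disabled with a
 * one-line sd.conf entry:
 *
 *	enable-partition-kstats=0;
 */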
30955 30948
30956 30949 /*
30957 30950 * sd_tg_rdwr:
30958 30951 * Provides rdwr access for cmlb via sd_tgops. The start_block is
30959 30952 * in sys block size, req_length in bytes.
30960 30953 *
30961 30954 */
30962 30955 static int
30963 30956 sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
30964 30957 diskaddr_t start_block, size_t reqlength, void *tg_cookie)
30965 30958 {
30966 30959 struct sd_lun *un;
30967 30960 int path_flag = (int)(uintptr_t)tg_cookie;
30968 30961 char *dkl = NULL;
30969 30962 diskaddr_t real_addr = start_block;
30970 30963 diskaddr_t first_byte, end_block;
30971 30964
30972 30965 size_t buffer_size = reqlength;
30973 30966 int rval = 0;
30974 30967 diskaddr_t cap;
30975 30968 uint32_t lbasize;
30976 30969 sd_ssc_t *ssc;
30977 30970
30978 30971 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
30979 30972 if (un == NULL)
30980 30973 return (ENXIO);
30981 30974
30982 30975 if (cmd != TG_READ && cmd != TG_WRITE)
30983 30976 return (EINVAL);
30984 30977
30985 30978 ssc = sd_ssc_init(un);
30986 30979 mutex_enter(SD_MUTEX(un));
30987 30980 if (un->un_f_tgt_blocksize_is_valid == FALSE) {
30988 30981 mutex_exit(SD_MUTEX(un));
30989 30982 rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
30990 30983 &lbasize, path_flag);
30991 30984 if (rval != 0)
30992 30985 goto done1;
30993 30986 mutex_enter(SD_MUTEX(un));
30994 30987 sd_update_block_info(un, lbasize, cap);
30995 30988 if (un->un_f_tgt_blocksize_is_valid == FALSE) {
30996 30989 mutex_exit(SD_MUTEX(un));
30997 30990 rval = EIO;
30998 30991 goto done;
30999 30992 }
31000 30993 }
31001 30994
31002 30995 if (NOT_DEVBSIZE(un)) {
31003 30996 /*
31004 30997 * sys_blocksize != tgt_blocksize, need to re-adjust
31005 30998 * blkno and save the index to beginning of dk_label
31006 30999 */
31007 31000 first_byte = SD_SYSBLOCKS2BYTES(start_block);
31008 31001 real_addr = first_byte / un->un_tgt_blocksize;
31009 31002
31010 31003 end_block = (first_byte + reqlength +
31011 31004 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;
31012 31005
31013 31006 /* round up buffer size to multiple of target block size */
31014 31007 buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;
31015 31008
31016 31009 SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
31017 31010 "label_addr: 0x%x allocation size: 0x%x\n",
31018 31011 real_addr, buffer_size);
31019 31012
31020 31013 if (((first_byte % un->un_tgt_blocksize) != 0) ||
31021 31014 (reqlength % un->un_tgt_blocksize) != 0)
31022 31015 /* the request is not aligned */
31023 31016 dkl = kmem_zalloc(buffer_size, KM_SLEEP);
31024 31017 }
31025 31018
31026 31019 /*
31027 31020 * The MMC standard allows READ CAPACITY to be
31028 31021 * inaccurate by a bounded amount (in the interest of
31029 31022 * response latency). As a result, failed READs are
31030 31023 * commonplace (due to the reading of metadata and not
31031 31024 * data). Depending on the per-Vendor/drive Sense data,
31032 31025 * the failed READ can cause many (unnecessary) retries.
31033 31026 */
31034 31027
31035 31028 if (ISCD(un) && (cmd == TG_READ) &&
31036 31029 (un->un_f_blockcount_is_valid == TRUE) &&
31037 31030 ((start_block == (un->un_blockcount - 1)) ||
31038 31031 (start_block == (un->un_blockcount - 2)))) {
31039 31032 path_flag = SD_PATH_DIRECT_PRIORITY;
31040 31033 }
31041 31034
31042 31035 mutex_exit(SD_MUTEX(un));
31043 31036 if (cmd == TG_READ) {
31044 31037 rval = sd_send_scsi_READ(ssc, (dkl != NULL) ? dkl : bufaddr,
31045 31038 buffer_size, real_addr, path_flag);
31046 31039 if (dkl != NULL)
31047 31040 bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
31048 31041 real_addr), bufaddr, reqlength);
31049 31042 } else {
31050 31043 if (dkl) {
31051 31044 rval = sd_send_scsi_READ(ssc, dkl, buffer_size,
31052 31045 real_addr, path_flag);
31053 31046 if (rval) {
31054 31047 goto done1;
31055 31048 }
31056 31049 bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
31057 31050 real_addr), reqlength);
31058 31051 }
31059 31052 rval = sd_send_scsi_WRITE(ssc, (dkl != NULL) ? dkl : bufaddr,
31060 31053 buffer_size, real_addr, path_flag);
31061 31054 }
31062 31055
31063 31056 done1:
31064 31057 if (dkl != NULL)
31065 31058 kmem_free(dkl, buffer_size);
31066 31059
31067 31060 if (rval != 0) {
31068 31061 if (rval == EIO)
31069 31062 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
31070 31063 else
31071 31064 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
31072 31065 }
31073 31066 done:
31074 31067 sd_ssc_fini(ssc);
31075 31068 return (rval);
31076 31069 }
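/*
 * Reviewer sketch (not driver code): the unaligned-access math in the
 * NOT_DEVBSIZE branch above, restated standalone. Assumes 512-byte system
 * blocks (SD_SYSBLOCKS2BYTES) and a power-of-two target block size.
 *
 *	static void
 *	tg_align(diskaddr_t start_block, size_t reqlength, uint32_t tgt_bsize,
 *	    diskaddr_t *real_addr, size_t *buffer_size)
 *	{
 *		diskaddr_t first_byte = start_block * 512;
 *		diskaddr_t end_block = (first_byte + reqlength +
 *		    tgt_bsize - 1) / tgt_bsize;
 *
 *		*real_addr = first_byte / tgt_bsize;
 *		*buffer_size = (end_block - *real_addr) * tgt_bsize;
 *	}
 *
 * E.g. start_block = 7, reqlength = 512, tgt_bsize = 4096 gives first_byte
 * 3584, real_addr 0 and buffer_size 4096; since 3584 is not a multiple of
 * 4096 the request is unaligned, so a bounce buffer (dkl) is allocated and
 * the TG_WRITE path does a read-modify-write through it.
 */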
31077 31070
31078 31071
31079 31072 static int
31080 31073 sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
31081 31074 {
31082 31075
31083 31076 struct sd_lun *un;
31084 31077 diskaddr_t cap;
31085 31078 uint32_t lbasize;
31086 31079 int path_flag = (int)(uintptr_t)tg_cookie;
31087 31080 int ret = 0;
31088 31081
31089 31082 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
31090 31083 if (un == NULL)
31091 31084 return (ENXIO);
31092 31085
31093 31086 switch (cmd) {
31094 31087 case TG_GETPHYGEOM:
31095 31088 case TG_GETVIRTGEOM:
31096 31089 case TG_GETCAPACITY:
31097 31090 case TG_GETBLOCKSIZE:
31098 31091 mutex_enter(SD_MUTEX(un));
31099 31092
31100 31093 if ((un->un_f_blockcount_is_valid == TRUE) &&
31101 31094 (un->un_f_tgt_blocksize_is_valid == TRUE)) {
31102 31095 cap = un->un_blockcount;
31103 31096 lbasize = un->un_tgt_blocksize;
31104 31097 mutex_exit(SD_MUTEX(un));
31105 31098 } else {
31106 31099 sd_ssc_t *ssc;
31107 31100 mutex_exit(SD_MUTEX(un));
31108 31101 ssc = sd_ssc_init(un);
31109 31102 ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
31110 31103 &lbasize, path_flag);
31111 31104 if (ret != 0) {
31112 31105 if (ret == EIO)
31113 31106 sd_ssc_assessment(ssc,
31114 31107 SD_FMT_STATUS_CHECK);
31115 31108 else
31116 31109 sd_ssc_assessment(ssc,
31117 31110 SD_FMT_IGNORE);
31118 31111 sd_ssc_fini(ssc);
31119 31112 return (ret);
31120 31113 }
31121 31114 sd_ssc_fini(ssc);
31122 31115 mutex_enter(SD_MUTEX(un));
31123 31116 sd_update_block_info(un, lbasize, cap);
31124 31117 if ((un->un_f_blockcount_is_valid == FALSE) ||
31125 31118 (un->un_f_tgt_blocksize_is_valid == FALSE)) {
31126 31119 mutex_exit(SD_MUTEX(un));
31127 31120 return (EIO);
31128 31121 }
31129 31122 mutex_exit(SD_MUTEX(un));
31130 31123 }
31131 31124
31132 31125 if (cmd == TG_GETCAPACITY) {
31133 31126 *(diskaddr_t *)arg = cap;
31134 31127 return (0);
31135 31128 }
31136 31129
31137 31130 if (cmd == TG_GETBLOCKSIZE) {
31138 31131 *(uint32_t *)arg = lbasize;
31139 31132 return (0);
31140 31133 }
31141 31134
31142 31135 if (cmd == TG_GETPHYGEOM)
31143 31136 ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
31144 31137 cap, lbasize, path_flag);
31145 31138 else
31146 31139 /* TG_GETVIRTGEOM */
31147 31140 ret = sd_get_virtual_geometry(un,
31148 31141 (cmlb_geom_t *)arg, cap, lbasize);
31149 31142
31150 31143 return (ret);
31151 31144
31152 31145 case TG_GETATTR:
31153 31146 mutex_enter(SD_MUTEX(un));
31154 31147 ((tg_attribute_t *)arg)->media_is_writable =
31155 31148 un->un_f_mmc_writable_media;
31156 31149 ((tg_attribute_t *)arg)->media_is_solid_state =
31157 31150 un->un_f_is_solid_state;
31158 31151 ((tg_attribute_t *)arg)->media_is_rotational =
31159 31152 un->un_f_is_rotational;
31160 31153 mutex_exit(SD_MUTEX(un));
31161 31154 return (0);
31162 31155 default:
31163 31156 return (ENOTTY);
31164 31157
31165 31158 }
31166 31159 }
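/*
 * Example (sketch): cmlb reaches this entry point through sd_tgops. A
 * caller holding the dev_info_t could fetch capacity and block size like
 * this (assumes the TG_* commands shown above; tg_cookie carries the
 * path flag):
 *
 *	diskaddr_t cap = 0;
 *	uint32_t bsize = 0;
 *	uint64_t total_bytes;
 *
 *	if (sd_tg_getinfo(devi, TG_GETCAPACITY, &cap,
 *	    (void *)(uintptr_t)SD_PATH_DIRECT) == 0 &&
 *	    sd_tg_getinfo(devi, TG_GETBLOCKSIZE, &bsize,
 *	    (void *)(uintptr_t)SD_PATH_DIRECT) == 0)
 *		total_bytes = (uint64_t)cap * bsize;
 */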
31167 31160
31168 31161 /*
31169 31162 * Function: sd_ssc_ereport_post
31170 31163 *
31171 31164 * Description: Will be called when the SD driver needs to post an ereport.
31172 31165 *
31173 31166 * Context: Kernel thread or interrupt context.
31174 31167 */
31175 31168
31176 31169 #define DEVID_IF_KNOWN(d) "devid", DATA_TYPE_STRING, (d) ? (d) : "unknown"
31177 31170
31178 31171 static void
31179 31172 sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess)
31180 31173 {
31181 31174 int uscsi_path_instance = 0;
31182 31175 uchar_t uscsi_pkt_reason;
31183 31176 uint32_t uscsi_pkt_state;
31184 31177 uint32_t uscsi_pkt_statistics;
31185 31178 uint64_t uscsi_ena;
31186 31179 uchar_t op_code;
31187 31180 uint8_t *sensep;
31188 31181 union scsi_cdb *cdbp;
31189 31182 uint_t cdblen = 0;
31190 31183 uint_t senlen = 0;
31191 31184 struct sd_lun *un;
31192 31185 dev_info_t *dip;
31193 31186 char *devid;
31194 31187 int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON |
31195 31188 SSC_FLAGS_INVALID_STATUS |
31196 31189 SSC_FLAGS_INVALID_SENSE |
31197 31190 SSC_FLAGS_INVALID_DATA;
31198 31191 char assessment[16];
31199 31192
31200 31193 ASSERT(ssc != NULL);
31201 31194 ASSERT(ssc->ssc_uscsi_cmd != NULL);
31202 31195 ASSERT(ssc->ssc_uscsi_info != NULL);
31203 31196
31204 31197 un = ssc->ssc_un;
31205 31198 ASSERT(un != NULL);
31206 31199
31207 31200 dip = un->un_sd->sd_dev;
31208 31201
31209 31202 /*
31210 31203 * Get the devid:
31211 31204 * devid will only be passed to non-transport error reports.
31212 31205 */
31213 31206 devid = DEVI(dip)->devi_devid_str;
31214 31207
31215 31208 /*
31216 31209 * If we are syncing or dumping, the command will not be executed
31217 31210 * so we bypass this situation.
31218 31211 */
31219 31212 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
31220 31213 (un->un_state == SD_STATE_DUMPING))
31221 31214 return;
31222 31215
31223 31216 uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason;
31224 31217 uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance;
31225 31218 uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state;
31226 31219 uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics;
31227 31220 uscsi_ena = ssc->ssc_uscsi_info->ui_ena;
31228 31221
31229 31222 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
31230 31223 cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb;
31231 31224
31232 31225 /* In rare cases, e.g. DOORLOCK, the cdb could be NULL */
31233 31226 if (cdbp == NULL) {
31234 31227 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
31235 31228 "sd_ssc_ereport_post meet empty cdb\n");
31236 31229 return;
31237 31230 }
31238 31231
31239 31232 op_code = cdbp->scc_cmd;
31240 31233
31241 31234 cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen;
31242 31235 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
31243 31236 ssc->ssc_uscsi_cmd->uscsi_rqresid);
31244 31237
31245 31238 if (senlen > 0)
31246 31239 ASSERT(sensep != NULL);
31247 31240
31248 31241 /*
31249 31242 * Map drv_assess to the corresponding assessment string.
31250 31243 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending
31251 31244 * on the sense key returned.
31252 31245 */
31253 31246 switch (drv_assess) {
31254 31247 case SD_FM_DRV_RECOVERY:
31255 31248 (void) sprintf(assessment, "%s", "recovered");
31256 31249 break;
31257 31250 case SD_FM_DRV_RETRY:
31258 31251 (void) sprintf(assessment, "%s", "retry");
31259 31252 break;
31260 31253 case SD_FM_DRV_NOTICE:
31261 31254 (void) sprintf(assessment, "%s", "info");
31262 31255 break;
31263 31256 case SD_FM_DRV_FATAL:
31264 31257 default:
31265 31258 (void) sprintf(assessment, "%s", "unknown");
31266 31259 }
31267 31260 /*
31268 31261 * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered
31269 31262 * command, we will post ereport.io.scsi.cmd.disk.recovered.
31270 31263 * driver-assessment will always be "recovered" here.
31271 31264 */
31272 31265 if (drv_assess == SD_FM_DRV_RECOVERY) {
31273 31266 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
31274 31267 "cmd.disk.recovered", uscsi_ena, devid, NULL,
31275 31268 DDI_NOSLEEP, NULL,
31276 31269 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31277 31270 DEVID_IF_KNOWN(devid),
31278 31271 "driver-assessment", DATA_TYPE_STRING, assessment,
31279 31272 "op-code", DATA_TYPE_UINT8, op_code,
31280 31273 "cdb", DATA_TYPE_UINT8_ARRAY,
31281 31274 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31282 31275 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31283 31276 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
31284 31277 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
31285 31278 NULL);
31286 31279 return;
31287 31280 }
31288 31281
31289 31282 /*
31290 31283 * If there is unexpected/undecodable data, we should post
31291 31284 * ereport.io.scsi.cmd.disk.dev.uderr.
31292 31285 * driver-assessment will be set based on parameter drv_assess.
31293 31286 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back.
31294 31287 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered.
31295 31288 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered.
31296 31289 * SSC_FLAGS_INVALID_DATA - invalid data sent back.
31297 31290 */
31298 31291 if (ssc->ssc_flags & ssc_invalid_flags) {
31299 31292 if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) {
31300 31293 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
31301 31294 NULL, "cmd.disk.dev.uderr", uscsi_ena, devid,
31302 31295 NULL, DDI_NOSLEEP, NULL,
31303 31296 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31304 31297 DEVID_IF_KNOWN(devid),
31305 31298 "driver-assessment", DATA_TYPE_STRING,
31306 31299 drv_assess == SD_FM_DRV_FATAL ?
31307 31300 "fail" : assessment,
31308 31301 "op-code", DATA_TYPE_UINT8, op_code,
31309 31302 "cdb", DATA_TYPE_UINT8_ARRAY,
31310 31303 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31311 31304 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31312 31305 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
31313 31306 "pkt-stats", DATA_TYPE_UINT32,
31314 31307 uscsi_pkt_statistics,
31315 31308 "stat-code", DATA_TYPE_UINT8,
31316 31309 ssc->ssc_uscsi_cmd->uscsi_status,
31317 31310 "un-decode-info", DATA_TYPE_STRING,
31318 31311 ssc->ssc_info,
31319 31312 "un-decode-value", DATA_TYPE_UINT8_ARRAY,
31320 31313 senlen, sensep,
31321 31314 NULL);
31322 31315 } else {
31323 31316 /*
31324 31317 * For other types of invalid data, the
31325 31318 * un-decode-value field would be empty because the
31326 31319 * undecodable content could be seen in the upper-
31327 31320 * level payload or inside un-decode-info.
31328 31321 */
31329 31322 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
31330 31323 NULL,
31331 31324 "cmd.disk.dev.uderr", uscsi_ena, devid,
31332 31325 NULL, DDI_NOSLEEP, NULL,
31333 31326 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31334 31327 DEVID_IF_KNOWN(devid),
31335 31328 "driver-assessment", DATA_TYPE_STRING,
31336 31329 drv_assess == SD_FM_DRV_FATAL ?
31337 31330 "fail" : assessment,
31338 31331 "op-code", DATA_TYPE_UINT8, op_code,
31339 31332 "cdb", DATA_TYPE_UINT8_ARRAY,
31340 31333 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31341 31334 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31342 31335 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
31343 31336 "pkt-stats", DATA_TYPE_UINT32,
31344 31337 uscsi_pkt_statistics,
31345 31338 "stat-code", DATA_TYPE_UINT8,
31346 31339 ssc->ssc_uscsi_cmd->uscsi_status,
31347 31340 "un-decode-info", DATA_TYPE_STRING,
31348 31341 ssc->ssc_info,
31349 31342 "un-decode-value", DATA_TYPE_UINT8_ARRAY,
31350 31343 0, NULL,
31351 31344 NULL);
31352 31345 }
31353 31346 ssc->ssc_flags &= ~ssc_invalid_flags;
31354 31347 return;
31355 31348 }
31356 31349
31357 31350 if (uscsi_pkt_reason != CMD_CMPLT ||
31358 31351 (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) {
31359 31352 /*
31360 31353 * pkt-reason != CMD_CMPLT, or SSC_FLAGS_TRAN_ABORT was
31361 31354 * set inside sd_start_cmds due to errors (bad packet or
31362 31355 * fatal transport error); we should treat it as a
31363 31356 * transport error, so we post ereport.io.scsi.cmd.disk.tran.
31364 31357 * driver-assessment will be set based on drv_assess.
31365 31358 * We will set devid to NULL because it is a transport
31366 31359 * error.
31367 31360 */
31368 31361 if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)
31369 31362 ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT;
31370 31363
31371 31364 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
31372 31365 "cmd.disk.tran", uscsi_ena, NULL, NULL, DDI_NOSLEEP, NULL,
31373 31366 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31374 31367 DEVID_IF_KNOWN(devid),
31375 31368 "driver-assessment", DATA_TYPE_STRING,
31376 31369 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
31377 31370 "op-code", DATA_TYPE_UINT8, op_code,
31378 31371 "cdb", DATA_TYPE_UINT8_ARRAY,
31379 31372 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31380 31373 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31381 31374 "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state,
31382 31375 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
31383 31376 NULL);
31384 31377 } else {
31385 31378 /*
31386 31379 * If we got here, we have a completed command, and we need
31387 31380 * to further investigate the sense data to see what kind
31388 31381 * of ereport we should post.
31389 31382 * No ereport is needed if sense-key is KEY_RECOVERABLE_ERROR
31390 31383 * and asc/ascq is "ATA PASS-THROUGH INFORMATION AVAILABLE".
31391 31384 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr if sense-key is
31392 31385 * KEY_MEDIUM_ERROR.
31393 31386 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise.
31394 31387 * driver-assessment will be set based on the parameter
31395 31388 * drv_assess.
31396 31389 */
31397 31390 if (senlen > 0) {
31398 31391 /*
31399 31392 * Here we have sense data available.
31400 31393 */
31401 31394 uint8_t sense_key = scsi_sense_key(sensep);
31402 31395 uint8_t sense_asc = scsi_sense_asc(sensep);
31403 31396 uint8_t sense_ascq = scsi_sense_ascq(sensep);
31404 31397
31405 31398 if (sense_key == KEY_RECOVERABLE_ERROR &&
31406 31399 sense_asc == 0x00 && sense_ascq == 0x1d)
31407 31400 return;
31408 31401
31409 31402 if (sense_key == KEY_MEDIUM_ERROR) {
31410 31403 /*
31411 31404 * driver-assessment should be "fatal" if
31412 31405 * drv_assess is SD_FM_DRV_FATAL.
31413 31406 */
31414 31407 scsi_fm_ereport_post(un->un_sd,
31415 31408 uscsi_path_instance, NULL,
31416 31409 "cmd.disk.dev.rqs.merr",
31417 31410 uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL,
31418 31411 FM_VERSION, DATA_TYPE_UINT8,
31419 31412 FM_EREPORT_VERS0,
31420 31413 DEVID_IF_KNOWN(devid),
31421 31414 "driver-assessment",
31422 31415 DATA_TYPE_STRING,
31423 31416 drv_assess == SD_FM_DRV_FATAL ?
31424 31417 "fatal" : assessment,
31425 31418 "op-code",
31426 31419 DATA_TYPE_UINT8, op_code,
31427 31420 "cdb",
31428 31421 DATA_TYPE_UINT8_ARRAY, cdblen,
31429 31422 ssc->ssc_uscsi_cmd->uscsi_cdb,
31430 31423 "pkt-reason",
31431 31424 DATA_TYPE_UINT8, uscsi_pkt_reason,
31432 31425 "pkt-state",
31433 31426 DATA_TYPE_UINT8, uscsi_pkt_state,
31434 31427 "pkt-stats",
31435 31428 DATA_TYPE_UINT32,
31436 31429 uscsi_pkt_statistics,
31437 31430 "stat-code",
31438 31431 DATA_TYPE_UINT8,
31439 31432 ssc->ssc_uscsi_cmd->uscsi_status,
31440 31433 "key",
31441 31434 DATA_TYPE_UINT8,
31442 31435 scsi_sense_key(sensep),
31443 31436 "asc",
31444 31437 DATA_TYPE_UINT8,
31445 31438 scsi_sense_asc(sensep),
31446 31439 "ascq",
31447 31440 DATA_TYPE_UINT8,
31448 31441 scsi_sense_ascq(sensep),
31449 31442 "sense-data",
31450 31443 DATA_TYPE_UINT8_ARRAY,
31451 31444 senlen, sensep,
31452 31445 "lba",
31453 31446 DATA_TYPE_UINT64,
31454 31447 ssc->ssc_uscsi_info->ui_lba,
31455 31448 NULL);
31456 31449 } else {
31457 31450 /*
31458 31451 * if sense-key == 0x4 (hardware
31459 31452 * error), driver-assessment should
31460 31453 * be "fatal" if drv_assess is
31461 31454 * SD_FM_DRV_FATAL.
31462 31455 */
31463 31456 scsi_fm_ereport_post(un->un_sd,
31464 31457 uscsi_path_instance, NULL,
31465 31458 "cmd.disk.dev.rqs.derr",
31466 31459 uscsi_ena, devid,
31467 31460 NULL, DDI_NOSLEEP, NULL,
31468 31461 FM_VERSION,
31469 31462 DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31470 31463 DEVID_IF_KNOWN(devid),
31471 31464 "driver-assessment",
31472 31465 DATA_TYPE_STRING,
31473 31466 drv_assess == SD_FM_DRV_FATAL ?
31474 31467 (sense_key == 0x4 ?
31475 31468 "fatal" : "fail") : assessment,
31476 31469 "op-code",
31477 31470 DATA_TYPE_UINT8, op_code,
31478 31471 "cdb",
31479 31472 DATA_TYPE_UINT8_ARRAY, cdblen,
31480 31473 ssc->ssc_uscsi_cmd->uscsi_cdb,
31481 31474 "pkt-reason",
31482 31475 DATA_TYPE_UINT8, uscsi_pkt_reason,
31483 31476 "pkt-state",
31484 31477 DATA_TYPE_UINT8, uscsi_pkt_state,
31485 31478 "pkt-stats",
31486 31479 DATA_TYPE_UINT32,
31487 31480 uscsi_pkt_statistics,
31488 31481 "stat-code",
31489 31482 DATA_TYPE_UINT8,
31490 31483 ssc->ssc_uscsi_cmd->uscsi_status,
31491 31484 "key",
31492 31485 DATA_TYPE_UINT8,
31493 31486 scsi_sense_key(sensep),
31494 31487 "asc",
31495 31488 DATA_TYPE_UINT8,
31496 31489 scsi_sense_asc(sensep),
31497 31490 "ascq",
31498 31491 DATA_TYPE_UINT8,
31499 31492 scsi_sense_ascq(sensep),
31500 31493 "sense-data",
31501 31494 DATA_TYPE_UINT8_ARRAY,
31502 31495 senlen, sensep,
31503 31496 NULL);
31504 31497 }
31505 31498 } else {
31506 31499 /*
31507 31500 * For stat_code == STATUS_GOOD, this is not a
31508 31501 * hardware error.
31509 31502 */
31510 31503 if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD)
31511 31504 return;
31512 31505
31513 31506 /*
31514 31507 * Post ereport.io.scsi.cmd.disk.dev.serr if we got the
31515 31508 * stat-code but with sense data unavailable.
31516 31509 * driver-assessment will be set based on parameter
31517 31510 * drv_assess.
31518 31511 */
31519 31512 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
31520 31513 NULL,
31521 31514 "cmd.disk.dev.serr", uscsi_ena,
31522 31515 devid, NULL, DDI_NOSLEEP, NULL,
31523 31516 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31524 31517 DEVID_IF_KNOWN(devid),
31525 31518 "driver-assessment", DATA_TYPE_STRING,
31526 31519 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
31527 31520 "op-code", DATA_TYPE_UINT8, op_code,
31528 31521 "cdb",
31529 31522 DATA_TYPE_UINT8_ARRAY,
31530 31523 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31531 31524 "pkt-reason",
31532 31525 DATA_TYPE_UINT8, uscsi_pkt_reason,
31533 31526 "pkt-state",
31534 31527 DATA_TYPE_UINT8, uscsi_pkt_state,
31535 31528 "pkt-stats",
31536 31529 DATA_TYPE_UINT32, uscsi_pkt_statistics,
31537 31530 "stat-code",
31538 31531 DATA_TYPE_UINT8,
31539 31532 ssc->ssc_uscsi_cmd->uscsi_status,
31540 31533 NULL);
31541 31534 }
31542 31535 }
31543 31536 }
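/*
 * Note (sketch): the ereports posted above land in the FMA error log and
 * can be inspected from userland with fmdump(1M), e.g.:
 *
 *	# fmdump -eV
 *
 * filtering for the ereport.io.scsi.cmd.disk.* classes used here.
 */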
31544 31537
31545 31538 /*
31546 31539 * Function: sd_ssc_extract_info
31547 31540 *
31548 31541 * Description: Extract information available to help generate ereport.
31549 31542 *
31550 31543 * Context: Kernel thread or interrupt context.
31551 31544 */
31552 31545 static void
31553 31546 sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp,
31554 31547 struct buf *bp, struct sd_xbuf *xp)
31555 31548 {
31556 31549 size_t senlen = 0;
31557 31550 union scsi_cdb *cdbp;
31558 31551 int path_instance;
31559 31552 /*
31560 31553 * Need scsi_cdb_size array to determine the cdb length.
31561 31554 */
31562 31555 extern uchar_t scsi_cdb_size[];
31563 31556
31564 31557 ASSERT(un != NULL);
31565 31558 ASSERT(pktp != NULL);
31566 31559 ASSERT(bp != NULL);
31567 31560 ASSERT(xp != NULL);
31568 31561 ASSERT(ssc != NULL);
31569 31562 ASSERT(mutex_owned(SD_MUTEX(un)));
31570 31563
31571 31564 /*
31572 31565 * Transfer the cdb buffer pointer here.
31573 31566 */
31574 31567 cdbp = (union scsi_cdb *)pktp->pkt_cdbp;
31575 31568
31576 31569 ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)];
31577 31570 ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp;
31578 31571
31579 31572 /*
31580 31573 * Transfer the sense data buffer pointer if sense data is available,
31581 31574 * calculate the sense data length first.
31582 31575 */
31583 31576 if ((xp->xb_sense_state & STATE_XARQ_DONE) ||
31584 31577 (xp->xb_sense_state & STATE_ARQ_DONE)) {
31585 31578 /*
31586 31579 * For arq case, we will enter here.
31587 31580 */
31588 31581 if (xp->xb_sense_state & STATE_XARQ_DONE) {
31589 31582 senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid;
31590 31583 } else {
31591 31584 senlen = SENSE_LENGTH;
31592 31585 }
31593 31586 } else {
31594 31587 /*
31595 31588 * For non-arq case, we will enter this branch.
31596 31589 */
31597 31590 if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK &&
31598 31591 (xp->xb_sense_state & STATE_XFERRED_DATA)) {
31599 31592 senlen = SENSE_LENGTH - xp->xb_sense_resid;
31600 31593 }
31601 31594
31602 31595 }
31603 31596
31604 31597 ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff);
31605 31598 ssc->ssc_uscsi_cmd->uscsi_rqresid = 0;
31606 31599 ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data;
31607 31600
31608 31601 ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
31609 31602
31610 31603 /*
31611 31604 * Only transfer path_instance when scsi_pkt was properly allocated.
31612 31605 */
31613 31606 path_instance = pktp->pkt_path_instance;
31614 31607 if (scsi_pkt_allocated_correctly(pktp) && path_instance)
31615 31608 ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance;
31616 31609 else
31617 31610 ssc->ssc_uscsi_cmd->uscsi_path_instance = 0;
31618 31611
31619 31612 /*
31620 31613 * Copy in the other fields we may need when posting ereport.
31621 31614 */
31622 31615 ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason;
31623 31616 ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state;
31624 31617 ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics;
31625 31618 ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp);
31626 31619
31627 31620 /*
31628 31621 * For a partially read/written command, we will not create an
31629 31622 * ena, lest a successful command be recognized as recovered.
31630 31623 */
31631 31624 if ((pktp->pkt_reason == CMD_CMPLT) &&
31632 31625 (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) &&
31633 31626 (senlen == 0)) {
31634 31627 return;
31635 31628 }
31636 31629
31637 31630 /*
31638 31631 * To associate ereports of a single command execution flow, we
31639 31632 * need a shared ena for a specific command.
31640 31633 */
31641 31634 if (xp->xb_ena == 0)
31642 31635 xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
31643 31636 ssc->ssc_uscsi_info->ui_ena = xp->xb_ena;
31644 31637 }
31645 31638
31646 31639
31647 31640 /*
31648 31641 * Function: sd_check_bdc_vpd
31649 31642 *
31650 31643 * Description: Query the optional INQUIRY VPD page 0xb1. If the device
31651 31644 * supports VPD page 0xb1, sd examines the MEDIUM ROTATION
31652 31645 * RATE.
31653 31646 *
31654 31647 * Set the following based on RPM value:
31655 31648 * = 0 device is not solid state, non-rotational
31656 31649 * = 1 device is solid state, non-rotational
31657 31650 * > 1 device is not solid state, rotational
31658 31651 *
31659 31652 * Context: Kernel thread or interrupt context.
31660 31653 */
31661 31654
31662 31655 static void
31663 31656 sd_check_bdc_vpd(sd_ssc_t *ssc)
31664 31657 {
31665 31658 int rval = 0;
31666 31659 uchar_t *inqb1 = NULL;
31667 31660 size_t inqb1_len = MAX_INQUIRY_SIZE;
31668 31661 size_t inqb1_resid = 0;
31669 31662 struct sd_lun *un;
31670 31663
31671 31664 ASSERT(ssc != NULL);
31672 31665 un = ssc->ssc_un;
31673 31666 ASSERT(un != NULL);
31674 31667 ASSERT(!mutex_owned(SD_MUTEX(un)));
31675 31668
31676 31669 mutex_enter(SD_MUTEX(un));
31677 31670 un->un_f_is_rotational = TRUE;
31678 31671 un->un_f_is_solid_state = FALSE;
31679 31672
31680 31673 if (ISCD(un)) {
31681 31674 mutex_exit(SD_MUTEX(un));
31682 31675 return;
31683 31676 }
31684 31677
31685 31678 if (sd_check_vpd_page_support(ssc) == 0 &&
31686 31679 un->un_vpd_page_mask & SD_VPD_DEV_CHARACTER_PG) {
31687 31680 mutex_exit(SD_MUTEX(un));
31688 31681 /* collect page b1 data */
31689 31682 inqb1 = kmem_zalloc(inqb1_len, KM_SLEEP);
31690 31683
31691 31684 rval = sd_send_scsi_INQUIRY(ssc, inqb1, inqb1_len,
31692 31685 0x01, 0xB1, &inqb1_resid);
31693 31686
31694 31687 if (rval == 0 && (inqb1_len - inqb1_resid > 5)) {
31695 31688 SD_TRACE(SD_LOG_COMMON, un,
31696 31689 "sd_check_bdc_vpd: \
31697 31690 successfully get VPD page: %x \
31698 31691 PAGE LENGTH: %x BYTE 4: %x \
31699 31692 BYTE 5: %x", inqb1[1], inqb1[3], inqb1[4],
31700 31693 inqb1[5]);
31701 31694
31702 31695 mutex_enter(SD_MUTEX(un));
31703 31696 /*
31704 31697 * Check the MEDIUM ROTATION RATE.
31705 31698 */
31706 31699 if (inqb1[4] == 0) {
31707 31700 if (inqb1[5] == 0) {
31708 31701 un->un_f_is_rotational = FALSE;
31709 31702 } else if (inqb1[5] == 1) {
31710 31703 un->un_f_is_rotational = FALSE;
31711 31704 un->un_f_is_solid_state = TRUE;
31712 31705 /*
31713 31706 * Solid state drives don't need
31714 31707 * disksort.
31715 31708 */
31716 31709 un->un_f_disksort_disabled = TRUE;
31717 31710 }
31718 31711 }
31719 31712 mutex_exit(SD_MUTEX(un));
31720 31713 } else if (rval != 0) {
31721 31714 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
31722 31715 }
31723 31716
31724 31717 kmem_free(inqb1, inqb1_len);
31725 31718 } else {
31726 31719 mutex_exit(SD_MUTEX(un));
31727 31720 }
31728 31721 }
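/*
 * Sketch: MEDIUM ROTATION RATE is the big-endian 16-bit value in bytes
 * 4-5 of VPD page B1h, which is what the inqb1[4]/inqb1[5] checks above
 * decode:
 *
 *	uint16_t rate = (inqb1[4] << 8) | inqb1[5];
 *
 * A rate of 1 identifies a solid-state (non-rotational) device, while
 * e.g. 0x1C20 (7200) would indicate a 7200 RPM rotational disk.
 */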
31729 31722
31730 31723 /*
31731 31724 * Function: sd_check_emulation_mode
31732 31725 *
31733 31726 * Description: Check whether the SSD is at emulation mode
31734 31727 * by issuing READ_CAPACITY_16 to see whether
31735 31728 * we can get physical block size of the drive.
31736 31729 *
31737 31730 * Context: Kernel thread or interrupt context.
31738 31731 */
31739 31732
31740 31733 static void
31741 31734 sd_check_emulation_mode(sd_ssc_t *ssc)
31742 31735 {
31743 31736 int rval = 0;
31744 31737 uint64_t capacity;
31745 31738 uint_t lbasize;
31746 31739 uint_t pbsize;
31747 31740 int i;
31748 31741 int devid_len;
31749 31742 struct sd_lun *un;
31750 31743
31751 31744 ASSERT(ssc != NULL);
31752 31745 un = ssc->ssc_un;
31753 31746 ASSERT(un != NULL);
31754 31747 ASSERT(!mutex_owned(SD_MUTEX(un)));
31755 31748
31756 31749 mutex_enter(SD_MUTEX(un));
31757 31750 if (ISCD(un)) {
31758 31751 mutex_exit(SD_MUTEX(un));
31759 31752 return;
31760 31753 }
31761 31754
31762 31755 if (un->un_f_descr_format_supported) {
31763 31756 mutex_exit(SD_MUTEX(un));
31764 31757 rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize,
31765 31758 &pbsize, SD_PATH_DIRECT);
31766 31759 mutex_enter(SD_MUTEX(un));
31767 31760
31768 31761 if (rval != 0) {
31769 31762 un->un_phy_blocksize = DEV_BSIZE;
31770 31763 } else {
31771 31764 if (!ISP2(pbsize % DEV_BSIZE) || pbsize == 0) {
31772 31765 un->un_phy_blocksize = DEV_BSIZE;
31773 31766 } else if (pbsize > un->un_phy_blocksize) {
31774 31767 /*
31775 31768 * Don't reset the physical blocksize
31776 31769 * unless we've detected a larger value.
31777 31770 */
31778 31771 un->un_phy_blocksize = pbsize;
31779 31772 }
31780 31773 }
31781 31774 }
31782 31775
31783 31776 for (i = 0; i < sd_flash_dev_table_size; i++) {
31784 31777 devid_len = (int)strlen(sd_flash_dev_table[i]);
31785 31778 if (sd_sdconf_id_match(un, sd_flash_dev_table[i], devid_len)
31786 31779 == SD_SUCCESS) {
31787 31780 un->un_phy_blocksize = SSD_SECSIZE;
31788 31781 if (un->un_f_is_solid_state &&
31789 31782 un->un_phy_blocksize != un->un_tgt_blocksize)
31790 31783 un->un_f_enable_rmw = TRUE;
31791 31784 }
31792 31785 }
31793 31786
31794 31787 mutex_exit(SD_MUTEX(un));
31795 31788 }
(22768 lines elided)