3515 sd gives RMW warnings for reads
Reviewed by: Albert Lee <trisk@nexenta.com>
Reviewed by: Kevin Crowe <kevin.crowe@nexenta.com>
Reviewed by: Gordon Ross <gordon.ross@nexenta.com>
--- old/usr/src/uts/common/io/scsi/targets/sd.c
+++ new/usr/src/uts/common/io/scsi/targets/sd.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25 /*
26 - * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
27 26 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
28 27 * Copyright (c) 2012 by Delphix. All rights reserved.
28 + * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
29 29 */
30 30 /*
31 31 * Copyright 2011 cyril.galibern@opensvc.com
32 32 */
33 33
34 34 /*
35 35 * SCSI disk target driver.
36 36 */
37 37 #include <sys/scsi/scsi.h>
38 38 #include <sys/dkbad.h>
39 39 #include <sys/dklabel.h>
40 40 #include <sys/dkio.h>
41 41 #include <sys/fdio.h>
42 42 #include <sys/cdio.h>
43 43 #include <sys/mhd.h>
44 44 #include <sys/vtoc.h>
45 45 #include <sys/dktp/fdisk.h>
46 46 #include <sys/kstat.h>
47 47 #include <sys/vtrace.h>
48 48 #include <sys/note.h>
49 49 #include <sys/thread.h>
50 50 #include <sys/proc.h>
51 51 #include <sys/efi_partition.h>
52 52 #include <sys/var.h>
53 53 #include <sys/aio_req.h>
54 54
55 55 #ifdef __lock_lint
56 56 #define _LP64
57 57 #define __amd64
58 58 #endif
59 59
60 60 #if (defined(__fibre))
61 61 /* Note: is there a leadville version of the following? */
62 62 #include <sys/fc4/fcal_linkapp.h>
63 63 #endif
64 64 #include <sys/taskq.h>
65 65 #include <sys/uuid.h>
66 66 #include <sys/byteorder.h>
67 67 #include <sys/sdt.h>
68 68
69 69 #include "sd_xbuf.h"
70 70
71 71 #include <sys/scsi/targets/sddef.h>
72 72 #include <sys/cmlb.h>
73 73 #include <sys/sysevent/eventdefs.h>
74 74 #include <sys/sysevent/dev.h>
75 75
76 76 #include <sys/fm/protocol.h>
77 77
78 78 /*
79 79 * Loadable module info.
80 80 */
81 81 #if (defined(__fibre))
82 82 #define SD_MODULE_NAME "SCSI SSA/FCAL Disk Driver"
83 83 #else /* !__fibre */
84 84 #define SD_MODULE_NAME "SCSI Disk Driver"
85 85 #endif /* !__fibre */
86 86
87 87 /*
88 88 * Define the interconnect type, to allow the driver to distinguish
89 89 * between parallel SCSI (sd) and fibre channel (ssd) behaviors.
90 90 *
91 91 * This is really for backward compatibility. In the future, the driver
92 92 * should actually check the "interconnect-type" property as reported by
93 93 * the HBA; however at present this property is not defined by all HBAs,
94 94 * so we will use this #define (1) to permit the driver to run in
95 95 * backward-compatibility mode; and (2) to print a notification message
96 96 * if an FC HBA does not support the "interconnect-type" property. The
97 97 * behavior of the driver will be to assume parallel SCSI behaviors unless
98 98 * the "interconnect-type" property is defined by the HBA **AND** has a
99 99 * value of either INTERCONNECT_FIBRE, INTERCONNECT_SSA, or
100 100 * INTERCONNECT_FABRIC, in which case the driver will assume Fibre
101 101 * Channel behaviors (as per the old ssd). (Note that the
102 102 * INTERCONNECT_1394 and INTERCONNECT_USB types are not supported and
103 103 * will result in the driver assuming parallel SCSI behaviors.)
104 104 *
105 105 * (see common/sys/scsi/impl/services.h)
106 106 *
107 107 * Note: For ssd semantics, don't use INTERCONNECT_FABRIC as the default
108 108 * since some FC HBAs may already support that, and there is some code in
109 109 * the driver that already looks for it. Using INTERCONNECT_FABRIC as the
110 110 * default would confuse that code, and besides things should work fine
111 111 * anyway if the FC HBA already reports INTERCONNECT_FABRIC for the
112 112 * "interconnect-type" property.
113 113 *
114 114 */
115 115 #if (defined(__fibre))
116 116 #define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_FIBRE
117 117 #else
118 118 #define SD_DEFAULT_INTERCONNECT_TYPE SD_INTERCONNECT_PARALLEL
119 119 #endif
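/*
 * A minimal sketch (not the driver's literal code) of the decision the
 * comment above describes; "un" stands in for the soft-state pointer and
 * the capability query is assumed to reach the HBA's "interconnect-type"
 * property:
 */
#if 0
	switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
	case INTERCONNECT_FIBRE:
	case INTERCONNECT_SSA:
	case INTERCONNECT_FABRIC:
		/* Fibre Channel behaviors, as per the old ssd */
		un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
		break;
	default:
		/*
		 * Property absent or unrecognized (including 1394/USB):
		 * fall back to the compile-time default above.
		 */
		un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
		break;
	}
#endif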
120 120
121 121 /*
122 122 * The name of the driver, established from the module name in _init.
123 123 */
124 124 static char *sd_label = NULL;
125 125
126 126 /*
127 127 * Driver name is unfortunately prefixed on some driver.conf properties.
128 128 */
129 129 #if (defined(__fibre))
130 130 #define sd_max_xfer_size ssd_max_xfer_size
131 131 #define sd_config_list ssd_config_list
132 132 static char *sd_max_xfer_size = "ssd_max_xfer_size";
133 133 static char *sd_config_list = "ssd-config-list";
134 134 #else
135 135 static char *sd_max_xfer_size = "sd_max_xfer_size";
136 136 static char *sd_config_list = "sd-config-list";
137 137 #endif
138 138
139 139 /*
140 140 * Driver global variables
141 141 */
142 142
143 143 #if (defined(__fibre))
144 144 /*
145 145 * These #defines are to avoid namespace collisions that occur because this
146 146 * code is currently used to compile two separate driver modules: sd and ssd.
147 147 * All global variables need to be treated this way (even if declared static)
148 148 * in order to allow the debugger to resolve the names properly.
149 149 * It is anticipated that in the near future the ssd module will be obsoleted,
150 150 * at which time this namespace issue should go away.
151 151 */
152 152 #define sd_state ssd_state
153 153 #define sd_io_time ssd_io_time
154 154 #define sd_failfast_enable ssd_failfast_enable
155 155 #define sd_ua_retry_count ssd_ua_retry_count
156 156 #define sd_report_pfa ssd_report_pfa
157 157 #define sd_max_throttle ssd_max_throttle
158 158 #define sd_min_throttle ssd_min_throttle
159 159 #define sd_rot_delay ssd_rot_delay
160 160
161 161 #define sd_retry_on_reservation_conflict \
162 162 ssd_retry_on_reservation_conflict
163 163 #define sd_reinstate_resv_delay ssd_reinstate_resv_delay
164 164 #define sd_resv_conflict_name ssd_resv_conflict_name
165 165
166 166 #define sd_component_mask ssd_component_mask
167 167 #define sd_level_mask ssd_level_mask
168 168 #define sd_debug_un ssd_debug_un
169 169 #define sd_error_level ssd_error_level
170 170
171 171 #define sd_xbuf_active_limit ssd_xbuf_active_limit
172 172 #define sd_xbuf_reserve_limit ssd_xbuf_reserve_limit
173 173
174 174 #define sd_tr ssd_tr
175 175 #define sd_reset_throttle_timeout ssd_reset_throttle_timeout
176 176 #define sd_qfull_throttle_timeout ssd_qfull_throttle_timeout
177 177 #define sd_qfull_throttle_enable ssd_qfull_throttle_enable
178 178 #define sd_check_media_time ssd_check_media_time
179 179 #define sd_wait_cmds_complete ssd_wait_cmds_complete
180 180 #define sd_label_mutex ssd_label_mutex
181 181 #define sd_detach_mutex ssd_detach_mutex
182 182 #define sd_log_buf ssd_log_buf
183 183 #define sd_log_mutex ssd_log_mutex
184 184
185 185 #define sd_disk_table ssd_disk_table
186 186 #define sd_disk_table_size ssd_disk_table_size
187 187 #define sd_sense_mutex ssd_sense_mutex
188 188 #define sd_cdbtab ssd_cdbtab
189 189
190 190 #define sd_cb_ops ssd_cb_ops
191 191 #define sd_ops ssd_ops
192 192 #define sd_additional_codes ssd_additional_codes
193 193 #define sd_tgops ssd_tgops
194 194
195 195 #define sd_minor_data ssd_minor_data
196 196 #define sd_minor_data_efi ssd_minor_data_efi
197 197
198 198 #define sd_tq ssd_tq
199 199 #define sd_wmr_tq ssd_wmr_tq
200 200 #define sd_taskq_name ssd_taskq_name
201 201 #define sd_wmr_taskq_name ssd_wmr_taskq_name
202 202 #define sd_taskq_minalloc ssd_taskq_minalloc
203 203 #define sd_taskq_maxalloc ssd_taskq_maxalloc
204 204
205 205 #define sd_dump_format_string ssd_dump_format_string
206 206
207 207 #define sd_iostart_chain ssd_iostart_chain
208 208 #define sd_iodone_chain ssd_iodone_chain
209 209
210 210 #define sd_pm_idletime ssd_pm_idletime
211 211
212 212 #define sd_force_pm_supported ssd_force_pm_supported
213 213
214 214 #define sd_dtype_optical_bind ssd_dtype_optical_bind
215 215
216 216 #define sd_ssc_init ssd_ssc_init
217 217 #define sd_ssc_send ssd_ssc_send
218 218 #define sd_ssc_fini ssd_ssc_fini
219 219 #define sd_ssc_assessment ssd_ssc_assessment
220 220 #define sd_ssc_post ssd_ssc_post
221 221 #define sd_ssc_print ssd_ssc_print
222 222 #define sd_ssc_ereport_post ssd_ssc_ereport_post
223 223 #define sd_ssc_set_info ssd_ssc_set_info
224 224 #define sd_ssc_extract_info ssd_ssc_extract_info
225 225
226 226 #endif
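/*
 * Illustrative effect of the renaming above (hypothetical call site):
 * when this file is compiled with __fibre defined to build the ssd
 * module, a reference such as
 *
 *	ddi_get_soft_state(sd_state, instance);
 *
 * compiles as
 *
 *	ddi_get_soft_state(ssd_state, instance);
 *
 * so sd and ssd export distinct symbol names even though they share
 * this source file.
 */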
227 227
228 228 #ifdef SDDEBUG
229 229 int sd_force_pm_supported = 0;
230 230 #endif /* SDDEBUG */
231 231
232 232 void *sd_state = NULL;
233 233 int sd_io_time = SD_IO_TIME;
234 234 int sd_failfast_enable = 1;
235 235 int sd_ua_retry_count = SD_UA_RETRY_COUNT;
236 236 int sd_report_pfa = 1;
237 237 int sd_max_throttle = SD_MAX_THROTTLE;
238 238 int sd_min_throttle = SD_MIN_THROTTLE;
239 239 int sd_rot_delay = 4; /* Default 4ms Rotation delay */
240 240 int sd_qfull_throttle_enable = TRUE;
241 241
242 242 int sd_retry_on_reservation_conflict = 1;
243 243 int sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
244 244 _NOTE(SCHEME_PROTECTS_DATA("safe sharing", sd_reinstate_resv_delay))
245 245
246 246 static int sd_dtype_optical_bind = -1;
247 247
248 248 /* Note: the following is not a bug, it really is "sd_" and not "ssd_" */
249 249 static char *sd_resv_conflict_name = "sd_retry_on_reservation_conflict";
250 250
251 251 /*
252 252 * Global data for debug logging. To enable debug printing, sd_component_mask
253 253 * and sd_level_mask should be set to the desired bit patterns as outlined in
254 254 * sddef.h.
255 255 */
256 256 uint_t sd_component_mask = 0x0;
257 257 uint_t sd_level_mask = 0x0;
258 258 struct sd_lun *sd_debug_un = NULL;
259 259 uint_t sd_error_level = SCSI_ERR_RETRYABLE;
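/*
 * For example, debug output could be enabled at boot with an /etc/system
 * fragment along these lines (the mask values here are illustrative;
 * choose the component and level bits from sddef.h, and use the ssd:
 * prefix for the fibre module):
 *
 *	set sd:sd_component_mask = 0x1
 *	set sd:sd_level_mask = 0x7
 */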
260 260
261 261 /* Note: these may go away in the future... */
262 262 static uint32_t sd_xbuf_active_limit = 512;
263 263 static uint32_t sd_xbuf_reserve_limit = 16;
264 264
265 265 static struct sd_resv_reclaim_request sd_tr = { NULL, NULL, NULL, 0, 0, 0 };
266 266
267 267 /*
268 268 * Timer value used to reset the throttle after it has been reduced
269 269 * (typically in response to TRAN_BUSY or STATUS_QFULL)
270 270 */
271 271 static int sd_reset_throttle_timeout = SD_RESET_THROTTLE_TIMEOUT;
272 272 static int sd_qfull_throttle_timeout = SD_QFULL_THROTTLE_TIMEOUT;
273 273
274 274 /*
275 275 * Interval value associated with the media change scsi watch.
276 276 */
277 277 static int sd_check_media_time = 3000000;
278 278
279 279 /*
280 280 * Wait value used for in progress operations during a DDI_SUSPEND
281 281 */
282 282 static int sd_wait_cmds_complete = SD_WAIT_CMDS_COMPLETE;
283 283
284 284 /*
285 285 * sd_label_mutex protects a static buffer used in the disk label
286 286 * component of the driver
287 287 */
288 288 static kmutex_t sd_label_mutex;
289 289
290 290 /*
291 291 * sd_detach_mutex protects un_layer_count, un_detach_count, and
292 292 * un_opens_in_progress in the sd_lun structure.
293 293 */
294 294 static kmutex_t sd_detach_mutex;
295 295
296 296 _NOTE(MUTEX_PROTECTS_DATA(sd_detach_mutex,
297 297 sd_lun::{un_layer_count un_detach_count un_opens_in_progress}))
298 298
299 299 /*
300 300 * Global buffer and mutex for debug logging
301 301 */
302 302 static char sd_log_buf[1024];
303 303 static kmutex_t sd_log_mutex;
304 304
305 305 /*
306 306 * Structs and globals for recording attached lun information.
307 307 * This maintains a chain. Each node in the chain represents a SCSI controller.
308 308 * The structure records the number of luns attached to each target
309 309 * connected to the controller.
310 310 * For parallel SCSI devices only.
311 311 */
312 312 struct sd_scsi_hba_tgt_lun {
313 313 struct sd_scsi_hba_tgt_lun *next;
314 314 dev_info_t *pdip;
315 315 int nlun[NTARGETS_WIDE];
316 316 };
317 317
318 318 /*
319 319 * Flag to indicate the lun is attached or detached
320 320 */
321 321 #define SD_SCSI_LUN_ATTACH 0
322 322 #define SD_SCSI_LUN_DETACH 1
323 323
324 324 static kmutex_t sd_scsi_target_lun_mutex;
325 325 static struct sd_scsi_hba_tgt_lun *sd_scsi_target_lun_head = NULL;
326 326
327 327 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
328 328 sd_scsi_hba_tgt_lun::next sd_scsi_hba_tgt_lun::pdip))
329 329
330 330 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_target_lun_mutex,
331 331 sd_scsi_target_lun_head))
332 332
333 333 /*
334 334 * "Smart" Probe Caching structs, globals, #defines, etc.
335 335 * For parallel SCSI and non-self-identifying devices only.
336 336 */
337 337
338 338 /*
339 339 * The following resources and routines are implemented to support
340 340 * "smart" probing, which caches the scsi_probe() results in an array,
341 341 * in order to help avoid long probe times.
342 342 */
343 343 struct sd_scsi_probe_cache {
344 344 struct sd_scsi_probe_cache *next;
345 345 dev_info_t *pdip;
346 346 int cache[NTARGETS_WIDE];
347 347 };
348 348
349 349 static kmutex_t sd_scsi_probe_cache_mutex;
350 350 static struct sd_scsi_probe_cache *sd_scsi_probe_cache_head = NULL;
351 351
352 352 /*
353 353 * Really we only need protection on the head of the linked list, but
354 354 * better safe than sorry.
355 355 */
356 356 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
357 357 sd_scsi_probe_cache::next sd_scsi_probe_cache::pdip))
358 358
359 359 _NOTE(MUTEX_PROTECTS_DATA(sd_scsi_probe_cache_mutex,
360 360 sd_scsi_probe_cache_head))
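/*
 * A minimal sketch (not the driver's actual code; "pdip" and "tgt" are
 * hypothetical arguments, error handling elided) of how the cache avoids
 * a probe: find the per-HBA node, then reuse the per-target slot instead
 * of paying for another scsi_probe().
 */
#if 0
	struct sd_scsi_probe_cache *cp;

	mutex_enter(&sd_scsi_probe_cache_mutex);
	for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
		if (cp->pdip == pdip)
			break;			/* node for this HBA */
	}
	if ((cp != NULL) && (cp->cache[tgt] == SCSIPROBE_EXISTS)) {
		mutex_exit(&sd_scsi_probe_cache_mutex);
		return (SCSIPROBE_EXISTS);	/* cached hit, no probe */
	}
	mutex_exit(&sd_scsi_probe_cache_mutex);
#endif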
361 361
362 362 /*
363 363 * Power attribute table
364 364 */
365 365 static sd_power_attr_ss sd_pwr_ss = {
366 366 { "NAME=spindle-motor", "0=off", "1=on", NULL },
367 367 {0, 100},
368 368 {30, 0},
369 369 {20000, 0}
370 370 };
371 371
372 372 static sd_power_attr_pc sd_pwr_pc = {
373 373 { "NAME=spindle-motor", "0=stopped", "1=standby", "2=idle",
374 374 "3=active", NULL },
375 375 {0, 0, 0, 100},
376 376 {90, 90, 20, 0},
377 377 {15000, 15000, 1000, 0}
378 378 };
379 379
380 380 /*
381 381 * Power level to power condition
382 382 */
383 383 static int sd_pl2pc[] = {
384 384 SD_TARGET_START_VALID,
385 385 SD_TARGET_STANDBY,
386 386 SD_TARGET_IDLE,
387 387 SD_TARGET_ACTIVE
388 388 };
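/*
 * I.e. a PM framework power level (0 through 3) indexes this table to
 * obtain the corresponding SCSI power-condition value, e.g.
 * sd_pl2pc[1] == SD_TARGET_STANDBY.
 */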
389 389
390 390 /*
391 391 * Vendor specific data name property declarations
392 392 */
393 393
394 394 #if defined(__fibre) || defined(__i386) || defined(__amd64)
395 395
396 396 static sd_tunables seagate_properties = {
397 397 SEAGATE_THROTTLE_VALUE,
398 398 0,
399 399 0,
400 400 0,
401 401 0,
402 402 0,
403 403 0,
404 404 0,
405 405 0
406 406 };
407 407
408 408
409 409 static sd_tunables fujitsu_properties = {
410 410 FUJITSU_THROTTLE_VALUE,
411 411 0,
412 412 0,
413 413 0,
414 414 0,
415 415 0,
416 416 0,
417 417 0,
418 418 0
419 419 };
420 420
421 421 static sd_tunables ibm_properties = {
422 422 IBM_THROTTLE_VALUE,
423 423 0,
424 424 0,
425 425 0,
426 426 0,
427 427 0,
428 428 0,
429 429 0,
430 430 0
431 431 };
432 432
433 433 static sd_tunables purple_properties = {
434 434 PURPLE_THROTTLE_VALUE,
435 435 0,
436 436 0,
437 437 PURPLE_BUSY_RETRIES,
438 438 PURPLE_RESET_RETRY_COUNT,
439 439 PURPLE_RESERVE_RELEASE_TIME,
440 440 0,
441 441 0,
442 442 0
443 443 };
444 444
445 445 static sd_tunables sve_properties = {
446 446 SVE_THROTTLE_VALUE,
447 447 0,
448 448 0,
449 449 SVE_BUSY_RETRIES,
450 450 SVE_RESET_RETRY_COUNT,
451 451 SVE_RESERVE_RELEASE_TIME,
452 452 SVE_MIN_THROTTLE_VALUE,
453 453 SVE_DISKSORT_DISABLED_FLAG,
454 454 0
455 455 };
456 456
457 457 static sd_tunables maserati_properties = {
458 458 0,
459 459 0,
460 460 0,
461 461 0,
462 462 0,
463 463 0,
464 464 0,
465 465 MASERATI_DISKSORT_DISABLED_FLAG,
466 466 MASERATI_LUN_RESET_ENABLED_FLAG
467 467 };
468 468
469 469 static sd_tunables pirus_properties = {
470 470 PIRUS_THROTTLE_VALUE,
471 471 0,
472 472 PIRUS_NRR_COUNT,
473 473 PIRUS_BUSY_RETRIES,
474 474 PIRUS_RESET_RETRY_COUNT,
475 475 0,
476 476 PIRUS_MIN_THROTTLE_VALUE,
477 477 PIRUS_DISKSORT_DISABLED_FLAG,
478 478 PIRUS_LUN_RESET_ENABLED_FLAG
479 479 };
480 480
481 481 #endif
482 482
483 483 #if (defined(__sparc) && !defined(__fibre)) || \
484 484 (defined(__i386) || defined(__amd64))
485 485
486 486
487 487 static sd_tunables elite_properties = {
488 488 ELITE_THROTTLE_VALUE,
489 489 0,
490 490 0,
491 491 0,
492 492 0,
493 493 0,
494 494 0,
495 495 0,
496 496 0
497 497 };
498 498
499 499 static sd_tunables st31200n_properties = {
500 500 ST31200N_THROTTLE_VALUE,
501 501 0,
502 502 0,
503 503 0,
504 504 0,
505 505 0,
506 506 0,
507 507 0,
508 508 0
509 509 };
510 510
511 511 #endif /* Fibre or not */
512 512
513 513 static sd_tunables lsi_properties_scsi = {
514 514 LSI_THROTTLE_VALUE,
515 515 0,
516 516 LSI_NOTREADY_RETRIES,
517 517 0,
518 518 0,
519 519 0,
520 520 0,
521 521 0,
522 522 0
523 523 };
524 524
525 525 static sd_tunables symbios_properties = {
526 526 SYMBIOS_THROTTLE_VALUE,
527 527 0,
528 528 SYMBIOS_NOTREADY_RETRIES,
529 529 0,
530 530 0,
531 531 0,
532 532 0,
533 533 0,
534 534 0
535 535 };
536 536
537 537 static sd_tunables lsi_properties = {
538 538 0,
539 539 0,
540 540 LSI_NOTREADY_RETRIES,
541 541 0,
542 542 0,
543 543 0,
544 544 0,
545 545 0,
546 546 0
547 547 };
548 548
549 549 static sd_tunables lsi_oem_properties = {
550 550 0,
551 551 0,
552 552 LSI_OEM_NOTREADY_RETRIES,
553 553 0,
554 554 0,
555 555 0,
556 556 0,
557 557 0,
558 558 0,
559 559 1
560 560 };
561 561
562 562
563 563
564 564 #if (defined(SD_PROP_TST))
565 565
566 566 #define SD_TST_CTYPE_VAL CTYPE_CDROM
567 567 #define SD_TST_THROTTLE_VAL 16
568 568 #define SD_TST_NOTREADY_VAL 12
569 569 #define SD_TST_BUSY_VAL 60
570 570 #define SD_TST_RST_RETRY_VAL 36
571 571 #define SD_TST_RSV_REL_TIME 60
572 572
573 573 static sd_tunables tst_properties = {
574 574 SD_TST_THROTTLE_VAL,
575 575 SD_TST_CTYPE_VAL,
576 576 SD_TST_NOTREADY_VAL,
577 577 SD_TST_BUSY_VAL,
578 578 SD_TST_RST_RETRY_VAL,
579 579 SD_TST_RSV_REL_TIME,
580 580 0,
581 581 0,
582 582 0
583 583 };
584 584 #endif
585 585
586 586 /* This is similar to the ANSI toupper implementation */
587 587 #define SD_TOUPPER(C) (((C) >= 'a' && (C) <= 'z') ? (C) - 'a' + 'A' : (C))
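/* E.g. SD_TOUPPER('c') yields 'C'; any non-lowercase C is returned as is. */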
588 588
589 589 /*
590 590 * Static Driver Configuration Table
591 591 *
592 592 * This is the table of disks which need throttle adjustment (or, perhaps
593 593 * something else as defined by the flags at a future time.) device_id
594 594 * is a string consisting of concatenated vid (vendor), pid (product/model)
595 595 * and revision strings as defined in the scsi_inquiry structure. Offsets of
596 596 * the parts of the string are as defined by the sizes in the scsi_inquiry
597 597 * structure. Device type is searched as far as the device_id string is
598 598 * defined. Flags defines which values are to be set in the driver from the
599 599 * properties list.
600 600 *
601 601 * Entries below which begin and end with a "*" are a special case.
602 602 * These do not have a specific vendor, and the string which follows
603 603 * can appear anywhere in the 16 byte PID portion of the inquiry data.
604 604 *
605 605 * Entries below which begin and end with a " " (blank) are a special
606 606 * case. The comparison function will treat multiple consecutive blanks
607 607 * as equivalent to a single blank. For example, this causes a
608 608 * sd_disk_table entry of " NEC CDROM " to match a device's id string
609 609 * of "NEC CDROM".
610 610 *
611 611 * Note: The MD21 controller type has been obsoleted.
612 612 * ST318202F is a Legacy device
613 613 * MAM3182FC, MAM3364FC, MAM3738FC do not appear to have ever been
614 614 * made with an FC connection. The entries here are a legacy.
615 615 */
616 616 static sd_disk_config_t sd_disk_table[] = {
617 617 #if defined(__fibre) || defined(__i386) || defined(__amd64)
618 618 { "SEAGATE ST34371FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
619 619 { "SEAGATE ST19171FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
620 620 { "SEAGATE ST39102FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
621 621 { "SEAGATE ST39103FC", SD_CONF_BSET_THROTTLE, &seagate_properties },
622 622 { "SEAGATE ST118273F", SD_CONF_BSET_THROTTLE, &seagate_properties },
623 623 { "SEAGATE ST318202F", SD_CONF_BSET_THROTTLE, &seagate_properties },
624 624 { "SEAGATE ST318203F", SD_CONF_BSET_THROTTLE, &seagate_properties },
625 625 { "SEAGATE ST136403F", SD_CONF_BSET_THROTTLE, &seagate_properties },
626 626 { "SEAGATE ST318304F", SD_CONF_BSET_THROTTLE, &seagate_properties },
627 627 { "SEAGATE ST336704F", SD_CONF_BSET_THROTTLE, &seagate_properties },
628 628 { "SEAGATE ST373405F", SD_CONF_BSET_THROTTLE, &seagate_properties },
629 629 { "SEAGATE ST336605F", SD_CONF_BSET_THROTTLE, &seagate_properties },
630 630 { "SEAGATE ST336752F", SD_CONF_BSET_THROTTLE, &seagate_properties },
631 631 { "SEAGATE ST318452F", SD_CONF_BSET_THROTTLE, &seagate_properties },
632 632 { "FUJITSU MAG3091F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
633 633 { "FUJITSU MAG3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
634 634 { "FUJITSU MAA3182F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
635 635 { "FUJITSU MAF3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
636 636 { "FUJITSU MAL3364F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
637 637 { "FUJITSU MAL3738F", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
638 638 { "FUJITSU MAM3182FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
639 639 { "FUJITSU MAM3364FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
640 640 { "FUJITSU MAM3738FC", SD_CONF_BSET_THROTTLE, &fujitsu_properties },
641 641 { "IBM DDYFT1835", SD_CONF_BSET_THROTTLE, &ibm_properties },
642 642 { "IBM DDYFT3695", SD_CONF_BSET_THROTTLE, &ibm_properties },
643 643 { "IBM IC35LF2D2", SD_CONF_BSET_THROTTLE, &ibm_properties },
644 644 { "IBM IC35LF2PR", SD_CONF_BSET_THROTTLE, &ibm_properties },
645 645 { "IBM 1724-100", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
646 646 { "IBM 1726-2xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
647 647 { "IBM 1726-22x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
648 648 { "IBM 1726-4xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
649 649 { "IBM 1726-42x", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
650 650 { "IBM 1726-3xx", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
651 651 { "IBM 3526", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
652 652 { "IBM 3542", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
653 653 { "IBM 3552", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
654 654 { "IBM 1722", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
655 655 { "IBM 1742", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
656 656 { "IBM 1815", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
657 657 { "IBM FAStT", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
658 658 { "IBM 1814", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
659 659 { "IBM 1814-200", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
660 660 { "IBM 1818", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
661 661 { "DELL MD3000", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
662 662 { "DELL MD3000i", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
663 663 { "LSI INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
664 664 { "ENGENIO INF", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
665 665 { "SGI TP", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
666 666 { "SGI IS", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
667 667 { "*CSM100_*", SD_CONF_BSET_NRR_COUNT |
668 668 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
669 669 { "*CSM200_*", SD_CONF_BSET_NRR_COUNT |
670 670 SD_CONF_BSET_CACHE_IS_NV, &lsi_oem_properties },
671 671 { "Fujitsu SX300", SD_CONF_BSET_THROTTLE, &lsi_oem_properties },
672 672 { "LSI", SD_CONF_BSET_NRR_COUNT, &lsi_properties },
673 673 { "SUN T3", SD_CONF_BSET_THROTTLE |
674 674 SD_CONF_BSET_BSY_RETRY_COUNT|
675 675 SD_CONF_BSET_RST_RETRIES|
676 676 SD_CONF_BSET_RSV_REL_TIME,
677 677 &purple_properties },
678 678 { "SUN SESS01", SD_CONF_BSET_THROTTLE |
679 679 SD_CONF_BSET_BSY_RETRY_COUNT|
680 680 SD_CONF_BSET_RST_RETRIES|
681 681 SD_CONF_BSET_RSV_REL_TIME|
682 682 SD_CONF_BSET_MIN_THROTTLE|
683 683 SD_CONF_BSET_DISKSORT_DISABLED,
684 684 &sve_properties },
685 685 { "SUN T4", SD_CONF_BSET_THROTTLE |
686 686 SD_CONF_BSET_BSY_RETRY_COUNT|
687 687 SD_CONF_BSET_RST_RETRIES|
688 688 SD_CONF_BSET_RSV_REL_TIME,
689 689 &purple_properties },
690 690 { "SUN SVE01", SD_CONF_BSET_DISKSORT_DISABLED |
691 691 SD_CONF_BSET_LUN_RESET_ENABLED,
692 692 &maserati_properties },
693 693 { "SUN SE6920", SD_CONF_BSET_THROTTLE |
694 694 SD_CONF_BSET_NRR_COUNT|
695 695 SD_CONF_BSET_BSY_RETRY_COUNT|
696 696 SD_CONF_BSET_RST_RETRIES|
697 697 SD_CONF_BSET_MIN_THROTTLE|
698 698 SD_CONF_BSET_DISKSORT_DISABLED|
699 699 SD_CONF_BSET_LUN_RESET_ENABLED,
700 700 &pirus_properties },
701 701 { "SUN SE6940", SD_CONF_BSET_THROTTLE |
702 702 SD_CONF_BSET_NRR_COUNT|
703 703 SD_CONF_BSET_BSY_RETRY_COUNT|
704 704 SD_CONF_BSET_RST_RETRIES|
705 705 SD_CONF_BSET_MIN_THROTTLE|
706 706 SD_CONF_BSET_DISKSORT_DISABLED|
707 707 SD_CONF_BSET_LUN_RESET_ENABLED,
708 708 &pirus_properties },
709 709 { "SUN StorageTek 6920", SD_CONF_BSET_THROTTLE |
710 710 SD_CONF_BSET_NRR_COUNT|
711 711 SD_CONF_BSET_BSY_RETRY_COUNT|
712 712 SD_CONF_BSET_RST_RETRIES|
713 713 SD_CONF_BSET_MIN_THROTTLE|
714 714 SD_CONF_BSET_DISKSORT_DISABLED|
715 715 SD_CONF_BSET_LUN_RESET_ENABLED,
716 716 &pirus_properties },
717 717 { "SUN StorageTek 6940", SD_CONF_BSET_THROTTLE |
718 718 SD_CONF_BSET_NRR_COUNT|
719 719 SD_CONF_BSET_BSY_RETRY_COUNT|
720 720 SD_CONF_BSET_RST_RETRIES|
721 721 SD_CONF_BSET_MIN_THROTTLE|
722 722 SD_CONF_BSET_DISKSORT_DISABLED|
723 723 SD_CONF_BSET_LUN_RESET_ENABLED,
724 724 &pirus_properties },
725 725 { "SUN PSX1000", SD_CONF_BSET_THROTTLE |
726 726 SD_CONF_BSET_NRR_COUNT|
727 727 SD_CONF_BSET_BSY_RETRY_COUNT|
728 728 SD_CONF_BSET_RST_RETRIES|
729 729 SD_CONF_BSET_MIN_THROTTLE|
730 730 SD_CONF_BSET_DISKSORT_DISABLED|
731 731 SD_CONF_BSET_LUN_RESET_ENABLED,
732 732 &pirus_properties },
733 733 { "SUN SE6330", SD_CONF_BSET_THROTTLE |
734 734 SD_CONF_BSET_NRR_COUNT|
735 735 SD_CONF_BSET_BSY_RETRY_COUNT|
736 736 SD_CONF_BSET_RST_RETRIES|
737 737 SD_CONF_BSET_MIN_THROTTLE|
738 738 SD_CONF_BSET_DISKSORT_DISABLED|
739 739 SD_CONF_BSET_LUN_RESET_ENABLED,
740 740 &pirus_properties },
741 741 { "SUN STK6580_6780", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
742 742 { "SUN SUN_6180", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
743 743 { "STK OPENstorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
744 744 { "STK OpenStorage", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
745 745 { "STK BladeCtlr", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
746 746 { "STK FLEXLINE", SD_CONF_BSET_NRR_COUNT, &lsi_oem_properties },
747 747 { "SYMBIOS", SD_CONF_BSET_NRR_COUNT, &symbios_properties },
748 748 #endif /* fibre or NON-sparc platforms */
749 749 #if ((defined(__sparc) && !defined(__fibre)) || \
750 750 (defined(__i386) || defined(__amd64)))
751 751 { "SEAGATE ST42400N", SD_CONF_BSET_THROTTLE, &elite_properties },
752 752 { "SEAGATE ST31200N", SD_CONF_BSET_THROTTLE, &st31200n_properties },
753 753 { "SEAGATE ST41600N", SD_CONF_BSET_TUR_CHECK, NULL },
754 754 { "CONNER CP30540", SD_CONF_BSET_NOCACHE, NULL },
755 755 { "*SUN0104*", SD_CONF_BSET_FAB_DEVID, NULL },
756 756 { "*SUN0207*", SD_CONF_BSET_FAB_DEVID, NULL },
757 757 { "*SUN0327*", SD_CONF_BSET_FAB_DEVID, NULL },
758 758 { "*SUN0340*", SD_CONF_BSET_FAB_DEVID, NULL },
759 759 { "*SUN0424*", SD_CONF_BSET_FAB_DEVID, NULL },
760 760 { "*SUN0669*", SD_CONF_BSET_FAB_DEVID, NULL },
761 761 { "*SUN1.0G*", SD_CONF_BSET_FAB_DEVID, NULL },
762 762 { "SYMBIOS INF-01-00 ", SD_CONF_BSET_FAB_DEVID, NULL },
763 763 { "SYMBIOS", SD_CONF_BSET_THROTTLE|SD_CONF_BSET_NRR_COUNT,
764 764 &symbios_properties },
765 765 { "LSI", SD_CONF_BSET_THROTTLE | SD_CONF_BSET_NRR_COUNT,
766 766 &lsi_properties_scsi },
767 767 #if defined(__i386) || defined(__amd64)
768 768 { " NEC CD-ROM DRIVE:260 ", (SD_CONF_BSET_PLAYMSF_BCD
769 769 | SD_CONF_BSET_READSUB_BCD
770 770 | SD_CONF_BSET_READ_TOC_ADDR_BCD
771 771 | SD_CONF_BSET_NO_READ_HEADER
772 772 | SD_CONF_BSET_READ_CD_XD4), NULL },
773 773
774 774 { " NEC CD-ROM DRIVE:270 ", (SD_CONF_BSET_PLAYMSF_BCD
775 775 | SD_CONF_BSET_READSUB_BCD
776 776 | SD_CONF_BSET_READ_TOC_ADDR_BCD
777 777 | SD_CONF_BSET_NO_READ_HEADER
778 778 | SD_CONF_BSET_READ_CD_XD4), NULL },
779 779 #endif /* __i386 || __amd64 */
780 780 #endif /* sparc NON-fibre or NON-sparc platforms */
781 781
782 782 #if (defined(SD_PROP_TST))
783 783 { "VENDOR PRODUCT ", (SD_CONF_BSET_THROTTLE
784 784 | SD_CONF_BSET_CTYPE
785 785 | SD_CONF_BSET_NRR_COUNT
786 786 | SD_CONF_BSET_FAB_DEVID
787 787 | SD_CONF_BSET_NOCACHE
788 788 | SD_CONF_BSET_BSY_RETRY_COUNT
789 789 | SD_CONF_BSET_PLAYMSF_BCD
790 790 | SD_CONF_BSET_READSUB_BCD
791 791 | SD_CONF_BSET_READ_TOC_TRK_BCD
792 792 | SD_CONF_BSET_READ_TOC_ADDR_BCD
793 793 | SD_CONF_BSET_NO_READ_HEADER
794 794 | SD_CONF_BSET_READ_CD_XD4
795 795 | SD_CONF_BSET_RST_RETRIES
796 796 | SD_CONF_BSET_RSV_REL_TIME
797 797 | SD_CONF_BSET_TUR_CHECK), &tst_properties},
798 798 #endif
799 799 };
800 800
801 801 static const int sd_disk_table_size =
802 802 sizeof (sd_disk_table) / sizeof (sd_disk_config_t);
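/*
 * Worked example of the starred rule above (hypothetical inquiry data):
 * a device whose 16-byte PID field contains "SUN0424" anywhere, say
 * "ST15230W SUN0424", matches the "*SUN0424*" entry, while
 * " NEC CD-ROM DRIVE:260 " relies on the blank-collapsing rule described
 * before the table.
 */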
803 803
804 804 /*
805 805 * Emulation mode disk drive VID/PID table
806 806 */
807 807 static char sd_flash_dev_table[][25] = {
808 808 "ATA MARVELL SD88SA02",
809 809 "MARVELL SD88SA02",
810 810 "TOSHIBA THNSNV05",
811 811 };
812 812
813 813 static const int sd_flash_dev_table_size =
814 814 sizeof (sd_flash_dev_table) / sizeof (sd_flash_dev_table[0]);
815 815
816 816 #define SD_INTERCONNECT_PARALLEL 0
817 817 #define SD_INTERCONNECT_FABRIC 1
818 818 #define SD_INTERCONNECT_FIBRE 2
819 819 #define SD_INTERCONNECT_SSA 3
820 820 #define SD_INTERCONNECT_SATA 4
821 821 #define SD_INTERCONNECT_SAS 5
822 822
823 823 #define SD_IS_PARALLEL_SCSI(un) \
824 824 ((un)->un_interconnect_type == SD_INTERCONNECT_PARALLEL)
825 825 #define SD_IS_SERIAL(un) \
826 826 (((un)->un_interconnect_type == SD_INTERCONNECT_SATA) ||\
827 827 ((un)->un_interconnect_type == SD_INTERCONNECT_SAS))
828 828
829 829 /*
830 830 * Definitions used by device id registration routines
831 831 */
832 832 #define VPD_HEAD_OFFSET 3 /* size of head for vpd page */
833 833 #define VPD_PAGE_LENGTH 3 /* offset for page length data */
834 834 #define VPD_MODE_PAGE 1 /* offset into vpd pg for "page code" */
835 835
836 836 static kmutex_t sd_sense_mutex = {0};
837 837
838 838 /*
839 839 * Macros for updates of the driver state
840 840 */
841 841 #define New_state(un, s) \
842 842 (un)->un_last_state = (un)->un_state, (un)->un_state = (s)
843 843 #define Restore_state(un) \
844 844 { uchar_t tmp = (un)->un_last_state; New_state((un), tmp); }
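/*
 * Note that New_state() saves the outgoing state in un_last_state, and
 * Restore_state() swaps the two back, so the current and previous states
 * are simply exchanged.
 */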
845 845
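/*
 * Per-CDB-group limits: each entry pairs a CDB size and group code with
 * the largest LBA and transfer length that group can encode, e.g. the
 * 6-byte group 0 read/write CDBs top out at a 21-bit LBA (0x1FFFFF) and
 * an 8-bit block count, while 16-byte group 4 CDBs carry a 64-bit LBA
 * and a 32-bit count.
 */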
846 846 static struct sd_cdbinfo sd_cdbtab[] = {
847 847 { CDB_GROUP0, 0x00, 0x1FFFFF, 0xFF, },
848 848 { CDB_GROUP1, SCMD_GROUP1, 0xFFFFFFFF, 0xFFFF, },
849 849 { CDB_GROUP5, SCMD_GROUP5, 0xFFFFFFFF, 0xFFFFFFFF, },
850 850 { CDB_GROUP4, SCMD_GROUP4, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFF, },
851 851 };
852 852
853 853 /*
854 854 * Specifies the number of seconds that must have elapsed since the last
855 855 * cmd. has completed for a device to be declared idle to the PM framework.
856 856 */
857 857 static int sd_pm_idletime = 1;
858 858
859 859 /*
860 860 * Internal function prototypes
861 861 */
862 862
863 863 #if (defined(__fibre))
864 864 /*
865 865 * These #defines are to avoid namespace collisions that occur because this
866 866 * code is currently used to compile two separate driver modules: sd and ssd.
867 867 * All function names need to be treated this way (even if declared static)
868 868 * in order to allow the debugger to resolve the names properly.
869 869 * It is anticipated that in the near future the ssd module will be obsoleted,
870 870 * at which time this ugliness should go away.
871 871 */
872 872 #define sd_log_trace ssd_log_trace
873 873 #define sd_log_info ssd_log_info
874 874 #define sd_log_err ssd_log_err
875 875 #define sdprobe ssdprobe
876 876 #define sdinfo ssdinfo
877 877 #define sd_prop_op ssd_prop_op
878 878 #define sd_scsi_probe_cache_init ssd_scsi_probe_cache_init
879 879 #define sd_scsi_probe_cache_fini ssd_scsi_probe_cache_fini
880 880 #define sd_scsi_clear_probe_cache ssd_scsi_clear_probe_cache
881 881 #define sd_scsi_probe_with_cache ssd_scsi_probe_with_cache
882 882 #define sd_scsi_target_lun_init ssd_scsi_target_lun_init
883 883 #define sd_scsi_target_lun_fini ssd_scsi_target_lun_fini
884 884 #define sd_scsi_get_target_lun_count ssd_scsi_get_target_lun_count
885 885 #define sd_scsi_update_lun_on_target ssd_scsi_update_lun_on_target
886 886 #define sd_spin_up_unit ssd_spin_up_unit
887 887 #define sd_enable_descr_sense ssd_enable_descr_sense
888 888 #define sd_reenable_dsense_task ssd_reenable_dsense_task
889 889 #define sd_set_mmc_caps ssd_set_mmc_caps
890 890 #define sd_read_unit_properties ssd_read_unit_properties
891 891 #define sd_process_sdconf_file ssd_process_sdconf_file
892 892 #define sd_process_sdconf_table ssd_process_sdconf_table
893 893 #define sd_sdconf_id_match ssd_sdconf_id_match
894 894 #define sd_blank_cmp ssd_blank_cmp
895 895 #define sd_chk_vers1_data ssd_chk_vers1_data
896 896 #define sd_set_vers1_properties ssd_set_vers1_properties
897 897 #define sd_check_solid_state ssd_check_solid_state
898 898 #define sd_check_emulation_mode ssd_check_emulation_mode
899 899
900 900 #define sd_get_physical_geometry ssd_get_physical_geometry
901 901 #define sd_get_virtual_geometry ssd_get_virtual_geometry
902 902 #define sd_update_block_info ssd_update_block_info
903 903 #define sd_register_devid ssd_register_devid
904 904 #define sd_get_devid ssd_get_devid
905 905 #define sd_create_devid ssd_create_devid
906 906 #define sd_write_deviceid ssd_write_deviceid
907 907 #define sd_check_vpd_page_support ssd_check_vpd_page_support
908 908 #define sd_setup_pm ssd_setup_pm
909 909 #define sd_create_pm_components ssd_create_pm_components
910 910 #define sd_ddi_suspend ssd_ddi_suspend
911 911 #define sd_ddi_resume ssd_ddi_resume
912 912 #define sd_pm_state_change ssd_pm_state_change
913 913 #define sdpower ssdpower
914 914 #define sdattach ssdattach
915 915 #define sddetach ssddetach
916 916 #define sd_unit_attach ssd_unit_attach
917 917 #define sd_unit_detach ssd_unit_detach
918 918 #define sd_set_unit_attributes ssd_set_unit_attributes
919 919 #define sd_create_errstats ssd_create_errstats
920 920 #define sd_set_errstats ssd_set_errstats
921 921 #define sd_set_pstats ssd_set_pstats
922 922 #define sddump ssddump
923 923 #define sd_scsi_poll ssd_scsi_poll
924 924 #define sd_send_polled_RQS ssd_send_polled_RQS
925 925 #define sd_ddi_scsi_poll ssd_ddi_scsi_poll
926 926 #define sd_init_event_callbacks ssd_init_event_callbacks
927 927 #define sd_event_callback ssd_event_callback
928 928 #define sd_cache_control ssd_cache_control
929 929 #define sd_get_write_cache_enabled ssd_get_write_cache_enabled
930 930 #define sd_get_nv_sup ssd_get_nv_sup
931 931 #define sd_make_device ssd_make_device
932 932 #define sdopen ssdopen
933 933 #define sdclose ssdclose
934 934 #define sd_ready_and_valid ssd_ready_and_valid
935 935 #define sdmin ssdmin
936 936 #define sdread ssdread
937 937 #define sdwrite ssdwrite
938 938 #define sdaread ssdaread
939 939 #define sdawrite ssdawrite
940 940 #define sdstrategy ssdstrategy
941 941 #define sdioctl ssdioctl
942 942 #define sd_mapblockaddr_iostart ssd_mapblockaddr_iostart
943 943 #define sd_mapblocksize_iostart ssd_mapblocksize_iostart
944 944 #define sd_checksum_iostart ssd_checksum_iostart
945 945 #define sd_checksum_uscsi_iostart ssd_checksum_uscsi_iostart
946 946 #define sd_pm_iostart ssd_pm_iostart
947 947 #define sd_core_iostart ssd_core_iostart
948 948 #define sd_mapblockaddr_iodone ssd_mapblockaddr_iodone
949 949 #define sd_mapblocksize_iodone ssd_mapblocksize_iodone
950 950 #define sd_checksum_iodone ssd_checksum_iodone
951 951 #define sd_checksum_uscsi_iodone ssd_checksum_uscsi_iodone
952 952 #define sd_pm_iodone ssd_pm_iodone
953 953 #define sd_initpkt_for_buf ssd_initpkt_for_buf
954 954 #define sd_destroypkt_for_buf ssd_destroypkt_for_buf
955 955 #define sd_setup_rw_pkt ssd_setup_rw_pkt
956 956 #define sd_setup_next_rw_pkt ssd_setup_next_rw_pkt
957 957 #define sd_buf_iodone ssd_buf_iodone
958 958 #define sd_uscsi_strategy ssd_uscsi_strategy
959 959 #define sd_initpkt_for_uscsi ssd_initpkt_for_uscsi
960 960 #define sd_destroypkt_for_uscsi ssd_destroypkt_for_uscsi
961 961 #define sd_uscsi_iodone ssd_uscsi_iodone
962 962 #define sd_xbuf_strategy ssd_xbuf_strategy
963 963 #define sd_xbuf_init ssd_xbuf_init
964 964 #define sd_pm_entry ssd_pm_entry
965 965 #define sd_pm_exit ssd_pm_exit
966 966
967 967 #define sd_pm_idletimeout_handler ssd_pm_idletimeout_handler
968 968 #define sd_pm_timeout_handler ssd_pm_timeout_handler
969 969
970 970 #define sd_add_buf_to_waitq ssd_add_buf_to_waitq
971 971 #define sdintr ssdintr
972 972 #define sd_start_cmds ssd_start_cmds
973 973 #define sd_send_scsi_cmd ssd_send_scsi_cmd
974 974 #define sd_bioclone_alloc ssd_bioclone_alloc
975 975 #define sd_bioclone_free ssd_bioclone_free
976 976 #define sd_shadow_buf_alloc ssd_shadow_buf_alloc
977 977 #define sd_shadow_buf_free ssd_shadow_buf_free
978 978 #define sd_print_transport_rejected_message \
979 979 ssd_print_transport_rejected_message
980 980 #define sd_retry_command ssd_retry_command
981 981 #define sd_set_retry_bp ssd_set_retry_bp
982 982 #define sd_send_request_sense_command ssd_send_request_sense_command
983 983 #define sd_start_retry_command ssd_start_retry_command
984 984 #define sd_start_direct_priority_command \
985 985 ssd_start_direct_priority_command
986 986 #define sd_return_failed_command ssd_return_failed_command
987 987 #define sd_return_failed_command_no_restart \
988 988 ssd_return_failed_command_no_restart
989 989 #define sd_return_command ssd_return_command
990 990 #define sd_sync_with_callback ssd_sync_with_callback
991 991 #define sdrunout ssdrunout
992 992 #define sd_mark_rqs_busy ssd_mark_rqs_busy
993 993 #define sd_mark_rqs_idle ssd_mark_rqs_idle
994 994 #define sd_reduce_throttle ssd_reduce_throttle
995 995 #define sd_restore_throttle ssd_restore_throttle
996 996 #define sd_print_incomplete_msg ssd_print_incomplete_msg
997 997 #define sd_init_cdb_limits ssd_init_cdb_limits
998 998 #define sd_pkt_status_good ssd_pkt_status_good
999 999 #define sd_pkt_status_check_condition ssd_pkt_status_check_condition
1000 1000 #define sd_pkt_status_busy ssd_pkt_status_busy
1001 1001 #define sd_pkt_status_reservation_conflict \
1002 1002 ssd_pkt_status_reservation_conflict
1003 1003 #define sd_pkt_status_qfull ssd_pkt_status_qfull
1004 1004 #define sd_handle_request_sense ssd_handle_request_sense
1005 1005 #define sd_handle_auto_request_sense ssd_handle_auto_request_sense
1006 1006 #define sd_print_sense_failed_msg ssd_print_sense_failed_msg
1007 1007 #define sd_validate_sense_data ssd_validate_sense_data
1008 1008 #define sd_decode_sense ssd_decode_sense
1009 1009 #define sd_print_sense_msg ssd_print_sense_msg
1010 1010 #define sd_sense_key_no_sense ssd_sense_key_no_sense
1011 1011 #define sd_sense_key_recoverable_error ssd_sense_key_recoverable_error
1012 1012 #define sd_sense_key_not_ready ssd_sense_key_not_ready
1013 1013 #define sd_sense_key_medium_or_hardware_error \
1014 1014 ssd_sense_key_medium_or_hardware_error
1015 1015 #define sd_sense_key_illegal_request ssd_sense_key_illegal_request
1016 1016 #define sd_sense_key_unit_attention ssd_sense_key_unit_attention
1017 1017 #define sd_sense_key_fail_command ssd_sense_key_fail_command
1018 1018 #define sd_sense_key_blank_check ssd_sense_key_blank_check
1019 1019 #define sd_sense_key_aborted_command ssd_sense_key_aborted_command
1020 1020 #define sd_sense_key_default ssd_sense_key_default
1021 1021 #define sd_print_retry_msg ssd_print_retry_msg
1022 1022 #define sd_print_cmd_incomplete_msg ssd_print_cmd_incomplete_msg
1023 1023 #define sd_pkt_reason_cmd_incomplete ssd_pkt_reason_cmd_incomplete
1024 1024 #define sd_pkt_reason_cmd_tran_err ssd_pkt_reason_cmd_tran_err
1025 1025 #define sd_pkt_reason_cmd_reset ssd_pkt_reason_cmd_reset
1026 1026 #define sd_pkt_reason_cmd_aborted ssd_pkt_reason_cmd_aborted
1027 1027 #define sd_pkt_reason_cmd_timeout ssd_pkt_reason_cmd_timeout
1028 1028 #define sd_pkt_reason_cmd_unx_bus_free ssd_pkt_reason_cmd_unx_bus_free
1029 1029 #define sd_pkt_reason_cmd_tag_reject ssd_pkt_reason_cmd_tag_reject
1030 1030 #define sd_pkt_reason_default ssd_pkt_reason_default
1031 1031 #define sd_reset_target ssd_reset_target
1032 1032 #define sd_start_stop_unit_callback ssd_start_stop_unit_callback
1033 1033 #define sd_start_stop_unit_task ssd_start_stop_unit_task
1034 1034 #define sd_taskq_create ssd_taskq_create
1035 1035 #define sd_taskq_delete ssd_taskq_delete
1036 1036 #define sd_target_change_task ssd_target_change_task
1037 1037 #define sd_log_dev_status_event ssd_log_dev_status_event
1038 1038 #define sd_log_lun_expansion_event ssd_log_lun_expansion_event
1039 1039 #define sd_log_eject_request_event ssd_log_eject_request_event
1040 1040 #define sd_media_change_task ssd_media_change_task
1041 1041 #define sd_handle_mchange ssd_handle_mchange
1042 1042 #define sd_send_scsi_DOORLOCK ssd_send_scsi_DOORLOCK
1043 1043 #define sd_send_scsi_READ_CAPACITY ssd_send_scsi_READ_CAPACITY
1044 1044 #define sd_send_scsi_READ_CAPACITY_16 ssd_send_scsi_READ_CAPACITY_16
1045 1045 #define sd_send_scsi_GET_CONFIGURATION ssd_send_scsi_GET_CONFIGURATION
1046 1046 #define sd_send_scsi_feature_GET_CONFIGURATION \
1047 1047 ssd_send_scsi_feature_GET_CONFIGURATION
1048 1048 #define sd_send_scsi_START_STOP_UNIT ssd_send_scsi_START_STOP_UNIT
1049 1049 #define sd_send_scsi_INQUIRY ssd_send_scsi_INQUIRY
1050 1050 #define sd_send_scsi_TEST_UNIT_READY ssd_send_scsi_TEST_UNIT_READY
1051 1051 #define sd_send_scsi_PERSISTENT_RESERVE_IN \
1052 1052 ssd_send_scsi_PERSISTENT_RESERVE_IN
1053 1053 #define sd_send_scsi_PERSISTENT_RESERVE_OUT \
1054 1054 ssd_send_scsi_PERSISTENT_RESERVE_OUT
1055 1055 #define sd_send_scsi_SYNCHRONIZE_CACHE ssd_send_scsi_SYNCHRONIZE_CACHE
1056 1056 #define sd_send_scsi_SYNCHRONIZE_CACHE_biodone \
1057 1057 ssd_send_scsi_SYNCHRONIZE_CACHE_biodone
1058 1058 #define sd_send_scsi_MODE_SENSE ssd_send_scsi_MODE_SENSE
1059 1059 #define sd_send_scsi_MODE_SELECT ssd_send_scsi_MODE_SELECT
1060 1060 #define sd_send_scsi_RDWR ssd_send_scsi_RDWR
1061 1061 #define sd_send_scsi_LOG_SENSE ssd_send_scsi_LOG_SENSE
1062 1062 #define sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION \
1063 1063 ssd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
1064 1064 #define sd_gesn_media_data_valid ssd_gesn_media_data_valid
1065 1065 #define sd_alloc_rqs ssd_alloc_rqs
1066 1066 #define sd_free_rqs ssd_free_rqs
1067 1067 #define sd_dump_memory ssd_dump_memory
1068 1068 #define sd_get_media_info_com ssd_get_media_info_com
1069 1069 #define sd_get_media_info ssd_get_media_info
1070 1070 #define sd_get_media_info_ext ssd_get_media_info_ext
1071 1071 #define sd_dkio_ctrl_info ssd_dkio_ctrl_info
1072 1072 #define sd_nvpair_str_decode ssd_nvpair_str_decode
1073 1073 #define sd_strtok_r ssd_strtok_r
1074 1074 #define sd_set_properties ssd_set_properties
1075 1075 #define sd_get_tunables_from_conf ssd_get_tunables_from_conf
1076 1076 #define sd_setup_next_xfer ssd_setup_next_xfer
1077 1077 #define sd_dkio_get_temp ssd_dkio_get_temp
1078 1078 #define sd_check_mhd ssd_check_mhd
1079 1079 #define sd_mhd_watch_cb ssd_mhd_watch_cb
1080 1080 #define sd_mhd_watch_incomplete ssd_mhd_watch_incomplete
1081 1081 #define sd_sname ssd_sname
1082 1082 #define sd_mhd_resvd_recover ssd_mhd_resvd_recover
1083 1083 #define sd_resv_reclaim_thread ssd_resv_reclaim_thread
1084 1084 #define sd_take_ownership ssd_take_ownership
1085 1085 #define sd_reserve_release ssd_reserve_release
1086 1086 #define sd_rmv_resv_reclaim_req ssd_rmv_resv_reclaim_req
1087 1087 #define sd_mhd_reset_notify_cb ssd_mhd_reset_notify_cb
1088 1088 #define sd_persistent_reservation_in_read_keys \
1089 1089 ssd_persistent_reservation_in_read_keys
1090 1090 #define sd_persistent_reservation_in_read_resv \
1091 1091 ssd_persistent_reservation_in_read_resv
1092 1092 #define sd_mhdioc_takeown ssd_mhdioc_takeown
1093 1093 #define sd_mhdioc_failfast ssd_mhdioc_failfast
1094 1094 #define sd_mhdioc_release ssd_mhdioc_release
1095 1095 #define sd_mhdioc_register_devid ssd_mhdioc_register_devid
1096 1096 #define sd_mhdioc_inkeys ssd_mhdioc_inkeys
1097 1097 #define sd_mhdioc_inresv ssd_mhdioc_inresv
1098 1098 #define sr_change_blkmode ssr_change_blkmode
1099 1099 #define sr_change_speed ssr_change_speed
1100 1100 #define sr_atapi_change_speed ssr_atapi_change_speed
1101 1101 #define sr_pause_resume ssr_pause_resume
1102 1102 #define sr_play_msf ssr_play_msf
1103 1103 #define sr_play_trkind ssr_play_trkind
1104 1104 #define sr_read_all_subcodes ssr_read_all_subcodes
1105 1105 #define sr_read_subchannel ssr_read_subchannel
1106 1106 #define sr_read_tocentry ssr_read_tocentry
1107 1107 #define sr_read_tochdr ssr_read_tochdr
1108 1108 #define sr_read_cdda ssr_read_cdda
1109 1109 #define sr_read_cdxa ssr_read_cdxa
1110 1110 #define sr_read_mode1 ssr_read_mode1
1111 1111 #define sr_read_mode2 ssr_read_mode2
1112 1112 #define sr_read_cd_mode2 ssr_read_cd_mode2
1113 1113 #define sr_sector_mode ssr_sector_mode
1114 1114 #define sr_eject ssr_eject
1115 1115 #define sr_ejected ssr_ejected
1116 1116 #define sr_check_wp ssr_check_wp
1117 1117 #define sd_watch_request_submit ssd_watch_request_submit
1118 1118 #define sd_check_media ssd_check_media
1119 1119 #define sd_media_watch_cb ssd_media_watch_cb
1120 1120 #define sd_delayed_cv_broadcast ssd_delayed_cv_broadcast
1121 1121 #define sr_volume_ctrl ssr_volume_ctrl
1122 1122 #define sr_read_sony_session_offset ssr_read_sony_session_offset
1123 1123 #define sd_log_page_supported ssd_log_page_supported
1124 1124 #define sd_check_for_writable_cd ssd_check_for_writable_cd
1125 1125 #define sd_wm_cache_constructor ssd_wm_cache_constructor
1126 1126 #define sd_wm_cache_destructor ssd_wm_cache_destructor
1127 1127 #define sd_range_lock ssd_range_lock
1128 1128 #define sd_get_range ssd_get_range
1129 1129 #define sd_free_inlist_wmap ssd_free_inlist_wmap
1130 1130 #define sd_range_unlock ssd_range_unlock
1131 1131 #define sd_read_modify_write_task ssd_read_modify_write_task
1132 1132 #define sddump_do_read_of_rmw ssddump_do_read_of_rmw
1133 1133
1134 1134 #define sd_iostart_chain ssd_iostart_chain
1135 1135 #define sd_iodone_chain ssd_iodone_chain
1136 1136 #define sd_initpkt_map ssd_initpkt_map
1137 1137 #define sd_destroypkt_map ssd_destroypkt_map
1138 1138 #define sd_chain_type_map ssd_chain_type_map
1139 1139 #define sd_chain_index_map ssd_chain_index_map
1140 1140
1141 1141 #define sd_failfast_flushctl ssd_failfast_flushctl
1142 1142 #define sd_failfast_flushq ssd_failfast_flushq
1143 1143 #define sd_failfast_flushq_callback ssd_failfast_flushq_callback
1144 1144
1145 1145 #define sd_is_lsi ssd_is_lsi
1146 1146 #define sd_tg_rdwr ssd_tg_rdwr
1147 1147 #define sd_tg_getinfo ssd_tg_getinfo
1148 1148 #define sd_rmw_msg_print_handler ssd_rmw_msg_print_handler
1149 1149
1150 1150 #endif /* #if (defined(__fibre)) */
1151 1151
1152 1152
1153 1153 int _init(void);
1154 1154 int _fini(void);
1155 1155 int _info(struct modinfo *modinfop);
1156 1156
1157 1157 /*PRINTFLIKE3*/
1158 1158 static void sd_log_trace(uint_t comp, struct sd_lun *un, const char *fmt, ...);
1159 1159 /*PRINTFLIKE3*/
1160 1160 static void sd_log_info(uint_t comp, struct sd_lun *un, const char *fmt, ...);
1161 1161 /*PRINTFLIKE3*/
1162 1162 static void sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...);
1163 1163
1164 1164 static int sdprobe(dev_info_t *devi);
1165 1165 static int sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
1166 1166 void **result);
1167 1167 static int sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
1168 1168 int mod_flags, char *name, caddr_t valuep, int *lengthp);
1169 1169
1170 1170 /*
1171 1171 * Smart probe for parallel scsi
1172 1172 */
1173 1173 static void sd_scsi_probe_cache_init(void);
1174 1174 static void sd_scsi_probe_cache_fini(void);
1175 1175 static void sd_scsi_clear_probe_cache(void);
1176 1176 static int sd_scsi_probe_with_cache(struct scsi_device *devp, int (*fn)());
1177 1177
1178 1178 /*
1179 1179 * Attached luns on target for parallel scsi
1180 1180 */
1181 1181 static void sd_scsi_target_lun_init(void);
1182 1182 static void sd_scsi_target_lun_fini(void);
1183 1183 static int sd_scsi_get_target_lun_count(dev_info_t *dip, int target);
1184 1184 static void sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag);
1185 1185
1186 1186 static int sd_spin_up_unit(sd_ssc_t *ssc);
1187 1187
1188 1188 /*
1189 1189 * Use sd_ssc_init to allocate an sd_ssc_t struct,
1190 1190 * sd_ssc_send to send a uscsi internal command, and
1191 1191 * sd_ssc_fini to free the sd_ssc_t struct.
1192 1192 */
1193 1193 static sd_ssc_t *sd_ssc_init(struct sd_lun *un);
1194 1194 static int sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd,
1195 1195 int flag, enum uio_seg dataspace, int path_flag);
1196 1196 static void sd_ssc_fini(sd_ssc_t *ssc);
1197 1197
1198 1198 /*
1199 1199 * Use sd_ssc_assessment to set the correct type of assessment, and
1200 1200 * sd_ssc_post to post an ereport and system log entry. sd_ssc_post
1201 1201 * calls sd_ssc_print to print the system log entry and
1202 1202 * sd_ssc_ereport_post to post the ereport.
1203 1203 */
1204 1204 static void sd_ssc_assessment(sd_ssc_t *ssc,
1205 1205 enum sd_type_assessment tp_assess);
1206 1206
1207 1207 static void sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess);
1208 1208 static void sd_ssc_print(sd_ssc_t *ssc, int sd_severity);
1209 1209 static void sd_ssc_ereport_post(sd_ssc_t *ssc,
1210 1210 enum sd_driver_assessment drv_assess);
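/*
 * A minimal usage sketch of the ssc helpers above (hypothetical caller;
 * "un" and the uscsi_cmd setup in "ucmd" are assumed, error handling
 * elided):
 */
#if 0
	sd_ssc_t	*ssc;
	int		status;

	ssc = sd_ssc_init(un);
	status = sd_ssc_send(ssc, &ucmd, FKIOCTL, UIO_SYSSPACE,
	    SD_PATH_STANDARD);
	if (status == 0)
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);
	else
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	sd_ssc_fini(ssc);
#endif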
1211 1211
1212 1212 /*
1213 1213 * Use sd_ssc_set_info to mark an un-decodable-data error, and
1214 1214 * sd_ssc_extract_info to transfer information from internal
1215 1215 * data structures to the sd_ssc_t.
1216 1216 */
1217 1217 static void sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp,
1218 1218 const char *fmt, ...);
1219 1219 static void sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un,
1220 1220 struct scsi_pkt *pktp, struct buf *bp, struct sd_xbuf *xp);
1221 1221
1222 1222 static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
1223 1223 enum uio_seg dataspace, int path_flag);
1224 1224
1225 1225 #ifdef _LP64
1226 1226 static void sd_enable_descr_sense(sd_ssc_t *ssc);
1227 1227 static void sd_reenable_dsense_task(void *arg);
1228 1228 #endif /* _LP64 */
1229 1229
1230 1230 static void sd_set_mmc_caps(sd_ssc_t *ssc);
1231 1231
1232 1232 static void sd_read_unit_properties(struct sd_lun *un);
1233 1233 static int sd_process_sdconf_file(struct sd_lun *un);
1234 1234 static void sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str);
1235 1235 static char *sd_strtok_r(char *string, const char *sepset, char **lasts);
1236 1236 static void sd_set_properties(struct sd_lun *un, char *name, char *value);
1237 1237 static void sd_get_tunables_from_conf(struct sd_lun *un, int flags,
1238 1238 int *data_list, sd_tunables *values);
1239 1239 static void sd_process_sdconf_table(struct sd_lun *un);
1240 1240 static int sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen);
1241 1241 static int sd_blank_cmp(struct sd_lun *un, char *id, int idlen);
1242 1242 static int sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
1243 1243 int list_len, char *dataname_ptr);
1244 1244 static void sd_set_vers1_properties(struct sd_lun *un, int flags,
1245 1245 sd_tunables *prop_list);
1246 1246
1247 1247 static void sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi,
1248 1248 int reservation_flag);
1249 1249 static int sd_get_devid(sd_ssc_t *ssc);
1250 1250 static ddi_devid_t sd_create_devid(sd_ssc_t *ssc);
1251 1251 static int sd_write_deviceid(sd_ssc_t *ssc);
1252 1252 static int sd_get_devid_page(struct sd_lun *un, uchar_t *wwn, int *len);
1253 1253 static int sd_check_vpd_page_support(sd_ssc_t *ssc);
1254 1254
1255 1255 static void sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi);
1256 1256 static void sd_create_pm_components(dev_info_t *devi, struct sd_lun *un);
1257 1257
1258 1258 static int sd_ddi_suspend(dev_info_t *devi);
1259 1259 static int sd_ddi_resume(dev_info_t *devi);
1260 1260 static int sd_pm_state_change(struct sd_lun *un, int level, int flag);
1261 1261 static int sdpower(dev_info_t *devi, int component, int level);
1262 1262
1263 1263 static int sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
1264 1264 static int sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
1265 1265 static int sd_unit_attach(dev_info_t *devi);
1266 1266 static int sd_unit_detach(dev_info_t *devi);
1267 1267
1268 1268 static void sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi);
1269 1269 static void sd_create_errstats(struct sd_lun *un, int instance);
1270 1270 static void sd_set_errstats(struct sd_lun *un);
1271 1271 static void sd_set_pstats(struct sd_lun *un);
1272 1272
1273 1273 static int sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
1274 1274 static int sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pkt);
1275 1275 static int sd_send_polled_RQS(struct sd_lun *un);
1276 1276 static int sd_ddi_scsi_poll(struct scsi_pkt *pkt);
1277 1277
1278 1278 #if (defined(__fibre))
1279 1279 /*
1280 1280 * Event callbacks (photon)
1281 1281 */
1282 1282 static void sd_init_event_callbacks(struct sd_lun *un);
1283 1283 static void sd_event_callback(dev_info_t *, ddi_eventcookie_t, void *, void *);
1284 1284 #endif
1285 1285
1286 1286 /*
1287 1287 * Defines for sd_cache_control
1288 1288 */
1289 1289
1290 1290 #define SD_CACHE_ENABLE 1
1291 1291 #define SD_CACHE_DISABLE 0
1292 1292 #define SD_CACHE_NOCHANGE -1
1293 1293
1294 1294 static int sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag);
1295 1295 static int sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled);
1296 1296 static void sd_get_nv_sup(sd_ssc_t *ssc);
1297 1297 static dev_t sd_make_device(dev_info_t *devi);
1298 1298 static void sd_check_solid_state(sd_ssc_t *ssc);
1299 1299 static void sd_check_emulation_mode(sd_ssc_t *ssc);
1300 1300 static void sd_update_block_info(struct sd_lun *un, uint32_t lbasize,
1301 1301 uint64_t capacity);
1302 1302
1303 1303 /*
1304 1304 * Driver entry point functions.
1305 1305 */
1306 1306 static int sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
1307 1307 static int sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
1308 1308 static int sd_ready_and_valid(sd_ssc_t *ssc, int part);
1309 1309
1310 1310 static void sdmin(struct buf *bp);
1311 1311 static int sdread(dev_t dev, struct uio *uio, cred_t *cred_p);
1312 1312 static int sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
1313 1313 static int sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
1314 1314 static int sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);
1315 1315
1316 1316 static int sdstrategy(struct buf *bp);
1317 1317 static int sdioctl(dev_t, int, intptr_t, int, cred_t *, int *);
1318 1318
1319 1319 /*
1320 1320 * Function prototypes for layering functions in the iostart chain.
1321 1321 */
1322 1322 static void sd_mapblockaddr_iostart(int index, struct sd_lun *un,
1323 1323 struct buf *bp);
1324 1324 static void sd_mapblocksize_iostart(int index, struct sd_lun *un,
1325 1325 struct buf *bp);
1326 1326 static void sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp);
1327 1327 static void sd_checksum_uscsi_iostart(int index, struct sd_lun *un,
1328 1328 struct buf *bp);
1329 1329 static void sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp);
1330 1330 static void sd_core_iostart(int index, struct sd_lun *un, struct buf *bp);
1331 1331
1332 1332 /*
1333 1333 * Function prototypes for layering functions in the iodone chain.
1334 1334 */
1335 1335 static void sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp);
1336 1336 static void sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp);
1337 1337 static void sd_mapblockaddr_iodone(int index, struct sd_lun *un,
1338 1338 struct buf *bp);
1339 1339 static void sd_mapblocksize_iodone(int index, struct sd_lun *un,
1340 1340 struct buf *bp);
1341 1341 static void sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp);
1342 1342 static void sd_checksum_uscsi_iodone(int index, struct sd_lun *un,
1343 1343 struct buf *bp);
1344 1344 static void sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp);
1345 1345
1346 1346 /*
1347 1347 * Prototypes for functions to support buf(9S) based IO.
1348 1348 */
1349 1349 static void sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg);
1350 1350 static int sd_initpkt_for_buf(struct buf *, struct scsi_pkt **);
1351 1351 static void sd_destroypkt_for_buf(struct buf *);
1352 1352 static int sd_setup_rw_pkt(struct sd_lun *un, struct scsi_pkt **pktpp,
1353 1353 struct buf *bp, int flags,
1354 1354 int (*callback)(caddr_t), caddr_t callback_arg,
1355 1355 diskaddr_t lba, uint32_t blockcount);
1356 1356 static int sd_setup_next_rw_pkt(struct sd_lun *un, struct scsi_pkt *pktp,
1357 1357 struct buf *bp, diskaddr_t lba, uint32_t blockcount);
1358 1358
1359 1359 /*
1360 1360 * Prototypes for functions to support USCSI IO.
1361 1361 */
1362 1362 static int sd_uscsi_strategy(struct buf *bp);
1363 1363 static int sd_initpkt_for_uscsi(struct buf *, struct scsi_pkt **);
1364 1364 static void sd_destroypkt_for_uscsi(struct buf *);
1365 1365
1366 1366 static void sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
1367 1367 uchar_t chain_type, void *pktinfop);
1368 1368
1369 1369 static int sd_pm_entry(struct sd_lun *un);
1370 1370 static void sd_pm_exit(struct sd_lun *un);
1371 1371
1372 1372 static void sd_pm_idletimeout_handler(void *arg);
1373 1373
1374 1374 /*
1375 1375 * sd_core internal functions (used at the sd_core_io layer).
1376 1376 */
1377 1377 static void sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp);
1378 1378 static void sdintr(struct scsi_pkt *pktp);
1379 1379 static void sd_start_cmds(struct sd_lun *un, struct buf *immed_bp);
1380 1380
1381 1381 static int sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
1382 1382 enum uio_seg dataspace, int path_flag);
1383 1383
1384 1384 static struct buf *sd_bioclone_alloc(struct buf *bp, size_t datalen,
1385 1385 daddr_t blkno, int (*func)(struct buf *));
1386 1386 static struct buf *sd_shadow_buf_alloc(struct buf *bp, size_t datalen,
1387 1387 uint_t bflags, daddr_t blkno, int (*func)(struct buf *));
1388 1388 static void sd_bioclone_free(struct buf *bp);
1389 1389 static void sd_shadow_buf_free(struct buf *bp);
1390 1390
1391 1391 static void sd_print_transport_rejected_message(struct sd_lun *un,
1392 1392 struct sd_xbuf *xp, int code);
1393 1393 static void sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp,
1394 1394 void *arg, int code);
1395 1395 static void sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp,
1396 1396 void *arg, int code);
1397 1397 static void sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp,
1398 1398 void *arg, int code);
1399 1399
1400 1400 static void sd_retry_command(struct sd_lun *un, struct buf *bp,
1401 1401 int retry_check_flag,
1402 1402 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp,
1403 1403 int c),
1404 1404 void *user_arg, int failure_code, clock_t retry_delay,
1405 1405 void (*statp)(kstat_io_t *));
1406 1406
1407 1407 static void sd_set_retry_bp(struct sd_lun *un, struct buf *bp,
1408 1408 clock_t retry_delay, void (*statp)(kstat_io_t *));
1409 1409
1410 1410 static void sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
1411 1411 struct scsi_pkt *pktp);
1412 1412 static void sd_start_retry_command(void *arg);
1413 1413 static void sd_start_direct_priority_command(void *arg);
1414 1414 static void sd_return_failed_command(struct sd_lun *un, struct buf *bp,
1415 1415 int errcode);
1416 1416 static void sd_return_failed_command_no_restart(struct sd_lun *un,
1417 1417 struct buf *bp, int errcode);
1418 1418 static void sd_return_command(struct sd_lun *un, struct buf *bp);
1419 1419 static void sd_sync_with_callback(struct sd_lun *un);
1420 1420 static int sdrunout(caddr_t arg);
1421 1421
1422 1422 static void sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp);
1423 1423 static struct buf *sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *xp);
1424 1424
1425 1425 static void sd_reduce_throttle(struct sd_lun *un, int throttle_type);
1426 1426 static void sd_restore_throttle(void *arg);
1427 1427
1428 1428 static void sd_init_cdb_limits(struct sd_lun *un);
1429 1429
1430 1430 static void sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
1431 1431 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1432 1432
1433 1433 /*
1434 1434 * Error handling functions
1435 1435 */
1436 1436 static void sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
1437 1437 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1438 1438 static void sd_pkt_status_busy(struct sd_lun *un, struct buf *bp,
1439 1439 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1440 1440 static void sd_pkt_status_reservation_conflict(struct sd_lun *un,
1441 1441 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1442 1442 static void sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
1443 1443 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1444 1444
1445 1445 static void sd_handle_request_sense(struct sd_lun *un, struct buf *bp,
1446 1446 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1447 1447 static void sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
1448 1448 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1449 1449 static int sd_validate_sense_data(struct sd_lun *un, struct buf *bp,
1450 1450 struct sd_xbuf *xp, size_t actual_len);
1451 1451 static void sd_decode_sense(struct sd_lun *un, struct buf *bp,
1452 1452 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1453 1453
1454 1454 static void sd_print_sense_msg(struct sd_lun *un, struct buf *bp,
1455 1455 void *arg, int code);
1456 1456
1457 1457 static void sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
1458 1458 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1459 1459 static void sd_sense_key_recoverable_error(struct sd_lun *un,
1460 1460 uint8_t *sense_datap,
1461 1461 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1462 1462 static void sd_sense_key_not_ready(struct sd_lun *un,
1463 1463 uint8_t *sense_datap,
1464 1464 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1465 1465 static void sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
1466 1466 uint8_t *sense_datap,
1467 1467 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1468 1468 static void sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
1469 1469 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1470 1470 static void sd_sense_key_unit_attention(struct sd_lun *un,
1471 1471 uint8_t *sense_datap,
1472 1472 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1473 1473 static void sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
1474 1474 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1475 1475 static void sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
1476 1476 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1477 1477 static void sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
1478 1478 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1479 1479 static void sd_sense_key_default(struct sd_lun *un,
1480 1480 uint8_t *sense_datap,
1481 1481 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp);
1482 1482
1483 1483 static void sd_print_retry_msg(struct sd_lun *un, struct buf *bp,
1484 1484 void *arg, int flag);
1485 1485
1486 1486 static void sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
1487 1487 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1488 1488 static void sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
1489 1489 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1490 1490 static void sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
1491 1491 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1492 1492 static void sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
1493 1493 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1494 1494 static void sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
1495 1495 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1496 1496 static void sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
1497 1497 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1498 1498 static void sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
1499 1499 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1500 1500 static void sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
1501 1501 struct sd_xbuf *xp, struct scsi_pkt *pktp);
1502 1502
1503 1503 static void sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp);
1504 1504
1505 1505 static void sd_start_stop_unit_callback(void *arg);
1506 1506 static void sd_start_stop_unit_task(void *arg);
1507 1507
1508 1508 static void sd_taskq_create(void);
1509 1509 static void sd_taskq_delete(void);
1510 1510 static void sd_target_change_task(void *arg);
1511 1511 static void sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag);
1512 1512 static void sd_log_lun_expansion_event(struct sd_lun *un, int km_flag);
1513 1513 static void sd_log_eject_request_event(struct sd_lun *un, int km_flag);
1514 1514 static void sd_media_change_task(void *arg);
1515 1515
1516 1516 static int sd_handle_mchange(struct sd_lun *un);
1517 1517 static int sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag);
1518 1518 static int sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp,
1519 1519 uint32_t *lbap, int path_flag);
1520 1520 static int sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
1521 1521 uint32_t *lbap, uint32_t *psp, int path_flag);
1522 1522 static int sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag,
1523 1523 int flag, int path_flag);
1524 1524 static int sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr,
1525 1525 size_t buflen, uchar_t evpd, uchar_t page_code, size_t *residp);
1526 1526 static int sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag);
1527 1527 static int sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc,
1528 1528 uchar_t usr_cmd, uint16_t data_len, uchar_t *data_bufp);
1529 1529 static int sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc,
1530 1530 uchar_t usr_cmd, uchar_t *usr_bufp);
1531 1531 static int sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un,
1532 1532 struct dk_callback *dkc);
1533 1533 static int sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp);
1534 1534 static int sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc,
1535 1535 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
1536 1536 uchar_t *bufaddr, uint_t buflen, int path_flag);
1537 1537 static int sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc,
1538 1538 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
1539 1539 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag);
1540 1540 static int sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize,
1541 1541 uchar_t *bufaddr, size_t buflen, uchar_t page_code, int path_flag);
1542 1542 static int sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize,
1543 1543 uchar_t *bufaddr, size_t buflen, uchar_t save_page, int path_flag);
1544 1544 static int sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
1545 1545 size_t buflen, daddr_t start_block, int path_flag);
1546 1546 #define sd_send_scsi_READ(ssc, bufaddr, buflen, start_block, path_flag) \
1547 1547 sd_send_scsi_RDWR(ssc, SCMD_READ, bufaddr, buflen, start_block, \
1548 1548 path_flag)
1549 1549 #define sd_send_scsi_WRITE(ssc, bufaddr, buflen, start_block, path_flag)\
1550 1550 sd_send_scsi_RDWR(ssc, SCMD_WRITE, bufaddr, buflen, start_block,\
1551 1551 path_flag)
1552 1552
1553 1553 static int sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr,
1554 1554 uint16_t buflen, uchar_t page_code, uchar_t page_control,
1555 1555 uint16_t param_ptr, int path_flag);
1556 1556 static int sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc,
1557 1557 uchar_t *bufaddr, size_t buflen, uchar_t class_req);
1558 1558 static boolean_t sd_gesn_media_data_valid(uchar_t *data);
1559 1559
1560 1560 static int sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un);
1561 1561 static void sd_free_rqs(struct sd_lun *un);
1562 1562
1563 1563 static void sd_dump_memory(struct sd_lun *un, uint_t comp, char *title,
1564 1564 uchar_t *data, int len, int fmt);
1565 1565 static void sd_panic_for_res_conflict(struct sd_lun *un);
1566 1566
1567 1567 /*
1568 1568 * Disk Ioctl Function Prototypes
1569 1569 */
1570 1570 static int sd_get_media_info(dev_t dev, caddr_t arg, int flag);
1571 1571 static int sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag);
1572 1572 static int sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag);
1573 1573 static int sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag);
1574 1574
1575 1575 /*
1576 1576 * Multi-host Ioctl Prototypes
1577 1577 */
1578 1578 static int sd_check_mhd(dev_t dev, int interval);
1579 1579 static int sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
1580 1580 static void sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt);
1581 1581 static char *sd_sname(uchar_t status);
1582 1582 static void sd_mhd_resvd_recover(void *arg);
1583 1583 static void sd_resv_reclaim_thread(void);
1584 1584 static int sd_take_ownership(dev_t dev, struct mhioctkown *p);
1585 1585 static int sd_reserve_release(dev_t dev, int cmd);
1586 1586 static void sd_rmv_resv_reclaim_req(dev_t dev);
1587 1587 static void sd_mhd_reset_notify_cb(caddr_t arg);
1588 1588 static int sd_persistent_reservation_in_read_keys(struct sd_lun *un,
1589 1589 mhioc_inkeys_t *usrp, int flag);
1590 1590 static int sd_persistent_reservation_in_read_resv(struct sd_lun *un,
1591 1591 mhioc_inresvs_t *usrp, int flag);
1592 1592 static int sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag);
1593 1593 static int sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag);
1594 1594 static int sd_mhdioc_release(dev_t dev);
1595 1595 static int sd_mhdioc_register_devid(dev_t dev);
1596 1596 static int sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag);
1597 1597 static int sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag);
1598 1598
1599 1599 /*
1600 1600 * SCSI removable prototypes
1601 1601 */
1602 1602 static int sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag);
1603 1603 static int sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
1604 1604 static int sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag);
1605 1605 static int sr_pause_resume(dev_t dev, int mode);
1606 1606 static int sr_play_msf(dev_t dev, caddr_t data, int flag);
1607 1607 static int sr_play_trkind(dev_t dev, caddr_t data, int flag);
1608 1608 static int sr_read_all_subcodes(dev_t dev, caddr_t data, int flag);
1609 1609 static int sr_read_subchannel(dev_t dev, caddr_t data, int flag);
1610 1610 static int sr_read_tocentry(dev_t dev, caddr_t data, int flag);
1611 1611 static int sr_read_tochdr(dev_t dev, caddr_t data, int flag);
1612 1612 static int sr_read_cdda(dev_t dev, caddr_t data, int flag);
1613 1613 static int sr_read_cdxa(dev_t dev, caddr_t data, int flag);
1614 1614 static int sr_read_mode1(dev_t dev, caddr_t data, int flag);
1615 1615 static int sr_read_mode2(dev_t dev, caddr_t data, int flag);
1616 1616 static int sr_read_cd_mode2(dev_t dev, caddr_t data, int flag);
1617 1617 static int sr_sector_mode(dev_t dev, uint32_t blksize);
1618 1618 static int sr_eject(dev_t dev);
1619 1619 static void sr_ejected(register struct sd_lun *un);
1620 1620 static int sr_check_wp(dev_t dev);
1621 1621 static opaque_t sd_watch_request_submit(struct sd_lun *un);
1622 1622 static int sd_check_media(dev_t dev, enum dkio_state state);
1623 1623 static int sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp);
1624 1624 static void sd_delayed_cv_broadcast(void *arg);
1625 1625 static int sr_volume_ctrl(dev_t dev, caddr_t data, int flag);
1626 1626 static int sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag);
1627 1627
1628 1628 static int sd_log_page_supported(sd_ssc_t *ssc, int log_page);
1629 1629
1630 1630 /*
1631 1631 * Function Prototype for the non-512 support (DVDRAM, MO etc.) functions.
1632 1632 */
1633 1633 static void sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag);
1634 1634 static int sd_wm_cache_constructor(void *wm, void *un, int flags);
1635 1635 static void sd_wm_cache_destructor(void *wm, void *un);
1636 1636 static struct sd_w_map *sd_range_lock(struct sd_lun *un, daddr_t startb,
1637 1637 daddr_t endb, ushort_t typ);
1638 1638 static struct sd_w_map *sd_get_range(struct sd_lun *un, daddr_t startb,
1639 1639 daddr_t endb);
1640 1640 static void sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp);
1641 1641 static void sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm);
1642 1642 static void sd_read_modify_write_task(void *arg);
1643 1643 static int
1644 1644 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
1645 1645 struct buf **bpp);
1646 1646
1647 1647
1648 1648 /*
1649 1649 * Function prototypes for failfast support.
1650 1650 */
1651 1651 static void sd_failfast_flushq(struct sd_lun *un);
1652 1652 static int sd_failfast_flushq_callback(struct buf *bp);
1653 1653
1654 1654 /*
1655 1655 * Function prototypes to check for LSI devices
1656 1656 */
1657 1657 static void sd_is_lsi(struct sd_lun *un);
1658 1658
1659 1659 /*
1660 1660 * Function prototypes for partial DMA support
1661 1661 */
1662 1662 static int sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
1663 1663 struct scsi_pkt *pkt, struct sd_xbuf *xp);
1664 1664
1665 1665
1666 1666 /* Function prototypes for cmlb */
1667 1667 static int sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
1668 1668 diskaddr_t start_block, size_t reqlength, void *tg_cookie);
1669 1669
1670 1670 static int sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie);
1671 1671
1672 1672 /*
1673 1673 * For printing RMW warning messages in a timely manner
1674 1674 */
1675 1675 static void sd_rmw_msg_print_handler(void *arg);
1676 1676
1677 1677 /*
1678 1678 * Constants for failfast support:
1679 1679 *
1680 1680 * SD_FAILFAST_INACTIVE: Instance is currently in a normal state, with NO
1681 1681 * failfast processing being performed.
1682 1682 *
1683 1683 * SD_FAILFAST_ACTIVE: Instance is in the failfast state and is performing
1684 1684 * failfast processing on all bufs with B_FAILFAST set.
1685 1685 */
1686 1686
1687 1687 #define SD_FAILFAST_INACTIVE 0
1688 1688 #define SD_FAILFAST_ACTIVE 1
1689 1689
1690 1690 /*
1691 1691 * Bitmask to control behavior of buf(9S) flushes when a transition to
1692 1692 * the failfast state occurs. Optional bits include:
1693 1693 *
1694 1694 * SD_FAILFAST_FLUSH_ALL_BUFS: When set, flush ALL bufs including those that
1695 1695 * do NOT have B_FAILFAST set. When clear, only bufs with B_FAILFAST will
1696 1696 * be flushed.
1697 1697 *
1698 1698 * SD_FAILFAST_FLUSH_ALL_QUEUES: When set, flush any/all other queues in the
1699 1699 * driver, in addition to the regular wait queue. This includes the xbuf
1700 1700 * queues. When clear, only the driver's wait queue will be flushed.
1701 1701 */
1702 1702 #define SD_FAILFAST_FLUSH_ALL_BUFS 0x01
1703 1703 #define SD_FAILFAST_FLUSH_ALL_QUEUES 0x02
1704 1704
1705 1705 /*
1706 1706 * The default behavior is to only flush bufs that have B_FAILFAST set, but
1707 1707 * to flush all queues within the driver.
1708 1708 */
1709 1709 static int sd_failfast_flushctl = SD_FAILFAST_FLUSH_ALL_QUEUES;
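/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * how the two flushctl bits above would typically be consulted for each
 * queued buf when an instance transitions to the failfast state.
 */
#ifdef notdef
static int
sd_failfast_should_flush(struct buf *bp)
{
	/* With ALL_BUFS set, every queued buf is flushed ... */
	if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS)
		return (1);
	/* ... otherwise only bufs marked B_FAILFAST are flushed. */
	return ((bp->b_flags & B_FAILFAST) != 0);
}
#endif	/* notdef */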
1710 1710
1711 1711
1712 1712 /*
1713 1713 * SD Testing Fault Injection
1714 1714 */
1715 1715 #ifdef SD_FAULT_INJECTION
1716 1716 static void sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un);
1717 1717 static void sd_faultinjection(struct scsi_pkt *pktp);
1718 1718 static void sd_injection_log(char *buf, struct sd_lun *un);
1719 1719 #endif
1720 1720
1721 1721 /*
1722 1722 * Device driver ops vector
1723 1723 */
1724 1724 static struct cb_ops sd_cb_ops = {
1725 1725 sdopen, /* open */
1726 1726 sdclose, /* close */
1727 1727 sdstrategy, /* strategy */
1728 1728 nodev, /* print */
1729 1729 sddump, /* dump */
1730 1730 sdread, /* read */
1731 1731 sdwrite, /* write */
1732 1732 sdioctl, /* ioctl */
1733 1733 nodev, /* devmap */
1734 1734 nodev, /* mmap */
1735 1735 nodev, /* segmap */
1736 1736 nochpoll, /* poll */
1737 1737 sd_prop_op, /* cb_prop_op */
1738 1738 0, /* streamtab */
1739 1739 D_64BIT | D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flags */
1740 1740 CB_REV, /* cb_rev */
1741 1741 sdaread, /* async I/O read entry point */
1742 1742 sdawrite /* async I/O write entry point */
1743 1743 };
1744 1744
1745 1745 struct dev_ops sd_ops = {
1746 1746 DEVO_REV, /* devo_rev, */
1747 1747 0, /* refcnt */
1748 1748 sdinfo, /* info */
1749 1749 nulldev, /* identify */
1750 1750 sdprobe, /* probe */
1751 1751 sdattach, /* attach */
1752 1752 sddetach, /* detach */
1753 1753 nodev, /* reset */
1754 1754 &sd_cb_ops, /* driver operations */
1755 1755 NULL, /* bus operations */
1756 1756 sdpower, /* power */
1757 1757 ddi_quiesce_not_needed, /* quiesce */
1758 1758 };
1759 1759
1760 1760 /*
1761 1761 * This is the loadable module wrapper.
1762 1762 */
1763 1763 #include <sys/modctl.h>
1764 1764
1765 1765 #ifndef XPV_HVM_DRIVER
1766 1766 static struct modldrv modldrv = {
1767 1767 &mod_driverops, /* Type of module. This one is a driver */
1768 1768 SD_MODULE_NAME, /* Module name. */
1769 1769 &sd_ops /* driver ops */
1770 1770 };
1771 1771
1772 1772 static struct modlinkage modlinkage = {
1773 1773 MODREV_1, &modldrv, NULL
1774 1774 };
1775 1775
1776 1776 #else /* XPV_HVM_DRIVER */
1777 1777 static struct modlmisc modlmisc = {
1778 1778 &mod_miscops, /* Type of module. This one is a misc */
1779 1779 "HVM " SD_MODULE_NAME, /* Module name. */
1780 1780 };
1781 1781
1782 1782 static struct modlinkage modlinkage = {
1783 1783 MODREV_1, &modlmisc, NULL
1784 1784 };
1785 1785
1786 1786 #endif /* XPV_HVM_DRIVER */
1787 1787
1788 1788 static cmlb_tg_ops_t sd_tgops = {
1789 1789 TG_DK_OPS_VERSION_1,
1790 1790 sd_tg_rdwr,
1791 1791 sd_tg_getinfo
1792 1792 };
1793 1793
1794 1794 static struct scsi_asq_key_strings sd_additional_codes[] = {
1795 1795 0x81, 0, "Logical Unit is Reserved",
1796 1796 0x85, 0, "Audio Address Not Valid",
1797 1797 0xb6, 0, "Media Load Mechanism Failed",
1798 1798 0xB9, 0, "Audio Play Operation Aborted",
1799 1799 0xbf, 0, "Buffer Overflow for Read All Subcodes Command",
1800 1800 0x53, 2, "Medium removal prevented",
1801 1801 0x6f, 0, "Authentication failed during key exchange",
1802 1802 0x6f, 1, "Key not present",
1803 1803 0x6f, 2, "Key not established",
1804 1804 0x6f, 3, "Read without proper authentication",
1805 1805 0x6f, 4, "Mismatched region to this logical unit",
1806 1806 0x6f, 5, "Region reset count error",
1807 1807 0xffff, 0x0, NULL
1808 1808 };
1809 1809
1810 1810
1811 1811 /*
1812 1812 * Struct for passing printing information for sense data messages
1813 1813 */
1814 1814 struct sd_sense_info {
1815 1815 int ssi_severity;
1816 1816 int ssi_pfa_flag;
1817 1817 };
1818 1818
1819 1819 /*
1820 1820 * Table of function pointers for iostart-side routines. Separate "chains"
1821 1821 * of layered function calls are formed by placing the function pointers
1822 1822 * sequentially in the desired order. Functions are called according to an
1823 1823 * incrementing table index ordering. The last function in each chain must
1824 1824 * be sd_core_iostart(). The corresponding iodone-side routines are expected
1825 1825 * in the sd_iodone_chain[] array.
1826 1826 *
1827 1827 * Note: It may seem more natural to organize both the iostart and iodone
1828 1828 * functions together, into an array of structures (or some similar
1829 1829 * organization) with a common index, rather than two separate arrays which
1830 1830 * must be maintained in synchronization. The purpose of this division is
1831 1831 * to achieve improved performance: individual arrays allow for more
1832 1832 * effective cache line utilization on certain platforms.
1833 1833 */
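/*
 * For example, a buf IO started on the first chain below (disk targets,
 * PM enabled) at index 0 flows through:
 *
 *	sd_mapblockaddr_iostart (0) -> sd_pm_iostart (1) -> sd_core_iostart (2)
 *
 * with each routine invoking its successor by incrementing the table index.
 */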
1834 1834
1835 1835 typedef void (*sd_chain_t)(int index, struct sd_lun *un, struct buf *bp);
1836 1836
1837 1837
1838 1838 static sd_chain_t sd_iostart_chain[] = {
1839 1839
1840 1840 /* Chain for buf IO for disk drive targets (PM enabled) */
1841 1841 sd_mapblockaddr_iostart, /* Index: 0 */
1842 1842 sd_pm_iostart, /* Index: 1 */
1843 1843 sd_core_iostart, /* Index: 2 */
1844 1844
1845 1845 /* Chain for buf IO for disk drive targets (PM disabled) */
1846 1846 sd_mapblockaddr_iostart, /* Index: 3 */
1847 1847 sd_core_iostart, /* Index: 4 */
1848 1848
1849 1849 /*
1850 1850 * Chain for buf IO for removable-media or large sector size
1851 1851 * disk drive targets with RMW needed (PM enabled)
1852 1852 */
1853 1853 sd_mapblockaddr_iostart, /* Index: 5 */
1854 1854 sd_mapblocksize_iostart, /* Index: 6 */
1855 1855 sd_pm_iostart, /* Index: 7 */
1856 1856 sd_core_iostart, /* Index: 8 */
1857 1857
1858 1858 /*
1859 1859 * Chain for buf IO for removable-media or large sector size
1860 1860 * disk drive targets with RMW needed (PM disabled)
1861 1861 */
1862 1862 sd_mapblockaddr_iostart, /* Index: 9 */
1863 1863 sd_mapblocksize_iostart, /* Index: 10 */
1864 1864 sd_core_iostart, /* Index: 11 */
1865 1865
1866 1866 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1867 1867 sd_mapblockaddr_iostart, /* Index: 12 */
1868 1868 sd_checksum_iostart, /* Index: 13 */
1869 1869 sd_pm_iostart, /* Index: 14 */
1870 1870 sd_core_iostart, /* Index: 15 */
1871 1871
1872 1872 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1873 1873 sd_mapblockaddr_iostart, /* Index: 16 */
1874 1874 sd_checksum_iostart, /* Index: 17 */
1875 1875 sd_core_iostart, /* Index: 18 */
1876 1876
1877 1877 /* Chain for USCSI commands (all targets) */
1878 1878 sd_pm_iostart, /* Index: 19 */
1879 1879 sd_core_iostart, /* Index: 20 */
1880 1880
1881 1881 /* Chain for checksumming USCSI commands (all targets) */
1882 1882 sd_checksum_uscsi_iostart, /* Index: 21 */
1883 1883 sd_pm_iostart, /* Index: 22 */
1884 1884 sd_core_iostart, /* Index: 23 */
1885 1885
1886 1886 /* Chain for "direct" USCSI commands (all targets) */
1887 1887 sd_core_iostart, /* Index: 24 */
1888 1888
1889 1889 /* Chain for "direct priority" USCSI commands (all targets) */
1890 1890 sd_core_iostart, /* Index: 25 */
1891 1891
1892 1892 /*
1893 1893 * Chain for buf IO for large sector size disk drive targets
1894 1894 * with RMW needed with checksumming (PM enabled)
1895 1895 */
1896 1896 sd_mapblockaddr_iostart, /* Index: 26 */
1897 1897 sd_mapblocksize_iostart, /* Index: 27 */
1898 1898 sd_checksum_iostart, /* Index: 28 */
1899 1899 sd_pm_iostart, /* Index: 29 */
1900 1900 sd_core_iostart, /* Index: 30 */
1901 1901
1902 1902 /*
1903 1903 * Chain for buf IO for large sector size disk drive targets
1904 1904 * with RMW needed with checksumming (PM disabled)
1905 1905 */
1906 1906 sd_mapblockaddr_iostart, /* Index: 31 */
1907 1907 sd_mapblocksize_iostart, /* Index: 32 */
1908 1908 sd_checksum_iostart, /* Index: 33 */
1909 1909 sd_core_iostart, /* Index: 34 */
1910 1910
1911 1911 };
1912 1912
1913 1913 /*
1914 1914 * Macros to locate the first function of each iostart chain in the
1915 1915 * sd_iostart_chain[] array. These are located by the index in the array.
1916 1916 */
1917 1917 #define SD_CHAIN_DISK_IOSTART 0
1918 1918 #define SD_CHAIN_DISK_IOSTART_NO_PM 3
1919 1919 #define SD_CHAIN_MSS_DISK_IOSTART 5
1920 1920 #define SD_CHAIN_RMMEDIA_IOSTART 5
1921 1921 #define SD_CHAIN_MSS_DISK_IOSTART_NO_PM 9
1922 1922 #define SD_CHAIN_RMMEDIA_IOSTART_NO_PM 9
1923 1923 #define SD_CHAIN_CHKSUM_IOSTART 12
1924 1924 #define SD_CHAIN_CHKSUM_IOSTART_NO_PM 16
1925 1925 #define SD_CHAIN_USCSI_CMD_IOSTART 19
1926 1926 #define SD_CHAIN_USCSI_CHKSUM_IOSTART 21
1927 1927 #define SD_CHAIN_DIRECT_CMD_IOSTART 24
1928 1928 #define SD_CHAIN_PRIORITY_CMD_IOSTART 25
1929 1929 #define SD_CHAIN_MSS_CHKSUM_IOSTART 26
1930 1930 #define SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM 31
1931 1931
1932 1932
1933 1933 /*
1934 1934 * Table of function pointers for the iodone-side routines for the driver-
1935 1935 * internal layering mechanism. The calling sequence for iodone routines
1936 1936 * uses a decrementing table index, so the last routine called in a chain
1937 1937 * must be at the lowest array index location for that chain. The last
1938 1938 * routine for each chain must be either sd_buf_iodone() (for buf(9S) IOs)
1939 1939 * or sd_uscsi_iodone() (for uscsi IOs). Other than this, the ordering
1940 1940 * of the functions in an iodone side chain must correspond to the ordering
1941 1941 * of the iostart routines for that chain. Note that there is no iodone
1942 1942 * side routine that corresponds to sd_core_iostart(), so there is no
1943 1943 * entry in the table for this.
1944 1944 */
1945 1945
1946 1946 static sd_chain_t sd_iodone_chain[] = {
1947 1947
1948 1948 /* Chain for buf IO for disk drive targets (PM enabled) */
1949 1949 sd_buf_iodone, /* Index: 0 */
1950 1950 sd_mapblockaddr_iodone, /* Index: 1 */
1951 1951 sd_pm_iodone, /* Index: 2 */
1952 1952
1953 1953 /* Chain for buf IO for disk drive targets (PM disabled) */
1954 1954 sd_buf_iodone, /* Index: 3 */
1955 1955 sd_mapblockaddr_iodone, /* Index: 4 */
1956 1956
1957 1957 /*
1958 1958 * Chain for buf IO for removable-media or large sector size
1959 1959 * disk drive targets with RMW needed (PM enabled)
1960 1960 */
1961 1961 sd_buf_iodone, /* Index: 5 */
1962 1962 sd_mapblockaddr_iodone, /* Index: 6 */
1963 1963 sd_mapblocksize_iodone, /* Index: 7 */
1964 1964 sd_pm_iodone, /* Index: 8 */
1965 1965
1966 1966 /*
1967 1967 * Chain for buf IO for removable-media or large sector size
1968 1968 * disk drive targets with RMW needed (PM disabled)
1969 1969 */
1970 1970 sd_buf_iodone, /* Index: 9 */
1971 1971 sd_mapblockaddr_iodone, /* Index: 10 */
1972 1972 sd_mapblocksize_iodone, /* Index: 11 */
1973 1973
1974 1974 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
1975 1975 sd_buf_iodone, /* Index: 12 */
1976 1976 sd_mapblockaddr_iodone, /* Index: 13 */
1977 1977 sd_checksum_iodone, /* Index: 14 */
1978 1978 sd_pm_iodone, /* Index: 15 */
1979 1979
1980 1980 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
1981 1981 sd_buf_iodone, /* Index: 16 */
1982 1982 sd_mapblockaddr_iodone, /* Index: 17 */
1983 1983 sd_checksum_iodone, /* Index: 18 */
1984 1984
1985 1985 /* Chain for USCSI commands (non-checksum targets) */
1986 1986 sd_uscsi_iodone, /* Index: 19 */
1987 1987 sd_pm_iodone, /* Index: 20 */
1988 1988
1989 1989 /* Chain for USCSI commands (checksum targets) */
1990 1990 sd_uscsi_iodone, /* Index: 21 */
1991 1991 sd_checksum_uscsi_iodone, /* Index: 22 */
1992 1992 sd_pm_iodone, /* Index: 23 */
1993 1993
1994 1994 /* Chain for "direct" USCSI commands (all targets) */
1995 1995 sd_uscsi_iodone, /* Index: 24 */
1996 1996
1997 1997 /* Chain for "direct priority" USCSI commands (all targets) */
1998 1998 sd_uscsi_iodone, /* Index: 25 */
1999 1999
2000 2000 /*
2001 2001 * Chain for buf IO for large sector size disk drive targets
2002 2002 * with checksumming (PM enabled)
2003 2003 */
2004 2004 sd_buf_iodone, /* Index: 26 */
2005 2005 sd_mapblockaddr_iodone, /* Index: 27 */
2006 2006 sd_mapblocksize_iodone, /* Index: 28 */
2007 2007 sd_checksum_iodone, /* Index: 29 */
2008 2008 sd_pm_iodone, /* Index: 30 */
2009 2009
2010 2010 /*
2011 2011 * Chain for buf IO for large sector size disk drive targets
2012 2012 * with checksumming (PM disabled)
2013 2013 */
2014 2014 sd_buf_iodone, /* Index: 31 */
2015 2015 sd_mapblockaddr_iodone, /* Index: 32 */
2016 2016 sd_mapblocksize_iodone, /* Index: 33 */
2017 2017 sd_checksum_iodone, /* Index: 34 */
2018 2018 };
2019 2019
2020 2020
2021 2021 /*
2022 2022 * Macros to locate the "first" function in the sd_iodone_chain[] array for
2023 2023 * each iodone-side chain. These are located by the array index, but as the
2024 2024 * iodone side functions are called in a decrementing-index order, the
2025 2025 * highest index number in each chain must be specified (as these correspond
2026 2026 * to the first function in the iodone chain that will be called by the core
2027 2027 * at IO completion time).
2028 2028 */
2029 2029
2030 2030 #define SD_CHAIN_DISK_IODONE 2
2031 2031 #define SD_CHAIN_DISK_IODONE_NO_PM 4
2032 2032 #define SD_CHAIN_RMMEDIA_IODONE 8
2033 2033 #define SD_CHAIN_MSS_DISK_IODONE 8
2034 2034 #define SD_CHAIN_RMMEDIA_IODONE_NO_PM 11
2035 2035 #define SD_CHAIN_MSS_DISK_IODONE_NO_PM 11
2036 2036 #define SD_CHAIN_CHKSUM_IODONE 15
2037 2037 #define SD_CHAIN_CHKSUM_IODONE_NO_PM 18
2038 2038 #define SD_CHAIN_USCSI_CMD_IODONE 20
2039 2039 #define SD_CHAIN_USCSI_CHKSUM_IODONE 22
2040 2040 #define SD_CHAIN_DIRECT_CMD_IODONE 24
2041 2041 #define SD_CHAIN_PRIORITY_CMD_IODONE 25
2042 2042 #define SD_CHAIN_MSS_CHKSUM_IODONE 30
2043 2043 #define SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM 34
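/*
 * For example, when a buf IO on the standard disk chain (PM enabled)
 * completes, iodone processing begins at SD_CHAIN_DISK_IODONE (2) and
 * walks the table with a decrementing index:
 *
 *	sd_pm_iodone (2) -> sd_mapblockaddr_iodone (1) -> sd_buf_iodone (0)
 */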
2044 2044
2045 2045
2046 2046
2047 2047 /*
2048 2048 * Array to map a layering chain index to the appropriate initpkt routine.
2049 2049 * The redundant entries are present so that the index used for accessing
2050 2050 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2051 2051 * with this table as well.
2052 2052 */
2053 2053 typedef int (*sd_initpkt_t)(struct buf *, struct scsi_pkt **);
2054 2054
2055 2055 static sd_initpkt_t sd_initpkt_map[] = {
2056 2056
2057 2057 /* Chain for buf IO for disk drive targets (PM enabled) */
2058 2058 sd_initpkt_for_buf, /* Index: 0 */
2059 2059 sd_initpkt_for_buf, /* Index: 1 */
2060 2060 sd_initpkt_for_buf, /* Index: 2 */
2061 2061
2062 2062 /* Chain for buf IO for disk drive targets (PM disabled) */
2063 2063 sd_initpkt_for_buf, /* Index: 3 */
2064 2064 sd_initpkt_for_buf, /* Index: 4 */
2065 2065
2066 2066 /*
2067 2067 * Chain for buf IO for removable-media or large sector size
2068 2068 * disk drive targets (PM enabled)
2069 2069 */
2070 2070 sd_initpkt_for_buf, /* Index: 5 */
2071 2071 sd_initpkt_for_buf, /* Index: 6 */
2072 2072 sd_initpkt_for_buf, /* Index: 7 */
2073 2073 sd_initpkt_for_buf, /* Index: 8 */
2074 2074
2075 2075 /*
2076 2076 * Chain for buf IO for removable-media or large sector size
2077 2077 * disk drive targets (PM disabled)
2078 2078 */
2079 2079 sd_initpkt_for_buf, /* Index: 9 */
2080 2080 sd_initpkt_for_buf, /* Index: 10 */
2081 2081 sd_initpkt_for_buf, /* Index: 11 */
2082 2082
2083 2083 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2084 2084 sd_initpkt_for_buf, /* Index: 12 */
2085 2085 sd_initpkt_for_buf, /* Index: 13 */
2086 2086 sd_initpkt_for_buf, /* Index: 14 */
2087 2087 sd_initpkt_for_buf, /* Index: 15 */
2088 2088
2089 2089 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2090 2090 sd_initpkt_for_buf, /* Index: 16 */
2091 2091 sd_initpkt_for_buf, /* Index: 17 */
2092 2092 sd_initpkt_for_buf, /* Index: 18 */
2093 2093
2094 2094 /* Chain for USCSI commands (non-checksum targets) */
2095 2095 sd_initpkt_for_uscsi, /* Index: 19 */
2096 2096 sd_initpkt_for_uscsi, /* Index: 20 */
2097 2097
2098 2098 /* Chain for USCSI commands (checksum targets) */
2099 2099 sd_initpkt_for_uscsi, /* Index: 21 */
2100 2100 sd_initpkt_for_uscsi, /* Index: 22 */
2101 2101 sd_initpkt_for_uscsi, /* Index: 23 */
2102 2102
2103 2103 /* Chain for "direct" USCSI commands (all targets) */
2104 2104 sd_initpkt_for_uscsi, /* Index: 24 */
2105 2105
2106 2106 /* Chain for "direct priority" USCSI commands (all targets) */
2107 2107 sd_initpkt_for_uscsi, /* Index: 25 */
2108 2108
2109 2109 /*
2110 2110 * Chain for buf IO for large sector size disk drive targets
2111 2111 * with checksumming (PM enabled)
2112 2112 */
2113 2113 sd_initpkt_for_buf, /* Index: 26 */
2114 2114 sd_initpkt_for_buf, /* Index: 27 */
2115 2115 sd_initpkt_for_buf, /* Index: 28 */
2116 2116 sd_initpkt_for_buf, /* Index: 29 */
2117 2117 sd_initpkt_for_buf, /* Index: 30 */
2118 2118
2119 2119 /*
2120 2120 * Chain for buf IO for large sector size disk drive targets
2121 2121 * with checksumming (PM disabled)
2122 2122 */
2123 2123 sd_initpkt_for_buf, /* Index: 31 */
2124 2124 sd_initpkt_for_buf, /* Index: 32 */
2125 2125 sd_initpkt_for_buf, /* Index: 33 */
2126 2126 sd_initpkt_for_buf, /* Index: 34 */
2127 2127 };
2128 2128
2129 2129
2130 2130 /*
2131 2131 * Array to map a layering chain index to the appropriate destroypkt routine.
2132 2132 * The redundant entries are present so that the index used for accessing
2133 2133 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2134 2134 * with this table as well.
2135 2135 */
2136 2136 typedef void (*sd_destroypkt_t)(struct buf *);
2137 2137
2138 2138 static sd_destroypkt_t sd_destroypkt_map[] = {
2139 2139
2140 2140 /* Chain for buf IO for disk drive targets (PM enabled) */
2141 2141 sd_destroypkt_for_buf, /* Index: 0 */
2142 2142 sd_destroypkt_for_buf, /* Index: 1 */
2143 2143 sd_destroypkt_for_buf, /* Index: 2 */
2144 2144
2145 2145 /* Chain for buf IO for disk drive targets (PM disabled) */
2146 2146 sd_destroypkt_for_buf, /* Index: 3 */
2147 2147 sd_destroypkt_for_buf, /* Index: 4 */
2148 2148
2149 2149 /*
2150 2150 * Chain for buf IO for removable-media or large sector size
2151 2151 * disk drive targets (PM enabled)
2152 2152 */
2153 2153 sd_destroypkt_for_buf, /* Index: 5 */
2154 2154 sd_destroypkt_for_buf, /* Index: 6 */
2155 2155 sd_destroypkt_for_buf, /* Index: 7 */
2156 2156 sd_destroypkt_for_buf, /* Index: 8 */
2157 2157
2158 2158 /*
2159 2159 * Chain for buf IO for removable-media or large sector size
2160 2160 * disk drive targets (PM disabled)
2161 2161 */
2162 2162 sd_destroypkt_for_buf, /* Index: 9 */
2163 2163 sd_destroypkt_for_buf, /* Index: 10 */
2164 2164 sd_destroypkt_for_buf, /* Index: 11 */
2165 2165
2166 2166 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2167 2167 sd_destroypkt_for_buf, /* Index: 12 */
2168 2168 sd_destroypkt_for_buf, /* Index: 13 */
2169 2169 sd_destroypkt_for_buf, /* Index: 14 */
2170 2170 sd_destroypkt_for_buf, /* Index: 15 */
2171 2171
2172 2172 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2173 2173 sd_destroypkt_for_buf, /* Index: 16 */
2174 2174 sd_destroypkt_for_buf, /* Index: 17 */
2175 2175 sd_destroypkt_for_buf, /* Index: 18 */
2176 2176
2177 2177 /* Chain for USCSI commands (non-checksum targets) */
2178 2178 sd_destroypkt_for_uscsi, /* Index: 19 */
2179 2179 sd_destroypkt_for_uscsi, /* Index: 20 */
2180 2180
2181 2181 /* Chain for USCSI commands (checksum targets) */
2182 2182 sd_destroypkt_for_uscsi, /* Index: 21 */
2183 2183 sd_destroypkt_for_uscsi, /* Index: 22 */
2184 2184 sd_destroypkt_for_uscsi, /* Index: 23 */
2185 2185
2186 2186 /* Chain for "direct" USCSI commands (all targets) */
2187 2187 sd_destroypkt_for_uscsi, /* Index: 24 */
2188 2188
2189 2189 /* Chain for "direct priority" USCSI commands (all targets) */
2190 2190 sd_destroypkt_for_uscsi, /* Index: 25 */
2191 2191
2192 2192 /*
2193 2193 * Chain for buf IO for large sector size disk drive targets
2194 2194 * with checksumming (PM enabled)
2195 2195 */
2196 2196 sd_destroypkt_for_buf, /* Index: 26 */
2197 2197 sd_destroypkt_for_buf, /* Index: 27 */
2198 2198 sd_destroypkt_for_buf, /* Index: 28 */
2199 2199 sd_destroypkt_for_buf, /* Index: 29 */
2200 2200 sd_destroypkt_for_buf, /* Index: 30 */
2201 2201
2202 2202 /*
2203 2203 * Chain for buf IO for large sector size disk drive targets
2204 2204 * with checksumming (PM disabled)
2205 2205 */
2206 2206 sd_destroypkt_for_buf, /* Index: 31 */
2207 2207 sd_destroypkt_for_buf, /* Index: 32 */
2208 2208 sd_destroypkt_for_buf, /* Index: 33 */
2209 2209 sd_destroypkt_for_buf, /* Index: 34 */
2210 2210 };
2211 2211
2212 2212
2213 2213
2214 2214 /*
2215 2215 * Array to map a layering chain index to the appropriate chain "type".
2216 2216 * The chain type indicates a specific property/usage of the chain.
2217 2217 * The redundant entries are present so that the index used for accessing
2218 2218 * the above sd_iostart_chain and sd_iodone_chain tables can be used directly
2219 2219 * with this table as well.
2220 2220 */
2221 2221
2222 2222 #define SD_CHAIN_NULL 0 /* for the special RQS cmd */
2223 2223 #define SD_CHAIN_BUFIO 1 /* regular buf IO */
2224 2224 #define SD_CHAIN_USCSI 2 /* regular USCSI commands */
2225 2225 #define SD_CHAIN_DIRECT 3 /* uscsi, w/ bypass power mgt */
2226 2226 #define SD_CHAIN_DIRECT_PRIORITY 4 /* uscsi, w/ bypass power mgt */
2227 2227 /* (for error recovery) */
2228 2228
2229 2229 static int sd_chain_type_map[] = {
2230 2230
2231 2231 /* Chain for buf IO for disk drive targets (PM enabled) */
2232 2232 SD_CHAIN_BUFIO, /* Index: 0 */
2233 2233 SD_CHAIN_BUFIO, /* Index: 1 */
2234 2234 SD_CHAIN_BUFIO, /* Index: 2 */
2235 2235
2236 2236 /* Chain for buf IO for disk drive targets (PM disabled) */
2237 2237 SD_CHAIN_BUFIO, /* Index: 3 */
2238 2238 SD_CHAIN_BUFIO, /* Index: 4 */
2239 2239
2240 2240 /*
2241 2241 * Chain for buf IO for removable-media or large sector size
2242 2242 * disk drive targets (PM enabled)
2243 2243 */
2244 2244 SD_CHAIN_BUFIO, /* Index: 5 */
2245 2245 SD_CHAIN_BUFIO, /* Index: 6 */
2246 2246 SD_CHAIN_BUFIO, /* Index: 7 */
2247 2247 SD_CHAIN_BUFIO, /* Index: 8 */
2248 2248
2249 2249 /*
2250 2250 * Chain for buf IO for removable-media or large sector size
2251 2251 * disk drive targets (PM disabled)
2252 2252 */
2253 2253 SD_CHAIN_BUFIO, /* Index: 9 */
2254 2254 SD_CHAIN_BUFIO, /* Index: 10 */
2255 2255 SD_CHAIN_BUFIO, /* Index: 11 */
2256 2256
2257 2257 /* Chain for buf IO for disk drives with checksumming (PM enabled) */
2258 2258 SD_CHAIN_BUFIO, /* Index: 12 */
2259 2259 SD_CHAIN_BUFIO, /* Index: 13 */
2260 2260 SD_CHAIN_BUFIO, /* Index: 14 */
2261 2261 SD_CHAIN_BUFIO, /* Index: 15 */
2262 2262
2263 2263 /* Chain for buf IO for disk drives with checksumming (PM disabled) */
2264 2264 SD_CHAIN_BUFIO, /* Index: 16 */
2265 2265 SD_CHAIN_BUFIO, /* Index: 17 */
2266 2266 SD_CHAIN_BUFIO, /* Index: 18 */
2267 2267
2268 2268 /* Chain for USCSI commands (non-checksum targets) */
2269 2269 SD_CHAIN_USCSI, /* Index: 19 */
2270 2270 SD_CHAIN_USCSI, /* Index: 20 */
2271 2271
2272 2272 /* Chain for USCSI commands (checksum targets) */
2273 2273 SD_CHAIN_USCSI, /* Index: 21 */
2274 2274 SD_CHAIN_USCSI, /* Index: 22 */
2275 2275 SD_CHAIN_USCSI, /* Index: 23 */
2276 2276
2277 2277 /* Chain for "direct" USCSI commands (all targets) */
2278 2278 SD_CHAIN_DIRECT, /* Index: 24 */
2279 2279
2280 2280 /* Chain for "direct priority" USCSI commands (all targets) */
2281 2281 SD_CHAIN_DIRECT_PRIORITY, /* Index: 25 */
2282 2282
2283 2283 /*
2284 2284 * Chain for buf IO for large sector size disk drive targets
2285 2285 * with checksumming (PM enabled)
2286 2286 */
2287 2287 SD_CHAIN_BUFIO, /* Index: 26 */
2288 2288 SD_CHAIN_BUFIO, /* Index: 27 */
2289 2289 SD_CHAIN_BUFIO, /* Index: 28 */
2290 2290 SD_CHAIN_BUFIO, /* Index: 29 */
2291 2291 SD_CHAIN_BUFIO, /* Index: 30 */
2292 2292
2293 2293 /*
2294 2294 * Chain for buf IO for large sector size disk drive targets
2295 2295 * with checksumming (PM disabled)
2296 2296 */
2297 2297 SD_CHAIN_BUFIO, /* Index: 31 */
2298 2298 SD_CHAIN_BUFIO, /* Index: 32 */
2299 2299 SD_CHAIN_BUFIO, /* Index: 33 */
2300 2300 SD_CHAIN_BUFIO, /* Index: 34 */
2301 2301 };
2302 2302
2303 2303
2304 2304 /* Macro to return TRUE if the IO has come from the sd_buf_iostart() chain. */
2305 2305 #define SD_IS_BUFIO(xp) \
2306 2306 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_BUFIO)
2307 2307
2308 2308 /* Macro to return TRUE if the IO has come from the "direct priority" chain. */
2309 2309 #define SD_IS_DIRECT_PRIORITY(xp) \
2310 2310 (sd_chain_type_map[(xp)->xb_chain_iostart] == SD_CHAIN_DIRECT_PRIORITY)
2311 2311
2312 2312
2313 2313
2314 2314 /*
2315 2315 * Struct, array, and macros to map a specific chain to the appropriate
2316 2316 * layering indexes in the sd_iostart_chain[] and sd_iodone_chain[] arrays.
2317 2317 *
2318 2318 * The sd_chain_index_map[] array is used at attach time to set the various
2319 2319 * un_xxx_chain type members of the sd_lun softstate to the specific layering
2320 2320 * chain to be used with the instance. This allows different instances to use
2321 2321 * different chains for buf IO, uscsi IO, etc. Also, since the xb_chain_iostart
2322 2322 * and xb_chain_iodone index values in the sd_xbuf are initialized to these
2323 2323 * values at sd_xbuf init time, this allows (1) layering chains to be changed
2324 2324 * dynamically and without the use of locking; and (2) a layer to update the
2325 2325 * xb_chain_io[start|done] member in a given xbuf with its current index value,
2326 2326 * to allow for deferred processing of an IO within the same chain from a
2327 2327 * different execution context.
2328 2328 */
2329 2329
2330 2330 struct sd_chain_index {
2331 2331 int sci_iostart_index;
2332 2332 int sci_iodone_index;
2333 2333 };
2334 2334
2335 2335 static struct sd_chain_index sd_chain_index_map[] = {
2336 2336 { SD_CHAIN_DISK_IOSTART, SD_CHAIN_DISK_IODONE },
2337 2337 { SD_CHAIN_DISK_IOSTART_NO_PM, SD_CHAIN_DISK_IODONE_NO_PM },
2338 2338 { SD_CHAIN_RMMEDIA_IOSTART, SD_CHAIN_RMMEDIA_IODONE },
2339 2339 { SD_CHAIN_RMMEDIA_IOSTART_NO_PM, SD_CHAIN_RMMEDIA_IODONE_NO_PM },
2340 2340 { SD_CHAIN_CHKSUM_IOSTART, SD_CHAIN_CHKSUM_IODONE },
2341 2341 { SD_CHAIN_CHKSUM_IOSTART_NO_PM, SD_CHAIN_CHKSUM_IODONE_NO_PM },
2342 2342 { SD_CHAIN_USCSI_CMD_IOSTART, SD_CHAIN_USCSI_CMD_IODONE },
2343 2343 { SD_CHAIN_USCSI_CHKSUM_IOSTART, SD_CHAIN_USCSI_CHKSUM_IODONE },
2344 2344 { SD_CHAIN_DIRECT_CMD_IOSTART, SD_CHAIN_DIRECT_CMD_IODONE },
2345 2345 { SD_CHAIN_PRIORITY_CMD_IOSTART, SD_CHAIN_PRIORITY_CMD_IODONE },
2346 2346 { SD_CHAIN_MSS_CHKSUM_IOSTART, SD_CHAIN_MSS_CHKSUM_IODONE },
2347 2347 { SD_CHAIN_MSS_CHKSUM_IOSTART_NO_PM, SD_CHAIN_MSS_CHKSUM_IODONE_NO_PM },
2348 2348
2349 2349 };
2350 2350
2351 2351
2352 2352 /*
2353 2353 * The following are indexes into the sd_chain_index_map[] array.
2354 2354 */
2355 2355
2356 2356 /* un->un_buf_chain_type must be set to one of these */
2357 2357 #define SD_CHAIN_INFO_DISK 0
2358 2358 #define SD_CHAIN_INFO_DISK_NO_PM 1
2359 2359 #define SD_CHAIN_INFO_RMMEDIA 2
2360 2360 #define SD_CHAIN_INFO_MSS_DISK 2
2361 2361 #define SD_CHAIN_INFO_RMMEDIA_NO_PM 3
2362 2362 #define SD_CHAIN_INFO_MSS_DSK_NO_PM 3
2363 2363 #define SD_CHAIN_INFO_CHKSUM 4
2364 2364 #define SD_CHAIN_INFO_CHKSUM_NO_PM 5
2365 2365 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM 10
2366 2366 #define SD_CHAIN_INFO_MSS_DISK_CHKSUM_NO_PM 11
2367 2367
2368 2368 /* un->un_uscsi_chain_type must be set to one of these */
2369 2369 #define SD_CHAIN_INFO_USCSI_CMD 6
2370 2370 /* USCSI with PM disabled is the same as DIRECT */
2371 2371 #define SD_CHAIN_INFO_USCSI_CMD_NO_PM 8
2372 2372 #define SD_CHAIN_INFO_USCSI_CHKSUM 7
2373 2373
2374 2374 /* un->un_direct_chain_type must be set to one of these */
2375 2375 #define SD_CHAIN_INFO_DIRECT_CMD 8
2376 2376
2377 2377 /* un->un_priority_chain_type must be set to one of these */
2378 2378 #define SD_CHAIN_INFO_PRIORITY_CMD 9
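/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * how attach-time code selects a chain via sd_chain_index_map[] and how
 * the resulting indexes would seed an xbuf for a subsequent IO.
 */
#ifdef notdef
static void
sd_example_select_chain(struct sd_lun *un, struct sd_xbuf *xp)
{
	struct sd_chain_index *scip;

	/* Pick the plain disk chain for this instance's buf IO. */
	un->un_buf_chain_type = SD_CHAIN_INFO_DISK;

	/* Seed the xbuf with the chain's iostart/iodone entry indexes. */
	scip = &sd_chain_index_map[un->un_buf_chain_type];
	xp->xb_chain_iostart = scip->sci_iostart_index;
	xp->xb_chain_iodone = scip->sci_iodone_index;
}
#endif	/* notdef */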
2379 2379
2380 2380 /* size for devid inquiries */
2381 2381 #define MAX_INQUIRY_SIZE 0xF0
2382 2382
2383 2383 /*
2384 2384 * Macros used by functions to pass a given buf(9S) struct along to the
2385 2385 * next function in the layering chain for further processing.
2386 2386 *
2387 2387 * In the following macros, passing more than three arguments to the called
2388 2388 * routines causes the optimizer for the SPARC compiler to stop doing tail
2389 2389 * call elimination, which results in significant performance degradation.
2390 2390 */
2391 2391 #define SD_BEGIN_IOSTART(index, un, bp) \
2392 2392 ((*(sd_iostart_chain[index]))(index, un, bp))
2393 2393
2394 2394 #define SD_BEGIN_IODONE(index, un, bp) \
2395 2395 ((*(sd_iodone_chain[index]))(index, un, bp))
2396 2396
2397 2397 #define SD_NEXT_IOSTART(index, un, bp) \
2398 2398 ((*(sd_iostart_chain[(index) + 1]))((index) + 1, un, bp))
2399 2399
2400 2400 #define SD_NEXT_IODONE(index, un, bp) \
2401 2401 ((*(sd_iodone_chain[(index) - 1]))((index) - 1, un, bp))
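/*
 * Illustrative sketch only (hypothetical layer, not part of the driver):
 * the typical shape of a paired iostart/iodone layer. The iostart side
 * performs its processing and passes the buf down the chain with an
 * incremented index; the iodone side undoes that processing and passes
 * the buf back up with a decremented index.
 */
#ifdef notdef
static void
sd_example_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	/* ... layer-specific setup of bp ... */
	SD_NEXT_IOSTART(index, un, bp);	/* calls sd_iostart_chain[index + 1] */
}

static void
sd_example_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	/* ... layer-specific teardown of bp ... */
	SD_NEXT_IODONE(index, un, bp);	/* calls sd_iodone_chain[index - 1] */
}
#endif	/* notdef */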
2402 2402
2403 2403 /*
2404 2404 * Function: _init
2405 2405 *
2406 2406 * Description: This is the driver _init(9E) entry point.
2407 2407 *
2408 2408 * Return Code: Returns the value from mod_install(9F) or
2409 2409 * ddi_soft_state_init(9F) as appropriate.
2410 2410 *
2411 2411 * Context: Called when driver module loaded.
2412 2412 */
2413 2413
2414 2414 int
2415 2415 _init(void)
2416 2416 {
2417 2417 int err;
2418 2418
2419 2419 /* establish driver name from module name */
2420 2420 sd_label = (char *)mod_modname(&modlinkage);
2421 2421
2422 2422 #ifndef XPV_HVM_DRIVER
2423 2423 err = ddi_soft_state_init(&sd_state, sizeof (struct sd_lun),
2424 2424 SD_MAXUNIT);
2425 2425 if (err != 0) {
2426 2426 return (err);
2427 2427 }
2428 2428
2429 2429 #else /* XPV_HVM_DRIVER */
2430 2430 /* Remove the leading "hvm_" from the module name */
2431 2431 ASSERT(strncmp(sd_label, "hvm_", strlen("hvm_")) == 0);
2432 2432 sd_label += strlen("hvm_");
2433 2433
2434 2434 #endif /* XPV_HVM_DRIVER */
2435 2435
2436 2436 mutex_init(&sd_detach_mutex, NULL, MUTEX_DRIVER, NULL);
2437 2437 mutex_init(&sd_log_mutex, NULL, MUTEX_DRIVER, NULL);
2438 2438 mutex_init(&sd_label_mutex, NULL, MUTEX_DRIVER, NULL);
2439 2439
2440 2440 mutex_init(&sd_tr.srq_resv_reclaim_mutex, NULL, MUTEX_DRIVER, NULL);
2441 2441 cv_init(&sd_tr.srq_resv_reclaim_cv, NULL, CV_DRIVER, NULL);
2442 2442 cv_init(&sd_tr.srq_inprocess_cv, NULL, CV_DRIVER, NULL);
2443 2443
2444 2444 /*
2445 2445 * it's OK to init here even for fibre devices
2446 2446 */
2447 2447 sd_scsi_probe_cache_init();
2448 2448
2449 2449 sd_scsi_target_lun_init();
2450 2450
2451 2451 /*
2452 2452 * Creating taskq before mod_install ensures that all callers (threads)
2453 2453 * that enter the module after a successful mod_install encounter
2454 2454 * a valid taskq.
2455 2455 */
2456 2456 sd_taskq_create();
2457 2457
2458 2458 err = mod_install(&modlinkage);
2459 2459 if (err != 0) {
2460 2460 /* delete taskq if install fails */
2461 2461 sd_taskq_delete();
2462 2462
2463 2463 mutex_destroy(&sd_detach_mutex);
2464 2464 mutex_destroy(&sd_log_mutex);
2465 2465 mutex_destroy(&sd_label_mutex);
2466 2466
2467 2467 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2468 2468 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2469 2469 cv_destroy(&sd_tr.srq_inprocess_cv);
2470 2470
2471 2471 sd_scsi_probe_cache_fini();
2472 2472
2473 2473 sd_scsi_target_lun_fini();
2474 2474
2475 2475 #ifndef XPV_HVM_DRIVER
2476 2476 ddi_soft_state_fini(&sd_state);
2477 2477 #endif /* !XPV_HVM_DRIVER */
2478 2478 return (err);
2479 2479 }
2480 2480
2481 2481 return (err);
2482 2482 }
2483 2483
2484 2484
2485 2485 /*
2486 2486 * Function: _fini
2487 2487 *
2488 2488 * Description: This is the driver _fini(9E) entry point.
2489 2489 *
2490 2490 * Return Code: Returns the value from mod_remove(9F)
2491 2491 *
2492 2492 * Context: Called when driver module is unloaded.
2493 2493 */
2494 2494
2495 2495 int
2496 2496 _fini(void)
2497 2497 {
2498 2498 int err;
2499 2499
2500 2500 if ((err = mod_remove(&modlinkage)) != 0) {
2501 2501 return (err);
2502 2502 }
2503 2503
2504 2504 sd_taskq_delete();
2505 2505
2506 2506 mutex_destroy(&sd_detach_mutex);
2507 2507 mutex_destroy(&sd_log_mutex);
2508 2508 mutex_destroy(&sd_label_mutex);
2509 2509 mutex_destroy(&sd_tr.srq_resv_reclaim_mutex);
2510 2510
2511 2511 sd_scsi_probe_cache_fini();
2512 2512
2513 2513 sd_scsi_target_lun_fini();
2514 2514
2515 2515 cv_destroy(&sd_tr.srq_resv_reclaim_cv);
2516 2516 cv_destroy(&sd_tr.srq_inprocess_cv);
2517 2517
2518 2518 #ifndef XPV_HVM_DRIVER
2519 2519 ddi_soft_state_fini(&sd_state);
2520 2520 #endif /* !XPV_HVM_DRIVER */
2521 2521
2522 2522 return (err);
2523 2523 }
2524 2524
2525 2525
2526 2526 /*
2527 2527 * Function: _info
2528 2528 *
2529 2529 * Description: This is the driver _info(9E) entry point.
2530 2530 *
2531 2531 * Arguments: modinfop - pointer to the driver modinfo structure
2532 2532 *
2533 2533 * Return Code: Returns the value from mod_info(9F).
2534 2534 *
2535 2535 * Context: Kernel thread context
2536 2536 */
2537 2537
2538 2538 int
2539 2539 _info(struct modinfo *modinfop)
2540 2540 {
2541 2541 return (mod_info(&modlinkage, modinfop));
2542 2542 }
2543 2543
2544 2544
2545 2545 /*
2546 2546 * The following routines implement the driver message logging facility.
2547 2547 * They provide component- and level-based debug output filtering.
2548 2548 * Output may also be restricted to messages for a single instance by
2549 2549 * specifying a soft state pointer in sd_debug_un. If sd_debug_un is set
2550 2550 * to NULL, then messages for all instances are printed.
2551 2551 *
2552 2552 * These routines have been cloned from each other due to the language
2553 2553 * constraints of macros and variable argument list processing.
2554 2554 */
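/*
 * For example (illustrative values only; the mask definitions live in the
 * driver headers), verbose logging for all components and instances could
 * be requested from /etc/system:
 *
 *	set sd:sd_component_mask = 0xffffffff
 *	set sd:sd_level_mask = 0xffffffff
 */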
2555 2555
2556 2556
2557 2557 /*
2558 2558 * Function: sd_log_err
2559 2559 *
2560 2560 * Description: This routine is called by the SD_ERROR macro for debug
2561 2561 * logging of error conditions.
2562 2562 *
2563 2563 * Arguments: comp - driver component being logged
2564 2564 * un - pointer to driver soft state (unit) structure
2565 2565 * fmt - error string and format to be logged
2566 2566 */
2567 2567
2568 2568 static void
2569 2569 sd_log_err(uint_t comp, struct sd_lun *un, const char *fmt, ...)
2570 2570 {
2571 2571 va_list ap;
2572 2572 dev_info_t *dev;
2573 2573
2574 2574 ASSERT(un != NULL);
2575 2575 dev = SD_DEVINFO(un);
2576 2576 ASSERT(dev != NULL);
2577 2577
2578 2578 /*
2579 2579 * Filter messages based on the global component and level masks.
2580 2580 * Also print if un matches the value of sd_debug_un, or if
2581 2581 * sd_debug_un is set to NULL.
2582 2582 */
2583 2583 if ((sd_component_mask & comp) && (sd_level_mask & SD_LOGMASK_ERROR) &&
2584 2584 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2585 2585 mutex_enter(&sd_log_mutex);
2586 2586 va_start(ap, fmt);
2587 2587 (void) vsprintf(sd_log_buf, fmt, ap);
2588 2588 va_end(ap);
2589 2589 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2590 2590 mutex_exit(&sd_log_mutex);
2591 2591 }
2592 2592 #ifdef SD_FAULT_INJECTION
2593 2593 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2594 2594 if (un->sd_injection_mask & comp) {
2595 2595 mutex_enter(&sd_log_mutex);
2596 2596 va_start(ap, fmt);
2597 2597 (void) vsprintf(sd_log_buf, fmt, ap);
2598 2598 va_end(ap);
2599 2599 sd_injection_log(sd_log_buf, un);
2600 2600 mutex_exit(&sd_log_mutex);
2601 2601 }
2602 2602 #endif
2603 2603 }
2604 2604
2605 2605
2606 2606 /*
2607 2607 * Function: sd_log_info
2608 2608 *
2609 2609 * Description: This routine is called by the SD_INFO macro for debug
2610 2610 * logging of general purpose informational conditions.
2611 2611 *
2612 2612 * Arguments: component - driver component being logged
2613 2613 * un - pointer to driver soft state (unit) structure
2614 2614 * fmt - info string and format to be logged
2615 2615 */
2616 2616
2617 2617 static void
2618 2618 sd_log_info(uint_t component, struct sd_lun *un, const char *fmt, ...)
2619 2619 {
2620 2620 va_list ap;
2621 2621 dev_info_t *dev;
2622 2622
2623 2623 ASSERT(un != NULL);
2624 2624 dev = SD_DEVINFO(un);
2625 2625 ASSERT(dev != NULL);
2626 2626
2627 2627 /*
2628 2628 * Filter messages based on the global component and level masks.
2629 2629 * Also print if un matches the value of sd_debug_un, or if
2630 2630 * sd_debug_un is set to NULL.
2631 2631 */
2632 2632 if ((sd_component_mask & component) &&
2633 2633 (sd_level_mask & SD_LOGMASK_INFO) &&
2634 2634 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2635 2635 mutex_enter(&sd_log_mutex);
2636 2636 va_start(ap, fmt);
2637 2637 (void) vsprintf(sd_log_buf, fmt, ap);
2638 2638 va_end(ap);
2639 2639 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2640 2640 mutex_exit(&sd_log_mutex);
2641 2641 }
2642 2642 #ifdef SD_FAULT_INJECTION
2643 2643 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2644 2644 if (un->sd_injection_mask & component) {
2645 2645 mutex_enter(&sd_log_mutex);
2646 2646 va_start(ap, fmt);
2647 2647 (void) vsprintf(sd_log_buf, fmt, ap);
2648 2648 va_end(ap);
2649 2649 sd_injection_log(sd_log_buf, un);
2650 2650 mutex_exit(&sd_log_mutex);
2651 2651 }
2652 2652 #endif
2653 2653 }
2654 2654
2655 2655
2656 2656 /*
2657 2657 * Function: sd_log_trace
2658 2658 *
2659 2659 * Description: This routine is called by the SD_TRACE macro for debug
2660 2660 * logging of trace conditions (i.e. function entry/exit).
2661 2661 *
2662 2662 * Arguments: component - driver component being logged
2663 2663 * un - pointer to driver soft state (unit) structure
2664 2664 * fmt - trace string and format to be logged
2665 2665 */
2666 2666
2667 2667 static void
2668 2668 sd_log_trace(uint_t component, struct sd_lun *un, const char *fmt, ...)
2669 2669 {
2670 2670 va_list ap;
2671 2671 dev_info_t *dev;
2672 2672
2673 2673 ASSERT(un != NULL);
2674 2674 dev = SD_DEVINFO(un);
2675 2675 ASSERT(dev != NULL);
2676 2676
2677 2677 /*
2678 2678 * Filter messages based on the global component and level masks.
2679 2679 * Also print if un matches the value of sd_debug_un, or if
2680 2680 * sd_debug_un is set to NULL.
2681 2681 */
2682 2682 if ((sd_component_mask & component) &&
2683 2683 (sd_level_mask & SD_LOGMASK_TRACE) &&
2684 2684 ((sd_debug_un == NULL) || (sd_debug_un == un))) {
2685 2685 mutex_enter(&sd_log_mutex);
2686 2686 va_start(ap, fmt);
2687 2687 (void) vsprintf(sd_log_buf, fmt, ap);
2688 2688 va_end(ap);
2689 2689 scsi_log(dev, sd_label, CE_CONT, "%s", sd_log_buf);
2690 2690 mutex_exit(&sd_log_mutex);
2691 2691 }
2692 2692 #ifdef SD_FAULT_INJECTION
2693 2693 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::sd_injection_mask));
2694 2694 if (un->sd_injection_mask & component) {
2695 2695 mutex_enter(&sd_log_mutex);
2696 2696 va_start(ap, fmt);
2697 2697 (void) vsprintf(sd_log_buf, fmt, ap);
2698 2698 va_end(ap);
2699 2699 sd_injection_log(sd_log_buf, un);
2700 2700 mutex_exit(&sd_log_mutex);
2701 2701 }
2702 2702 #endif
2703 2703 }
2704 2704
2705 2705
2706 2706 /*
2707 2707 * Function: sdprobe
2708 2708 *
2709 2709 * Description: This is the driver probe(9e) entry point function.
2710 2710 *
2711 2711 * Arguments: devi - opaque device info handle
2712 2712 *
2713 2713 * Return Code: DDI_PROBE_SUCCESS: If the probe was successful.
2714 2714 * DDI_PROBE_FAILURE: If the probe failed.
2715 2715 * DDI_PROBE_PARTIAL: If the instance is not present now,
2716 2716 * but may be present in the future.
2717 2717 */
2718 2718
2719 2719 static int
2720 2720 sdprobe(dev_info_t *devi)
2721 2721 {
2722 2722 struct scsi_device *devp;
2723 2723 int rval;
2724 2724 #ifndef XPV_HVM_DRIVER
2725 2725 int instance = ddi_get_instance(devi);
2726 2726 #endif /* !XPV_HVM_DRIVER */
2727 2727
2728 2728 /*
2729 2729 	 * if it weren't for pln, sdprobe could actually be nulldev
2730 2730 * in the "__fibre" case.
2731 2731 */
2732 2732 if (ddi_dev_is_sid(devi) == DDI_SUCCESS) {
2733 2733 return (DDI_PROBE_DONTCARE);
2734 2734 }
2735 2735
2736 2736 devp = ddi_get_driver_private(devi);
2737 2737
2738 2738 if (devp == NULL) {
2739 2739 		/* Oops... nexus driver is misconfigured... */
2740 2740 return (DDI_PROBE_FAILURE);
2741 2741 }
2742 2742
2743 2743 #ifndef XPV_HVM_DRIVER
2744 2744 if (ddi_get_soft_state(sd_state, instance) != NULL) {
2745 2745 return (DDI_PROBE_PARTIAL);
2746 2746 }
2747 2747 #endif /* !XPV_HVM_DRIVER */
2748 2748
2749 2749 /*
2750 2750 * Call the SCSA utility probe routine to see if we actually
2751 2751 * have a target at this SCSI nexus.
2752 2752 */
2753 2753 switch (sd_scsi_probe_with_cache(devp, NULL_FUNC)) {
2754 2754 case SCSIPROBE_EXISTS:
2755 2755 switch (devp->sd_inq->inq_dtype) {
2756 2756 case DTYPE_DIRECT:
2757 2757 rval = DDI_PROBE_SUCCESS;
2758 2758 break;
2759 2759 case DTYPE_RODIRECT:
2760 2760 /* CDs etc. Can be removable media */
2761 2761 rval = DDI_PROBE_SUCCESS;
2762 2762 break;
2763 2763 case DTYPE_OPTICAL:
2764 2764 /*
2765 2765 			 * Rewritable optical drive HP115AA
2766 2766 * Can also be removable media
2767 2767 */
2768 2768
2769 2769 /*
2770 2770 			 * Do not attempt to bind to DTYPE_OPTICAL if
2771 2771 			 * pre-Solaris 9 SPARC sd behavior is required.
2772 2772 			 *
2773 2773 			 * If this is the first time through and sd_dtype_optical_bind
2774 2774 			 * has not been set in /etc/system, check the properties.
2775 2775 */
2776 2776
2777 2777 if (sd_dtype_optical_bind < 0) {
2778 2778 sd_dtype_optical_bind = ddi_prop_get_int
2779 2779 (DDI_DEV_T_ANY, devi, 0,
2780 2780 "optical-device-bind", 1);
2781 2781 }
2782 2782
2783 2783 if (sd_dtype_optical_bind == 0) {
2784 2784 rval = DDI_PROBE_FAILURE;
2785 2785 } else {
2786 2786 rval = DDI_PROBE_SUCCESS;
2787 2787 }
2788 2788 break;
2789 2789
2790 2790 case DTYPE_NOTPRESENT:
2791 2791 default:
2792 2792 rval = DDI_PROBE_FAILURE;
2793 2793 break;
2794 2794 }
2795 2795 break;
2796 2796 default:
2797 2797 rval = DDI_PROBE_PARTIAL;
2798 2798 break;
2799 2799 }
2800 2800
2801 2801 /*
2802 2802 * This routine checks for resource allocation prior to freeing,
2803 2803 * so it will take care of the "smart probing" case where a
2804 2804 * scsi_probe() may or may not have been issued and will *not*
2805 2805 * free previously-freed resources.
2806 2806 */
2807 2807 scsi_unprobe(devp);
2808 2808 return (rval);
2809 2809 }
2810 2810
2811 2811
2812 2812 /*
2813 2813 * Function: sdinfo
2814 2814 *
2815 2815 * Description: This is the driver getinfo(9e) entry point function.
2816 2816 * Given the device number, return the devinfo pointer from
2817 2817 * the scsi_device structure or the instance number
2818 2818 * associated with the dev_t.
2819 2819 *
2820 2820 * Arguments: dip - pointer to device info structure
2821 2821 * infocmd - command argument (DDI_INFO_DEVT2DEVINFO,
2822 2822 * DDI_INFO_DEVT2INSTANCE)
2823 2823 * arg - driver dev_t
2824 2824 * resultp - user buffer for request response
2825 2825 *
2826 2826 * Return Code: DDI_SUCCESS
2827 2827 * DDI_FAILURE
2828 2828 */
2829 2829 /* ARGSUSED */
2830 2830 static int
2831 2831 sdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
2832 2832 {
2833 2833 struct sd_lun *un;
2834 2834 dev_t dev;
2835 2835 int instance;
2836 2836 int error;
2837 2837
2838 2838 switch (infocmd) {
2839 2839 case DDI_INFO_DEVT2DEVINFO:
2840 2840 dev = (dev_t)arg;
2841 2841 instance = SDUNIT(dev);
2842 2842 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
2843 2843 return (DDI_FAILURE);
2844 2844 }
2845 2845 *result = (void *) SD_DEVINFO(un);
2846 2846 error = DDI_SUCCESS;
2847 2847 break;
2848 2848 case DDI_INFO_DEVT2INSTANCE:
2849 2849 dev = (dev_t)arg;
2850 2850 instance = SDUNIT(dev);
2851 2851 *result = (void *)(uintptr_t)instance;
2852 2852 error = DDI_SUCCESS;
2853 2853 break;
2854 2854 default:
2855 2855 error = DDI_FAILURE;
2856 2856 }
2857 2857 return (error);
2858 2858 }
2859 2859
2860 2860 /*
2861 2861 * Function: sd_prop_op
2862 2862 *
2863 2863 * Description: This is the driver prop_op(9e) entry point function.
2864 2864 * Return the number of blocks for the partition in question
2865 2865 * or forward the request to the property facilities.
2866 2866 *
2867 2867 * Arguments: dev - device number
2868 2868 * dip - pointer to device info structure
2869 2869 * prop_op - property operator
2870 2870 * mod_flags - DDI_PROP_DONTPASS, don't pass to parent
2871 2871 * name - pointer to property name
2872 2872 * valuep - pointer or address of the user buffer
2873 2873 * lengthp - property length
2874 2874 *
2875 2875 * Return Code: DDI_PROP_SUCCESS
2876 2876 * DDI_PROP_NOT_FOUND
2877 2877 * DDI_PROP_UNDEFINED
2878 2878 * DDI_PROP_NO_MEMORY
2879 2879 * DDI_PROP_BUF_TOO_SMALL
2880 2880 */
2881 2881
2882 2882 static int
2883 2883 sd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
2884 2884 char *name, caddr_t valuep, int *lengthp)
2885 2885 {
2886 2886 struct sd_lun *un;
2887 2887
2888 2888 if ((un = ddi_get_soft_state(sd_state, ddi_get_instance(dip))) == NULL)
2889 2889 return (ddi_prop_op(dev, dip, prop_op, mod_flags,
2890 2890 name, valuep, lengthp));
2891 2891
2892 2892 return (cmlb_prop_op(un->un_cmlbhandle,
2893 2893 dev, dip, prop_op, mod_flags, name, valuep, lengthp,
2894 2894 SDPART(dev), (void *)SD_PATH_DIRECT));
2895 2895 }
2896 2896
2897 2897 /*
2898 2898 * The following functions are for smart probing:
2899 2899 * sd_scsi_probe_cache_init()
2900 2900 * sd_scsi_probe_cache_fini()
2901 2901 * sd_scsi_clear_probe_cache()
2902 2902 * sd_scsi_probe_with_cache()
2903 2903 */
2904 2904
2905 2905 /*
2906 2906 * Function: sd_scsi_probe_cache_init
2907 2907 *
2908 2908 * Description: Initializes the probe response cache mutex and head pointer.
2909 2909 *
2910 2910 * Context: Kernel thread context
2911 2911 */
2912 2912
2913 2913 static void
2914 2914 sd_scsi_probe_cache_init(void)
2915 2915 {
2916 2916 mutex_init(&sd_scsi_probe_cache_mutex, NULL, MUTEX_DRIVER, NULL);
2917 2917 sd_scsi_probe_cache_head = NULL;
2918 2918 }
2919 2919
2920 2920
2921 2921 /*
2922 2922 * Function: sd_scsi_probe_cache_fini
2923 2923 *
2924 2924 * Description: Frees all resources associated with the probe response cache.
2925 2925 *
2926 2926 * Context: Kernel thread context
2927 2927 */
2928 2928
2929 2929 static void
2930 2930 sd_scsi_probe_cache_fini(void)
2931 2931 {
2932 2932 struct sd_scsi_probe_cache *cp;
2933 2933 struct sd_scsi_probe_cache *ncp;
2934 2934
2935 2935 /* Clean up our smart probing linked list */
2936 2936 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = ncp) {
2937 2937 ncp = cp->next;
2938 2938 kmem_free(cp, sizeof (struct sd_scsi_probe_cache));
2939 2939 }
2940 2940 sd_scsi_probe_cache_head = NULL;
2941 2941 mutex_destroy(&sd_scsi_probe_cache_mutex);
2942 2942 }
2943 2943
2944 2944
2945 2945 /*
2946 2946 * Function: sd_scsi_clear_probe_cache
2947 2947 *
2948 2948 * Description: This routine clears the probe response cache. This is
2949 2949 * done when open() returns ENXIO so that when deferred
2950 2950 * attach is attempted (possibly after a device has been
2951 2951 * turned on) we will retry the probe. Since we don't know
2952 2952 * which target we failed to open, we just clear the
2953 2953 * entire cache.
2954 2954 *
2955 2955 * Context: Kernel thread context
2956 2956 */
2957 2957
2958 2958 static void
2959 2959 sd_scsi_clear_probe_cache(void)
2960 2960 {
2961 2961 struct sd_scsi_probe_cache *cp;
2962 2962 int i;
2963 2963
2964 2964 mutex_enter(&sd_scsi_probe_cache_mutex);
2965 2965 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
2966 2966 /*
2967 2967 * Reset all entries to SCSIPROBE_EXISTS. This will
2968 2968 * force probing to be performed the next time
2969 2969 * sd_scsi_probe_with_cache is called.
2970 2970 */
2971 2971 for (i = 0; i < NTARGETS_WIDE; i++) {
2972 2972 cp->cache[i] = SCSIPROBE_EXISTS;
2973 2973 }
2974 2974 }
2975 2975 mutex_exit(&sd_scsi_probe_cache_mutex);
2976 2976 }
2977 2977
2978 2978
2979 2979 /*
2980 2980 * Function: sd_scsi_probe_with_cache
2981 2981 *
2982 2982 * Description: This routine implements support for a scsi device probe
2983 2983 * with cache. The driver maintains a cache of the target
2984 2984 * responses to scsi probes. If we get no response from a
2985 2985 * target during a probe inquiry, we remember that, and we
2986 2986 * avoid additional calls to scsi_probe on non-zero LUNs
2987 2987 * on the same target until the cache is cleared. By doing
2988 2988 * so we avoid the 1/4 sec selection timeout for nonzero
2989 2989 * LUNs. lun0 of a target is always probed.
2990 2990 *
2991 2991 * Arguments: devp - Pointer to a scsi_device(9S) structure
2992 2992 * waitfunc - indicates what the allocator routines should
2993 2993 * do when resources are not available. This value
2994 2994 * is passed on to scsi_probe() when that routine
2995 2995 * is called.
2996 2996 *
2997 2997 * Return Code: SCSIPROBE_NORESP if a NORESP in probe response cache;
2998 2998 * otherwise the value returned by scsi_probe(9F).
2999 2999 *
3000 3000 * Context: Kernel thread context
3001 3001 */
3002 3002
3003 3003 static int
3004 3004 sd_scsi_probe_with_cache(struct scsi_device *devp, int (*waitfn)())
3005 3005 {
3006 3006 struct sd_scsi_probe_cache *cp;
3007 3007 dev_info_t *pdip = ddi_get_parent(devp->sd_dev);
3008 3008 int lun, tgt;
3009 3009
3010 3010 lun = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
3011 3011 SCSI_ADDR_PROP_LUN, 0);
3012 3012 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devp->sd_dev, DDI_PROP_DONTPASS,
3013 3013 SCSI_ADDR_PROP_TARGET, -1);
3014 3014
3015 3015 /* Make sure caching enabled and target in range */
3016 3016 if ((tgt < 0) || (tgt >= NTARGETS_WIDE)) {
3017 3017 /* do it the old way (no cache) */
3018 3018 return (scsi_probe(devp, waitfn));
3019 3019 }
3020 3020
3021 3021 mutex_enter(&sd_scsi_probe_cache_mutex);
3022 3022
3023 3023 /* Find the cache for this scsi bus instance */
3024 3024 for (cp = sd_scsi_probe_cache_head; cp != NULL; cp = cp->next) {
3025 3025 if (cp->pdip == pdip) {
3026 3026 break;
3027 3027 }
3028 3028 }
3029 3029
3030 3030 /* If we can't find a cache for this pdip, create one */
3031 3031 if (cp == NULL) {
3032 3032 int i;
3033 3033
3034 3034 cp = kmem_zalloc(sizeof (struct sd_scsi_probe_cache),
3035 3035 KM_SLEEP);
3036 3036 cp->pdip = pdip;
3037 3037 cp->next = sd_scsi_probe_cache_head;
3038 3038 sd_scsi_probe_cache_head = cp;
3039 3039 for (i = 0; i < NTARGETS_WIDE; i++) {
3040 3040 cp->cache[i] = SCSIPROBE_EXISTS;
3041 3041 }
3042 3042 }
3043 3043
3044 3044 mutex_exit(&sd_scsi_probe_cache_mutex);
3045 3045
3046 3046 /* Recompute the cache for this target if LUN zero */
3047 3047 if (lun == 0) {
3048 3048 cp->cache[tgt] = SCSIPROBE_EXISTS;
3049 3049 }
3050 3050
3051 3051 /* Don't probe if cache remembers a NORESP from a previous LUN. */
3052 3052 if (cp->cache[tgt] != SCSIPROBE_EXISTS) {
3053 3053 return (SCSIPROBE_NORESP);
3054 3054 }
3055 3055
3056 3056 /* Do the actual probe; save & return the result */
3057 3057 return (cp->cache[tgt] = scsi_probe(devp, waitfn));
3058 3058 }
3059 3059
3060 3060
3061 3061 /*
3062 3062 * Function: sd_scsi_target_lun_init
3063 3063 *
3064 3064 * Description: Initializes the attached lun chain mutex and head pointer.
3065 3065 *
3066 3066 * Context: Kernel thread context
3067 3067 */
3068 3068
3069 3069 static void
3070 3070 sd_scsi_target_lun_init(void)
3071 3071 {
3072 3072 mutex_init(&sd_scsi_target_lun_mutex, NULL, MUTEX_DRIVER, NULL);
3073 3073 sd_scsi_target_lun_head = NULL;
3074 3074 }
3075 3075
3076 3076
3077 3077 /*
3078 3078 * Function: sd_scsi_target_lun_fini
3079 3079 *
3080 3080 * Description: Frees all resources associated with the attached lun
3081 3081 * chain
3082 3082 *
3083 3083 * Context: Kernel thread context
3084 3084 */
3085 3085
3086 3086 static void
3087 3087 sd_scsi_target_lun_fini(void)
3088 3088 {
3089 3089 struct sd_scsi_hba_tgt_lun *cp;
3090 3090 struct sd_scsi_hba_tgt_lun *ncp;
3091 3091
3092 3092 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = ncp) {
3093 3093 ncp = cp->next;
3094 3094 kmem_free(cp, sizeof (struct sd_scsi_hba_tgt_lun));
3095 3095 }
3096 3096 sd_scsi_target_lun_head = NULL;
3097 3097 mutex_destroy(&sd_scsi_target_lun_mutex);
3098 3098 }
3099 3099
3100 3100
3101 3101 /*
3102 3102 * Function: sd_scsi_get_target_lun_count
3103 3103 *
3104 3104 * Description: This routine will check in the attached lun chain to see
3105 3105 * how many luns are attached on the required SCSI controller
3106 3106  *		and target. Currently, some capabilities, such as tagged
3107 3107  *		queueing, are supported per target by the HBA, so all luns
3108 3108  *		on a target have the same capabilities. Based on this
3109 3109  *		assumption, sd should set these capabilities only once per
3110 3110  *		target. This function is called when sd needs to decide how
3111 3111  *		many luns are already attached on a target.
3112 3112 *
3113 3113 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
3114 3114 * controller device.
3115 3115 * target - The target ID on the controller's SCSI bus.
3116 3116 *
3117 3117 * Return Code: The number of luns attached on the required target and
3118 3118 * controller.
3119 3119 * -1 if target ID is not in parallel SCSI scope or the given
3120 3120 * dip is not in the chain.
3121 3121 *
3122 3122 * Context: Kernel thread context
3123 3123 */
3124 3124
3125 3125 static int
3126 3126 sd_scsi_get_target_lun_count(dev_info_t *dip, int target)
3127 3127 {
3128 3128 struct sd_scsi_hba_tgt_lun *cp;
3129 3129
3130 3130 if ((target < 0) || (target >= NTARGETS_WIDE)) {
3131 3131 return (-1);
3132 3132 }
3133 3133
3134 3134 mutex_enter(&sd_scsi_target_lun_mutex);
3135 3135
3136 3136 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
3137 3137 if (cp->pdip == dip) {
3138 3138 break;
3139 3139 }
3140 3140 }
3141 3141
3142 3142 mutex_exit(&sd_scsi_target_lun_mutex);
3143 3143
3144 3144 if (cp == NULL) {
3145 3145 return (-1);
3146 3146 }
3147 3147
3148 3148 return (cp->nlun[target]);
3149 3149 }
3150 3150
3151 3151
3152 3152 /*
3153 3153 * Function: sd_scsi_update_lun_on_target
3154 3154 *
3155 3155 * Description: This routine is used to update the attached lun chain when a
3156 3156 * lun is attached or detached on a target.
3157 3157 *
3158 3158 * Arguments: dip - Pointer to the system's dev_info_t for the SCSI
3159 3159 * controller device.
3160 3160 * target - The target ID on the controller's SCSI bus.
3161 3161 * flag - Indicate the lun is attached or detached.
3162 3162 *
3163 3163 * Context: Kernel thread context
3164 3164 */
3165 3165
3166 3166 static void
3167 3167 sd_scsi_update_lun_on_target(dev_info_t *dip, int target, int flag)
3168 3168 {
3169 3169 struct sd_scsi_hba_tgt_lun *cp;
3170 3170
3171 3171 mutex_enter(&sd_scsi_target_lun_mutex);
3172 3172
3173 3173 for (cp = sd_scsi_target_lun_head; cp != NULL; cp = cp->next) {
3174 3174 if (cp->pdip == dip) {
3175 3175 break;
3176 3176 }
3177 3177 }
3178 3178
3179 3179 if ((cp == NULL) && (flag == SD_SCSI_LUN_ATTACH)) {
3180 3180 cp = kmem_zalloc(sizeof (struct sd_scsi_hba_tgt_lun),
3181 3181 KM_SLEEP);
3182 3182 cp->pdip = dip;
3183 3183 cp->next = sd_scsi_target_lun_head;
3184 3184 sd_scsi_target_lun_head = cp;
3185 3185 }
3186 3186
3187 3187 mutex_exit(&sd_scsi_target_lun_mutex);
3188 3188
3189 3189 if (cp != NULL) {
3190 3190 if (flag == SD_SCSI_LUN_ATTACH) {
3191 3191 			cp->nlun[target]++;
3192 3192 } else {
3193 3193 			cp->nlun[target]--;
3194 3194 }
3195 3195 }
3196 3196 }
3197 3197
3198 3198
3199 3199 /*
3200 3200 * Function: sd_spin_up_unit
3201 3201 *
3202 3202 * Description: Issues the following commands to spin-up the device:
3203 3203 * START STOP UNIT, and INQUIRY.
3204 3204 *
3205 3205 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3206 3206 * structure for this target.
3207 3207 *
3208 3208 * Return Code: 0 - success
3209 3209 * EIO - failure
3210 3210 * EACCES - reservation conflict
3211 3211 *
3212 3212 * Context: Kernel thread context
3213 3213 */
3214 3214
3215 3215 static int
3216 3216 sd_spin_up_unit(sd_ssc_t *ssc)
3217 3217 {
3218 3218 size_t resid = 0;
3219 3219 int has_conflict = FALSE;
3220 3220 uchar_t *bufaddr;
3221 3221 int status;
3222 3222 struct sd_lun *un;
3223 3223
3224 3224 ASSERT(ssc != NULL);
3225 3225 un = ssc->ssc_un;
3226 3226 ASSERT(un != NULL);
3227 3227
3228 3228 /*
3229 3229 * Send a throwaway START UNIT command.
3230 3230 *
3231 3231 * If we fail on this, we don't care presently what precisely
3232 3232 * is wrong. EMC's arrays will also fail this with a check
3233 3233 * condition (0x2/0x4/0x3) if the device is "inactive," but
3234 3234 * we don't want to fail the attach because it may become
3235 3235 * "active" later.
3236 3236 * We don't know if power condition is supported or not at
3237 3237 	 * this stage, so use the START STOP bit.
3238 3238 */
3239 3239 status = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
3240 3240 SD_TARGET_START, SD_PATH_DIRECT);
3241 3241
3242 3242 if (status != 0) {
3243 3243 if (status == EACCES)
3244 3244 has_conflict = TRUE;
3245 3245 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3246 3246 }
3247 3247
3248 3248 /*
3249 3249 * Send another INQUIRY command to the target. This is necessary for
3250 3250 * non-removable media direct access devices because their INQUIRY data
3251 3251 * may not be fully qualified until they are spun up (perhaps via the
3252 3252 	 * START command above). (Note: this seems to be needed for some
3253 3253 	 * legacy devices only.) The INQUIRY command should succeed even if a
3254 3254 * Reservation Conflict is present.
3255 3255 */
3256 3256 bufaddr = kmem_zalloc(SUN_INQSIZE, KM_SLEEP);
3257 3257
3258 3258 if (sd_send_scsi_INQUIRY(ssc, bufaddr, SUN_INQSIZE, 0, 0, &resid)
3259 3259 != 0) {
3260 3260 kmem_free(bufaddr, SUN_INQSIZE);
3261 3261 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
3262 3262 return (EIO);
3263 3263 }
3264 3264
3265 3265 /*
3266 3266 * If we got enough INQUIRY data, copy it over the old INQUIRY data.
3267 3267 * Note that this routine does not return a failure here even if the
3268 3268 * INQUIRY command did not return any data. This is a legacy behavior.
3269 3269 */
3270 3270 if ((SUN_INQSIZE - resid) >= SUN_MIN_INQLEN) {
3271 3271 bcopy(bufaddr, SD_INQUIRY(un), SUN_INQSIZE);
3272 3272 }
3273 3273
3274 3274 kmem_free(bufaddr, SUN_INQSIZE);
3275 3275
3276 3276 /* If we hit a reservation conflict above, tell the caller. */
3277 3277 if (has_conflict == TRUE) {
3278 3278 return (EACCES);
3279 3279 }
3280 3280
3281 3281 return (0);
3282 3282 }
3283 3283
3284 3284 #ifdef _LP64
3285 3285 /*
3286 3286 * Function: sd_enable_descr_sense
3287 3287 *
3288 3288 * Description: This routine attempts to select descriptor sense format
3289 3289 * using the Control mode page. Devices that support 64 bit
3290 3290 * LBAs (for >2TB luns) should also implement descriptor
3291 3291 * sense data so we will call this function whenever we see
3292 3292 * a lun larger than 2TB. If for some reason the device
3293 3293  *		supports 64 bit LBAs but doesn't support descriptor sense,
3294 3294  *		the mode select will presumably fail. Everything will
3295 3295 * continue to work normally except that we will not get
3296 3296 * complete sense data for commands that fail with an LBA
3297 3297 * larger than 32 bits.
3298 3298 *
3299 3299 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3300 3300 * structure for this target.
3301 3301 *
3302 3302 * Context: Kernel thread context only
3303 3303 */
3304 3304
3305 3305 static void
3306 3306 sd_enable_descr_sense(sd_ssc_t *ssc)
3307 3307 {
3308 3308 uchar_t *header;
3309 3309 struct mode_control_scsi3 *ctrl_bufp;
3310 3310 size_t buflen;
3311 3311 size_t bd_len;
3312 3312 int status;
3313 3313 struct sd_lun *un;
3314 3314
3315 3315 ASSERT(ssc != NULL);
3316 3316 un = ssc->ssc_un;
3317 3317 ASSERT(un != NULL);
3318 3318
3319 3319 /*
3320 3320 * Read MODE SENSE page 0xA, Control Mode Page
3321 3321 */
3322 3322 buflen = MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH +
3323 3323 sizeof (struct mode_control_scsi3);
3324 3324 header = kmem_zalloc(buflen, KM_SLEEP);
3325 3325
3326 3326 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
3327 3327 MODEPAGE_CTRL_MODE, SD_PATH_DIRECT);
3328 3328
3329 3329 if (status != 0) {
3330 3330 SD_ERROR(SD_LOG_COMMON, un,
3331 3331 "sd_enable_descr_sense: mode sense ctrl page failed\n");
3332 3332 goto eds_exit;
3333 3333 }
3334 3334
3335 3335 /*
3336 3336 * Determine size of Block Descriptors in order to locate
3337 3337 * the mode page data. ATAPI devices return 0, SCSI devices
3338 3338 * should return MODE_BLK_DESC_LENGTH.
3339 3339 */
3340 3340 bd_len = ((struct mode_header *)header)->bdesc_length;
3341 3341
3342 3342 /* Clear the mode data length field for MODE SELECT */
3343 3343 ((struct mode_header *)header)->length = 0;
3344 3344
3345 3345 ctrl_bufp = (struct mode_control_scsi3 *)
3346 3346 (header + MODE_HEADER_LENGTH + bd_len);
3347 3347
3348 3348 /*
3349 3349 * If the page length is smaller than the expected value,
3350 3350 * the target device doesn't support D_SENSE. Bail out here.
3351 3351 */
3352 3352 if (ctrl_bufp->mode_page.length <
3353 3353 sizeof (struct mode_control_scsi3) - 2) {
3354 3354 SD_ERROR(SD_LOG_COMMON, un,
3355 3355 "sd_enable_descr_sense: enable D_SENSE failed\n");
3356 3356 goto eds_exit;
3357 3357 }
3358 3358
3359 3359 /*
3360 3360 * Clear PS bit for MODE SELECT
3361 3361 */
3362 3362 ctrl_bufp->mode_page.ps = 0;
3363 3363
3364 3364 /*
3365 3365 * Set D_SENSE to enable descriptor sense format.
3366 3366 */
3367 3367 ctrl_bufp->d_sense = 1;
3368 3368
3369 3369 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3370 3370
3371 3371 /*
3372 3372 * Use MODE SELECT to commit the change to the D_SENSE bit
3373 3373 */
3374 3374 status = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header,
3375 3375 buflen, SD_DONTSAVE_PAGE, SD_PATH_DIRECT);
3376 3376
3377 3377 if (status != 0) {
3378 3378 SD_INFO(SD_LOG_COMMON, un,
3379 3379 "sd_enable_descr_sense: mode select ctrl page failed\n");
3380 3380 } else {
3381 3381 kmem_free(header, buflen);
3382 3382 return;
3383 3383 }
3384 3384
3385 3385 eds_exit:
3386 3386 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3387 3387 kmem_free(header, buflen);
3388 3388 }
3389 3389
3390 3390 /*
3391 3391 * Function: sd_reenable_dsense_task
3392 3392 *
3393 3393 * Description: Re-enable descriptor sense after device or bus reset
3394 3394 *
3395 3395 * Context: Executes in a taskq() thread context
3396 3396 */
3397 3397 static void
3398 3398 sd_reenable_dsense_task(void *arg)
3399 3399 {
3400 3400 struct sd_lun *un = arg;
3401 3401 sd_ssc_t *ssc;
3402 3402
3403 3403 ASSERT(un != NULL);
3404 3404
3405 3405 ssc = sd_ssc_init(un);
3406 3406 sd_enable_descr_sense(ssc);
3407 3407 sd_ssc_fini(ssc);
3408 3408 }
3409 3409 #endif /* _LP64 */
3410 3410
3411 3411 /*
3412 3412 * Function: sd_set_mmc_caps
3413 3413 *
3414 3414 * Description: This routine determines if the device is MMC compliant and if
3415 3415 * the device supports CDDA via a mode sense of the CDVD
3416 3416 * capabilities mode page. Also checks if the device is a
3417 3417 * dvdram writable device.
3418 3418 *
3419 3419 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
3420 3420 * structure for this target.
3421 3421 *
3422 3422 * Context: Kernel thread context only
3423 3423 */
3424 3424
3425 3425 static void
3426 3426 sd_set_mmc_caps(sd_ssc_t *ssc)
3427 3427 {
3428 3428 struct mode_header_grp2 *sense_mhp;
3429 3429 uchar_t *sense_page;
3430 3430 caddr_t buf;
3431 3431 int bd_len;
3432 3432 int status;
3433 3433 struct uscsi_cmd com;
3434 3434 int rtn;
3435 3435 uchar_t *out_data_rw, *out_data_hd;
3436 3436 uchar_t *rqbuf_rw, *rqbuf_hd;
3437 3437 uchar_t *out_data_gesn;
3438 3438 int gesn_len;
3439 3439 struct sd_lun *un;
3440 3440
3441 3441 ASSERT(ssc != NULL);
3442 3442 un = ssc->ssc_un;
3443 3443 ASSERT(un != NULL);
3444 3444
3445 3445 /*
3446 3446 	 * The flags set in this function are: mmc compliant, dvdram
3447 3447 	 * writable device, and cdda support. Initialize them to FALSE;
3448 3448 	 * if a capability is detected, the flag will be set to TRUE.
3449 3449 */
3450 3450 un->un_f_mmc_cap = FALSE;
3451 3451 un->un_f_dvdram_writable_device = FALSE;
3452 3452 un->un_f_cfg_cdda = FALSE;
3453 3453
3454 3454 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3455 3455 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
3456 3456 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, SD_PATH_DIRECT);
3457 3457
3458 3458 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3459 3459
3460 3460 if (status != 0) {
3461 3461 /* command failed; just return */
3462 3462 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3463 3463 return;
3464 3464 }
3465 3465 /*
3466 3466 * If the mode sense request for the CDROM CAPABILITIES
3467 3467 	 * page (0x2A) succeeds, the device is assumed to be MMC.
3468 3468 */
3469 3469 un->un_f_mmc_cap = TRUE;
3470 3470
3471 3471 /* See if GET STATUS EVENT NOTIFICATION is supported */
3472 3472 if (un->un_f_mmc_gesn_polling) {
3473 3473 gesn_len = SD_GESN_HEADER_LEN + SD_GESN_MEDIA_DATA_LEN;
3474 3474 out_data_gesn = kmem_zalloc(gesn_len, KM_SLEEP);
3475 3475
3476 3476 rtn = sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(ssc,
3477 3477 out_data_gesn, gesn_len, 1 << SD_GESN_MEDIA_CLASS);
3478 3478
3479 3479 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3480 3480
3481 3481 if ((rtn != 0) || !sd_gesn_media_data_valid(out_data_gesn)) {
3482 3482 un->un_f_mmc_gesn_polling = FALSE;
3483 3483 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3484 3484 "sd_set_mmc_caps: gesn not supported "
3485 3485 "%d %x %x %x %x\n", rtn,
3486 3486 out_data_gesn[0], out_data_gesn[1],
3487 3487 out_data_gesn[2], out_data_gesn[3]);
3488 3488 }
3489 3489
3490 3490 kmem_free(out_data_gesn, gesn_len);
3491 3491 }
3492 3492
3493 3493 /* Get to the page data */
3494 3494 sense_mhp = (struct mode_header_grp2 *)buf;
3495 3495 bd_len = (sense_mhp->bdesc_length_hi << 8) |
3496 3496 sense_mhp->bdesc_length_lo;
3497 3497 if (bd_len > MODE_BLK_DESC_LENGTH) {
3498 3498 /*
3499 3499 * We did not get back the expected block descriptor
3500 3500 * length so we cannot determine if the device supports
3501 3501 * CDDA. However, we still indicate the device is MMC
3502 3502 * according to the successful response to the page
3503 3503 * 0x2A mode sense request.
3504 3504 */
3505 3505 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3506 3506 "sd_set_mmc_caps: Mode Sense returned "
3507 3507 "invalid block descriptor length\n");
3508 3508 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3509 3509 return;
3510 3510 }
3511 3511
3512 3512 /* See if read CDDA is supported */
3513 3513 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 +
3514 3514 bd_len);
3515 3515 un->un_f_cfg_cdda = (sense_page[5] & 0x01) ? TRUE : FALSE;
3516 3516
3517 3517 /* See if writing DVD RAM is supported. */
3518 3518 un->un_f_dvdram_writable_device = (sense_page[3] & 0x20) ? TRUE : FALSE;
3519 3519 if (un->un_f_dvdram_writable_device == TRUE) {
3520 3520 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3521 3521 return;
3522 3522 }
3523 3523
3524 3524 /*
3525 3525 * If the device presents DVD or CD capabilities in the mode
3526 3526 	 * page, we can return here since an RRD will not have
3527 3527 * these capabilities.
3528 3528 */
3529 3529 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3530 3530 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3531 3531 return;
3532 3532 }
3533 3533 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3534 3534
3535 3535 /*
3536 3536 * If un->un_f_dvdram_writable_device is still FALSE,
3537 3537 	 * check for a Removable Rigid Disk (RRD). An RRD
3538 3538 * device is identified by the features RANDOM_WRITABLE and
3539 3539 * HARDWARE_DEFECT_MANAGEMENT.
3540 3540 */
3541 3541 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3542 3542 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3543 3543
3544 3544 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
3545 3545 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3546 3546 RANDOM_WRITABLE, SD_PATH_STANDARD);
3547 3547
3548 3548 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3549 3549
3550 3550 if (rtn != 0) {
3551 3551 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3552 3552 kmem_free(rqbuf_rw, SENSE_LENGTH);
3553 3553 return;
3554 3554 }
3555 3555
3556 3556 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3557 3557 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3558 3558
3559 3559 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
3560 3560 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3561 3561 HARDWARE_DEFECT_MANAGEMENT, SD_PATH_STANDARD);
3562 3562
3563 3563 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3564 3564
3565 3565 if (rtn == 0) {
3566 3566 /*
3567 3567 * We have good information, check for random writable
3568 3568 * and hardware defect features.
3569 3569 */
3570 3570 if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3571 3571 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT)) {
3572 3572 un->un_f_dvdram_writable_device = TRUE;
3573 3573 }
3574 3574 }
3575 3575
3576 3576 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3577 3577 kmem_free(rqbuf_rw, SENSE_LENGTH);
3578 3578 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3579 3579 kmem_free(rqbuf_hd, SENSE_LENGTH);
3580 3580 }
3581 3581
3582 3582 /*
3583 3583 * Function: sd_check_for_writable_cd
3584 3584 *
3585 3585 * Description: This routine determines if the media in the device is
3586 3586  *		writable or not. It uses the GET CONFIGURATION command
3587 3587  *		(0x46) to make this determination.
3588 3588  *
3589 3589  *   Arguments: ssc	- ssc contains pointer to driver soft state (unit) structure
3590 3590 * path_flag - SD_PATH_DIRECT to use the USCSI "direct"
3591 3591 * chain and the normal command waitq, or
3592 3592 * SD_PATH_DIRECT_PRIORITY to use the USCSI
3593 3593 * "direct" chain and bypass the normal command
3594 3594 * waitq.
3595 3595 *
3596 3596 * Context: Never called at interrupt context.
3597 3597 */
3598 3598
3599 3599 static void
3600 3600 sd_check_for_writable_cd(sd_ssc_t *ssc, int path_flag)
3601 3601 {
3602 3602 struct uscsi_cmd com;
3603 3603 uchar_t *out_data;
3604 3604 uchar_t *rqbuf;
3605 3605 int rtn;
3606 3606 uchar_t *out_data_rw, *out_data_hd;
3607 3607 uchar_t *rqbuf_rw, *rqbuf_hd;
3608 3608 struct mode_header_grp2 *sense_mhp;
3609 3609 uchar_t *sense_page;
3610 3610 caddr_t buf;
3611 3611 int bd_len;
3612 3612 int status;
3613 3613 struct sd_lun *un;
3614 3614
3615 3615 ASSERT(ssc != NULL);
3616 3616 un = ssc->ssc_un;
3617 3617 ASSERT(un != NULL);
3618 3618 ASSERT(mutex_owned(SD_MUTEX(un)));
3619 3619
3620 3620 /*
3621 3621 	 * Initialize the writable media flag to FALSE; set it to TRUE only
3622 3622 	 * if the configuration info tells us the media is writable.
3623 3623 */
3624 3624 un->un_f_mmc_writable_media = FALSE;
3625 3625 mutex_exit(SD_MUTEX(un));
3626 3626
3627 3627 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
3628 3628 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3629 3629
3630 3630 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf, SENSE_LENGTH,
3631 3631 out_data, SD_PROFILE_HEADER_LEN, path_flag);
3632 3632
3633 3633 if (rtn != 0)
3634 3634 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3635 3635
3636 3636 mutex_enter(SD_MUTEX(un));
3637 3637 if (rtn == 0) {
3638 3638 /*
3639 3639 * We have good information, check for writable DVD.
3640 3640 */
3641 3641 if ((out_data[6] == 0) && (out_data[7] == 0x12)) {
3642 3642 un->un_f_mmc_writable_media = TRUE;
3643 3643 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3644 3644 kmem_free(rqbuf, SENSE_LENGTH);
3645 3645 return;
3646 3646 }
3647 3647 }
3648 3648
3649 3649 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
3650 3650 kmem_free(rqbuf, SENSE_LENGTH);
3651 3651
3652 3652 /*
3653 3653 * Determine if this is a RRD type device.
3654 3654 */
3655 3655 mutex_exit(SD_MUTEX(un));
3656 3656 buf = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
3657 3657 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, (uchar_t *)buf,
3658 3658 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP, path_flag);
3659 3659
3660 3660 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3661 3661
3662 3662 mutex_enter(SD_MUTEX(un));
3663 3663 if (status != 0) {
3664 3664 /* command failed; just return */
3665 3665 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3666 3666 return;
3667 3667 }
3668 3668
3669 3669 /* Get to the page data */
3670 3670 sense_mhp = (struct mode_header_grp2 *)buf;
3671 3671 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
3672 3672 if (bd_len > MODE_BLK_DESC_LENGTH) {
3673 3673 /*
3674 3674 * We did not get back the expected block descriptor length so
3675 3675 * we cannot check the mode page.
3676 3676 */
3677 3677 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3678 3678 "sd_check_for_writable_cd: Mode Sense returned "
3679 3679 "invalid block descriptor length\n");
3680 3680 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3681 3681 return;
3682 3682 }
3683 3683
3684 3684 /*
3685 3685 * If the device presents DVD or CD capabilities in the mode
3686 3686 	 * page, we can return here since an RRD device will not have
3687 3687 * these capabilities.
3688 3688 */
3689 3689 sense_page = (uchar_t *)(buf + MODE_HEADER_LENGTH_GRP2 + bd_len);
3690 3690 if ((sense_page[2] & 0x3f) || (sense_page[3] & 0x3f)) {
3691 3691 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3692 3692 return;
3693 3693 }
3694 3694 kmem_free(buf, BUFLEN_MODE_CDROM_CAP);
3695 3695
3696 3696 /*
3697 3697 * If un->un_f_mmc_writable_media is still FALSE,
3698 3698 	 * check for RRD type media. An RRD device is identified
3699 3699 * by the features RANDOM_WRITABLE and HARDWARE_DEFECT_MANAGEMENT.
3700 3700 */
3701 3701 mutex_exit(SD_MUTEX(un));
3702 3702 out_data_rw = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3703 3703 rqbuf_rw = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3704 3704
3705 3705 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_rw,
3706 3706 SENSE_LENGTH, out_data_rw, SD_CURRENT_FEATURE_LEN,
3707 3707 RANDOM_WRITABLE, path_flag);
3708 3708
3709 3709 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3710 3710 if (rtn != 0) {
3711 3711 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3712 3712 kmem_free(rqbuf_rw, SENSE_LENGTH);
3713 3713 mutex_enter(SD_MUTEX(un));
3714 3714 return;
3715 3715 }
3716 3716
3717 3717 out_data_hd = kmem_zalloc(SD_CURRENT_FEATURE_LEN, KM_SLEEP);
3718 3718 rqbuf_hd = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
3719 3719
3720 3720 rtn = sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf_hd,
3721 3721 SENSE_LENGTH, out_data_hd, SD_CURRENT_FEATURE_LEN,
3722 3722 HARDWARE_DEFECT_MANAGEMENT, path_flag);
3723 3723
3724 3724 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
3725 3725 mutex_enter(SD_MUTEX(un));
3726 3726 if (rtn == 0) {
3727 3727 /*
3728 3728 * We have good information, check for random writable
3729 3729 * and hardware defect features as current.
3730 3730 */
3731 3731 if ((out_data_rw[9] & RANDOM_WRITABLE) &&
3732 3732 (out_data_rw[10] & 0x1) &&
3733 3733 (out_data_hd[9] & HARDWARE_DEFECT_MANAGEMENT) &&
3734 3734 (out_data_hd[10] & 0x1)) {
3735 3735 un->un_f_mmc_writable_media = TRUE;
3736 3736 }
3737 3737 }
3738 3738
3739 3739 kmem_free(out_data_rw, SD_CURRENT_FEATURE_LEN);
3740 3740 kmem_free(rqbuf_rw, SENSE_LENGTH);
3741 3741 kmem_free(out_data_hd, SD_CURRENT_FEATURE_LEN);
3742 3742 kmem_free(rqbuf_hd, SENSE_LENGTH);
3743 3743 }
3744 3744
3745 3745 /*
3746 3746 * Function: sd_read_unit_properties
3747 3747 *
3748 3748 * Description: The following implements a property lookup mechanism.
3749 3749 * Properties for particular disks (keyed on vendor, model
3750 3750 * and rev numbers) are sought in the sd.conf file via
3751 3751 * sd_process_sdconf_file(), and if not found there, are
3752 3752 * looked for in a list hardcoded in this driver via
3753 3753  *		sd_process_sdconf_table(). Once located, the properties
3754 3754 * are used to update the driver unit structure.
3755 3755 *
3756 3756 * Arguments: un - driver soft state (unit) structure
3757 3757 */
3758 3758
3759 3759 static void
3760 3760 sd_read_unit_properties(struct sd_lun *un)
3761 3761 {
3762 3762 /*
3763 3763 * sd_process_sdconf_file returns SD_FAILURE if it cannot find
3764 3764 * the "sd-config-list" property (from the sd.conf file) or if
3765 3765 * there was not a match for the inquiry vid/pid. If this event
3766 3766 * occurs the static driver configuration table is searched for
3767 3767 * a match.
3768 3768 */
3769 3769 ASSERT(un != NULL);
3770 3770 if (sd_process_sdconf_file(un) == SD_FAILURE) {
3771 3771 sd_process_sdconf_table(un);
3772 3772 }
3773 3773
3774 3774 /* check for LSI device */
3775 3775 sd_is_lsi(un);
3776 3776
3777 3777
3778 3778 }
3779 3779
3780 3780
3781 3781 /*
3782 3782 * Function: sd_process_sdconf_file
3783 3783 *
3784 3784 * Description: Use ddi_prop_lookup(9F) to obtain the properties from the
3785 3785  *		driver's config file (i.e., sd.conf) and update the driver
3786 3786 * soft state structure accordingly.
3787 3787 *
3788 3788 * Arguments: un - driver soft state (unit) structure
3789 3789 *
3790 3790 * Return Code: SD_SUCCESS - The properties were successfully set according
3791 3791 * to the driver configuration file.
3792 3792 * SD_FAILURE - The driver config list was not obtained or
3793 3793 * there was no vid/pid match. This indicates that
3794 3794 * the static config table should be used.
3795 3795 *
3796 3796 * The config file has a property, "sd-config-list". Currently we support
3797 3797 * two kinds of formats. For both formats, the value of this property
3798 3798 * is a list of duplets:
3799 3799 *
3800 3800 * sd-config-list=
3801 3801 * <duplet>,
3802 3802 * [,<duplet>]*;
3803 3803 *
3804 3804 * For the improved format, where
3805 3805 *
3806 3806 * <duplet>:= "<vid+pid>","<tunable-list>"
3807 3807 *
3808 3808 * and
3809 3809 *
3810 3810 * <tunable-list>:= <tunable> [, <tunable> ]*;
3811 3811 * <tunable> = <name> : <value>
3812 3812 *
3813 3813 * The <vid+pid> is the string that is returned by the target device on a
3814 3814 * SCSI inquiry command, the <tunable-list> contains one or more tunables
3815 3815 * to apply to all target devices with the specified <vid+pid>.
3816 3816 *
3817 3817 * Each <tunable> is a "<name> : <value>" pair.
3818 3818 *
3819 3819 * For the old format, the structure of each duplet is as follows:
3820 3820 *
3821 3821 * <duplet>:= "<vid+pid>","<data-property-name_list>"
3822 3822 *
3823 3823 * The first entry of the duplet is the device ID string (the concatenated
3824 3824 * vid & pid; not to be confused with a device_id). This is defined in
3825 3825 * the same way as in the sd_disk_table.
3826 3826 *
3827 3827 * The second part of the duplet is a string that identifies a
3828 3828 * data-property-name-list. The data-property-name-list is defined as
3829 3829 * follows:
3830 3830 *
3831 3831 * <data-property-name-list>:=<data-property-name> [<data-property-name>]
3832 3832 *
3833 3833 * The syntax of <data-property-name> depends on the <version> field.
3834 3834 *
3835 3835 * If version = SD_CONF_VERSION_1 we have the following syntax:
3836 3836 *
3837 3837 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
3838 3838 *
3839 3839 * where the prop0 value will be used to set prop0 if bit0 set in the
3840 3840 * flags, prop1 if bit1 set, etc. and N = SD_CONF_MAX_ITEMS -1
3841 3841 *
3842 3842 */
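
/*
 * For illustration only: the vid/pid strings and the "acme-data"
 * property name below are made up, while the tunable names are ones
 * handled by sd_set_properties(). A sketch of one entry in each
 * format might look like:
 *
 *	sd-config-list =
 *	    "ACME    SUPERDISK", "retries-busy:12, throttle-max:256",
 *	    "ACME    OLDDISK",   "acme-data";
 *	acme-data = 1,0x1,10;
 *
 * In the old-format entry, "1" is SD_CONF_VERSION_1, the flags word
 * 0x1 has only bit0 set (so only prop0 is consumed), and "10" is the
 * prop0 value.
 */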
3843 3843
3844 3844 static int
3845 3845 sd_process_sdconf_file(struct sd_lun *un)
3846 3846 {
3847 3847 char **config_list = NULL;
3848 3848 uint_t nelements;
3849 3849 char *vidptr;
3850 3850 int vidlen;
3851 3851 char *dnlist_ptr;
3852 3852 char *dataname_ptr;
3853 3853 char *dataname_lasts;
3854 3854 int *data_list = NULL;
3855 3855 uint_t data_list_len;
3856 3856 int rval = SD_FAILURE;
3857 3857 int i;
3858 3858
3859 3859 ASSERT(un != NULL);
3860 3860
3861 3861 /* Obtain the configuration list associated with the .conf file */
3862 3862 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, SD_DEVINFO(un),
3863 3863 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, sd_config_list,
3864 3864 &config_list, &nelements) != DDI_PROP_SUCCESS) {
3865 3865 return (SD_FAILURE);
3866 3866 }
3867 3867
3868 3868 /*
3869 3869 * Compare vids in each duplet to the inquiry vid - if a match is
3870 3870 * made, get the data value and update the soft state structure
3871 3871 * accordingly.
3872 3872 *
3873 3873 * Each duplet should show as a pair of strings, return SD_FAILURE
3874 3874 * otherwise.
3875 3875 */
3876 3876 if (nelements & 1) {
3877 3877 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
3878 3878 "sd-config-list should show as pairs of strings.\n");
3879 3879 if (config_list)
3880 3880 ddi_prop_free(config_list);
3881 3881 return (SD_FAILURE);
3882 3882 }
3883 3883
3884 3884 for (i = 0; i < nelements; i += 2) {
3885 3885 /*
3886 3886 * Note: The assumption here is that each vid entry is on
3887 3887 * a unique line from its associated duplet.
3888 3888 */
3889 3889 vidptr = config_list[i];
3890 3890 vidlen = (int)strlen(vidptr);
3891 3891 if ((vidlen == 0) ||
3892 3892 (sd_sdconf_id_match(un, vidptr, vidlen) != SD_SUCCESS)) {
3893 3893 continue;
3894 3894 }
3895 3895
3896 3896 /*
3897 3897 * dnlist contains 1 or more blank separated
3898 3898 * data-property-name entries
3899 3899 */
3900 3900 dnlist_ptr = config_list[i + 1];
3901 3901
3902 3902 if (strchr(dnlist_ptr, ':') != NULL) {
3903 3903 /*
3904 3904 * Decode the improved format sd-config-list.
3905 3905 */
3906 3906 sd_nvpair_str_decode(un, dnlist_ptr);
3907 3907 } else {
3908 3908 /*
3909 3909 * The old format sd-config-list, loop through all
3910 3910 * data-property-name entries in the
3911 3911 * data-property-name-list
3912 3912 * setting the properties for each.
3913 3913 */
3914 3914 for (dataname_ptr = sd_strtok_r(dnlist_ptr, " \t",
3915 3915 &dataname_lasts); dataname_ptr != NULL;
3916 3916 dataname_ptr = sd_strtok_r(NULL, " \t",
3917 3917 &dataname_lasts)) {
3918 3918 int version;
3919 3919
3920 3920 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3921 3921 "sd_process_sdconf_file: disk:%s, "
3922 3922 "data:%s\n", vidptr, dataname_ptr);
3923 3923
3924 3924 /* Get the data list */
3925 3925 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY,
3926 3926 SD_DEVINFO(un), 0, dataname_ptr, &data_list,
3927 3927 &data_list_len) != DDI_PROP_SUCCESS) {
3928 3928 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3929 3929 "sd_process_sdconf_file: data "
3930 3930 "property (%s) has no value\n",
3931 3931 dataname_ptr);
3932 3932 continue;
3933 3933 }
3934 3934
3935 3935 version = data_list[0];
3936 3936
3937 3937 if (version == SD_CONF_VERSION_1) {
3938 3938 sd_tunables values;
3939 3939
3940 3940 /* Set the properties */
3941 3941 if (sd_chk_vers1_data(un, data_list[1],
3942 3942 &data_list[2], data_list_len,
3943 3943 dataname_ptr) == SD_SUCCESS) {
3944 3944 sd_get_tunables_from_conf(un,
3945 3945 data_list[1], &data_list[2],
3946 3946 &values);
3947 3947 sd_set_vers1_properties(un,
3948 3948 data_list[1], &values);
3949 3949 rval = SD_SUCCESS;
3950 3950 } else {
3951 3951 rval = SD_FAILURE;
3952 3952 }
3953 3953 } else {
3954 3954 scsi_log(SD_DEVINFO(un), sd_label,
3955 3955 CE_WARN, "data property %s version "
3956 3956 "0x%x is invalid.",
3957 3957 dataname_ptr, version);
3958 3958 rval = SD_FAILURE;
3959 3959 }
3960 3960 if (data_list)
3961 3961 ddi_prop_free(data_list);
3962 3962 }
3963 3963 }
3964 3964 }
3965 3965
3966 3966 /* free up the memory allocated by ddi_prop_lookup_string_array(). */
3967 3967 if (config_list) {
3968 3968 ddi_prop_free(config_list);
3969 3969 }
3970 3970
3971 3971 return (rval);
3972 3972 }
3973 3973
3974 3974 /*
3975 3975 * Function: sd_nvpair_str_decode()
3976 3976 *
3977 3977  * Description: Parse the improved format sd-config-list to get
3978 3978  *		each tunable entry, which is a name-value pair.
3979 3979  *		Then call sd_set_properties() to set each property.
3980 3980 *
3981 3981 * Arguments: un - driver soft state (unit) structure
3982 3982 * nvpair_str - the tunable list
3983 3983 */
3984 3984 static void
3985 3985 sd_nvpair_str_decode(struct sd_lun *un, char *nvpair_str)
3986 3986 {
3987 3987 char *nv, *name, *value, *token;
3988 3988 char *nv_lasts, *v_lasts, *x_lasts;
3989 3989
3990 3990 for (nv = sd_strtok_r(nvpair_str, ",", &nv_lasts); nv != NULL;
3991 3991 nv = sd_strtok_r(NULL, ",", &nv_lasts)) {
3992 3992 token = sd_strtok_r(nv, ":", &v_lasts);
3993 3993 name = sd_strtok_r(token, " \t", &x_lasts);
3994 3994 token = sd_strtok_r(NULL, ":", &v_lasts);
3995 3995 value = sd_strtok_r(token, " \t", &x_lasts);
3996 3996 if (name == NULL || value == NULL) {
3997 3997 SD_INFO(SD_LOG_ATTACH_DETACH, un,
3998 3998 "sd_nvpair_str_decode: "
3999 3999 "name or value is not valid!\n");
4000 4000 } else {
4001 4001 sd_set_properties(un, name, value);
4002 4002 }
4003 4003 }
4004 4004 }
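
/*
 * For example (hypothetical input), the tunable list
 *
 *	"retries-busy : 6, throttle-max:32"
 *
 * decodes into the calls sd_set_properties(un, "retries-busy", "6")
 * and sd_set_properties(un, "throttle-max", "32"); the surrounding
 * blanks and tabs are stripped by the second-level sd_strtok_r()
 * passes above.
 */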
4005 4005
4006 4006 /*
4007 4007 * Function: sd_strtok_r()
4008 4008 *
4009 4009 * Description: This function uses strpbrk and strspn to break
4010 4010  *		a string into tokens on sequential calls. It returns
4011 4011  *		NULL when no non-separator characters remain. The first
4012 4012 * argument is NULL for subsequent calls.
4013 4013 */
4014 4014 static char *
4015 4015 sd_strtok_r(char *string, const char *sepset, char **lasts)
4016 4016 {
4017 4017 char *q, *r;
4018 4018
4019 4019 /* First or subsequent call */
4020 4020 if (string == NULL)
4021 4021 string = *lasts;
4022 4022
4023 4023 if (string == NULL)
4024 4024 return (NULL);
4025 4025
4026 4026 /* Skip leading separators */
4027 4027 q = string + strspn(string, sepset);
4028 4028
4029 4029 if (*q == '\0')
4030 4030 return (NULL);
4031 4031
4032 4032 if ((r = strpbrk(q, sepset)) == NULL)
4033 4033 *lasts = NULL;
4034 4034 else {
4035 4035 *r = '\0';
4036 4036 *lasts = r + 1;
4037 4037 }
4038 4038 return (q);
4039 4039 }
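
/*
 * A minimal sketch of the calling convention (illustrative only;
 * note that the input buffer is modified in place):
 *
 *	char buf[] = ",one,,two";
 *	char *lasts, *tok;
 *
 *	tok = sd_strtok_r(buf, ",", &lasts);	(returns "one")
 *	tok = sd_strtok_r(NULL, ",", &lasts);	(returns "two")
 *	tok = sd_strtok_r(NULL, ",", &lasts);	(returns NULL)
 *
 * Because the tokenizer state lives in the caller-owned "lasts"
 * pointer, independent tokenizations do not interfere.
 */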
4040 4040
4041 4041 /*
4042 4042 * Function: sd_set_properties()
4043 4043 *
4044 4044 * Description: Set device properties based on the improved
4045 4045 * format sd-config-list.
4046 4046 *
4047 4047 * Arguments: un - driver soft state (unit) structure
4048 4048 * name - supported tunable name
4049 4049 * value - tunable value
4050 4050 */
4051 4051 static void
4052 4052 sd_set_properties(struct sd_lun *un, char *name, char *value)
4053 4053 {
4054 4054 char *endptr = NULL;
4055 4055 long val = 0;
4056 4056
4057 4057 if (strcasecmp(name, "cache-nonvolatile") == 0) {
4058 4058 if (strcasecmp(value, "true") == 0) {
4059 4059 un->un_f_suppress_cache_flush = TRUE;
4060 4060 } else if (strcasecmp(value, "false") == 0) {
4061 4061 un->un_f_suppress_cache_flush = FALSE;
4062 4062 } else {
4063 4063 goto value_invalid;
4064 4064 }
4065 4065 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4066 4066 "suppress_cache_flush flag set to %d\n",
4067 4067 un->un_f_suppress_cache_flush);
4068 4068 return;
4069 4069 }
4070 4070
4071 4071 if (strcasecmp(name, "controller-type") == 0) {
4072 4072 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4073 4073 un->un_ctype = val;
4074 4074 } else {
4075 4075 goto value_invalid;
4076 4076 }
4077 4077 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4078 4078 "ctype set to %d\n", un->un_ctype);
4079 4079 return;
4080 4080 }
4081 4081
4082 4082 if (strcasecmp(name, "delay-busy") == 0) {
4083 4083 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4084 4084 un->un_busy_timeout = drv_usectohz(val / 1000);
4085 4085 } else {
4086 4086 goto value_invalid;
4087 4087 }
4088 4088 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4089 4089 "busy_timeout set to %d\n", un->un_busy_timeout);
4090 4090 return;
4091 4091 }
4092 4092
4093 4093 if (strcasecmp(name, "disksort") == 0) {
4094 4094 if (strcasecmp(value, "true") == 0) {
4095 4095 un->un_f_disksort_disabled = FALSE;
4096 4096 } else if (strcasecmp(value, "false") == 0) {
4097 4097 un->un_f_disksort_disabled = TRUE;
4098 4098 } else {
4099 4099 goto value_invalid;
4100 4100 }
4101 4101 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4102 4102 "disksort disabled flag set to %d\n",
4103 4103 un->un_f_disksort_disabled);
4104 4104 return;
4105 4105 }
4106 4106
4107 4107 if (strcasecmp(name, "power-condition") == 0) {
4108 4108 if (strcasecmp(value, "true") == 0) {
4109 4109 un->un_f_power_condition_disabled = FALSE;
4110 4110 } else if (strcasecmp(value, "false") == 0) {
4111 4111 un->un_f_power_condition_disabled = TRUE;
4112 4112 } else {
4113 4113 goto value_invalid;
4114 4114 }
4115 4115 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4116 4116 "power condition disabled flag set to %d\n",
4117 4117 un->un_f_power_condition_disabled);
4118 4118 return;
4119 4119 }
4120 4120
4121 4121 if (strcasecmp(name, "timeout-releasereservation") == 0) {
4122 4122 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4123 4123 un->un_reserve_release_time = val;
4124 4124 } else {
4125 4125 goto value_invalid;
4126 4126 }
4127 4127 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4128 4128 "reservation release timeout set to %d\n",
4129 4129 un->un_reserve_release_time);
4130 4130 return;
4131 4131 }
4132 4132
4133 4133 if (strcasecmp(name, "reset-lun") == 0) {
4134 4134 if (strcasecmp(value, "true") == 0) {
4135 4135 un->un_f_lun_reset_enabled = TRUE;
4136 4136 } else if (strcasecmp(value, "false") == 0) {
4137 4137 un->un_f_lun_reset_enabled = FALSE;
4138 4138 } else {
4139 4139 goto value_invalid;
4140 4140 }
4141 4141 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4142 4142 "lun reset enabled flag set to %d\n",
4143 4143 un->un_f_lun_reset_enabled);
4144 4144 return;
4145 4145 }
4146 4146
4147 4147 if (strcasecmp(name, "retries-busy") == 0) {
4148 4148 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4149 4149 un->un_busy_retry_count = val;
4150 4150 } else {
4151 4151 goto value_invalid;
4152 4152 }
4153 4153 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4154 4154 "busy retry count set to %d\n", un->un_busy_retry_count);
4155 4155 return;
4156 4156 }
4157 4157
4158 4158 if (strcasecmp(name, "retries-timeout") == 0) {
4159 4159 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4160 4160 un->un_retry_count = val;
4161 4161 } else {
4162 4162 goto value_invalid;
4163 4163 }
4164 4164 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4165 4165 "timeout retry count set to %d\n", un->un_retry_count);
4166 4166 return;
4167 4167 }
4168 4168
4169 4169 if (strcasecmp(name, "retries-notready") == 0) {
4170 4170 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4171 4171 un->un_notready_retry_count = val;
4172 4172 } else {
4173 4173 goto value_invalid;
4174 4174 }
4175 4175 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4176 4176 "notready retry count set to %d\n",
4177 4177 un->un_notready_retry_count);
4178 4178 return;
4179 4179 }
4180 4180
4181 4181 if (strcasecmp(name, "retries-reset") == 0) {
4182 4182 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4183 4183 un->un_reset_retry_count = val;
4184 4184 } else {
4185 4185 goto value_invalid;
4186 4186 }
4187 4187 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4188 4188 "reset retry count set to %d\n",
4189 4189 un->un_reset_retry_count);
4190 4190 return;
4191 4191 }
4192 4192
4193 4193 if (strcasecmp(name, "throttle-max") == 0) {
4194 4194 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4195 4195 un->un_saved_throttle = un->un_throttle = val;
4196 4196 } else {
4197 4197 goto value_invalid;
4198 4198 }
4199 4199 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4200 4200 "throttle set to %d\n", un->un_throttle);
4201 4201 }
4202 4202
4203 4203 if (strcasecmp(name, "throttle-min") == 0) {
4204 4204 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4205 4205 un->un_min_throttle = val;
4206 4206 } else {
4207 4207 goto value_invalid;
4208 4208 }
4209 4209 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4210 4210 "min throttle set to %d\n", un->un_min_throttle);
4211 4211 }
4212 4212
4213 4213 if (strcasecmp(name, "rmw-type") == 0) {
4214 4214 if (ddi_strtol(value, &endptr, 0, &val) == 0) {
4215 4215 un->un_f_rmw_type = val;
4216 4216 } else {
4217 4217 goto value_invalid;
4218 4218 }
4219 4219 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4220 4220 "RMW type set to %d\n", un->un_f_rmw_type);
4221 4221 }
4222 4222
4223 4223 if (strcasecmp(name, "physical-block-size") == 0) {
4224 4224 if (ddi_strtol(value, &endptr, 0, &val) == 0 &&
4225 4225 ISP2(val) && val >= un->un_tgt_blocksize &&
4226 4226 val >= un->un_sys_blocksize) {
4227 4227 un->un_phy_blocksize = val;
4228 4228 } else {
4229 4229 goto value_invalid;
4230 4230 }
4231 4231 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4232 4232 "physical block size set to %d\n", un->un_phy_blocksize);
4233 4233 }
4234 4234
4235 4235 /*
4236 4236 * Validate the throttle values.
4237 4237 * If any of the numbers are invalid, set everything to defaults.
4238 4238 */
4239 4239 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
4240 4240 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
4241 4241 (un->un_min_throttle > un->un_throttle)) {
4242 4242 un->un_saved_throttle = un->un_throttle = sd_max_throttle;
4243 4243 un->un_min_throttle = sd_min_throttle;
4244 4244 }
4245 4245
4246 4246 if (strcasecmp(name, "mmc-gesn-polling") == 0) {
4247 4247 if (strcasecmp(value, "true") == 0) {
4248 4248 un->un_f_mmc_gesn_polling = TRUE;
4249 4249 } else if (strcasecmp(value, "false") == 0) {
4250 4250 un->un_f_mmc_gesn_polling = FALSE;
4251 4251 } else {
4252 4252 goto value_invalid;
4253 4253 }
4254 4254 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4255 4255 "mmc-gesn-polling set to %d\n",
4256 4256 un->un_f_mmc_gesn_polling);
4257 4257 }
4258 4258
4259 4259 return;
4260 4260
4261 4261 value_invalid:
4262 4262 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_set_properties: "
4263 4263 "value of prop %s is invalid\n", name);
4264 4264 }
4265 4265
4266 4266 /*
4267 4267 * Function: sd_get_tunables_from_conf()
4268 4268 *
4269 4269 *
4270 4270  * Description: This function reads the data list from the sd.conf file
4271 4271  *	and pulls the values that can have numeric values as arguments and
4272 4272  *	places the values in the appropriate sd_tunables member.
4273 4273  *	Since the order of the data list members varies across platforms,
4274 4274  *	this function reads them from the data list in a platform-specific
4275 4275  *	order and places them into the correct sd_tunables member that is
4276 4276  *	consistent across all platforms.
4277 4277 */
4278 4278 static void
4279 4279 sd_get_tunables_from_conf(struct sd_lun *un, int flags, int *data_list,
4280 4280 sd_tunables *values)
4281 4281 {
4282 4282 int i;
4283 4283 int mask;
4284 4284
4285 4285 bzero(values, sizeof (sd_tunables));
4286 4286
4287 4287 for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
4288 4288
4289 4289 mask = 1 << i;
4290 4290 if (mask > flags) {
4291 4291 break;
4292 4292 }
4293 4293
4294 4294 switch (mask & flags) {
4295 4295 case 0: /* This mask bit not set in flags */
4296 4296 continue;
4297 4297 case SD_CONF_BSET_THROTTLE:
4298 4298 values->sdt_throttle = data_list[i];
4299 4299 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4300 4300 "sd_get_tunables_from_conf: throttle = %d\n",
4301 4301 values->sdt_throttle);
4302 4302 break;
4303 4303 case SD_CONF_BSET_CTYPE:
4304 4304 values->sdt_ctype = data_list[i];
4305 4305 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4306 4306 "sd_get_tunables_from_conf: ctype = %d\n",
4307 4307 values->sdt_ctype);
4308 4308 break;
4309 4309 case SD_CONF_BSET_NRR_COUNT:
4310 4310 values->sdt_not_rdy_retries = data_list[i];
4311 4311 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4312 4312 "sd_get_tunables_from_conf: not_rdy_retries = %d\n",
4313 4313 values->sdt_not_rdy_retries);
4314 4314 break;
4315 4315 case SD_CONF_BSET_BSY_RETRY_COUNT:
4316 4316 values->sdt_busy_retries = data_list[i];
4317 4317 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4318 4318 "sd_get_tunables_from_conf: busy_retries = %d\n",
4319 4319 values->sdt_busy_retries);
4320 4320 break;
4321 4321 case SD_CONF_BSET_RST_RETRIES:
4322 4322 values->sdt_reset_retries = data_list[i];
4323 4323 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4324 4324 "sd_get_tunables_from_conf: reset_retries = %d\n",
4325 4325 values->sdt_reset_retries);
4326 4326 break;
4327 4327 case SD_CONF_BSET_RSV_REL_TIME:
4328 4328 values->sdt_reserv_rel_time = data_list[i];
4329 4329 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4330 4330 "sd_get_tunables_from_conf: reserv_rel_time = %d\n",
4331 4331 values->sdt_reserv_rel_time);
4332 4332 break;
4333 4333 case SD_CONF_BSET_MIN_THROTTLE:
4334 4334 values->sdt_min_throttle = data_list[i];
4335 4335 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4336 4336 "sd_get_tunables_from_conf: min_throttle = %d\n",
4337 4337 values->sdt_min_throttle);
4338 4338 break;
4339 4339 case SD_CONF_BSET_DISKSORT_DISABLED:
4340 4340 values->sdt_disk_sort_dis = data_list[i];
4341 4341 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4342 4342 "sd_get_tunables_from_conf: disk_sort_dis = %d\n",
4343 4343 values->sdt_disk_sort_dis);
4344 4344 break;
4345 4345 case SD_CONF_BSET_LUN_RESET_ENABLED:
4346 4346 values->sdt_lun_reset_enable = data_list[i];
4347 4347 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4348 4348 "sd_get_tunables_from_conf: lun_reset_enable = %d"
4349 4349 "\n", values->sdt_lun_reset_enable);
4350 4350 break;
4351 4351 case SD_CONF_BSET_CACHE_IS_NV:
4352 4352 values->sdt_suppress_cache_flush = data_list[i];
4353 4353 			SD_INFO(SD_LOG_ATTACH_DETACH, un,
4354 4354 			    "sd_get_tunables_from_conf: "
4355 4355 			    "suppress_cache_flush = %d\n",
4356 4356 			    values->sdt_suppress_cache_flush);
4357 4357 break;
4358 4358 case SD_CONF_BSET_PC_DISABLED:
4359 4359 			values->sdt_power_condition_dis = data_list[i];
4360 4360 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4361 4361 "sd_get_tunables_from_conf: power_condition_dis = "
4362 4362 "%d\n", values->sdt_power_condition_dis);
4363 4363 break;
4364 4364 }
4365 4365 }
4366 4366 }
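/*
 * Editor's illustrative note (a sketch; the bit positions below are
 * assumptions for illustration only): the flags word and data list are
 * positional. If SD_CONF_BSET_THROTTLE were bit 0 and
 * SD_CONF_BSET_NRR_COUNT bit 2, then flags = 0x5 with a property list
 * of { 10, 0, 3 } would set sdt_throttle = 10 and
 * sdt_not_rdy_retries = 3, skip the unset bit 1 via the "case 0"
 * continue, and stop once mask exceeds flags.
 */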
4367 4367
4368 4368 /*
4369 4369 * Function: sd_process_sdconf_table
4370 4370 *
4371 4371 * Description: Search the static configuration table for a match on the
4372 4372 * inquiry vid/pid and update the driver soft state structure
4373 4373 * according to the table property values for the device.
4374 4374 *
4375 4375 * The form of a configuration table entry is:
4376 4376 * <vid+pid>,<flags>,<property-data>
4377 4377 * "SEAGATE ST42400N",1,0x40000,
4378 4378 * 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1;
4379 4379 *
4380 4380 * Arguments: un - driver soft state (unit) structure
4381 4381 */
4382 4382
4383 4383 static void
4384 4384 sd_process_sdconf_table(struct sd_lun *un)
4385 4385 {
4386 4386 char *id = NULL;
4387 4387 int table_index;
4388 4388 int idlen;
4389 4389
4390 4390 ASSERT(un != NULL);
4391 4391 for (table_index = 0; table_index < sd_disk_table_size;
4392 4392 table_index++) {
4393 4393 id = sd_disk_table[table_index].device_id;
4394 4394 idlen = strlen(id);
4395 4395 if (idlen == 0) {
4396 4396 continue;
4397 4397 }
4398 4398
4399 4399 /*
4400 4400 * The static configuration table currently does not
4401 4401 * implement version 10 properties. Additionally,
4402 4402 * multiple data-property-name entries are not
4403 4403 * implemented in the static configuration table.
4404 4404 */
4405 4405 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
4406 4406 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4407 4407 "sd_process_sdconf_table: disk %s\n", id);
4408 4408 sd_set_vers1_properties(un,
4409 4409 sd_disk_table[table_index].flags,
4410 4410 sd_disk_table[table_index].properties);
4411 4411 break;
4412 4412 }
4413 4413 }
4414 4414 }
4415 4415
4416 4416
4417 4417 /*
4418 4418 * Function: sd_sdconf_id_match
4419 4419 *
4420 4420 * Description: This local function implements a case sensitive vid/pid
4421 4421 * comparison as well as the boundary cases of wild card and
4422 4422 * multiple blanks.
4423 4423 *
4424 4424 * Note: An implicit assumption made here is that the scsi
4425 4425 * inquiry structure will always keep the vid, pid and
4426 4426 * revision strings in consecutive sequence, so they can be
4427 4427 * read as a single string. If this assumption is not the
4428 4428 * case, a separate string, to be used for the check, needs
4429 4429 * to be built with these strings concatenated.
4430 4430 *
4431 4431 * Arguments: un - driver soft state (unit) structure
4432 4432 * id - table or config file vid/pid
4433 4433 * idlen - length of the vid/pid (bytes)
4434 4434 *
4435 4435 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
4436 4436 * SD_FAILURE - Indicates no match with the inquiry vid/pid
4437 4437 */
4438 4438
4439 4439 static int
4440 4440 sd_sdconf_id_match(struct sd_lun *un, char *id, int idlen)
4441 4441 {
4442 4442 struct scsi_inquiry *sd_inq;
4443 4443 int rval = SD_SUCCESS;
4444 4444
4445 4445 ASSERT(un != NULL);
4446 4446 sd_inq = un->un_sd->sd_inq;
4447 4447 ASSERT(id != NULL);
4448 4448
4449 4449 /*
4450 4450 * We use the inq_vid as a pointer to a buffer containing the
4451 4451 * vid and pid and use the entire vid/pid length of the table
4452 4452 * entry for the comparison. This works because the inq_pid
4453 4453 * data member follows inq_vid in the scsi_inquiry structure.
4454 4454 */
4455 4455 if (strncasecmp(sd_inq->inq_vid, id, idlen) != 0) {
4456 4456 /*
4457 4457 * The user id string is compared to the inquiry vid/pid
4458 4458 * using a case insensitive comparison and ignoring
4459 4459 * multiple spaces.
4460 4460 */
4461 4461 rval = sd_blank_cmp(un, id, idlen);
4462 4462 if (rval != SD_SUCCESS) {
4463 4463 /*
4464 4464 * User id strings that start and end with a "*"
4465 4465 * are a special case. These do not have a
4466 4466 * specific vendor, and the product string can
4467 4467 * appear anywhere in the 16 byte PID portion of
4468 4468 * the inquiry data. This is a simple strstr()
4469 4469 * type search for the user id in the inquiry data.
4470 4470 */
4471 4471 if ((id[0] == '*') && (id[idlen - 1] == '*')) {
4472 4472 char *pidptr = &id[1];
4473 4473 int i;
4474 4474 int j;
4475 4475 int pidstrlen = idlen - 2;
4476 4476 j = sizeof (SD_INQUIRY(un)->inq_pid) -
4477 4477 pidstrlen;
4478 4478
4479 4479 if (j < 0) {
4480 4480 return (SD_FAILURE);
4481 4481 }
4482 4482 for (i = 0; i < j; i++) {
4483 4483 if (bcmp(&SD_INQUIRY(un)->inq_pid[i],
4484 4484 pidptr, pidstrlen) == 0) {
4485 4485 rval = SD_SUCCESS;
4486 4486 break;
4487 4487 }
4488 4488 }
4489 4489 }
4490 4490 }
4491 4491 }
4492 4492 return (rval);
4493 4493 }
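/*
 * Editor's illustrative note (a sketch with a hypothetical table entry):
 * an id of "*ST31000*" has no vendor portion, so the wildcard branch
 * above strips the two '*' characters and slides the remaining 7-byte
 * "ST31000" across the 16-byte inq_pid field. An inquiry PID of
 * "ST31000528AS    " matches at offset 0, and "XYZST31000      "
 * matches at offset 3; a pattern longer than 16 bytes makes j negative
 * and fails immediately.
 */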
4494 4494
4495 4495
4496 4496 /*
4497 4497 * Function: sd_blank_cmp
4498 4498 *
4499 4499 * Description: If the id string starts and ends with a space, treat
4500 4500 * multiple consecutive spaces as equivalent to a single
4501 4501 * space. For example, this causes a sd_disk_table entry
4502 4502 * of " NEC CDROM " to match a device's id string of
4503 4503 * "NEC CDROM".
4504 4504 *
4505 4505  * Note: This routine succeeds when the pointer into the table
4506 4506  *		entry has reached its terminating '\0' and the remaining
4507 4507  *		inquiry length count is zero. This will happen if the inquiry
4508 4508 * string returned by the device is padded with spaces to be
4509 4509 * exactly 24 bytes in length (8 byte vid + 16 byte pid). The
4510 4510 * SCSI spec states that the inquiry string is to be padded with
4511 4511 * spaces.
4512 4512 *
4513 4513 * Arguments: un - driver soft state (unit) structure
4514 4514 * id - table or config file vid/pid
4515 4515 * idlen - length of the vid/pid (bytes)
4516 4516 *
4517 4517 * Return Code: SD_SUCCESS - Indicates a match with the inquiry vid/pid
4518 4518 * SD_FAILURE - Indicates no match with the inquiry vid/pid
4519 4519 */
4520 4520
4521 4521 static int
4522 4522 sd_blank_cmp(struct sd_lun *un, char *id, int idlen)
4523 4523 {
4524 4524 char *p1;
4525 4525 char *p2;
4526 4526 int cnt;
4527 4527 cnt = sizeof (SD_INQUIRY(un)->inq_vid) +
4528 4528 sizeof (SD_INQUIRY(un)->inq_pid);
4529 4529
4530 4530 ASSERT(un != NULL);
4531 4531 p2 = un->un_sd->sd_inq->inq_vid;
4532 4532 ASSERT(id != NULL);
4533 4533 p1 = id;
4534 4534
4535 4535 if ((id[0] == ' ') && (id[idlen - 1] == ' ')) {
4536 4536 /*
4537 4537 * Note: string p1 is terminated by a NUL but string p2
4538 4538 * isn't. The end of p2 is determined by cnt.
4539 4539 */
4540 4540 for (;;) {
4541 4541 /* skip over any extra blanks in both strings */
4542 4542 while ((*p1 != '\0') && (*p1 == ' ')) {
4543 4543 p1++;
4544 4544 }
4545 4545 while ((cnt != 0) && (*p2 == ' ')) {
4546 4546 p2++;
4547 4547 cnt--;
4548 4548 }
4549 4549
4550 4550 /* compare the two strings */
4551 4551 if ((cnt == 0) ||
4552 4552 (SD_TOUPPER(*p1) != SD_TOUPPER(*p2))) {
4553 4553 break;
4554 4554 }
4555 4555 while ((cnt > 0) &&
4556 4556 (SD_TOUPPER(*p1) == SD_TOUPPER(*p2))) {
4557 4557 p1++;
4558 4558 p2++;
4559 4559 cnt--;
4560 4560 }
4561 4561 }
4562 4562 }
4563 4563
4564 4564 /* return SD_SUCCESS if both strings match */
4565 4565 return (((*p1 == '\0') && (cnt == 0)) ? SD_SUCCESS : SD_FAILURE);
4566 4566 }
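/*
 * Editor's illustrative note (a sketch): with id = " NEC CDROM " and an
 * inquiry string of "NEC     CDROM           " (8-byte vid plus 16-byte
 * pid, space-padded), the loop above matches "NEC", collapses the vid
 * padding blanks against the single blank in id, matches "CDROM", and
 * then consumes the trailing pid padding until cnt reaches 0 with
 * *p1 == '\0', which is the SD_SUCCESS exit condition described in the
 * header comment.
 */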
4567 4567
4568 4568
4569 4569 /*
4570 4570 * Function: sd_chk_vers1_data
4571 4571 *
4572 4572 * Description: Verify the version 1 device properties provided by the
4573 4573 * user via the configuration file
4574 4574 *
4575 4575 * Arguments: un - driver soft state (unit) structure
4576 4576 * flags - integer mask indicating properties to be set
4577 4577 * prop_list - integer list of property values
4578 4578  *		list_len - number of elements in the property list
4579 4579 *
4580 4580 * Return Code: SD_SUCCESS - Indicates the user provided data is valid
4581 4581 * SD_FAILURE - Indicates the user provided data is invalid
4582 4582 */
4583 4583
4584 4584 static int
4585 4585 sd_chk_vers1_data(struct sd_lun *un, int flags, int *prop_list,
4586 4586 int list_len, char *dataname_ptr)
4587 4587 {
4588 4588 int i;
4589 4589 int mask = 1;
4590 4590 int index = 0;
4591 4591
4592 4592 ASSERT(un != NULL);
4593 4593
4594 4594 /* Check for a NULL property name and list */
4595 4595 if (dataname_ptr == NULL) {
4596 4596 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4597 4597 "sd_chk_vers1_data: NULL data property name.");
4598 4598 return (SD_FAILURE);
4599 4599 }
4600 4600 if (prop_list == NULL) {
4601 4601 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4602 4602 "sd_chk_vers1_data: %s NULL data property list.",
4603 4603 dataname_ptr);
4604 4604 return (SD_FAILURE);
4605 4605 }
4606 4606
4607 4607 /* Display a warning if undefined bits are set in the flags */
4608 4608 if (flags & ~SD_CONF_BIT_MASK) {
4609 4609 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4610 4610 "sd_chk_vers1_data: invalid bits 0x%x in data list %s. "
4611 4611 "Properties not set.",
4612 4612 (flags & ~SD_CONF_BIT_MASK), dataname_ptr);
4613 4613 return (SD_FAILURE);
4614 4614 }
4615 4615
4616 4616 /*
4617 4617 * Verify the length of the list by identifying the highest bit set
4618 4618 * in the flags and validating that the property list has a length
4619 4619 * up to the index of this bit.
4620 4620 */
4621 4621 	for (i = 0; i < SD_CONF_MAX_ITEMS; i++) {
4622 4622 		mask = 1 << i;
4623 4623 		if (flags & mask) {
4624 4624 			index++;
4625 4625 		}
4626 4626 }
4627 4627 if (list_len < (index + 2)) {
4628 4628 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4629 4629 "sd_chk_vers1_data: "
4630 4630 "Data property list %s size is incorrect. "
4631 4631 "Properties not set.", dataname_ptr);
4632 4632 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT, "Size expected: "
4633 4633 "version + 1 flagword + %d properties", SD_CONF_MAX_ITEMS);
4634 4634 return (SD_FAILURE);
4635 4635 }
4636 4636 return (SD_SUCCESS);
4637 4637 }
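/*
 * Editor's illustrative note (a sketch with hypothetical numbers): for
 * a conf entry whose flag word is 0x3 (two property bits set), index
 * ends up at 2, so list_len must be at least 4: one version word, one
 * flag word, and the two property values. A three-element list would
 * be rejected with the "size is incorrect" warning above.
 */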
4638 4638
4639 4639
4640 4640 /*
4641 4641 * Function: sd_set_vers1_properties
4642 4642 *
4643 4643 * Description: Set version 1 device properties based on a property list
4644 4644 * retrieved from the driver configuration file or static
4645 4645 * configuration table. Version 1 properties have the format:
4646 4646 *
4647 4647 * <data-property-name>:=<version>,<flags>,<prop0>,<prop1>,.....<propN>
4648 4648 *
4649 4649 * where the prop0 value will be used to set prop0 if bit0
4650 4650 * is set in the flags
4651 4651 *
4652 4652 * Arguments: un - driver soft state (unit) structure
4653 4653 * flags - integer mask indicating properties to be set
4654 4654 * prop_list - integer list of property values
4655 4655 */
4656 4656
4657 4657 static void
4658 4658 sd_set_vers1_properties(struct sd_lun *un, int flags, sd_tunables *prop_list)
4659 4659 {
4660 4660 ASSERT(un != NULL);
4661 4661
4662 4662 /*
4663 4663 * Set the flag to indicate cache is to be disabled. An attempt
4664 4664 * to disable the cache via sd_cache_control() will be made
4665 4665 * later during attach once the basic initialization is complete.
4666 4666 */
4667 4667 if (flags & SD_CONF_BSET_NOCACHE) {
4668 4668 un->un_f_opt_disable_cache = TRUE;
4669 4669 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4670 4670 "sd_set_vers1_properties: caching disabled flag set\n");
4671 4671 }
4672 4672
4673 4673 /* CD-specific configuration parameters */
4674 4674 if (flags & SD_CONF_BSET_PLAYMSF_BCD) {
4675 4675 un->un_f_cfg_playmsf_bcd = TRUE;
4676 4676 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4677 4677 "sd_set_vers1_properties: playmsf_bcd set\n");
4678 4678 }
4679 4679 if (flags & SD_CONF_BSET_READSUB_BCD) {
4680 4680 un->un_f_cfg_readsub_bcd = TRUE;
4681 4681 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4682 4682 "sd_set_vers1_properties: readsub_bcd set\n");
4683 4683 }
4684 4684 if (flags & SD_CONF_BSET_READ_TOC_TRK_BCD) {
4685 4685 un->un_f_cfg_read_toc_trk_bcd = TRUE;
4686 4686 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4687 4687 "sd_set_vers1_properties: read_toc_trk_bcd set\n");
4688 4688 }
4689 4689 if (flags & SD_CONF_BSET_READ_TOC_ADDR_BCD) {
4690 4690 un->un_f_cfg_read_toc_addr_bcd = TRUE;
4691 4691 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4692 4692 "sd_set_vers1_properties: read_toc_addr_bcd set\n");
4693 4693 }
4694 4694 if (flags & SD_CONF_BSET_NO_READ_HEADER) {
4695 4695 un->un_f_cfg_no_read_header = TRUE;
4696 4696 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4697 4697 "sd_set_vers1_properties: no_read_header set\n");
4698 4698 }
4699 4699 if (flags & SD_CONF_BSET_READ_CD_XD4) {
4700 4700 un->un_f_cfg_read_cd_xd4 = TRUE;
4701 4701 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4702 4702 "sd_set_vers1_properties: read_cd_xd4 set\n");
4703 4703 }
4704 4704
4705 4705 /* Support for devices which do not have valid/unique serial numbers */
4706 4706 if (flags & SD_CONF_BSET_FAB_DEVID) {
4707 4707 un->un_f_opt_fab_devid = TRUE;
4708 4708 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4709 4709 "sd_set_vers1_properties: fab_devid bit set\n");
4710 4710 }
4711 4711
4712 4712 /* Support for user throttle configuration */
4713 4713 if (flags & SD_CONF_BSET_THROTTLE) {
4714 4714 ASSERT(prop_list != NULL);
4715 4715 un->un_saved_throttle = un->un_throttle =
4716 4716 prop_list->sdt_throttle;
4717 4717 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4718 4718 "sd_set_vers1_properties: throttle set to %d\n",
4719 4719 prop_list->sdt_throttle);
4720 4720 }
4721 4721
4722 4722 /* Set the per disk retry count according to the conf file or table. */
4723 4723 if (flags & SD_CONF_BSET_NRR_COUNT) {
4724 4724 ASSERT(prop_list != NULL);
4725 4725 if (prop_list->sdt_not_rdy_retries) {
4726 4726 un->un_notready_retry_count =
4727 4727 prop_list->sdt_not_rdy_retries;
4728 4728 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4729 4729 "sd_set_vers1_properties: not ready retry count"
4730 4730 " set to %d\n", un->un_notready_retry_count);
4731 4731 }
4732 4732 }
4733 4733
4734 4734 /* The controller type is reported for generic disk driver ioctls */
4735 4735 if (flags & SD_CONF_BSET_CTYPE) {
4736 4736 ASSERT(prop_list != NULL);
4737 4737 switch (prop_list->sdt_ctype) {
4738 4738 case CTYPE_CDROM:
4739 4739 un->un_ctype = prop_list->sdt_ctype;
4740 4740 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4741 4741 "sd_set_vers1_properties: ctype set to "
4742 4742 "CTYPE_CDROM\n");
4743 4743 break;
4744 4744 case CTYPE_CCS:
4745 4745 un->un_ctype = prop_list->sdt_ctype;
4746 4746 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4747 4747 "sd_set_vers1_properties: ctype set to "
4748 4748 "CTYPE_CCS\n");
4749 4749 break;
4750 4750 case CTYPE_ROD: /* RW optical */
4751 4751 un->un_ctype = prop_list->sdt_ctype;
4752 4752 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4753 4753 "sd_set_vers1_properties: ctype set to "
4754 4754 "CTYPE_ROD\n");
4755 4755 break;
4756 4756 default:
4757 4757 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
4758 4758 "sd_set_vers1_properties: Could not set "
4759 4759 "invalid ctype value (%d)",
4760 4760 prop_list->sdt_ctype);
4761 4761 }
4762 4762 }
4763 4763
4764 4764 /* Purple failover timeout */
4765 4765 if (flags & SD_CONF_BSET_BSY_RETRY_COUNT) {
4766 4766 ASSERT(prop_list != NULL);
4767 4767 un->un_busy_retry_count =
4768 4768 prop_list->sdt_busy_retries;
4769 4769 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4770 4770 "sd_set_vers1_properties: "
4771 4771 "busy retry count set to %d\n",
4772 4772 un->un_busy_retry_count);
4773 4773 }
4774 4774
4775 4775 /* Purple reset retry count */
4776 4776 if (flags & SD_CONF_BSET_RST_RETRIES) {
4777 4777 ASSERT(prop_list != NULL);
4778 4778 un->un_reset_retry_count =
4779 4779 prop_list->sdt_reset_retries;
4780 4780 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4781 4781 "sd_set_vers1_properties: "
4782 4782 "reset retry count set to %d\n",
4783 4783 un->un_reset_retry_count);
4784 4784 }
4785 4785
4786 4786 /* Purple reservation release timeout */
4787 4787 if (flags & SD_CONF_BSET_RSV_REL_TIME) {
4788 4788 ASSERT(prop_list != NULL);
4789 4789 un->un_reserve_release_time =
4790 4790 prop_list->sdt_reserv_rel_time;
4791 4791 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4792 4792 "sd_set_vers1_properties: "
4793 4793 "reservation release timeout set to %d\n",
4794 4794 un->un_reserve_release_time);
4795 4795 }
4796 4796
4797 4797 /*
4798 4798 * Driver flag telling the driver to verify that no commands are pending
4799 4799 * for a device before issuing a Test Unit Ready. This is a workaround
4800 4800 * for a firmware bug in some Seagate eliteI drives.
4801 4801 */
4802 4802 if (flags & SD_CONF_BSET_TUR_CHECK) {
4803 4803 un->un_f_cfg_tur_check = TRUE;
4804 4804 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4805 4805 "sd_set_vers1_properties: tur queue check set\n");
4806 4806 }
4807 4807
4808 4808 if (flags & SD_CONF_BSET_MIN_THROTTLE) {
4809 4809 un->un_min_throttle = prop_list->sdt_min_throttle;
4810 4810 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4811 4811 "sd_set_vers1_properties: min throttle set to %d\n",
4812 4812 un->un_min_throttle);
4813 4813 }
4814 4814
4815 4815 if (flags & SD_CONF_BSET_DISKSORT_DISABLED) {
4816 4816 un->un_f_disksort_disabled =
4817 4817 (prop_list->sdt_disk_sort_dis != 0) ?
4818 4818 TRUE : FALSE;
4819 4819 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4820 4820 "sd_set_vers1_properties: disksort disabled "
4821 4821 "flag set to %d\n",
4822 4822 prop_list->sdt_disk_sort_dis);
4823 4823 }
4824 4824
4825 4825 if (flags & SD_CONF_BSET_LUN_RESET_ENABLED) {
4826 4826 un->un_f_lun_reset_enabled =
4827 4827 (prop_list->sdt_lun_reset_enable != 0) ?
4828 4828 TRUE : FALSE;
4829 4829 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4830 4830 "sd_set_vers1_properties: lun reset enabled "
4831 4831 "flag set to %d\n",
4832 4832 prop_list->sdt_lun_reset_enable);
4833 4833 }
4834 4834
4835 4835 if (flags & SD_CONF_BSET_CACHE_IS_NV) {
4836 4836 un->un_f_suppress_cache_flush =
4837 4837 (prop_list->sdt_suppress_cache_flush != 0) ?
4838 4838 TRUE : FALSE;
4839 4839 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4840 4840 "sd_set_vers1_properties: suppress_cache_flush "
4841 4841 "flag set to %d\n",
4842 4842 prop_list->sdt_suppress_cache_flush);
4843 4843 }
4844 4844
4845 4845 if (flags & SD_CONF_BSET_PC_DISABLED) {
4846 4846 un->un_f_power_condition_disabled =
4847 4847 (prop_list->sdt_power_condition_dis != 0) ?
4848 4848 TRUE : FALSE;
4849 4849 SD_INFO(SD_LOG_ATTACH_DETACH, un,
4850 4850 "sd_set_vers1_properties: power_condition_disabled "
4851 4851 "flag set to %d\n",
4852 4852 prop_list->sdt_power_condition_dis);
4853 4853 }
4854 4854
4855 4855 /*
4856 4856 * Validate the throttle values.
4857 4857 * If any of the numbers are invalid, set everything to defaults.
4858 4858 */
4859 4859 if ((un->un_throttle < SD_LOWEST_VALID_THROTTLE) ||
4860 4860 (un->un_min_throttle < SD_LOWEST_VALID_THROTTLE) ||
4861 4861 (un->un_min_throttle > un->un_throttle)) {
4862 4862 un->un_saved_throttle = un->un_throttle = sd_max_throttle;
4863 4863 un->un_min_throttle = sd_min_throttle;
4864 4864 }
4865 4865 }
4866 4866
4867 4867 /*
4868 4868 * Function: sd_is_lsi()
4869 4869 *
4870 4870 * Description: Check for lsi devices, step through the static device
4871 4871 * table to match vid/pid.
4872 4872 *
4873 4873 * Args: un - ptr to sd_lun
4874 4874 *
4875 4875  * Notes: When creating a new LSI property, the new property must also
4876 4876  *		be added to this function.
4877 4877 */
4878 4878 static void
4879 4879 sd_is_lsi(struct sd_lun *un)
4880 4880 {
4881 4881 char *id = NULL;
4882 4882 int table_index;
4883 4883 int idlen;
4884 4884 void *prop;
4885 4885
4886 4886 ASSERT(un != NULL);
4887 4887 for (table_index = 0; table_index < sd_disk_table_size;
4888 4888 table_index++) {
4889 4889 id = sd_disk_table[table_index].device_id;
4890 4890 idlen = strlen(id);
4891 4891 if (idlen == 0) {
4892 4892 continue;
4893 4893 }
4894 4894
4895 4895 if (sd_sdconf_id_match(un, id, idlen) == SD_SUCCESS) {
4896 4896 prop = sd_disk_table[table_index].properties;
4897 4897 if (prop == &lsi_properties ||
4898 4898 prop == &lsi_oem_properties ||
4899 4899 prop == &lsi_properties_scsi ||
4900 4900 prop == &symbios_properties) {
4901 4901 un->un_f_cfg_is_lsi = TRUE;
4902 4902 }
4903 4903 break;
4904 4904 }
4905 4905 }
4906 4906 }
4907 4907
4908 4908 /*
4909 4909 * Function: sd_get_physical_geometry
4910 4910 *
4911 4911 * Description: Retrieve the MODE SENSE page 3 (Format Device Page) and
4912 4912 * MODE SENSE page 4 (Rigid Disk Drive Geometry Page) from the
4913 4913 * target, and use this information to initialize the physical
4914 4914 * geometry cache specified by pgeom_p.
4915 4915 *
4916 4916 * MODE SENSE is an optional command, so failure in this case
4917 4917 * does not necessarily denote an error. We want to use the
4918 4918 * MODE SENSE commands to derive the physical geometry of the
4919 4919 * device, but if either command fails, the logical geometry is
4920 4920 * used as the fallback for disk label geometry in cmlb.
4921 4921 *
4922 4922 * This requires that un->un_blockcount and un->un_tgt_blocksize
4923 4923 * have already been initialized for the current target and
4924 4924 * that the current values be passed as args so that we don't
4925 4925 * end up ever trying to use -1 as a valid value. This could
4926 4926 * happen if either value is reset while we're not holding
4927 4927 * the mutex.
4928 4928 *
4929 4929 * Arguments: un - driver soft state (unit) structure
4930 4930 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
4931 4931 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
4932 4932 * to use the USCSI "direct" chain and bypass the normal
4933 4933 * command waitq.
4934 4934 *
4935 4935 * Context: Kernel thread only (can sleep).
4936 4936 */
4937 4937
4938 4938 static int
4939 4939 sd_get_physical_geometry(struct sd_lun *un, cmlb_geom_t *pgeom_p,
4940 4940 diskaddr_t capacity, int lbasize, int path_flag)
4941 4941 {
4942 4942 struct mode_format *page3p;
4943 4943 struct mode_geometry *page4p;
4944 4944 struct mode_header *headerp;
4945 4945 int sector_size;
4946 4946 int nsect;
4947 4947 int nhead;
4948 4948 int ncyl;
4949 4949 int intrlv;
4950 4950 int spc;
4951 4951 diskaddr_t modesense_capacity;
4952 4952 int rpm;
4953 4953 int bd_len;
4954 4954 int mode_header_length;
4955 4955 uchar_t *p3bufp;
4956 4956 uchar_t *p4bufp;
4957 4957 int cdbsize;
4958 4958 int ret = EIO;
4959 4959 sd_ssc_t *ssc;
4960 4960 int status;
4961 4961
4962 4962 ASSERT(un != NULL);
4963 4963
4964 4964 if (lbasize == 0) {
4965 4965 if (ISCD(un)) {
4966 4966 lbasize = 2048;
4967 4967 } else {
4968 4968 lbasize = un->un_sys_blocksize;
4969 4969 }
4970 4970 }
4971 4971 pgeom_p->g_secsize = (unsigned short)lbasize;
4972 4972
4973 4973 /*
4974 4974 * If the unit is a cd/dvd drive MODE SENSE page three
4975 4975 * and MODE SENSE page four are reserved (see SBC spec
4976 4976 * and MMC spec). To prevent soft errors just return
4977 4977 * using the default LBA size.
4978 4978 */
4979 4979 if (ISCD(un))
4980 4980 return (ret);
4981 4981
4982 4982 cdbsize = (un->un_f_cfg_is_atapi == TRUE) ? CDB_GROUP2 : CDB_GROUP0;
4983 4983
4984 4984 /*
4985 4985 * Retrieve MODE SENSE page 3 - Format Device Page
4986 4986 */
4987 4987 p3bufp = kmem_zalloc(SD_MODE_SENSE_PAGE3_LENGTH, KM_SLEEP);
4988 4988 ssc = sd_ssc_init(un);
4989 4989 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p3bufp,
4990 4990 SD_MODE_SENSE_PAGE3_LENGTH, SD_MODE_SENSE_PAGE3_CODE, path_flag);
4991 4991 if (status != 0) {
4992 4992 SD_ERROR(SD_LOG_COMMON, un,
4993 4993 "sd_get_physical_geometry: mode sense page 3 failed\n");
4994 4994 goto page3_exit;
4995 4995 }
4996 4996
4997 4997 /*
4998 4998 * Determine size of Block Descriptors in order to locate the mode
4999 4999 * page data. ATAPI devices return 0, SCSI devices should return
5000 5000 * MODE_BLK_DESC_LENGTH.
5001 5001 */
5002 5002 headerp = (struct mode_header *)p3bufp;
5003 5003 if (un->un_f_cfg_is_atapi == TRUE) {
5004 5004 struct mode_header_grp2 *mhp =
5005 5005 (struct mode_header_grp2 *)headerp;
5006 5006 mode_header_length = MODE_HEADER_LENGTH_GRP2;
5007 5007 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
5008 5008 } else {
5009 5009 mode_header_length = MODE_HEADER_LENGTH;
5010 5010 bd_len = ((struct mode_header *)headerp)->bdesc_length;
5011 5011 }
5012 5012
5013 5013 if (bd_len > MODE_BLK_DESC_LENGTH) {
5014 5014 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
5015 5015 "sd_get_physical_geometry: received unexpected bd_len "
5016 5016 "of %d, page3\n", bd_len);
5017 5017 status = EIO;
5018 5018 goto page3_exit;
5019 5019 }
5020 5020
5021 5021 page3p = (struct mode_format *)
5022 5022 ((caddr_t)headerp + mode_header_length + bd_len);
5023 5023
5024 5024 if (page3p->mode_page.code != SD_MODE_SENSE_PAGE3_CODE) {
5025 5025 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
5026 5026 "sd_get_physical_geometry: mode sense pg3 code mismatch "
5027 5027 "%d\n", page3p->mode_page.code);
5028 5028 status = EIO;
5029 5029 goto page3_exit;
5030 5030 }
5031 5031
5032 5032 /*
5033 5033 * Use this physical geometry data only if BOTH MODE SENSE commands
5034 5034 * complete successfully; otherwise, revert to the logical geometry.
5035 5035 * So, we need to save everything in temporary variables.
5036 5036 */
5037 5037 sector_size = BE_16(page3p->data_bytes_sect);
5038 5038
5039 5039 /*
5040 5040 * 1243403: The NEC D38x7 drives do not support MODE SENSE sector size
5041 5041 */
5042 5042 if (sector_size == 0) {
5043 5043 sector_size = un->un_sys_blocksize;
5044 5044 } else {
5045 5045 sector_size &= ~(un->un_sys_blocksize - 1);
5046 5046 }
5047 5047
5048 5048 nsect = BE_16(page3p->sect_track);
5049 5049 intrlv = BE_16(page3p->interleave);
5050 5050
5051 5051 SD_INFO(SD_LOG_COMMON, un,
5052 5052 "sd_get_physical_geometry: Format Parameters (page 3)\n");
5053 5053 SD_INFO(SD_LOG_COMMON, un,
5054 5054 " mode page: %d; nsect: %d; sector size: %d;\n",
5055 5055 page3p->mode_page.code, nsect, sector_size);
5056 5056 SD_INFO(SD_LOG_COMMON, un,
5057 5057 " interleave: %d; track skew: %d; cylinder skew: %d;\n", intrlv,
5058 5058 BE_16(page3p->track_skew),
5059 5059 BE_16(page3p->cylinder_skew));
5060 5060
5061 5061 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
5062 5062
5063 5063 /*
5064 5064 * Retrieve MODE SENSE page 4 - Rigid Disk Drive Geometry Page
5065 5065 */
5066 5066 p4bufp = kmem_zalloc(SD_MODE_SENSE_PAGE4_LENGTH, KM_SLEEP);
5067 5067 status = sd_send_scsi_MODE_SENSE(ssc, cdbsize, p4bufp,
5068 5068 SD_MODE_SENSE_PAGE4_LENGTH, SD_MODE_SENSE_PAGE4_CODE, path_flag);
5069 5069 if (status != 0) {
5070 5070 SD_ERROR(SD_LOG_COMMON, un,
5071 5071 "sd_get_physical_geometry: mode sense page 4 failed\n");
5072 5072 goto page4_exit;
5073 5073 }
5074 5074
5075 5075 /*
5076 5076 * Determine size of Block Descriptors in order to locate the mode
5077 5077 * page data. ATAPI devices return 0, SCSI devices should return
5078 5078 * MODE_BLK_DESC_LENGTH.
5079 5079 */
5080 5080 headerp = (struct mode_header *)p4bufp;
5081 5081 if (un->un_f_cfg_is_atapi == TRUE) {
5082 5082 struct mode_header_grp2 *mhp =
5083 5083 (struct mode_header_grp2 *)headerp;
5084 5084 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
5085 5085 } else {
5086 5086 bd_len = ((struct mode_header *)headerp)->bdesc_length;
5087 5087 }
5088 5088
5089 5089 if (bd_len > MODE_BLK_DESC_LENGTH) {
5090 5090 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
5091 5091 "sd_get_physical_geometry: received unexpected bd_len of "
5092 5092 "%d, page4\n", bd_len);
5093 5093 status = EIO;
5094 5094 goto page4_exit;
5095 5095 }
5096 5096
5097 5097 page4p = (struct mode_geometry *)
5098 5098 ((caddr_t)headerp + mode_header_length + bd_len);
5099 5099
5100 5100 if (page4p->mode_page.code != SD_MODE_SENSE_PAGE4_CODE) {
5101 5101 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
5102 5102 "sd_get_physical_geometry: mode sense pg4 code mismatch "
5103 5103 "%d\n", page4p->mode_page.code);
5104 5104 status = EIO;
5105 5105 goto page4_exit;
5106 5106 }
5107 5107
5108 5108 /*
5109 5109 * Stash the data now, after we know that both commands completed.
5110 5110 */
5111 5111
5112 5112
5113 5113 nhead = (int)page4p->heads; /* uchar, so no conversion needed */
5114 5114 spc = nhead * nsect;
5115 5115 ncyl = (page4p->cyl_ub << 16) + (page4p->cyl_mb << 8) + page4p->cyl_lb;
5116 5116 rpm = BE_16(page4p->rpm);
5117 5117
5118 5118 modesense_capacity = spc * ncyl;
5119 5119
5120 5120 SD_INFO(SD_LOG_COMMON, un,
5121 5121 "sd_get_physical_geometry: Geometry Parameters (page 4)\n");
5122 5122 SD_INFO(SD_LOG_COMMON, un,
5123 5123 " cylinders: %d; heads: %d; rpm: %d;\n", ncyl, nhead, rpm);
5124 5124 SD_INFO(SD_LOG_COMMON, un,
5125 5125 " computed capacity(h*s*c): %d;\n", modesense_capacity);
5126 5126 SD_INFO(SD_LOG_COMMON, un, " pgeom_p: %p; read cap: %d\n",
5127 5127 (void *)pgeom_p, capacity);
5128 5128
5129 5129 /*
5130 5130 * Compensate if the drive's geometry is not rectangular, i.e.,
5131 5131 * the product of C * H * S returned by MODE SENSE >= that returned
5132 5132 * by read capacity. This is an idiosyncrasy of the original x86
5133 5133 * disk subsystem.
5134 5134 */
5135 5135 if (modesense_capacity >= capacity) {
5136 5136 SD_INFO(SD_LOG_COMMON, un,
5137 5137 "sd_get_physical_geometry: adjusting acyl; "
5138 5138 "old: %d; new: %d\n", pgeom_p->g_acyl,
5139 5139 (modesense_capacity - capacity + spc - 1) / spc);
5140 5140 if (sector_size != 0) {
5141 5141 /* 1243403: NEC D38x7 drives don't support sec size */
5142 5142 pgeom_p->g_secsize = (unsigned short)sector_size;
5143 5143 }
5144 5144 pgeom_p->g_nsect = (unsigned short)nsect;
5145 5145 pgeom_p->g_nhead = (unsigned short)nhead;
5146 5146 pgeom_p->g_capacity = capacity;
5147 5147 pgeom_p->g_acyl =
5148 5148 (modesense_capacity - pgeom_p->g_capacity + spc - 1) / spc;
5149 5149 pgeom_p->g_ncyl = ncyl - pgeom_p->g_acyl;
5150 5150 }
5151 5151
5152 5152 pgeom_p->g_rpm = (unsigned short)rpm;
5153 5153 pgeom_p->g_intrlv = (unsigned short)intrlv;
5154 5154 ret = 0;
5155 5155
5156 5156 SD_INFO(SD_LOG_COMMON, un,
5157 5157 "sd_get_physical_geometry: mode sense geometry:\n");
5158 5158 SD_INFO(SD_LOG_COMMON, un,
5159 5159 " nsect: %d; sector size: %d; interlv: %d\n",
5160 5160 nsect, sector_size, intrlv);
5161 5161 SD_INFO(SD_LOG_COMMON, un,
5162 5162 " nhead: %d; ncyl: %d; rpm: %d; capacity(ms): %d\n",
5163 5163 nhead, ncyl, rpm, modesense_capacity);
5164 5164 SD_INFO(SD_LOG_COMMON, un,
5165 5165 "sd_get_physical_geometry: (cached)\n");
5166 5166 SD_INFO(SD_LOG_COMMON, un,
5167 5167 " ncyl: %ld; acyl: %d; nhead: %d; nsect: %d\n",
5168 5168 pgeom_p->g_ncyl, pgeom_p->g_acyl,
5169 5169 pgeom_p->g_nhead, pgeom_p->g_nsect);
5170 5170 SD_INFO(SD_LOG_COMMON, un,
5171 5171 " lbasize: %d; capacity: %ld; intrlv: %d; rpm: %d\n",
5172 5172 pgeom_p->g_secsize, pgeom_p->g_capacity,
5173 5173 pgeom_p->g_intrlv, pgeom_p->g_rpm);
5174 5174 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
5175 5175
5176 5176 page4_exit:
5177 5177 kmem_free(p4bufp, SD_MODE_SENSE_PAGE4_LENGTH);
5178 5178
5179 5179 page3_exit:
5180 5180 kmem_free(p3bufp, SD_MODE_SENSE_PAGE3_LENGTH);
5181 5181
5182 5182 if (status != 0) {
5183 5183 if (status == EIO) {
5184 5184 /*
5185 5185 			 * Some disks do not support MODE SENSE(6); we
5186 5186 			 * should ignore this kind of error (sense key
5187 5187 			 * 0x5 - ILLEGAL REQUEST).
5188 5188 */
5189 5189 uint8_t *sensep;
5190 5190 int senlen;
5191 5191
5192 5192 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
5193 5193 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
5194 5194 ssc->ssc_uscsi_cmd->uscsi_rqresid);
5195 5195
5196 5196 if (senlen > 0 &&
5197 5197 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
5198 5198 sd_ssc_assessment(ssc,
5199 5199 SD_FMT_IGNORE_COMPROMISE);
5200 5200 } else {
5201 5201 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
5202 5202 }
5203 5203 } else {
5204 5204 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5205 5205 }
5206 5206 }
5207 5207 sd_ssc_fini(ssc);
5208 5208 return (ret);
5209 5209 }
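/*
 * Editor's illustrative note (a sketch with hypothetical numbers): if
 * page 4 reported nhead = 16 and cyl_ub/cyl_mb/cyl_lb = 0x00/0x10/0x00
 * (ncyl = 4096), and page 3 reported nsect = 63, then spc = 16 * 63 =
 * 1008 and modesense_capacity = 1008 * 4096 = 4128768. With a READ
 * CAPACITY value of 4120000, the adjustment above yields
 * g_acyl = (4128768 - 4120000 + 1007) / 1008 = 9 and
 * g_ncyl = 4096 - 9 = 4087, absorbing the non-rectangular remainder
 * into alternate cylinders.
 */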
5210 5210
5211 5211 /*
5212 5212 * Function: sd_get_virtual_geometry
5213 5213 *
5214 5214 * Description: Ask the controller to tell us about the target device.
5215 5215 *
5216 5216  * Arguments: un - pointer to softstate
5217 5217  *		lgeom_p - logical geometry cache to fill in
5218 5218  *		capacity - disk capacity in #blocks
5219 5219  *		lbasize - disk block size in bytes
5219 5219 *
5220 5220 * Context: Kernel thread only
5221 5221 */
5222 5222
5223 5223 static int
5224 5224 sd_get_virtual_geometry(struct sd_lun *un, cmlb_geom_t *lgeom_p,
5225 5225 diskaddr_t capacity, int lbasize)
5226 5226 {
5227 5227 uint_t geombuf;
5228 5228 int spc;
5229 5229
5230 5230 ASSERT(un != NULL);
5231 5231
5232 5232 /* Set sector size, and total number of sectors */
5233 5233 (void) scsi_ifsetcap(SD_ADDRESS(un), "sector-size", lbasize, 1);
5234 5234 (void) scsi_ifsetcap(SD_ADDRESS(un), "total-sectors", capacity, 1);
5235 5235
5236 5236 /* Let the HBA tell us its geometry */
5237 5237 geombuf = (uint_t)scsi_ifgetcap(SD_ADDRESS(un), "geometry", 1);
5238 5238
5239 5239 /* A value of -1 indicates an undefined "geometry" property */
5240 5240 if (geombuf == (-1)) {
5241 5241 return (EINVAL);
5242 5242 }
5243 5243
5244 5244 /* Initialize the logical geometry cache. */
5245 5245 lgeom_p->g_nhead = (geombuf >> 16) & 0xffff;
5246 5246 lgeom_p->g_nsect = geombuf & 0xffff;
5247 5247 lgeom_p->g_secsize = un->un_sys_blocksize;
5248 5248
5249 5249 spc = lgeom_p->g_nhead * lgeom_p->g_nsect;
5250 5250
5251 5251 /*
5252 5252 * Note: The driver originally converted the capacity value from
5253 5253 * target blocks to system blocks. However, the capacity value passed
5254 5254 * to this routine is already in terms of system blocks (this scaling
5255 5255 * is done when the READ CAPACITY command is issued and processed).
5256 5256 * This 'error' may have gone undetected because the usage of g_ncyl
5257 5257 	 * (which is based upon g_capacity) is very limited within the driver.
5258 5258 */
5259 5259 lgeom_p->g_capacity = capacity;
5260 5260
5261 5261 /*
5262 5262 	 * Set ncyl to zero if the HBA returned a zero nhead or nsect value.
5263 5263 	 * The HBA may return zero values if the device has been removed.
5264 5264 */
5265 5265 if (spc == 0) {
5266 5266 lgeom_p->g_ncyl = 0;
5267 5267 } else {
5268 5268 lgeom_p->g_ncyl = lgeom_p->g_capacity / spc;
5269 5269 }
5270 5270 lgeom_p->g_acyl = 0;
5271 5271
5272 5272 SD_INFO(SD_LOG_COMMON, un, "sd_get_virtual_geometry: (cached)\n");
5273 5273 return (0);
5274 5274
5275 5275 }
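/*
 * Editor's illustrative note (a sketch with a hypothetical value): the
 * "geometry" capability packs nhead into the upper 16 bits and nsect
 * into the lower 16. A geombuf of 0x00FF003F decodes to nhead = 255
 * and nsect = 63, so spc = 16065; with capacity = 16450560 system
 * blocks, the code above computes g_ncyl = 16450560 / 16065 = 1024.
 */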
5276 5276 /*
5277 5277 * Function: sd_update_block_info
5278 5278 *
5279 5279 * Description: Calculate a byte count to sector count bitshift value
5280 5280 * from sector size.
5281 5281 *
5282 5282 * Arguments: un: unit struct.
5283 5283 * lbasize: new target sector size
5284 5284 * capacity: new target capacity, ie. block count
5285 5285 *
5286 5286 * Context: Kernel thread context
5287 5287 */
5288 5288
5289 5289 static void
5290 5290 sd_update_block_info(struct sd_lun *un, uint32_t lbasize, uint64_t capacity)
5291 5291 {
5292 5292 if (lbasize != 0) {
5293 5293 un->un_tgt_blocksize = lbasize;
5294 5294 un->un_f_tgt_blocksize_is_valid = TRUE;
5295 5295 if (!un->un_f_has_removable_media) {
5296 5296 un->un_sys_blocksize = lbasize;
5297 5297 }
5298 5298 }
5299 5299
5300 5300 if (capacity != 0) {
5301 5301 un->un_blockcount = capacity;
5302 5302 un->un_f_blockcount_is_valid = TRUE;
5303 5303
5304 5304 /*
5305 5305 * The capacity has changed so update the errstats.
5306 5306 */
5307 5307 if (un->un_errstats != NULL) {
5308 5308 struct sd_errstats *stp;
5309 5309
5310 5310 capacity *= un->un_sys_blocksize;
5311 5311 stp = (struct sd_errstats *)un->un_errstats->ks_data;
5312 5312 if (stp->sd_capacity.value.ui64 < capacity)
5313 5313 stp->sd_capacity.value.ui64 = capacity;
5314 5314 }
5315 5315 }
5316 5316 }
5317 5317
5318 5318
5319 5319 /*
5320 5320 * Function: sd_register_devid
5321 5321 *
5322 5322 * Description: This routine will obtain the device id information from the
5323 5323 * target, obtain the serial number, and register the device
5324 5324 * id with the ddi framework.
5325 5325 *
5326 5326  * Arguments: ssc - access handle to the unit's soft state structure
5327 5327  *		devi - the system's dev_info_t for the device.
5328 5328 * reservation_flag - indicates if a reservation conflict
5329 5329 * occurred during attach
5330 5330 *
5331 5331 * Context: Kernel Thread
5332 5332 */
5333 5333 static void
5334 5334 sd_register_devid(sd_ssc_t *ssc, dev_info_t *devi, int reservation_flag)
5335 5335 {
5336 5336 int rval = 0;
5337 5337 uchar_t *inq80 = NULL;
5338 5338 size_t inq80_len = MAX_INQUIRY_SIZE;
5339 5339 size_t inq80_resid = 0;
5340 5340 uchar_t *inq83 = NULL;
5341 5341 size_t inq83_len = MAX_INQUIRY_SIZE;
5342 5342 size_t inq83_resid = 0;
5343 5343 int dlen, len;
5344 5344 char *sn;
5345 5345 struct sd_lun *un;
5346 5346
5347 5347 ASSERT(ssc != NULL);
5348 5348 un = ssc->ssc_un;
5349 5349 ASSERT(un != NULL);
5350 5350 ASSERT(mutex_owned(SD_MUTEX(un)));
5351 5351 ASSERT((SD_DEVINFO(un)) == devi);
5352 5352
5353 5353
5354 5354 /*
5355 5355 * We check the availability of the World Wide Name (0x83) and Unit
5356 5356 	 * Serial Number (0x80) pages in sd_check_vpd_page_support() and use
5357 5357 	 * the resulting un_vpd_page_mask to decide how to get the WWN. If
5358 5358 	 * 0x83 is available, that is the best choice. Our next choice is
5359 5359 	 * 0x80. If neither is available, we munge the devid from the device
5360 5360 * vid/pid/serial # for Sun qualified disks, or use the ddi framework
5361 5361 * to fabricate a devid for non-Sun qualified disks.
5362 5362 */
5363 5363 if (sd_check_vpd_page_support(ssc) == 0) {
5364 5364 /* collect page 80 data if available */
5365 5365 if (un->un_vpd_page_mask & SD_VPD_UNIT_SERIAL_PG) {
5366 5366
5367 5367 mutex_exit(SD_MUTEX(un));
5368 5368 inq80 = kmem_zalloc(inq80_len, KM_SLEEP);
5369 5369
5370 5370 rval = sd_send_scsi_INQUIRY(ssc, inq80, inq80_len,
5371 5371 0x01, 0x80, &inq80_resid);
5372 5372
5373 5373 if (rval != 0) {
5374 5374 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5375 5375 kmem_free(inq80, inq80_len);
5376 5376 inq80 = NULL;
5377 5377 inq80_len = 0;
5378 5378 } else if (ddi_prop_exists(
5379 5379 DDI_DEV_T_NONE, SD_DEVINFO(un),
5380 5380 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
5381 5381 INQUIRY_SERIAL_NO) == 0) {
5382 5382 /*
5383 5383 * If we don't already have a serial number
5384 5384 * property, do quick verify of data returned
5385 5385 * and define property.
5386 5386 */
5387 5387 dlen = inq80_len - inq80_resid;
5388 5388 len = (size_t)inq80[3];
5389 5389 if ((dlen >= 4) && ((len + 4) <= dlen)) {
5390 5390 /*
5391 5391 * Ensure sn termination, skip leading
5392 5392 * blanks, and create property
5393 5393 * 'inquiry-serial-no'.
5394 5394 */
5395 5395 sn = (char *)&inq80[4];
5396 5396 sn[len] = 0;
5397 5397 while (*sn && (*sn == ' '))
5398 5398 sn++;
5399 5399 if (*sn) {
5400 5400 (void) ddi_prop_update_string(
5401 5401 DDI_DEV_T_NONE,
5402 5402 SD_DEVINFO(un),
5403 5403 INQUIRY_SERIAL_NO, sn);
5404 5404 }
5405 5405 }
5406 5406 }
5407 5407 mutex_enter(SD_MUTEX(un));
5408 5408 }
5409 5409
5410 5410 /* collect page 83 data if available */
5411 5411 if (un->un_vpd_page_mask & SD_VPD_DEVID_WWN_PG) {
5412 5412 mutex_exit(SD_MUTEX(un));
5413 5413 inq83 = kmem_zalloc(inq83_len, KM_SLEEP);
5414 5414
5415 5415 rval = sd_send_scsi_INQUIRY(ssc, inq83, inq83_len,
5416 5416 0x01, 0x83, &inq83_resid);
5417 5417
5418 5418 if (rval != 0) {
5419 5419 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5420 5420 kmem_free(inq83, inq83_len);
5421 5421 inq83 = NULL;
5422 5422 inq83_len = 0;
5423 5423 }
5424 5424 mutex_enter(SD_MUTEX(un));
5425 5425 }
5426 5426 }
5427 5427
5428 5428 /*
5429 5429 * If transport has already registered a devid for this target
5430 5430 * then that takes precedence over the driver's determination
5431 5431 * of the devid.
5432 5432 *
5433 5433 * NOTE: The reason this check is done here instead of at the beginning
5434 5434 * of the function is to allow the code above to create the
5435 5435 * 'inquiry-serial-no' property.
5436 5436 */
5437 5437 if (ddi_devid_get(SD_DEVINFO(un), &un->un_devid) == DDI_SUCCESS) {
5438 5438 ASSERT(un->un_devid);
5439 5439 un->un_f_devid_transport_defined = TRUE;
5440 5440 goto cleanup; /* use devid registered by the transport */
5441 5441 }
5442 5442
5443 5443 /*
5444 5444 * This is the case of antiquated Sun disk drives that have the
5445 5445 * FAB_DEVID property set in the disk_table. These drives
5446 5446 	 * manage their devids by storing them in the last two available
5447 5447 	 * sectors on the drive and have them fabricated by the ddi layer by
5448 5448 	 * calling ddi_devid_init and passing the DEVID_FAB flag.
5449 5449 */
5450 5450 if (un->un_f_opt_fab_devid == TRUE) {
5451 5451 /*
5452 5452 * Depending on EINVAL isn't reliable, since a reserved disk
5453 5453 * may result in invalid geometry, so check to make sure a
5454 5454 * reservation conflict did not occur during attach.
5455 5455 */
5456 5456 if ((sd_get_devid(ssc) == EINVAL) &&
5457 5457 (reservation_flag != SD_TARGET_IS_RESERVED)) {
5458 5458 /*
5459 5459 * The devid is invalid AND there is no reservation
5460 5460 * conflict. Fabricate a new devid.
5461 5461 */
5462 5462 (void) sd_create_devid(ssc);
5463 5463 }
5464 5464
5465 5465 /* Register the devid if it exists */
5466 5466 if (un->un_devid != NULL) {
5467 5467 (void) ddi_devid_register(SD_DEVINFO(un),
5468 5468 un->un_devid);
5469 5469 SD_INFO(SD_LOG_ATTACH_DETACH, un,
5470 5470 "sd_register_devid: Devid Fabricated\n");
5471 5471 }
5472 5472 goto cleanup;
5473 5473 }
5474 5474
5475 5475 /* encode best devid possible based on data available */
5476 5476 if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
5477 5477 (char *)ddi_driver_name(SD_DEVINFO(un)),
5478 5478 (uchar_t *)SD_INQUIRY(un), sizeof (*SD_INQUIRY(un)),
5479 5479 inq80, inq80_len - inq80_resid, inq83, inq83_len -
5480 5480 inq83_resid, &un->un_devid) == DDI_SUCCESS) {
5481 5481
5482 5482 /* devid successfully encoded, register devid */
5483 5483 (void) ddi_devid_register(SD_DEVINFO(un), un->un_devid);
5484 5484
5485 5485 } else {
5486 5486 /*
5487 5487 * Unable to encode a devid based on data available.
5488 5488 * This is not a Sun qualified disk. Older Sun disk
5489 5489 * drives that have the SD_FAB_DEVID property
5490 5490 * set in the disk_table and non Sun qualified
5491 5491 * disks are treated in the same manner. These
5492 5492 		 * drives manage their devids by storing them in
5493 5493 		 * the last two available sectors on the drive and
5494 5494 		 * have them fabricated by the ddi layer by
5495 5495 		 * calling ddi_devid_init and passing the
5496 5496 		 * DEVID_FAB flag.
5497 5497 		 * Create a fabricated devid only if no
5498 5498 		 * fabricated devid already exists.
5499 5499 */
5500 5500 if (sd_get_devid(ssc) == EINVAL) {
5501 5501 (void) sd_create_devid(ssc);
5502 5502 }
5503 5503 un->un_f_opt_fab_devid = TRUE;
5504 5504
5505 5505 /* Register the devid if it exists */
5506 5506 if (un->un_devid != NULL) {
5507 5507 (void) ddi_devid_register(SD_DEVINFO(un),
5508 5508 un->un_devid);
5509 5509 SD_INFO(SD_LOG_ATTACH_DETACH, un,
5510 5510 "sd_register_devid: devid fabricated using "
5511 5511 "ddi framework\n");
5512 5512 }
5513 5513 }
5514 5514
5515 5515 cleanup:
5516 5516 /* clean up resources */
5517 5517 if (inq80 != NULL) {
5518 5518 kmem_free(inq80, inq80_len);
5519 5519 }
5520 5520 if (inq83 != NULL) {
5521 5521 kmem_free(inq83, inq83_len);
5522 5522 }
5523 5523 }
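/*
 * Editor's illustrative note (a sketch with hypothetical data): the
 * page 0x80 parsing in sd_register_devid() relies on the standard Unit
 * Serial Number VPD layout, where byte 3 holds the serial-number length
 * and the serial number starts at byte 4. For a response with
 * dlen = 12, inq80[3] = 8, and bytes 4-11 containing "  ABC123", the
 * (dlen >= 4) && ((len + 4) <= dlen) check passes, the two leading
 * blanks are skipped, and "ABC123" becomes the 'inquiry-serial-no'
 * property.
 */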
5524 5524
5525 5525
5526 5526
5527 5527 /*
5528 5528 * Function: sd_get_devid
5529 5529 *
5530 5530 * Description: This routine will return 0 if a valid device id has been
5531 5531 * obtained from the target and stored in the soft state. If a
5532 5532 * valid device id has not been previously read and stored, a
5533 5533 * read attempt will be made.
5534 5534 *
5535 5535  * Arguments: ssc - access handle to the unit's soft state structure
5536 5536 *
5537 5537 * Return Code: 0 if we successfully get the device id
5538 5538 *
5539 5539 * Context: Kernel Thread
5540 5540 */
5541 5541
5542 5542 static int
5543 5543 sd_get_devid(sd_ssc_t *ssc)
5544 5544 {
5545 5545 struct dk_devid *dkdevid;
5546 5546 ddi_devid_t tmpid;
5547 5547 uint_t *ip;
5548 5548 size_t sz;
5549 5549 diskaddr_t blk;
5550 5550 int status;
5551 5551 int chksum;
5552 5552 int i;
5553 5553 size_t buffer_size;
5554 5554 struct sd_lun *un;
5555 5555
5556 5556 ASSERT(ssc != NULL);
5557 5557 un = ssc->ssc_un;
5558 5558 ASSERT(un != NULL);
5559 5559 ASSERT(mutex_owned(SD_MUTEX(un)));
5560 5560
5561 5561 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: entry: un: 0x%p\n",
5562 5562 un);
5563 5563
5564 5564 if (un->un_devid != NULL) {
5565 5565 return (0);
5566 5566 }
5567 5567
5568 5568 mutex_exit(SD_MUTEX(un));
5569 5569 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
5570 5570 (void *)SD_PATH_DIRECT) != 0) {
5571 5571 mutex_enter(SD_MUTEX(un));
5572 5572 return (EINVAL);
5573 5573 }
5574 5574
5575 5575 /*
5576 5576 * Read and verify device id, stored in the reserved cylinders at the
5577 5577 	 * end of the disk. The backup label is on the odd sectors of the
5578 5578 	 * last track of the last cylinder; the device id is on a track of
5579 5579 	 * the next-to-last cylinder.
5580 5580 */
5581 5581 mutex_enter(SD_MUTEX(un));
5582 5582 buffer_size = SD_REQBYTES2TGTBYTES(un, sizeof (struct dk_devid));
5583 5583 mutex_exit(SD_MUTEX(un));
5584 5584 dkdevid = kmem_alloc(buffer_size, KM_SLEEP);
5585 5585 status = sd_send_scsi_READ(ssc, dkdevid, buffer_size, blk,
5586 5586 SD_PATH_DIRECT);
5587 5587
5588 5588 if (status != 0) {
5589 5589 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5590 5590 goto error;
5591 5591 }
5592 5592
5593 5593 /* Validate the revision */
5594 5594 if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
5595 5595 (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
5596 5596 status = EINVAL;
5597 5597 goto error;
5598 5598 }
5599 5599
5600 5600 /* Calculate the checksum */
5601 5601 chksum = 0;
5602 5602 ip = (uint_t *)dkdevid;
5603 5603 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
5604 5604 i++) {
5605 5605 chksum ^= ip[i];
5606 5606 }
5607 5607
5608 5608 /* Compare the checksums */
5609 5609 if (DKD_GETCHKSUM(dkdevid) != chksum) {
5610 5610 status = EINVAL;
5611 5611 goto error;
5612 5612 }
5613 5613
5614 5614 /* Validate the device id */
5615 5615 if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
5616 5616 status = EINVAL;
5617 5617 goto error;
5618 5618 }
5619 5619
5620 5620 /*
5621 5621 * Store the device id in the driver soft state
5622 5622 */
5623 5623 sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
5624 5624 tmpid = kmem_alloc(sz, KM_SLEEP);
5625 5625
5626 5626 mutex_enter(SD_MUTEX(un));
5627 5627
5628 5628 un->un_devid = tmpid;
5629 5629 bcopy(&dkdevid->dkd_devid, un->un_devid, sz);
5630 5630
5631 5631 kmem_free(dkdevid, buffer_size);
5632 5632
5633 5633 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_get_devid: exit: un:0x%p\n", un);
5634 5634
5635 5635 return (status);
5636 5636 error:
5637 5637 mutex_enter(SD_MUTEX(un));
5638 5638 kmem_free(dkdevid, buffer_size);
5639 5639 return (status);
5640 5640 }
5641 5641
5642 5642
5643 5643 /*
5644 5644 * Function: sd_create_devid
5645 5645 *
5646 5646 * Description: This routine will fabricate the device id and write it
5647 5647 * to the disk.
5648 5648 *
5649 5649  * Arguments: ssc - access handle to the unit's soft state structure
5650 5650  *
5651 5651  * Return Code: the fabricated device id, or NULL on failure
5652 5652 *
5653 5653 * Context: Kernel Thread
5654 5654 */
5655 5655
5656 5656 static ddi_devid_t
5657 5657 sd_create_devid(sd_ssc_t *ssc)
5658 5658 {
5659 5659 struct sd_lun *un;
5660 5660
5661 5661 ASSERT(ssc != NULL);
5662 5662 un = ssc->ssc_un;
5663 5663 ASSERT(un != NULL);
5664 5664
5665 5665 /* Fabricate the devid */
5666 5666 if (ddi_devid_init(SD_DEVINFO(un), DEVID_FAB, 0, NULL, &un->un_devid)
5667 5667 == DDI_FAILURE) {
5668 5668 return (NULL);
5669 5669 }
5670 5670
5671 5671 /* Write the devid to disk */
5672 5672 if (sd_write_deviceid(ssc) != 0) {
5673 5673 ddi_devid_free(un->un_devid);
5674 5674 un->un_devid = NULL;
5675 5675 }
5676 5676
5677 5677 return (un->un_devid);
5678 5678 }
5679 5679
5680 5680
5681 5681 /*
5682 5682 * Function: sd_write_deviceid
5683 5683 *
5684 5684 * Description: This routine will write the device id to the disk
5685 5685 * reserved sector.
5686 5686 *
5687 5687  * Arguments: ssc - access handle to the unit's soft state structure
5688 5688  *
5689 5689  * Return Code: 0 - success; -1 - devid block could not be located;
5690 5690  *		otherwise the value returned by sd_send_scsi_WRITE
5691 5691 *
5692 5692 * Context: Kernel Thread
5693 5693 */
5694 5694
5695 5695 static int
5696 5696 sd_write_deviceid(sd_ssc_t *ssc)
5697 5697 {
5698 5698 struct dk_devid *dkdevid;
5699 5699 uchar_t *buf;
5700 5700 diskaddr_t blk;
5701 5701 uint_t *ip, chksum;
5702 5702 int status;
5703 5703 int i;
5704 5704 struct sd_lun *un;
5705 5705
5706 5706 ASSERT(ssc != NULL);
5707 5707 un = ssc->ssc_un;
5708 5708 ASSERT(un != NULL);
5709 5709 ASSERT(mutex_owned(SD_MUTEX(un)));
5710 5710
5711 5711 mutex_exit(SD_MUTEX(un));
5712 5712 if (cmlb_get_devid_block(un->un_cmlbhandle, &blk,
5713 5713 (void *)SD_PATH_DIRECT) != 0) {
5714 5714 mutex_enter(SD_MUTEX(un));
5715 5715 return (-1);
5716 5716 }
5717 5717
5718 5718
5719 5719 /* Allocate the buffer */
5720 5720 buf = kmem_zalloc(un->un_sys_blocksize, KM_SLEEP);
5721 5721 dkdevid = (struct dk_devid *)buf;
5722 5722
5723 5723 /* Fill in the revision */
5724 5724 dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
5725 5725 dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;
5726 5726
5727 5727 /* Copy in the device id */
5728 5728 mutex_enter(SD_MUTEX(un));
5729 5729 bcopy(un->un_devid, &dkdevid->dkd_devid,
5730 5730 ddi_devid_sizeof(un->un_devid));
5731 5731 mutex_exit(SD_MUTEX(un));
5732 5732
5733 5733 /* Calculate the checksum */
5734 5734 chksum = 0;
5735 5735 ip = (uint_t *)dkdevid;
5736 5736 for (i = 0; i < ((DEV_BSIZE - sizeof (int)) / sizeof (int));
5737 5737 i++) {
5738 5738 chksum ^= ip[i];
5739 5739 }
5740 5740
5741 5741 /* Fill-in checksum */
5742 5742 DKD_FORMCHKSUM(chksum, dkdevid);
5743 5743
5744 5744 /* Write the reserved sector */
5745 5745 status = sd_send_scsi_WRITE(ssc, buf, un->un_sys_blocksize, blk,
5746 5746 SD_PATH_DIRECT);
5747 5747 if (status != 0)
5748 5748 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5749 5749
5750 5750 kmem_free(buf, un->un_sys_blocksize);
5751 5751
5752 5752 mutex_enter(SD_MUTEX(un));
5753 5753 return (status);
5754 5754 }
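/*
 * Editor's illustrative note (a sketch): the checksum shared by
 * sd_get_devid() and sd_write_deviceid() is an XOR over every 32-bit
 * word of the devid sector except the final word, which holds the
 * checksum itself. Recomputing the XOR on read reproduces the stored
 * value only if the sector is intact; any flipped bit in the covered
 * words changes the XOR and fails the DKD_GETCHKSUM(dkdevid) != chksum
 * comparison in sd_get_devid().
 */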
5755 5755
5756 5756
5757 5757 /*
5758 5758 * Function: sd_check_vpd_page_support
5759 5759 *
5760 5760 * Description: This routine sends an inquiry command with the EVPD bit set and
5761 5761 * a page code of 0x00 to the device. It is used to determine which
5762 5762  *		vital product pages are available to find the devid. We
5763 5763  *		are looking for pages 0x83, 0x80, or 0xB1.
5764 5764  *
5765 5765  * Arguments: ssc - access handle to the unit's soft state structure
5766 5766  *
5767 5767  * Return Code: 0 - success
5768 5768  *		-1 - the device does not implement VPD pages, or the
5769 5769  *		INQUIRY command failed
5770 5770 *
5771 5771 * Context: This routine can sleep.
5772 5772 */
5773 5773
5774 5774 static int
5775 5775 sd_check_vpd_page_support(sd_ssc_t *ssc)
5776 5776 {
5777 5777 uchar_t *page_list = NULL;
5778 5778 uchar_t page_length = 0xff; /* Use max possible length */
5779 5779 uchar_t evpd = 0x01; /* Set the EVPD bit */
5780 5780 uchar_t page_code = 0x00; /* Supported VPD Pages */
5781 5781 int rval = 0;
5782 5782 int counter;
5783 5783 struct sd_lun *un;
5784 5784
5785 5785 ASSERT(ssc != NULL);
5786 5786 un = ssc->ssc_un;
5787 5787 ASSERT(un != NULL);
5788 5788 ASSERT(mutex_owned(SD_MUTEX(un)));
5789 5789
5790 5790 mutex_exit(SD_MUTEX(un));
5791 5791
5792 5792 /*
5793 5793 * We'll set the page length to the maximum to save figuring it out
5794 5794 * with an additional call.
5795 5795 */
5796 5796 page_list = kmem_zalloc(page_length, KM_SLEEP);
5797 5797
5798 5798 rval = sd_send_scsi_INQUIRY(ssc, page_list, page_length, evpd,
5799 5799 page_code, NULL);
5800 5800
5801 5801 if (rval != 0)
5802 5802 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5803 5803
5804 5804 mutex_enter(SD_MUTEX(un));
5805 5805
5806 5806 /*
5807 5807 * Now we must validate that the device accepted the command, as some
5808 5808 * drives do not support it. If the drive does support it, we will
5809 5809 * return 0, and the supported pages will be in un_vpd_page_mask. If
5810 5810 * not, we return -1.
5811 5811 */
5812 5812 if ((rval == 0) && (page_list[VPD_MODE_PAGE] == 0x00)) {
5813 5813 /* Loop to find one of the 2 pages we need */
5814 5814 counter = 4; /* Supported pages start at byte 4, with 0x00 */
5815 5815
5816 5816 /*
5817 5817 * Pages are returned in ascending order, and 0x83 is what we
5818 5818 * are hoping for.
5819 5819 */
5820 5820 while ((page_list[counter] <= 0xB1) &&
5821 5821 (counter <= (page_list[VPD_PAGE_LENGTH] +
5822 5822 VPD_HEAD_OFFSET))) {
5823 5823 /*
5824 5824 			 * page_list[VPD_PAGE_LENGTH] (byte 3) holds the
5825 5825 			 * count of page codes that follow the header
5826 5826 */
5827 5827
5828 5828 switch (page_list[counter]) {
5829 5829 case 0x00:
5830 5830 un->un_vpd_page_mask |= SD_VPD_SUPPORTED_PG;
5831 5831 break;
5832 5832 case 0x80:
5833 5833 un->un_vpd_page_mask |= SD_VPD_UNIT_SERIAL_PG;
5834 5834 break;
5835 5835 case 0x81:
5836 5836 un->un_vpd_page_mask |= SD_VPD_OPERATING_PG;
5837 5837 break;
5838 5838 case 0x82:
5839 5839 un->un_vpd_page_mask |= SD_VPD_ASCII_OP_PG;
5840 5840 break;
5841 5841 case 0x83:
5842 5842 un->un_vpd_page_mask |= SD_VPD_DEVID_WWN_PG;
5843 5843 break;
5844 5844 case 0x86:
5845 5845 un->un_vpd_page_mask |= SD_VPD_EXTENDED_DATA_PG;
5846 5846 break;
5847 5847 case 0xB1:
5848 5848 un->un_vpd_page_mask |= SD_VPD_DEV_CHARACTER_PG;
5849 5849 break;
5850 5850 }
5851 5851 counter++;
5852 5852 }
5853 5853
5854 5854 } else {
5855 5855 rval = -1;
5856 5856
5857 5857 SD_INFO(SD_LOG_ATTACH_DETACH, un,
5858 5858 "sd_check_vpd_page_support: This drive does not implement "
5859 5859 "VPD pages.\n");
5860 5860 }
5861 5861
5862 5862 kmem_free(page_list, page_length);
5863 5863
5864 5864 return (rval);
5865 5865 }
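/*
 * Editor's illustrative note (a sketch with a hypothetical response): a
 * Supported VPD Pages response of
 *
 *	00 00 00 04 00 80 83 B1
 *
 * carries a page-length byte of 4 at page_list[VPD_PAGE_LENGTH], so the
 * loop above scans the four page codes that follow the header and sets
 * SD_VPD_SUPPORTED_PG, SD_VPD_UNIT_SERIAL_PG, SD_VPD_DEVID_WWN_PG, and
 * SD_VPD_DEV_CHARACTER_PG in un_vpd_page_mask.
 */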
5866 5866
5867 5867
5868 5868 /*
5869 5869 * Function: sd_setup_pm
5870 5870 *
5871 5871 * Description: Initialize Power Management on the device
5872 5872 *
5873 5873 * Context: Kernel Thread
5874 5874 */
5875 5875
5876 5876 static void
5877 5877 sd_setup_pm(sd_ssc_t *ssc, dev_info_t *devi)
5878 5878 {
5879 5879 uint_t log_page_size;
5880 5880 uchar_t *log_page_data;
5881 5881 int rval = 0;
5882 5882 struct sd_lun *un;
5883 5883
5884 5884 ASSERT(ssc != NULL);
5885 5885 un = ssc->ssc_un;
5886 5886 ASSERT(un != NULL);
5887 5887
5888 5888 /*
5889 5889 * Since we are called from attach, holding a mutex for
5890 5890 * un is unnecessary. Because some of the routines called
5891 5891 * from here require SD_MUTEX to not be held, assert this
5892 5892 * right up front.
5893 5893 */
5894 5894 ASSERT(!mutex_owned(SD_MUTEX(un)));
5895 5895 /*
5896 5896 * Since the sd device does not have the 'reg' property,
5897 5897 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
5898 5898 * The following code is to tell cpr that this device
5899 5899 * DOES need to be suspended and resumed.
5900 5900 */
5901 5901 (void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
5902 5902 "pm-hardware-state", "needs-suspend-resume");
5903 5903
5904 5904 /*
5905 5905 * This complies with the new power management framework
5906 5906 * for certain desktop machines. Create the pm_components
5907 5907 * property as a string array property.
5908 5908 	 * If un_f_pm_supported is TRUE, it means the HBA to which the
5909 5909 	 * disk is attached has set the "pm-capable" property and the
5910 5910 	 * value of this property is greater than 0.
5911 5911 */
5912 5912 if (un->un_f_pm_supported) {
5913 5913 /*
5914 5914 		 * Not all devices have a motor, so try it first: some
5915 5915 		 * devices may return ILLEGAL REQUEST while others may
5916 5916 		 * hang.
5917 5917 		 * The following START_STOP_UNIT is used to check whether
5918 5918 		 * the target device has a motor.
5919 5919 */
5920 5920 un->un_f_start_stop_supported = TRUE;
5921 5921
5922 5922 if (un->un_f_power_condition_supported) {
5923 5923 rval = sd_send_scsi_START_STOP_UNIT(ssc,
5924 5924 SD_POWER_CONDITION, SD_TARGET_ACTIVE,
5925 5925 SD_PATH_DIRECT);
5926 5926 if (rval != 0) {
5927 5927 un->un_f_power_condition_supported = FALSE;
5928 5928 }
5929 5929 }
5930 5930 if (!un->un_f_power_condition_supported) {
5931 5931 rval = sd_send_scsi_START_STOP_UNIT(ssc,
5932 5932 SD_START_STOP, SD_TARGET_START, SD_PATH_DIRECT);
5933 5933 }
5934 5934 if (rval != 0) {
5935 5935 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
5936 5936 un->un_f_start_stop_supported = FALSE;
5937 5937 }
5938 5938
5939 5939 /*
5940 5940 		 * Create pm properties anyway, otherwise the parent can't
5941 5941 		 * go to sleep.
5942 5942 */
5943 5943 un->un_f_pm_is_enabled = TRUE;
5944 5944 (void) sd_create_pm_components(devi, un);
5945 5945
5946 5946 /*
5947 5947 * If it claims that log sense is supported, check it out.
5948 5948 */
5949 5949 if (un->un_f_log_sense_supported) {
5950 5950 rval = sd_log_page_supported(ssc,
5951 5951 START_STOP_CYCLE_PAGE);
5952 5952 if (rval == 1) {
5953 5953 /* Page found, use it. */
5954 5954 un->un_start_stop_cycle_page =
5955 5955 START_STOP_CYCLE_PAGE;
5956 5956 } else {
5957 5957 /*
5958 5958 * Page not found or log sense is not
5959 5959 * supported.
5960 5960 * Notice we do not check the old style
5961 5961 * START_STOP_CYCLE_VU_PAGE because this
5962 5962 * code path does not apply to old disks.
5963 5963 */
5964 5964 un->un_f_log_sense_supported = FALSE;
5965 5965 un->un_f_pm_log_sense_smart = FALSE;
5966 5966 }
5967 5967 }
5968 5968
5969 5969 return;
5970 5970 }
5971 5971
5972 5972 /*
5973 5973 	 * For a disk whose attached HBA has not set the "pm-capable"
5974 5974 	 * property, check whether it supports power management.
5975 5975 */
5976 5976 if (!un->un_f_log_sense_supported) {
5977 5977 un->un_power_level = SD_SPINDLE_ON;
5978 5978 un->un_f_pm_is_enabled = FALSE;
5979 5979 return;
5980 5980 }
5981 5981
5982 5982 rval = sd_log_page_supported(ssc, START_STOP_CYCLE_PAGE);
5983 5983
5984 5984 #ifdef SDDEBUG
5985 5985 if (sd_force_pm_supported) {
5986 5986 /* Force a successful result */
5987 5987 rval = 1;
5988 5988 }
5989 5989 #endif
5990 5990
5991 5991 /*
5992 5992 * If the start-stop cycle counter log page is not supported
5993 5993 	 * or if the pm-capable property is set to false (0),
5994 5994 * then we should not create the pm_components property.
5995 5995 */
5996 5996 if (rval == -1) {
5997 5997 /*
5998 5998 * Error.
5999 5999 		 * Reading log sense failed, most likely because this is
6000 6000 		 * an older drive that does not support log sense.
6001 6001 		 * When this happens, auto-pm is not supported.
6002 6002 */
6003 6003 un->un_power_level = SD_SPINDLE_ON;
6004 6004 un->un_f_pm_is_enabled = FALSE;
6005 6005
6006 6006 } else if (rval == 0) {
6007 6007 /*
6008 6008 * Page not found.
6009 6009 		 * The start/stop cycle counter is implemented as page
6010 6010 		 * START_STOP_CYCLE_VU_PAGE (0x31) in older disks. For
6011 6011 		 * newer disks it is implemented as START_STOP_CYCLE_PAGE (0xE).
6012 6012 */
6013 6013 if (sd_log_page_supported(ssc, START_STOP_CYCLE_VU_PAGE) == 1) {
6014 6014 /*
6015 6015 * Page found, use this one.
6016 6016 */
6017 6017 un->un_start_stop_cycle_page = START_STOP_CYCLE_VU_PAGE;
6018 6018 un->un_f_pm_is_enabled = TRUE;
6019 6019 } else {
6020 6020 /*
6021 6021 * Error or page not found.
6022 6022 * auto-pm is not supported for this device.
6023 6023 */
6024 6024 un->un_power_level = SD_SPINDLE_ON;
6025 6025 un->un_f_pm_is_enabled = FALSE;
6026 6026 }
6027 6027 } else {
6028 6028 /*
6029 6029 * Page found, use it.
6030 6030 */
6031 6031 un->un_start_stop_cycle_page = START_STOP_CYCLE_PAGE;
6032 6032 un->un_f_pm_is_enabled = TRUE;
6033 6033 }
6034 6034
6035 6035
6036 6036 if (un->un_f_pm_is_enabled == TRUE) {
6037 6037 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
6038 6038 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
6039 6039
6040 6040 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
6041 6041 log_page_size, un->un_start_stop_cycle_page,
6042 6042 0x01, 0, SD_PATH_DIRECT);
6043 6043
6044 6044 if (rval != 0) {
6045 6045 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6046 6046 }
6047 6047
6048 6048 #ifdef SDDEBUG
6049 6049 if (sd_force_pm_supported) {
6050 6050 /* Force a successful result */
6051 6051 rval = 0;
6052 6052 }
6053 6053 #endif
6054 6054
6055 6055 /*
6056 6056 		 * If the Log Sense for the start/stop cycle counter page
6057 6057 * succeeds, then power management is supported and we can
6058 6058 * enable auto-pm.
6059 6059 */
6060 6060 if (rval == 0) {
6061 6061 (void) sd_create_pm_components(devi, un);
6062 6062 } else {
6063 6063 un->un_power_level = SD_SPINDLE_ON;
6064 6064 un->un_f_pm_is_enabled = FALSE;
6065 6065 }
6066 6066
6067 6067 kmem_free(log_page_data, log_page_size);
6068 6068 }
6069 6069 }
6070 6070
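/*
 * Condensed view of the enablement logic in sd_setup_pm() above (a
 * summary of the code, not additional behavior):
 *
 *	"pm-capable" set by HBA		-> pm-components created, PM enabled
 *	else log sense unsupported	-> PM disabled, spindle left on
 *	else page 0x0E supported	-> use it
 *	else page 0x31 (VU) supported	-> use it
 *	else				-> PM disabled, spindle left on
 *
 * When a page is found, a LOG SENSE of that page must also succeed for
 * auto-pm to remain enabled.
 */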
6071 6071
6072 6072 /*
6073 6073 * Function: sd_create_pm_components
6074 6074 *
6075 6075  * Description: Initialize PM properties.
6076 6076 *
6077 6077 * Context: Kernel thread context
6078 6078 */
6079 6079
6080 6080 static void
6081 6081 sd_create_pm_components(dev_info_t *devi, struct sd_lun *un)
6082 6082 {
6083 6083 ASSERT(!mutex_owned(SD_MUTEX(un)));
6084 6084
6085 6085 if (un->un_f_power_condition_supported) {
6086 6086 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
6087 6087 "pm-components", sd_pwr_pc.pm_comp, 5)
6088 6088 != DDI_PROP_SUCCESS) {
6089 6089 un->un_power_level = SD_SPINDLE_ACTIVE;
6090 6090 un->un_f_pm_is_enabled = FALSE;
6091 6091 return;
6092 6092 }
6093 6093 } else {
6094 6094 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, devi,
6095 6095 "pm-components", sd_pwr_ss.pm_comp, 3)
6096 6096 != DDI_PROP_SUCCESS) {
6097 6097 un->un_power_level = SD_SPINDLE_ON;
6098 6098 un->un_f_pm_is_enabled = FALSE;
6099 6099 return;
6100 6100 }
6101 6101 }
6102 6102 /*
6103 6103 	 * When components are initially created they are idle;
6104 6104 	 * power up any non-removables.
6105 6105 * Note: the return value of pm_raise_power can't be used
6106 6106 * for determining if PM should be enabled for this device.
6107 6107 * Even if you check the return values and remove this
6108 6108 * property created above, the PM framework will not honor the
6109 6109 * change after the first call to pm_raise_power. Hence,
6110 6110 * removal of that property does not help if pm_raise_power
6111 6111 * fails. In the case of removable media, the start/stop
6112 6112 * will fail if the media is not present.
6113 6113 */
6114 6114 if (un->un_f_attach_spinup && (pm_raise_power(SD_DEVINFO(un), 0,
6115 6115 SD_PM_STATE_ACTIVE(un)) == DDI_SUCCESS)) {
6116 6116 mutex_enter(SD_MUTEX(un));
6117 6117 un->un_power_level = SD_PM_STATE_ACTIVE(un);
6118 6118 mutex_enter(&un->un_pm_mutex);
6119 6119 /* Set to on and not busy. */
6120 6120 un->un_pm_count = 0;
6121 6121 } else {
6122 6122 mutex_enter(SD_MUTEX(un));
6123 6123 un->un_power_level = SD_PM_STATE_STOPPED(un);
6124 6124 mutex_enter(&un->un_pm_mutex);
6125 6125 /* Set to off. */
6126 6126 un->un_pm_count = -1;
6127 6127 }
6128 6128 mutex_exit(&un->un_pm_mutex);
6129 6129 mutex_exit(SD_MUTEX(un));
6130 6130 }
6131 6131
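/*
 * For context, the "pm-components" property set above is a string array
 * naming the device's power levels. Assuming definitions consistent with
 * their use above (5 strings for the power-condition form, 3 for the
 * start/stop form), the arrays would look like:
 *
 *	"NAME=spindle-motor", "0=stopped", "1=standby", "2=idle", "3=active"
 *	"NAME=spindle-motor", "0=off", "1=on"
 */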
6132 6132
6133 6133 /*
6134 6134 * Function: sd_ddi_suspend
6135 6135 *
6136 6136 * Description: Performs system power-down operations. This includes
6137 6137  *		setting the drive state to indicate it is suspended so
6138 6138 * that no new commands will be accepted. Also, wait for
6139 6139 * all commands that are in transport or queued to a timer
6140 6140 * for retry to complete. All timeout threads are cancelled.
6141 6141 *
6142 6142 * Return Code: DDI_FAILURE or DDI_SUCCESS
6143 6143 *
6144 6144 * Context: Kernel thread context
6145 6145 */
6146 6146
6147 6147 static int
6148 6148 sd_ddi_suspend(dev_info_t *devi)
6149 6149 {
6150 6150 struct sd_lun *un;
6151 6151 clock_t wait_cmds_complete;
6152 6152
6153 6153 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
6154 6154 if (un == NULL) {
6155 6155 return (DDI_FAILURE);
6156 6156 }
6157 6157
6158 6158 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: entry\n");
6159 6159
6160 6160 mutex_enter(SD_MUTEX(un));
6161 6161
6162 6162 /* Return success if the device is already suspended. */
6163 6163 if (un->un_state == SD_STATE_SUSPENDED) {
6164 6164 mutex_exit(SD_MUTEX(un));
6165 6165 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6166 6166 "device already suspended, exiting\n");
6167 6167 return (DDI_SUCCESS);
6168 6168 }
6169 6169
6170 6170 /* Return failure if the device is being used by HA */
6171 6171 if (un->un_resvd_status &
6172 6172 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE)) {
6173 6173 mutex_exit(SD_MUTEX(un));
6174 6174 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6175 6175 "device in use by HA, exiting\n");
6176 6176 return (DDI_FAILURE);
6177 6177 }
6178 6178
6179 6179 /*
6180 6180 * Return failure if the device is in a resource wait
6181 6181 * or power changing state.
6182 6182 */
6183 6183 if ((un->un_state == SD_STATE_RWAIT) ||
6184 6184 (un->un_state == SD_STATE_PM_CHANGING)) {
6185 6185 mutex_exit(SD_MUTEX(un));
6186 6186 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: "
6187 6187 "device in resource wait state, exiting\n");
6188 6188 return (DDI_FAILURE);
6189 6189 }
6190 6190
6191 6191
6192 6192 un->un_save_state = un->un_last_state;
6193 6193 New_state(un, SD_STATE_SUSPENDED);
6194 6194
6195 6195 /*
6196 6196 * Wait for all commands that are in transport or queued to a timer
6197 6197 * for retry to complete.
6198 6198 *
6199 6199 * While waiting, no new commands will be accepted or sent because of
6200 6200 * the new state we set above.
6201 6201 *
6202 6202 * Wait till current operation has completed. If we are in the resource
6203 6203 * wait state (with an intr outstanding) then we need to wait till the
6204 6204 * intr completes and starts the next cmd. We want to wait for
6205 6205 * SD_WAIT_CMDS_COMPLETE seconds before failing the DDI_SUSPEND.
6206 6206 */
6207 6207 wait_cmds_complete = ddi_get_lbolt() +
6208 6208 (sd_wait_cmds_complete * drv_usectohz(1000000));
6209 6209
6210 6210 while (un->un_ncmds_in_transport != 0) {
6211 6211 /*
6212 6212 * Fail if commands do not finish in the specified time.
6213 6213 */
6214 6214 if (cv_timedwait(&un->un_disk_busy_cv, SD_MUTEX(un),
6215 6215 wait_cmds_complete) == -1) {
6216 6216 /*
6217 6217 * Undo the state changes made above. Everything
6218 6218 			 * must go back to its original value.
6219 6219 */
6220 6220 Restore_state(un);
6221 6221 un->un_last_state = un->un_save_state;
6222 6222 /* Wake up any threads that might be waiting. */
6223 6223 cv_broadcast(&un->un_suspend_cv);
6224 6224 mutex_exit(SD_MUTEX(un));
6225 6225 SD_ERROR(SD_LOG_IO_PM, un,
6226 6226 "sd_ddi_suspend: failed due to outstanding cmds\n");
6227 6227 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exiting\n");
6228 6228 return (DDI_FAILURE);
6229 6229 }
6230 6230 }
6231 6231
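	/*
	 * Note: cv_timedwait(9F) takes an absolute deadline expressed in
	 * lbolt ticks, which is why wait_cmds_complete is computed once,
	 * above the loop, as ddi_get_lbolt() plus the timeout converted
	 * with drv_usectohz(); a return value of -1 means the deadline
	 * passed without a wakeup.
	 */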
6232 6232 /*
6233 6233 * Cancel SCSI watch thread and timeouts, if any are active
6234 6234 */
6235 6235
6236 6236 if (SD_OK_TO_SUSPEND_SCSI_WATCHER(un)) {
6237 6237 opaque_t temp_token = un->un_swr_token;
6238 6238 mutex_exit(SD_MUTEX(un));
6239 6239 scsi_watch_suspend(temp_token);
6240 6240 mutex_enter(SD_MUTEX(un));
6241 6241 }
6242 6242
6243 6243 if (un->un_reset_throttle_timeid != NULL) {
6244 6244 timeout_id_t temp_id = un->un_reset_throttle_timeid;
6245 6245 un->un_reset_throttle_timeid = NULL;
6246 6246 mutex_exit(SD_MUTEX(un));
6247 6247 (void) untimeout(temp_id);
6248 6248 mutex_enter(SD_MUTEX(un));
6249 6249 }
6250 6250
6251 6251 if (un->un_dcvb_timeid != NULL) {
6252 6252 timeout_id_t temp_id = un->un_dcvb_timeid;
6253 6253 un->un_dcvb_timeid = NULL;
6254 6254 mutex_exit(SD_MUTEX(un));
6255 6255 (void) untimeout(temp_id);
6256 6256 mutex_enter(SD_MUTEX(un));
6257 6257 }
6258 6258
6259 6259 mutex_enter(&un->un_pm_mutex);
6260 6260 if (un->un_pm_timeid != NULL) {
6261 6261 timeout_id_t temp_id = un->un_pm_timeid;
6262 6262 un->un_pm_timeid = NULL;
6263 6263 mutex_exit(&un->un_pm_mutex);
6264 6264 mutex_exit(SD_MUTEX(un));
6265 6265 (void) untimeout(temp_id);
6266 6266 mutex_enter(SD_MUTEX(un));
6267 6267 } else {
6268 6268 mutex_exit(&un->un_pm_mutex);
6269 6269 }
6270 6270
6271 6271 if (un->un_rmw_msg_timeid != NULL) {
6272 6272 timeout_id_t temp_id = un->un_rmw_msg_timeid;
6273 6273 un->un_rmw_msg_timeid = NULL;
6274 6274 mutex_exit(SD_MUTEX(un));
6275 6275 (void) untimeout(temp_id);
6276 6276 mutex_enter(SD_MUTEX(un));
6277 6277 }
6278 6278
6279 6279 if (un->un_retry_timeid != NULL) {
6280 6280 timeout_id_t temp_id = un->un_retry_timeid;
6281 6281 un->un_retry_timeid = NULL;
6282 6282 mutex_exit(SD_MUTEX(un));
6283 6283 (void) untimeout(temp_id);
6284 6284 mutex_enter(SD_MUTEX(un));
6285 6285
6286 6286 if (un->un_retry_bp != NULL) {
6287 6287 un->un_retry_bp->av_forw = un->un_waitq_headp;
6288 6288 un->un_waitq_headp = un->un_retry_bp;
6289 6289 if (un->un_waitq_tailp == NULL) {
6290 6290 un->un_waitq_tailp = un->un_retry_bp;
6291 6291 }
6292 6292 un->un_retry_bp = NULL;
6293 6293 un->un_retry_statp = NULL;
6294 6294 }
6295 6295 }
6296 6296
6297 6297 if (un->un_direct_priority_timeid != NULL) {
6298 6298 timeout_id_t temp_id = un->un_direct_priority_timeid;
6299 6299 un->un_direct_priority_timeid = NULL;
6300 6300 mutex_exit(SD_MUTEX(un));
6301 6301 (void) untimeout(temp_id);
6302 6302 mutex_enter(SD_MUTEX(un));
6303 6303 }
6304 6304
6305 6305 if (un->un_f_is_fibre == TRUE) {
6306 6306 /*
6307 6307 * Remove callbacks for insert and remove events
6308 6308 */
6309 6309 if (un->un_insert_event != NULL) {
6310 6310 mutex_exit(SD_MUTEX(un));
6311 6311 (void) ddi_remove_event_handler(un->un_insert_cb_id);
6312 6312 mutex_enter(SD_MUTEX(un));
6313 6313 un->un_insert_event = NULL;
6314 6314 }
6315 6315
6316 6316 if (un->un_remove_event != NULL) {
6317 6317 mutex_exit(SD_MUTEX(un));
6318 6318 (void) ddi_remove_event_handler(un->un_remove_cb_id);
6319 6319 mutex_enter(SD_MUTEX(un));
6320 6320 un->un_remove_event = NULL;
6321 6321 }
6322 6322 }
6323 6323
6324 6324 mutex_exit(SD_MUTEX(un));
6325 6325
6326 6326 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_suspend: exit\n");
6327 6327
6328 6328 return (DDI_SUCCESS);
6329 6329 }
6330 6330
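/*
 * The cancellation blocks above all follow one pattern: save the timeout
 * id, clear the field, drop SD_MUTEX, call untimeout(), then retake
 * SD_MUTEX. This is required because untimeout(9F) may block waiting for
 * a handler that is already running, and that handler may itself take
 * SD_MUTEX. A distilled sketch of the pattern (the field name here is
 * illustrative only):
 *
 *	if (un->un_some_timeid != NULL) {
 *		timeout_id_t temp_id = un->un_some_timeid;
 *		un->un_some_timeid = NULL;
 *		mutex_exit(SD_MUTEX(un));
 *		(void) untimeout(temp_id);
 *		mutex_enter(SD_MUTEX(un));
 *	}
 */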
6331 6331
6332 6332 /*
6333 6333 * Function: sd_ddi_resume
6334 6334 *
6335 6335  * Description: Performs system power-up operations.
6336 6336 *
6337 6337 * Return Code: DDI_SUCCESS
6338 6338 * DDI_FAILURE
6339 6339 *
6340 6340 * Context: Kernel thread context
6341 6341 */
6342 6342
6343 6343 static int
6344 6344 sd_ddi_resume(dev_info_t *devi)
6345 6345 {
6346 6346 struct sd_lun *un;
6347 6347
6348 6348 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
6349 6349 if (un == NULL) {
6350 6350 return (DDI_FAILURE);
6351 6351 }
6352 6352
6353 6353 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: entry\n");
6354 6354
6355 6355 mutex_enter(SD_MUTEX(un));
6356 6356 Restore_state(un);
6357 6357
6358 6358 /*
6359 6359 	 * Restore the state which was saved to give the
6360 6360 	 * right state in un_last_state.
6361 6361 */
6362 6362 un->un_last_state = un->un_save_state;
6363 6363 /*
6364 6364 * Note: throttle comes back at full.
6365 6365 * Also note: this MUST be done before calling pm_raise_power
6366 6366 * otherwise the system can get hung in biowait. The scenario where
6367 6367 * this'll happen is under cpr suspend. Writing of the system
6368 6368 * state goes through sddump, which writes 0 to un_throttle. If
6369 6369 * writing the system state then fails, example if the partition is
6370 6370 	 * writing the system state then fails, for example if the partition is
6371 6371 * from the saved value until after calling pm_raise_power then
6372 6372 * cmds sent in sdpower are not transported and sd_send_scsi_cmd hangs
6373 6373 * in biowait.
6374 6374 */
6375 6375 un->un_throttle = un->un_saved_throttle;
6376 6376
6377 6377 /*
6378 6378 	 * The chance of failure is very rare, as the only command done in the
6379 6379 	 * power entry point is the START command on a transition from 0->1 or
6380 6380 * unknown->1. Put it to SPINDLE ON state irrespective of the state at
6381 6381 * which suspend was done. Ignore the return value as the resume should
6382 6382 * not be failed. In the case of removable media the media need not be
6383 6383 * inserted and hence there is a chance that raise power will fail with
6384 6384 * media not present.
6385 6385 */
6386 6386 if (un->un_f_attach_spinup) {
6387 6387 mutex_exit(SD_MUTEX(un));
6388 6388 (void) pm_raise_power(SD_DEVINFO(un), 0,
6389 6389 SD_PM_STATE_ACTIVE(un));
6390 6390 mutex_enter(SD_MUTEX(un));
6391 6391 }
6392 6392
6393 6393 /*
6394 6394 * Don't broadcast to the suspend cv and therefore possibly
6395 6395 * start I/O until after power has been restored.
6396 6396 */
6397 6397 cv_broadcast(&un->un_suspend_cv);
6398 6398 cv_broadcast(&un->un_state_cv);
6399 6399
6400 6400 /* restart thread */
6401 6401 if (SD_OK_TO_RESUME_SCSI_WATCHER(un)) {
6402 6402 scsi_watch_resume(un->un_swr_token);
6403 6403 }
6404 6404
6405 6405 #if (defined(__fibre))
6406 6406 if (un->un_f_is_fibre == TRUE) {
6407 6407 /*
6408 6408 * Add callbacks for insert and remove events
6409 6409 */
6410 6410 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
6411 6411 sd_init_event_callbacks(un);
6412 6412 }
6413 6413 }
6414 6414 #endif
6415 6415
6416 6416 /*
6417 6417 * Transport any pending commands to the target.
6418 6418 *
6419 6419 	 * If this is a low-activity device, commands in queue will have to wait
6420 6420 * until new commands come in, which may take awhile. Also, we
6421 6421 * specifically don't check un_ncmds_in_transport because we know that
6422 6422 * there really are no commands in progress after the unit was
6423 6423 * suspended and we could have reached the throttle level, been
6424 6424 * suspended, and have no new commands coming in for awhile. Highly
6425 6425 * unlikely, but so is the low-activity disk scenario.
6426 6426 */
6427 6427 ddi_xbuf_dispatch(un->un_xbuf_attr);
6428 6428
6429 6429 sd_start_cmds(un, NULL);
6430 6430 mutex_exit(SD_MUTEX(un));
6431 6431
6432 6432 SD_TRACE(SD_LOG_IO_PM, un, "sd_ddi_resume: exit\n");
6433 6433
6434 6434 return (DDI_SUCCESS);
6435 6435 }
6436 6436
6437 6437
6438 6438 /*
6439 6439 * Function: sd_pm_state_change
6440 6440 *
6441 6441 * Description: Change the driver power state.
6442 6442 * Someone else is required to actually change the driver
6443 6443 * power level.
6444 6444 *
6445 6445 * Arguments: un - driver soft state (unit) structure
6446 6446 * level - the power level that is changed to
6447 6447 * flag - to decide how to change the power state
6448 6448 *
6449 6449 * Return Code: DDI_SUCCESS
6450 6450 *
6451 6451 * Context: Kernel thread context
6452 6452 */
6453 6453 static int
6454 6454 sd_pm_state_change(struct sd_lun *un, int level, int flag)
6455 6455 {
6456 6456 ASSERT(un != NULL);
6457 6457 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: entry\n");
6458 6458
6459 6459 ASSERT(!mutex_owned(SD_MUTEX(un)));
6460 6460 mutex_enter(SD_MUTEX(un));
6461 6461
6462 6462 if (flag == SD_PM_STATE_ROLLBACK || SD_PM_IS_IO_CAPABLE(un, level)) {
6463 6463 un->un_power_level = level;
6464 6464 ASSERT(!mutex_owned(&un->un_pm_mutex));
6465 6465 mutex_enter(&un->un_pm_mutex);
6466 6466 if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
6467 6467 un->un_pm_count++;
6468 6468 ASSERT(un->un_pm_count == 0);
6469 6469 }
6470 6470 mutex_exit(&un->un_pm_mutex);
6471 6471 } else {
6472 6472 /*
6473 6473 * Exit if power management is not enabled for this device,
6474 6474 * or if the device is being used by HA.
6475 6475 */
6476 6476 if ((un->un_f_pm_is_enabled == FALSE) || (un->un_resvd_status &
6477 6477 (SD_RESERVE | SD_WANT_RESERVE | SD_LOST_RESERVE))) {
6478 6478 mutex_exit(SD_MUTEX(un));
6479 6479 SD_TRACE(SD_LOG_POWER, un,
6480 6480 "sd_pm_state_change: exiting\n");
6481 6481 return (DDI_FAILURE);
6482 6482 }
6483 6483
6484 6484 SD_INFO(SD_LOG_POWER, un, "sd_pm_state_change: "
6485 6485 "un_ncmds_in_driver=%ld\n", un->un_ncmds_in_driver);
6486 6486
6487 6487 /*
6488 6488 		 * See if the device is not busy, i.e.:
6489 6489 * - we have no commands in the driver for this device
6490 6490 * - not waiting for resources
6491 6491 */
6492 6492 if ((un->un_ncmds_in_driver == 0) &&
6493 6493 (un->un_state != SD_STATE_RWAIT)) {
6494 6494 /*
6495 6495 * The device is not busy, so it is OK to go to low
6496 6496 * power state. Indicate low power, but rely on someone
6497 6497 * else to actually change it.
6498 6498 */
6499 6499 mutex_enter(&un->un_pm_mutex);
6500 6500 un->un_pm_count = -1;
6501 6501 mutex_exit(&un->un_pm_mutex);
6502 6502 un->un_power_level = level;
6503 6503 }
6504 6504 }
6505 6505
6506 6506 mutex_exit(SD_MUTEX(un));
6507 6507
6508 6508 SD_TRACE(SD_LOG_POWER, un, "sd_pm_state_change: exit\n");
6509 6509
6510 6510 return (DDI_SUCCESS);
6511 6511 }
6512 6512
6513 6513
6514 6514 /*
6515 6515 * Function: sd_pm_idletimeout_handler
6516 6516 *
6517 6517 * Description: A timer routine that's active only while a device is busy.
6518 6518 * The purpose is to extend slightly the pm framework's busy
6519 6519 * view of the device to prevent busy/idle thrashing for
6520 6520 * back-to-back commands. Do this by comparing the current time
6521 6521 * to the time at which the last command completed and when the
6522 6522 * difference is greater than sd_pm_idletime, call
6523 6523 * pm_idle_component. In addition to indicating idle to the pm
6524 6524 * framework, update the chain type to again use the internal pm
6525 6525 * layers of the driver.
6526 6526 *
6527 6527 * Arguments: arg - driver soft state (unit) structure
6528 6528 *
6529 6529 * Context: Executes in a timeout(9F) thread context
6530 6530 */
6531 6531
6532 6532 static void
6533 6533 sd_pm_idletimeout_handler(void *arg)
6534 6534 {
6535 6535 struct sd_lun *un = arg;
6536 6536
6537 6537 time_t now;
6538 6538
6539 6539 mutex_enter(&sd_detach_mutex);
6540 6540 if (un->un_detach_count != 0) {
6541 6541 /* Abort if the instance is detaching */
6542 6542 mutex_exit(&sd_detach_mutex);
6543 6543 return;
6544 6544 }
6545 6545 mutex_exit(&sd_detach_mutex);
6546 6546
6547 6547 now = ddi_get_time();
6548 6548 /*
6549 6549 * Grab both mutexes, in the proper order, since we're accessing
6550 6550 * both PM and softstate variables.
6551 6551 */
6552 6552 mutex_enter(SD_MUTEX(un));
6553 6553 mutex_enter(&un->un_pm_mutex);
6554 6554 if (((now - un->un_pm_idle_time) > sd_pm_idletime) &&
6555 6555 (un->un_ncmds_in_driver == 0) && (un->un_pm_count == 0)) {
6556 6556 /*
6557 6557 * Update the chain types.
6558 6558 		 * This takes effect on the next new command received.
6559 6559 */
6560 6560 if (un->un_f_non_devbsize_supported) {
6561 6561 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
6562 6562 } else {
6563 6563 un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
6564 6564 }
6565 6565 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
6566 6566
6567 6567 SD_TRACE(SD_LOG_IO_PM, un,
6568 6568 "sd_pm_idletimeout_handler: idling device\n");
6569 6569 (void) pm_idle_component(SD_DEVINFO(un), 0);
6570 6570 un->un_pm_idle_timeid = NULL;
6571 6571 } else {
6572 6572 un->un_pm_idle_timeid =
6573 6573 timeout(sd_pm_idletimeout_handler, un,
6574 6574 (drv_usectohz((clock_t)300000))); /* 300 ms. */
6575 6575 }
6576 6576 mutex_exit(&un->un_pm_mutex);
6577 6577 mutex_exit(SD_MUTEX(un));
6578 6578 }
6579 6579
6580 6580
6581 6581 /*
6582 6582 * Function: sd_pm_timeout_handler
6583 6583 *
6584 6584 * Description: Callback to tell framework we are idle.
6585 6585 *
6586 6586 * Context: timeout(9f) thread context.
6587 6587 */
6588 6588
6589 6589 static void
6590 6590 sd_pm_timeout_handler(void *arg)
6591 6591 {
6592 6592 struct sd_lun *un = arg;
6593 6593
6594 6594 (void) pm_idle_component(SD_DEVINFO(un), 0);
6595 6595 mutex_enter(&un->un_pm_mutex);
6596 6596 un->un_pm_timeid = NULL;
6597 6597 mutex_exit(&un->un_pm_mutex);
6598 6598 }
6599 6599
6600 6600
6601 6601 /*
6602 6602 * Function: sdpower
6603 6603 *
6604 6604 * Description: PM entry point.
6605 6605 *
6606 6606 * Return Code: DDI_SUCCESS
6607 6607 * DDI_FAILURE
6608 6608 *
6609 6609 * Context: Kernel thread context
6610 6610 */
6611 6611
6612 6612 static int
6613 6613 sdpower(dev_info_t *devi, int component, int level)
6614 6614 {
6615 6615 struct sd_lun *un;
6616 6616 int instance;
6617 6617 int rval = DDI_SUCCESS;
6618 6618 uint_t i, log_page_size, maxcycles, ncycles;
6619 6619 uchar_t *log_page_data;
6620 6620 int log_sense_page;
6621 6621 int medium_present;
6622 6622 time_t intvlp;
6623 6623 struct pm_trans_data sd_pm_tran_data;
6624 6624 uchar_t save_state;
6625 6625 int sval;
6626 6626 uchar_t state_before_pm;
6627 6627 int got_semaphore_here;
6628 6628 sd_ssc_t *ssc;
6629 6629 int last_power_level;
6630 6630
6631 6631 instance = ddi_get_instance(devi);
6632 6632
6633 6633 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
6634 6634 !SD_PM_IS_LEVEL_VALID(un, level) || component != 0) {
6635 6635 return (DDI_FAILURE);
6636 6636 }
6637 6637
6638 6638 ssc = sd_ssc_init(un);
6639 6639
6640 6640 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: entry, level = %d\n", level);
6641 6641
6642 6642 /*
6643 6643 * Must synchronize power down with close.
6644 6644 * Attempt to decrement/acquire the open/close semaphore,
6645 6645 * but do NOT wait on it. If it's not greater than zero,
6646 6646 	 * i.e. it can't be decremented without waiting, then
6647 6647 * someone else, either open or close, already has it
6648 6648 * and the try returns 0. Use that knowledge here to determine
6649 6649 * if it's OK to change the device power level.
6650 6650 	 * Also, only increment it on exit if it was decremented, i.e. gotten,
6651 6651 * here.
6652 6652 */
6653 6653 got_semaphore_here = sema_tryp(&un->un_semoclose);
6654 6654
6655 6655 mutex_enter(SD_MUTEX(un));
6656 6656
6657 6657 SD_INFO(SD_LOG_POWER, un, "sdpower: un_ncmds_in_driver = %ld\n",
6658 6658 un->un_ncmds_in_driver);
6659 6659
6660 6660 /*
6661 6661 	 * A non-zero un_ncmds_in_driver indicates that commands are
6662 6662 	 * already being processed in the driver; if the semaphore was
6663 6663 	 * not obtained here, an open or close is being processed. In
6664 6664 	 * either case, a request to go to a lower power level that
6665 6665 	 * can't perform I/O must not be honored while the device is
6666 6666 	 * busy, so return failure.
6667 6667 */
6668 6668 if ((!SD_PM_IS_IO_CAPABLE(un, level)) &&
6669 6669 ((un->un_ncmds_in_driver != 0) || (got_semaphore_here == 0))) {
6670 6670 mutex_exit(SD_MUTEX(un));
6671 6671
6672 6672 if (got_semaphore_here != 0) {
6673 6673 sema_v(&un->un_semoclose);
6674 6674 }
6675 6675 SD_TRACE(SD_LOG_IO_PM, un,
6676 6676 "sdpower: exit, device has queued cmds.\n");
6677 6677
6678 6678 goto sdpower_failed;
6679 6679 }
6680 6680
6681 6681 /*
6682 6682 	 * If the state is OFFLINE, the disk is completely dead. We
6683 6683 	 * change power levels by sending commands to the device, and
6684 6684 	 * any such command would fail anyway, so return here.
6685 6685 *
6686 6686 * Power changes to a device that's OFFLINE or SUSPENDED
6687 6687 * are not allowed.
6688 6688 */
6689 6689 if ((un->un_state == SD_STATE_OFFLINE) ||
6690 6690 (un->un_state == SD_STATE_SUSPENDED)) {
6691 6691 mutex_exit(SD_MUTEX(un));
6692 6692
6693 6693 if (got_semaphore_here != 0) {
6694 6694 sema_v(&un->un_semoclose);
6695 6695 }
6696 6696 SD_TRACE(SD_LOG_IO_PM, un,
6697 6697 "sdpower: exit, device is off-line.\n");
6698 6698
6699 6699 goto sdpower_failed;
6700 6700 }
6701 6701
6702 6702 /*
6703 6703 	 * Change the device's state to indicate its power level
6704 6704 * is being changed. Do this to prevent a power off in the
6705 6705 * middle of commands, which is especially bad on devices
6706 6706 * that are really powered off instead of just spun down.
6707 6707 */
6708 6708 state_before_pm = un->un_state;
6709 6709 un->un_state = SD_STATE_PM_CHANGING;
6710 6710
6711 6711 mutex_exit(SD_MUTEX(un));
6712 6712
6713 6713 /*
6714 6714 	 * If the log sense command is not supported, bypass the
6715 6715 	 * following checking; otherwise, check the log sense
6716 6716 * information for this device.
6717 6717 */
6718 6718 if (SD_PM_STOP_MOTOR_NEEDED(un, level) &&
6719 6719 un->un_f_log_sense_supported) {
6720 6720 /*
6721 6721 * Get the log sense information to understand whether the
6722 6722 		 * power cycle counts have gone beyond the threshold.
6723 6723 */
6724 6724 log_page_size = START_STOP_CYCLE_COUNTER_PAGE_SIZE;
6725 6725 log_page_data = kmem_zalloc(log_page_size, KM_SLEEP);
6726 6726
6727 6727 mutex_enter(SD_MUTEX(un));
6728 6728 log_sense_page = un->un_start_stop_cycle_page;
6729 6729 mutex_exit(SD_MUTEX(un));
6730 6730
6731 6731 rval = sd_send_scsi_LOG_SENSE(ssc, log_page_data,
6732 6732 log_page_size, log_sense_page, 0x01, 0, SD_PATH_DIRECT);
6733 6733
6734 6734 if (rval != 0) {
6735 6735 if (rval == EIO)
6736 6736 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
6737 6737 else
6738 6738 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6739 6739 }
6740 6740
6741 6741 #ifdef SDDEBUG
6742 6742 if (sd_force_pm_supported) {
6743 6743 /* Force a successful result */
6744 6744 rval = 0;
6745 6745 }
6746 6746 #endif
6747 6747 if (rval != 0) {
6748 6748 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
6749 6749 "Log Sense Failed\n");
6750 6750
6751 6751 kmem_free(log_page_data, log_page_size);
6752 6752 /* Cannot support power management on those drives */
6753 6753
6754 6754 if (got_semaphore_here != 0) {
6755 6755 sema_v(&un->un_semoclose);
6756 6756 }
6757 6757 /*
6758 6758 		 * On exit put the state back to its original value
6759 6759 * and broadcast to anyone waiting for the power
6760 6760 * change completion.
6761 6761 */
6762 6762 mutex_enter(SD_MUTEX(un));
6763 6763 un->un_state = state_before_pm;
6764 6764 cv_broadcast(&un->un_suspend_cv);
6765 6765 mutex_exit(SD_MUTEX(un));
6766 6766 SD_TRACE(SD_LOG_IO_PM, un,
6767 6767 "sdpower: exit, Log Sense Failed.\n");
6768 6768
6769 6769 goto sdpower_failed;
6770 6770 }
6771 6771
6772 6772 /*
6773 6773 		 * From the page data, convert the essential information to
6774 6774 		 * pm_trans_data.
6775 6775 */
6776 6776 maxcycles =
6777 6777 (log_page_data[0x1c] << 24) | (log_page_data[0x1d] << 16) |
6778 6778 (log_page_data[0x1E] << 8) | log_page_data[0x1F];
6779 6779
6780 6780 ncycles =
6781 6781 (log_page_data[0x24] << 24) | (log_page_data[0x25] << 16) |
6782 6782 (log_page_data[0x26] << 8) | log_page_data[0x27];
6783 6783
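		/*
		 * The shifts above assemble big-endian 32-bit values byte
		 * by byte, i.e. val = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3.
		 * Offsets 0x1c-0x1f and 0x24-0x27 hold the maximum and the
		 * accumulated start/stop cycle counts within this log page.
		 */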
6784 6784 if (un->un_f_pm_log_sense_smart) {
6785 6785 sd_pm_tran_data.un.smart_count.allowed = maxcycles;
6786 6786 sd_pm_tran_data.un.smart_count.consumed = ncycles;
6787 6787 sd_pm_tran_data.un.smart_count.flag = 0;
6788 6788 sd_pm_tran_data.format = DC_SMART_FORMAT;
6789 6789 } else {
6790 6790 sd_pm_tran_data.un.scsi_cycles.lifemax = maxcycles;
6791 6791 sd_pm_tran_data.un.scsi_cycles.ncycles = ncycles;
6792 6792 for (i = 0; i < DC_SCSI_MFR_LEN; i++) {
6793 6793 sd_pm_tran_data.un.scsi_cycles.svc_date[i] =
6794 6794 log_page_data[8+i];
6795 6795 }
6796 6796 sd_pm_tran_data.un.scsi_cycles.flag = 0;
6797 6797 sd_pm_tran_data.format = DC_SCSI_FORMAT;
6798 6798 }
6799 6799
6800 6800 kmem_free(log_page_data, log_page_size);
6801 6801
6802 6802 /*
6803 6803 		 * Call the pm_trans_check routine to get the OK from
6804 6804 		 * the global policy.
6805 6805 */
6806 6806 rval = pm_trans_check(&sd_pm_tran_data, &intvlp);
6807 6807 #ifdef SDDEBUG
6808 6808 if (sd_force_pm_supported) {
6809 6809 /* Force a successful result */
6810 6810 rval = 1;
6811 6811 }
6812 6812 #endif
6813 6813 switch (rval) {
6814 6814 case 0:
6815 6815 /*
6816 6816 			 * It is not OK to power cycle, or there was an error
6817 6817 			 * in the parameters passed; intvlp holds the advised
6818 6818 			 * time before a power cycle should be considered.
6819 6819 			 * Based on this intvlp parameter we are supposed to
6820 6820 			 * pretend we are busy, so that the pm framework will
6821 6821 			 * not call our power entry point; install a timeout
6822 6822 			 * handler and wait for the recommended time to elapse
6823 6823 			 * so that power management can be effective again.
6824 6824 *
6825 6825 * To effect this behavior, call pm_busy_component to
6826 6826 * indicate to the framework this device is busy.
6827 6827 * By not adjusting un_pm_count the rest of PM in
6828 6828 * the driver will function normally, and independent
6829 6829 * of this but because the framework is told the device
6830 6830 * is busy it won't attempt powering down until it gets
6831 6831 * a matching idle. The timeout handler sends this.
6832 6832 * Note: sd_pm_entry can't be called here to do this
6833 6833 * because sdpower may have been called as a result
6834 6834 * of a call to pm_raise_power from within sd_pm_entry.
6835 6835 *
6836 6836 * If a timeout handler is already active then
6837 6837 * don't install another.
6838 6838 */
6839 6839 mutex_enter(&un->un_pm_mutex);
6840 6840 if (un->un_pm_timeid == NULL) {
6841 6841 un->un_pm_timeid =
6842 6842 timeout(sd_pm_timeout_handler,
6843 6843 un, intvlp * drv_usectohz(1000000));
6844 6844 mutex_exit(&un->un_pm_mutex);
6845 6845 (void) pm_busy_component(SD_DEVINFO(un), 0);
6846 6846 } else {
6847 6847 mutex_exit(&un->un_pm_mutex);
6848 6848 }
6849 6849 if (got_semaphore_here != 0) {
6850 6850 sema_v(&un->un_semoclose);
6851 6851 }
6852 6852 /*
6853 6853 			 * On exit put the state back to its original value
6854 6854 * and broadcast to anyone waiting for the power
6855 6855 * change completion.
6856 6856 */
6857 6857 mutex_enter(SD_MUTEX(un));
6858 6858 un->un_state = state_before_pm;
6859 6859 cv_broadcast(&un->un_suspend_cv);
6860 6860 mutex_exit(SD_MUTEX(un));
6861 6861
6862 6862 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, "
6863 6863 "trans check Failed, not ok to power cycle.\n");
6864 6864
6865 6865 goto sdpower_failed;
6866 6866 case -1:
6867 6867 if (got_semaphore_here != 0) {
6868 6868 sema_v(&un->un_semoclose);
6869 6869 }
6870 6870 /*
6871 6871 			 * On exit put the state back to its original value
6872 6872 * and broadcast to anyone waiting for the power
6873 6873 * change completion.
6874 6874 */
6875 6875 mutex_enter(SD_MUTEX(un));
6876 6876 un->un_state = state_before_pm;
6877 6877 cv_broadcast(&un->un_suspend_cv);
6878 6878 mutex_exit(SD_MUTEX(un));
6879 6879 SD_TRACE(SD_LOG_IO_PM, un,
6880 6880 "sdpower: exit, trans check command Failed.\n");
6881 6881
6882 6882 goto sdpower_failed;
6883 6883 }
6884 6884 }
6885 6885
6886 6886 if (!SD_PM_IS_IO_CAPABLE(un, level)) {
6887 6887 /*
6888 6888 * Save the last state... if the STOP FAILS we need it
6889 6889 	 * for restoring.
6890 6890 */
6891 6891 mutex_enter(SD_MUTEX(un));
6892 6892 save_state = un->un_last_state;
6893 6893 last_power_level = un->un_power_level;
6894 6894 /*
6895 6895 	 * There must not be any cmds getting processed
6896 6896 * in the driver when we get here. Power to the
6897 6897 * device is potentially going off.
6898 6898 */
6899 6899 ASSERT(un->un_ncmds_in_driver == 0);
6900 6900 mutex_exit(SD_MUTEX(un));
6901 6901
6902 6902 /*
6903 6903 	 * For now, PM suspend the device completely before the spindle
6904 6904 	 * is turned off.
6905 6905 */
6906 6906 if ((rval = sd_pm_state_change(un, level, SD_PM_STATE_CHANGE))
6907 6907 == DDI_FAILURE) {
6908 6908 if (got_semaphore_here != 0) {
6909 6909 sema_v(&un->un_semoclose);
6910 6910 }
6911 6911 /*
6912 6912 			 * On exit put the state back to its original value
6913 6913 * and broadcast to anyone waiting for the power
6914 6914 * change completion.
6915 6915 */
6916 6916 mutex_enter(SD_MUTEX(un));
6917 6917 un->un_state = state_before_pm;
6918 6918 un->un_power_level = last_power_level;
6919 6919 cv_broadcast(&un->un_suspend_cv);
6920 6920 mutex_exit(SD_MUTEX(un));
6921 6921 SD_TRACE(SD_LOG_IO_PM, un,
6922 6922 "sdpower: exit, PM suspend Failed.\n");
6923 6923
6924 6924 goto sdpower_failed;
6925 6925 }
6926 6926 }
6927 6927
6928 6928 /*
6929 6929 * The transition from SPINDLE_OFF to SPINDLE_ON can happen in open,
6930 6930 	 * close, or strategy. Dump no longer uses this routine; it uses its
6931 6931 	 * own code so it can be done in polled mode.
6932 6932 */
6933 6933
6934 6934 medium_present = TRUE;
6935 6935
6936 6936 /*
6937 6937 * When powering up, issue a TUR in case the device is at unit
6938 6938 * attention. Don't do retries. Bypass the PM layer, otherwise
6939 6939 * a deadlock on un_pm_busy_cv will occur.
6940 6940 */
6941 6941 if (SD_PM_IS_IO_CAPABLE(un, level)) {
6942 6942 sval = sd_send_scsi_TEST_UNIT_READY(ssc,
6943 6943 SD_DONT_RETRY_TUR | SD_BYPASS_PM);
6944 6944 if (sval != 0)
6945 6945 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6946 6946 }
6947 6947
6948 6948 if (un->un_f_power_condition_supported) {
6949 6949 char *pm_condition_name[] = {"STOPPED", "STANDBY",
6950 6950 "IDLE", "ACTIVE"};
6951 6951 SD_TRACE(SD_LOG_IO_PM, un,
6952 6952 "sdpower: sending \'%s\' power condition",
6953 6953 pm_condition_name[level]);
6954 6954 sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION,
6955 6955 sd_pl2pc[level], SD_PATH_DIRECT);
6956 6956 } else {
6957 6957 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: sending \'%s\' unit\n",
6958 6958 ((level == SD_SPINDLE_ON) ? "START" : "STOP"));
6959 6959 sval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
6960 6960 ((level == SD_SPINDLE_ON) ? SD_TARGET_START :
6961 6961 SD_TARGET_STOP), SD_PATH_DIRECT);
6962 6962 }
6963 6963 if (sval != 0) {
6964 6964 if (sval == EIO)
6965 6965 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
6966 6966 else
6967 6967 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
6968 6968 }
6969 6969
6970 6970 /* Command failed, check for media present. */
6971 6971 if ((sval == ENXIO) && un->un_f_has_removable_media) {
6972 6972 medium_present = FALSE;
6973 6973 }
6974 6974
6975 6975 /*
6976 6976 * The conditions of interest here are:
6977 6977 * if a spindle off with media present fails,
6978 6978 * then restore the state and return an error.
6979 6979 * else if a spindle on fails,
6980 6980 * then return an error (there's no state to restore).
6981 6981 * In all other cases we setup for the new state
6982 6982 * and return success.
6983 6983 */
6984 6984 if (!SD_PM_IS_IO_CAPABLE(un, level)) {
6985 6985 if ((medium_present == TRUE) && (sval != 0)) {
6986 6986 /* The stop command from above failed */
6987 6987 rval = DDI_FAILURE;
6988 6988 /*
6989 6989 * The stop command failed, and we have media
6990 6990 			 * present. Put the level back by calling
6991 6991 			 * sd_pm_state_change() with the rollback flag and
6992 6992 			 * set the state back to its previous value.
6993 6993 */
6994 6994 (void) sd_pm_state_change(un, last_power_level,
6995 6995 SD_PM_STATE_ROLLBACK);
6996 6996 mutex_enter(SD_MUTEX(un));
6997 6997 un->un_last_state = save_state;
6998 6998 mutex_exit(SD_MUTEX(un));
6999 6999 } else if (un->un_f_monitor_media_state) {
7000 7000 /*
7001 7001 * The stop command from above succeeded.
7002 7002 * Terminate watch thread in case of removable media
7003 7003 * devices going into low power state. This is as per
7004 7004 			 * the requirements of the pm framework; otherwise commands
7005 7005 * will be generated for the device (through watch
7006 7006 * thread), even when the device is in low power state.
7007 7007 */
7008 7008 mutex_enter(SD_MUTEX(un));
7009 7009 un->un_f_watcht_stopped = FALSE;
7010 7010 if (un->un_swr_token != NULL) {
7011 7011 opaque_t temp_token = un->un_swr_token;
7012 7012 un->un_f_watcht_stopped = TRUE;
7013 7013 un->un_swr_token = NULL;
7014 7014 mutex_exit(SD_MUTEX(un));
7015 7015 (void) scsi_watch_request_terminate(temp_token,
7016 7016 SCSI_WATCH_TERMINATE_ALL_WAIT);
7017 7017 } else {
7018 7018 mutex_exit(SD_MUTEX(un));
7019 7019 }
7020 7020 }
7021 7021 } else {
7022 7022 /*
7023 7023 * The level requested is I/O capable.
7024 7024 * Legacy behavior: return success on a failed spinup
7025 7025 * if there is no media in the drive.
7026 7026 * Do this by looking at medium_present here.
7027 7027 */
7028 7028 if ((sval != 0) && medium_present) {
7029 7029 /* The start command from above failed */
7030 7030 rval = DDI_FAILURE;
7031 7031 } else {
7032 7032 /*
7033 7033 			 * The start command from above succeeded.
7034 7034 			 * PM resume the device now that we have
7035 7035 			 * started the disk.
7036 7036 */
7037 7037 (void) sd_pm_state_change(un, level,
7038 7038 SD_PM_STATE_CHANGE);
7039 7039
7040 7040 /*
7041 7041 * Resume the watch thread since it was suspended
7042 7042 * when the device went into low power mode.
7043 7043 */
7044 7044 if (un->un_f_monitor_media_state) {
7045 7045 mutex_enter(SD_MUTEX(un));
7046 7046 if (un->un_f_watcht_stopped == TRUE) {
7047 7047 opaque_t temp_token;
7048 7048
7049 7049 un->un_f_watcht_stopped = FALSE;
7050 7050 mutex_exit(SD_MUTEX(un));
7051 7051 temp_token =
7052 7052 sd_watch_request_submit(un);
7053 7053 mutex_enter(SD_MUTEX(un));
7054 7054 un->un_swr_token = temp_token;
7055 7055 }
7056 7056 mutex_exit(SD_MUTEX(un));
7057 7057 }
7058 7058 }
7059 7059 }
7060 7060
7061 7061 if (got_semaphore_here != 0) {
7062 7062 sema_v(&un->un_semoclose);
7063 7063 }
7064 7064 /*
7065 7065 	 * On exit put the state back to its original value
7066 7066 * and broadcast to anyone waiting for the power
7067 7067 * change completion.
7068 7068 */
7069 7069 mutex_enter(SD_MUTEX(un));
7070 7070 un->un_state = state_before_pm;
7071 7071 cv_broadcast(&un->un_suspend_cv);
7072 7072 mutex_exit(SD_MUTEX(un));
7073 7073
7074 7074 SD_TRACE(SD_LOG_IO_PM, un, "sdpower: exit, status = 0x%x\n", rval);
7075 7075
7076 7076 sd_ssc_fini(ssc);
7077 7077 return (rval);
7078 7078
7079 7079 sdpower_failed:
7080 7080
7081 7081 sd_ssc_fini(ssc);
7082 7082 return (DDI_FAILURE);
7083 7083 }
7084 7084
7085 7085
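/*
 * Shape of the open/close interlock used throughout sdpower() above, as
 * an illustrative sketch (surrounding logic elided; sema_tryp(9F) returns
 * nonzero only when the semaphore could be taken without blocking):
 *
 *	int got = sema_tryp(&un->un_semoclose);
 *
 *	if (got == 0 && !SD_PM_IS_IO_CAPABLE(un, level))
 *		goto sdpower_failed;
 *	...
 *	if (got != 0)
 *		sema_v(&un->un_semoclose);
 */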
7086 7086
7087 7087 /*
7088 7088 * Function: sdattach
7089 7089 *
7090 7090 * Description: Driver's attach(9e) entry point function.
7091 7091 *
7092 7092 * Arguments: devi - opaque device info handle
7093 7093 * cmd - attach type
7094 7094 *
7095 7095 * Return Code: DDI_SUCCESS
7096 7096 * DDI_FAILURE
7097 7097 *
7098 7098 * Context: Kernel thread context
7099 7099 */
7100 7100
7101 7101 static int
7102 7102 sdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
7103 7103 {
7104 7104 switch (cmd) {
7105 7105 case DDI_ATTACH:
7106 7106 return (sd_unit_attach(devi));
7107 7107 case DDI_RESUME:
7108 7108 return (sd_ddi_resume(devi));
7109 7109 default:
7110 7110 break;
7111 7111 }
7112 7112 return (DDI_FAILURE);
7113 7113 }
7114 7114
7115 7115
7116 7116 /*
7117 7117 * Function: sddetach
7118 7118 *
7119 7119 * Description: Driver's detach(9E) entry point function.
7120 7120 *
7121 7121 * Arguments: devi - opaque device info handle
7122 7122 * cmd - detach type
7123 7123 *
7124 7124 * Return Code: DDI_SUCCESS
7125 7125 * DDI_FAILURE
7126 7126 *
7127 7127 * Context: Kernel thread context
7128 7128 */
7129 7129
7130 7130 static int
7131 7131 sddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
7132 7132 {
7133 7133 switch (cmd) {
7134 7134 case DDI_DETACH:
7135 7135 return (sd_unit_detach(devi));
7136 7136 case DDI_SUSPEND:
7137 7137 return (sd_ddi_suspend(devi));
7138 7138 default:
7139 7139 break;
7140 7140 }
7141 7141 return (DDI_FAILURE);
7142 7142 }
7143 7143
7144 7144
7145 7145 /*
7146 7146 * Function: sd_sync_with_callback
7147 7147 *
7148 7148 * Description: Prevents sd_unit_attach or sd_unit_detach from freeing the soft
7149 7149 * state while the callback routine is active.
7150 7150 *
7151 7151 * Arguments: un: softstate structure for the instance
7152 7152 *
7153 7153 * Context: Kernel thread context
7154 7154 */
7155 7155
7156 7156 static void
7157 7157 sd_sync_with_callback(struct sd_lun *un)
7158 7158 {
7159 7159 ASSERT(un != NULL);
7160 7160
7161 7161 mutex_enter(SD_MUTEX(un));
7162 7162
7163 7163 ASSERT(un->un_in_callback >= 0);
7164 7164
7165 7165 while (un->un_in_callback > 0) {
7166 7166 mutex_exit(SD_MUTEX(un));
7167 7167 delay(2);
7168 7168 mutex_enter(SD_MUTEX(un));
7169 7169 }
7170 7170
7171 7171 mutex_exit(SD_MUTEX(un));
7172 7172 }
7173 7173
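/*
 * Note on the loop above: delay(9F) takes clock ticks, so delay(2) parks
 * the caller for two ticks between polls of un_in_callback; SD_MUTEX is
 * dropped first so the callback routine itself can make progress.
 */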
7174 7174 /*
7175 7175 * Function: sd_unit_attach
7176 7176 *
7177 7177 * Description: Performs DDI_ATTACH processing for sdattach(). Allocates
7178 7178 * the soft state structure for the device and performs
7179 7179 * all necessary structure and device initializations.
7180 7180 *
7181 7181 * Arguments: devi: the system's dev_info_t for the device.
7182 7182 *
7183 7183 * Return Code: DDI_SUCCESS if attach is successful.
7184 7184 * DDI_FAILURE if any part of the attach fails.
7185 7185 *
7186 7186 * Context: Called at attach(9e) time for the DDI_ATTACH flag.
7187 7187 * Kernel thread context only. Can sleep.
7188 7188 */
7189 7189
7190 7190 static int
7191 7191 sd_unit_attach(dev_info_t *devi)
7192 7192 {
7193 7193 struct scsi_device *devp;
7194 7194 struct sd_lun *un;
7195 7195 char *variantp;
7196 7196 char name_str[48];
7197 7197 int reservation_flag = SD_TARGET_IS_UNRESERVED;
7198 7198 int instance;
7199 7199 int rval;
7200 7200 int wc_enabled;
7201 7201 int tgt;
7202 7202 uint64_t capacity;
7203 7203 uint_t lbasize = 0;
7204 7204 dev_info_t *pdip = ddi_get_parent(devi);
7205 7205 int offbyone = 0;
7206 7206 int geom_label_valid = 0;
7207 7207 sd_ssc_t *ssc;
7208 7208 int status;
7209 7209 struct sd_fm_internal *sfip = NULL;
7210 7210 int max_xfer_size;
7211 7211
7212 7212 /*
7213 7213 * Retrieve the target driver's private data area. This was set
7214 7214 * up by the HBA.
7215 7215 */
7216 7216 devp = ddi_get_driver_private(devi);
7217 7217
7218 7218 /*
7219 7219 * Retrieve the target ID of the device.
7220 7220 */
7221 7221 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
7222 7222 SCSI_ADDR_PROP_TARGET, -1);
7223 7223
7224 7224 /*
7225 7225 * Since we have no idea what state things were left in by the last
7226 7226 	 * user of the device, set up some 'default' settings, i.e. turn them
7227 7227 * off. The scsi_ifsetcap calls force re-negotiations with the drive.
7228 7228 * Do this before the scsi_probe, which sends an inquiry.
7229 7229 * This is a fix for bug (4430280).
7230 7230 * Of special importance is wide-xfer. The drive could have been left
7231 7231 * in wide transfer mode by the last driver to communicate with it,
7232 7232 * this includes us. If that's the case, and if the following is not
7233 7233 * setup properly or we don't re-negotiate with the drive prior to
7234 7234 * transferring data to/from the drive, it causes bus parity errors,
7235 7235 * data overruns, and unexpected interrupts. This first occurred when
7236 7236 * the fix for bug (4378686) was made.
7237 7237 */
7238 7238 (void) scsi_ifsetcap(&devp->sd_address, "lun-reset", 0, 1);
7239 7239 (void) scsi_ifsetcap(&devp->sd_address, "wide-xfer", 0, 1);
7240 7240 (void) scsi_ifsetcap(&devp->sd_address, "auto-rqsense", 0, 1);
7241 7241
7242 7242 /*
7243 7243 * Currently, scsi_ifsetcap sets tagged-qing capability for all LUNs
7244 7244 * on a target. Setting it per lun instance actually sets the
7245 7245 * capability of this target, which affects those luns already
7246 7246 	 * attached on the same target. So during attach, we can disable
7247 7247 * this capability only when no other lun has been attached on this
7248 7248 * target. By doing this, we assume a target has the same tagged-qing
7249 7249 * capability for every lun. The condition can be removed when HBA
7250 7250 * is changed to support per lun based tagged-qing capability.
7251 7251 */
7252 7252 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
7253 7253 (void) scsi_ifsetcap(&devp->sd_address, "tagged-qing", 0, 1);
7254 7254 }
7255 7255
7256 7256 /*
7257 7257 * Use scsi_probe() to issue an INQUIRY command to the device.
7258 7258 * This call will allocate and fill in the scsi_inquiry structure
7259 7259 * and point the sd_inq member of the scsi_device structure to it.
7260 7260 * If the attach succeeds, then this memory will not be de-allocated
7261 7261 * (via scsi_unprobe()) until the instance is detached.
7262 7262 */
7263 7263 if (scsi_probe(devp, SLEEP_FUNC) != SCSIPROBE_EXISTS) {
7264 7264 goto probe_failed;
7265 7265 }
7266 7266
7267 7267 /*
7268 7268 * Check the device type as specified in the inquiry data and
7269 7269 * claim it if it is of a type that we support.
7270 7270 */
7271 7271 switch (devp->sd_inq->inq_dtype) {
7272 7272 case DTYPE_DIRECT:
7273 7273 break;
7274 7274 case DTYPE_RODIRECT:
7275 7275 break;
7276 7276 case DTYPE_OPTICAL:
7277 7277 break;
7278 7278 case DTYPE_NOTPRESENT:
7279 7279 default:
7280 7280 /* Unsupported device type; fail the attach. */
7281 7281 goto probe_failed;
7282 7282 }
7283 7283
7284 7284 /*
7285 7285 * Allocate the soft state structure for this unit.
7286 7286 *
7287 7287 * We rely upon this memory being set to all zeroes by
7288 7288 * ddi_soft_state_zalloc(). We assume that any member of the
7289 7289 * soft state structure that is not explicitly initialized by
7290 7290 * this routine will have a value of zero.
7291 7291 */
7292 7292 instance = ddi_get_instance(devp->sd_dev);
7293 7293 #ifndef XPV_HVM_DRIVER
7294 7294 if (ddi_soft_state_zalloc(sd_state, instance) != DDI_SUCCESS) {
7295 7295 goto probe_failed;
7296 7296 }
7297 7297 #endif /* !XPV_HVM_DRIVER */
7298 7298
7299 7299 /*
7300 7300 * Retrieve a pointer to the newly-allocated soft state.
7301 7301 *
7302 7302 * This should NEVER fail if the ddi_soft_state_zalloc() call above
7303 7303 * was successful, unless something has gone horribly wrong and the
7304 7304 * ddi's soft state internals are corrupt (in which case it is
7305 7305 	 * probably better to halt here than just fail the attach.)
7306 7306 */
7307 7307 if ((un = ddi_get_soft_state(sd_state, instance)) == NULL) {
7308 7308 panic("sd_unit_attach: NULL soft state on instance:0x%x",
7309 7309 instance);
7310 7310 /*NOTREACHED*/
7311 7311 }
7312 7312
7313 7313 /*
7314 7314 * Link the back ptr of the driver soft state to the scsi_device
7315 7315 * struct for this lun.
7316 7316 * Save a pointer to the softstate in the driver-private area of
7317 7317 * the scsi_device struct.
7318 7318 * Note: We cannot call SD_INFO, SD_TRACE, SD_ERROR, or SD_DIAG until
7319 7319 * we first set un->un_sd below.
7320 7320 */
7321 7321 un->un_sd = devp;
7322 7322 devp->sd_private = (opaque_t)un;
7323 7323
7324 7324 /*
7325 7325 * The following must be after devp is stored in the soft state struct.
7326 7326 */
7327 7327 #ifdef SDDEBUG
7328 7328 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7329 7329 "%s_unit_attach: un:0x%p instance:%d\n",
7330 7330 ddi_driver_name(devi), un, instance);
7331 7331 #endif
7332 7332
7333 7333 /*
7334 7334 * Set up the device type and node type (for the minor nodes).
7335 7335 * By default we assume that the device can at least support the
7336 7336 * Common Command Set. Call it a CD-ROM if it reports itself
7337 7337 * as a RODIRECT device.
7338 7338 */
7339 7339 switch (devp->sd_inq->inq_dtype) {
7340 7340 case DTYPE_RODIRECT:
7341 7341 un->un_node_type = DDI_NT_CD_CHAN;
7342 7342 un->un_ctype = CTYPE_CDROM;
7343 7343 break;
7344 7344 case DTYPE_OPTICAL:
7345 7345 un->un_node_type = DDI_NT_BLOCK_CHAN;
7346 7346 un->un_ctype = CTYPE_ROD;
7347 7347 break;
7348 7348 default:
7349 7349 un->un_node_type = DDI_NT_BLOCK_CHAN;
7350 7350 un->un_ctype = CTYPE_CCS;
7351 7351 break;
7352 7352 }
7353 7353
7354 7354 /*
7355 7355 * Try to read the interconnect type from the HBA.
7356 7356 *
7357 7357 * Note: This driver is currently compiled as two binaries, a parallel
7358 7358 * scsi version (sd) and a fibre channel version (ssd). All functional
7359 7359 * differences are determined at compile time. In the future a single
7360 7360 * binary will be provided and the interconnect type will be used to
7361 7361 * differentiate between fibre and parallel scsi behaviors. At that time
7362 7362 * it will be necessary for all fibre channel HBAs to support this
7363 7363 * property.
7364 7364 *
7365 7365 	 * Set un_f_is_fibre to TRUE (default fibre).
7366 7366 */
7367 7367 un->un_f_is_fibre = TRUE;
7368 7368 switch (scsi_ifgetcap(SD_ADDRESS(un), "interconnect-type", -1)) {
7369 7369 case INTERCONNECT_SSA:
7370 7370 un->un_interconnect_type = SD_INTERCONNECT_SSA;
7371 7371 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7372 7372 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SSA\n", un);
7373 7373 break;
7374 7374 case INTERCONNECT_PARALLEL:
7375 7375 un->un_f_is_fibre = FALSE;
7376 7376 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
7377 7377 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7378 7378 "sd_unit_attach: un:0x%p SD_INTERCONNECT_PARALLEL\n", un);
7379 7379 break;
7380 7380 case INTERCONNECT_SAS:
7381 7381 un->un_f_is_fibre = FALSE;
7382 7382 un->un_interconnect_type = SD_INTERCONNECT_SAS;
7383 7383 un->un_node_type = DDI_NT_BLOCK_SAS;
7384 7384 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7385 7385 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SAS\n", un);
7386 7386 break;
7387 7387 case INTERCONNECT_SATA:
7388 7388 un->un_f_is_fibre = FALSE;
7389 7389 un->un_interconnect_type = SD_INTERCONNECT_SATA;
7390 7390 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7391 7391 "sd_unit_attach: un:0x%p SD_INTERCONNECT_SATA\n", un);
7392 7392 break;
7393 7393 case INTERCONNECT_FIBRE:
7394 7394 un->un_interconnect_type = SD_INTERCONNECT_FIBRE;
7395 7395 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7396 7396 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FIBRE\n", un);
7397 7397 break;
7398 7398 case INTERCONNECT_FABRIC:
7399 7399 un->un_interconnect_type = SD_INTERCONNECT_FABRIC;
7400 7400 un->un_node_type = DDI_NT_BLOCK_FABRIC;
7401 7401 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7402 7402 "sd_unit_attach: un:0x%p SD_INTERCONNECT_FABRIC\n", un);
7403 7403 break;
7404 7404 default:
7405 7405 #ifdef SD_DEFAULT_INTERCONNECT_TYPE
7406 7406 /*
7407 7407 * The HBA does not support the "interconnect-type" property
7408 7408 * (or did not provide a recognized type).
7409 7409 *
7410 7410 * Note: This will be obsoleted when a single fibre channel
7411 7411 * and parallel scsi driver is delivered. In the meantime the
7412 7412 	 * interconnect type will be set to the platform default. If that
7413 7413 * type is not parallel SCSI, it means that we should be
7414 7414 * assuming "ssd" semantics. However, here this also means that
7415 7415 * the FC HBA is not supporting the "interconnect-type" property
7416 7416 * like we expect it to, so log this occurrence.
7417 7417 */
7418 7418 un->un_interconnect_type = SD_DEFAULT_INTERCONNECT_TYPE;
7419 7419 if (!SD_IS_PARALLEL_SCSI(un)) {
7420 7420 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7421 7421 "sd_unit_attach: un:0x%p Assuming "
7422 7422 "INTERCONNECT_FIBRE\n", un);
7423 7423 } else {
7424 7424 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7425 7425 "sd_unit_attach: un:0x%p Assuming "
7426 7426 "INTERCONNECT_PARALLEL\n", un);
7427 7427 un->un_f_is_fibre = FALSE;
7428 7428 }
7429 7429 #else
7430 7430 /*
7431 7431 * Note: This source will be implemented when a single fibre
7432 7432 * channel and parallel scsi driver is delivered. The default
7433 7433 * will be to assume that if a device does not support the
7434 7434 * "interconnect-type" property it is a parallel SCSI HBA and
7435 7435 * we will set the interconnect type for parallel scsi.
7436 7436 */
7437 7437 un->un_interconnect_type = SD_INTERCONNECT_PARALLEL;
7438 7438 un->un_f_is_fibre = FALSE;
7439 7439 #endif
7440 7440 break;
7441 7441 }
7442 7442
7443 7443 if (un->un_f_is_fibre == TRUE) {
7444 7444 if (scsi_ifgetcap(SD_ADDRESS(un), "scsi-version", 1) ==
7445 7445 SCSI_VERSION_3) {
7446 7446 switch (un->un_interconnect_type) {
7447 7447 case SD_INTERCONNECT_FIBRE:
7448 7448 case SD_INTERCONNECT_SSA:
7449 7449 un->un_node_type = DDI_NT_BLOCK_WWN;
7450 7450 break;
7451 7451 default:
7452 7452 break;
7453 7453 }
7454 7454 }
7455 7455 }
7456 7456
7457 7457 /*
7458 7458 * Initialize the Request Sense command for the target
7459 7459 */
7460 7460 if (sd_alloc_rqs(devp, un) != DDI_SUCCESS) {
7461 7461 goto alloc_rqs_failed;
7462 7462 }
7463 7463
7464 7464 /*
7465 7465 * Set un_retry_count to SD_RETRY_COUNT; this is OK for SPARC,
7466 7466 * which has separate binaries for sd and ssd.
7467 7467 *
7468 7468 * x86 has one binary, so un_retry_count is set based on the
7469 7469 * connection type. These hardcoded values will go away when
7470 7470 * SPARC uses one binary for sd and ssd, and they need to match
7471 7471 * SD_RETRY_COUNT in sddef.h.
7472 7472 * The value used is based on the interconnect type:
7473 7473 * fibre = 3, parallel = 5.
7474 7474 */
7475 7475 #if defined(__i386) || defined(__amd64)
7476 7476 un->un_retry_count = un->un_f_is_fibre ? 3 : 5;
7477 7477 #else
7478 7478 un->un_retry_count = SD_RETRY_COUNT;
7479 7479 #endif
7480 7480
7481 7481 /*
7482 7482 * Set the per disk retry count to the default number of retries
7483 7483 * for disks and CDROMs. This value can be overridden by the
7484 7484 * disk property list or an entry in sd.conf.
7485 7485 */
7486 7486 un->un_notready_retry_count =
7487 7487 ISCD(un) ? CD_NOT_READY_RETRY_COUNT(un)
7488 7488 : DISK_NOT_READY_RETRY_COUNT(un);
7489 7489
7490 7490 /*
7491 7491 * Set the busy retry count to the default value of un_retry_count.
7492 7492 * This can be overridden by entries in sd.conf or the device
7493 7493 * config table.
7494 7494 */
7495 7495 un->un_busy_retry_count = un->un_retry_count;
7496 7496
7497 7497 /*
7498 7498 * Init the reset threshold for retries. This number determines
7499 7499 * how many retries must be performed before a reset can be issued
7500 7500 * (for certain error conditions). This can be overridden by entries
7501 7501 * in sd.conf or the device config table.
7502 7502 */
7503 7503 un->un_reset_retry_count = (un->un_retry_count / 2);
7504 7504
7505 7505 /*
7506 7506 * Set the victim_retry_count to the default un_retry_count
7507 7507 */
7508 7508 un->un_victim_retry_count = (2 * un->un_retry_count);
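Reviewer note: a short worked example of the retry cascade above, assuming the x86 parallel-SCSI default of un_retry_count = 5 (illustrative only, not part of the change):

    /*
     * Derived retry values for un_retry_count = 5:
     *
     *   un_busy_retry_count   = 5            (same as un_retry_count)
     *   un_reset_retry_count  = 5 / 2 = 2    (retries before a reset)
     *   un_victim_retry_count = 2 * 5 = 10
     */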
7509 7509
7510 7510 /*
7511 7511 * Set the reservation release timeout to the default value of
7512 7512 * 5 seconds. This can be overridden by entries in ssd.conf or the
7513 7513 * device config table.
7514 7514 */
7515 7515 un->un_reserve_release_time = 5;
7516 7516
7517 7517 /*
7518 7518 * Set up the default maximum transfer size. Note that this may
7519 7519 * get updated later in the attach, when setting up default wide
7520 7520 * operations for disks.
7521 7521 */
7522 7522 #if defined(__i386) || defined(__amd64)
7523 7523 un->un_max_xfer_size = (uint_t)SD_DEFAULT_MAX_XFER_SIZE;
7524 7524 un->un_partial_dma_supported = 1;
7525 7525 #else
7526 7526 un->un_max_xfer_size = (uint_t)maxphys;
7527 7527 #endif
7528 7528
7529 7529 /*
7530 7530 * Get "allow bus device reset" property (defaults to "enabled" if
7531 7531 * the property was not defined). This is to disable bus resets for
7532 7532 * certain kinds of error recovery. Note: In the future when a run-time
7533 7533 * fibre check is available the soft state flag should default to
7534 7534 * enabled.
7535 7535 */
7536 7536 if (un->un_f_is_fibre == TRUE) {
7537 7537 un->un_f_allow_bus_device_reset = TRUE;
7538 7538 } else {
7539 7539 if (ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
7540 7540 "allow-bus-device-reset", 1) != 0) {
7541 7541 un->un_f_allow_bus_device_reset = TRUE;
7542 7542 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7543 7543 "sd_unit_attach: un:0x%p Bus device reset "
7544 7544 "enabled\n", un);
7545 7545 } else {
7546 7546 un->un_f_allow_bus_device_reset = FALSE;
7547 7547 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7548 7548 "sd_unit_attach: un:0x%p Bus device reset "
7549 7549 "disabled\n", un);
7550 7550 }
7551 7551 }
7552 7552
7553 7553 /*
7554 7554 * Check if this is an ATAPI device. ATAPI devices use Group 1
7555 7555 * Read/Write commands and Group 2 Mode Sense/Select commands.
7556 7556 *
7557 7557 * Note: The "obsolete" way of doing this is to check for the "atapi"
7558 7558 * property. The new "variant" property with a value of "atapi" has been
7559 7559 * introduced so that future 'variants' of standard SCSI behavior (like
7560 7560 * atapi) could be specified by the underlying HBA drivers by supplying
7561 7561 * a new value for the "variant" property, instead of having to define a
7562 7562 * new property.
7563 7563 */
7564 7564 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "atapi", -1) != -1) {
7565 7565 un->un_f_cfg_is_atapi = TRUE;
7566 7566 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7567 7567 "sd_unit_attach: un:0x%p Atapi device\n", un);
7568 7568 }
7569 7569 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, devi, 0, "variant",
7570 7570 &variantp) == DDI_PROP_SUCCESS) {
7571 7571 if (strcmp(variantp, "atapi") == 0) {
7572 7572 un->un_f_cfg_is_atapi = TRUE;
7573 7573 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7574 7574 "sd_unit_attach: un:0x%p Atapi device\n", un);
7575 7575 }
7576 7576 ddi_prop_free(variantp);
7577 7577 }
7578 7578
7579 7579 un->un_cmd_timeout = SD_IO_TIME;
7580 7580
7581 7581 un->un_busy_timeout = SD_BSY_TIMEOUT;
7582 7582
7583 7583 /* Info on current states, statuses, etc. (Updated frequently) */
7584 7584 un->un_state = SD_STATE_NORMAL;
7585 7585 un->un_last_state = SD_STATE_NORMAL;
7586 7586
7587 7587 /* Control & status info for command throttling */
7588 7588 un->un_throttle = sd_max_throttle;
7589 7589 un->un_saved_throttle = sd_max_throttle;
7590 7590 un->un_min_throttle = sd_min_throttle;
7591 7591
7592 7592 if (un->un_f_is_fibre == TRUE) {
7593 7593 un->un_f_use_adaptive_throttle = TRUE;
7594 7594 } else {
7595 7595 un->un_f_use_adaptive_throttle = FALSE;
7596 7596 }
7597 7597
7598 7598 /* Removable media support. */
7599 7599 cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
7600 7600 un->un_mediastate = DKIO_NONE;
7601 7601 un->un_specified_mediastate = DKIO_NONE;
7602 7602
7603 7603 /* CVs for suspend/resume (PM or DR) */
7604 7604 cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL);
7605 7605 cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);
7606 7606
7607 7607 /* Power management support. */
7608 7608 un->un_power_level = SD_SPINDLE_UNINIT;
7609 7609
7610 7610 cv_init(&un->un_wcc_cv, NULL, CV_DRIVER, NULL);
7611 7611 un->un_f_wcc_inprog = 0;
7612 7612
7613 7613 /*
7614 7614 * The open/close semaphore is used to serialize threads executing
7615 7615 * in the driver's open & close entry point routines for a given
7616 7616 * instance.
7617 7617 */
7618 7618 (void) sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);
7619 7619
7620 7620 /*
7621 7621 * The conf file entry and softstate variable are forceful overrides,
7622 7622 * meaning a non-zero value must be entered to change the default.
7623 7623 */
7624 7624 un->un_f_disksort_disabled = FALSE;
7625 7625 un->un_f_rmw_type = SD_RMW_TYPE_DEFAULT;
7626 7626 un->un_f_enable_rmw = FALSE;
7627 7627
7628 7628 /*
7629 7629 * GET EVENT STATUS NOTIFICATION media polling enabled by default, but
7630 7630 * can be overridden via [s]sd-config-list "mmc-gesn-polling" property.
7631 7631 */
7632 7632 un->un_f_mmc_gesn_polling = TRUE;
7633 7633
7634 7634 /*
7635 7635 * The physical sector size defaults to DEV_BSIZE currently. We can
7636 7636 * override this value via the driver configuration file, so we must
7637 7637 * set it before calling sd_read_unit_properties().
7638 7638 */
7639 7639 un->un_phy_blocksize = DEV_BSIZE;
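Reviewer note: because un_phy_blocksize is initialized before sd_read_unit_properties(), it can be overridden per device from the driver configuration file. A sketch of such an sd.conf entry, assuming the "physical-block-size" tuning name; the vendor/product strings are placeholders for the device's INQUIRY data:

    # /kernel/drv/sd.conf -- illustrative entry only
    sd-config-list = "VENDOR  MODEL", "physical-block-size:4096";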
7640 7640
7641 7641 /*
7642 7642 * Retrieve the properties from the static driver table or the driver
7643 7643 * configuration file (.conf) for this unit and update the soft state
7644 7644 * for the device as needed for the indicated properties.
7645 7645 * Note: the property configuration needs to occur here as some of the
7646 7646 * following routines may have dependencies on soft state flags set
7647 7647 * as part of the driver property configuration.
7648 7648 */
7649 7649 sd_read_unit_properties(un);
7650 7650 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7651 7651 "sd_unit_attach: un:0x%p property configuration complete.\n", un);
7652 7652
7653 7653 /*
7654 7654 * A device is treated as hotpluggable only if it has the
7655 7655 * "hotpluggable" property; otherwise it is regarded as
7656 7656 * non-hotpluggable.
7657 7657 */
7658 7658 if (ddi_prop_get_int(DDI_DEV_T_ANY, devi, 0, "hotpluggable",
7659 7659 -1) != -1) {
7660 7660 un->un_f_is_hotpluggable = TRUE;
7661 7661 }
7662 7662
7663 7663 /*
7664 7664 * Set the unit's attributes (flags) according to "hotpluggable"
7665 7665 * and the RMB bit in the INQUIRY data.
7666 7666 */
7667 7667 sd_set_unit_attributes(un, devi);
7668 7668
7669 7669 /*
7670 7670 * By default, we mark the capacity, lbasize, and geometry
7671 7671 * as invalid. Only if we successfully read a valid capacity
7672 7672 * will we update the un_blockcount and un_tgt_blocksize with the
7673 7673 * valid values (the geometry will be validated later).
7674 7674 */
7675 7675 un->un_f_blockcount_is_valid = FALSE;
7676 7676 un->un_f_tgt_blocksize_is_valid = FALSE;
7677 7677
7678 7678 /*
7679 7679 * Use DEV_BSIZE and DEV_BSHIFT as defaults, until we can determine
7680 7680 * otherwise.
7681 7681 */
7682 7682 un->un_tgt_blocksize = un->un_sys_blocksize = DEV_BSIZE;
7683 7683 un->un_blockcount = 0;
7684 7684
7685 7685 /*
7686 7686 * Set up the per-instance info needed to determine the correct
7687 7687 * CDBs and other info for issuing commands to the target.
7688 7688 */
7689 7689 sd_init_cdb_limits(un);
7690 7690
7691 7691 /*
7692 7692 * Set up the IO chains to use, based upon the target type.
7693 7693 */
7694 7694 if (un->un_f_non_devbsize_supported) {
7695 7695 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA;
7696 7696 } else {
7697 7697 un->un_buf_chain_type = SD_CHAIN_INFO_DISK;
7698 7698 }
7699 7699 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD;
7700 7700 un->un_direct_chain_type = SD_CHAIN_INFO_DIRECT_CMD;
7701 7701 un->un_priority_chain_type = SD_CHAIN_INFO_PRIORITY_CMD;
7702 7702
7703 7703 un->un_xbuf_attr = ddi_xbuf_attr_create(sizeof (struct sd_xbuf),
7704 7704 sd_xbuf_strategy, un, sd_xbuf_active_limit, sd_xbuf_reserve_limit,
7705 7705 ddi_driver_major(devi), DDI_XBUF_QTHREAD_DRIVER);
7706 7706 ddi_xbuf_attr_register_devinfo(un->un_xbuf_attr, devi);
7707 7707
7708 7708
7709 7709 if (ISCD(un)) {
7710 7710 un->un_additional_codes = sd_additional_codes;
7711 7711 } else {
7712 7712 un->un_additional_codes = NULL;
7713 7713 }
7714 7714
7715 7715 /*
7716 7716 * Create the kstats here so they can be available for attach-time
7717 7717 * routines that send commands to the unit (either polled or via
7718 7718 * sd_send_scsi_cmd).
7719 7719 *
7720 7720 * Note: This is a critical sequence that needs to be maintained:
7721 7721 * 1) Instantiate the kstats here, before any routines using the
7722 7722 * iopath (i.e. sd_send_scsi_cmd).
7723 7723 * 2) Instantiate and initialize the partition stats
7724 7724 * (sd_set_pstats).
7725 7725 * 3) Initialize the error stats (sd_set_errstats), following
7726 7726 * sd_validate_geometry(),sd_register_devid(),
7727 7727 * and sd_cache_control().
7728 7728 */
7729 7729
7730 7730 un->un_stats = kstat_create(sd_label, instance,
7731 7731 NULL, "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
7732 7732 if (un->un_stats != NULL) {
7733 7733 un->un_stats->ks_lock = SD_MUTEX(un);
7734 7734 kstat_install(un->un_stats);
7735 7735 }
7736 7736 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7737 7737 "sd_unit_attach: un:0x%p un_stats created\n", un);
7738 7738
7739 7739 sd_create_errstats(un, instance);
7740 7740 if (un->un_errstats == NULL) {
7741 7741 goto create_errstats_failed;
7742 7742 }
7743 7743 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7744 7744 "sd_unit_attach: un:0x%p errstats created\n", un);
7745 7745
7746 7746 /*
7747 7747 * The following if/else code was relocated here from below as part
7748 7748 * of the fix for bug (4430280). However, with the default setup added
7749 7749 * on entry to this routine, it's no longer absolutely necessary for
7750 7750 * this to be before the call to sd_spin_up_unit.
7751 7751 */
7752 7752 if (SD_IS_PARALLEL_SCSI(un) || SD_IS_SERIAL(un)) {
7753 7753 int tq_trigger_flag = (((devp->sd_inq->inq_ansi == 4) ||
7754 7754 (devp->sd_inq->inq_ansi == 5)) &&
7755 7755 devp->sd_inq->inq_bque) || devp->sd_inq->inq_cmdque;
7756 7756
7757 7757 /*
7758 7758 * If tagged queueing is supported by the target
7759 7759 * and by the host adapter then we will enable it
7760 7760 */
7761 7761 un->un_tagflags = 0;
7762 7762 if ((devp->sd_inq->inq_rdf == RDF_SCSI2) && tq_trigger_flag &&
7763 7763 (un->un_f_arq_enabled == TRUE)) {
7764 7764 if (scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing",
7765 7765 1, 1) == 1) {
7766 7766 un->un_tagflags = FLAG_STAG;
7767 7767 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7768 7768 "sd_unit_attach: un:0x%p tag queueing "
7769 7769 "enabled\n", un);
7770 7770 } else if (scsi_ifgetcap(SD_ADDRESS(un),
7771 7771 "untagged-qing", 0) == 1) {
7772 7772 un->un_f_opt_queueing = TRUE;
7773 7773 un->un_saved_throttle = un->un_throttle =
7774 7774 min(un->un_throttle, 3);
7775 7775 } else {
7776 7776 un->un_f_opt_queueing = FALSE;
7777 7777 un->un_saved_throttle = un->un_throttle = 1;
7778 7778 }
7779 7779 } else if ((scsi_ifgetcap(SD_ADDRESS(un), "untagged-qing", 0)
7780 7780 == 1) && (un->un_f_arq_enabled == TRUE)) {
7781 7781 /* The Host Adapter supports internal queueing. */
7782 7782 un->un_f_opt_queueing = TRUE;
7783 7783 un->un_saved_throttle = un->un_throttle =
7784 7784 min(un->un_throttle, 3);
7785 7785 } else {
7786 7786 un->un_f_opt_queueing = FALSE;
7787 7787 un->un_saved_throttle = un->un_throttle = 1;
7788 7788 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7789 7789 "sd_unit_attach: un:0x%p no tag queueing\n", un);
7790 7790 }
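Reviewer note, summarizing the queueing outcomes decided above (derived from the code, for review convenience):

    /*
     * tagged-qing set OK       -> un_tagflags = FLAG_STAG, throttle unchanged
     * untagged-qing supported  -> un_f_opt_queueing = TRUE, throttle <= 3
     * neither                  -> un_f_opt_queueing = FALSE, throttle = 1
     */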
7791 7791
7792 7792 /*
7793 7793 * Enable large transfers for SATA/SAS drives
7794 7794 */
7795 7795 if (SD_IS_SERIAL(un)) {
7796 7796 un->un_max_xfer_size =
7797 7797 ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7798 7798 sd_max_xfer_size, SD_MAX_XFER_SIZE);
7799 7799 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7800 7800 "sd_unit_attach: un:0x%p max transfer "
7801 7801 "size=0x%x\n", un, un->un_max_xfer_size);
7802 7802
7803 7803 }
7804 7804
7805 7805 /* Setup or tear down default wide operations for disks */
7806 7806
7807 7807 /*
7808 7808 * Note: Legacy: it may be possible for both "sd_max_xfer_size"
7809 7809 * and "ssd_max_xfer_size" to exist simultaneously on the same
7810 7810 * system and be set to different values. In the future this
7811 7811 * code may need to be updated when the ssd module is
7812 7812 * obsoleted and removed from the system. (4299588)
7813 7813 */
7814 7814 if (SD_IS_PARALLEL_SCSI(un) &&
7815 7815 (devp->sd_inq->inq_rdf == RDF_SCSI2) &&
7816 7816 (devp->sd_inq->inq_wbus16 || devp->sd_inq->inq_wbus32)) {
7817 7817 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
7818 7818 1, 1) == 1) {
7819 7819 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7820 7820 "sd_unit_attach: un:0x%p Wide Transfer "
7821 7821 "enabled\n", un);
7822 7822 }
7823 7823
7824 7824 /*
7825 7825 * If tagged queuing has also been enabled, then
7826 7826 * enable large xfers
7827 7827 */
7828 7828 if (un->un_saved_throttle == sd_max_throttle) {
7829 7829 un->un_max_xfer_size =
7830 7830 ddi_getprop(DDI_DEV_T_ANY, devi, 0,
7831 7831 sd_max_xfer_size, SD_MAX_XFER_SIZE);
7832 7832 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7833 7833 "sd_unit_attach: un:0x%p max transfer "
7834 7834 "size=0x%x\n", un, un->un_max_xfer_size);
7835 7835 }
7836 7836 } else {
7837 7837 if (scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer",
7838 7838 0, 1) == 1) {
7839 7839 SD_INFO(SD_LOG_ATTACH_DETACH, un,
7840 7840 "sd_unit_attach: un:0x%p "
7841 7841 "Wide Transfer disabled\n", un);
7842 7842 }
7843 7843 }
7844 7844 } else {
7845 7845 un->un_tagflags = FLAG_STAG;
7846 7846 un->un_max_xfer_size = ddi_getprop(DDI_DEV_T_ANY,
7847 7847 devi, 0, sd_max_xfer_size, SD_MAX_XFER_SIZE);
7848 7848 }
7849 7849
7850 7850 /*
7851 7851 * If this target supports LUN reset, try to enable it.
7852 7852 */
7853 7853 if (un->un_f_lun_reset_enabled) {
7854 7854 if (scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 1, 1) == 1) {
7855 7855 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7856 7856 "un:0x%p lun_reset capability set\n", un);
7857 7857 } else {
7858 7858 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7859 7859 "un:0x%p lun-reset capability not set\n", un);
7860 7860 }
7861 7861 }
7862 7862
7863 7863 /*
7864 7864 * Adjust the maximum transfer size. This is to fix
7865 7865 * the problem of partial DMA support on SPARC. Some
7866 7866 * HBA driver, like aac, has very small dma_attr_maxxfer
7867 7867 * size, which requires partial DMA support on SPARC.
7868 7868 * In the future the SPARC pci nexus driver may solve
7869 7869 * the problem instead of this fix.
7870 7870 */
7871 7871 max_xfer_size = scsi_ifgetcap(SD_ADDRESS(un), "dma-max", 1);
7872 7872 if ((max_xfer_size > 0) && (max_xfer_size < un->un_max_xfer_size)) {
7873 7873 /* We need DMA partial even on sparc to ensure sddump() works */
7874 7874 un->un_max_xfer_size = max_xfer_size;
7875 7875 if (un->un_partial_dma_supported == 0)
7876 7876 un->un_partial_dma_supported = 1;
7877 7877 }
7878 7878 if (ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
7879 7879 DDI_PROP_DONTPASS, "buf_break", 0) == 1) {
7880 7880 if (ddi_xbuf_attr_setup_brk(un->un_xbuf_attr,
7881 7881 un->un_max_xfer_size) == 1) {
7882 7882 un->un_buf_breakup_supported = 1;
7883 7883 SD_INFO(SD_LOG_ATTACH_DETACH, un, "sd_unit_attach: "
7884 7884 "un:0x%p Buf breakup enabled\n", un);
7885 7885 }
7886 7886 }
7887 7887
7888 7888 /*
7889 7889 * Set PKT_DMA_PARTIAL flag.
7890 7890 */
7891 7891 if (un->un_partial_dma_supported == 1) {
7892 7892 un->un_pkt_flags = PKT_DMA_PARTIAL;
7893 7893 } else {
7894 7894 un->un_pkt_flags = 0;
7895 7895 }
7896 7896
7897 7897 /* Initialize sd_ssc_t for internal uscsi commands */
7898 7898 ssc = sd_ssc_init(un);
7899 7899 scsi_fm_init(devp);
7900 7900
7901 7901 /*
7902 7902 * Allocate memory for the SCSI FMA structures.
7903 7903 */
7904 7904 un->un_fm_private =
7905 7905 kmem_zalloc(sizeof (struct sd_fm_internal), KM_SLEEP);
7906 7906 sfip = (struct sd_fm_internal *)un->un_fm_private;
7907 7907 sfip->fm_ssc.ssc_uscsi_cmd = &sfip->fm_ucmd;
7908 7908 sfip->fm_ssc.ssc_uscsi_info = &sfip->fm_uinfo;
7909 7909 sfip->fm_ssc.ssc_un = un;
7910 7910
7911 7911 if (ISCD(un) ||
7912 7912 un->un_f_has_removable_media ||
7913 7913 devp->sd_fm_capable == DDI_FM_NOT_CAPABLE) {
7914 7914 /*
7915 7915 * We don't touch CD-ROMs or DDI_FM_NOT_CAPABLE devices;
7916 7916 * their logging is unchanged.
7917 7917 */
7918 7918 sfip->fm_log_level = SD_FM_LOG_NSUP;
7919 7919 } else {
7920 7920 /*
7921 7921 * If we get here, this is a non-CDROM, FM-capable
7922 7922 * device, and it will no longer keep the old scsi_log output
7923 7923 * in /var/adm/messages. Instead, the property
7924 7924 * "fm-scsi-log" controls whether the FM telemetry will
7925 7925 * be logged in /var/adm/messages.
7926 7926 */
7927 7927 int fm_scsi_log;
7928 7928 fm_scsi_log = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
7929 7929 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fm-scsi-log", 0);
7930 7930
7931 7931 if (fm_scsi_log)
7932 7932 sfip->fm_log_level = SD_FM_LOG_EREPORT;
7933 7933 else
7934 7934 sfip->fm_log_level = SD_FM_LOG_SILENT;
7935 7935 }
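Reviewer note: "fm-scsi-log" is read with ddi_prop_get_int() above, so it can be supplied from the driver configuration file. A sketch of enabling it (illustrative; it only takes effect for non-CDROM, FM-capable devices):

    # sd.conf fragment -- illustrative
    fm-scsi-log=1;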
7936 7936
7937 7937 /*
7938 7938 * At this point in the attach, we have enough info in the
7939 7939 * soft state to be able to issue commands to the target.
7940 7940 *
7941 7941 * All command paths used below MUST issue their commands as
7942 7942 * SD_PATH_DIRECT. This is important as intermediate layers
7943 7943 * are not all initialized yet (such as PM).
7944 7944 */
7945 7945
7946 7946 /*
7947 7947 * Send a TEST UNIT READY command to the device. This should clear
7948 7948 * any outstanding UNIT ATTENTION that may be present.
7949 7949 *
7950 7950 * Note: Don't check for success, just track if there is a reservation,
7951 7951 * this is a throw away command to clear any unit attentions.
7952 7952 *
7953 7953 * Note: This MUST be the first command issued to the target during
7954 7954 * attach to ensure power on UNIT ATTENTIONS are cleared.
7955 7955 * Pass in flag SD_DONT_RETRY_TUR to prevent the long delays associated
7956 7956 * with attempts at spinning up a device with no media.
7957 7957 */
7958 7958 status = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
7959 7959 if (status != 0) {
7960 7960 if (status == EACCES)
7961 7961 reservation_flag = SD_TARGET_IS_RESERVED;
7962 7962 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
7963 7963 }
7964 7964
7965 7965 /*
7966 7966 * If the device is NOT a removable media device, attempt to spin
7967 7967 * it up (using the START_STOP_UNIT command) and read its capacity
7968 7968 * (using the READ CAPACITY command). Note, however, that either
7969 7969 * of these could fail and in some cases we would continue with
7970 7970 * the attach despite the failure (see below).
7971 7971 */
7972 7972 if (un->un_f_descr_format_supported) {
7973 7973
7974 7974 switch (sd_spin_up_unit(ssc)) {
7975 7975 case 0:
7976 7976 /*
7977 7977 * Spin-up was successful; now try to read the
7978 7978 * capacity. If successful then save the results
7979 7979 * and mark the capacity & lbasize as valid.
7980 7980 */
7981 7981 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
7982 7982 "sd_unit_attach: un:0x%p spin-up successful\n", un);
7983 7983
7984 7984 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
7985 7985 &lbasize, SD_PATH_DIRECT);
7986 7986
7987 7987 switch (status) {
7988 7988 case 0: {
7989 7989 if (capacity > DK_MAX_BLOCKS) {
7990 7990 #ifdef _LP64
7991 7991 if ((capacity + 1) >
7992 7992 SD_GROUP1_MAX_ADDRESS) {
7993 7993 /*
7994 7994 * Enable descriptor format
7995 7995 * sense data so that we can
7996 7996 * get 64 bit sense data
7997 7997 * fields.
7998 7998 */
7999 7999 sd_enable_descr_sense(ssc);
8000 8000 }
8001 8001 #else
8002 8002 /* 32-bit kernels can't handle this */
8003 8003 scsi_log(SD_DEVINFO(un),
8004 8004 sd_label, CE_WARN,
8005 8005 "disk has %llu blocks, which "
8006 8006 "is too large for a 32-bit "
8007 8007 "kernel", capacity);
8008 8008
8009 8009 #if defined(__i386) || defined(__amd64)
8010 8010 /*
8011 8011 * A 1TB disk was treated as (1T - 512)B
8012 8012 * in the past, so it might have a
8013 8013 * valid VTOC and Solaris partitions;
8014 8014 * we have to allow it to continue to
8015 8015 * work.
8016 8016 */
8017 8017 if (capacity - 1 > DK_MAX_BLOCKS)
8018 8018 #endif
8019 8019 goto spinup_failed;
8020 8020 #endif
8021 8021 }
8022 8022
8023 8023 /*
8024 8024 * It is not necessary here to check whether the
8025 8025 * capacity of the device is bigger than what the
8026 8026 * max HBA CDB can support, because
8027 8027 * sd_send_scsi_READ_CAPACITY retrieves
8028 8028 * the capacity by sending a USCSI command, which
8029 8029 * is itself constrained by the max HBA CDB.
8030 8030 * sd_send_scsi_READ_CAPACITY returns
8031 8031 * EINVAL when a bigger CDB than the HBA supports
8032 8032 * would be required; that case is handled in
8033 8033 * "case EINVAL" below.
8034 8034 */
8035 8035
8036 8036 /*
8037 8037 * The following relies on
8038 8038 * sd_send_scsi_READ_CAPACITY never
8039 8039 * returning 0 for capacity and/or lbasize.
8040 8040 */
8041 8041 sd_update_block_info(un, lbasize, capacity);
8042 8042
8043 8043 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8044 8044 "sd_unit_attach: un:0x%p capacity = %ld "
8045 8045 "blocks; lbasize= %ld.\n", un,
8046 8046 un->un_blockcount, un->un_tgt_blocksize);
8047 8047
8048 8048 break;
8049 8049 }
8050 8050 case EINVAL:
8051 8051 /*
8052 8052 * In the case where the max-cdb-length property
8053 8053 * is smaller than the required CDB length for
8054 8054 * a SCSI device, a target driver can fail to
8055 8055 * attach to that device.
8056 8056 */
8057 8057 scsi_log(SD_DEVINFO(un),
8058 8058 sd_label, CE_WARN,
8059 8059 "disk capacity is too large "
8060 8060 "for current cdb length");
8061 8061 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8062 8062
8063 8063 goto spinup_failed;
8064 8064 case EACCES:
8065 8065 /*
8066 8066 * Should never get here if the spin-up
8067 8067 * succeeded, but code it in anyway.
8068 8068 * From here, just continue with the attach...
8069 8069 */
8070 8070 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8071 8071 "sd_unit_attach: un:0x%p "
8072 8072 "sd_send_scsi_READ_CAPACITY "
8073 8073 "returned reservation conflict\n", un);
8074 8074 reservation_flag = SD_TARGET_IS_RESERVED;
8075 8075 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8076 8076 break;
8077 8077 default:
8078 8078 /*
8079 8079 * Likewise, should never get here if the
8080 8080 * spin-up succeeded. Just continue with
8081 8081 * the attach...
8082 8082 */
8083 8083 if (status == EIO)
8084 8084 sd_ssc_assessment(ssc,
8085 8085 SD_FMT_STATUS_CHECK);
8086 8086 else
8087 8087 sd_ssc_assessment(ssc,
8088 8088 SD_FMT_IGNORE);
8089 8089 break;
8090 8090 }
8091 8091 break;
8092 8092 case EACCES:
8093 8093 /*
8094 8094 * Device is reserved by another host. In this case
8095 8095 * we could not spin it up or read the capacity, but
8096 8096 * we continue with the attach anyway.
8097 8097 */
8098 8098 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8099 8099 "sd_unit_attach: un:0x%p spin-up reservation "
8100 8100 "conflict.\n", un);
8101 8101 reservation_flag = SD_TARGET_IS_RESERVED;
8102 8102 break;
8103 8103 default:
8104 8104 /* Fail the attach if the spin-up failed. */
8105 8105 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8106 8106 "sd_unit_attach: un:0x%p spin-up failed.", un);
8107 8107 goto spinup_failed;
8108 8108 }
8109 8109
8110 8110 }
8111 8111
8112 8112 /*
8113 8113 * Check to see if this is an MMC drive
8114 8114 */
8115 8115 if (ISCD(un)) {
8116 8116 sd_set_mmc_caps(ssc);
8117 8117 }
8118 8118
8119 8119 /*
8120 8120 * Add a zero-length attribute to tell the world we support
8121 8121 * kernel ioctls (for layered drivers)
8122 8122 */
8123 8123 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
8124 8124 DDI_KERNEL_IOCTL, NULL, 0);
8125 8125
8126 8126 /*
8127 8127 * Add a boolean property to tell the world we support
8128 8128 * the B_FAILFAST flag (for layered drivers)
8129 8129 */
8130 8130 (void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
8131 8131 "ddi-failfast-supported", NULL, 0);
8132 8132
8133 8133 /*
8134 8134 * Initialize power management
8135 8135 */
8136 8136 mutex_init(&un->un_pm_mutex, NULL, MUTEX_DRIVER, NULL);
8137 8137 cv_init(&un->un_pm_busy_cv, NULL, CV_DRIVER, NULL);
8138 8138 sd_setup_pm(ssc, devi);
8139 8139 if (un->un_f_pm_is_enabled == FALSE) {
8140 8140 /*
8141 8141 * For performance, point to a jump table that does
8142 8142 * not include pm.
8143 8143 * The direct and priority chains don't change with PM.
8144 8144 *
8145 8145 * Note: this is currently done based on individual device
8146 8146 * capabilities. When an interface for determining system
8147 8147 * power enabled state becomes available, or when additional
8148 8148 * layers are added to the command chain, these values will
8149 8149 * have to be re-evaluated for correctness.
8150 8150 */
8151 8151 if (un->un_f_non_devbsize_supported) {
8152 8152 un->un_buf_chain_type = SD_CHAIN_INFO_RMMEDIA_NO_PM;
8153 8153 } else {
8154 8154 un->un_buf_chain_type = SD_CHAIN_INFO_DISK_NO_PM;
8155 8155 }
8156 8156 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
8157 8157 }
8158 8158
8159 8159 /*
8160 8160 * This property is set to 0 by HA software to avoid retries
8161 8161 * on a reserved disk. (The preferred property name is
8162 8162 * "retry-on-reservation-conflict") (1189689)
8163 8163 *
8164 8164 * Note: The use of a global here can have unintended consequences. A
8165 8165 * per instance variable is preferable to match the capabilities of
8166 8166 * different underlying hba's (4402600)
8167 8167 */
8168 8168 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY, devi,
8169 8169 DDI_PROP_DONTPASS, "retry-on-reservation-conflict",
8170 8170 sd_retry_on_reservation_conflict);
8171 8171 if (sd_retry_on_reservation_conflict != 0) {
8172 8172 sd_retry_on_reservation_conflict = ddi_getprop(DDI_DEV_T_ANY,
8173 8173 devi, DDI_PROP_DONTPASS, sd_resv_conflict_name,
8174 8174 sd_retry_on_reservation_conflict);
8175 8175 }
8176 8176
8177 8177 /* Set up options for QFULL handling. */
8178 8178 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
8179 8179 "qfull-retries", -1)) != -1) {
8180 8180 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retries",
8181 8181 rval, 1);
8182 8182 }
8183 8183 if ((rval = ddi_getprop(DDI_DEV_T_ANY, devi, 0,
8184 8184 "qfull-retry-interval", -1)) != -1) {
8185 8185 (void) scsi_ifsetcap(SD_ADDRESS(un), "qfull-retry-interval",
8186 8186 rval, 1);
8187 8187 }
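Reviewer note: "qfull-retries" and "qfull-retry-interval" are standard scsi_ifsetcap(9F) capabilities; since they are fetched with ddi_getprop() here, they can be tuned from the driver configuration file. A sketch with example values (illustrative only):

    # sd.conf fragment -- illustrative values
    qfull-retries=10;
    qfull-retry-interval=100;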
8188 8188
8189 8189 /*
8190 8190 * This just prints a message that announces the existence of the
8191 8191 * device. The message is always printed in the system logfile, but
8192 8192 * only appears on the console if the system is booted with the
8193 8193 * -v (verbose) argument.
8194 8194 */
8195 8195 ddi_report_dev(devi);
8196 8196
8197 8197 un->un_mediastate = DKIO_NONE;
8198 8198
8199 8199 /*
8200 8200 * Check if this is an SSD (Solid State Drive).
8201 8201 */
8202 8202 sd_check_solid_state(ssc);
8203 8203
8204 8204 /*
8205 8205 * Check whether the drive is in emulation mode.
8206 8206 */
8207 8207 sd_check_emulation_mode(ssc);
8208 8208
8209 8209 cmlb_alloc_handle(&un->un_cmlbhandle);
8210 8210
8211 8211 #if defined(__i386) || defined(__amd64)
8212 8212 /*
8213 8213 * On x86, compensate for off-by-1 legacy error
8214 8214 */
8215 8215 if (!un->un_f_has_removable_media && !un->un_f_is_hotpluggable &&
8216 8216 (lbasize == un->un_sys_blocksize))
8217 8217 offbyone = CMLB_OFF_BY_ONE;
8218 8218 #endif
8219 8219
8220 8220 if (cmlb_attach(devi, &sd_tgops, (int)devp->sd_inq->inq_dtype,
8221 8221 VOID2BOOLEAN(un->un_f_has_removable_media != 0),
8222 8222 VOID2BOOLEAN(un->un_f_is_hotpluggable != 0),
8223 8223 un->un_node_type, offbyone, un->un_cmlbhandle,
8224 8224 (void *)SD_PATH_DIRECT) != 0) {
8225 8225 goto cmlb_attach_failed;
8226 8226 }
8227 8227
8228 8228
8229 8229 /*
8230 8230 * Read and validate the device's geometry (ie, disk label)
8231 8231 * A new unformatted drive will not have a valid geometry, but
8232 8232 * the driver needs to successfully attach to this device so
8233 8233 * the drive can be formatted via ioctls.
8234 8234 */
8235 8235 geom_label_valid = (cmlb_validate(un->un_cmlbhandle, 0,
8236 8236 (void *)SD_PATH_DIRECT) == 0) ? 1: 0;
8237 8237
8238 8238 mutex_enter(SD_MUTEX(un));
8239 8239
8240 8240 /*
8241 8241 * Read and initialize the devid for the unit.
8242 8242 */
8243 8243 if (un->un_f_devid_supported) {
8244 8244 sd_register_devid(ssc, devi, reservation_flag);
8245 8245 }
8246 8246 mutex_exit(SD_MUTEX(un));
8247 8247
8248 8248 #if (defined(__fibre))
8249 8249 /*
8250 8250 * Register callbacks for fibre only. You can't do this solely
8251 8251 * on the basis of the devid_type because this is hba specific.
8252 8252 * We need to query our hba capabilities to find out whether to
8253 8253 * register or not.
8254 8254 */
8255 8255 if (un->un_f_is_fibre) {
8256 8256 if (strcmp(un->un_node_type, DDI_NT_BLOCK_CHAN)) {
8257 8257 sd_init_event_callbacks(un);
8258 8258 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8259 8259 "sd_unit_attach: un:0x%p event callbacks inserted",
8260 8260 un);
8261 8261 }
8262 8262 }
8263 8263 #endif
8264 8264
8265 8265 if (un->un_f_opt_disable_cache == TRUE) {
8266 8266 /*
8267 8267 * Disable both read cache and write cache. This is
8268 8268 * the historic behavior of the keywords in the config file.
8269 8269 */
8270 8270 if (sd_cache_control(ssc, SD_CACHE_DISABLE, SD_CACHE_DISABLE) !=
8271 8271 0) {
8272 8272 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8273 8273 "sd_unit_attach: un:0x%p Could not disable "
8274 8274 "caching", un);
8275 8275 goto devid_failed;
8276 8276 }
8277 8277 }
8278 8278
8279 8279 /*
8280 8280 * Check the value of the WCE bit now and
8281 8281 * set un_f_write_cache_enabled accordingly.
8282 8282 */
8283 8283 (void) sd_get_write_cache_enabled(ssc, &wc_enabled);
8284 8284 mutex_enter(SD_MUTEX(un));
8285 8285 un->un_f_write_cache_enabled = (wc_enabled != 0);
8286 8286 mutex_exit(SD_MUTEX(un));
8287 8287
8288 8288 if ((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR &&
8289 8289 un->un_tgt_blocksize != DEV_BSIZE) ||
8290 8290 un->un_f_enable_rmw) {
8291 8291 if (!(un->un_wm_cache)) {
8292 8292 (void) snprintf(name_str, sizeof (name_str),
8293 8293 "%s%d_cache",
8294 8294 ddi_driver_name(SD_DEVINFO(un)),
8295 8295 ddi_get_instance(SD_DEVINFO(un)));
8296 8296 un->un_wm_cache = kmem_cache_create(
8297 8297 name_str, sizeof (struct sd_w_map),
8298 8298 8, sd_wm_cache_constructor,
8299 8299 sd_wm_cache_destructor, NULL,
8300 8300 (void *)un, NULL, 0);
8301 8301 if (!(un->un_wm_cache)) {
8302 8302 goto wm_cache_failed;
8303 8303 }
8304 8304 }
8305 8305 }
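Reviewer note: the kmem cache created above holds struct sd_w_map entries, which back the range locking used by the read-modify-write path; that is why it is needed exactly when the target block size differs from DEV_BSIZE (and the RMW type is not "return error") or when RMW is forced via un_f_enable_rmw.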
8306 8306
8307 8307 /*
8308 8308 * Check the value of the NV_SUP bit and set
8309 8309 * un_f_suppress_cache_flush accordingly.
8310 8310 */
8311 8311 sd_get_nv_sup(ssc);
8312 8312
8313 8313 /*
8314 8314 * Find out what type of reservation this disk supports.
8315 8315 */
8316 8316 status = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS, 0, NULL);
8317 8317
8318 8318 switch (status) {
8319 8319 case 0:
8320 8320 /*
8321 8321 * SCSI-3 reservations are supported.
8322 8322 */
8323 8323 un->un_reservation_type = SD_SCSI3_RESERVATION;
8324 8324 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8325 8325 "sd_unit_attach: un:0x%p SCSI-3 reservations\n", un);
8326 8326 break;
8327 8327 case ENOTSUP:
8328 8328 /*
8329 8329 * The PERSISTENT RESERVE IN command would not be recognized by
8330 8330 * a SCSI-2 device, so assume the reservation type is SCSI-2.
8331 8331 */
8332 8332 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8333 8333 "sd_unit_attach: un:0x%p SCSI-2 reservations\n", un);
8334 8334 un->un_reservation_type = SD_SCSI2_RESERVATION;
8335 8335
8336 8336 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8337 8337 break;
8338 8338 default:
8339 8339 /*
8340 8340 * default to SCSI-3 reservations
8341 8341 */
8342 8342 SD_INFO(SD_LOG_ATTACH_DETACH, un,
8343 8343 "sd_unit_attach: un:0x%p default SCSI3 reservations\n", un);
8344 8344 un->un_reservation_type = SD_SCSI3_RESERVATION;
8345 8345
8346 8346 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
8347 8347 break;
8348 8348 }
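Reviewer note: the net effect of the switch above is that only an explicit ENOTSUP from PERSISTENT RESERVE IN downgrades the unit to SCSI-2 reservations; success and every other error code leave it at SD_SCSI3_RESERVATION.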
8349 8349
8350 8350 /*
8351 8351 * Set the pstat and error stat values here, so data obtained during the
8352 8352 * previous attach-time routines is available.
8353 8353 *
8354 8354 * Note: This is a critical sequence that needs to be maintained:
8355 8355 * 1) Instantiate the kstats before any routines using the iopath
8356 8356 * (i.e. sd_send_scsi_cmd).
8357 8357 * 2) Initialize the error stats (sd_set_errstats) and partition
8358 8358 * stats (sd_set_pstats) here, following
8359 8359 * cmlb_validate_geometry(), sd_register_devid(), and
8360 8360 * sd_cache_control().
8361 8361 */
8362 8362
8363 8363 if (un->un_f_pkstats_enabled && geom_label_valid) {
8364 8364 sd_set_pstats(un);
8365 8365 SD_TRACE(SD_LOG_IO_PARTITION, un,
8366 8366 "sd_unit_attach: un:0x%p pstats created and set\n", un);
8367 8367 }
8368 8368
8369 8369 sd_set_errstats(un);
8370 8370 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8371 8371 "sd_unit_attach: un:0x%p errstats set\n", un);
8372 8372
8373 8373
8374 8374 /*
8375 8375 * After successfully attaching an instance, we record the information
8376 8376 * of how many luns have been attached on the corresponding target
8377 8377 * and controller for parallel SCSI. This information is used when sd
8378 8378 * tries to set the tagged queuing capability in the HBA.
8379 8379 */
8380 8380 if (SD_IS_PARALLEL_SCSI(un) && (tgt >= 0) && (tgt < NTARGETS_WIDE)) {
8381 8381 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_ATTACH);
8382 8382 }
8383 8383
8384 8384 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
8385 8385 "sd_unit_attach: un:0x%p exit success\n", un);
8386 8386
8387 8387 /* Uninitialize sd_ssc_t pointer */
8388 8388 sd_ssc_fini(ssc);
8389 8389
8390 8390 return (DDI_SUCCESS);
8391 8391
8392 8392 /*
8393 8393 * An error occurred during the attach; clean up & return failure.
8394 8394 */
8395 8395 wm_cache_failed:
8396 8396 devid_failed:
8397 8397
8398 8398 setup_pm_failed:
8399 8399 ddi_remove_minor_node(devi, NULL);
8400 8400
8401 8401 cmlb_attach_failed:
8402 8402 /*
8403 8403 * Cleanup from the scsi_ifsetcap() calls (437868)
8404 8404 */
8405 8405 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
8406 8406 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
8407 8407
8408 8408 /*
8409 8409 * Refer to the comments of setting tagged-qing in the beginning of
8410 8410 * sd_unit_attach. We can only disable tagged queuing when there is
8411 8411 * no lun attached on the target.
8412 8412 */
8413 8413 if (sd_scsi_get_target_lun_count(pdip, tgt) < 1) {
8414 8414 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
8415 8415 }
8416 8416
8417 8417 if (un->un_f_is_fibre == FALSE) {
8418 8418 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
8419 8419 }
8420 8420
8421 8421 spinup_failed:
8422 8422
8423 8423 /* Uninitialize sd_ssc_t pointer */
8424 8424 sd_ssc_fini(ssc);
8425 8425
8426 8426 mutex_enter(SD_MUTEX(un));
8427 8427
8428 8428 /* Deallocate SCSI FMA memory spaces */
8429 8429 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));
8430 8430
8431 8431 /* Cancel callback for SD_PATH_DIRECT_PRIORITY cmd. restart */
8432 8432 if (un->un_direct_priority_timeid != NULL) {
8433 8433 timeout_id_t temp_id = un->un_direct_priority_timeid;
8434 8434 un->un_direct_priority_timeid = NULL;
8435 8435 mutex_exit(SD_MUTEX(un));
8436 8436 (void) untimeout(temp_id);
8437 8437 mutex_enter(SD_MUTEX(un));
8438 8438 }
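Reviewer note on the drop/re-acquire pattern repeated throughout the cancellation code below: untimeout(9F) waits for an in-flight callback to complete, and these callbacks acquire SD_MUTEX, so calling untimeout() while holding SD_MUTEX could deadlock. A minimal sketch of the pattern (un_some_timeid is a placeholder for the various *_timeid fields):

    /* Sketch: cancel a timeout whose callback acquires SD_MUTEX. */
    if (un->un_some_timeid != NULL) {
            timeout_id_t temp_id = un->un_some_timeid;
            un->un_some_timeid = NULL;   /* claim it while holding the mutex */
            mutex_exit(SD_MUTEX(un));
            (void) untimeout(temp_id);   /* may wait for the callback */
            mutex_enter(SD_MUTEX(un));
    }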
8439 8439
8440 8440 /* Cancel any pending start/stop timeouts */
8441 8441 if (un->un_startstop_timeid != NULL) {
8442 8442 timeout_id_t temp_id = un->un_startstop_timeid;
8443 8443 un->un_startstop_timeid = NULL;
8444 8444 mutex_exit(SD_MUTEX(un));
8445 8445 (void) untimeout(temp_id);
8446 8446 mutex_enter(SD_MUTEX(un));
8447 8447 }
8448 8448
8449 8449 /* Cancel any pending reset-throttle timeouts */
8450 8450 if (un->un_reset_throttle_timeid != NULL) {
8451 8451 timeout_id_t temp_id = un->un_reset_throttle_timeid;
8452 8452 un->un_reset_throttle_timeid = NULL;
8453 8453 mutex_exit(SD_MUTEX(un));
8454 8454 (void) untimeout(temp_id);
8455 8455 mutex_enter(SD_MUTEX(un));
8456 8456 }
8457 8457
8458 8458 /* Cancel rmw warning message timeouts */
8459 8459 if (un->un_rmw_msg_timeid != NULL) {
8460 8460 timeout_id_t temp_id = un->un_rmw_msg_timeid;
8461 8461 un->un_rmw_msg_timeid = NULL;
8462 8462 mutex_exit(SD_MUTEX(un));
8463 8463 (void) untimeout(temp_id);
8464 8464 mutex_enter(SD_MUTEX(un));
8465 8465 }
8466 8466
8467 8467 /* Cancel any pending retry timeouts */
8468 8468 if (un->un_retry_timeid != NULL) {
8469 8469 timeout_id_t temp_id = un->un_retry_timeid;
8470 8470 un->un_retry_timeid = NULL;
8471 8471 mutex_exit(SD_MUTEX(un));
8472 8472 (void) untimeout(temp_id);
8473 8473 mutex_enter(SD_MUTEX(un));
8474 8474 }
8475 8475
8476 8476 /* Cancel any pending delayed cv broadcast timeouts */
8477 8477 if (un->un_dcvb_timeid != NULL) {
8478 8478 timeout_id_t temp_id = un->un_dcvb_timeid;
8479 8479 un->un_dcvb_timeid = NULL;
8480 8480 mutex_exit(SD_MUTEX(un));
8481 8481 (void) untimeout(temp_id);
8482 8482 mutex_enter(SD_MUTEX(un));
8483 8483 }
8484 8484
8485 8485 mutex_exit(SD_MUTEX(un));
8486 8486
8487 8487 /* There should not be any in-progress I/O so ASSERT this check */
8488 8488 ASSERT(un->un_ncmds_in_transport == 0);
8489 8489 ASSERT(un->un_ncmds_in_driver == 0);
8490 8490
8491 8491 /* Do not free the softstate if the callback routine is active */
8492 8492 sd_sync_with_callback(un);
8493 8493
8494 8494 /*
8495 8495 * Partition stats apparently are not used with removables. These would
8496 8496 * not have been created during attach, so no need to clean them up...
8497 8497 */
8498 8498 if (un->un_errstats != NULL) {
8499 8499 kstat_delete(un->un_errstats);
8500 8500 un->un_errstats = NULL;
8501 8501 }
8502 8502
8503 8503 create_errstats_failed:
8504 8504
8505 8505 if (un->un_stats != NULL) {
8506 8506 kstat_delete(un->un_stats);
8507 8507 un->un_stats = NULL;
8508 8508 }
8509 8509
8510 8510 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
8511 8511 ddi_xbuf_attr_destroy(un->un_xbuf_attr);
8512 8512
8513 8513 ddi_prop_remove_all(devi);
8514 8514 sema_destroy(&un->un_semoclose);
8515 8515 cv_destroy(&un->un_state_cv);
8516 8516
8517 8517 getrbuf_failed:
8518 8518
8519 8519 sd_free_rqs(un);
8520 8520
8521 8521 alloc_rqs_failed:
8522 8522
8523 8523 devp->sd_private = NULL;
8524 8524 bzero(un, sizeof (struct sd_lun)); /* Clear any stale data! */
8525 8525
8526 8526 get_softstate_failed:
8527 8527 /*
8528 8528 * Note: the man pages are unclear as to whether or not doing a
8529 8529 * ddi_soft_state_free(sd_state, instance) is the right way to
8530 8530 * clean up after the ddi_soft_state_zalloc() if the subsequent
8531 8531 * ddi_get_soft_state() fails. The implication seems to be
8532 8532 * that the get_soft_state cannot fail if the zalloc succeeds.
8533 8533 */
8534 8534 #ifndef XPV_HVM_DRIVER
8535 8535 ddi_soft_state_free(sd_state, instance);
8536 8536 #endif /* !XPV_HVM_DRIVER */
8537 8537
8538 8538 probe_failed:
8539 8539 scsi_unprobe(devp);
8540 8540
8541 8541 return (DDI_FAILURE);
8542 8542 }
8543 8543
8544 8544
8545 8545 /*
8546 8546 * Function: sd_unit_detach
8547 8547 *
8548 8548 * Description: Performs DDI_DETACH processing for sddetach().
8549 8549 *
8550 8550 * Return Code: DDI_SUCCESS
8551 8551 * DDI_FAILURE
8552 8552 *
8553 8553 * Context: Kernel thread context
8554 8554 */
8555 8555
8556 8556 static int
8557 8557 sd_unit_detach(dev_info_t *devi)
8558 8558 {
8559 8559 struct scsi_device *devp;
8560 8560 struct sd_lun *un;
8561 8561 int i;
8562 8562 int tgt;
8563 8563 dev_t dev;
8564 8564 dev_info_t *pdip = ddi_get_parent(devi);
8565 8565 #ifndef XPV_HVM_DRIVER
8566 8566 int instance = ddi_get_instance(devi);
8567 8567 #endif /* !XPV_HVM_DRIVER */
8568 8568
8569 8569 mutex_enter(&sd_detach_mutex);
8570 8570
8571 8571 /*
8572 8572 * Fail the detach for any of the following:
8573 8573 * - Unable to get the sd_lun struct for the instance
8574 8574 * - A layered driver has an outstanding open on the instance
8575 8575 * - Another thread is already detaching this instance
8576 8576 * - Another thread is currently performing an open
8577 8577 */
8578 8578 devp = ddi_get_driver_private(devi);
8579 8579 if ((devp == NULL) ||
8580 8580 ((un = (struct sd_lun *)devp->sd_private) == NULL) ||
8581 8581 (un->un_ncmds_in_driver != 0) || (un->un_layer_count != 0) ||
8582 8582 (un->un_detach_count != 0) || (un->un_opens_in_progress != 0)) {
8583 8583 mutex_exit(&sd_detach_mutex);
8584 8584 return (DDI_FAILURE);
8585 8585 }
8586 8586
8587 8587 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: entry 0x%p\n", un);
8588 8588
8589 8589 /*
8590 8590 * Mark this instance as currently in a detach, to inhibit any
8591 8591 * opens from a layered driver.
8592 8592 */
8593 8593 un->un_detach_count++;
8594 8594 mutex_exit(&sd_detach_mutex);
8595 8595
8596 8596 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
8597 8597 SCSI_ADDR_PROP_TARGET, -1);
8598 8598
8599 8599 dev = sd_make_device(SD_DEVINFO(un));
8600 8600
8601 8601 #ifndef lint
8602 8602 _NOTE(COMPETING_THREADS_NOW);
8603 8603 #endif
8604 8604
8605 8605 mutex_enter(SD_MUTEX(un));
8606 8606
8607 8607 /*
8608 8608 * Fail the detach if there are any outstanding layered
8609 8609 * opens on this device.
8610 8610 */
8611 8611 for (i = 0; i < NDKMAP; i++) {
8612 8612 if (un->un_ocmap.lyropen[i] != 0) {
8613 8613 goto err_notclosed;
8614 8614 }
8615 8615 }
8616 8616
8617 8617 /*
8618 8618 * Verify there are NO outstanding commands issued to this device.
8619 8619 * i.e., un_ncmds_in_transport == 0.
8620 8620 * It's possible to have outstanding commands through the physio
8621 8621 * code path, even though everything's closed.
8622 8622 */
8623 8623 if ((un->un_ncmds_in_transport != 0) || (un->un_retry_timeid != NULL) ||
8624 8624 (un->un_direct_priority_timeid != NULL) ||
8625 8625 (un->un_state == SD_STATE_RWAIT)) {
8626 8626 mutex_exit(SD_MUTEX(un));
8627 8627 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8628 8628 "sd_dr_detach: Detach failure due to outstanding cmds\n");
8629 8629 goto err_stillbusy;
8630 8630 }
8631 8631
8632 8632 /*
8633 8633 * If we have the device reserved, release the reservation.
8634 8634 */
8635 8635 if ((un->un_resvd_status & SD_RESERVE) &&
8636 8636 !(un->un_resvd_status & SD_LOST_RESERVE)) {
8637 8637 mutex_exit(SD_MUTEX(un));
8638 8638 /*
8639 8639 * Note: sd_reserve_release sends a command to the device
8640 8640 * via the sd_ioctlcmd() path, and can sleep.
8641 8641 */
8642 8642 if (sd_reserve_release(dev, SD_RELEASE) != 0) {
8643 8643 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8644 8644 "sd_dr_detach: Cannot release reservation \n");
8645 8645 }
8646 8646 } else {
8647 8647 mutex_exit(SD_MUTEX(un));
8648 8648 }
8649 8649
8650 8650 /*
8651 8651 * Untimeout any reserve recover, throttle reset, restart unit
8652 8652 * and delayed broadcast timeout threads. Protect the timeout pointer
8653 8653 * from getting nulled by their callback functions.
8654 8654 */
8655 8655 mutex_enter(SD_MUTEX(un));
8656 8656 if (un->un_resvd_timeid != NULL) {
8657 8657 timeout_id_t temp_id = un->un_resvd_timeid;
8658 8658 un->un_resvd_timeid = NULL;
8659 8659 mutex_exit(SD_MUTEX(un));
8660 8660 (void) untimeout(temp_id);
8661 8661 mutex_enter(SD_MUTEX(un));
8662 8662 }
8663 8663
8664 8664 if (un->un_reset_throttle_timeid != NULL) {
8665 8665 timeout_id_t temp_id = un->un_reset_throttle_timeid;
8666 8666 un->un_reset_throttle_timeid = NULL;
8667 8667 mutex_exit(SD_MUTEX(un));
8668 8668 (void) untimeout(temp_id);
8669 8669 mutex_enter(SD_MUTEX(un));
8670 8670 }
8671 8671
8672 8672 if (un->un_startstop_timeid != NULL) {
8673 8673 timeout_id_t temp_id = un->un_startstop_timeid;
8674 8674 un->un_startstop_timeid = NULL;
8675 8675 mutex_exit(SD_MUTEX(un));
8676 8676 (void) untimeout(temp_id);
8677 8677 mutex_enter(SD_MUTEX(un));
8678 8678 }
8679 8679
8680 8680 if (un->un_rmw_msg_timeid != NULL) {
8681 8681 timeout_id_t temp_id = un->un_rmw_msg_timeid;
8682 8682 un->un_rmw_msg_timeid = NULL;
8683 8683 mutex_exit(SD_MUTEX(un));
8684 8684 (void) untimeout(temp_id);
8685 8685 mutex_enter(SD_MUTEX(un));
8686 8686 }
8687 8687
8688 8688 if (un->un_dcvb_timeid != NULL) {
8689 8689 timeout_id_t temp_id = un->un_dcvb_timeid;
8690 8690 un->un_dcvb_timeid = NULL;
8691 8691 mutex_exit(SD_MUTEX(un));
8692 8692 (void) untimeout(temp_id);
8693 8693 } else {
8694 8694 mutex_exit(SD_MUTEX(un));
8695 8695 }
8696 8696
8697 8697 /* Remove any pending reservation reclaim requests for this device */
8698 8698 sd_rmv_resv_reclaim_req(dev);
8699 8699
8700 8700 mutex_enter(SD_MUTEX(un));
8701 8701
8702 8702 /* Cancel any pending callbacks for SD_PATH_DIRECT_PRIORITY cmd. */
8703 8703 if (un->un_direct_priority_timeid != NULL) {
8704 8704 timeout_id_t temp_id = un->un_direct_priority_timeid;
8705 8705 un->un_direct_priority_timeid = NULL;
8706 8706 mutex_exit(SD_MUTEX(un));
8707 8707 (void) untimeout(temp_id);
8708 8708 mutex_enter(SD_MUTEX(un));
8709 8709 }
8710 8710
8711 8711 /* Cancel any active multi-host disk watch thread requests */
8712 8712 if (un->un_mhd_token != NULL) {
8713 8713 mutex_exit(SD_MUTEX(un));
8714 8714 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_mhd_token));
8715 8715 if (scsi_watch_request_terminate(un->un_mhd_token,
8716 8716 SCSI_WATCH_TERMINATE_NOWAIT)) {
8717 8717 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8718 8718 "sd_dr_detach: Cannot cancel mhd watch request\n");
8719 8719 /*
8720 8720 * Note: We are returning here after having removed
8721 8721 * some driver timeouts above. This is consistent with
8722 8722 * the legacy implementation but perhaps the watch
8723 8723 * terminate call should be made with the wait flag set.
8724 8724 */
8725 8725 goto err_stillbusy;
8726 8726 }
8727 8727 mutex_enter(SD_MUTEX(un));
8728 8728 un->un_mhd_token = NULL;
8729 8729 }
8730 8730
8731 8731 if (un->un_swr_token != NULL) {
8732 8732 mutex_exit(SD_MUTEX(un));
8733 8733 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_swr_token));
8734 8734 if (scsi_watch_request_terminate(un->un_swr_token,
8735 8735 SCSI_WATCH_TERMINATE_NOWAIT)) {
8736 8736 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8737 8737 "sd_dr_detach: Cannot cancel swr watch request\n");
8738 8738 /*
8739 8739 * Note: We are returning here after having removed
8740 8740 * some driver timeouts above. This is consistent with
8741 8741 * the legacy implementation but perhaps the watch
8742 8742 * terminate call should be made with the wait flag set.
8743 8743 */
8744 8744 goto err_stillbusy;
8745 8745 }
8746 8746 mutex_enter(SD_MUTEX(un));
8747 8747 un->un_swr_token = NULL;
8748 8748 }
8749 8749
8750 8750 mutex_exit(SD_MUTEX(un));
8751 8751
8752 8752 /*
8753 8753 * Clear any scsi_reset_notifies. We clear the reset notifies
8754 8754 * even if we have not registered one.
8755 8755 * Note: The sd_mhd_reset_notify_cb() fn tries to acquire SD_MUTEX!
8756 8756 */
8757 8757 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
8758 8758 sd_mhd_reset_notify_cb, (caddr_t)un);
8759 8759
8760 8760 /*
8761 8761 * Protect the timeout pointers from getting nulled by
8762 8762 * their callback functions during the cancellation process.
8763 8763 * Otherwise untimeout could be invoked with a null value.
8764 8764 */
8765 8765 _NOTE(NO_COMPETING_THREADS_NOW);
8766 8766
8767 8767 mutex_enter(&un->un_pm_mutex);
8768 8768 if (un->un_pm_idle_timeid != NULL) {
8769 8769 timeout_id_t temp_id = un->un_pm_idle_timeid;
8770 8770 un->un_pm_idle_timeid = NULL;
8771 8771 mutex_exit(&un->un_pm_mutex);
8772 8772
8773 8773 /*
8774 8774 * Timeout is active; cancel it.
8775 8775 * Note that it'll never be active on a device
8776 8776 * that does not support PM therefore we don't
8777 8777 * have to check before calling pm_idle_component.
8778 8778 */
8779 8779 (void) untimeout(temp_id);
8780 8780 (void) pm_idle_component(SD_DEVINFO(un), 0);
8781 8781 mutex_enter(&un->un_pm_mutex);
8782 8782 }
8783 8783
8784 8784 /*
8785 8785 * Check whether there is already a timeout scheduled for power
8786 8786 * management. If so, don't lower the power here; that's
8787 8787 * the timeout handler's job.
8788 8788 */
8789 8789 if (un->un_pm_timeid != NULL) {
8790 8790 timeout_id_t temp_id = un->un_pm_timeid;
8791 8791 un->un_pm_timeid = NULL;
8792 8792 mutex_exit(&un->un_pm_mutex);
8793 8793 /*
8794 8794 * Timeout is active; cancel it.
8795 8795 * Note that it'll never be active on a device
8796 8796 * that does not support PM therefore we don't
8797 8797 * have to check before calling pm_idle_component.
8798 8798 */
8799 8799 (void) untimeout(temp_id);
8800 8800 (void) pm_idle_component(SD_DEVINFO(un), 0);
8801 8801
8802 8802 } else {
8803 8803 mutex_exit(&un->un_pm_mutex);
8804 8804 if ((un->un_f_pm_is_enabled == TRUE) &&
8805 8805 (pm_lower_power(SD_DEVINFO(un), 0, SD_PM_STATE_STOPPED(un))
8806 8806 != DDI_SUCCESS)) {
8807 8807 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8808 8808 "sd_dr_detach: Lower power request failed, ignoring.\n");
8809 8809 /*
8810 8810 * Fix for bug: 4297749, item # 13
8811 8811 * The above test now includes a check to see if PM is
8812 8812 * supported by this device before calling
8813 8813 * pm_lower_power().
8814 8814 * Note, the following is not dead code. The call to
8815 8815 * pm_lower_power above will generate a call back into
8816 8816 * our sdpower routine which might result in a timeout
8817 8817 * handler getting activated. Therefore the following
8818 8818 * code is valid and necessary.
8819 8819 */
8820 8820 mutex_enter(&un->un_pm_mutex);
8821 8821 if (un->un_pm_timeid != NULL) {
8822 8822 timeout_id_t temp_id = un->un_pm_timeid;
8823 8823 un->un_pm_timeid = NULL;
8824 8824 mutex_exit(&un->un_pm_mutex);
8825 8825 (void) untimeout(temp_id);
8826 8826 (void) pm_idle_component(SD_DEVINFO(un), 0);
8827 8827 } else {
8828 8828 mutex_exit(&un->un_pm_mutex);
8829 8829 }
8830 8830 }
8831 8831 }
8832 8832
8833 8833 /*
8834 8834 * Cleanup from the scsi_ifsetcap() calls (437868)
8835 8835 * Relocated here from above to be after the call to
8836 8836 * pm_lower_power, which was getting errors.
8837 8837 */
8838 8838 (void) scsi_ifsetcap(SD_ADDRESS(un), "lun-reset", 0, 1);
8839 8839 (void) scsi_ifsetcap(SD_ADDRESS(un), "wide-xfer", 0, 1);
8840 8840
8841 8841 /*
8842 8842 * Currently, tagged queuing is supported per target by the HBA.
8843 8843 * Setting this per lun instance actually sets the capability of the
8844 8844 * whole target in the HBA, which affects the luns already attached
8845 8845 * on the same target. So during detach, we can disable this
8846 8846 * capability only when this is the last lun left on the target. In
8847 8847 * doing so, we assume a target has the same tagged queuing
8848 8848 * capability for every lun. The condition can be removed when HBAs
8849 8849 * are changed to support per-lun tagged queuing.
8850 8850 */
8851 8851 if (sd_scsi_get_target_lun_count(pdip, tgt) <= 1) {
8852 8852 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
8853 8853 }
8854 8854
8855 8855 if (un->un_f_is_fibre == FALSE) {
8856 8856 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 0, 1);
8857 8857 }
8858 8858
8859 8859 /*
8860 8860 * Remove any event callbacks, fibre only
8861 8861 */
8862 8862 if (un->un_f_is_fibre == TRUE) {
8863 8863 if ((un->un_insert_event != NULL) &&
8864 8864 (ddi_remove_event_handler(un->un_insert_cb_id) !=
8865 8865 DDI_SUCCESS)) {
8866 8866 /*
8867 8867 * Note: We are returning here after having done
8868 8868 * substantial cleanup above. This is consistent
8869 8869 * with the legacy implementation but this may not
8870 8870 * be the right thing to do.
8871 8871 */
8872 8872 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8873 8873 "sd_dr_detach: Cannot cancel insert event\n");
8874 8874 goto err_remove_event;
8875 8875 }
8876 8876 un->un_insert_event = NULL;
8877 8877
8878 8878 if ((un->un_remove_event != NULL) &&
8879 8879 (ddi_remove_event_handler(un->un_remove_cb_id) !=
8880 8880 DDI_SUCCESS)) {
8881 8881 /*
8882 8882 * Note: We are returning here after having done
8883 8883 * substantial cleanup above. This is consistent
8884 8884 * with the legacy implementation but this may not
8885 8885 * be the right thing to do.
8886 8886 */
8887 8887 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
8888 8888 "sd_dr_detach: Cannot cancel remove event\n");
8889 8889 goto err_remove_event;
8890 8890 }
8891 8891 un->un_remove_event = NULL;
8892 8892 }
8893 8893
8894 8894 /* Do not free the softstate if the callback routine is active */
8895 8895 sd_sync_with_callback(un);
8896 8896
8897 8897 cmlb_detach(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
8898 8898 cmlb_free_handle(&un->un_cmlbhandle);
8899 8899
8900 8900 /*
8901 8901 * Hold the detach mutex here, to make sure that no other threads ever
8902 8902 * can access a (partially) freed soft state structure.
8903 8903 */
8904 8904 mutex_enter(&sd_detach_mutex);
8905 8905
8906 8906 /*
8907 8907 * Clean up the soft state struct.
8908 8908 * Cleanup is done in reverse order of allocs/inits.
8909 8909 * At this point there should be no competing threads anymore.
8910 8910 */
8911 8911
8912 8912 scsi_fm_fini(devp);
8913 8913
8914 8914 /*
8915 8915 * Deallocate memory for SCSI FMA.
8916 8916 */
8917 8917 kmem_free(un->un_fm_private, sizeof (struct sd_fm_internal));
8918 8918
8919 8919 /*
8920 8920 * Unregister and free device id if it was not registered
8921 8921 * by the transport.
8922 8922 */
8923 8923 if (un->un_f_devid_transport_defined == FALSE)
8924 8924 ddi_devid_unregister(devi);
8925 8925
8926 8926 /*
8927 8927 * free the devid structure if allocated before (by ddi_devid_init()
8928 8928 * or ddi_devid_get()).
8929 8929 */
8930 8930 if (un->un_devid) {
8931 8931 ddi_devid_free(un->un_devid);
8932 8932 un->un_devid = NULL;
8933 8933 }
8934 8934
8935 8935 /*
8936 8936 * Destroy wmap cache if it exists.
8937 8937 */
8938 8938 if (un->un_wm_cache != NULL) {
8939 8939 kmem_cache_destroy(un->un_wm_cache);
8940 8940 un->un_wm_cache = NULL;
8941 8941 }
8942 8942
8943 8943 /*
8944 8944 * kstat cleanup is done in detach for all device types (4363169).
8945 8945 * We do not want to fail detach if the device kstats are not deleted
8946 8946 * since there is confusion about the devo_refcnt for the device.
8947 8947 * We just delete the kstats and let detach complete successfully.
8948 8948 */
8949 8949 if (un->un_stats != NULL) {
8950 8950 kstat_delete(un->un_stats);
8951 8951 un->un_stats = NULL;
8952 8952 }
8953 8953 if (un->un_errstats != NULL) {
8954 8954 kstat_delete(un->un_errstats);
8955 8955 un->un_errstats = NULL;
8956 8956 }
8957 8957
8958 8958 /* Remove partition stats */
8959 8959 if (un->un_f_pkstats_enabled) {
8960 8960 for (i = 0; i < NSDMAP; i++) {
8961 8961 if (un->un_pstats[i] != NULL) {
8962 8962 kstat_delete(un->un_pstats[i]);
8963 8963 un->un_pstats[i] = NULL;
8964 8964 }
8965 8965 }
8966 8966 }
8967 8967
8968 8968 /* Remove xbuf registration */
8969 8969 ddi_xbuf_attr_unregister_devinfo(un->un_xbuf_attr, devi);
8970 8970 ddi_xbuf_attr_destroy(un->un_xbuf_attr);
8971 8971
8972 8972 /* Remove driver properties */
8973 8973 ddi_prop_remove_all(devi);
8974 8974
8975 8975 mutex_destroy(&un->un_pm_mutex);
8976 8976 cv_destroy(&un->un_pm_busy_cv);
8977 8977
8978 8978 cv_destroy(&un->un_wcc_cv);
8979 8979
8980 8980 /* Open/close semaphore */
8981 8981 sema_destroy(&un->un_semoclose);
8982 8982
8983 8983 /* Removable media condvar. */
8984 8984 cv_destroy(&un->un_state_cv);
8985 8985
8986 8986 /* Suspend/resume condvar. */
8987 8987 cv_destroy(&un->un_suspend_cv);
8988 8988 cv_destroy(&un->un_disk_busy_cv);
8989 8989
8990 8990 sd_free_rqs(un);
8991 8991
8992 8992 /* Free up soft state */
8993 8993 devp->sd_private = NULL;
8994 8994
8995 8995 bzero(un, sizeof (struct sd_lun));
8996 8996 #ifndef XPV_HVM_DRIVER
8997 8997 ddi_soft_state_free(sd_state, instance);
8998 8998 #endif /* !XPV_HVM_DRIVER */
8999 8999
9000 9000 mutex_exit(&sd_detach_mutex);
9001 9001
9002 9002 /* This frees up the INQUIRY data associated with the device. */
9003 9003 scsi_unprobe(devp);
9004 9004
9005 9005 /*
9006 9006 * After successfully detaching an instance, we update the information
9007 9007 * of how many luns have been attached in the relative target and
9008 9008 * controller for parallel SCSI. This information is used when sd tries
9009 9009 * to set the tagged queuing capability in HBA.
9010 9010 * Since un has been released, we can't use SD_IS_PARALLEL_SCSI(un) to
9011 9011 * check if the device is parallel SCSI. However, we don't need to
9012 9012 * check here because we've already checked during attach. No device
9013 9013 * that is not parallel SCSI is in the chain.
9014 9014 */
9015 9015 if ((tgt >= 0) && (tgt < NTARGETS_WIDE)) {
9016 9016 sd_scsi_update_lun_on_target(pdip, tgt, SD_SCSI_LUN_DETACH);
9017 9017 }
9018 9018
9019 9019 return (DDI_SUCCESS);
9020 9020
9021 9021 err_notclosed:
9022 9022 mutex_exit(SD_MUTEX(un));
9023 9023
9024 9024 err_stillbusy:
9025 9025 _NOTE(NO_COMPETING_THREADS_NOW);
9026 9026
9027 9027 err_remove_event:
9028 9028 mutex_enter(&sd_detach_mutex);
9029 9029 un->un_detach_count--;
9030 9030 mutex_exit(&sd_detach_mutex);
9031 9031
9032 9032 SD_TRACE(SD_LOG_ATTACH_DETACH, un, "sd_unit_detach: exit failure\n");
9033 9033 return (DDI_FAILURE);
9034 9034 }
9035 9035
9036 9036
9037 9037 /*
9038 9038 * Function: sd_create_errstats
9039 9039 *
9040 9040 * Description: This routine instantiates the device error stats.
9041 9041 *
9042 9042 * Note: During attach the stats are instantiated first so they are
9043 9043 * available for attach-time routines that utilize the driver
9044 9044 * iopath to send commands to the device. The stats are initialized
9045 9045 * separately so data obtained during some attach-time routines is
9046 9046 * available. (4362483)
9047 9047 *
9048 9048 * Arguments: un - driver soft state (unit) structure
9049 9049 * instance - driver instance
9050 9050 *
9051 9051 * Context: Kernel thread context
9052 9052 */
9053 9053
9054 9054 static void
9055 9055 sd_create_errstats(struct sd_lun *un, int instance)
9056 9056 {
9057 9057 struct sd_errstats *stp;
9058 9058 char kstatmodule_err[KSTAT_STRLEN];
9059 9059 char kstatname[KSTAT_STRLEN];
9060 9060 int ndata = (sizeof (struct sd_errstats) / sizeof (kstat_named_t));
9061 9061
9062 9062 ASSERT(un != NULL);
9063 9063
9064 9064 if (un->un_errstats != NULL) {
9065 9065 return;
9066 9066 }
9067 9067
9068 9068 (void) snprintf(kstatmodule_err, sizeof (kstatmodule_err),
9069 9069 "%serr", sd_label);
9070 9070 (void) snprintf(kstatname, sizeof (kstatname),
9071 9071 "%s%d,err", sd_label, instance);
9072 9072
9073 9073 un->un_errstats = kstat_create(kstatmodule_err, instance, kstatname,
9074 9074 "device_error", KSTAT_TYPE_NAMED, ndata, KSTAT_FLAG_PERSISTENT);
9075 9075
9076 9076 if (un->un_errstats == NULL) {
9077 9077 SD_ERROR(SD_LOG_ATTACH_DETACH, un,
9078 9078 "sd_create_errstats: Failed kstat_create\n");
9079 9079 return;
9080 9080 }
9081 9081
9082 9082 stp = (struct sd_errstats *)un->un_errstats->ks_data;
9083 9083 kstat_named_init(&stp->sd_softerrs, "Soft Errors",
9084 9084 KSTAT_DATA_UINT32);
9085 9085 kstat_named_init(&stp->sd_harderrs, "Hard Errors",
9086 9086 KSTAT_DATA_UINT32);
9087 9087 kstat_named_init(&stp->sd_transerrs, "Transport Errors",
9088 9088 KSTAT_DATA_UINT32);
9089 9089 kstat_named_init(&stp->sd_vid, "Vendor",
9090 9090 KSTAT_DATA_CHAR);
9091 9091 kstat_named_init(&stp->sd_pid, "Product",
9092 9092 KSTAT_DATA_CHAR);
9093 9093 kstat_named_init(&stp->sd_revision, "Revision",
9094 9094 KSTAT_DATA_CHAR);
9095 9095 kstat_named_init(&stp->sd_serial, "Serial No",
9096 9096 KSTAT_DATA_CHAR);
9097 9097 kstat_named_init(&stp->sd_capacity, "Size",
9098 9098 KSTAT_DATA_ULONGLONG);
9099 9099 kstat_named_init(&stp->sd_rq_media_err, "Media Error",
9100 9100 KSTAT_DATA_UINT32);
9101 9101 kstat_named_init(&stp->sd_rq_ntrdy_err, "Device Not Ready",
9102 9102 KSTAT_DATA_UINT32);
9103 9103 kstat_named_init(&stp->sd_rq_nodev_err, "No Device",
9104 9104 KSTAT_DATA_UINT32);
9105 9105 kstat_named_init(&stp->sd_rq_recov_err, "Recoverable",
9106 9106 KSTAT_DATA_UINT32);
9107 9107 kstat_named_init(&stp->sd_rq_illrq_err, "Illegal Request",
9108 9108 KSTAT_DATA_UINT32);
9109 9109 kstat_named_init(&stp->sd_rq_pfa_err, "Predictive Failure Analysis",
9110 9110 KSTAT_DATA_UINT32);
9111 9111
9112 9112 un->un_errstats->ks_private = un;
9113 9113 un->un_errstats->ks_update = nulldev;
9114 9114
9115 9115 kstat_install(un->un_errstats);
9116 9116 }
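/*
 * Editorial sketch (not driver code): the minimal kstat lifecycle that
 * sd_create_errstats() follows above, shown with a hypothetical
 * two-counter stat set.  The module/stat names and the my_errstats
 * structure are illustrative only.
 */
struct my_errstats {
	kstat_named_t	me_soft;
	kstat_named_t	me_hard;
};

static kstat_t *
my_errstats_create(int instance)
{
	kstat_t			*ksp;
	struct my_errstats	*mep;

	ksp = kstat_create("mymoderr", instance, "mystat", "device_error",
	    KSTAT_TYPE_NAMED,
	    sizeof (struct my_errstats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_PERSISTENT);
	if (ksp == NULL)
		return (NULL);		/* callers must tolerate no stats */

	mep = (struct my_errstats *)ksp->ks_data;
	kstat_named_init(&mep->me_soft, "Soft Errors", KSTAT_DATA_UINT32);
	kstat_named_init(&mep->me_hard, "Hard Errors", KSTAT_DATA_UINT32);
	kstat_install(ksp);		/* publish to kstat(1M)/iostat -E */
	return (ksp);
}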
9117 9117
9118 9118
9119 9119 /*
9120 9120 * Function: sd_set_errstats
9121 9121 *
9122 9122 * Description: This routine sets the value of the vendor id, product id,
9123 9123 * revision, serial number, and capacity device error stats.
9124 9124 *
9125 9125 * Note: During attach the stats are instantiated first so they are
9126 9126 * available for attach-time routines that utilize the driver
9127 9127 * iopath to send commands to the device. The stats are initialized
9128 9128 * separately so data obtained during some attach-time routines is
9129 9129 * available. (4362483)
9130 9130 *
9131 9131 * Arguments: un - driver soft state (unit) structure
9132 9132 *
9133 9133 * Context: Kernel thread context
9134 9134 */
9135 9135
9136 9136 static void
9137 9137 sd_set_errstats(struct sd_lun *un)
9138 9138 {
9139 9139 struct sd_errstats *stp;
9140 9140 char *sn;
9141 9141
9142 9142 ASSERT(un != NULL);
9143 9143 ASSERT(un->un_errstats != NULL);
9144 9144 stp = (struct sd_errstats *)un->un_errstats->ks_data;
9145 9145 ASSERT(stp != NULL);
9146 9146 (void) strncpy(stp->sd_vid.value.c, un->un_sd->sd_inq->inq_vid, 8);
9147 9147 (void) strncpy(stp->sd_pid.value.c, un->un_sd->sd_inq->inq_pid, 16);
9148 9148 (void) strncpy(stp->sd_revision.value.c,
9149 9149 un->un_sd->sd_inq->inq_revision, 4);
9150 9150
9151 9151 	/*
9152 9152 	 * All the errstats are persistent across detach/attach, so
9153 9153 	 * reset them all here in case a disk drive was hot-replaced,
9154 9154 	 * except for Sun-qualified drives whose serial number has
9155 9155 	 * not changed.
9156 9156 	 */
9157 9157 if ((bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) != 0) ||
9158 9158 (bcmp(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
9159 9159 sizeof (SD_INQUIRY(un)->inq_serial)) != 0)) {
9160 9160 stp->sd_softerrs.value.ui32 = 0;
9161 9161 stp->sd_harderrs.value.ui32 = 0;
9162 9162 stp->sd_transerrs.value.ui32 = 0;
9163 9163 stp->sd_rq_media_err.value.ui32 = 0;
9164 9164 stp->sd_rq_ntrdy_err.value.ui32 = 0;
9165 9165 stp->sd_rq_nodev_err.value.ui32 = 0;
9166 9166 stp->sd_rq_recov_err.value.ui32 = 0;
9167 9167 stp->sd_rq_illrq_err.value.ui32 = 0;
9168 9168 stp->sd_rq_pfa_err.value.ui32 = 0;
9169 9169 }
9170 9170
9171 9171 /*
9172 9172 * Set the "Serial No" kstat for Sun qualified drives (indicated by
9173 9173 * "SUN" in bytes 25-27 of the inquiry data (bytes 9-11 of the pid)
9174 9174 * (4376302))
9175 9175 */
9176 9176 if (bcmp(&SD_INQUIRY(un)->inq_pid[9], "SUN", 3) == 0) {
9177 9177 bcopy(&SD_INQUIRY(un)->inq_serial, stp->sd_serial.value.c,
9178 9178 sizeof (SD_INQUIRY(un)->inq_serial));
9179 9179 } else {
9180 9180 /*
9181 9181 * Set the "Serial No" kstat for non-Sun qualified drives
9182 9182 */
9183 9183 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, SD_DEVINFO(un),
9184 9184 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9185 9185 INQUIRY_SERIAL_NO, &sn) == DDI_SUCCESS) {
9186 9186 (void) strlcpy(stp->sd_serial.value.c, sn,
9187 9187 sizeof (stp->sd_serial.value.c));
9188 9188 ddi_prop_free(sn);
9189 9189 }
9190 9190 }
9191 9191
9192 9192 if (un->un_f_blockcount_is_valid != TRUE) {
9193 9193 /*
9194 9194 * Set capacity error stat to 0 for no media. This ensures
9195 9195 * a valid capacity is displayed in response to 'iostat -E'
9196 9196 * when no media is present in the device.
9197 9197 */
9198 9198 stp->sd_capacity.value.ui64 = 0;
9199 9199 } else {
9200 9200 /*
9201 9201 * Multiply un_blockcount by un->un_sys_blocksize to get
9202 9202 * capacity.
9203 9203 *
9204 9204 * Note: for non-512 blocksize devices "un_blockcount" has been
9205 9205 * "scaled" in sd_send_scsi_READ_CAPACITY by multiplying by
9206 9206 * (un_tgt_blocksize / un->un_sys_blocksize).
9207 9207 */
9208 9208 stp->sd_capacity.value.ui64 = (uint64_t)
9209 9209 ((uint64_t)un->un_blockcount * un->un_sys_blocksize);
9210 9210 }
9211 9211 }
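/*
 * Editorial sketch (not driver code): the "same Sun-qualified drive?"
 * test that decides above whether the persistent error counters are
 * reset.  Arguments mirror the raw INQUIRY fields used by the driver.
 */
static int
same_sun_drive(const char *inq_pid, const char *inq_serial,
    const char *saved_serial, size_t serial_len)
{
	/* Sun-qualified drives carry "SUN" at bytes 9-11 of the pid */
	if (bcmp(&inq_pid[9], "SUN", 3) != 0)
		return (0);
	/* same drive only if the serial number is unchanged */
	return (bcmp(inq_serial, saved_serial, serial_len) == 0);
}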
9212 9212
9213 9213
9214 9214 /*
9215 9215 * Function: sd_set_pstats
9216 9216 *
9217 9217 * Description: This routine instantiates and initializes the partition
9218 9218 * stats for each partition with more than zero blocks.
9219 9219 * (4363169)
9220 9220 *
9221 9221 * Arguments: un - driver soft state (unit) structure
9222 9222 *
9223 9223 * Context: Kernel thread context
9224 9224 */
9225 9225
9226 9226 static void
9227 9227 sd_set_pstats(struct sd_lun *un)
9228 9228 {
9229 9229 char kstatname[KSTAT_STRLEN];
9230 9230 int instance;
9231 9231 int i;
9232 9232 diskaddr_t nblks = 0;
9233 9233 char *partname = NULL;
9234 9234
9235 9235 ASSERT(un != NULL);
9236 9236
9237 9237 instance = ddi_get_instance(SD_DEVINFO(un));
9238 9238
9239 9239 /* Note:x86: is this a VTOC8/VTOC16 difference? */
9240 9240 for (i = 0; i < NSDMAP; i++) {
9241 9241
9242 9242 if (cmlb_partinfo(un->un_cmlbhandle, i,
9243 9243 &nblks, NULL, &partname, NULL, (void *)SD_PATH_DIRECT) != 0)
9244 9244 continue;
9245 9245 mutex_enter(SD_MUTEX(un));
9246 9246
9247 9247 if ((un->un_pstats[i] == NULL) &&
9248 9248 (nblks != 0)) {
9249 9249
9250 9250 (void) snprintf(kstatname, sizeof (kstatname),
9251 9251 "%s%d,%s", sd_label, instance,
9252 9252 partname);
9253 9253
9254 9254 un->un_pstats[i] = kstat_create(sd_label,
9255 9255 instance, kstatname, "partition", KSTAT_TYPE_IO,
9256 9256 1, KSTAT_FLAG_PERSISTENT);
9257 9257 if (un->un_pstats[i] != NULL) {
9258 9258 un->un_pstats[i]->ks_lock = SD_MUTEX(un);
9259 9259 kstat_install(un->un_pstats[i]);
9260 9260 }
9261 9261 }
9262 9262 mutex_exit(SD_MUTEX(un));
9263 9263 }
9264 9264 }
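/*
 * Editorial sketch (not driver code): the partition kstat name built
 * above has the form "<label><instance>,<partname>", e.g. "sd0,a" for
 * slice a of instance 0.  sd_pstat_name() is hypothetical.
 */
static void
sd_pstat_name(char *buf, size_t len, const char *label, int instance,
    const char *partname)
{
	(void) snprintf(buf, len, "%s%d,%s", label, instance, partname);
}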
9265 9265
9266 9266
9267 9267 #if (defined(__fibre))
9268 9268 /*
9269 9269 * Function: sd_init_event_callbacks
9270 9270 *
9271 9271 * Description: This routine initializes the insertion and removal event
9272 9272 * callbacks. (fibre only)
9273 9273 *
9274 9274 * Arguments: un - driver soft state (unit) structure
9275 9275 *
9276 9276 * Context: Kernel thread context
9277 9277 */
9278 9278
9279 9279 static void
9280 9280 sd_init_event_callbacks(struct sd_lun *un)
9281 9281 {
9282 9282 ASSERT(un != NULL);
9283 9283
9284 9284 if ((un->un_insert_event == NULL) &&
9285 9285 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_INSERT_EVENT,
9286 9286 &un->un_insert_event) == DDI_SUCCESS)) {
9287 9287 /*
9288 9288 * Add the callback for an insertion event
9289 9289 */
9290 9290 (void) ddi_add_event_handler(SD_DEVINFO(un),
9291 9291 un->un_insert_event, sd_event_callback, (void *)un,
9292 9292 &(un->un_insert_cb_id));
9293 9293 }
9294 9294
9295 9295 if ((un->un_remove_event == NULL) &&
9296 9296 (ddi_get_eventcookie(SD_DEVINFO(un), FCAL_REMOVE_EVENT,
9297 9297 &un->un_remove_event) == DDI_SUCCESS)) {
9298 9298 /*
9299 9299 * Add the callback for a removal event
9300 9300 */
9301 9301 (void) ddi_add_event_handler(SD_DEVINFO(un),
9302 9302 un->un_remove_event, sd_event_callback, (void *)un,
9303 9303 &(un->un_remove_cb_id));
9304 9304 }
9305 9305 }
9306 9306
9307 9307
9308 9308 /*
9309 9309 * Function: sd_event_callback
9310 9310 *
9311 9311  * Description: This routine handles insert/remove events (photon). The
9312 9312  *		state is changed to OFFLINE, which can be used to suppress
9313 9313  *		error messages. (fibre only)
9314 9314 *
9315 9315 * Arguments: un - driver soft state (unit) structure
9316 9316 *
9317 9317 * Context: Callout thread context
9318 9318 */
9319 9319 /* ARGSUSED */
9320 9320 static void
9321 9321 sd_event_callback(dev_info_t *dip, ddi_eventcookie_t event, void *arg,
9322 9322 void *bus_impldata)
9323 9323 {
9324 9324 struct sd_lun *un = (struct sd_lun *)arg;
9325 9325
9326 9326 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_insert_event));
9327 9327 if (event == un->un_insert_event) {
9328 9328 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: insert event");
9329 9329 mutex_enter(SD_MUTEX(un));
9330 9330 if (un->un_state == SD_STATE_OFFLINE) {
9331 9331 if (un->un_last_state != SD_STATE_SUSPENDED) {
9332 9332 un->un_state = un->un_last_state;
9333 9333 } else {
9334 9334 /*
9335 9335 * We have gone through SUSPEND/RESUME while
9336 9336 * we were offline. Restore the last state
9337 9337 */
9338 9338 un->un_state = un->un_save_state;
9339 9339 }
9340 9340 }
9341 9341 mutex_exit(SD_MUTEX(un));
9342 9342
9343 9343 _NOTE(DATA_READABLE_WITHOUT_LOCK(sd_lun::un_remove_event));
9344 9344 } else if (event == un->un_remove_event) {
9345 9345 SD_TRACE(SD_LOG_COMMON, un, "sd_event_callback: remove event");
9346 9346 mutex_enter(SD_MUTEX(un));
9347 9347 /*
9348 9348 * We need to handle an event callback that occurs during
9349 9349 * the suspend operation, since we don't prevent it.
9350 9350 */
9351 9351 if (un->un_state != SD_STATE_OFFLINE) {
9352 9352 if (un->un_state != SD_STATE_SUSPENDED) {
9353 9353 New_state(un, SD_STATE_OFFLINE);
9354 9354 } else {
9355 9355 un->un_last_state = SD_STATE_OFFLINE;
9356 9356 }
9357 9357 }
9358 9358 mutex_exit(SD_MUTEX(un));
9359 9359 } else {
9360 9360 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
9361 9361 "!Unknown event\n");
9362 9362 }
9363 9363
9364 9364 }
9365 9365 #endif
9366 9366
9367 9367 /*
9368 9368 * Function: sd_cache_control()
9369 9369 *
9370 9370 * Description: This routine is the driver entry point for setting
9371 9371 * read and write caching by modifying the WCE (write cache
9372 9372 * enable) and RCD (read cache disable) bits of mode
9373 9373 * page 8 (MODEPAGE_CACHING).
9374 9374 *
9375 9375 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
9376 9376 * structure for this target.
9377 9377 * rcd_flag - flag for controlling the read cache
9378 9378 * wce_flag - flag for controlling the write cache
9379 9379 *
9380 9380 * Return Code: EIO
9381 9381 * code returned by sd_send_scsi_MODE_SENSE and
9382 9382 * sd_send_scsi_MODE_SELECT
9383 9383 *
9384 9384 * Context: Kernel Thread
9385 9385 */
9386 9386
9387 9387 static int
9388 9388 sd_cache_control(sd_ssc_t *ssc, int rcd_flag, int wce_flag)
9389 9389 {
9390 9390 struct mode_caching *mode_caching_page;
9391 9391 uchar_t *header;
9392 9392 size_t buflen;
9393 9393 int hdrlen;
9394 9394 int bd_len;
9395 9395 int rval = 0;
9396 9396 struct mode_header_grp2 *mhp;
9397 9397 struct sd_lun *un;
9398 9398 int status;
9399 9399
9400 9400 ASSERT(ssc != NULL);
9401 9401 un = ssc->ssc_un;
9402 9402 ASSERT(un != NULL);
9403 9403
9404 9404 /*
9405 9405 * Do a test unit ready, otherwise a mode sense may not work if this
9406 9406 * is the first command sent to the device after boot.
9407 9407 */
9408 9408 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
9409 9409 if (status != 0)
9410 9410 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9411 9411
9412 9412 if (un->un_f_cfg_is_atapi == TRUE) {
9413 9413 hdrlen = MODE_HEADER_LENGTH_GRP2;
9414 9414 } else {
9415 9415 hdrlen = MODE_HEADER_LENGTH;
9416 9416 }
9417 9417
9418 9418 	/*
9419 9419 	 * Allocate memory for the retrieved mode page and its headers, and
9420 9420 	 * set a pointer to the page itself. Use mode_cache_scsi3 to ensure
9421 9421 	 * we get all of the mode sense data; otherwise the mode select
9422 9422 	 * will fail. mode_cache_scsi3 is a superset of mode_caching.
9423 9423 	 */
9424 9424 buflen = hdrlen + MODE_BLK_DESC_LENGTH +
9425 9425 sizeof (struct mode_cache_scsi3);
9426 9426
9427 9427 header = kmem_zalloc(buflen, KM_SLEEP);
9428 9428
9429 9429 /* Get the information from the device. */
9430 9430 if (un->un_f_cfg_is_atapi == TRUE) {
9431 9431 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen,
9432 9432 MODEPAGE_CACHING, SD_PATH_DIRECT);
9433 9433 } else {
9434 9434 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
9435 9435 MODEPAGE_CACHING, SD_PATH_DIRECT);
9436 9436 }
9437 9437
9438 9438 if (rval != 0) {
9439 9439 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
9440 9440 "sd_cache_control: Mode Sense Failed\n");
9441 9441 goto mode_sense_failed;
9442 9442 }
9443 9443
9444 9444 /*
9445 9445 * Determine size of Block Descriptors in order to locate
9446 9446 * the mode page data. ATAPI devices return 0, SCSI devices
9447 9447 * should return MODE_BLK_DESC_LENGTH.
9448 9448 */
9449 9449 if (un->un_f_cfg_is_atapi == TRUE) {
9450 9450 mhp = (struct mode_header_grp2 *)header;
9451 9451 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
9452 9452 } else {
9453 9453 bd_len = ((struct mode_header *)header)->bdesc_length;
9454 9454 }
9455 9455
9456 9456 if (bd_len > MODE_BLK_DESC_LENGTH) {
9457 9457 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0,
9458 9458 "sd_cache_control: Mode Sense returned invalid block "
9459 9459 "descriptor length\n");
9460 9460 rval = EIO;
9461 9461 goto mode_sense_failed;
9462 9462 }
9463 9463
9464 9464 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
9465 9465 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
9466 9466 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
9467 9467 "sd_cache_control: Mode Sense caching page code mismatch "
9468 9468 "%d\n", mode_caching_page->mode_page.code);
9469 9469 rval = EIO;
9470 9470 goto mode_sense_failed;
9471 9471 }
9472 9472
9473 9473 /* Check the relevant bits on successful mode sense. */
9474 9474 if ((mode_caching_page->rcd && rcd_flag == SD_CACHE_ENABLE) ||
9475 9475 (!mode_caching_page->rcd && rcd_flag == SD_CACHE_DISABLE) ||
9476 9476 (mode_caching_page->wce && wce_flag == SD_CACHE_DISABLE) ||
9477 9477 (!mode_caching_page->wce && wce_flag == SD_CACHE_ENABLE)) {
9478 9478
9479 9479 size_t sbuflen;
9480 9480 uchar_t save_pg;
9481 9481
9482 9482 /*
9483 9483 * Construct select buffer length based on the
9484 9484 * length of the sense data returned.
9485 9485 */
9486 9486 sbuflen = hdrlen + bd_len +
9487 9487 sizeof (struct mode_page) +
9488 9488 (int)mode_caching_page->mode_page.length;
9489 9489
9490 9490 /*
9491 9491 * Set the caching bits as requested.
9492 9492 */
9493 9493 if (rcd_flag == SD_CACHE_ENABLE)
9494 9494 mode_caching_page->rcd = 0;
9495 9495 else if (rcd_flag == SD_CACHE_DISABLE)
9496 9496 mode_caching_page->rcd = 1;
9497 9497
9498 9498 if (wce_flag == SD_CACHE_ENABLE)
9499 9499 mode_caching_page->wce = 1;
9500 9500 else if (wce_flag == SD_CACHE_DISABLE)
9501 9501 mode_caching_page->wce = 0;
9502 9502
9503 9503 /*
9504 9504 * Save the page if the mode sense says the
9505 9505 * drive supports it.
9506 9506 */
9507 9507 save_pg = mode_caching_page->mode_page.ps ?
9508 9508 SD_SAVE_PAGE : SD_DONTSAVE_PAGE;
9509 9509
9510 9510 /* Clear reserved bits before mode select. */
9511 9511 mode_caching_page->mode_page.ps = 0;
9512 9512
9513 9513 /*
9514 9514 * Clear out mode header for mode select.
9515 9515 * The rest of the retrieved page will be reused.
9516 9516 */
9517 9517 bzero(header, hdrlen);
9518 9518
9519 9519 if (un->un_f_cfg_is_atapi == TRUE) {
9520 9520 mhp = (struct mode_header_grp2 *)header;
9521 9521 mhp->bdesc_length_hi = bd_len >> 8;
9522 9522 mhp->bdesc_length_lo = (uchar_t)bd_len & 0xff;
9523 9523 } else {
9524 9524 ((struct mode_header *)header)->bdesc_length = bd_len;
9525 9525 }
9526 9526
9527 9527 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9528 9528
9529 9529 /* Issue mode select to change the cache settings */
9530 9530 if (un->un_f_cfg_is_atapi == TRUE) {
9531 9531 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, header,
9532 9532 sbuflen, save_pg, SD_PATH_DIRECT);
9533 9533 } else {
9534 9534 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, header,
9535 9535 sbuflen, save_pg, SD_PATH_DIRECT);
9536 9536 }
9537 9537
9538 9538 }
9539 9539
9540 9540
9541 9541 mode_sense_failed:
9542 9542
9543 9543 kmem_free(header, buflen);
9544 9544
9545 9545 if (rval != 0) {
9546 9546 if (rval == EIO)
9547 9547 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
9548 9548 else
9549 9549 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9550 9550 }
9551 9551 return (rval);
9552 9552 }
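/*
 * Editorial sketch (not driver code): how the caching mode page is
 * located in the MODE SENSE buffer and how the WCE/RCD bits map onto
 * the requested cache state, assuming the group 0 (non-ATAPI) header
 * layout used above.  Both helpers are illustrative only.
 */
static struct mode_caching *
locate_caching_page(uchar_t *header, int bd_len)
{
	/* page data follows the fixed header and the block descriptor */
	return ((struct mode_caching *)
	    (header + MODE_HEADER_LENGTH + bd_len));
}

static void
set_cache_bits(struct mode_caching *pg, int rcd_flag, int wce_flag)
{
	if (rcd_flag == SD_CACHE_ENABLE)
		pg->rcd = 0;		/* RCD clear => read cache enabled */
	else if (rcd_flag == SD_CACHE_DISABLE)
		pg->rcd = 1;

	if (wce_flag == SD_CACHE_ENABLE)
		pg->wce = 1;		/* WCE set => write cache enabled */
	else if (wce_flag == SD_CACHE_DISABLE)
		pg->wce = 0;
}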
9553 9553
9554 9554
9555 9555 /*
9556 9556 * Function: sd_get_write_cache_enabled()
9557 9557 *
9558 9558 * Description: This routine is the driver entry point for determining if
9559 9559 * write caching is enabled. It examines the WCE (write cache
9560 9560 * enable) bits of mode page 8 (MODEPAGE_CACHING).
9561 9561 *
9562 9562 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
9563 9563 * structure for this target.
9564 9564 * is_enabled - pointer to int where write cache enabled state
9565 9565 * is returned (non-zero -> write cache enabled)
9566 9566 *
9567 9567  *
9569 9569 * code returned by sd_send_scsi_MODE_SENSE
9570 9570 *
9571 9571 * Context: Kernel Thread
9572 9572 *
9573 9573 * NOTE: If ioctl is added to disable write cache, this sequence should
9574 9574 * be followed so that no locking is required for accesses to
9575 9575 * un->un_f_write_cache_enabled:
9576 9576 * do mode select to clear wce
9577 9577 * do synchronize cache to flush cache
9578 9578 * set un->un_f_write_cache_enabled = FALSE
9579 9579 *
9580 9580 * Conversely, an ioctl to enable the write cache should be done
9581 9581 * in this order:
9582 9582 * set un->un_f_write_cache_enabled = TRUE
9583 9583 * do mode select to set wce
9584 9584 */
9585 9585
9586 9586 static int
9587 9587 sd_get_write_cache_enabled(sd_ssc_t *ssc, int *is_enabled)
9588 9588 {
9589 9589 struct mode_caching *mode_caching_page;
9590 9590 uchar_t *header;
9591 9591 size_t buflen;
9592 9592 int hdrlen;
9593 9593 int bd_len;
9594 9594 int rval = 0;
9595 9595 struct sd_lun *un;
9596 9596 int status;
9597 9597
9598 9598 ASSERT(ssc != NULL);
9599 9599 un = ssc->ssc_un;
9600 9600 ASSERT(un != NULL);
9601 9601 ASSERT(is_enabled != NULL);
9602 9602
9603 9603 /* in case of error, flag as enabled */
9604 9604 *is_enabled = TRUE;
9605 9605
9606 9606 /*
9607 9607 * Do a test unit ready, otherwise a mode sense may not work if this
9608 9608 * is the first command sent to the device after boot.
9609 9609 */
9610 9610 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
9611 9611
9612 9612 if (status != 0)
9613 9613 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9614 9614
9615 9615 if (un->un_f_cfg_is_atapi == TRUE) {
9616 9616 hdrlen = MODE_HEADER_LENGTH_GRP2;
9617 9617 } else {
9618 9618 hdrlen = MODE_HEADER_LENGTH;
9619 9619 }
9620 9620
9621 9621 /*
9622 9622 * Allocate memory for the retrieved mode page and its headers. Set
9623 9623 * a pointer to the page itself.
9624 9624 */
9625 9625 buflen = hdrlen + MODE_BLK_DESC_LENGTH + sizeof (struct mode_caching);
9626 9626 header = kmem_zalloc(buflen, KM_SLEEP);
9627 9627
9628 9628 /* Get the information from the device. */
9629 9629 if (un->un_f_cfg_is_atapi == TRUE) {
9630 9630 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, header, buflen,
9631 9631 MODEPAGE_CACHING, SD_PATH_DIRECT);
9632 9632 } else {
9633 9633 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, header, buflen,
9634 9634 MODEPAGE_CACHING, SD_PATH_DIRECT);
9635 9635 }
9636 9636
9637 9637 if (rval != 0) {
9638 9638 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
9639 9639 "sd_get_write_cache_enabled: Mode Sense Failed\n");
9640 9640 goto mode_sense_failed;
9641 9641 }
9642 9642
9643 9643 /*
9644 9644 * Determine size of Block Descriptors in order to locate
9645 9645 * the mode page data. ATAPI devices return 0, SCSI devices
9646 9646 * should return MODE_BLK_DESC_LENGTH.
9647 9647 */
9648 9648 if (un->un_f_cfg_is_atapi == TRUE) {
9649 9649 struct mode_header_grp2 *mhp;
9650 9650 mhp = (struct mode_header_grp2 *)header;
9651 9651 bd_len = (mhp->bdesc_length_hi << 8) | mhp->bdesc_length_lo;
9652 9652 } else {
9653 9653 bd_len = ((struct mode_header *)header)->bdesc_length;
9654 9654 }
9655 9655
9656 9656 if (bd_len > MODE_BLK_DESC_LENGTH) {
9657 9657 /* FMA should make upset complain here */
9658 9658 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0,
9659 9659 "sd_get_write_cache_enabled: Mode Sense returned invalid "
9660 9660 "block descriptor length\n");
9661 9661 rval = EIO;
9662 9662 goto mode_sense_failed;
9663 9663 }
9664 9664
9665 9665 mode_caching_page = (struct mode_caching *)(header + hdrlen + bd_len);
9666 9666 if (mode_caching_page->mode_page.code != MODEPAGE_CACHING) {
9667 9667 /* FMA could make upset complain here */
9668 9668 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
9669 9669 "sd_get_write_cache_enabled: Mode Sense caching page "
9670 9670 "code mismatch %d\n", mode_caching_page->mode_page.code);
9671 9671 rval = EIO;
9672 9672 goto mode_sense_failed;
9673 9673 }
9674 9674 *is_enabled = mode_caching_page->wce;
9675 9675
9676 9676 mode_sense_failed:
9677 9677 if (rval == 0) {
9678 9678 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
9679 9679 } else if (rval == EIO) {
9680 9680 		/*
9681 9681 		 * Some disks do not support MODE SENSE(6); we should
9682 9682 		 * ignore this kind of error (sense key 0x5,
9683 9683 		 * illegal request).
9684 9684 		 */
9685 9685 uint8_t *sensep;
9686 9686 int senlen;
9687 9687
9688 9688 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
9689 9689 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
9690 9690 ssc->ssc_uscsi_cmd->uscsi_rqresid);
9691 9691
9692 9692 if (senlen > 0 &&
9693 9693 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
9694 9694 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
9695 9695 } else {
9696 9696 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
9697 9697 }
9698 9698 } else {
9699 9699 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9700 9700 }
9701 9701 kmem_free(header, buflen);
9702 9702 return (rval);
9703 9703 }
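/*
 * Editorial sketch (not driver code): the lock-free update ordering
 * described in the NOTE above, written against the existing helpers.
 * sd_toggle_wce() is hypothetical, and passing SD_CACHE_ENABLE for the
 * read-cache flag is illustrative only.
 */
static int
sd_toggle_wce(sd_ssc_t *ssc, struct sd_lun *un, int enable)
{
	int rval;

	if (enable) {
		/* enable: set the flag first, then set WCE */
		un->un_f_write_cache_enabled = TRUE;
		rval = sd_cache_control(ssc, SD_CACHE_ENABLE,
		    SD_CACHE_ENABLE);
	} else {
		/* disable: clear WCE, flush the cache, then clear flag */
		rval = sd_cache_control(ssc, SD_CACHE_ENABLE,
		    SD_CACHE_DISABLE);
		if (rval == 0)
			rval = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL);
		if (rval == 0)
			un->un_f_write_cache_enabled = FALSE;
	}
	return (rval);
}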
9704 9704
9705 9705 /*
9706 9706 * Function: sd_get_nv_sup()
9707 9707 *
9708 9708 * Description: This routine is the driver entry point for
9709 9709 * determining whether non-volatile cache is supported. This
9710 9710 * determination process works as follows:
9711 9711 *
9712 9712  *		1. sd first queries sd.conf to see whether the
9713 9713  *		suppress_cache_flush bit is set for this device.
9714 9714  *
9715 9715  *		2. If it is not set there, sd queries the internal
9716 9716  *		disk table.
9717 9717  *
9718 9718  *		3. If either sd.conf or the internal disk table
9719 9719  *		specifies that cache flush be suppressed, sd does not
9720 9720  *		bother checking the NV_SUP bit.
9721 9721  *
9722 9722  *		If the SUPPRESS_CACHE_FLUSH bit is not set to 1, sd
9723 9723  *		queries the optional INQUIRY VPD page 0x86. If the
9724 9724  *		device supports VPD page 0x86, sd examines the NV_SUP
9725 9725  *		(non-volatile cache support) bit in that page:
9726 9726  *		o If the NV_SUP bit is set, sd assumes the device has
9727 9727  *		a non-volatile cache and sets un_f_sync_nv_supported
9728 9728  *		to TRUE.
9729 9729  *		o Otherwise the cache is not non-volatile, and
9730 9730  *		un_f_sync_nv_supported is set to FALSE.
9731 9731 *
9732 9732 * Arguments: un - driver soft state (unit) structure
9733 9733 *
9734 9734 * Return Code:
9735 9735 *
9736 9736 * Context: Kernel Thread
9737 9737 */
9738 9738
9739 9739 static void
9740 9740 sd_get_nv_sup(sd_ssc_t *ssc)
9741 9741 {
9742 9742 int rval = 0;
9743 9743 uchar_t *inq86 = NULL;
9744 9744 size_t inq86_len = MAX_INQUIRY_SIZE;
9745 9745 size_t inq86_resid = 0;
9746 9746 struct dk_callback *dkc;
9747 9747 struct sd_lun *un;
9748 9748
9749 9749 ASSERT(ssc != NULL);
9750 9750 un = ssc->ssc_un;
9751 9751 ASSERT(un != NULL);
9752 9752
9753 9753 mutex_enter(SD_MUTEX(un));
9754 9754
9755 9755 /*
9756 9756 * Be conservative on the device's support of
9757 9757 * SYNC_NV bit: un_f_sync_nv_supported is
9758 9758 * initialized to be false.
9759 9759 */
9760 9760 un->un_f_sync_nv_supported = FALSE;
9761 9761
9762 9762 /*
9763 9763 * If either sd.conf or internal disk table
9764 9764 * specifies cache flush be suppressed, then
9765 9765 * we don't bother checking NV_SUP bit.
9766 9766 */
9767 9767 if (un->un_f_suppress_cache_flush == TRUE) {
9768 9768 mutex_exit(SD_MUTEX(un));
9769 9769 return;
9770 9770 }
9771 9771
9772 9772 if (sd_check_vpd_page_support(ssc) == 0 &&
9773 9773 un->un_vpd_page_mask & SD_VPD_EXTENDED_DATA_PG) {
9774 9774 mutex_exit(SD_MUTEX(un));
9775 9775 /* collect page 86 data if available */
9776 9776 inq86 = kmem_zalloc(inq86_len, KM_SLEEP);
9777 9777
9778 9778 rval = sd_send_scsi_INQUIRY(ssc, inq86, inq86_len,
9779 9779 0x01, 0x86, &inq86_resid);
9780 9780
9781 9781 if (rval == 0 && (inq86_len - inq86_resid > 6)) {
9782 9782 SD_TRACE(SD_LOG_COMMON, un,
9783 9783 			    "sd_get_nv_sup: successfully got "
9784 9784 			    "VPD page: %x PAGE LENGTH: %x "
9785 9785 			    "BYTE 6: %x\n",
9786 9786 inq86[1], inq86[3], inq86[6]);
9787 9787
9788 9788 mutex_enter(SD_MUTEX(un));
9789 9789 /*
9790 9790 * check the value of NV_SUP bit: only if the device
9791 9791 * reports NV_SUP bit to be 1, the
9792 9792 * un_f_sync_nv_supported bit will be set to true.
9793 9793 */
9794 9794 if (inq86[6] & SD_VPD_NV_SUP) {
9795 9795 un->un_f_sync_nv_supported = TRUE;
9796 9796 }
9797 9797 mutex_exit(SD_MUTEX(un));
9798 9798 } else if (rval != 0) {
9799 9799 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9800 9800 }
9801 9801
9802 9802 kmem_free(inq86, inq86_len);
9803 9803 } else {
9804 9804 mutex_exit(SD_MUTEX(un));
9805 9805 }
9806 9806
9807 9807 	/*
9808 9808 	 * Send a SYNC CACHE command to check whether the SYNC_NV bit
9809 9809 	 * is supported. By this point un_f_sync_nv_supported holds the
9810 9810 	 * correct value for this device.
9811 9811 	 */
9812 9812 mutex_enter(SD_MUTEX(un));
9813 9813 if (un->un_f_sync_nv_supported) {
9814 9814 mutex_exit(SD_MUTEX(un));
9815 9815 dkc = kmem_zalloc(sizeof (struct dk_callback), KM_SLEEP);
9816 9816 dkc->dkc_flag = FLUSH_VOLATILE;
9817 9817 (void) sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
9818 9818
9819 9819 /*
9820 9820 * Send a TEST UNIT READY command to the device. This should
9821 9821 * clear any outstanding UNIT ATTENTION that may be present.
9822 9822 */
9823 9823 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_DONT_RETRY_TUR);
9824 9824 if (rval != 0)
9825 9825 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
9826 9826
9827 9827 kmem_free(dkc, sizeof (struct dk_callback));
9828 9828 } else {
9829 9829 mutex_exit(SD_MUTEX(un));
9830 9830 }
9831 9831
9832 9832 	SD_TRACE(SD_LOG_COMMON, un,
9833 9833 	    "sd_get_nv_sup: un_f_suppress_cache_flush is set to %d\n",
9834 9834 	    un->un_f_suppress_cache_flush);
9835 9835 }
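/*
 * Editorial sketch (not driver code): the NV_SUP test performed above.
 * inq86 is the raw Extended INQUIRY (VPD page 0x86) payload; byte 6
 * carries the NV_SUP bit, masked here with the driver's SD_VPD_NV_SUP.
 */
static boolean_t
vpd86_has_nv_sup(const uchar_t *inq86, size_t valid_len)
{
	/* need bytes 0..6 of the page before byte 6 can be trusted */
	if (valid_len <= 6)
		return (B_FALSE);
	return ((inq86[6] & SD_VPD_NV_SUP) ? B_TRUE : B_FALSE);
}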
9836 9836
9837 9837 /*
9838 9838 * Function: sd_make_device
9839 9839 *
9840 9840 * Description: Utility routine to return the Solaris device number from
9841 9841 * the data in the device's dev_info structure.
9842 9842 *
9843 9843 * Return Code: The Solaris device number
9844 9844 *
9845 9845 * Context: Any
9846 9846 */
9847 9847
9848 9848 static dev_t
9849 9849 sd_make_device(dev_info_t *devi)
9850 9850 {
9851 9851 return (makedevice(ddi_driver_major(devi),
9852 9852 ddi_get_instance(devi) << SDUNIT_SHIFT));
9853 9853 }
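/*
 * Editorial sketch (not driver code): the minor number packs the
 * instance above SDUNIT_SHIFT with the partition in the low bits, so
 * SDUNIT()/SDPART() (used by sdopen/sdclose below) recover both.
 * sd_devno_roundtrip() is hypothetical.
 */
static void
sd_devno_roundtrip(dev_info_t *devi)
{
	dev_t	dev = sd_make_device(devi);

	/* sd_make_device() yields partition 0 of this instance */
	ASSERT(SDUNIT(dev) == ddi_get_instance(devi));
	ASSERT(SDPART(dev) == 0);
}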
9854 9854
9855 9855
9856 9856 /*
9857 9857 * Function: sd_pm_entry
9858 9858 *
9859 9859 * Description: Called at the start of a new command to manage power
9860 9860 * and busy status of a device. This includes determining whether
9861 9861 * the current power state of the device is sufficient for
9862 9862 * performing the command or whether it must be changed.
9863 9863 * The PM framework is notified appropriately.
9864 9864  *		Only with a return status of DDI_SUCCESS will the
9865 9865  *		component be marked busy to the framework.
9866 9866  *
9867 9867  *		All callers of sd_pm_entry must check the return status
9868 9868  *		and only call sd_pm_exit if it was DDI_SUCCESS. A status
9869 9869  *		of DDI_FAILURE indicates the device failed to power up.
9870 9870  *		In this case un_pm_count has been adjusted so the result
9871 9871  *		on exit is still powered down, i.e. count is less than 0.
9872 9872  *		Calling sd_pm_exit with this count value hits an ASSERT.
9873 9873 *
9874 9874 * Return Code: DDI_SUCCESS or DDI_FAILURE
9875 9875 *
9876 9876 * Context: Kernel thread context.
9877 9877 */
9878 9878
9879 9879 static int
9880 9880 sd_pm_entry(struct sd_lun *un)
9881 9881 {
9882 9882 int return_status = DDI_SUCCESS;
9883 9883
9884 9884 ASSERT(!mutex_owned(SD_MUTEX(un)));
9885 9885 ASSERT(!mutex_owned(&un->un_pm_mutex));
9886 9886
9887 9887 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: entry\n");
9888 9888
9889 9889 if (un->un_f_pm_is_enabled == FALSE) {
9890 9890 SD_TRACE(SD_LOG_IO_PM, un,
9891 9891 "sd_pm_entry: exiting, PM not enabled\n");
9892 9892 return (return_status);
9893 9893 }
9894 9894
9895 9895 /*
9896 9896 * Just increment a counter if PM is enabled. On the transition from
9897 9897 * 0 ==> 1, mark the device as busy. The iodone side will decrement
9898 9898 * the count with each IO and mark the device as idle when the count
9899 9899 * hits 0.
9900 9900 *
9901 9901 * If the count is less than 0 the device is powered down. If a powered
9902 9902 * down device is successfully powered up then the count must be
9903 9903 * incremented to reflect the power up. Note that it'll get incremented
9904 9904 * a second time to become busy.
9905 9905 *
9906 9906 * Because the following has the potential to change the device state
9907 9907 * and must release the un_pm_mutex to do so, only one thread can be
9908 9908 * allowed through at a time.
9909 9909 */
9910 9910
9911 9911 mutex_enter(&un->un_pm_mutex);
9912 9912 while (un->un_pm_busy == TRUE) {
9913 9913 cv_wait(&un->un_pm_busy_cv, &un->un_pm_mutex);
9914 9914 }
9915 9915 un->un_pm_busy = TRUE;
9916 9916
9917 9917 if (un->un_pm_count < 1) {
9918 9918
9919 9919 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_entry: busy component\n");
9920 9920
9921 9921 /*
9922 9922 * Indicate we are now busy so the framework won't attempt to
9923 9923 * power down the device. This call will only fail if either
9924 9924 * we passed a bad component number or the device has no
9925 9925 * components. Neither of these should ever happen.
9926 9926 */
9927 9927 mutex_exit(&un->un_pm_mutex);
9928 9928 return_status = pm_busy_component(SD_DEVINFO(un), 0);
9929 9929 ASSERT(return_status == DDI_SUCCESS);
9930 9930
9931 9931 mutex_enter(&un->un_pm_mutex);
9932 9932
9933 9933 if (un->un_pm_count < 0) {
9934 9934 mutex_exit(&un->un_pm_mutex);
9935 9935
9936 9936 SD_TRACE(SD_LOG_IO_PM, un,
9937 9937 "sd_pm_entry: power up component\n");
9938 9938
9939 9939 /*
9940 9940 			 * pm_raise_power will cause sdpower to be called,
9941 9941 			 * which brings the device power level to the
9942 9942 			 * desired state. If successful, un_pm_count and
9943 9943 			 * un_power_level will be updated appropriately.
9944 9944 */
9945 9945 return_status = pm_raise_power(SD_DEVINFO(un), 0,
9946 9946 SD_PM_STATE_ACTIVE(un));
9947 9947
9948 9948 mutex_enter(&un->un_pm_mutex);
9949 9949
9950 9950 if (return_status != DDI_SUCCESS) {
9951 9951 /*
9952 9952 * Power up failed.
9953 9953 * Idle the device and adjust the count
9954 9954 * so the result on exit is that we're
9955 9955 * still powered down, ie. count is less than 0.
9956 9956 */
9957 9957 SD_TRACE(SD_LOG_IO_PM, un,
9958 9958 "sd_pm_entry: power up failed,"
9959 9959 " idle the component\n");
9960 9960
9961 9961 (void) pm_idle_component(SD_DEVINFO(un), 0);
9962 9962 un->un_pm_count--;
9963 9963 } else {
9964 9964 /*
9965 9965 * Device is powered up, verify the
9966 9966 * count is non-negative.
9967 9967 * This is debug only.
9968 9968 */
9969 9969 ASSERT(un->un_pm_count == 0);
9970 9970 }
9971 9971 }
9972 9972
9973 9973 if (return_status == DDI_SUCCESS) {
9974 9974 /*
9975 9975 * For performance, now that the device has been tagged
9976 9976 * as busy, and it's known to be powered up, update the
9977 9977 * chain types to use jump tables that do not include
9978 9978 * pm. This significantly lowers the overhead and
9979 9979 * therefore improves performance.
9980 9980 */
9981 9981
9982 9982 mutex_exit(&un->un_pm_mutex);
9983 9983 mutex_enter(SD_MUTEX(un));
9984 9984 SD_TRACE(SD_LOG_IO_PM, un,
9985 9985 "sd_pm_entry: changing uscsi_chain_type from %d\n",
9986 9986 un->un_uscsi_chain_type);
9987 9987
9988 9988 if (un->un_f_non_devbsize_supported) {
9989 9989 un->un_buf_chain_type =
9990 9990 SD_CHAIN_INFO_RMMEDIA_NO_PM;
9991 9991 } else {
9992 9992 un->un_buf_chain_type =
9993 9993 SD_CHAIN_INFO_DISK_NO_PM;
9994 9994 }
9995 9995 un->un_uscsi_chain_type = SD_CHAIN_INFO_USCSI_CMD_NO_PM;
9996 9996
9997 9997 SD_TRACE(SD_LOG_IO_PM, un,
9998 9998 " changed uscsi_chain_type to %d\n",
9999 9999 un->un_uscsi_chain_type);
10000 10000 mutex_exit(SD_MUTEX(un));
10001 10001 mutex_enter(&un->un_pm_mutex);
10002 10002
10003 10003 if (un->un_pm_idle_timeid == NULL) {
10004 10004 /* 300 ms. */
10005 10005 un->un_pm_idle_timeid =
10006 10006 timeout(sd_pm_idletimeout_handler, un,
10007 10007 (drv_usectohz((clock_t)300000)));
10008 10008 /*
10009 10009 * Include an extra call to busy which keeps the
10010 10010 			 * device busy with respect to the PM layer
10011 10011 * until the timer fires, at which time it'll
10012 10012 * get the extra idle call.
10013 10013 */
10014 10014 (void) pm_busy_component(SD_DEVINFO(un), 0);
10015 10015 }
10016 10016 }
10017 10017 }
10018 10018 un->un_pm_busy = FALSE;
10019 10019 /* Next... */
10020 10020 cv_signal(&un->un_pm_busy_cv);
10021 10021
10022 10022 un->un_pm_count++;
10023 10023
10024 10024 SD_TRACE(SD_LOG_IO_PM, un,
10025 10025 "sd_pm_entry: exiting, un_pm_count = %d\n", un->un_pm_count);
10026 10026
10027 10027 mutex_exit(&un->un_pm_mutex);
10028 10028
10029 10029 return (return_status);
10030 10030 }
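/*
 * Editorial sketch (not driver code): the caller contract spelled out
 * in the header above -- sd_pm_exit() is balanced only on the
 * DDI_SUCCESS path.  sd_pm_guarded_io() is hypothetical.
 */
static int
sd_pm_guarded_io(struct sd_lun *un)
{
	int err;

	if (sd_pm_entry(un) != DDI_SUCCESS)
		return (EIO);	/* powered down; do NOT call sd_pm_exit */

	err = 0;		/* ... issue the command here ... */

	sd_pm_exit(un);		/* device may now go idle */
	return (err);
}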
10031 10031
10032 10032
10033 10033 /*
10034 10034 * Function: sd_pm_exit
10035 10035 *
10036 10036 * Description: Called at the completion of a command to manage busy
10037 10037 * status for the device. If the device becomes idle the
10038 10038 * PM framework is notified.
10039 10039 *
10040 10040 * Context: Kernel thread context
10041 10041 */
10042 10042
10043 10043 static void
10044 10044 sd_pm_exit(struct sd_lun *un)
10045 10045 {
10046 10046 ASSERT(!mutex_owned(SD_MUTEX(un)));
10047 10047 ASSERT(!mutex_owned(&un->un_pm_mutex));
10048 10048
10049 10049 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: entry\n");
10050 10050
10051 10051 /*
10052 10052 * After attach the following flag is only read, so don't
10053 10053 * take the penalty of acquiring a mutex for it.
10054 10054 */
10055 10055 if (un->un_f_pm_is_enabled == TRUE) {
10056 10056
10057 10057 mutex_enter(&un->un_pm_mutex);
10058 10058 un->un_pm_count--;
10059 10059
10060 10060 SD_TRACE(SD_LOG_IO_PM, un,
10061 10061 "sd_pm_exit: un_pm_count = %d\n", un->un_pm_count);
10062 10062
10063 10063 ASSERT(un->un_pm_count >= 0);
10064 10064 if (un->un_pm_count == 0) {
10065 10065 mutex_exit(&un->un_pm_mutex);
10066 10066
10067 10067 SD_TRACE(SD_LOG_IO_PM, un,
10068 10068 "sd_pm_exit: idle component\n");
10069 10069
10070 10070 (void) pm_idle_component(SD_DEVINFO(un), 0);
10071 10071
10072 10072 } else {
10073 10073 mutex_exit(&un->un_pm_mutex);
10074 10074 }
10075 10075 }
10076 10076
10077 10077 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_exit: exiting\n");
10078 10078 }
10079 10079
10080 10080
10081 10081 /*
10082 10082 * Function: sdopen
10083 10083 *
10084 10084 * Description: Driver's open(9e) entry point function.
10085 10085 *
10086 10086  * Arguments:	dev_p - pointer to device number
10087 10087 * flag - how to open file (FEXCL, FNDELAY, FREAD, FWRITE)
10088 10088 * otyp - open type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10089 10089 * cred_p - user credential pointer
10090 10090 *
10091 10091 * Return Code: EINVAL
10092 10092 * ENXIO
10093 10093 * EIO
10094 10094 * EROFS
10095 10095 * EBUSY
10096 10096 *
10097 10097 * Context: Kernel thread context
10098 10098 */
10099 10099 /* ARGSUSED */
10100 10100 static int
10101 10101 sdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
10102 10102 {
10103 10103 struct sd_lun *un;
10104 10104 int nodelay;
10105 10105 int part;
10106 10106 uint64_t partmask;
10107 10107 int instance;
10108 10108 dev_t dev;
10109 10109 int rval = EIO;
10110 10110 diskaddr_t nblks = 0;
10111 10111 diskaddr_t label_cap;
10112 10112
10113 10113 /* Validate the open type */
10114 10114 if (otyp >= OTYPCNT) {
10115 10115 return (EINVAL);
10116 10116 }
10117 10117
10118 10118 dev = *dev_p;
10119 10119 instance = SDUNIT(dev);
10120 10120 mutex_enter(&sd_detach_mutex);
10121 10121
10122 10122 /*
10123 10123 * Fail the open if there is no softstate for the instance, or
10124 10124 * if another thread somewhere is trying to detach the instance.
10125 10125 */
10126 10126 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
10127 10127 (un->un_detach_count != 0)) {
10128 10128 mutex_exit(&sd_detach_mutex);
10129 10129 		/*
10130 10130 		 * The probe cache only needs to be cleared when open(9e)
10131 10131 		 * fails with ENXIO (4238046).
10132 10132 		 */
10133 10133 		/*
10134 10134 		 * Unconditionally clearing the probe cache is fine with
10135 10135 		 * separate sd/ssd binaries; on the x86 platform it can
10136 10136 		 * be an issue with both parallel and fibre support in
10137 10137 		 * one binary.
10138 10138 		 */
10139 10139 sd_scsi_clear_probe_cache();
10140 10140 return (ENXIO);
10141 10141 }
10142 10142
10143 10143 /*
10144 10144 * The un_layer_count is to prevent another thread in specfs from
10145 10145 * trying to detach the instance, which can happen when we are
10146 10146 * called from a higher-layer driver instead of thru specfs.
10147 10147 * This will not be needed when DDI provides a layered driver
10148 10148 * interface that allows specfs to know that an instance is in
10149 10149 * use by a layered driver & should not be detached.
10150 10150 *
10151 10151 * Note: the semantics for layered driver opens are exactly one
10152 10152 * close for every open.
10153 10153 */
10154 10154 if (otyp == OTYP_LYR) {
10155 10155 un->un_layer_count++;
10156 10156 }
10157 10157
10158 10158 /*
10159 10159 * Keep a count of the current # of opens in progress. This is because
10160 10160 * some layered drivers try to call us as a regular open. This can
10161 10161 * cause problems that we cannot prevent, however by keeping this count
10162 10162 * we can at least keep our open and detach routines from racing against
10163 10163 * each other under such conditions.
10164 10164 */
10165 10165 un->un_opens_in_progress++;
10166 10166 mutex_exit(&sd_detach_mutex);
10167 10167
10168 10168 nodelay = (flag & (FNDELAY | FNONBLOCK));
10169 10169 part = SDPART(dev);
10170 10170 partmask = 1 << part;
10171 10171
10172 10172 /*
10173 10173 * We use a semaphore here in order to serialize
10174 10174 * open and close requests on the device.
10175 10175 */
10176 10176 sema_p(&un->un_semoclose);
10177 10177
10178 10178 mutex_enter(SD_MUTEX(un));
10179 10179
10180 10180 /*
10181 10181 * All device accesses go thru sdstrategy() where we check
10182 10182 * on suspend status but there could be a scsi_poll command,
10183 10183 * which bypasses sdstrategy(), so we need to check pm
10184 10184 * status.
10185 10185 */
10186 10186
10187 10187 if (!nodelay) {
10188 10188 while ((un->un_state == SD_STATE_SUSPENDED) ||
10189 10189 (un->un_state == SD_STATE_PM_CHANGING)) {
10190 10190 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10191 10191 }
10192 10192
10193 10193 mutex_exit(SD_MUTEX(un));
10194 10194 if (sd_pm_entry(un) != DDI_SUCCESS) {
10195 10195 rval = EIO;
10196 10196 SD_ERROR(SD_LOG_OPEN_CLOSE, un,
10197 10197 "sdopen: sd_pm_entry failed\n");
10198 10198 goto open_failed_with_pm;
10199 10199 }
10200 10200 mutex_enter(SD_MUTEX(un));
10201 10201 }
10202 10202
10203 10203 /* check for previous exclusive open */
10204 10204 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: un=%p\n", (void *)un);
10205 10205 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10206 10206 "sdopen: exclopen=%x, flag=%x, regopen=%x\n",
10207 10207 un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
10208 10208
10209 10209 if (un->un_exclopen & (partmask)) {
10210 10210 goto excl_open_fail;
10211 10211 }
10212 10212
10213 10213 if (flag & FEXCL) {
10214 10214 int i;
10215 10215 if (un->un_ocmap.lyropen[part]) {
10216 10216 goto excl_open_fail;
10217 10217 }
10218 10218 for (i = 0; i < (OTYPCNT - 1); i++) {
10219 10219 if (un->un_ocmap.regopen[i] & (partmask)) {
10220 10220 goto excl_open_fail;
10221 10221 }
10222 10222 }
10223 10223 }
10224 10224
10225 10225 /*
10226 10226 * Check the write permission if this is a removable media device,
10227 10227 * NDELAY has not been set, and writable permission is requested.
10228 10228 *
10229 10229 * Note: If NDELAY was set and this is write-protected media the WRITE
10230 10230 * attempt will fail with EIO as part of the I/O processing. This is a
10231 10231 * more permissive implementation that allows the open to succeed and
10232 10232 * WRITE attempts to fail when appropriate.
10233 10233 */
10234 10234 if (un->un_f_chk_wp_open) {
10235 10235 if ((flag & FWRITE) && (!nodelay)) {
10236 10236 mutex_exit(SD_MUTEX(un));
10237 10237 			/*
10238 10238 			 * For a writable DVD drive, defer the check for
10239 10239 			 * write permission until sdstrategy; do not fail
10240 10240 			 * the open even if FWRITE is set, since the device
10241 10241 			 * may be writable depending on the media, and the
10242 10242 			 * media can change after the call to open().
10243 10243 			 */
10244 10244 if (un->un_f_dvdram_writable_device == FALSE) {
10245 10245 if (ISCD(un) || sr_check_wp(dev)) {
10246 10246 rval = EROFS;
10247 10247 mutex_enter(SD_MUTEX(un));
10248 10248 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10249 10249 "write to cd or write protected media\n");
10250 10250 goto open_fail;
10251 10251 }
10252 10252 }
10253 10253 mutex_enter(SD_MUTEX(un));
10254 10254 }
10255 10255 }
10256 10256
10257 10257 /*
10258 10258 * If opening in NDELAY/NONBLOCK mode, just return.
10259 10259 * Check if disk is ready and has a valid geometry later.
10260 10260 */
10261 10261 if (!nodelay) {
10262 10262 sd_ssc_t *ssc;
10263 10263
10264 10264 mutex_exit(SD_MUTEX(un));
10265 10265 ssc = sd_ssc_init(un);
10266 10266 rval = sd_ready_and_valid(ssc, part);
10267 10267 sd_ssc_fini(ssc);
10268 10268 mutex_enter(SD_MUTEX(un));
10269 10269 		/*
10270 10270 		 * Fail if the device is not ready, or if the number of
10271 10271 		 * disk blocks is zero or negative for non-CD devices.
10272 10272 		 */
10273 10273
10274 10274 nblks = 0;
10275 10275
10276 10276 if (rval == SD_READY_VALID && (!ISCD(un))) {
10277 10277 /* if cmlb_partinfo fails, nblks remains 0 */
10278 10278 mutex_exit(SD_MUTEX(un));
10279 10279 (void) cmlb_partinfo(un->un_cmlbhandle, part, &nblks,
10280 10280 NULL, NULL, NULL, (void *)SD_PATH_DIRECT);
10281 10281 mutex_enter(SD_MUTEX(un));
10282 10282 }
10283 10283
10284 10284 if ((rval != SD_READY_VALID) ||
10285 10285 (!ISCD(un) && nblks <= 0)) {
10286 10286 rval = un->un_f_has_removable_media ? ENXIO : EIO;
10287 10287 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10288 10288 "device not ready or invalid disk block value\n");
10289 10289 goto open_fail;
10290 10290 }
10291 10291 #if defined(__i386) || defined(__amd64)
10292 10292 } else {
10293 10293 uchar_t *cp;
10294 10294 /*
10295 10295 * x86 requires special nodelay handling, so that p0 is
10296 10296 * always defined and accessible.
10297 10297 * Invalidate geometry only if device is not already open.
10298 10298 */
10299 10299 cp = &un->un_ocmap.chkd[0];
10300 10300 while (cp < &un->un_ocmap.chkd[OCSIZE]) {
10301 10301 if (*cp != (uchar_t)0) {
10302 10302 break;
10303 10303 }
10304 10304 cp++;
10305 10305 }
10306 10306 if (cp == &un->un_ocmap.chkd[OCSIZE]) {
10307 10307 mutex_exit(SD_MUTEX(un));
10308 10308 cmlb_invalidate(un->un_cmlbhandle,
10309 10309 (void *)SD_PATH_DIRECT);
10310 10310 mutex_enter(SD_MUTEX(un));
10311 10311 }
10312 10312
10313 10313 #endif
10314 10314 }
10315 10315
10316 10316 if (otyp == OTYP_LYR) {
10317 10317 un->un_ocmap.lyropen[part]++;
10318 10318 } else {
10319 10319 un->un_ocmap.regopen[otyp] |= partmask;
10320 10320 }
10321 10321
10322 10322 /* Set up open and exclusive open flags */
10323 10323 if (flag & FEXCL) {
10324 10324 un->un_exclopen |= (partmask);
10325 10325 }
10326 10326
10327 10327 /*
10328 10328 * If the lun is EFI labeled and lun capacity is greater than the
10329 10329 * capacity contained in the label, log a sys-event to notify the
10330 10330 * interested module.
10331 10331 * To avoid an infinite loop of logging sys-event, we only log the
10332 10332 * event when the lun is not opened in NDELAY mode. The event handler
10333 10333 * should open the lun in NDELAY mode.
10334 10334 */
10335 10335 if (!nodelay) {
10336 10336 mutex_exit(SD_MUTEX(un));
10337 10337 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
10338 10338 (void*)SD_PATH_DIRECT) == 0) {
10339 10339 mutex_enter(SD_MUTEX(un));
10340 10340 if (un->un_f_blockcount_is_valid &&
10341 10341 un->un_blockcount > label_cap &&
10342 10342 un->un_f_expnevent == B_FALSE) {
10343 10343 un->un_f_expnevent = B_TRUE;
10344 10344 mutex_exit(SD_MUTEX(un));
10345 10345 sd_log_lun_expansion_event(un,
10346 10346 (nodelay ? KM_NOSLEEP : KM_SLEEP));
10347 10347 mutex_enter(SD_MUTEX(un));
10348 10348 }
10349 10349 } else {
10350 10350 mutex_enter(SD_MUTEX(un));
10351 10351 }
10352 10352 }
10353 10353
10354 10354 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: "
10355 10355 "open of part %d type %d\n", part, otyp);
10356 10356
10357 10357 mutex_exit(SD_MUTEX(un));
10358 10358 if (!nodelay) {
10359 10359 sd_pm_exit(un);
10360 10360 }
10361 10361
10362 10362 sema_v(&un->un_semoclose);
10363 10363
10364 10364 mutex_enter(&sd_detach_mutex);
10365 10365 un->un_opens_in_progress--;
10366 10366 mutex_exit(&sd_detach_mutex);
10367 10367
10368 10368 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdopen: exit success\n");
10369 10369 return (DDI_SUCCESS);
10370 10370
10371 10371 excl_open_fail:
10372 10372 SD_ERROR(SD_LOG_OPEN_CLOSE, un, "sdopen: fail exclusive open\n");
10373 10373 rval = EBUSY;
10374 10374
10375 10375 open_fail:
10376 10376 mutex_exit(SD_MUTEX(un));
10377 10377
10378 10378 /*
10379 10379 * On a failed open we must exit the pm management.
10380 10380 */
10381 10381 if (!nodelay) {
10382 10382 sd_pm_exit(un);
10383 10383 }
10384 10384 open_failed_with_pm:
10385 10385 sema_v(&un->un_semoclose);
10386 10386
10387 10387 mutex_enter(&sd_detach_mutex);
10388 10388 un->un_opens_in_progress--;
10389 10389 if (otyp == OTYP_LYR) {
10390 10390 un->un_layer_count--;
10391 10391 }
10392 10392 mutex_exit(&sd_detach_mutex);
10393 10393
10394 10394 return (rval);
10395 10395 }
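/*
 * Editorial sketch (not driver code): the exclusive-open conflict test
 * applied above.  An FEXCL open fails if the partition is open in any
 * fashion; any open fails if the partition is already held FEXCL.
 * sd_excl_conflict() is hypothetical.
 */
static int
sd_excl_conflict(struct sd_lun *un, int part, int flag)
{
	uint64_t	partmask = 1ULL << part;
	int		i;

	if (un->un_exclopen & partmask)
		return (1);		/* already exclusively held */
	if (flag & FEXCL) {
		if (un->un_ocmap.lyropen[part])
			return (1);	/* layered opens outstanding */
		for (i = 0; i < (OTYPCNT - 1); i++) {
			if (un->un_ocmap.regopen[i] & partmask)
				return (1);	/* regular opens outstanding */
		}
	}
	return (0);
}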
10396 10396
10397 10397
10398 10398 /*
10399 10399 * Function: sdclose
10400 10400 *
10401 10401 * Description: Driver's close(9e) entry point function.
10402 10402 *
10403 10403 * Arguments: dev - device number
10404 10404 * flag - file status flag, informational only
10405 10405 * otyp - close type (OTYP_BLK, OTYP_CHR, OTYP_LYR)
10406 10406 * cred_p - user credential pointer
10407 10407 *
10408 10408 * Return Code: ENXIO
10409 10409 *
10410 10410 * Context: Kernel thread context
10411 10411 */
10412 10412 /* ARGSUSED */
10413 10413 static int
10414 10414 sdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
10415 10415 {
10416 10416 struct sd_lun *un;
10417 10417 uchar_t *cp;
10418 10418 int part;
10419 10419 int nodelay;
10420 10420 int rval = 0;
10421 10421
10422 10422 /* Validate the open type */
10423 10423 if (otyp >= OTYPCNT) {
10424 10424 return (ENXIO);
10425 10425 }
10426 10426
10427 10427 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10428 10428 return (ENXIO);
10429 10429 }
10430 10430
10431 10431 part = SDPART(dev);
10432 10432 nodelay = flag & (FNDELAY | FNONBLOCK);
10433 10433
10434 10434 SD_TRACE(SD_LOG_OPEN_CLOSE, un,
10435 10435 "sdclose: close of part %d type %d\n", part, otyp);
10436 10436
10437 10437 /*
10438 10438 * We use a semaphore here in order to serialize
10439 10439 * open and close requests on the device.
10440 10440 */
10441 10441 sema_p(&un->un_semoclose);
10442 10442
10443 10443 mutex_enter(SD_MUTEX(un));
10444 10444
10445 10445 /* Don't proceed if power is being changed. */
10446 10446 while (un->un_state == SD_STATE_PM_CHANGING) {
10447 10447 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10448 10448 }
10449 10449
10450 10450 if (un->un_exclopen & (1 << part)) {
10451 10451 un->un_exclopen &= ~(1 << part);
10452 10452 }
10453 10453
10454 10454 /* Update the open partition map */
10455 10455 if (otyp == OTYP_LYR) {
10456 10456 un->un_ocmap.lyropen[part] -= 1;
10457 10457 } else {
10458 10458 un->un_ocmap.regopen[otyp] &= ~(1 << part);
10459 10459 }
10460 10460
10461 10461 cp = &un->un_ocmap.chkd[0];
10462 10462 while (cp < &un->un_ocmap.chkd[OCSIZE]) {
10463 10463 		if (*cp != (uchar_t)0) {
10464 10464 break;
10465 10465 }
10466 10466 cp++;
10467 10467 }
10468 10468
10469 10469 if (cp == &un->un_ocmap.chkd[OCSIZE]) {
10470 10470 SD_TRACE(SD_LOG_OPEN_CLOSE, un, "sdclose: last close\n");
10471 10471
10472 10472 /*
10473 10473 		 * We avoid persistence upon the last close, and set
10474 10474 * the throttle back to the maximum.
10475 10475 */
10476 10476 un->un_throttle = un->un_saved_throttle;
10477 10477
10478 10478 if (un->un_state == SD_STATE_OFFLINE) {
10479 10479 if (un->un_f_is_fibre == FALSE) {
10480 10480 scsi_log(SD_DEVINFO(un), sd_label,
10481 10481 CE_WARN, "offline\n");
10482 10482 }
10483 10483 mutex_exit(SD_MUTEX(un));
10484 10484 cmlb_invalidate(un->un_cmlbhandle,
10485 10485 (void *)SD_PATH_DIRECT);
10486 10486 mutex_enter(SD_MUTEX(un));
10487 10487
10488 10488 } else {
10489 10489 /*
10490 10490 * Flush any outstanding writes in NVRAM cache.
10491 10491 * Note: SYNCHRONIZE CACHE is an optional SCSI-2
10492 10492 * cmd, it may not work for non-Pluto devices.
10493 10493 * SYNCHRONIZE CACHE is not required for removables,
10494 10494 * except DVD-RAM drives.
10495 10495 *
10496 10496 * Also note: because SYNCHRONIZE CACHE is currently
10497 10497 * the only command issued here that requires the
10498 10498 * drive be powered up, only do the power up before
10499 10499 * sending the Sync Cache command. If additional
10500 10500 * commands are added which require a powered up
10501 10501 * drive, the following sequence may have to change.
10502 10502 *
10503 10503 * And finally, note that parallel SCSI on SPARC
10504 10504 * only issues a Sync Cache to DVD-RAM, a newly
10505 10505 * supported device.
10506 10506 */
10507 10507 #if defined(__i386) || defined(__amd64)
10508 10508 if ((un->un_f_sync_cache_supported &&
10509 10509 un->un_f_sync_cache_required) ||
10510 10510 un->un_f_dvdram_writable_device == TRUE) {
10511 10511 #else
10512 10512 if (un->un_f_dvdram_writable_device == TRUE) {
10513 10513 #endif
10514 10514 mutex_exit(SD_MUTEX(un));
10515 10515 if (sd_pm_entry(un) == DDI_SUCCESS) {
10516 10516 rval =
10517 10517 sd_send_scsi_SYNCHRONIZE_CACHE(un,
10518 10518 NULL);
10519 10519 /* ignore error if not supported */
10520 10520 if (rval == ENOTSUP) {
10521 10521 rval = 0;
10522 10522 } else if (rval != 0) {
10523 10523 rval = EIO;
10524 10524 }
10525 10525 sd_pm_exit(un);
10526 10526 } else {
10527 10527 rval = EIO;
10528 10528 }
10529 10529 mutex_enter(SD_MUTEX(un));
10530 10530 }
10531 10531
10532 10532 /*
10533 10533 		 * For devices which support DOOR_LOCK, send an ALLOW
10534 10534 * MEDIA REMOVAL command, but don't get upset if it
10535 10535 * fails. We need to raise the power of the drive before
10536 10536 * we can call sd_send_scsi_DOORLOCK()
10537 10537 */
10538 10538 if (un->un_f_doorlock_supported) {
10539 10539 mutex_exit(SD_MUTEX(un));
10540 10540 if (sd_pm_entry(un) == DDI_SUCCESS) {
10541 10541 sd_ssc_t *ssc;
10542 10542
10543 10543 ssc = sd_ssc_init(un);
10544 10544 rval = sd_send_scsi_DOORLOCK(ssc,
10545 10545 SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
10546 10546 if (rval != 0)
10547 10547 sd_ssc_assessment(ssc,
10548 10548 SD_FMT_IGNORE);
10549 10549 sd_ssc_fini(ssc);
10550 10550
10551 10551 sd_pm_exit(un);
10552 10552 if (ISCD(un) && (rval != 0) &&
10553 10553 (nodelay != 0)) {
10554 10554 rval = ENXIO;
10555 10555 }
10556 10556 } else {
10557 10557 rval = EIO;
10558 10558 }
10559 10559 mutex_enter(SD_MUTEX(un));
10560 10560 }
10561 10561
10562 10562 /*
10563 10563 * If a device has removable media, invalidate all
10564 10564 * parameters related to media, such as geometry,
10565 10565 * blocksize, and blockcount.
10566 10566 */
10567 10567 if (un->un_f_has_removable_media) {
10568 10568 sr_ejected(un);
10569 10569 }
10570 10570
10571 10571 /*
10572 10572 * Destroy the cache (if it exists) which was
10573 10573 * allocated for the write maps since this is
10574 10574 * the last close for this media.
10575 10575 */
10576 10576 if (un->un_wm_cache) {
10577 10577 /*
10578 10578 				 * Check if there are pending commands,
10579 10579 				 * and if there are, give a warning and
10580 10580 				 * do not destroy the cache.
10581 10581 */
10582 10582 if (un->un_ncmds_in_driver > 0) {
10583 10583 scsi_log(SD_DEVINFO(un),
10584 10584 sd_label, CE_WARN,
10585 10585 "Unable to clean up memory "
10586 10586 "because of pending I/O\n");
10587 10587 } else {
10588 10588 kmem_cache_destroy(
10589 10589 un->un_wm_cache);
10590 10590 un->un_wm_cache = NULL;
10591 10591 }
10592 10592 }
10593 10593 }
10594 10594 }
10595 10595
10596 10596 mutex_exit(SD_MUTEX(un));
10597 10597 sema_v(&un->un_semoclose);
10598 10598
10599 10599 if (otyp == OTYP_LYR) {
10600 10600 mutex_enter(&sd_detach_mutex);
10601 10601 /*
10602 10602 * The detach routine may run when the layer count
10603 10603 * drops to zero.
10604 10604 */
10605 10605 un->un_layer_count--;
10606 10606 mutex_exit(&sd_detach_mutex);
10607 10607 }
10608 10608
10609 10609 return (rval);
10610 10610 }
10611 10611
10612 10612
10613 10613 /*
10614 10614 * Function: sd_ready_and_valid
10615 10615 *
10616 10616 * Description: Test if device is ready and has a valid geometry.
10617 10617 *
10618 10618  * Arguments: ssc - sd_ssc_t will contain un
10619 10619  * part - partition number to validate
10620 10620 *
10621 10621 * Return Code: SD_READY_VALID ready and valid label
10622 10622 * SD_NOT_READY_VALID not ready, no label
10623 10623 * SD_RESERVED_BY_OTHERS reservation conflict
10624 10624 *
10625 10625 * Context: Never called at interrupt context.
10626 10626 */
10627 10627
10628 10628 static int
10629 10629 sd_ready_and_valid(sd_ssc_t *ssc, int part)
10630 10630 {
10631 10631 struct sd_errstats *stp;
10632 10632 uint64_t capacity;
10633 10633 uint_t lbasize;
10634 10634 int rval = SD_READY_VALID;
10635 10635 char name_str[48];
10636 10636 boolean_t is_valid;
10637 10637 struct sd_lun *un;
10638 10638 int status;
10639 10639
10640 10640 ASSERT(ssc != NULL);
10641 10641 un = ssc->ssc_un;
10642 10642 ASSERT(un != NULL);
10643 10643 ASSERT(!mutex_owned(SD_MUTEX(un)));
10644 10644
10645 10645 mutex_enter(SD_MUTEX(un));
10646 10646 /*
10647 10647 * If a device has removable media, we must check if media is
10648 10648 * ready when checking if this device is ready and valid.
10649 10649 */
10650 10650 if (un->un_f_has_removable_media) {
10651 10651 mutex_exit(SD_MUTEX(un));
10652 10652 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10653 10653
10654 10654 if (status != 0) {
10655 10655 rval = SD_NOT_READY_VALID;
10656 10656 mutex_enter(SD_MUTEX(un));
10657 10657
10658 10658 			/* Ignore all failed status for removable media */
10659 10659 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10660 10660
10661 10661 goto done;
10662 10662 }
10663 10663
10664 10664 is_valid = SD_IS_VALID_LABEL(un);
10665 10665 mutex_enter(SD_MUTEX(un));
10666 10666 if (!is_valid ||
10667 10667 (un->un_f_blockcount_is_valid == FALSE) ||
10668 10668 (un->un_f_tgt_blocksize_is_valid == FALSE)) {
10669 10669
10670 10670 			/* Capacity has to be read on every open. */
10671 10671 mutex_exit(SD_MUTEX(un));
10672 10672 status = sd_send_scsi_READ_CAPACITY(ssc, &capacity,
10673 10673 &lbasize, SD_PATH_DIRECT);
10674 10674
10675 10675 if (status != 0) {
10676 10676 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10677 10677
10678 10678 cmlb_invalidate(un->un_cmlbhandle,
10679 10679 (void *)SD_PATH_DIRECT);
10680 10680 mutex_enter(SD_MUTEX(un));
10681 10681 rval = SD_NOT_READY_VALID;
10682 10682
10683 10683 goto done;
10684 10684 } else {
10685 10685 mutex_enter(SD_MUTEX(un));
10686 10686 sd_update_block_info(un, lbasize, capacity);
10687 10687 }
10688 10688 }
10689 10689
10690 10690 /*
10691 10691 * Check if the media in the device is writable or not.
10692 10692 */
10693 10693 if (!is_valid && ISCD(un)) {
10694 10694 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
10695 10695 }
10696 10696
10697 10697 } else {
10698 10698 /*
10699 10699 * Do a test unit ready to clear any unit attention from non-cd
10700 10700 * devices.
10701 10701 */
10702 10702 mutex_exit(SD_MUTEX(un));
10703 10703
10704 10704 status = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10705 10705 if (status != 0) {
10706 10706 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10707 10707 }
10708 10708
10709 10709 mutex_enter(SD_MUTEX(un));
10710 10710 }
10711 10711
10712 10712
10713 10713 /*
10714 10714 	 * If this is a non-512-byte block device, allocate space for
10715 10715 	 * the wmap cache. This is done here since this routine is
10716 10716 	 * called every time the media is changed, and the block
10717 10717 	 * size is a function of the media rather than the device.
10718 10718 */
10719 10719 if (((un->un_f_rmw_type != SD_RMW_TYPE_RETURN_ERROR ||
10720 10720 un->un_f_non_devbsize_supported) &&
10721 10721 un->un_tgt_blocksize != DEV_BSIZE) ||
10722 10722 un->un_f_enable_rmw) {
10723 10723 if (!(un->un_wm_cache)) {
10724 10724 (void) snprintf(name_str, sizeof (name_str),
10725 10725 "%s%d_cache",
10726 10726 ddi_driver_name(SD_DEVINFO(un)),
10727 10727 ddi_get_instance(SD_DEVINFO(un)));
10728 10728 un->un_wm_cache = kmem_cache_create(
10729 10729 name_str, sizeof (struct sd_w_map),
10730 10730 8, sd_wm_cache_constructor,
10731 10731 sd_wm_cache_destructor, NULL,
10732 10732 (void *)un, NULL, 0);
10733 10733 if (!(un->un_wm_cache)) {
10734 10734 rval = ENOMEM;
10735 10735 goto done;
10736 10736 }
10737 10737 }
10738 10738 }
10739 10739
10740 10740 if (un->un_state == SD_STATE_NORMAL) {
10741 10741 /*
10742 10742 * If the target is not yet ready here (defined by a TUR
10743 10743 * failure), invalidate the geometry and print an 'offline'
10744 10744 * message. This is a legacy message, as the state of the
10745 10745 * target is not actually changed to SD_STATE_OFFLINE.
10746 10746 *
10747 10747 * If the TUR fails for EACCES (Reservation Conflict),
10748 10748 * SD_RESERVED_BY_OTHERS will be returned to indicate
10749 10749 * reservation conflict. If the TUR fails for other
10750 10750 * reasons, SD_NOT_READY_VALID will be returned.
10751 10751 */
10752 10752 int err;
10753 10753
10754 10754 mutex_exit(SD_MUTEX(un));
10755 10755 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
10756 10756 mutex_enter(SD_MUTEX(un));
10757 10757
10758 10758 if (err != 0) {
10759 10759 mutex_exit(SD_MUTEX(un));
10760 10760 cmlb_invalidate(un->un_cmlbhandle,
10761 10761 (void *)SD_PATH_DIRECT);
10762 10762 mutex_enter(SD_MUTEX(un));
10763 10763 if (err == EACCES) {
10764 10764 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
10765 10765 "reservation conflict\n");
10766 10766 rval = SD_RESERVED_BY_OTHERS;
10767 10767 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10768 10768 } else {
10769 10769 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
10770 10770 "drive offline\n");
10771 10771 rval = SD_NOT_READY_VALID;
10772 10772 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
10773 10773 }
10774 10774 goto done;
10775 10775 }
10776 10776 }
10777 10777
10778 10778 if (un->un_f_format_in_progress == FALSE) {
10779 10779 mutex_exit(SD_MUTEX(un));
10780 10780
10781 10781 (void) cmlb_validate(un->un_cmlbhandle, 0,
10782 10782 (void *)SD_PATH_DIRECT);
10783 10783 if (cmlb_partinfo(un->un_cmlbhandle, part, NULL, NULL, NULL,
10784 10784 NULL, (void *) SD_PATH_DIRECT) != 0) {
10785 10785 rval = SD_NOT_READY_VALID;
10786 10786 mutex_enter(SD_MUTEX(un));
10787 10787
10788 10788 goto done;
10789 10789 }
10790 10790 if (un->un_f_pkstats_enabled) {
10791 10791 sd_set_pstats(un);
10792 10792 SD_TRACE(SD_LOG_IO_PARTITION, un,
10793 10793 "sd_ready_and_valid: un:0x%p pstats created and "
10794 10794 "set\n", un);
10795 10795 }
10796 10796 mutex_enter(SD_MUTEX(un));
10797 10797 }
10798 10798
10799 10799 /*
10800 10800 	 * If this device supports the DOOR_LOCK command, try to send
10801 10801 	 * it to PREVENT MEDIA REMOVAL, but don't get upset
10802 10802 	 * if it fails. For a CD, however, it is an error.
10803 10803 */
10804 10804 if (un->un_f_doorlock_supported) {
10805 10805 mutex_exit(SD_MUTEX(un));
10806 10806 status = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
10807 10807 SD_PATH_DIRECT);
10808 10808
10809 10809 if ((status != 0) && ISCD(un)) {
10810 10810 rval = SD_NOT_READY_VALID;
10811 10811 mutex_enter(SD_MUTEX(un));
10812 10812
10813 10813 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10814 10814
10815 10815 goto done;
10816 10816 } else if (status != 0)
10817 10817 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
10818 10818 mutex_enter(SD_MUTEX(un));
10819 10819 }
10820 10820
10821 10821 /* The state has changed, inform the media watch routines */
10822 10822 un->un_mediastate = DKIO_INSERTED;
10823 10823 cv_broadcast(&un->un_state_cv);
10824 10824 rval = SD_READY_VALID;
10825 10825
10826 10826 done:
10827 10827
10828 10828 /*
10829 10829 * Initialize the capacity kstat value, if no media previously
10830 10830 	 * (capacity kstat is 0) and media has been inserted
10831 10831 * (un_blockcount > 0).
10832 10832 */
10833 10833 if (un->un_errstats != NULL) {
10834 10834 stp = (struct sd_errstats *)un->un_errstats->ks_data;
10835 10835 if ((stp->sd_capacity.value.ui64 == 0) &&
10836 10836 (un->un_f_blockcount_is_valid == TRUE)) {
10837 10837 stp->sd_capacity.value.ui64 =
10838 10838 (uint64_t)((uint64_t)un->un_blockcount *
10839 10839 un->un_sys_blocksize);
10840 10840 }
10841 10841 }
10842 10842
10843 10843 mutex_exit(SD_MUTEX(un));
10844 10844 return (rval);
10845 10845 }
10846 10846
10847 10847
10848 10848 /*
10849 10849 * Function: sdmin
10850 10850 *
10851 10851 * Description: Routine to limit the size of a data transfer. Used in
10852 10852 * conjunction with physio(9F).
10853 10853 *
10854 10854 * Arguments: bp - pointer to the indicated buf(9S) struct.
10855 10855 *
10856 10856 * Context: Kernel thread context.
10857 10857 */
10858 10858
10859 10859 static void
10860 10860 sdmin(struct buf *bp)
10861 10861 {
10862 10862 struct sd_lun *un;
10863 10863 int instance;
10864 10864
10865 10865 instance = SDUNIT(bp->b_edev);
10866 10866
10867 10867 un = ddi_get_soft_state(sd_state, instance);
10868 10868 ASSERT(un != NULL);
10869 10869
10870 10870 /*
10871 10871 * We depend on buf breakup to restrict
10872 10872 * IO size if it is enabled.
10873 10873 */
10874 10874 if (un->un_buf_breakup_supported) {
10875 10875 return;
10876 10876 }
10877 10877
10878 10878 if (bp->b_bcount > un->un_max_xfer_size) {
10879 10879 bp->b_bcount = un->un_max_xfer_size;
10880 10880 }
10881 10881 }
10882 10882
10883 10883
10884 10884 /*
10885 10885 * Function: sdread
10886 10886 *
10887 10887 * Description: Driver's read(9e) entry point function.
10888 10888 *
10889 10889 * Arguments: dev - device number
10890 10890 * uio - structure pointer describing where data is to be stored
10891 10891 * in user's space
10892 10892 * cred_p - user credential pointer
10893 10893 *
10894 10894 * Return Code: ENXIO
10895 10895 * EIO
10896 10896 * EINVAL
10897 10897 * value returned by physio
10898 10898 *
10899 10899 * Context: Kernel thread context.
10900 10900 */
10901 10901 /* ARGSUSED */
10902 10902 static int
10903 10903 sdread(dev_t dev, struct uio *uio, cred_t *cred_p)
10904 10904 {
10905 10905 struct sd_lun *un = NULL;
10906 10906 int secmask;
10907 10907 int err = 0;
10908 10908 sd_ssc_t *ssc;
10909 10909
10910 10910 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
10911 10911 return (ENXIO);
10912 10912 }
10913 10913
10914 10914 ASSERT(!mutex_owned(SD_MUTEX(un)));
10915 10915
10916 10916
10917 10917 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
10918 10918 mutex_enter(SD_MUTEX(un));
10919 10919 /*
10920 10920 		 * Because the call to sd_ready_and_valid will issue I/O, we
10921 10921 		 * must wait here if either the device is suspended or
10922 10922 		 * its power level is changing.
10923 10923 */
10924 10924 while ((un->un_state == SD_STATE_SUSPENDED) ||
10925 10925 (un->un_state == SD_STATE_PM_CHANGING)) {
10926 10926 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
10927 10927 }
10928 10928 un->un_ncmds_in_driver++;
10929 10929 mutex_exit(SD_MUTEX(un));
10930 10930
10931 10931 /* Initialize sd_ssc_t for internal uscsi commands */
10932 10932 ssc = sd_ssc_init(un);
10933 10933 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
10934 10934 err = EIO;
10935 10935 } else {
10936 10936 err = 0;
10937 10937 }
10938 10938 sd_ssc_fini(ssc);
10939 10939
10940 10940 mutex_enter(SD_MUTEX(un));
10941 10941 un->un_ncmds_in_driver--;
10942 10942 ASSERT(un->un_ncmds_in_driver >= 0);
10943 10943 mutex_exit(SD_MUTEX(un));
10944 10944 if (err != 0)
10945 10945 return (err);
10946 10946 }
10947 10947
10948 10948 /*
10949 10949 * Read requests are restricted to multiples of the system block size.
10950 10950 */
10951 10951 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
10952 10952 !un->un_f_enable_rmw)
10953 10953 secmask = un->un_tgt_blocksize - 1;
10954 10954 else
10955 10955 secmask = DEV_BSIZE - 1;
10956 10956
10957 10957 if (uio->uio_loffset & ((offset_t)(secmask))) {
10958 10958 SD_ERROR(SD_LOG_READ_WRITE, un,
10959 10959 "sdread: file offset not modulo %d\n",
10960 10960 secmask + 1);
10961 10961 err = EINVAL;
10962 10962 } else if (uio->uio_iov->iov_len & (secmask)) {
10963 10963 SD_ERROR(SD_LOG_READ_WRITE, un,
10964 10964 "sdread: transfer length not modulo %d\n",
10965 10965 secmask + 1);
10966 10966 err = EINVAL;
10967 10967 } else {
10968 10968 err = physio(sdstrategy, NULL, dev, B_READ, sdmin, uio);
10969 10969 }
10970 10970
10971 10971 return (err);
10972 10972 }
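
/*
 * A minimal, user-space sketch of the secmask idiom used in sdread()
 * above (and in its siblings below): for a power-of-two block size,
 * (x & (blocksize - 1)) is non-zero exactly when x is not a multiple
 * of the block size. The values used here are illustrative only.
 */
#include <stdio.h>

static int
is_aligned(long long x, int blocksize)
{
	int secmask = blocksize - 1;	/* mirrors secmask in sdread() */

	return ((x & secmask) == 0);
}

int
main(void)
{
	(void) printf("%d\n", is_aligned(4096, 512));	/* 1: aligned */
	(void) printf("%d\n", is_aligned(4100, 512));	/* 0: misaligned */
	return (0);
}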
10973 10973
10974 10974
10975 10975 /*
10976 10976 * Function: sdwrite
10977 10977 *
10978 10978 * Description: Driver's write(9e) entry point function.
10979 10979 *
10980 10980 * Arguments: dev - device number
10981 10981 * uio - structure pointer describing where data is stored in
10982 10982 * user's space
10983 10983 * cred_p - user credential pointer
10984 10984 *
10985 10985 * Return Code: ENXIO
10986 10986 * EIO
10987 10987 * EINVAL
10988 10988 * value returned by physio
10989 10989 *
10990 10990 * Context: Kernel thread context.
10991 10991 */
10992 10992 /* ARGSUSED */
10993 10993 static int
10994 10994 sdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
10995 10995 {
10996 10996 struct sd_lun *un = NULL;
10997 10997 int secmask;
10998 10998 int err = 0;
10999 10999 sd_ssc_t *ssc;
11000 11000
11001 11001 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
11002 11002 return (ENXIO);
11003 11003 }
11004 11004
11005 11005 ASSERT(!mutex_owned(SD_MUTEX(un)));
11006 11006
11007 11007 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11008 11008 mutex_enter(SD_MUTEX(un));
11009 11009 /*
11010 11010 		 * Because the call to sd_ready_and_valid will issue I/O, we
11011 11011 		 * must wait here if either the device is suspended or
11012 11012 		 * its power level is changing.
11013 11013 */
11014 11014 while ((un->un_state == SD_STATE_SUSPENDED) ||
11015 11015 (un->un_state == SD_STATE_PM_CHANGING)) {
11016 11016 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11017 11017 }
11018 11018 un->un_ncmds_in_driver++;
11019 11019 mutex_exit(SD_MUTEX(un));
11020 11020
11021 11021 /* Initialize sd_ssc_t for internal uscsi commands */
11022 11022 ssc = sd_ssc_init(un);
11023 11023 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11024 11024 err = EIO;
11025 11025 } else {
11026 11026 err = 0;
11027 11027 }
11028 11028 sd_ssc_fini(ssc);
11029 11029
11030 11030 mutex_enter(SD_MUTEX(un));
11031 11031 un->un_ncmds_in_driver--;
11032 11032 ASSERT(un->un_ncmds_in_driver >= 0);
11033 11033 mutex_exit(SD_MUTEX(un));
11034 11034 if (err != 0)
11035 11035 return (err);
11036 11036 }
11037 11037
11038 11038 /*
11039 11039 * Write requests are restricted to multiples of the system block size.
11040 11040 */
11041 11041 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11042 11042 !un->un_f_enable_rmw)
11043 11043 secmask = un->un_tgt_blocksize - 1;
11044 11044 else
11045 11045 secmask = DEV_BSIZE - 1;
11046 11046
11047 11047 if (uio->uio_loffset & ((offset_t)(secmask))) {
11048 11048 SD_ERROR(SD_LOG_READ_WRITE, un,
11049 11049 "sdwrite: file offset not modulo %d\n",
11050 11050 secmask + 1);
11051 11051 err = EINVAL;
11052 11052 } else if (uio->uio_iov->iov_len & (secmask)) {
11053 11053 SD_ERROR(SD_LOG_READ_WRITE, un,
11054 11054 "sdwrite: transfer length not modulo %d\n",
11055 11055 secmask + 1);
11056 11056 err = EINVAL;
11057 11057 } else {
11058 11058 err = physio(sdstrategy, NULL, dev, B_WRITE, sdmin, uio);
11059 11059 }
11060 11060
11061 11061 return (err);
11062 11062 }
11063 11063
11064 11064
11065 11065 /*
11066 11066 * Function: sdaread
11067 11067 *
11068 11068 * Description: Driver's aread(9e) entry point function.
11069 11069 *
11070 11070 * Arguments: dev - device number
11071 11071 * aio - structure pointer describing where data is to be stored
11072 11072 * cred_p - user credential pointer
11073 11073 *
11074 11074 * Return Code: ENXIO
11075 11075 * EIO
11076 11076 * EINVAL
11077 11077 * value returned by aphysio
11078 11078 *
11079 11079 * Context: Kernel thread context.
11080 11080 */
11081 11081 /* ARGSUSED */
11082 11082 static int
11083 11083 sdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
11084 11084 {
11085 11085 struct sd_lun *un = NULL;
11086 11086 struct uio *uio = aio->aio_uio;
11087 11087 int secmask;
11088 11088 int err = 0;
11089 11089 sd_ssc_t *ssc;
11090 11090
11091 11091 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
11092 11092 return (ENXIO);
11093 11093 }
11094 11094
11095 11095 ASSERT(!mutex_owned(SD_MUTEX(un)));
11096 11096
11097 11097 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11098 11098 mutex_enter(SD_MUTEX(un));
11099 11099 /*
11100 11100 		 * Because the call to sd_ready_and_valid will issue I/O, we
11101 11101 		 * must wait here if either the device is suspended or
11102 11102 		 * its power level is changing.
11103 11103 */
11104 11104 while ((un->un_state == SD_STATE_SUSPENDED) ||
11105 11105 (un->un_state == SD_STATE_PM_CHANGING)) {
11106 11106 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11107 11107 }
11108 11108 un->un_ncmds_in_driver++;
11109 11109 mutex_exit(SD_MUTEX(un));
11110 11110
11111 11111 /* Initialize sd_ssc_t for internal uscsi commands */
11112 11112 ssc = sd_ssc_init(un);
11113 11113 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11114 11114 err = EIO;
11115 11115 } else {
11116 11116 err = 0;
11117 11117 }
11118 11118 sd_ssc_fini(ssc);
11119 11119
11120 11120 mutex_enter(SD_MUTEX(un));
11121 11121 un->un_ncmds_in_driver--;
11122 11122 ASSERT(un->un_ncmds_in_driver >= 0);
11123 11123 mutex_exit(SD_MUTEX(un));
11124 11124 if (err != 0)
11125 11125 return (err);
11126 11126 }
11127 11127
11128 11128 /*
11129 11129 * Read requests are restricted to multiples of the system block size.
11130 11130 */
11131 11131 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11132 11132 !un->un_f_enable_rmw)
11133 11133 secmask = un->un_tgt_blocksize - 1;
11134 11134 else
11135 11135 secmask = DEV_BSIZE - 1;
11136 11136
11137 11137 if (uio->uio_loffset & ((offset_t)(secmask))) {
11138 11138 SD_ERROR(SD_LOG_READ_WRITE, un,
11139 11139 "sdaread: file offset not modulo %d\n",
11140 11140 secmask + 1);
11141 11141 err = EINVAL;
11142 11142 } else if (uio->uio_iov->iov_len & (secmask)) {
11143 11143 SD_ERROR(SD_LOG_READ_WRITE, un,
11144 11144 "sdaread: transfer length not modulo %d\n",
11145 11145 secmask + 1);
11146 11146 err = EINVAL;
11147 11147 } else {
11148 11148 err = aphysio(sdstrategy, anocancel, dev, B_READ, sdmin, aio);
11149 11149 }
11150 11150
11151 11151 return (err);
11152 11152 }
11153 11153
11154 11154
11155 11155 /*
11156 11156 * Function: sdawrite
11157 11157 *
11158 11158 * Description: Driver's awrite(9e) entry point function.
11159 11159 *
11160 11160 * Arguments: dev - device number
11161 11161 * aio - structure pointer describing where data is stored
11162 11162 * cred_p - user credential pointer
11163 11163 *
11164 11164 * Return Code: ENXIO
11165 11165 * EIO
11166 11166 * EINVAL
11167 11167 * value returned by aphysio
11168 11168 *
11169 11169 * Context: Kernel thread context.
11170 11170 */
11171 11171 /* ARGSUSED */
11172 11172 static int
11173 11173 sdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
11174 11174 {
11175 11175 struct sd_lun *un = NULL;
11176 11176 struct uio *uio = aio->aio_uio;
11177 11177 int secmask;
11178 11178 int err = 0;
11179 11179 sd_ssc_t *ssc;
11180 11180
11181 11181 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
11182 11182 return (ENXIO);
11183 11183 }
11184 11184
11185 11185 ASSERT(!mutex_owned(SD_MUTEX(un)));
11186 11186
11187 11187 if (!SD_IS_VALID_LABEL(un) && !ISCD(un)) {
11188 11188 mutex_enter(SD_MUTEX(un));
11189 11189 /*
11190 11190 		 * Because the call to sd_ready_and_valid will issue I/O, we
11191 11191 		 * must wait here if either the device is suspended or
11192 11192 		 * its power level is changing.
11193 11193 */
11194 11194 while ((un->un_state == SD_STATE_SUSPENDED) ||
11195 11195 (un->un_state == SD_STATE_PM_CHANGING)) {
11196 11196 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11197 11197 }
11198 11198 un->un_ncmds_in_driver++;
11199 11199 mutex_exit(SD_MUTEX(un));
11200 11200
11201 11201 /* Initialize sd_ssc_t for internal uscsi commands */
11202 11202 ssc = sd_ssc_init(un);
11203 11203 if ((sd_ready_and_valid(ssc, SDPART(dev))) != SD_READY_VALID) {
11204 11204 err = EIO;
11205 11205 } else {
11206 11206 err = 0;
11207 11207 }
11208 11208 sd_ssc_fini(ssc);
11209 11209
11210 11210 mutex_enter(SD_MUTEX(un));
11211 11211 un->un_ncmds_in_driver--;
11212 11212 ASSERT(un->un_ncmds_in_driver >= 0);
11213 11213 mutex_exit(SD_MUTEX(un));
11214 11214 if (err != 0)
11215 11215 return (err);
11216 11216 }
11217 11217
11218 11218 /*
11219 11219 * Write requests are restricted to multiples of the system block size.
11220 11220 */
11221 11221 if (un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR &&
11222 11222 !un->un_f_enable_rmw)
11223 11223 secmask = un->un_tgt_blocksize - 1;
11224 11224 else
11225 11225 secmask = DEV_BSIZE - 1;
11226 11226
11227 11227 if (uio->uio_loffset & ((offset_t)(secmask))) {
11228 11228 SD_ERROR(SD_LOG_READ_WRITE, un,
11229 11229 "sdawrite: file offset not modulo %d\n",
11230 11230 secmask + 1);
11231 11231 err = EINVAL;
11232 11232 } else if (uio->uio_iov->iov_len & (secmask)) {
11233 11233 SD_ERROR(SD_LOG_READ_WRITE, un,
11234 11234 "sdawrite: transfer length not modulo %d\n",
11235 11235 secmask + 1);
11236 11236 err = EINVAL;
11237 11237 } else {
11238 11238 err = aphysio(sdstrategy, anocancel, dev, B_WRITE, sdmin, aio);
11239 11239 }
11240 11240
11241 11241 return (err);
11242 11242 }
11243 11243
11244 11244
11245 11245
11246 11246
11247 11247
11248 11248 /*
11249 11249 * Driver IO processing follows the following sequence:
11250 11250 *
11251 11251 * sdioctl(9E) sdstrategy(9E) biodone(9F)
11252 11252 * | | ^
11253 11253 * v v |
11254 11254 * sd_send_scsi_cmd() ddi_xbuf_qstrategy() +-------------------+
11255 11255 * | | | |
11256 11256 * v | | |
11257 11257 * sd_uscsi_strategy() sd_xbuf_strategy() sd_buf_iodone() sd_uscsi_iodone()
11258 11258 * | | ^ ^
11259 11259 * v v | |
11260 11260 * SD_BEGIN_IOSTART() SD_BEGIN_IOSTART() | |
11261 11261 * | | | |
11262 11262 * +---+ | +------------+ +-------+
11263 11263 * | | | |
11264 11264 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| |
11265 11265 * | v | |
11266 11266 * | sd_mapblockaddr_iostart() sd_mapblockaddr_iodone() |
11267 11267 * | | ^ |
11268 11268 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| |
11269 11269 * | v | |
11270 11270 * | sd_mapblocksize_iostart() sd_mapblocksize_iodone() |
11271 11271 * | | ^ |
11272 11272 * | SD_NEXT_IOSTART()| SD_NEXT_IODONE()| |
11273 11273 * | v | |
11274 11274 * | sd_checksum_iostart() sd_checksum_iodone() |
11275 11275 * | | ^ |
11276 11276 * +-> SD_NEXT_IOSTART()| SD_NEXT_IODONE()+------------->+
11277 11277 * | v | |
11278 11278 * | sd_pm_iostart() sd_pm_iodone() |
11279 11279 * | | ^ |
11280 11280 * | | | |
11281 11281 * +-> SD_NEXT_IOSTART()| SD_BEGIN_IODONE()--+--------------+
11282 11282 * | ^
11283 11283 * v |
11284 11284 * sd_core_iostart() |
11285 11285 * | |
11286 11286 * | +------>(*destroypkt)()
11287 11287 * +-> sd_start_cmds() <-+ | |
11288 11288 * | | | v
11289 11289 * | | | scsi_destroy_pkt(9F)
11290 11290 * | | |
11291 11291 * +->(*initpkt)() +- sdintr()
11292 11292 * | | | |
11293 11293 * | +-> scsi_init_pkt(9F) | +-> sd_handle_xxx()
11294 11294 * | +-> scsi_setup_cdb(9F) |
11295 11295 * | |
11296 11296 * +--> scsi_transport(9F) |
11297 11297 * | |
11298 11298 * +----> SCSA ---->+
11299 11299 *
11300 11300 *
11301 11301 * This code is based upon the following presumptions:
11302 11302 *
11303 11303 * - iostart and iodone functions operate on buf(9S) structures. These
11304 11304 * functions perform the necessary operations on the buf(9S) and pass
11305 11305 * them along to the next function in the chain by using the macros
11306 11306 * SD_NEXT_IOSTART() (for iostart side functions) and SD_NEXT_IODONE()
11307 11307 * (for iodone side functions).
11308 11308 *
11309 11309 * - The iostart side functions may sleep. The iodone side functions
11310 11310 * are called under interrupt context and may NOT sleep. Therefore
11311 11311 * iodone side functions also may not call iostart side functions.
11312 11312 * (NOTE: iostart side functions should NOT sleep for memory, as
11313 11313 * this could result in deadlock.)
11314 11314 *
11315 11315 * - An iostart side function may call its corresponding iodone side
11316 11316 * function directly (if necessary).
11317 11317 *
11318 11318 * - In the event of an error, an iostart side function can return a buf(9S)
11319 11319 * to its caller by calling SD_BEGIN_IODONE() (after setting B_ERROR and
11320 11320 * b_error in the usual way of course).
11321 11321 *
11322 11322 * - The taskq mechanism may be used by the iodone side functions to dispatch
11323 11323 * requests to the iostart side functions. The iostart side functions in
11324 11324 * this case would be called under the context of a taskq thread, so it's
11325 11325 * OK for them to block/sleep/spin in this case.
11326 11326 *
11327 11327 * - iostart side functions may allocate "shadow" buf(9S) structs and
11328 11328 * pass them along to the next function in the chain. The corresponding
11329 11329 * iodone side functions must coalesce the "shadow" bufs and return
11330 11330 * the "original" buf to the next higher layer.
11331 11331 *
11332 11332 * - The b_private field of the buf(9S) struct holds a pointer to
11333 11333 * an sd_xbuf struct, which contains information needed to
11334 11334 * construct the scsi_pkt for the command.
11335 11335 *
11336 11336 * - The SD_MUTEX(un) is NOT held across calls to the next layer. Each
11337 11337 * layer must acquire & release the SD_MUTEX(un) as needed.
11338 11338 */
11339 11339
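/*
 * A hypothetical layer pair illustrating the presumptions above; the
 * names sd_example_iostart/sd_example_iodone are not part of the driver.
 * The iostart side passes the buf down with SD_NEXT_IOSTART() and, on
 * error, short-circuits to the iodone chain via SD_BEGIN_IODONE(). The
 * iodone side undoes the layer's work (without sleeping) and passes the
 * buf back up with SD_NEXT_IODONE().
 */
static void
sd_example_iostart(int index, struct sd_lun *un, struct buf *bp)
{
	ASSERT(un != NULL);
	ASSERT(bp != NULL);
	ASSERT(!mutex_owned(SD_MUTEX(un)));

	if (bp->b_bcount == 0) {
		/* Error path: flag the buf and return it up the chain. */
		bioerror(bp, EINVAL);
		bp->b_resid = bp->b_bcount;
		SD_BEGIN_IODONE(index, un, bp);
		return;
	}

	/* ... per-layer work on the buf goes here ... */

	SD_NEXT_IOSTART(index, un, bp);
}

static void
sd_example_iodone(int index, struct sd_lun *un, struct buf *bp)
{
	/* ... undo the per-layer work; may NOT sleep ... */

	SD_NEXT_IODONE(index, un, bp);
}
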
11340 11340
11341 11341 /*
11342 11342 * Create taskq for all targets in the system. This is created at
11343 11343 * _init(9E) and destroyed at _fini(9E).
11344 11344 *
11345 11345 * Note: here we set the minalloc to a reasonably high number to ensure that
11346 11346 * we will have an adequate supply of task entries available at interrupt time.
11347 11347 * This is used in conjunction with the TASKQ_PREPOPULATE flag in
11348 11348  * sd_taskq_create(). Since we do not want to sleep for allocations at
11349 11349  * interrupt time, set maxalloc equal to minalloc. That way we will just fail
11350 11350  * the command if we ever try to dispatch more than SD_TASKQ_MAXALLOC taskq
11351 11351  * requests at any one instant in time.
11352 11352 */
11353 11353 #define SD_TASKQ_NUMTHREADS 8
11354 11354 #define SD_TASKQ_MINALLOC 256
11355 11355 #define SD_TASKQ_MAXALLOC 256
11356 11356
11357 11357 static taskq_t *sd_tq = NULL;
11358 11358 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_tq))
11359 11359
11360 11360 static int sd_taskq_minalloc = SD_TASKQ_MINALLOC;
11361 11361 static int sd_taskq_maxalloc = SD_TASKQ_MAXALLOC;
11362 11362
11363 11363 /*
11364 11364 * The following task queue is being created for the write part of
11365 11365 * read-modify-write of non-512 block size devices.
11366 11366  * Limit the number of threads to 1 for now. This number was chosen
11367 11367  * because the queue currently applies only to DVD-RAM and MO drives,
11368 11368  * for which performance is not the main criterion at this stage.
11369 11369  * Note: it remains to be explored whether we can use a single taskq in future.
11370 11370 */
11371 11371 #define SD_WMR_TASKQ_NUMTHREADS 1
11372 11372 static taskq_t *sd_wmr_tq = NULL;
11373 11373 _NOTE(SCHEME_PROTECTS_DATA("stable data", sd_wmr_tq))
11374 11374
11375 11375 /*
11376 11376 * Function: sd_taskq_create
11377 11377 *
11378 11378 * Description: Create taskq thread(s) and preallocate task entries
11379 11379 *
11380 11380  * Return Code: None; the allocated taskqs are saved in sd_tq and sd_wmr_tq.
11381 11381 *
11382 11382 * Context: Can sleep. Requires blockable context.
11383 11383 *
11384 11384 * Notes: - The taskq() facility currently is NOT part of the DDI.
11385 11385  * (definitely NOT recommended for 3rd-party drivers!) :-)
11386 11386 * - taskq_create() will block for memory, also it will panic
11387 11387 * if it cannot create the requested number of threads.
11388 11388 * - Currently taskq_create() creates threads that cannot be
11389 11389 * swapped.
11390 11390 * - We use TASKQ_PREPOPULATE to ensure we have an adequate
11391 11391 * supply of taskq entries at interrupt time (ie, so that we
11392 11392 * do not have to sleep for memory)
11393 11393 */
11394 11394
11395 11395 static void
11396 11396 sd_taskq_create(void)
11397 11397 {
11398 11398 char taskq_name[TASKQ_NAMELEN];
11399 11399
11400 11400 ASSERT(sd_tq == NULL);
11401 11401 ASSERT(sd_wmr_tq == NULL);
11402 11402
11403 11403 (void) snprintf(taskq_name, sizeof (taskq_name),
11404 11404 "%s_drv_taskq", sd_label);
11405 11405 sd_tq = (taskq_create(taskq_name, SD_TASKQ_NUMTHREADS,
11406 11406 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
11407 11407 TASKQ_PREPOPULATE));
11408 11408
11409 11409 (void) snprintf(taskq_name, sizeof (taskq_name),
11410 11410 "%s_rmw_taskq", sd_label);
11411 11411 sd_wmr_tq = (taskq_create(taskq_name, SD_WMR_TASKQ_NUMTHREADS,
11412 11412 (v.v_maxsyspri - 2), sd_taskq_minalloc, sd_taskq_maxalloc,
11413 11413 TASKQ_PREPOPULATE));
11414 11414 }
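
/*
 * A sketch (not part of the driver) of why minalloc == maxalloc matters:
 * with TASKQ_PREPOPULATE, a dispatcher running at interrupt time can use
 * TQ_NOSLEEP and simply fail when the preallocated entries are exhausted
 * instead of sleeping for memory. sd_example_func is hypothetical.
 */
static void	sd_example_func(void *);

static int
sd_example_dispatch(void *arg)
{
	if (taskq_dispatch(sd_tq, sd_example_func, arg, TQ_NOSLEEP) == 0) {
		/* No preallocated entry available: fail the request. */
		return (EIO);
	}
	return (0);
}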
11415 11415
11416 11416
11417 11417 /*
11418 11418 * Function: sd_taskq_delete
11419 11419 *
11420 11420 * Description: Complementary cleanup routine for sd_taskq_create().
11421 11421 *
11422 11422 * Context: Kernel thread context.
11423 11423 */
11424 11424
11425 11425 static void
11426 11426 sd_taskq_delete(void)
11427 11427 {
11428 11428 ASSERT(sd_tq != NULL);
11429 11429 ASSERT(sd_wmr_tq != NULL);
11430 11430 taskq_destroy(sd_tq);
11431 11431 taskq_destroy(sd_wmr_tq);
11432 11432 sd_tq = NULL;
11433 11433 sd_wmr_tq = NULL;
11434 11434 }
11435 11435
11436 11436
11437 11437 /*
11438 11438 * Function: sdstrategy
11439 11439 *
11440 11440 * Description: Driver's strategy (9E) entry point function.
11441 11441 *
11442 11442 * Arguments: bp - pointer to buf(9S)
11443 11443 *
11444 11444 * Return Code: Always returns zero
11445 11445 *
11446 11446 * Context: Kernel thread context.
11447 11447 */
11448 11448
11449 11449 static int
11450 11450 sdstrategy(struct buf *bp)
11451 11451 {
11452 11452 struct sd_lun *un;
11453 11453
11454 11454 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
11455 11455 if (un == NULL) {
11456 11456 bioerror(bp, EIO);
11457 11457 bp->b_resid = bp->b_bcount;
11458 11458 biodone(bp);
11459 11459 return (0);
11460 11460 }
11461 11461
11462 11462 	/* As was done in the past, fail new cmds if state is dumping. */
11463 11463 if (un->un_state == SD_STATE_DUMPING) {
11464 11464 bioerror(bp, ENXIO);
11465 11465 bp->b_resid = bp->b_bcount;
11466 11466 biodone(bp);
11467 11467 return (0);
11468 11468 }
11469 11469
11470 11470 ASSERT(!mutex_owned(SD_MUTEX(un)));
11471 11471
11472 11472 /*
11473 11473 * Commands may sneak in while we released the mutex in
11474 11474 	 * DDI_SUSPEND, so we should block new commands. However, old
11475 11475 * commands that are still in the driver at this point should
11476 11476 * still be allowed to drain.
11477 11477 */
11478 11478 mutex_enter(SD_MUTEX(un));
11479 11479 /*
11480 11480 * Must wait here if either the device is suspended or
11481 11481 	 * its power level is changing.
11482 11482 */
11483 11483 while ((un->un_state == SD_STATE_SUSPENDED) ||
11484 11484 (un->un_state == SD_STATE_PM_CHANGING)) {
11485 11485 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
11486 11486 }
11487 11487
11488 11488 un->un_ncmds_in_driver++;
11489 11489
11490 11490 /*
11491 11491 	 * atapi: Since we are running the CD in PIO mode for now, we need to
11492 11492 	 * call bp_mapin here to avoid bp_mapin being called in interrupt
11493 11493 	 * context under the HBA's init_pkt routine.
11494 11494 */
11495 11495 if (un->un_f_cfg_is_atapi == TRUE) {
11496 11496 mutex_exit(SD_MUTEX(un));
11497 11497 bp_mapin(bp);
11498 11498 mutex_enter(SD_MUTEX(un));
11499 11499 }
11500 11500 SD_INFO(SD_LOG_IO, un, "sdstrategy: un_ncmds_in_driver = %ld\n",
11501 11501 un->un_ncmds_in_driver);
11502 11502
11503 11503 if (bp->b_flags & B_WRITE)
11504 11504 un->un_f_sync_cache_required = TRUE;
11505 11505
11506 11506 mutex_exit(SD_MUTEX(un));
11507 11507
11508 11508 /*
11509 11509 * This will (eventually) allocate the sd_xbuf area and
11510 11510 * call sd_xbuf_strategy(). We just want to return the
11511 11511 	 * result of ddi_xbuf_qstrategy so that we have an
11512 11512 	 * optimized tail call which saves us a stack frame.
11513 11513 */
11514 11514 return (ddi_xbuf_qstrategy(bp, un->un_xbuf_attr));
11515 11515 }
11516 11516
11517 11517
11518 11518 /*
11519 11519 * Function: sd_xbuf_strategy
11520 11520 *
11521 11521 * Description: Function for initiating IO operations via the
11522 11522 * ddi_xbuf_qstrategy() mechanism.
11523 11523 *
11524 11524 * Context: Kernel thread context.
11525 11525 */
11526 11526
11527 11527 static void
11528 11528 sd_xbuf_strategy(struct buf *bp, ddi_xbuf_t xp, void *arg)
11529 11529 {
11530 11530 struct sd_lun *un = arg;
11531 11531
11532 11532 ASSERT(bp != NULL);
11533 11533 ASSERT(xp != NULL);
11534 11534 ASSERT(un != NULL);
11535 11535 ASSERT(!mutex_owned(SD_MUTEX(un)));
11536 11536
11537 11537 /*
11538 11538 * Initialize the fields in the xbuf and save a pointer to the
11539 11539 * xbuf in bp->b_private.
11540 11540 */
11541 11541 sd_xbuf_init(un, bp, xp, SD_CHAIN_BUFIO, NULL);
11542 11542
11543 11543 /* Send the buf down the iostart chain */
11544 11544 SD_BEGIN_IOSTART(((struct sd_xbuf *)xp)->xb_chain_iostart, un, bp);
11545 11545 }
11546 11546
11547 11547
11548 11548 /*
11549 11549 * Function: sd_xbuf_init
11550 11550 *
11551 11551 * Description: Prepare the given sd_xbuf struct for use.
11552 11552 *
11553 11553 * Arguments: un - ptr to softstate
11554 11554 * bp - ptr to associated buf(9S)
11555 11555 * xp - ptr to associated sd_xbuf
11556 11556 * chain_type - IO chain type to use:
11557 11557 * SD_CHAIN_NULL
11558 11558 * SD_CHAIN_BUFIO
11559 11559 * SD_CHAIN_USCSI
11560 11560 * SD_CHAIN_DIRECT
11561 11561 * SD_CHAIN_DIRECT_PRIORITY
11562 11562 * pktinfop - ptr to private data struct for scsi_pkt(9S)
11563 11563 * initialization; may be NULL if none.
11564 11564 *
11565 11565 * Context: Kernel thread context
11566 11566 */
11567 11567
11568 11568 static void
11569 11569 sd_xbuf_init(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
11570 11570 uchar_t chain_type, void *pktinfop)
11571 11571 {
11572 11572 int index;
11573 11573
11574 11574 ASSERT(un != NULL);
11575 11575 ASSERT(bp != NULL);
11576 11576 ASSERT(xp != NULL);
11577 11577
11578 11578 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: buf:0x%p chain type:0x%x\n",
11579 11579 bp, chain_type);
11580 11580
11581 11581 xp->xb_un = un;
11582 11582 xp->xb_pktp = NULL;
11583 11583 xp->xb_pktinfo = pktinfop;
11584 11584 xp->xb_private = bp->b_private;
11585 11585 xp->xb_blkno = (daddr_t)bp->b_blkno;
11586 11586
11587 11587 /*
11588 11588 * Set up the iostart and iodone chain indexes in the xbuf, based
11589 11589 * upon the specified chain type to use.
11590 11590 */
11591 11591 switch (chain_type) {
11592 11592 case SD_CHAIN_NULL:
11593 11593 /*
11594 11594 * Fall thru to just use the values for the buf type, even
11595 11595 		 * though for the NULL chain these values will never be used.
11596 11596 */
11597 11597 /* FALLTHRU */
11598 11598 case SD_CHAIN_BUFIO:
11599 11599 index = un->un_buf_chain_type;
11600 11600 if ((!un->un_f_has_removable_media) &&
11601 11601 (un->un_tgt_blocksize != 0) &&
11602 11602 (un->un_tgt_blocksize != DEV_BSIZE ||
11603 11603 un->un_f_enable_rmw)) {
11604 11604 int secmask = 0, blknomask = 0;
11605 11605 if (un->un_f_enable_rmw) {
11606 11606 blknomask =
11607 11607 (un->un_phy_blocksize / DEV_BSIZE) - 1;
11608 11608 secmask = un->un_phy_blocksize - 1;
11609 11609 } else {
11610 11610 blknomask =
11611 11611 (un->un_tgt_blocksize / DEV_BSIZE) - 1;
11612 11612 secmask = un->un_tgt_blocksize - 1;
11613 11613 }
11614 11614
11615 11615 if ((bp->b_lblkno & (blknomask)) ||
11616 11616 (bp->b_bcount & (secmask))) {
11617 11617 if ((un->un_f_rmw_type !=
11618 11618 SD_RMW_TYPE_RETURN_ERROR) ||
11619 11619 un->un_f_enable_rmw) {
11620 11620 if (un->un_f_pm_is_enabled == FALSE)
11621 11621 index =
11622 11622 SD_CHAIN_INFO_MSS_DSK_NO_PM;
11623 11623 else
11624 11624 index =
11625 11625 SD_CHAIN_INFO_MSS_DISK;
11626 11626 }
11627 11627 }
11628 11628 }
11629 11629 break;
11630 11630 case SD_CHAIN_USCSI:
11631 11631 index = un->un_uscsi_chain_type;
11632 11632 break;
11633 11633 case SD_CHAIN_DIRECT:
11634 11634 index = un->un_direct_chain_type;
11635 11635 break;
11636 11636 case SD_CHAIN_DIRECT_PRIORITY:
11637 11637 index = un->un_priority_chain_type;
11638 11638 break;
11639 11639 default:
11640 11640 /* We're really broken if we ever get here... */
11641 11641 panic("sd_xbuf_init: illegal chain type!");
11642 11642 /*NOTREACHED*/
11643 11643 }
11644 11644
11645 11645 xp->xb_chain_iostart = sd_chain_index_map[index].sci_iostart_index;
11646 11646 xp->xb_chain_iodone = sd_chain_index_map[index].sci_iodone_index;
11647 11647
11648 11648 /*
11649 11649 * It might be a bit easier to simply bzero the entire xbuf above,
11650 11650 * but it turns out that since we init a fair number of members anyway,
11651 11651 	 * we save a fair number of cycles by doing explicit assignment of zero.
11652 11652 */
11653 11653 xp->xb_pkt_flags = 0;
11654 11654 xp->xb_dma_resid = 0;
11655 11655 xp->xb_retry_count = 0;
11656 11656 xp->xb_victim_retry_count = 0;
11657 11657 xp->xb_ua_retry_count = 0;
11658 11658 xp->xb_nr_retry_count = 0;
11659 11659 xp->xb_sense_bp = NULL;
11660 11660 xp->xb_sense_status = 0;
11661 11661 xp->xb_sense_state = 0;
11662 11662 xp->xb_sense_resid = 0;
11663 11663 xp->xb_ena = 0;
11664 11664
11665 11665 bp->b_private = xp;
11666 11666 bp->b_flags &= ~(B_DONE | B_ERROR);
11667 11667 bp->b_resid = 0;
11668 11668 bp->av_forw = NULL;
11669 11669 bp->av_back = NULL;
11670 11670 bioerror(bp, 0);
11671 11671
11672 11672 SD_INFO(SD_LOG_IO, un, "sd_xbuf_init: done.\n");
11673 11673 }
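
/*
 * A worked, user-space example of the blknomask/secmask computation in
 * sd_xbuf_init() above, assuming a 4096-byte physical block size and the
 * standard 512-byte DEV_BSIZE; the values are illustrative. Any request
 * whose starting block or byte count trips these masks is routed to the
 * read-modify-write (MSS) chain.
 */
#include <stdio.h>

#define EX_DEV_BSIZE	512

int
main(void)
{
	int phy_blocksize = 4096;				/* assumed */
	int blknomask = (phy_blocksize / EX_DEV_BSIZE) - 1;	/* 7 */
	int secmask = phy_blocksize - 1;			/* 4095 */

	/* Block 9 is not a multiple of 8 512-byte blocks: needs RMW. */
	(void) printf("lblkno 9 -> RMW: %d\n", (9 & blknomask) != 0);
	/* 2560 bytes is not a multiple of 4096: needs RMW. */
	(void) printf("bcount 2560 -> RMW: %d\n", (2560 & secmask) != 0);
	return (0);
}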
11674 11674
11675 11675
11676 11676 /*
11677 11677 * Function: sd_uscsi_strategy
11678 11678 *
11679 11679 * Description: Wrapper for calling into the USCSI chain via physio(9F)
11680 11680 *
11681 11681 * Arguments: bp - buf struct ptr
11682 11682 *
11683 11683 * Return Code: Always returns 0
11684 11684 *
11685 11685 * Context: Kernel thread context
11686 11686 */
11687 11687
11688 11688 static int
11689 11689 sd_uscsi_strategy(struct buf *bp)
11690 11690 {
11691 11691 struct sd_lun *un;
11692 11692 struct sd_uscsi_info *uip;
11693 11693 struct sd_xbuf *xp;
11694 11694 uchar_t chain_type;
11695 11695 uchar_t cmd;
11696 11696
11697 11697 ASSERT(bp != NULL);
11698 11698
11699 11699 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
11700 11700 if (un == NULL) {
11701 11701 bioerror(bp, EIO);
11702 11702 bp->b_resid = bp->b_bcount;
11703 11703 biodone(bp);
11704 11704 return (0);
11705 11705 }
11706 11706
11707 11707 ASSERT(!mutex_owned(SD_MUTEX(un)));
11708 11708
11709 11709 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: entry: buf:0x%p\n", bp);
11710 11710
11711 11711 /*
11712 11712 * A pointer to a struct sd_uscsi_info is expected in bp->b_private
11713 11713 */
11714 11714 ASSERT(bp->b_private != NULL);
11715 11715 uip = (struct sd_uscsi_info *)bp->b_private;
11716 11716 cmd = ((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_cdb[0];
11717 11717
11718 11718 mutex_enter(SD_MUTEX(un));
11719 11719 /*
11720 11720 	 * atapi: Since we are running the CD in PIO mode for now, we need to
11721 11721 	 * call bp_mapin here to avoid bp_mapin being called in interrupt
11722 11722 	 * context under the HBA's init_pkt routine.
11723 11723 */
11724 11724 if (un->un_f_cfg_is_atapi == TRUE) {
11725 11725 mutex_exit(SD_MUTEX(un));
11726 11726 bp_mapin(bp);
11727 11727 mutex_enter(SD_MUTEX(un));
11728 11728 }
11729 11729 un->un_ncmds_in_driver++;
11730 11730 SD_INFO(SD_LOG_IO, un, "sd_uscsi_strategy: un_ncmds_in_driver = %ld\n",
11731 11731 un->un_ncmds_in_driver);
11732 11732
11733 11733 if ((bp->b_flags & B_WRITE) && (bp->b_bcount != 0) &&
11734 11734 (cmd != SCMD_MODE_SELECT) && (cmd != SCMD_MODE_SELECT_G1))
11735 11735 un->un_f_sync_cache_required = TRUE;
11736 11736
11737 11737 mutex_exit(SD_MUTEX(un));
11738 11738
11739 11739 switch (uip->ui_flags) {
11740 11740 case SD_PATH_DIRECT:
11741 11741 chain_type = SD_CHAIN_DIRECT;
11742 11742 break;
11743 11743 case SD_PATH_DIRECT_PRIORITY:
11744 11744 chain_type = SD_CHAIN_DIRECT_PRIORITY;
11745 11745 break;
11746 11746 default:
11747 11747 chain_type = SD_CHAIN_USCSI;
11748 11748 break;
11749 11749 }
11750 11750
11751 11751 /*
11752 11752 	 * We may allocate an extra buf for external USCSI commands. If the
11753 11753 	 * application asks for more than 20 bytes of sense data via USCSI,
11754 11754 	 * the SCSA layer will allocate a 252-byte sense buf for that command.
11755 11755 */
11756 11756 if (((struct uscsi_cmd *)(uip->ui_cmdp))->uscsi_rqlen >
11757 11757 SENSE_LENGTH) {
11758 11758 xp = kmem_zalloc(sizeof (struct sd_xbuf) - SENSE_LENGTH +
11759 11759 MAX_SENSE_LENGTH, KM_SLEEP);
11760 11760 } else {
11761 11761 xp = kmem_zalloc(sizeof (struct sd_xbuf), KM_SLEEP);
11762 11762 }
11763 11763
11764 11764 sd_xbuf_init(un, bp, xp, chain_type, uip->ui_cmdp);
11765 11765
11766 11766 /* Use the index obtained within xbuf_init */
11767 11767 SD_BEGIN_IOSTART(xp->xb_chain_iostart, un, bp);
11768 11768
11769 11769 SD_TRACE(SD_LOG_IO, un, "sd_uscsi_strategy: exit: buf:0x%p\n", bp);
11770 11770
11771 11771 return (0);
11772 11772 }
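
/*
 * A self-contained sketch of the over-allocation idiom used for the
 * larger sense buffer above: when a struct ends in a fixed-size array,
 * allocating sizeof (struct) - sizeof (tail) + desired bytes yields a
 * longer tail at the same member offset. Names and sizes here are
 * hypothetical.
 */
#include <stdlib.h>

#define EX_SHORT_SENSE	20
#define EX_LONG_SENSE	252

struct example_xbuf {
	int	xb_state;
	char	xb_sense[EX_SHORT_SENSE];	/* tail array */
};

int
main(void)
{
	/* xb_sense may now safely hold EX_LONG_SENSE bytes. */
	struct example_xbuf *xp = calloc(1,
	    sizeof (struct example_xbuf) - EX_SHORT_SENSE + EX_LONG_SENSE);

	free(xp);
	return (0);
}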
11773 11773
11774 11774 /*
11775 11775 * Function: sd_send_scsi_cmd
11776 11776 *
11777 11777 * Description: Runs a USCSI command for user (when called thru sdioctl),
11778 11778 * or for the driver
11779 11779 *
11780 11780 * Arguments: dev - the dev_t for the device
11781 11781 * incmd - ptr to a valid uscsi_cmd struct
11782 11782 * flag - bit flag, indicating open settings, 32/64 bit type
11783 11783 * dataspace - UIO_USERSPACE or UIO_SYSSPACE
11784 11784 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11785 11785 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11786 11786 * to use the USCSI "direct" chain and bypass the normal
11787 11787 * command waitq.
11788 11788 *
11789 11789 * Return Code: 0 - successful completion of the given command
11790 11790 * EIO - scsi_uscsi_handle_command() failed
11791 11791 * ENXIO - soft state not found for specified dev
11792 11792 * EINVAL
11793 11793 * EFAULT - copyin/copyout error
11794 11794 * return code of scsi_uscsi_handle_command():
11795 11795 * EIO
11796 11796 * ENXIO
11797 11797 * EACCES
11798 11798 *
11799 11799 * Context: Waits for command to complete. Can sleep.
11800 11800 */
11801 11801
11802 11802 static int
11803 11803 sd_send_scsi_cmd(dev_t dev, struct uscsi_cmd *incmd, int flag,
11804 11804 enum uio_seg dataspace, int path_flag)
11805 11805 {
11806 11806 struct sd_lun *un;
11807 11807 sd_ssc_t *ssc;
11808 11808 int rval;
11809 11809
11810 11810 un = ddi_get_soft_state(sd_state, SDUNIT(dev));
11811 11811 if (un == NULL) {
11812 11812 return (ENXIO);
11813 11813 }
11814 11814
11815 11815 /*
11816 11816 * Using sd_ssc_send to handle uscsi cmd
11817 11817 */
11818 11818 ssc = sd_ssc_init(un);
11819 11819 rval = sd_ssc_send(ssc, incmd, flag, dataspace, path_flag);
11820 11820 sd_ssc_fini(ssc);
11821 11821
11822 11822 return (rval);
11823 11823 }
11824 11824
11825 11825 /*
11826 11826 * Function: sd_ssc_init
11827 11827 *
11828 11828  * Description: Uscsi end-users call this function to initialize the necessary
11829 11829  * fields, such as the uscsi_cmd and sd_uscsi_info structs.
11830 11830 *
11831 11831  * The return value of sd_send_scsi_cmd will be treated as a
11832 11832  * fault in various conditions. Even if it is not zero, some
11833 11833  * callers may ignore the return value. That is to say, we
11834 11834  * cannot make an accurate assessment in sdintr, since a
11835 11835  * command failing in sdintr does not mean the caller of
11836 11836  * sd_send_scsi_cmd will treat it as a real failure.
11837 11837 *
11838 11838  * To avoid printing too many error logs for a failed uscsi
11839 11839  * packet that the caller may not treat as a failure, the
11840 11840  * sd driver keeps silent while handling all uscsi commands.
11841 11841 *
11842 11842 * During detach->attach and attach-open, for some types of
11843 11843 * problems, the driver should be providing information about
11844 11844  * the problem encountered. Devices use USCSI_SILENT, which
11845 11845 * suppresses all driver information. The result is that no
11846 11846 * information about the problem is available. Being
11847 11847 * completely silent during this time is inappropriate. The
11848 11848 * driver needs a more selective filter than USCSI_SILENT, so
11849 11849 * that information related to faults is provided.
11850 11850 *
11851 11851  * To make an accurate assessment, the caller of
11852 11852  * sd_send_scsi_USCSI_CMD should take ownership and
11853 11853  * gather the information necessary to print error messages.
11854 11854 *
11855 11855  * If we want to print the necessary info of a uscsi command, we need
11856 11856  * to keep the uscsi_cmd and sd_uscsi_info until we can make the
11857 11857  * assessment. We use sd_ssc_init to allocate the necessary
11858 11858  * structs for sending a uscsi command, and we are also
11859 11859  * responsible for freeing the memory by calling
11860 11860  * sd_ssc_fini.
11861 11861 *
11862 11862  * The calling sequence will look like:
11863 11863 * sd_ssc_init->
11864 11864 *
11865 11865 * ...
11866 11866 *
11867 11867 * sd_send_scsi_USCSI_CMD->
11868 11868 * sd_ssc_send-> - - - sdintr
11869 11869 * ...
11870 11870 *
11871 11871  * if we think the return value should be treated as a
11872 11872  * failure, we make the assessment here and print out the
11873 11873  * necessary info by retrieving uscsi_cmd and sd_uscsi_info
11874 11874 *
11875 11875 * ...
11876 11876 *
11877 11877 * sd_ssc_fini
11878 11878 *
11879 11879 *
11880 11880 * Arguments: un - pointer to driver soft state (unit) structure for this
11881 11881 * target.
11882 11882 *
11883 11883  * Return code: sd_ssc_t - pointer to the allocated sd_ssc_t struct, which
11884 11884  * contains uscsi_cmd and sd_uscsi_info.
11885 11885  * NULL - if memory for the sd_ssc_t struct cannot be allocated
11886 11886 *
11887 11887 * Context: Kernel Thread.
11888 11888 */
11889 11889 static sd_ssc_t *
11890 11890 sd_ssc_init(struct sd_lun *un)
11891 11891 {
11892 11892 sd_ssc_t *ssc;
11893 11893 struct uscsi_cmd *ucmdp;
11894 11894 struct sd_uscsi_info *uip;
11895 11895
11896 11896 ASSERT(un != NULL);
11897 11897 ASSERT(!mutex_owned(SD_MUTEX(un)));
11898 11898
11899 11899 /*
11900 11900 * Allocate sd_ssc_t structure
11901 11901 */
11902 11902 ssc = kmem_zalloc(sizeof (sd_ssc_t), KM_SLEEP);
11903 11903
11904 11904 /*
11905 11905 * Allocate uscsi_cmd by calling scsi_uscsi_alloc common routine
11906 11906 */
11907 11907 ucmdp = scsi_uscsi_alloc();
11908 11908
11909 11909 /*
11910 11910 * Allocate sd_uscsi_info structure
11911 11911 */
11912 11912 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
11913 11913
11914 11914 ssc->ssc_uscsi_cmd = ucmdp;
11915 11915 ssc->ssc_uscsi_info = uip;
11916 11916 ssc->ssc_un = un;
11917 11917
11918 11918 return (ssc);
11919 11919 }
11920 11920
11921 11921 /*
11922 11922 * Function: sd_ssc_fini
11923 11923 *
11924 11924  * Description: To free the sd_ssc_t struct and the structures hanging off it
11925 11925 *
11926 11926 * Arguments: ssc - struct pointer of sd_ssc_t.
11927 11927 */
11928 11928 static void
11929 11929 sd_ssc_fini(sd_ssc_t *ssc)
11930 11930 {
11931 11931 scsi_uscsi_free(ssc->ssc_uscsi_cmd);
11932 11932
11933 11933 if (ssc->ssc_uscsi_info != NULL) {
11934 11934 kmem_free(ssc->ssc_uscsi_info, sizeof (struct sd_uscsi_info));
11935 11935 ssc->ssc_uscsi_info = NULL;
11936 11936 }
11937 11937
11938 11938 kmem_free(ssc, sizeof (sd_ssc_t));
11939 11939 ssc = NULL;
11940 11940 }
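
/*
 * The intended init -> send -> assessment -> fini lifecycle described
 * above, as a caller would use it; a sketch only, with DOORLOCK as the
 * example command (power management that a real caller may need is
 * elided).
 */
static void
sd_example_ssc_usage(struct sd_lun *un)
{
	sd_ssc_t	*ssc;
	int		rval;

	ssc = sd_ssc_init(un);
	rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW, SD_PATH_DIRECT);
	if (rval != 0) {
		/* Dispose of the pending telemetry before freeing ssc. */
		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
	}
	sd_ssc_fini(ssc);
}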
11941 11941
11942 11942 /*
11943 11943 * Function: sd_ssc_send
11944 11944 *
11945 11945 * Description: Runs a USCSI command for user when called through sdioctl,
11946 11946 * or for the driver.
11947 11947 *
11948 11948  * Arguments: ssc - the sd_ssc_t struct that carries the uscsi_cmd and
11949 11949  * sd_uscsi_info structs.
11950 11950 * incmd - ptr to a valid uscsi_cmd struct
11951 11951 * flag - bit flag, indicating open settings, 32/64 bit type
11952 11952 * dataspace - UIO_USERSPACE or UIO_SYSSPACE
11953 11953 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
11954 11954 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
11955 11955 * to use the USCSI "direct" chain and bypass the normal
11956 11956 * command waitq.
11957 11957 *
11958 11958 * Return Code: 0 - successful completion of the given command
11959 11959 * EIO - scsi_uscsi_handle_command() failed
11960 11960 * ENXIO - soft state not found for specified dev
11961 11961 * ECANCELED - command cancelled due to low power
11962 11962 * EINVAL
11963 11963 * EFAULT - copyin/copyout error
11964 11964 * return code of scsi_uscsi_handle_command():
11965 11965 * EIO
11966 11966 * ENXIO
11967 11967 * EACCES
11968 11968 *
11969 11969 * Context: Kernel Thread;
11970 11970 * Waits for command to complete. Can sleep.
11971 11971 */
11972 11972 static int
11973 11973 sd_ssc_send(sd_ssc_t *ssc, struct uscsi_cmd *incmd, int flag,
11974 11974 enum uio_seg dataspace, int path_flag)
11975 11975 {
11976 11976 struct sd_uscsi_info *uip;
11977 11977 struct uscsi_cmd *uscmd;
11978 11978 struct sd_lun *un;
11979 11979 dev_t dev;
11980 11980
11981 11981 int format = 0;
11982 11982 int rval;
11983 11983
11984 11984 ASSERT(ssc != NULL);
11985 11985 un = ssc->ssc_un;
11986 11986 ASSERT(un != NULL);
11987 11987 uscmd = ssc->ssc_uscsi_cmd;
11988 11988 ASSERT(uscmd != NULL);
11989 11989 ASSERT(!mutex_owned(SD_MUTEX(un)));
11990 11990 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
11991 11991 /*
11992 11992 		 * If we enter here, it indicates that the previous uscsi
11993 11993 		 * command has not been processed by sd_ssc_assessment.
11994 11994 		 * This violates our rules of FMA telemetry processing.
11995 11995 * We should print out this message and the last undisposed
11996 11996 * uscsi command.
11997 11997 */
11998 11998 if (uscmd->uscsi_cdb != NULL) {
11999 11999 SD_INFO(SD_LOG_SDTEST, un,
12000 12000 "sd_ssc_send is missing the alternative "
12001 12001 "sd_ssc_assessment when running command 0x%x.\n",
12002 12002 uscmd->uscsi_cdb[0]);
12003 12003 }
12004 12004 /*
12005 12005 * Set the ssc_flags to SSC_FLAGS_UNKNOWN, which should be
12006 12006 * the initial status.
12007 12007 */
12008 12008 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12009 12009 }
12010 12010
12011 12011 /*
12012 12012 	 * We need to make sure sd_ssc_send is always followed by
12013 12013 	 * sd_ssc_assessment to avoid missing FMA telemetry.
12014 12014 */
12015 12015 ssc->ssc_flags |= SSC_FLAGS_NEED_ASSESSMENT;
12016 12016
12017 12017 /*
12018 12018 * if USCSI_PMFAILFAST is set and un is in low power, fail the
12019 12019 * command immediately.
12020 12020 */
12021 12021 mutex_enter(SD_MUTEX(un));
12022 12022 mutex_enter(&un->un_pm_mutex);
12023 12023 if ((uscmd->uscsi_flags & USCSI_PMFAILFAST) &&
12024 12024 SD_DEVICE_IS_IN_LOW_POWER(un)) {
12025 12025 SD_TRACE(SD_LOG_IO, un, "sd_ssc_send:"
12026 12026 "un:0x%p is in low power\n", un);
12027 12027 mutex_exit(&un->un_pm_mutex);
12028 12028 mutex_exit(SD_MUTEX(un));
12029 12029 return (ECANCELED);
12030 12030 }
12031 12031 mutex_exit(&un->un_pm_mutex);
12032 12032 mutex_exit(SD_MUTEX(un));
12033 12033
12034 12034 #ifdef SDDEBUG
12035 12035 switch (dataspace) {
12036 12036 case UIO_USERSPACE:
12037 12037 SD_TRACE(SD_LOG_IO, un,
12038 12038 "sd_ssc_send: entry: un:0x%p UIO_USERSPACE\n", un);
12039 12039 break;
12040 12040 case UIO_SYSSPACE:
12041 12041 SD_TRACE(SD_LOG_IO, un,
12042 12042 "sd_ssc_send: entry: un:0x%p UIO_SYSSPACE\n", un);
12043 12043 break;
12044 12044 default:
12045 12045 SD_TRACE(SD_LOG_IO, un,
12046 12046 "sd_ssc_send: entry: un:0x%p UNEXPECTED SPACE\n", un);
12047 12047 break;
12048 12048 }
12049 12049 #endif
12050 12050
12051 12051 rval = scsi_uscsi_copyin((intptr_t)incmd, flag,
12052 12052 SD_ADDRESS(un), &uscmd);
12053 12053 if (rval != 0) {
12054 12054 		SD_TRACE(SD_LOG_IO, un, "sd_ssc_send: "
12055 12055 		    "scsi_uscsi_copyin failed\n", un);
12056 12056 return (rval);
12057 12057 }
12058 12058
12059 12059 if ((uscmd->uscsi_cdb != NULL) &&
12060 12060 (uscmd->uscsi_cdb[0] == SCMD_FORMAT)) {
12061 12061 mutex_enter(SD_MUTEX(un));
12062 12062 un->un_f_format_in_progress = TRUE;
12063 12063 mutex_exit(SD_MUTEX(un));
12064 12064 format = 1;
12065 12065 }
12066 12066
12067 12067 /*
12068 12068 	 * Fill the pre-allocated sd_uscsi_info struct with the info
12069 12069 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
12070 12070 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
12071 12071 * since we allocate the buf here in this function, we do not
12072 12072 * need to preserve the prior contents of b_private.
12073 12073 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
12074 12074 */
12075 12075 uip = ssc->ssc_uscsi_info;
12076 12076 uip->ui_flags = path_flag;
12077 12077 uip->ui_cmdp = uscmd;
12078 12078
12079 12079 /*
12080 12080 * Commands sent with priority are intended for error recovery
12081 12081 * situations, and do not have retries performed.
12082 12082 */
12083 12083 if (path_flag == SD_PATH_DIRECT_PRIORITY) {
12084 12084 uscmd->uscsi_flags |= USCSI_DIAGNOSE;
12085 12085 }
12086 12086 uscmd->uscsi_flags &= ~USCSI_NOINTR;
12087 12087
12088 12088 dev = SD_GET_DEV(un);
12089 12089 rval = scsi_uscsi_handle_cmd(dev, dataspace, uscmd,
12090 12090 sd_uscsi_strategy, NULL, uip);
12091 12091
12092 12092 /*
12093 12093 	 * Mark ssc_flags right after handle_cmd to make sure
12094 12094 	 * the uscsi command has been sent.
12095 12095 */
12096 12096 ssc->ssc_flags |= SSC_FLAGS_CMD_ISSUED;
12097 12097
12098 12098 #ifdef SDDEBUG
12099 12099 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: "
12100 12100 "uscsi_status: 0x%02x uscsi_resid:0x%x\n",
12101 12101 uscmd->uscsi_status, uscmd->uscsi_resid);
12102 12102 if (uscmd->uscsi_bufaddr != NULL) {
12103 12103 SD_INFO(SD_LOG_IO, un, "sd_ssc_send: "
12104 12104 "uscmd->uscsi_bufaddr: 0x%p uscmd->uscsi_buflen:%d\n",
12105 12105 uscmd->uscsi_bufaddr, uscmd->uscsi_buflen);
12106 12106 if (dataspace == UIO_SYSSPACE) {
12107 12107 SD_DUMP_MEMORY(un, SD_LOG_IO,
12108 12108 "data", (uchar_t *)uscmd->uscsi_bufaddr,
12109 12109 uscmd->uscsi_buflen, SD_LOG_HEX);
12110 12110 }
12111 12111 }
12112 12112 #endif
12113 12113
12114 12114 if (format == 1) {
12115 12115 mutex_enter(SD_MUTEX(un));
12116 12116 un->un_f_format_in_progress = FALSE;
12117 12117 mutex_exit(SD_MUTEX(un));
12118 12118 }
12119 12119
12120 12120 (void) scsi_uscsi_copyout((intptr_t)incmd, uscmd);
12121 12121
12122 12122 return (rval);
12123 12123 }
12124 12124
12125 12125 /*
12126 12126 * Function: sd_ssc_print
12127 12127 *
12128 12128 * Description: Print information available to the console.
12129 12129 *
12130 12130  * Arguments: ssc - sd_ssc_t struct that carries the uscsi_cmd and
12131 12131  *		sd_uscsi_info.
12132 12132 * sd_severity - log level.
12133 12133 * Context: Kernel thread or interrupt context.
12134 12134 */
12135 12135 static void
12136 12136 sd_ssc_print(sd_ssc_t *ssc, int sd_severity)
12137 12137 {
12138 12138 struct uscsi_cmd *ucmdp;
12139 12139 struct scsi_device *devp;
12140 12140 dev_info_t *devinfo;
12141 12141 uchar_t *sensep;
12142 12142 int senlen;
12143 12143 union scsi_cdb *cdbp;
12144 12144 uchar_t com;
12145 12145 extern struct scsi_key_strings scsi_cmds[];
12146 12146
12147 12147 ASSERT(ssc != NULL);
12148 12148 ASSERT(ssc->ssc_un != NULL);
12149 12149
12150 12150 if (SD_FM_LOG(ssc->ssc_un) != SD_FM_LOG_EREPORT)
12151 12151 return;
12152 12152 ucmdp = ssc->ssc_uscsi_cmd;
12153 12153 devp = SD_SCSI_DEVP(ssc->ssc_un);
12154 12154 devinfo = SD_DEVINFO(ssc->ssc_un);
12155 12155 ASSERT(ucmdp != NULL);
12156 12156 ASSERT(devp != NULL);
12157 12157 ASSERT(devinfo != NULL);
12158 12158 sensep = (uint8_t *)ucmdp->uscsi_rqbuf;
12159 12159 senlen = ucmdp->uscsi_rqlen - ucmdp->uscsi_rqresid;
12160 12160 cdbp = (union scsi_cdb *)ucmdp->uscsi_cdb;
12161 12161
12162 12162 	/* In certain cases (like DOORLOCK), the cdb could be NULL. */
12163 12163 if (cdbp == NULL)
12164 12164 return;
12165 12165 	/* We don't print a log message if no sense data is available. */
12166 12166 if (senlen == 0)
12167 12167 sensep = NULL;
12168 12168 com = cdbp->scc_cmd;
12169 12169 scsi_generic_errmsg(devp, sd_label, sd_severity, 0, 0, com,
12170 12170 scsi_cmds, sensep, ssc->ssc_un->un_additional_codes, NULL);
12171 12171 }
12172 12172
12173 12173 /*
12174 12174 * Function: sd_ssc_assessment
12175 12175 *
12176 12176 * Description: We use this function to make an assessment at the point
12177 12177  *		where the SD driver may encounter a potential error.
12178 12178 *
12179 12179  * Arguments: ssc - sd_ssc_t struct that carries the uscsi_cmd and
12180 12180  *		sd_uscsi_info.
12181 12181 * tp_assess - a hint of strategy for ereport posting.
12182 12182 * Possible values of tp_assess include:
12183 12183 * SD_FMT_IGNORE - we don't post any ereport because we're
12184 12184 * sure that it is ok to ignore the underlying problems.
12185 12185 * SD_FMT_IGNORE_COMPROMISE - we don't post any ereport for now
12186 12186  *		but it might not be correct to ignore the underlying hardware
12187 12187 * error.
12188 12188 * SD_FMT_STATUS_CHECK - we will post an ereport with the
12189 12189 * payload driver-assessment of value "fail" or
12190 12190  *		"fatal" (depending on what information we have here). This
12191 12191  *		assessment value is usually set when the SD driver thinks a
12192 12192  *		potential error has occurred (typically, when the SCSI
12193 12193  *		command returns EIO).
12194 12194 * SD_FMT_STANDARD - we will post an ereport with the payload
12195 12195 * driver-assessment of value "info". This assessment value is
12196 12196 * set when the SCSI command returned successfully and with
12197 12197 * sense data sent back.
12198 12198 *
12199 12199 * Context: Kernel thread.
12200 12200 */
12201 12201 static void
12202 12202 sd_ssc_assessment(sd_ssc_t *ssc, enum sd_type_assessment tp_assess)
12203 12203 {
12204 12204 int senlen = 0;
12205 12205 struct uscsi_cmd *ucmdp = NULL;
12206 12206 struct sd_lun *un;
12207 12207
12208 12208 ASSERT(ssc != NULL);
12209 12209 un = ssc->ssc_un;
12210 12210 ASSERT(un != NULL);
12211 12211 ucmdp = ssc->ssc_uscsi_cmd;
12212 12212 ASSERT(ucmdp != NULL);
12213 12213
12214 12214 if (ssc->ssc_flags & SSC_FLAGS_NEED_ASSESSMENT) {
12215 12215 ssc->ssc_flags &= ~SSC_FLAGS_NEED_ASSESSMENT;
12216 12216 } else {
12217 12217 /*
12218 12218 		 * If we enter here, the calling sequence of sd_ssc_send
12219 12219 		 * and sd_ssc_assessment is wrong; the two must be
12220 12220 		 * called in pairs, otherwise FMA telemetry will be
12221 12221 		 * lost.
12222 12222 */
12223 12223 if (ucmdp->uscsi_cdb != NULL) {
12224 12224 SD_INFO(SD_LOG_SDTEST, un,
12225 12225 "sd_ssc_assessment is missing the "
12226 12226 			    "corresponding sd_ssc_send when running 0x%x, "
12227 12227 			    "or there are superfluous sd_ssc_assessment calls for "
12228 12228 "the same sd_ssc_send.\n",
12229 12229 ucmdp->uscsi_cdb[0]);
12230 12230 }
12231 12231 /*
12232 12232 * Set the ssc_flags to the initial value to avoid passing
12233 12233 * down dirty flags to the following sd_ssc_send function.
12234 12234 */
12235 12235 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12236 12236 return;
12237 12237 }
12238 12238
12239 12239 /*
12240 12240 * Only handle an issued command which is waiting for assessment.
12241 12241 * A command which is not issued will not have
12242 12242 	 * SSC_FLAGS_INVALID_DATA set, so it's OK to just return here.
12243 12243 */
12244 12244 if (!(ssc->ssc_flags & SSC_FLAGS_CMD_ISSUED)) {
12245 12245 sd_ssc_print(ssc, SCSI_ERR_INFO);
12246 12246 return;
12247 12247 } else {
12248 12248 /*
12249 12249 * For an issued command, we should clear this flag in
12250 12250 		 * order to make the sd_ssc_t structure reusable across
12251 12251 		 * multiple uscsi commands.
12252 12252 */
12253 12253 ssc->ssc_flags &= ~SSC_FLAGS_CMD_ISSUED;
12254 12254 }
12255 12255
12256 12256 /*
12257 12257 	 * We will not deal with non-retryable (USCSI_DIAGNOSE set)
12258 12258 	 * commands here, and we should clear the ssc_flags before returning.
12259 12259 */
12260 12260 if (ucmdp->uscsi_flags & USCSI_DIAGNOSE) {
12261 12261 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12262 12262 return;
12263 12263 }
12264 12264
12265 12265 switch (tp_assess) {
12266 12266 case SD_FMT_IGNORE:
12267 12267 case SD_FMT_IGNORE_COMPROMISE:
12268 12268 break;
12269 12269 case SD_FMT_STATUS_CHECK:
12270 12270 /*
12271 12271 		 * For a failed command (including a command that succeeded
12272 12272 		 * but sent back invalid data).
12273 12273 */
12274 12274 sd_ssc_post(ssc, SD_FM_DRV_FATAL);
12275 12275 break;
12276 12276 case SD_FMT_STANDARD:
12277 12277 /*
12278 12278 		 * Always for successful commands, possibly with sense
12279 12279 		 * data sent back.
12280 12280 * Limitation:
12281 12281 * We can only handle a succeeded command with sense
12282 12282 * data sent back when auto-request-sense is enabled.
12283 12283 */
12284 12284 senlen = ssc->ssc_uscsi_cmd->uscsi_rqlen -
12285 12285 ssc->ssc_uscsi_cmd->uscsi_rqresid;
12286 12286 if ((ssc->ssc_uscsi_info->ui_pkt_state & STATE_ARQ_DONE) &&
12287 12287 (un->un_f_arq_enabled == TRUE) &&
12288 12288 senlen > 0 &&
12289 12289 ssc->ssc_uscsi_cmd->uscsi_rqbuf != NULL) {
12290 12290 sd_ssc_post(ssc, SD_FM_DRV_NOTICE);
12291 12291 }
12292 12292 break;
12293 12293 default:
12294 12294 /*
12295 12295 		 * We should not see any other type of assessment.
12296 12296 */
12297 12297 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
12298 12298 "sd_ssc_assessment got wrong "
12299 12299 "sd_type_assessment %d.\n", tp_assess);
12300 12300 break;
12301 12301 }
12302 12302 /*
12303 12303 	 * Clear the ssc_flags before returning.
12304 12304 */
12305 12305 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12306 12306 }
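
To make the pairing requirement above concrete, here is a minimal sketch of the intended calling sequence, assuming the sd_ssc_t helpers shown in this file (the uscsi command setup, flag values, and return handling are illustrative, not taken from a real caller):

	/*
	 * Hedged sketch of the sd_ssc_send/sd_ssc_assessment pairing;
	 * the ucmd setup is elided and the flag values are illustrative.
	 */
	sd_ssc_t	*ssc = sd_ssc_init(un);	/* allocate telemetry state */
	int		rval;

	rval = sd_ssc_send(ssc, &ucmd, FKIOCTL,
	    UIO_SYSSPACE, SD_PATH_STANDARD);	/* issue the command */
	if (rval == 0)
		sd_ssc_assessment(ssc, SD_FMT_STANDARD);	/* ok, maybe sense */
	else
		sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);	/* potential error */
	sd_ssc_fini(ssc);			/* release telemetry state */
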
12307 12307
12308 12308 /*
12309 12309 * Function: sd_ssc_post
12310 12310 *
12311 12311  * Description: 1. Read the driver property to get the fm-scsi-log flag.
12312 12312  *		2. Print a log message if fm_log_capable is non-zero.
12313 12313  *		3. Call sd_ssc_ereport_post to post an ereport if possible.
12314 12314 *
12315 12315 * Context: May be called from kernel thread or interrupt context.
12316 12316 */
12317 12317 static void
12318 12318 sd_ssc_post(sd_ssc_t *ssc, enum sd_driver_assessment sd_assess)
12319 12319 {
12320 12320 struct sd_lun *un;
12321 12321 int sd_severity;
12322 12322
12323 12323 ASSERT(ssc != NULL);
12324 12324 un = ssc->ssc_un;
12325 12325 ASSERT(un != NULL);
12326 12326
12327 12327 /*
12328 12328 	 * We may enter here from sd_ssc_assessment (for a USCSI command)
12329 12329 	 * or directly from sdintr context.
12330 12330 	 * We don't handle non-disk drives (CD-ROM, removable media).
12331 12331 	 * Clear the ssc_flags before returning in case we've set
12332 12332 	 * SSC_FLAGS_INVALID_XXX, which should be skipped for a non-disk
12333 12333 	 * drive.
12334 12334 */
12335 12335 if (ISCD(un) || un->un_f_has_removable_media) {
12336 12336 ssc->ssc_flags = SSC_FLAGS_UNKNOWN;
12337 12337 return;
12338 12338 }
12339 12339
12340 12340 switch (sd_assess) {
12341 12341 case SD_FM_DRV_FATAL:
12342 12342 sd_severity = SCSI_ERR_FATAL;
12343 12343 break;
12344 12344 case SD_FM_DRV_RECOVERY:
12345 12345 sd_severity = SCSI_ERR_RECOVERED;
12346 12346 break;
12347 12347 case SD_FM_DRV_RETRY:
12348 12348 sd_severity = SCSI_ERR_RETRYABLE;
12349 12349 break;
12350 12350 case SD_FM_DRV_NOTICE:
12351 12351 sd_severity = SCSI_ERR_INFO;
12352 12352 break;
12353 12353 default:
12354 12354 sd_severity = SCSI_ERR_UNKNOWN;
12355 12355 }
12356 12356 /* print log */
12357 12357 sd_ssc_print(ssc, sd_severity);
12358 12358
12359 12359 /* always post ereport */
12360 12360 sd_ssc_ereport_post(ssc, sd_assess);
12361 12361 }
12362 12362
12363 12363 /*
12364 12364 * Function: sd_ssc_set_info
12365 12365 *
12366 12366 * Description: Mark ssc_flags and set ssc_info which would be the
12367 12367  *		payload of the uderr ereport. This function will cause
12368 12368 * sd_ssc_ereport_post to post uderr ereport only.
12369 12369  *		Besides, when ssc_flags == SSC_FLAGS_INVALID_DATA (USCSI),
12370 12370 * the function will also call SD_ERROR or scsi_log for a
12371 12371 * CDROM/removable-media/DDI_FM_NOT_CAPABLE device.
12372 12372 *
12373 12373  * Arguments: ssc - sd_ssc_t struct that carries the uscsi_cmd and
12374 12374  *		sd_uscsi_info.
12375 12375 * ssc_flags - indicate the sub-category of a uderr.
12376 12376 * comp - this argument is meaningful only when
12377 12377 * ssc_flags == SSC_FLAGS_INVALID_DATA, and its possible
12378 12378 * values include:
12379 12379 * > 0, SD_ERROR is used with comp as the driver logging
12380 12380 * component;
12381 12381  *		= 0, scsi_log is used to log error telemetries;
12382 12382 * < 0, no log available for this telemetry.
12383 12383 *
12384 12384 * Context: Kernel thread or interrupt context
12385 12385 */
12386 12386 static void
12387 12387 sd_ssc_set_info(sd_ssc_t *ssc, int ssc_flags, uint_t comp, const char *fmt, ...)
12388 12388 {
12389 12389 va_list ap;
12390 12390
12391 12391 ASSERT(ssc != NULL);
12392 12392 ASSERT(ssc->ssc_un != NULL);
12393 12393
12394 12394 ssc->ssc_flags |= ssc_flags;
12395 12395 va_start(ap, fmt);
12396 12396 (void) vsnprintf(ssc->ssc_info, sizeof (ssc->ssc_info), fmt, ap);
12397 12397 va_end(ap);
12398 12398
12399 12399 /*
12400 12400 * If SSC_FLAGS_INVALID_DATA is set, it should be a uscsi command
12401 12401 	 * with invalid data sent back. For a non-uscsi command, the
12402 12402 * following code will be bypassed.
12403 12403 */
12404 12404 if (ssc_flags & SSC_FLAGS_INVALID_DATA) {
12405 12405 if (SD_FM_LOG(ssc->ssc_un) == SD_FM_LOG_NSUP) {
12406 12406 /*
12407 12407 			 * If the error belongs to a certain component and we
12408 12408 * do not want it to show up on the console, we
12409 12409 * will use SD_ERROR, otherwise scsi_log is
12410 12410 * preferred.
12411 12411 */
12412 12412 if (comp > 0) {
12413 12413 SD_ERROR(comp, ssc->ssc_un, ssc->ssc_info);
12414 12414 } else if (comp == 0) {
12415 12415 scsi_log(SD_DEVINFO(ssc->ssc_un), sd_label,
12416 12416 CE_WARN, ssc->ssc_info);
12417 12417 }
12418 12418 }
12419 12419 }
12420 12420 }
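
A short, hedged usage sketch of the comp argument described above; the messages are invented for illustration, and SD_LOG_COMMON is assumed to be a valid sd logging component from sddef.h:

	/* comp > 0: route the message through SD_ERROR under that component. */
	sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, SD_LOG_COMMON,
	    "illustrative message: invalid data in returned page");

	/* comp == 0: route the message through scsi_log at CE_WARN. */
	sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, 0,
	    "illustrative message: mode page 0x%x has wrong length", 0x3f);
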
12421 12421
12422 12422 /*
12423 12423 * Function: sd_buf_iodone
12424 12424 *
12425 12425 * Description: Frees the sd_xbuf & returns the buf to its originator.
12426 12426 *
12427 12427 * Context: May be called from interrupt context.
12428 12428 */
12429 12429 /* ARGSUSED */
12430 12430 static void
12431 12431 sd_buf_iodone(int index, struct sd_lun *un, struct buf *bp)
12432 12432 {
12433 12433 struct sd_xbuf *xp;
12434 12434
12435 12435 ASSERT(un != NULL);
12436 12436 ASSERT(bp != NULL);
12437 12437 ASSERT(!mutex_owned(SD_MUTEX(un)));
12438 12438
12439 12439 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: entry.\n");
12440 12440
12441 12441 xp = SD_GET_XBUF(bp);
12442 12442 ASSERT(xp != NULL);
12443 12443
12444 12444 /* xbuf is gone after this */
12445 12445 if (ddi_xbuf_done(bp, un->un_xbuf_attr)) {
12446 12446 mutex_enter(SD_MUTEX(un));
12447 12447
12448 12448 /*
12449 12449 		 * Grab the time when the cmd completed.
12450 12450 		 * This is used for determining if the system has been
12451 12451 		 * idle long enough to be marked idle to the PM framework.
12452 12452 		 * This lowers the overhead, and therefore improves
12453 12453 		 * performance per I/O operation.
12454 12454 */
12455 12455 un->un_pm_idle_time = ddi_get_time();
12456 12456
12457 12457 un->un_ncmds_in_driver--;
12458 12458 ASSERT(un->un_ncmds_in_driver >= 0);
12459 12459 SD_INFO(SD_LOG_IO, un,
12460 12460 "sd_buf_iodone: un_ncmds_in_driver = %ld\n",
12461 12461 un->un_ncmds_in_driver);
12462 12462
12463 12463 mutex_exit(SD_MUTEX(un));
12464 12464 }
12465 12465
12466 12466 biodone(bp); /* bp is gone after this */
12467 12467
12468 12468 SD_TRACE(SD_LOG_IO_CORE, un, "sd_buf_iodone: exit.\n");
12469 12469 }
12470 12470
12471 12471
12472 12472 /*
12473 12473 * Function: sd_uscsi_iodone
12474 12474 *
12475 12475 * Description: Frees the sd_xbuf & returns the buf to its originator.
12476 12476 *
12477 12477 * Context: May be called from interrupt context.
12478 12478 */
12479 12479 /* ARGSUSED */
12480 12480 static void
12481 12481 sd_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
12482 12482 {
12483 12483 struct sd_xbuf *xp;
12484 12484
12485 12485 ASSERT(un != NULL);
12486 12486 ASSERT(bp != NULL);
12487 12487
12488 12488 xp = SD_GET_XBUF(bp);
12489 12489 ASSERT(xp != NULL);
12490 12490 ASSERT(!mutex_owned(SD_MUTEX(un)));
12491 12491
12492 12492 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: entry.\n");
12493 12493
12494 12494 bp->b_private = xp->xb_private;
12495 12495
12496 12496 mutex_enter(SD_MUTEX(un));
12497 12497
12498 12498 /*
12499 12499 	 * Grab the time when the cmd completed.
12500 12500 	 * This is used for determining if the system has been
12501 12501 	 * idle long enough to be marked idle to the PM framework.
12502 12502 	 * This lowers the overhead, and therefore improves
12503 12503 	 * performance per I/O operation.
12504 12504 */
12505 12505 un->un_pm_idle_time = ddi_get_time();
12506 12506
12507 12507 un->un_ncmds_in_driver--;
12508 12508 ASSERT(un->un_ncmds_in_driver >= 0);
12509 12509 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: un_ncmds_in_driver = %ld\n",
12510 12510 un->un_ncmds_in_driver);
12511 12511
12512 12512 mutex_exit(SD_MUTEX(un));
12513 12513
12514 12514 if (((struct uscsi_cmd *)(xp->xb_pktinfo))->uscsi_rqlen >
12515 12515 SENSE_LENGTH) {
12516 12516 kmem_free(xp, sizeof (struct sd_xbuf) - SENSE_LENGTH +
12517 12517 MAX_SENSE_LENGTH);
12518 12518 } else {
12519 12519 kmem_free(xp, sizeof (struct sd_xbuf));
12520 12520 }
12521 12521
12522 12522 biodone(bp);
12523 12523
12524 12524 SD_INFO(SD_LOG_IO, un, "sd_uscsi_iodone: exit.\n");
12525 12525 }
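
The two kmem_free() sizes above have to mirror the allocation of the xbuf: when the caller supplies a request-sense buffer larger than the default SENSE_LENGTH, the xbuf is assumed to have been allocated with its default sense area grown to MAX_SENSE_LENGTH. A minimal sketch of that size computation (the allocation itself happens elsewhere in this driver; constants are from sddef.h):

	/* Hedged sketch of the assumed sd_xbuf sizing. */
	size_t	xb_size = sizeof (struct sd_xbuf);

	if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
		/* Grow the trailing sense area to its maximum size. */
		xb_size = sizeof (struct sd_xbuf) - SENSE_LENGTH +
		    MAX_SENSE_LENGTH;
	}
	xp = kmem_zalloc(xb_size, KM_SLEEP);	/* freed later with xb_size */
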
12526 12526
12527 12527
12528 12528 /*
12529 12529 * Function: sd_mapblockaddr_iostart
12530 12530 *
12531 12531 * Description: Verify request lies within the partition limits for
12532 12532 * the indicated minor device. Issue "overrun" buf if
12533 12533 * request would exceed partition range. Converts
12534 12534 * partition-relative block address to absolute.
12535 12535 *
12536 12536 * Upon exit of this function:
12537 12537  *		1. I/O is aligned:
12538 12538  *		   xp->xb_blkno represents the absolute sector address.
12539 12539  *		2. I/O is misaligned:
12540 12540  *		   xp->xb_blkno represents the absolute logical block address
12541 12541  *		   based on DEV_BSIZE. The logical block address will be
12542 12542  *		   converted to a physical sector address in
12543 12543  *		   sd_mapblocksize_iostart.
12544 12544  *		3. I/O is misaligned but is aligned in the "overrun" buf:
12545 12545  *		   xp->xb_blkno represents the absolute logical block address
12546 12546  *		   based on DEV_BSIZE. The logical block address will be
12547 12547  *		   converted to a physical sector address in
12548 12548  *		   sd_mapblocksize_iostart, but no RMW will be issued.
12549 12549 *
12550 12550 * Context: Can sleep
12551 12551 *
12552 12552 * Issues: This follows what the old code did, in terms of accessing
12553 12553 * some of the partition info in the unit struct without holding
12554 12554  *		the mutex. This is a general issue: if the partition info
12555 12555 * can be altered while IO is in progress... as soon as we send
12556 12556 * a buf, its partitioning can be invalid before it gets to the
12557 12557 * device. Probably the right fix is to move partitioning out
12558 12558 * of the driver entirely.
12559 12559 */
12560 12560
12561 12561 static void
12562 12562 sd_mapblockaddr_iostart(int index, struct sd_lun *un, struct buf *bp)
12563 12563 {
12564 12564 diskaddr_t nblocks; /* #blocks in the given partition */
12565 12565 daddr_t blocknum; /* Block number specified by the buf */
12566 12566 size_t requested_nblocks;
12567 12567 size_t available_nblocks;
12568 12568 int partition;
12569 12569 diskaddr_t partition_offset;
12570 12570 struct sd_xbuf *xp;
12571 12571 int secmask = 0, blknomask = 0;
12572 12572 ushort_t is_aligned = TRUE;
12573 12573
12574 12574 ASSERT(un != NULL);
12575 12575 ASSERT(bp != NULL);
12576 12576 ASSERT(!mutex_owned(SD_MUTEX(un)));
12577 12577
12578 12578 SD_TRACE(SD_LOG_IO_PARTITION, un,
12579 12579 "sd_mapblockaddr_iostart: entry: buf:0x%p\n", bp);
12580 12580
12581 12581 xp = SD_GET_XBUF(bp);
12582 12582 ASSERT(xp != NULL);
12583 12583
12584 12584 /*
12585 12585 * If the geometry is not indicated as valid, attempt to access
12586 12586 * the unit & verify the geometry/label. This can be the case for
12587 12587 	 * removable-media devices, or if the device was opened in
12588 12588 * NDELAY/NONBLOCK mode.
12589 12589 */
12590 12590 partition = SDPART(bp->b_edev);
12591 12591
12592 12592 if (!SD_IS_VALID_LABEL(un)) {
12593 12593 sd_ssc_t *ssc;
12594 12594 /*
12595 12595 		 * Initialize sd_ssc_t for internal uscsi commands.
12596 12596 		 * To avoid a potential performance issue, we allocate
12597 12597 		 * this memory only when the label is invalid.
12598 12598 */
12599 12599 ssc = sd_ssc_init(un);
12600 12600
12601 12601 if (sd_ready_and_valid(ssc, partition) != SD_READY_VALID) {
12602 12602 /*
12603 12603 * For removable devices it is possible to start an
12604 12604 			 * I/O without media by opening the device in nodelay
12605 12605 			 * mode. Also for writable CDs there can be many
12606 12606 			 * scenarios where there is no geometry yet but the
12607 12607 			 * volume manager is trying to issue a read() just because
12608 12608 			 * it can see the TOC on the CD. So do not print a message
12609 12609 * for removables.
12610 12610 */
12611 12611 if (!un->un_f_has_removable_media) {
12612 12612 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
12613 12613 "i/o to invalid geometry\n");
12614 12614 }
12615 12615 bioerror(bp, EIO);
12616 12616 bp->b_resid = bp->b_bcount;
12617 12617 SD_BEGIN_IODONE(index, un, bp);
12618 12618
12619 12619 sd_ssc_fini(ssc);
12620 12620 return;
12621 12621 }
12622 12622 sd_ssc_fini(ssc);
12623 12623 }
12624 12624
12625 12625 nblocks = 0;
12626 12626 (void) cmlb_partinfo(un->un_cmlbhandle, partition,
12627 12627 &nblocks, &partition_offset, NULL, NULL, (void *)SD_PATH_DIRECT);
12628 12628
12629 12629 if (un->un_f_enable_rmw) {
12630 12630 blknomask = (un->un_phy_blocksize / DEV_BSIZE) - 1;
12631 12631 secmask = un->un_phy_blocksize - 1;
12632 12632 } else {
12633 12633 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
12634 12634 secmask = un->un_tgt_blocksize - 1;
12635 12635 }
12636 12636
12637 12637 if ((bp->b_lblkno & (blknomask)) || (bp->b_bcount & (secmask))) {
12638 12638 is_aligned = FALSE;
12639 12639 }
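
Both masks rely on the block sizes being powers of two, so a bitwise AND finds a nonzero remainder without a division. A self-contained sketch of the same test, assuming a 4096-byte physical block size and the 512-byte DEV_BSIZE used above:

	#include <stdio.h>

	#define	DEV_BSIZE	512

	int
	main(void)
	{
		unsigned phy_blocksize = 4096;	/* assumed 4K-sector disk */
		unsigned blknomask = (phy_blocksize / DEV_BSIZE) - 1;	/* 0x7 */
		unsigned secmask = phy_blocksize - 1;			/* 0xfff */
		long lblkno = 9;	/* DEV_BSIZE-relative start block */
		long bcount = 4096;	/* transfer length in bytes */
		int aligned = !(lblkno & blknomask) && !(bcount & secmask);

		/* Prints 0: block 9 starts in the middle of a 4K sector. */
		printf("aligned = %d\n", aligned);
		return (0);
	}
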
12640 12640
12641 12641 if (!(NOT_DEVBSIZE(un)) || un->un_f_enable_rmw) {
12642 12642 /*
12643 12643 * If I/O is aligned, no need to involve RMW(Read Modify Write)
12644 12644 * Convert the logical block number to target's physical sector
12645 12645 * number.
12646 12646 */
12647 12647 if (is_aligned) {
12648 12648 xp->xb_blkno = SD_SYS2TGTBLOCK(un, xp->xb_blkno);
12649 12649 } else {
12650 - switch (un->un_f_rmw_type) {
12651 - case SD_RMW_TYPE_RETURN_ERROR:
12652 - if (un->un_f_enable_rmw)
12653 - break;
12654 - else {
12655 - bp->b_flags |= B_ERROR;
12656 - goto error_exit;
12657 - }
12658 -
12659 - case SD_RMW_TYPE_DEFAULT:
12650 + /*
12651 + * There is no RMW if we're just reading, so don't
12652 + * warn or error out because of it.
12653 + */
12654 + if (bp->b_flags & B_READ) {
12655 + /*EMPTY*/
12656 + } else if (!un->un_f_enable_rmw &&
12657 + un->un_f_rmw_type == SD_RMW_TYPE_RETURN_ERROR) {
12658 + bp->b_flags |= B_ERROR;
12659 + goto error_exit;
12660 + } else if (un->un_f_rmw_type == SD_RMW_TYPE_DEFAULT) {
12660 12661 mutex_enter(SD_MUTEX(un));
12661 12662 if (!un->un_f_enable_rmw &&
12662 12663 un->un_rmw_msg_timeid == NULL) {
12663 12664 scsi_log(SD_DEVINFO(un), sd_label,
12664 12665 CE_WARN, "I/O request is not "
12665 12666 "aligned with %d disk sector size. "
12666 12667 "It is handled through Read Modify "
12667 12668 "Write but the performance is "
12668 12669 "very low.\n",
12669 12670 un->un_tgt_blocksize);
12670 12671 un->un_rmw_msg_timeid =
12671 12672 timeout(sd_rmw_msg_print_handler,
12672 12673 un, SD_RMW_MSG_PRINT_TIMEOUT);
12673 12674 } else {
12674 12675 				un->un_rmw_incre_count++;
12675 12676 }
12676 12677 mutex_exit(SD_MUTEX(un));
12677 - break;
12678 -
12679 - case SD_RMW_TYPE_NO_WARNING:
12680 - default:
12681 - break;
12682 12678 }
12683 12679
12684 12680 nblocks = SD_TGT2SYSBLOCK(un, nblocks);
12685 12681 partition_offset = SD_TGT2SYSBLOCK(un,
12686 12682 partition_offset);
12687 12683 }
12688 12684 }
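
The rewritten branch above (the substance of this fix) can be condensed into a pure decision function. A hedged sketch, with simplified types and a hypothetical name, of when a misaligned request now errors, warns (subject to the message throttling shown above), or passes silently:

	/* Hypothetical condensation of the branch above, not driver code. */
	enum rmw_action { RMW_SILENT, RMW_WARN, RMW_ERROR };

	static enum rmw_action
	rmw_classify(int is_read, int enable_rmw, int rmw_type)
	{
		if (is_read)
			return (RMW_SILENT);	/* reads never incur RMW */
		if (!enable_rmw && rmw_type == SD_RMW_TYPE_RETURN_ERROR)
			return (RMW_ERROR);	/* misaligned write rejected */
		if (rmw_type == SD_RMW_TYPE_DEFAULT)
			return (RMW_WARN);	/* throttled console warning */
		return (RMW_SILENT);		/* SD_RMW_TYPE_NO_WARNING */
	}
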
12689 12685
12690 12686 /*
12691 12687 * blocknum is the starting block number of the request. At this
12692 12688 * point it is still relative to the start of the minor device.
12693 12689 */
12694 12690 blocknum = xp->xb_blkno;
12695 12691
12696 12692 /*
12697 12693 * Legacy: If the starting block number is one past the last block
12698 12694 * in the partition, do not set B_ERROR in the buf.
12699 12695 */
12700 12696 if (blocknum == nblocks) {
12701 12697 goto error_exit;
12702 12698 }
12703 12699
12704 12700 /*
12705 12701 * Confirm that the first block of the request lies within the
12706 12702 * partition limits. Also the requested number of bytes must be
12707 12703 * a multiple of the system block size.
12708 12704 */
12709 12705 if ((blocknum < 0) || (blocknum >= nblocks) ||
12710 12706 ((bp->b_bcount & (DEV_BSIZE - 1)) != 0)) {
12711 12707 bp->b_flags |= B_ERROR;
12712 12708 goto error_exit;
12713 12709 }
12714 12710
12715 12711 /*
12716 12712 	 * If the requested # of blocks exceeds the available # of blocks,
12717 12713 	 * that is an overrun of the partition.
12718 12714 */
12719 12715 if ((!NOT_DEVBSIZE(un)) && is_aligned) {
12720 12716 requested_nblocks = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
12721 12717 } else {
12722 12718 requested_nblocks = SD_BYTES2SYSBLOCKS(bp->b_bcount);
12723 12719 }
12724 12720
12725 12721 available_nblocks = (size_t)(nblocks - blocknum);
12726 12722 ASSERT(nblocks >= blocknum);
12727 12723
12728 12724 if (requested_nblocks > available_nblocks) {
12729 12725 size_t resid;
12730 12726
12731 12727 /*
12732 12728 * Allocate an "overrun" buf to allow the request to proceed
12733 12729 * for the amount of space available in the partition. The
12734 12730 * amount not transferred will be added into the b_resid
12735 12731 * when the operation is complete. The overrun buf
12736 12732 * replaces the original buf here, and the original buf
12737 12733 * is saved inside the overrun buf, for later use.
12738 12734 */
12739 12735 if ((!NOT_DEVBSIZE(un)) && is_aligned) {
12740 12736 resid = SD_TGTBLOCKS2BYTES(un,
12741 12737 (offset_t)(requested_nblocks - available_nblocks));
12742 12738 } else {
12743 12739 resid = SD_SYSBLOCKS2BYTES(
12744 12740 (offset_t)(requested_nblocks - available_nblocks));
12745 12741 }
12746 12742
12747 12743 size_t count = bp->b_bcount - resid;
12748 12744 /*
12749 12745 		 * Note: count is an unsigned entity and thus can never
12750 12746 		 * be less than 0, so ASSERT that the original values
12751 12747 		 * are correct.
12752 12748 */
12753 12749 ASSERT(bp->b_bcount >= resid);
12754 12750
12755 12751 bp = sd_bioclone_alloc(bp, count, blocknum,
12756 12752 (int (*)(struct buf *)) sd_mapblockaddr_iodone);
12757 12753 xp = SD_GET_XBUF(bp); /* Update for 'new' bp! */
12758 12754 ASSERT(xp != NULL);
12759 12755 }
12760 12756
12761 12757 /* At this point there should be no residual for this buf. */
12762 12758 ASSERT(bp->b_resid == 0);
12763 12759
12764 12760 /* Convert the block number to an absolute address. */
12765 12761 xp->xb_blkno += partition_offset;
12766 12762
12767 12763 SD_NEXT_IOSTART(index, un, bp);
12768 12764
12769 12765 SD_TRACE(SD_LOG_IO_PARTITION, un,
12770 12766 "sd_mapblockaddr_iostart: exit 0: buf:0x%p\n", bp);
12771 12767
12772 12768 return;
12773 12769
12774 12770 error_exit:
12775 12771 bp->b_resid = bp->b_bcount;
12776 12772 SD_BEGIN_IODONE(index, un, bp);
12777 12773 SD_TRACE(SD_LOG_IO_PARTITION, un,
12778 12774 "sd_mapblockaddr_iostart: exit 1: buf:0x%p\n", bp);
12779 12775 }
12780 12776
12781 12777
12782 12778 /*
12783 12779 * Function: sd_mapblockaddr_iodone
12784 12780 *
12785 12781 * Description: Completion-side processing for partition management.
12786 12782 *
12787 12783 * Context: May be called under interrupt context
12788 12784 */
12789 12785
12790 12786 static void
12791 12787 sd_mapblockaddr_iodone(int index, struct sd_lun *un, struct buf *bp)
12792 12788 {
12793 12789 /* int partition; */ /* Not used, see below. */
12794 12790 ASSERT(un != NULL);
12795 12791 ASSERT(bp != NULL);
12796 12792 ASSERT(!mutex_owned(SD_MUTEX(un)));
12797 12793
12798 12794 SD_TRACE(SD_LOG_IO_PARTITION, un,
12799 12795 "sd_mapblockaddr_iodone: entry: buf:0x%p\n", bp);
12800 12796
12801 12797 if (bp->b_iodone == (int (*)(struct buf *)) sd_mapblockaddr_iodone) {
12802 12798 /*
12803 12799 * We have an "overrun" buf to deal with...
12804 12800 */
12805 12801 struct sd_xbuf *xp;
12806 12802 struct buf *obp; /* ptr to the original buf */
12807 12803
12808 12804 xp = SD_GET_XBUF(bp);
12809 12805 ASSERT(xp != NULL);
12810 12806
12811 12807 /* Retrieve the pointer to the original buf */
12812 12808 obp = (struct buf *)xp->xb_private;
12813 12809 ASSERT(obp != NULL);
12814 12810
12815 12811 obp->b_resid = obp->b_bcount - (bp->b_bcount - bp->b_resid);
12816 12812 bioerror(obp, bp->b_error);
12817 12813
12818 12814 sd_bioclone_free(bp);
12819 12815
12820 12816 /*
12821 12817 * Get back the original buf.
12822 12818 * Note that since the restoration of xb_blkno below
12823 12819 * was removed, the sd_xbuf is not needed.
12824 12820 */
12825 12821 bp = obp;
12826 12822 /*
12827 12823 * xp = SD_GET_XBUF(bp);
12828 12824 * ASSERT(xp != NULL);
12829 12825 */
12830 12826 }
12831 12827
12832 12828 /*
12833 12829 * Convert sd->xb_blkno back to a minor-device relative value.
12834 12830 * Note: this has been commented out, as it is not needed in the
12835 12831 * current implementation of the driver (ie, since this function
12836 12832 * is at the top of the layering chains, so the info will be
12837 12833 * discarded) and it is in the "hot" IO path.
12838 12834 *
12839 12835 * partition = getminor(bp->b_edev) & SDPART_MASK;
12840 12836 * xp->xb_blkno -= un->un_offset[partition];
12841 12837 */
12842 12838
12843 12839 SD_NEXT_IODONE(index, un, bp);
12844 12840
12845 12841 SD_TRACE(SD_LOG_IO_PARTITION, un,
12846 12842 "sd_mapblockaddr_iodone: exit: buf:0x%p\n", bp);
12847 12843 }
12848 12844
12849 12845
12850 12846 /*
12851 12847 * Function: sd_mapblocksize_iostart
12852 12848 *
12853 12849 * Description: Convert between system block size (un->un_sys_blocksize)
12854 12850 * and target block size (un->un_tgt_blocksize).
12855 12851 *
12856 12852 * Context: Can sleep to allocate resources.
12857 12853 *
12858 12854 * Assumptions: A higher layer has already performed any partition validation,
12859 12855 * and converted the xp->xb_blkno to an absolute value relative
12860 12856 * to the start of the device.
12861 12857 *
12862 12858 * It is also assumed that the higher layer has implemented
12863 12859 * an "overrun" mechanism for the case where the request would
12864 12860 * read/write beyond the end of a partition. In this case we
12865 12861 * assume (and ASSERT) that bp->b_resid == 0.
12866 12862 *
12867 12863 * Note: The implementation for this routine assumes the target
12868 12864 * block size remains constant between allocation and transport.
12869 12865 */
12870 12866
12871 12867 static void
12872 12868 sd_mapblocksize_iostart(int index, struct sd_lun *un, struct buf *bp)
12873 12869 {
12874 12870 struct sd_mapblocksize_info *bsp;
12875 12871 struct sd_xbuf *xp;
12876 12872 offset_t first_byte;
12877 12873 daddr_t start_block, end_block;
12878 12874 daddr_t request_bytes;
12879 12875 ushort_t is_aligned = FALSE;
12880 12876
12881 12877 ASSERT(un != NULL);
12882 12878 ASSERT(bp != NULL);
12883 12879 ASSERT(!mutex_owned(SD_MUTEX(un)));
12884 12880 ASSERT(bp->b_resid == 0);
12885 12881
12886 12882 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
12887 12883 "sd_mapblocksize_iostart: entry: buf:0x%p\n", bp);
12888 12884
12889 12885 /*
12890 12886 * For a non-writable CD, a write request is an error
12891 12887 */
12892 12888 if (ISCD(un) && ((bp->b_flags & B_READ) == 0) &&
12893 12889 (un->un_f_mmc_writable_media == FALSE)) {
12894 12890 bioerror(bp, EIO);
12895 12891 bp->b_resid = bp->b_bcount;
12896 12892 SD_BEGIN_IODONE(index, un, bp);
12897 12893 return;
12898 12894 }
12899 12895
12900 12896 /*
12901 12897 * We do not need a shadow buf if the device is using
12902 12898 * un->un_sys_blocksize as its block size or if bcount == 0.
12903 12899 * In this case there is no layer-private data block allocated.
12904 12900 */
12905 12901 if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) ||
12906 12902 (bp->b_bcount == 0)) {
12907 12903 goto done;
12908 12904 }
12909 12905
12910 12906 #if defined(__i386) || defined(__amd64)
12911 12907 /* We do not support non-block-aligned transfers for ROD devices */
12912 12908 ASSERT(!ISROD(un));
12913 12909 #endif
12914 12910
12915 12911 xp = SD_GET_XBUF(bp);
12916 12912 ASSERT(xp != NULL);
12917 12913
12918 12914 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12919 12915 "tgt_blocksize:0x%x sys_blocksize: 0x%x\n",
12920 12916 un->un_tgt_blocksize, DEV_BSIZE);
12921 12917 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12922 12918 "request start block:0x%x\n", xp->xb_blkno);
12923 12919 SD_INFO(SD_LOG_IO_RMMEDIA, un, "sd_mapblocksize_iostart: "
12924 12920 "request len:0x%x\n", bp->b_bcount);
12925 12921
12926 12922 /*
12927 12923 * Allocate the layer-private data area for the mapblocksize layer.
12928 12924 * Layers are allowed to use the xp_private member of the sd_xbuf
12929 12925 * struct to store the pointer to their layer-private data block, but
12930 12926 * each layer also has the responsibility of restoring the prior
12931 12927 * contents of xb_private before returning the buf/xbuf to the
12932 12928 * higher layer that sent it.
12933 12929 *
12934 12930 * Here we save the prior contents of xp->xb_private into the
12935 12931 * bsp->mbs_oprivate field of our layer-private data area. This value
12936 12932 * is restored by sd_mapblocksize_iodone() just prior to freeing up
12937 12933 * the layer-private area and returning the buf/xbuf to the layer
12938 12934 * that sent it.
12939 12935 *
12940 12936 * Note that here we use kmem_zalloc for the allocation as there are
12941 12937 * parts of the mapblocksize code that expect certain fields to be
12942 12938 * zero unless explicitly set to a required value.
12943 12939 */
12944 12940 bsp = kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
12945 12941 bsp->mbs_oprivate = xp->xb_private;
12946 12942 xp->xb_private = bsp;
12947 12943
12948 12944 /*
12949 12945 * This treats the data on the disk (target) as an array of bytes.
12950 12946 * first_byte is the byte offset, from the beginning of the device,
12951 12947 * to the location of the request. This is converted from a
12952 12948 * un->un_sys_blocksize block address to a byte offset, and then back
12953 12949 * to a block address based upon a un->un_tgt_blocksize block size.
12954 12950 *
12955 12951 * xp->xb_blkno should be absolute upon entry into this function,
12956 12952 * but, but it is based upon partitions that use the "system"
12957 12953  * but it is based upon partitions that use the "system"
12958 12954 * the target.
12959 12955 *
12960 12956 * Note that end_block is actually the block that follows the last
12961 12957 * block of the request, but that's what is needed for the computation.
12962 12958 */
12963 12959 first_byte = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
12964 12960 if (un->un_f_enable_rmw) {
12965 12961 start_block = xp->xb_blkno =
12966 12962 (first_byte / un->un_phy_blocksize) *
12967 12963 (un->un_phy_blocksize / DEV_BSIZE);
12968 12964 end_block = ((first_byte + bp->b_bcount +
12969 12965 un->un_phy_blocksize - 1) / un->un_phy_blocksize) *
12970 12966 (un->un_phy_blocksize / DEV_BSIZE);
12971 12967 } else {
12972 12968 start_block = xp->xb_blkno = first_byte / un->un_tgt_blocksize;
12973 12969 end_block = (first_byte + bp->b_bcount +
12974 12970 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;
12975 12971 }
12976 12972
12977 12973 /* request_bytes is rounded up to a multiple of the target block size */
12978 12974 request_bytes = (end_block - start_block) * un->un_tgt_blocksize;
12979 12975
12980 12976 /*
12981 12977 * See if the starting address of the request and the request
12982 12978 * length are aligned on a un->un_tgt_blocksize boundary. If aligned
12983 12979 * then we do not need to allocate a shadow buf to handle the request.
12984 12980 */
12985 12981 if (un->un_f_enable_rmw) {
12986 12982 if (((first_byte % un->un_phy_blocksize) == 0) &&
12987 12983 ((bp->b_bcount % un->un_phy_blocksize) == 0)) {
12988 12984 is_aligned = TRUE;
12989 12985 }
12990 12986 } else {
12991 12987 if (((first_byte % un->un_tgt_blocksize) == 0) &&
12992 12988 ((bp->b_bcount % un->un_tgt_blocksize) == 0)) {
12993 12989 is_aligned = TRUE;
12994 12990 }
12995 12991 }
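
The rounding above is ordinary floor/ceiling arithmetic on byte offsets. A self-contained sketch with an assumed 512-byte system block and 4096-byte physical block shows how a misaligned request expands to whole physical sectors in the RMW case:

	#include <stdio.h>

	#define	DEV_BSIZE	512LL

	int
	main(void)
	{
		long long phy = 4096;		/* assumed physical block size */
		long long xb_blkno = 9;		/* DEV_BSIZE-relative block */
		long long bcount = 1024;	/* request length in bytes */
		long long first_byte = xb_blkno * DEV_BSIZE;		/* 4608 */
		long long start_block = (first_byte / phy) *
		    (phy / DEV_BSIZE);					/* 8 */
		long long end_block = ((first_byte + bcount + phy - 1) / phy) *
		    (phy / DEV_BSIZE);					/* 16 */

		/* The 1024-byte request grows to one full 4096-byte sector. */
		printf("start=%lld end=%lld span=%lld bytes\n", start_block,
		    end_block, (end_block - start_block) * DEV_BSIZE);
		return (0);
	}
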
12996 12992
12997 12993 if ((bp->b_flags & B_READ) == 0) {
12998 12994 /*
12999 12995 * Lock the range for a write operation. An aligned request is
13000 12996 * considered a simple write; otherwise the request must be a
13001 12997 * read-modify-write.
13002 12998 */
13003 12999 bsp->mbs_wmp = sd_range_lock(un, start_block, end_block - 1,
13004 13000 (is_aligned == TRUE) ? SD_WTYPE_SIMPLE : SD_WTYPE_RMW);
13005 13001 }
13006 13002
13007 13003 /*
13008 13004 * Alloc a shadow buf if the request is not aligned. Also, this is
13009 13005 * where the READ command is generated for a read-modify-write. (The
13010 13006 * write phase is deferred until after the read completes.)
13011 13007 */
13012 13008 if (is_aligned == FALSE) {
13013 13009
13014 13010 struct sd_mapblocksize_info *shadow_bsp;
13015 13011 struct sd_xbuf *shadow_xp;
13016 13012 struct buf *shadow_bp;
13017 13013
13018 13014 /*
13019 13015 		 * Allocate the shadow buf and its associated xbuf. Note that
13020 13016 * after this call the xb_blkno value in both the original
13021 13017 * buf's sd_xbuf _and_ the shadow buf's sd_xbuf will be the
13022 13018 * same: absolute relative to the start of the device, and
13023 13019 * adjusted for the target block size. The b_blkno in the
13024 13020 * shadow buf will also be set to this value. We should never
13025 13021 * change b_blkno in the original bp however.
13026 13022 *
13027 13023 * Note also that the shadow buf will always need to be a
13028 13024 * READ command, regardless of whether the incoming command
13029 13025 * is a READ or a WRITE.
13030 13026 */
13031 13027 shadow_bp = sd_shadow_buf_alloc(bp, request_bytes, B_READ,
13032 13028 xp->xb_blkno,
13033 13029 (int (*)(struct buf *)) sd_mapblocksize_iodone);
13034 13030
13035 13031 shadow_xp = SD_GET_XBUF(shadow_bp);
13036 13032
13037 13033 /*
13038 13034 * Allocate the layer-private data for the shadow buf.
13039 13035 * (No need to preserve xb_private in the shadow xbuf.)
13040 13036 */
13041 13037 shadow_xp->xb_private = shadow_bsp =
13042 13038 kmem_zalloc(sizeof (struct sd_mapblocksize_info), KM_SLEEP);
13043 13039
13044 13040 /*
13045 13041 * bsp->mbs_copy_offset is used later by sd_mapblocksize_iodone
13046 13042 * to figure out where the start of the user data is (based upon
13047 13043 * the system block size) in the data returned by the READ
13048 13044 * command (which will be based upon the target blocksize). Note
13049 13045 * that this is only really used if the request is unaligned.
13050 13046 */
13051 13047 if (un->un_f_enable_rmw) {
13052 13048 bsp->mbs_copy_offset = (ssize_t)(first_byte -
13053 13049 ((offset_t)xp->xb_blkno * un->un_sys_blocksize));
13054 13050 ASSERT((bsp->mbs_copy_offset >= 0) &&
13055 13051 (bsp->mbs_copy_offset < un->un_phy_blocksize));
13056 13052 } else {
13057 13053 bsp->mbs_copy_offset = (ssize_t)(first_byte -
13058 13054 ((offset_t)xp->xb_blkno * un->un_tgt_blocksize));
13059 13055 ASSERT((bsp->mbs_copy_offset >= 0) &&
13060 13056 (bsp->mbs_copy_offset < un->un_tgt_blocksize));
13061 13057 }
13062 13058
13063 13059 shadow_bsp->mbs_copy_offset = bsp->mbs_copy_offset;
13064 13060
13065 13061 shadow_bsp->mbs_layer_index = bsp->mbs_layer_index = index;
13066 13062
13067 13063 /* Transfer the wmap (if any) to the shadow buf */
13068 13064 shadow_bsp->mbs_wmp = bsp->mbs_wmp;
13069 13065 bsp->mbs_wmp = NULL;
13070 13066
13071 13067 /*
13072 13068 * The shadow buf goes on from here in place of the
13073 13069 * original buf.
13074 13070 */
13075 13071 shadow_bsp->mbs_orig_bp = bp;
13076 13072 bp = shadow_bp;
13077 13073 }
13078 13074
13079 13075 SD_INFO(SD_LOG_IO_RMMEDIA, un,
13080 13076 "sd_mapblocksize_iostart: tgt start block:0x%x\n", xp->xb_blkno);
13081 13077 SD_INFO(SD_LOG_IO_RMMEDIA, un,
13082 13078 "sd_mapblocksize_iostart: tgt request len:0x%x\n",
13083 13079 request_bytes);
13084 13080 SD_INFO(SD_LOG_IO_RMMEDIA, un,
13085 13081 "sd_mapblocksize_iostart: shadow buf:0x%x\n", bp);
13086 13082
13087 13083 done:
13088 13084 SD_NEXT_IOSTART(index, un, bp);
13089 13085
13090 13086 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
13091 13087 "sd_mapblocksize_iostart: exit: buf:0x%p\n", bp);
13092 13088 }
13093 13089
13094 13090
13095 13091 /*
13096 13092 * Function: sd_mapblocksize_iodone
13097 13093 *
13098 13094 * Description: Completion side processing for block-size mapping.
13099 13095 *
13100 13096 * Context: May be called under interrupt context
13101 13097 */
13102 13098
13103 13099 static void
13104 13100 sd_mapblocksize_iodone(int index, struct sd_lun *un, struct buf *bp)
13105 13101 {
13106 13102 struct sd_mapblocksize_info *bsp;
13107 13103 struct sd_xbuf *xp;
13108 13104 struct sd_xbuf *orig_xp; /* sd_xbuf for the original buf */
13109 13105 struct buf *orig_bp; /* ptr to the original buf */
13110 13106 offset_t shadow_end;
13111 13107 offset_t request_end;
13112 13108 offset_t shadow_start;
13113 13109 ssize_t copy_offset;
13114 13110 size_t copy_length;
13115 13111 size_t shortfall;
13116 13112 uint_t is_write; /* TRUE if this bp is a WRITE */
13117 13113 	uint_t has_wmap;	/* TRUE if this bp has a wmap */
13118 13114
13119 13115 ASSERT(un != NULL);
13120 13116 ASSERT(bp != NULL);
13121 13117
13122 13118 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
13123 13119 "sd_mapblocksize_iodone: entry: buf:0x%p\n", bp);
13124 13120
13125 13121 /*
13126 13122 * There is no shadow buf or layer-private data if the target is
13127 13123 * using un->un_sys_blocksize as its block size or if bcount == 0.
13128 13124 */
13129 13125 if ((un->un_tgt_blocksize == DEV_BSIZE && !un->un_f_enable_rmw) ||
13130 13126 (bp->b_bcount == 0)) {
13131 13127 goto exit;
13132 13128 }
13133 13129
13134 13130 xp = SD_GET_XBUF(bp);
13135 13131 ASSERT(xp != NULL);
13136 13132
13137 13133 /* Retrieve the pointer to the layer-private data area from the xbuf. */
13138 13134 bsp = xp->xb_private;
13139 13135
13140 13136 is_write = ((bp->b_flags & B_READ) == 0) ? TRUE : FALSE;
13141 13137 has_wmap = (bsp->mbs_wmp != NULL) ? TRUE : FALSE;
13142 13138
13143 13139 if (is_write) {
13144 13140 /*
13145 13141 * For a WRITE request we must free up the block range that
13146 13142 * we have locked up. This holds regardless of whether this is
13147 13143 * an aligned write request or a read-modify-write request.
13148 13144 */
13149 13145 sd_range_unlock(un, bsp->mbs_wmp);
13150 13146 bsp->mbs_wmp = NULL;
13151 13147 }
13152 13148
13153 13149 if ((bp->b_iodone != (int(*)(struct buf *))sd_mapblocksize_iodone)) {
13154 13150 /*
13155 13151 * An aligned read or write command will have no shadow buf;
13156 13152 * there is not much else to do with it.
13157 13153 */
13158 13154 goto done;
13159 13155 }
13160 13156
13161 13157 orig_bp = bsp->mbs_orig_bp;
13162 13158 ASSERT(orig_bp != NULL);
13163 13159 orig_xp = SD_GET_XBUF(orig_bp);
13164 13160 ASSERT(orig_xp != NULL);
13165 13161 ASSERT(!mutex_owned(SD_MUTEX(un)));
13166 13162
13167 13163 if (!is_write && has_wmap) {
13168 13164 /*
13169 13165 * A READ with a wmap means this is the READ phase of a
13170 13166 * read-modify-write. If an error occurred on the READ then
13171 13167 * we do not proceed with the WRITE phase or copy any data.
13172 13168 * Just release the write maps and return with an error.
13173 13169 */
13174 13170 if ((bp->b_resid != 0) || (bp->b_error != 0)) {
13175 13171 orig_bp->b_resid = orig_bp->b_bcount;
13176 13172 bioerror(orig_bp, bp->b_error);
13177 13173 sd_range_unlock(un, bsp->mbs_wmp);
13178 13174 goto freebuf_done;
13179 13175 }
13180 13176 }
13181 13177
13182 13178 /*
13183 13179 * Here is where we set up to copy the data from the shadow buf
13184 13180 * into the space associated with the original buf.
13185 13181 *
13186 13182 * To deal with the conversion between block sizes, these
13187 13183 * computations treat the data as an array of bytes, with the
13188 13184 * first byte (byte 0) corresponding to the first byte in the
13189 13185 * first block on the disk.
13190 13186 */
13191 13187
13192 13188 /*
13193 13189 * shadow_start and shadow_len indicate the location and size of
13194 13190 * the data returned with the shadow IO request.
13195 13191 */
13196 13192 if (un->un_f_enable_rmw) {
13197 13193 shadow_start = SD_SYSBLOCKS2BYTES((offset_t)xp->xb_blkno);
13198 13194 } else {
13199 13195 shadow_start = SD_TGTBLOCKS2BYTES(un, (offset_t)xp->xb_blkno);
13200 13196 }
13201 13197 shadow_end = shadow_start + bp->b_bcount - bp->b_resid;
13202 13198
13203 13199 /*
13204 13200 * copy_offset gives the offset (in bytes) from the start of the first
13205 13201 * block of the READ request to the beginning of the data. We retrieve
13206 13202 * this value from xb_pktp in the ORIGINAL xbuf, as it has been saved
13207 13203  * there by sd_mapblocksize_iostart(). copy_length gives the amount of
13208 13204 * data to be copied (in bytes).
13209 13205 */
13210 13206 copy_offset = bsp->mbs_copy_offset;
13211 13207 if (un->un_f_enable_rmw) {
13212 13208 ASSERT((copy_offset >= 0) &&
13213 13209 (copy_offset < un->un_phy_blocksize));
13214 13210 } else {
13215 13211 ASSERT((copy_offset >= 0) &&
13216 13212 (copy_offset < un->un_tgt_blocksize));
13217 13213 }
13218 13214
13219 13215 copy_length = orig_bp->b_bcount;
13220 13216 request_end = shadow_start + copy_offset + orig_bp->b_bcount;
13221 13217
13222 13218 /*
13223 13219 * Set up the resid and error fields of orig_bp as appropriate.
13224 13220 */
13225 13221 if (shadow_end >= request_end) {
13226 13222 /* We got all the requested data; set resid to zero */
13227 13223 orig_bp->b_resid = 0;
13228 13224 } else {
13229 13225 /*
13230 13226 * We failed to get enough data to fully satisfy the original
13231 13227 * request. Just copy back whatever data we got and set
13232 13228 * up the residual and error code as required.
13233 13229 *
13234 13230 * 'shortfall' is the amount by which the data received with the
13235 13231 * shadow buf has "fallen short" of the requested amount.
13236 13232 */
13237 13233 shortfall = (size_t)(request_end - shadow_end);
13238 13234
13239 13235 if (shortfall > orig_bp->b_bcount) {
13240 13236 /*
13241 13237 * We did not get enough data to even partially
13242 13238 * fulfill the original request. The residual is
13243 13239 * equal to the amount requested.
13244 13240 */
13245 13241 orig_bp->b_resid = orig_bp->b_bcount;
13246 13242 } else {
13247 13243 /*
13248 13244 * We did not get all the data that we requested
13249 13245 * from the device, but we will try to return what
13250 13246 * portion we did get.
13251 13247 */
13252 13248 orig_bp->b_resid = shortfall;
13253 13249 }
13254 13250 ASSERT(copy_length >= orig_bp->b_resid);
13255 13251 copy_length -= orig_bp->b_resid;
13256 13252 }
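
The residual bookkeeping above is plain interval arithmetic on byte offsets. A self-contained sketch with illustrative values shows how a short shadow transfer becomes the original buf's resid:

	#include <stdio.h>

	int
	main(void)
	{
		/* Illustrative values, in bytes. */
		long long shadow_start = 4096, shadow_bcount = 4096;
		long long shadow_resid = 3072;	/* shadow IO came up short */
		long long copy_offset = 512, orig_bcount = 1024;

		long long shadow_end = shadow_start + shadow_bcount -
		    shadow_resid;				/* 5120 */
		long long request_end = shadow_start + copy_offset +
		    orig_bcount;				/* 5632 */
		long long resid = 0, copy_length = orig_bcount;

		if (shadow_end < request_end) {
			long long shortfall = request_end - shadow_end;
			resid = (shortfall > orig_bcount) ?
			    orig_bcount : shortfall;
			copy_length -= resid;
		}
		/* Prints resid=512 copy_length=512. */
		printf("resid=%lld copy_length=%lld\n", resid, copy_length);
		return (0);
	}
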
13257 13253
13258 13254 /* Propagate the error code from the shadow buf to the original buf */
13259 13255 bioerror(orig_bp, bp->b_error);
13260 13256
13261 13257 if (is_write) {
13262 13258 goto freebuf_done; /* No data copying for a WRITE */
13263 13259 }
13264 13260
13265 13261 if (has_wmap) {
13266 13262 /*
13267 13263 * This is a READ command from the READ phase of a
13268 13264 * read-modify-write request. We have to copy the data given
13269 13265 * by the user OVER the data returned by the READ command,
13270 13266 * then convert the command from a READ to a WRITE and send
13271 13267 * it back to the target.
13272 13268 */
13273 13269 bcopy(orig_bp->b_un.b_addr, bp->b_un.b_addr + copy_offset,
13274 13270 copy_length);
13275 13271
13276 13272 bp->b_flags &= ~((int)B_READ); /* Convert to a WRITE */
13277 13273
13278 13274 /*
13279 13275 * Dispatch the WRITE command to the taskq thread, which
13280 13276 * will in turn send the command to the target. When the
13281 13277 * WRITE command completes, we (sd_mapblocksize_iodone())
13282 13278 * will get called again as part of the iodone chain
13283 13279 * processing for it. Note that we will still be dealing
13284 13280 * with the shadow buf at that point.
13285 13281 */
13286 13282 if (taskq_dispatch(sd_wmr_tq, sd_read_modify_write_task, bp,
13287 13283 KM_NOSLEEP) != 0) {
13288 13284 /*
13289 13285 * Dispatch was successful so we are done. Return
13290 13286 * without going any higher up the iodone chain. Do
13291 13287 * not free up any layer-private data until after the
13292 13288 * WRITE completes.
13293 13289 */
13294 13290 return;
13295 13291 }
13296 13292
13297 13293 /*
13298 13294 * Dispatch of the WRITE command failed; set up the error
13299 13295 * condition and send this IO back up the iodone chain.
13300 13296 */
13301 13297 bioerror(orig_bp, EIO);
13302 13298 orig_bp->b_resid = orig_bp->b_bcount;
13303 13299
13304 13300 } else {
13305 13301 /*
13306 13302 * This is a regular READ request (ie, not a RMW). Copy the
13307 13303 * data from the shadow buf into the original buf. The
13308 13304 * copy_offset compensates for any "misalignment" between the
13309 13305 * shadow buf (with its un->un_tgt_blocksize blocks) and the
13310 13306 * original buf (with its un->un_sys_blocksize blocks).
13311 13307 */
13312 13308 bcopy(bp->b_un.b_addr + copy_offset, orig_bp->b_un.b_addr,
13313 13309 copy_length);
13314 13310 }
13315 13311
13316 13312 freebuf_done:
13317 13313
13318 13314 /*
13319 13315 * At this point we still have both the shadow buf AND the original
13320 13316 * buf to deal with, as well as the layer-private data area in each.
13321 13317 * Local variables are as follows:
13322 13318 *
13323 13319 * bp -- points to shadow buf
13324 13320 * xp -- points to xbuf of shadow buf
13325 13321 * bsp -- points to layer-private data area of shadow buf
13326 13322 * orig_bp -- points to original buf
13327 13323 *
13328 13324 * First free the shadow buf and its associated xbuf, then free the
13329 13325 * layer-private data area from the shadow buf. There is no need to
13330 13326 * restore xb_private in the shadow xbuf.
13331 13327 */
13332 13328 sd_shadow_buf_free(bp);
13333 13329 kmem_free(bsp, sizeof (struct sd_mapblocksize_info));
13334 13330
13335 13331 /*
13336 13332 * Now update the local variables to point to the original buf, xbuf,
13337 13333 * and layer-private area.
13338 13334 */
13339 13335 bp = orig_bp;
13340 13336 xp = SD_GET_XBUF(bp);
13341 13337 ASSERT(xp != NULL);
13342 13338 ASSERT(xp == orig_xp);
13343 13339 bsp = xp->xb_private;
13344 13340 ASSERT(bsp != NULL);
13345 13341
13346 13342 done:
13347 13343 /*
13348 13344 * Restore xb_private to whatever it was set to by the next higher
13349 13345 * layer in the chain, then free the layer-private data area.
13350 13346 */
13351 13347 xp->xb_private = bsp->mbs_oprivate;
13352 13348 kmem_free(bsp, sizeof (struct sd_mapblocksize_info));
13353 13349
13354 13350 exit:
13355 13351 SD_TRACE(SD_LOG_IO_RMMEDIA, SD_GET_UN(bp),
13356 13352 "sd_mapblocksize_iodone: calling SD_NEXT_IODONE: buf:0x%p\n", bp);
13357 13353
13358 13354 SD_NEXT_IODONE(index, un, bp);
13359 13355 }
13360 13356
13361 13357
13362 13358 /*
13363 13359 * Function: sd_checksum_iostart
13364 13360 *
13365 13361 * Description: A stub function for a layer that's currently not used.
13366 13362 * For now just a placeholder.
13367 13363 *
13368 13364 * Context: Kernel thread context
13369 13365 */
13370 13366
13371 13367 static void
13372 13368 sd_checksum_iostart(int index, struct sd_lun *un, struct buf *bp)
13373 13369 {
13374 13370 ASSERT(un != NULL);
13375 13371 ASSERT(bp != NULL);
13376 13372 ASSERT(!mutex_owned(SD_MUTEX(un)));
13377 13373 SD_NEXT_IOSTART(index, un, bp);
13378 13374 }
13379 13375
13380 13376
13381 13377 /*
13382 13378 * Function: sd_checksum_iodone
13383 13379 *
13384 13380 * Description: A stub function for a layer that's currently not used.
13385 13381 * For now just a placeholder.
13386 13382 *
13387 13383 * Context: May be called under interrupt context
13388 13384 */
13389 13385
13390 13386 static void
13391 13387 sd_checksum_iodone(int index, struct sd_lun *un, struct buf *bp)
13392 13388 {
13393 13389 ASSERT(un != NULL);
13394 13390 ASSERT(bp != NULL);
13395 13391 ASSERT(!mutex_owned(SD_MUTEX(un)));
13396 13392 SD_NEXT_IODONE(index, un, bp);
13397 13393 }
13398 13394
13399 13395
13400 13396 /*
13401 13397 * Function: sd_checksum_uscsi_iostart
13402 13398 *
13403 13399 * Description: A stub function for a layer that's currently not used.
13404 13400 * For now just a placeholder.
13405 13401 *
13406 13402 * Context: Kernel thread context
13407 13403 */
13408 13404
13409 13405 static void
13410 13406 sd_checksum_uscsi_iostart(int index, struct sd_lun *un, struct buf *bp)
13411 13407 {
13412 13408 ASSERT(un != NULL);
13413 13409 ASSERT(bp != NULL);
13414 13410 ASSERT(!mutex_owned(SD_MUTEX(un)));
13415 13411 SD_NEXT_IOSTART(index, un, bp);
13416 13412 }
13417 13413
13418 13414
13419 13415 /*
13420 13416 * Function: sd_checksum_uscsi_iodone
13421 13417 *
13422 13418 * Description: A stub function for a layer that's currently not used.
13423 13419 * For now just a placeholder.
13424 13420 *
13425 13421 * Context: May be called under interrupt context
13426 13422 */
13427 13423
13428 13424 static void
13429 13425 sd_checksum_uscsi_iodone(int index, struct sd_lun *un, struct buf *bp)
13430 13426 {
13431 13427 ASSERT(un != NULL);
13432 13428 ASSERT(bp != NULL);
13433 13429 ASSERT(!mutex_owned(SD_MUTEX(un)));
13434 13430 SD_NEXT_IODONE(index, un, bp);
13435 13431 }
13436 13432
13437 13433
13438 13434 /*
13439 13435 * Function: sd_pm_iostart
13440 13436 *
13441 13437  * Description: iostart-side routine for power management.
13442 13438 *
13443 13439 * Context: Kernel thread context
13444 13440 */
13445 13441
13446 13442 static void
13447 13443 sd_pm_iostart(int index, struct sd_lun *un, struct buf *bp)
13448 13444 {
13449 13445 ASSERT(un != NULL);
13450 13446 ASSERT(bp != NULL);
13451 13447 ASSERT(!mutex_owned(SD_MUTEX(un)));
13452 13448 ASSERT(!mutex_owned(&un->un_pm_mutex));
13453 13449
13454 13450 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: entry\n");
13455 13451
13456 13452 if (sd_pm_entry(un) != DDI_SUCCESS) {
13457 13453 /*
13458 13454 * Set up to return the failed buf back up the 'iodone'
13459 13455 * side of the calling chain.
13460 13456 */
13461 13457 bioerror(bp, EIO);
13462 13458 bp->b_resid = bp->b_bcount;
13463 13459
13464 13460 SD_BEGIN_IODONE(index, un, bp);
13465 13461
13466 13462 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
13467 13463 return;
13468 13464 }
13469 13465
13470 13466 SD_NEXT_IOSTART(index, un, bp);
13471 13467
13472 13468 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iostart: exit\n");
13473 13469 }
13474 13470
13475 13471
13476 13472 /*
13477 13473 * Function: sd_pm_iodone
13478 13474 *
13479 13475  * Description: iodone-side routine for power management.
13480 13476 *
13481 13477 * Context: may be called from interrupt context
13482 13478 */
13483 13479
13484 13480 static void
13485 13481 sd_pm_iodone(int index, struct sd_lun *un, struct buf *bp)
13486 13482 {
13487 13483 ASSERT(un != NULL);
13488 13484 ASSERT(bp != NULL);
13489 13485 ASSERT(!mutex_owned(&un->un_pm_mutex));
13490 13486
13491 13487 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: entry\n");
13492 13488
13493 13489 /*
13494 13490 * After attach the following flag is only read, so don't
13495 13491 * take the penalty of acquiring a mutex for it.
13496 13492 */
13497 13493 if (un->un_f_pm_is_enabled == TRUE) {
13498 13494 sd_pm_exit(un);
13499 13495 }
13500 13496
13501 13497 SD_NEXT_IODONE(index, un, bp);
13502 13498
13503 13499 SD_TRACE(SD_LOG_IO_PM, un, "sd_pm_iodone: exit\n");
13504 13500 }
13505 13501
13506 13502
13507 13503 /*
13508 13504 * Function: sd_core_iostart
13509 13505 *
13510 13506 * Description: Primary driver function for enqueuing buf(9S) structs from
13511 13507 * the system and initiating IO to the target device
13512 13508 *
13513 13509 * Context: Kernel thread context. Can sleep.
13514 13510 *
13515 13511 * Assumptions: - The given xp->xb_blkno is absolute
13516 13512 * (ie, relative to the start of the device).
13517 13513 * - The IO is to be done using the native blocksize of
13518 13514 * the device, as specified in un->un_tgt_blocksize.
13519 13515 */
13520 13516 /* ARGSUSED */
13521 13517 static void
13522 13518 sd_core_iostart(int index, struct sd_lun *un, struct buf *bp)
13523 13519 {
13524 13520 struct sd_xbuf *xp;
13525 13521
13526 13522 ASSERT(un != NULL);
13527 13523 ASSERT(bp != NULL);
13528 13524 ASSERT(!mutex_owned(SD_MUTEX(un)));
13529 13525 ASSERT(bp->b_resid == 0);
13530 13526
13531 13527 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: entry: bp:0x%p\n", bp);
13532 13528
13533 13529 xp = SD_GET_XBUF(bp);
13534 13530 ASSERT(xp != NULL);
13535 13531
13536 13532 mutex_enter(SD_MUTEX(un));
13537 13533
13538 13534 /*
13539 13535 * If we are currently in the failfast state, fail any new IO
13540 13536 * that has B_FAILFAST set, then return.
13541 13537 */
13542 13538 if ((bp->b_flags & B_FAILFAST) &&
13543 13539 (un->un_failfast_state == SD_FAILFAST_ACTIVE)) {
13544 13540 mutex_exit(SD_MUTEX(un));
13545 13541 bioerror(bp, EIO);
13546 13542 bp->b_resid = bp->b_bcount;
13547 13543 SD_BEGIN_IODONE(index, un, bp);
13548 13544 return;
13549 13545 }
13550 13546
13551 13547 if (SD_IS_DIRECT_PRIORITY(xp)) {
13552 13548 /*
13553 13549 * Priority command -- transport it immediately.
13554 13550 *
13555 13551 * Note: We may want to assert that USCSI_DIAGNOSE is set,
13556 13552 * because all direct priority commands should be associated
13557 13553 * with error recovery actions which we don't want to retry.
13558 13554 */
13559 13555 sd_start_cmds(un, bp);
13560 13556 } else {
13561 13557 /*
13562 13558 * Normal command -- add it to the wait queue, then start
13563 13559 * transporting commands from the wait queue.
13564 13560 */
13565 13561 sd_add_buf_to_waitq(un, bp);
13566 13562 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
13567 13563 sd_start_cmds(un, NULL);
13568 13564 }
13569 13565
13570 13566 mutex_exit(SD_MUTEX(un));
13571 13567
13572 13568 SD_TRACE(SD_LOG_IO_CORE, un, "sd_core_iostart: exit: bp:0x%p\n", bp);
13573 13569 }
13574 13570
13575 13571
13576 13572 /*
13577 13573 * Function: sd_init_cdb_limits
13578 13574 *
13579 13575 * Description: This is to handle scsi_pkt initialization differences
13580 13576 * between the driver platforms.
13581 13577 *
13582 13578 * Legacy behaviors:
13583 13579 *
13584 13580 * If the block number or the sector count exceeds the
13585 13581 * capabilities of a Group 0 command, shift over to a
13586 13582 * Group 1 command. We don't blindly use Group 1
13587 13583 * commands because a) some drives (CDC Wren IVs) get a
13588 13584 * bit confused, and b) there is probably a fair amount
13589 13585 * of speed difference for a target to receive and decode
13590 13586 * a 10 byte command instead of a 6 byte command.
13591 13587 *
13592 13588 * The xfer time difference of 6 vs 10 byte CDBs is
13593 13589 * still significant so this code is still worthwhile.
13594 13590 * 10 byte CDBs are very inefficient with the fas HBA driver
13595 13591 * and older disks. Each CDB byte took 1 usec with some
13596 13592 * popular disks.
13597 13593 *
13598 13594 * Context: Must be called at attach time
13599 13595 */
13600 13596
13601 13597 static void
13602 13598 sd_init_cdb_limits(struct sd_lun *un)
13603 13599 {
13604 13600 int hba_cdb_limit;
13605 13601
13606 13602 /*
13607 13603 * Use CDB_GROUP1 commands for most devices except for
13608 13604 * parallel SCSI fixed drives in which case we get better
13609 13605 * performance using CDB_GROUP0 commands (where applicable).
13610 13606 */
13611 13607 un->un_mincdb = SD_CDB_GROUP1;
13612 13608 #if !defined(__fibre)
13613 13609 if (!un->un_f_is_fibre && !un->un_f_cfg_is_atapi && !ISROD(un) &&
13614 13610 !un->un_f_has_removable_media) {
13615 13611 un->un_mincdb = SD_CDB_GROUP0;
13616 13612 }
13617 13613 #endif
13618 13614
13619 13615 /*
13620 13616	 * Try to read the max-cdb-length supported by the HBA.
13621 13617 */
13622 13618 un->un_max_hba_cdb = scsi_ifgetcap(SD_ADDRESS(un), "max-cdb-length", 1);
13623 13619 if (0 >= un->un_max_hba_cdb) {
13624 13620 un->un_max_hba_cdb = CDB_GROUP4;
13625 13621 hba_cdb_limit = SD_CDB_GROUP4;
13626 13622 } else if (0 < un->un_max_hba_cdb &&
13627 13623 un->un_max_hba_cdb < CDB_GROUP1) {
13628 13624 hba_cdb_limit = SD_CDB_GROUP0;
13629 13625 } else if (CDB_GROUP1 <= un->un_max_hba_cdb &&
13630 13626 un->un_max_hba_cdb < CDB_GROUP5) {
13631 13627 hba_cdb_limit = SD_CDB_GROUP1;
13632 13628 } else if (CDB_GROUP5 <= un->un_max_hba_cdb &&
13633 13629 un->un_max_hba_cdb < CDB_GROUP4) {
13634 13630 hba_cdb_limit = SD_CDB_GROUP5;
13635 13631 } else {
13636 13632 hba_cdb_limit = SD_CDB_GROUP4;
13637 13633 }
13638 13634
13639 13635 /*
13640 13636 * Use CDB_GROUP5 commands for removable devices. Use CDB_GROUP4
13641 13637 * commands for fixed disks unless we are building for a 32 bit
13642 13638 * kernel.
13643 13639 */
13644 13640 #ifdef _LP64
13645 13641 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
13646 13642 min(hba_cdb_limit, SD_CDB_GROUP4);
13647 13643 #else
13648 13644 un->un_maxcdb = (un->un_f_has_removable_media) ? SD_CDB_GROUP5 :
13649 13645 min(hba_cdb_limit, SD_CDB_GROUP1);
13650 13646 #endif
13651 13647
13652 13648 un->un_status_len = (int)((un->un_f_arq_enabled == TRUE)
13653 13649 ? sizeof (struct scsi_arq_status) : 1);
13654 13650 un->un_cmd_timeout = (ushort_t)sd_io_time;
13655 13651 un->un_uscsi_timeout = ((ISCD(un)) ? 2 : 1) * un->un_cmd_timeout;
13656 13652 }
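
The threshold chain above maps the HBA's reported max-cdb-length (in bytes) onto a CDB group index. A minimal sketch of the same mapping, assuming the standard CDB sizes (CDB_GROUP0 = 6, CDB_GROUP1 = 10, CDB_GROUP5 = 12, CDB_GROUP4 = 16 bytes); the helper name is hypothetical:

static int
sd_hba_cdb_limit(int max_hba_cdb)
{
	if (max_hba_cdb <= 0)
		return (SD_CDB_GROUP4);	/* capability unknown: assume largest */
	if (max_hba_cdb < CDB_GROUP1)	/* shorter than 10 bytes */
		return (SD_CDB_GROUP0);
	if (max_hba_cdb < CDB_GROUP5)	/* shorter than 12 bytes */
		return (SD_CDB_GROUP1);
	if (max_hba_cdb < CDB_GROUP4)	/* shorter than 16 bytes */
		return (SD_CDB_GROUP5);
	return (SD_CDB_GROUP4);
}
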
13657 13653
13658 13654
13659 13655 /*
13660 13656 * Function: sd_initpkt_for_buf
13661 13657 *
13662 13658 * Description: Allocate and initialize for transport a scsi_pkt struct,
13663 13659 * based upon the info specified in the given buf struct.
13664 13660 *
13665 13661 * Assumes the xb_blkno in the request is absolute (ie,
13666 13662 *			relative to the start of the device, NOT the partition).
13667 13663 * Also assumes that the request is using the native block
13668 13664 * size of the device (as returned by the READ CAPACITY
13669 13665 * command).
13670 13666 *
13671 13667 * Return Code: SD_PKT_ALLOC_SUCCESS
13672 13668 * SD_PKT_ALLOC_FAILURE
13673 13669 * SD_PKT_ALLOC_FAILURE_NO_DMA
13674 13670 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13675 13671 *
13676 13672 * Context: Kernel thread and may be called from software interrupt context
13677 13673 * as part of a sdrunout callback. This function may not block or
13678 13674 * call routines that block
13679 13675 */
13680 13676
13681 13677 static int
13682 13678 sd_initpkt_for_buf(struct buf *bp, struct scsi_pkt **pktpp)
13683 13679 {
13684 13680 struct sd_xbuf *xp;
13685 13681 struct scsi_pkt *pktp = NULL;
13686 13682 struct sd_lun *un;
13687 13683 size_t blockcount;
13688 13684 daddr_t startblock;
13689 13685 int rval;
13690 13686 int cmd_flags;
13691 13687
13692 13688 ASSERT(bp != NULL);
13693 13689 ASSERT(pktpp != NULL);
13694 13690 xp = SD_GET_XBUF(bp);
13695 13691 ASSERT(xp != NULL);
13696 13692 un = SD_GET_UN(bp);
13697 13693 ASSERT(un != NULL);
13698 13694 ASSERT(mutex_owned(SD_MUTEX(un)));
13699 13695 ASSERT(bp->b_resid == 0);
13700 13696
13701 13697 SD_TRACE(SD_LOG_IO_CORE, un,
13702 13698 "sd_initpkt_for_buf: entry: buf:0x%p\n", bp);
13703 13699
13704 13700 mutex_exit(SD_MUTEX(un));
13705 13701
13706 13702 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13707 13703 if (xp->xb_pkt_flags & SD_XB_DMA_FREED) {
13708 13704 /*
13709 13705 * Already have a scsi_pkt -- just need DMA resources.
13710 13706 * We must recompute the CDB in case the mapping returns
13711 13707 * a nonzero pkt_resid.
13712 13708 * Note: if this is a portion of a PKT_DMA_PARTIAL transfer
13713 13709		 * that is being retried, the unmap/remap of the DMA resources
13714 13710 * will result in the entire transfer starting over again
13715 13711 * from the very first block.
13716 13712 */
13717 13713 ASSERT(xp->xb_pktp != NULL);
13718 13714 pktp = xp->xb_pktp;
13719 13715 } else {
13720 13716 pktp = NULL;
13721 13717 }
13722 13718 #endif /* __i386 || __amd64 */
13723 13719
13724 13720 startblock = xp->xb_blkno; /* Absolute block num. */
13725 13721 blockcount = SD_BYTES2TGTBLOCKS(un, bp->b_bcount);
13726 13722
13727 13723 cmd_flags = un->un_pkt_flags | (xp->xb_pkt_flags & SD_XB_INITPKT_MASK);
13728 13724
13729 13725 /*
13730 13726 * sd_setup_rw_pkt will determine the appropriate CDB group to use,
13731 13727 * call scsi_init_pkt, and build the CDB.
13732 13728 */
13733 13729 rval = sd_setup_rw_pkt(un, &pktp, bp,
13734 13730 cmd_flags, sdrunout, (caddr_t)un,
13735 13731 startblock, blockcount);
13736 13732
13737 13733 if (rval == 0) {
13738 13734 /*
13739 13735 * Success.
13740 13736 *
13741 13737		 * If partial DMA is being used and required for this transfer,
13742 13738 * set it up here.
13743 13739 */
13744 13740 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) != 0 &&
13745 13741 (pktp->pkt_resid != 0)) {
13746 13742
13747 13743 /*
13748 13744 * Save the CDB length and pkt_resid for the
13749 13745 * next xfer
13750 13746 */
13751 13747 xp->xb_dma_resid = pktp->pkt_resid;
13752 13748
13753 13749 /* rezero resid */
13754 13750 pktp->pkt_resid = 0;
13755 13751
13756 13752 } else {
13757 13753 xp->xb_dma_resid = 0;
13758 13754 }
13759 13755
13760 13756 pktp->pkt_flags = un->un_tagflags;
13761 13757 pktp->pkt_time = un->un_cmd_timeout;
13762 13758 pktp->pkt_comp = sdintr;
13763 13759
13764 13760 pktp->pkt_private = bp;
13765 13761 *pktpp = pktp;
13766 13762
13767 13763 SD_TRACE(SD_LOG_IO_CORE, un,
13768 13764 "sd_initpkt_for_buf: exit: buf:0x%p\n", bp);
13769 13765
13770 13766 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
13771 13767 xp->xb_pkt_flags &= ~SD_XB_DMA_FREED;
13772 13768 #endif
13773 13769
13774 13770 mutex_enter(SD_MUTEX(un));
13775 13771 return (SD_PKT_ALLOC_SUCCESS);
13776 13772
13777 13773 }
13778 13774
13779 13775 /*
13780 13776 * SD_PKT_ALLOC_FAILURE is the only expected failure code
13781 13777 * from sd_setup_rw_pkt.
13782 13778 */
13783 13779 ASSERT(rval == SD_PKT_ALLOC_FAILURE);
13784 13780
13785 13781 if (rval == SD_PKT_ALLOC_FAILURE) {
13786 13782 *pktpp = NULL;
13787 13783 /*
13788 13784 * Set the driver state to RWAIT to indicate the driver
13789 13785 * is waiting on resource allocations. The driver will not
13790 13786		 * suspend, pm_suspend, or detach while the state is RWAIT.
13791 13787 */
13792 13788 mutex_enter(SD_MUTEX(un));
13793 13789 New_state(un, SD_STATE_RWAIT);
13794 13790
13795 13791 SD_ERROR(SD_LOG_IO_CORE, un,
13796 13792 "sd_initpkt_for_buf: No pktp. exit bp:0x%p\n", bp);
13797 13793
13798 13794 if ((bp->b_flags & B_ERROR) != 0) {
13799 13795 return (SD_PKT_ALLOC_FAILURE_NO_DMA);
13800 13796 }
13801 13797 return (SD_PKT_ALLOC_FAILURE);
13802 13798 } else {
13803 13799 /*
13804 13800		 * SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
13805 13801 *
13806 13802 * This should never happen. Maybe someone messed with the
13807 13803 * kernel's minphys?
13808 13804 */
13809 13805 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
13810 13806 "Request rejected: too large for CDB: "
13811 13807 "lba:0x%08lx len:0x%08lx\n", startblock, blockcount);
13812 13808 SD_ERROR(SD_LOG_IO_CORE, un,
13813 13809 "sd_initpkt_for_buf: No cp. exit bp:0x%p\n", bp);
13814 13810 mutex_enter(SD_MUTEX(un));
13815 13811 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13816 13812
13817 13813 }
13818 13814 }
13819 13815
13820 13816
13821 13817 /*
13822 13818 * Function: sd_destroypkt_for_buf
13823 13819 *
13824 13820 * Description: Free the scsi_pkt(9S) for the given bp (buf IO processing).
13825 13821 *
13826 13822 * Context: Kernel thread or interrupt context
13827 13823 */
13828 13824
13829 13825 static void
13830 13826 sd_destroypkt_for_buf(struct buf *bp)
13831 13827 {
13832 13828 ASSERT(bp != NULL);
13833 13829 ASSERT(SD_GET_UN(bp) != NULL);
13834 13830
13835 13831 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13836 13832 "sd_destroypkt_for_buf: entry: buf:0x%p\n", bp);
13837 13833
13838 13834 ASSERT(SD_GET_PKTP(bp) != NULL);
13839 13835 scsi_destroy_pkt(SD_GET_PKTP(bp));
13840 13836
13841 13837 SD_TRACE(SD_LOG_IO_CORE, SD_GET_UN(bp),
13842 13838 "sd_destroypkt_for_buf: exit: buf:0x%p\n", bp);
13843 13839 }
13844 13840
13845 13841 /*
13846 13842 * Function: sd_setup_rw_pkt
13847 13843 *
13848 13844 * Description: Determines appropriate CDB group for the requested LBA
13849 13845 * and transfer length, calls scsi_init_pkt, and builds
13850 13846 * the CDB. Do not use for partial DMA transfers except
13851 13847 * for the initial transfer since the CDB size must
13852 13848 * remain constant.
13853 13849 *
13854 13850 * Context: Kernel thread and may be called from software interrupt
13855 13851 * context as part of a sdrunout callback. This function may not
13856 13852 * block or call routines that block
13857 13853 */
13858 13854
13859 13855
13860 13856 int
13861 13857 sd_setup_rw_pkt(struct sd_lun *un,
13862 13858 struct scsi_pkt **pktpp, struct buf *bp, int flags,
13863 13859 int (*callback)(caddr_t), caddr_t callback_arg,
13864 13860 diskaddr_t lba, uint32_t blockcount)
13865 13861 {
13866 13862 struct scsi_pkt *return_pktp;
13867 13863 union scsi_cdb *cdbp;
13868 13864 struct sd_cdbinfo *cp = NULL;
13869 13865 int i;
13870 13866
13871 13867 /*
13872 13868 * See which size CDB to use, based upon the request.
13873 13869 */
13874 13870 for (i = un->un_mincdb; i <= un->un_maxcdb; i++) {
13875 13871
13876 13872 /*
13877 13873 * Check lba and block count against sd_cdbtab limits.
13878 13874 * In the partial DMA case, we have to use the same size
13879 13875 * CDB for all the transfers. Check lba + blockcount
13880 13876 * against the max LBA so we know that segment of the
13881 13877 * transfer can use the CDB we select.
13882 13878 */
13883 13879 if ((lba + blockcount - 1 <= sd_cdbtab[i].sc_maxlba) &&
13884 13880 (blockcount <= sd_cdbtab[i].sc_maxlen)) {
13885 13881
13886 13882 /*
13887 13883 * The command will fit into the CDB type
13888 13884 * specified by sd_cdbtab[i].
13889 13885 */
13890 13886 cp = sd_cdbtab + i;
13891 13887
13892 13888 /*
13893 13889 * Call scsi_init_pkt so we can fill in the
13894 13890 * CDB.
13895 13891 */
13896 13892 return_pktp = scsi_init_pkt(SD_ADDRESS(un), *pktpp,
13897 13893 bp, cp->sc_grpcode, un->un_status_len, 0,
13898 13894 flags, callback, callback_arg);
13899 13895
13900 13896 if (return_pktp != NULL) {
13901 13897
13902 13898 /*
13903 13899 * Return new value of pkt
13904 13900 */
13905 13901 *pktpp = return_pktp;
13906 13902
13907 13903 /*
13908 13904				 * To be safe, zero the CDB, ensuring there is
13909 13905 * no leftover data from a previous command.
13910 13906 */
13911 13907 bzero(return_pktp->pkt_cdbp, cp->sc_grpcode);
13912 13908
13913 13909 /*
13914 13910 * Handle partial DMA mapping
13915 13911 */
13916 13912 if (return_pktp->pkt_resid != 0) {
13917 13913
13918 13914 /*
13919 13915 * Not going to xfer as many blocks as
13920 13916 * originally expected
13921 13917 */
13922 13918 blockcount -=
13923 13919 SD_BYTES2TGTBLOCKS(un,
13924 13920 return_pktp->pkt_resid);
13925 13921 }
13926 13922
13927 13923 cdbp = (union scsi_cdb *)return_pktp->pkt_cdbp;
13928 13924
13929 13925 /*
13930 13926 * Set command byte based on the CDB
13931 13927 * type we matched.
13932 13928 */
13933 13929 cdbp->scc_cmd = cp->sc_grpmask |
13934 13930 ((bp->b_flags & B_READ) ?
13935 13931 SCMD_READ : SCMD_WRITE);
13936 13932
13937 13933 SD_FILL_SCSI1_LUN(un, return_pktp);
13938 13934
13939 13935 /*
13940 13936 * Fill in LBA and length
13941 13937 */
13942 13938 ASSERT((cp->sc_grpcode == CDB_GROUP1) ||
13943 13939 (cp->sc_grpcode == CDB_GROUP4) ||
13944 13940 (cp->sc_grpcode == CDB_GROUP0) ||
13945 13941 (cp->sc_grpcode == CDB_GROUP5));
13946 13942
13947 13943 if (cp->sc_grpcode == CDB_GROUP1) {
13948 13944 FORMG1ADDR(cdbp, lba);
13949 13945 FORMG1COUNT(cdbp, blockcount);
13950 13946 return (0);
13951 13947 } else if (cp->sc_grpcode == CDB_GROUP4) {
13952 13948 FORMG4LONGADDR(cdbp, lba);
13953 13949 FORMG4COUNT(cdbp, blockcount);
13954 13950 return (0);
13955 13951 } else if (cp->sc_grpcode == CDB_GROUP0) {
13956 13952 FORMG0ADDR(cdbp, lba);
13957 13953 FORMG0COUNT(cdbp, blockcount);
13958 13954 return (0);
13959 13955 } else if (cp->sc_grpcode == CDB_GROUP5) {
13960 13956 FORMG5ADDR(cdbp, lba);
13961 13957 FORMG5COUNT(cdbp, blockcount);
13962 13958 return (0);
13963 13959 }
13964 13960
13965 13961 /*
13966 13962 * It should be impossible to not match one
13967 13963 * of the CDB types above, so we should never
13968 13964 * reach this point. Set the CDB command byte
13969 13965 * to test-unit-ready to avoid writing
13970 13966 * to somewhere we don't intend.
13971 13967 */
13972 13968 cdbp->scc_cmd = SCMD_TEST_UNIT_READY;
13973 13969 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13974 13970 } else {
13975 13971 /*
13976 13972 * Couldn't get scsi_pkt
13977 13973 */
13978 13974 return (SD_PKT_ALLOC_FAILURE);
13979 13975 }
13980 13976 }
13981 13977 }
13982 13978
13983 13979 /*
13984 13980 * None of the available CDB types were suitable. This really
13985 13981 * should never happen: on a 64 bit system we support
13986 13982 * READ16/WRITE16 which will hold an entire 64 bit disk address
13987 13983 * and on a 32 bit system we will refuse to bind to a device
13988 13984 * larger than 2TB so addresses will never be larger than 32 bits.
13989 13985 */
13990 13986 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
13991 13987 }
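
The sd_cdbtab scan boils down to choosing the smallest CDB that can encode both the last LBA of the request and its block count. The following self-contained sketch shows that selection; the table values are the nominal SCSI READ/WRITE field widths and are illustrative, not copied from sd_cdbtab:

#include <stdint.h>

struct cdb_limit {
	uint64_t max_lba;	/* largest addressable LBA */
	uint32_t max_len;	/* largest transfer length in blocks */
};

/* Group 0 (6-byte), Group 1 (10-byte), Group 5 (12-byte), Group 4 (16-byte) */
static const struct cdb_limit cdb_limits[] = {
	{ (1ULL << 21) - 1,	0xFF },
	{ 0xFFFFFFFFULL,	0xFFFF },
	{ 0xFFFFFFFFULL,	0xFFFFFFFF },
	{ UINT64_MAX,		0xFFFFFFFF },
};

/* Return the index of the smallest CDB group that fits, or -1 if none. */
static int
pick_cdb(uint64_t lba, uint32_t nblks)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (lba + nblks - 1 <= cdb_limits[i].max_lba &&
		    nblks <= cdb_limits[i].max_len)
			return (i);
	}
	return (-1);
}
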
13992 13988
13993 13989 /*
13994 13990 * Function: sd_setup_next_rw_pkt
13995 13991 *
13996 13992 * Description: Setup packet for partial DMA transfers, except for the
13997 13993 * initial transfer. sd_setup_rw_pkt should be used for
13998 13994 * the initial transfer.
13999 13995 *
14000 13996 * Context: Kernel thread and may be called from interrupt context.
14001 13997 */
14002 13998
14003 13999 int
14004 14000 sd_setup_next_rw_pkt(struct sd_lun *un,
14005 14001 struct scsi_pkt *pktp, struct buf *bp,
14006 14002 diskaddr_t lba, uint32_t blockcount)
14007 14003 {
14008 14004 uchar_t com;
14009 14005 union scsi_cdb *cdbp;
14010 14006 uchar_t cdb_group_id;
14011 14007
14012 14008 ASSERT(pktp != NULL);
14013 14009 ASSERT(pktp->pkt_cdbp != NULL);
14014 14010
14015 14011 cdbp = (union scsi_cdb *)pktp->pkt_cdbp;
14016 14012 com = cdbp->scc_cmd;
14017 14013 cdb_group_id = CDB_GROUPID(com);
14018 14014
14019 14015 ASSERT((cdb_group_id == CDB_GROUPID_0) ||
14020 14016 (cdb_group_id == CDB_GROUPID_1) ||
14021 14017 (cdb_group_id == CDB_GROUPID_4) ||
14022 14018 (cdb_group_id == CDB_GROUPID_5));
14023 14019
14024 14020 /*
14025 14021 * Move pkt to the next portion of the xfer.
14026 14022 * func is NULL_FUNC so we do not have to release
14027 14023 * the disk mutex here.
14028 14024 */
14029 14025 if (scsi_init_pkt(SD_ADDRESS(un), pktp, bp, 0, 0, 0, 0,
14030 14026 NULL_FUNC, NULL) == pktp) {
14031 14027 /* Success. Handle partial DMA */
14032 14028 if (pktp->pkt_resid != 0) {
14033 14029 blockcount -=
14034 14030 SD_BYTES2TGTBLOCKS(un, pktp->pkt_resid);
14035 14031 }
14036 14032
14037 14033 cdbp->scc_cmd = com;
14038 14034 SD_FILL_SCSI1_LUN(un, pktp);
14039 14035 if (cdb_group_id == CDB_GROUPID_1) {
14040 14036 FORMG1ADDR(cdbp, lba);
14041 14037 FORMG1COUNT(cdbp, blockcount);
14042 14038 return (0);
14043 14039 } else if (cdb_group_id == CDB_GROUPID_4) {
14044 14040 FORMG4LONGADDR(cdbp, lba);
14045 14041 FORMG4COUNT(cdbp, blockcount);
14046 14042 return (0);
14047 14043 } else if (cdb_group_id == CDB_GROUPID_0) {
14048 14044 FORMG0ADDR(cdbp, lba);
14049 14045 FORMG0COUNT(cdbp, blockcount);
14050 14046 return (0);
14051 14047 } else if (cdb_group_id == CDB_GROUPID_5) {
14052 14048 FORMG5ADDR(cdbp, lba);
14053 14049 FORMG5COUNT(cdbp, blockcount);
14054 14050 return (0);
14055 14051 }
14056 14052
14057 14053 /* Unreachable */
14058 14054 return (SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL);
14059 14055 }
14060 14056
14061 14057 /*
14062 14058 * Error setting up next portion of cmd transfer.
14063 14059 * Something is definitely very wrong and this
14064 14060 * should not happen.
14065 14061 */
14066 14062 return (SD_PKT_ALLOC_FAILURE);
14067 14063 }
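
Together the two routines carve a partial-DMA request into windows: sd_setup_rw_pkt() builds the first window and fixes the CDB group, and sd_setup_next_rw_pkt() re-targets the same packet for each later window. A schematic of that cooperation follows; transport_and_wait() is a hypothetical stand-in for transporting the packet and returning the number of blocks actually moved, and this is not actual sd.c control flow:

static int
issue_all_windows(struct sd_lun *un, struct buf *bp, diskaddr_t lba,
    uint32_t nblks)
{
	struct scsi_pkt *pktp = NULL;
	uint32_t done = 0;
	int rval;

	/* First window: selects the CDB group and builds the CDB. */
	rval = sd_setup_rw_pkt(un, &pktp, bp, PKT_DMA_PARTIAL,
	    NULL_FUNC, NULL, lba, nblks);

	while (rval == 0) {
		done += transport_and_wait(un, pktp);	/* hypothetical */
		if (done >= nblks)
			break;
		/* Same pkt, same CDB size; only the LBA and count advance. */
		rval = sd_setup_next_rw_pkt(un, pktp, bp,
		    lba + done, nblks - done);
	}
	return (rval);
}
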
14068 14064
14069 14065 /*
14070 14066 * Function: sd_initpkt_for_uscsi
14071 14067 *
14072 14068 * Description: Allocate and initialize for transport a scsi_pkt struct,
14073 14069 * based upon the info specified in the given uscsi_cmd struct.
14074 14070 *
14075 14071 * Return Code: SD_PKT_ALLOC_SUCCESS
14076 14072 * SD_PKT_ALLOC_FAILURE
14077 14073 * SD_PKT_ALLOC_FAILURE_NO_DMA
14078 14074 *			SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL
 *			SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL
14079 14075 *
14080 14076 * Context: Kernel thread and may be called from software interrupt context
14081 14077 * as part of a sdrunout callback. This function may not block or
14082 14078 * call routines that block
14083 14079 */
14084 14080
14085 14081 static int
14086 14082 sd_initpkt_for_uscsi(struct buf *bp, struct scsi_pkt **pktpp)
14087 14083 {
14088 14084 struct uscsi_cmd *uscmd;
14089 14085 struct sd_xbuf *xp;
14090 14086 struct scsi_pkt *pktp;
14091 14087 struct sd_lun *un;
14092 14088 uint32_t flags = 0;
14093 14089
14094 14090 ASSERT(bp != NULL);
14095 14091 ASSERT(pktpp != NULL);
14096 14092 xp = SD_GET_XBUF(bp);
14097 14093 ASSERT(xp != NULL);
14098 14094 un = SD_GET_UN(bp);
14099 14095 ASSERT(un != NULL);
14100 14096 ASSERT(mutex_owned(SD_MUTEX(un)));
14101 14097
14102 14098 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
14103 14099 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
14104 14100 ASSERT(uscmd != NULL);
14105 14101
14106 14102 SD_TRACE(SD_LOG_IO_CORE, un,
14107 14103 "sd_initpkt_for_uscsi: entry: buf:0x%p\n", bp);
14108 14104
14109 14105 /*
14110 14106 * Allocate the scsi_pkt for the command.
14111 14107 * Note: If PKT_DMA_PARTIAL flag is set, scsi_vhci binds a path
14112 14108 * during scsi_init_pkt time and will continue to use the
14113 14109 * same path as long as the same scsi_pkt is used without
14114 14110 * intervening scsi_dma_free(). Since uscsi command does
14115 14111	 * intervening scsi_dma_free(). Since a uscsi command does
14116 14112	 * not call scsi_dmafree() before retrying a failed command,
14117 14113	 * it is necessary to make sure the PKT_DMA_PARTIAL flag is
14118 14114	 * NOT set, so that scsi_vhci can use another available path
14119 14115	 * for retry. Besides, a uscsi command does not allow DMA
14120 14116	 * breakup, so there is no need to set the PKT_DMA_PARTIAL flag.
14121 14117 if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
14122 14118 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
14123 14119 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
14124 14120 ((int)(uscmd->uscsi_rqlen) + sizeof (struct scsi_arq_status)
14125 14121 - sizeof (struct scsi_extended_sense)), 0,
14126 14122 (un->un_pkt_flags & ~PKT_DMA_PARTIAL) | PKT_XARQ,
14127 14123 sdrunout, (caddr_t)un);
14128 14124 } else {
14129 14125 pktp = scsi_init_pkt(SD_ADDRESS(un), NULL,
14130 14126 ((bp->b_bcount != 0) ? bp : NULL), uscmd->uscsi_cdblen,
14131 14127 sizeof (struct scsi_arq_status), 0,
14132 14128 (un->un_pkt_flags & ~PKT_DMA_PARTIAL),
14133 14129 sdrunout, (caddr_t)un);
14134 14130 }
14135 14131
14136 14132 if (pktp == NULL) {
14137 14133 *pktpp = NULL;
14138 14134 /*
14139 14135 * Set the driver state to RWAIT to indicate the driver
14140 14136 * is waiting on resource allocations. The driver will not
14141 14137		 * suspend, pm_suspend, or detach while the state is RWAIT.
14142 14138 */
14143 14139 New_state(un, SD_STATE_RWAIT);
14144 14140
14145 14141 SD_ERROR(SD_LOG_IO_CORE, un,
14146 14142 "sd_initpkt_for_uscsi: No pktp. exit bp:0x%p\n", bp);
14147 14143
14148 14144 if ((bp->b_flags & B_ERROR) != 0) {
14149 14145 return (SD_PKT_ALLOC_FAILURE_NO_DMA);
14150 14146 }
14151 14147 return (SD_PKT_ALLOC_FAILURE);
14152 14148 }
14153 14149
14154 14150 /*
14155 14151 * We do not do DMA breakup for USCSI commands, so return failure
14156 14152 * here if all the needed DMA resources were not allocated.
14157 14153 */
14158 14154 if ((un->un_pkt_flags & PKT_DMA_PARTIAL) &&
14159 14155 (bp->b_bcount != 0) && (pktp->pkt_resid != 0)) {
14160 14156 scsi_destroy_pkt(pktp);
14161 14157 SD_ERROR(SD_LOG_IO_CORE, un, "sd_initpkt_for_uscsi: "
14162 14158 "No partial DMA for USCSI. exit: buf:0x%p\n", bp);
14163 14159 return (SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL);
14164 14160 }
14165 14161
14166 14162 /* Init the cdb from the given uscsi struct */
14167 14163 (void) scsi_setup_cdb((union scsi_cdb *)pktp->pkt_cdbp,
14168 14164 uscmd->uscsi_cdb[0], 0, 0, 0);
14169 14165
14170 14166 SD_FILL_SCSI1_LUN(un, pktp);
14171 14167
14172 14168 /*
14173 14169	 * Set up the optional USCSI flags. See the uscsi(7I) man page
14174 14170	 * for a listing of the supported flags.
14175 14171 */
14176 14172
14177 14173 if (uscmd->uscsi_flags & USCSI_SILENT) {
14178 14174 flags |= FLAG_SILENT;
14179 14175 }
14180 14176
14181 14177 if (uscmd->uscsi_flags & USCSI_DIAGNOSE) {
14182 14178 flags |= FLAG_DIAGNOSE;
14183 14179 }
14184 14180
14185 14181 if (uscmd->uscsi_flags & USCSI_ISOLATE) {
14186 14182 flags |= FLAG_ISOLATE;
14187 14183 }
14188 14184
14189 14185 if (un->un_f_is_fibre == FALSE) {
14190 14186 if (uscmd->uscsi_flags & USCSI_RENEGOT) {
14191 14187 flags |= FLAG_RENEGOTIATE_WIDE_SYNC;
14192 14188 }
14193 14189 }
14194 14190
14195 14191 /*
14196 14192 * Set the pkt flags here so we save time later.
14197 14193 * Note: These flags are NOT in the uscsi man page!!!
14198 14194 */
14199 14195 if (uscmd->uscsi_flags & USCSI_HEAD) {
14200 14196 flags |= FLAG_HEAD;
14201 14197 }
14202 14198
14203 14199 if (uscmd->uscsi_flags & USCSI_NOINTR) {
14204 14200 flags |= FLAG_NOINTR;
14205 14201 }
14206 14202
14207 14203 /*
14208 14204 * For tagged queueing, things get a bit complicated.
14209 14205 * Check first for head of queue and last for ordered queue.
14210 14206 * If neither head nor order, use the default driver tag flags.
14211 14207 */
14212 14208 if ((uscmd->uscsi_flags & USCSI_NOTAG) == 0) {
14213 14209 if (uscmd->uscsi_flags & USCSI_HTAG) {
14214 14210 flags |= FLAG_HTAG;
14215 14211 } else if (uscmd->uscsi_flags & USCSI_OTAG) {
14216 14212 flags |= FLAG_OTAG;
14217 14213 } else {
14218 14214 flags |= un->un_tagflags & FLAG_TAGMASK;
14219 14215 }
14220 14216 }
14221 14217
14222 14218 if (uscmd->uscsi_flags & USCSI_NODISCON) {
14223 14219 flags = (flags & ~FLAG_TAGMASK) | FLAG_NODISCON;
14224 14220 }
14225 14221
14226 14222 pktp->pkt_flags = flags;
14227 14223
14228 14224 /* Transfer uscsi information to scsi_pkt */
14229 14225 (void) scsi_uscsi_pktinit(uscmd, pktp);
14230 14226
14231 14227 /* Copy the caller's CDB into the pkt... */
14232 14228 bcopy(uscmd->uscsi_cdb, pktp->pkt_cdbp, uscmd->uscsi_cdblen);
14233 14229
14234 14230 if (uscmd->uscsi_timeout == 0) {
14235 14231 pktp->pkt_time = un->un_uscsi_timeout;
14236 14232 } else {
14237 14233 pktp->pkt_time = uscmd->uscsi_timeout;
14238 14234 }
14239 14235
14240 14236 /* need it later to identify USCSI request in sdintr */
14241 14237 xp->xb_pkt_flags |= SD_XB_USCSICMD;
14242 14238
14243 14239 xp->xb_sense_resid = uscmd->uscsi_rqresid;
14244 14240
14245 14241 pktp->pkt_private = bp;
14246 14242 pktp->pkt_comp = sdintr;
14247 14243 *pktpp = pktp;
14248 14244
14249 14245 SD_TRACE(SD_LOG_IO_CORE, un,
14250 14246 "sd_initpkt_for_uscsi: exit: buf:0x%p\n", bp);
14251 14247
14252 14248 return (SD_PKT_ALLOC_SUCCESS);
14253 14249 }
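
Most of the flag translation above is a set of independent bit tests. For illustration only, the unconditional subset could be table-driven as sketched below (sd.c keeps the explicit tests, and tagged queueing, USCSI_NODISCON, and USCSI_RENEGOT need the extra logic shown above):

static const struct {
	int	uscsi_flag;
	int	pkt_flag;
} uscsi_flag_map[] = {
	{ USCSI_SILENT,		FLAG_SILENT },
	{ USCSI_DIAGNOSE,	FLAG_DIAGNOSE },
	{ USCSI_ISOLATE,	FLAG_ISOLATE },
	{ USCSI_HEAD,		FLAG_HEAD },
	{ USCSI_NOINTR,		FLAG_NOINTR },
};

static int
sd_map_uscsi_flags(int uflags)
{
	int flags = 0;
	uint_t i;

	for (i = 0; i < sizeof (uscsi_flag_map) /
	    sizeof (uscsi_flag_map[0]); i++) {
		if (uflags & uscsi_flag_map[i].uscsi_flag)
			flags |= uscsi_flag_map[i].pkt_flag;
	}
	return (flags);
}
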
14254 14250
14255 14251
14256 14252 /*
14257 14253 * Function: sd_destroypkt_for_uscsi
14258 14254 *
14259 14255 * Description: Free the scsi_pkt(9S) struct for the given bp, for uscsi
14260 14256 *		IOs. Also saves relevant info into the associated uscsi_cmd
14261 14257 * struct.
14262 14258 *
14263 14259 * Context: May be called under interrupt context
14264 14260 */
14265 14261
14266 14262 static void
14267 14263 sd_destroypkt_for_uscsi(struct buf *bp)
14268 14264 {
14269 14265 struct uscsi_cmd *uscmd;
14270 14266 struct sd_xbuf *xp;
14271 14267 struct scsi_pkt *pktp;
14272 14268 struct sd_lun *un;
14273 14269 struct sd_uscsi_info *suip;
14274 14270
14275 14271 ASSERT(bp != NULL);
14276 14272 xp = SD_GET_XBUF(bp);
14277 14273 ASSERT(xp != NULL);
14278 14274 un = SD_GET_UN(bp);
14279 14275 ASSERT(un != NULL);
14280 14276 ASSERT(!mutex_owned(SD_MUTEX(un)));
14281 14277 pktp = SD_GET_PKTP(bp);
14282 14278 ASSERT(pktp != NULL);
14283 14279
14284 14280 SD_TRACE(SD_LOG_IO_CORE, un,
14285 14281 "sd_destroypkt_for_uscsi: entry: buf:0x%p\n", bp);
14286 14282
14287 14283 /* The pointer to the uscsi_cmd struct is expected in xb_pktinfo */
14288 14284 uscmd = (struct uscsi_cmd *)xp->xb_pktinfo;
14289 14285 ASSERT(uscmd != NULL);
14290 14286
14291 14287 /* Save the status and the residual into the uscsi_cmd struct */
14292 14288 uscmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
14293 14289 uscmd->uscsi_resid = bp->b_resid;
14294 14290
14295 14291 /* Transfer scsi_pkt information to uscsi */
14296 14292 (void) scsi_uscsi_pktfini(pktp, uscmd);
14297 14293
14298 14294 /*
14299 14295 * If enabled, copy any saved sense data into the area specified
14300 14296 * by the uscsi command.
14301 14297 */
14302 14298 if (((uscmd->uscsi_flags & USCSI_RQENABLE) != 0) &&
14303 14299 (uscmd->uscsi_rqlen != 0) && (uscmd->uscsi_rqbuf != NULL)) {
14304 14300 /*
14305 14301 * Note: uscmd->uscsi_rqbuf should always point to a buffer
14306 14302 * at least SENSE_LENGTH bytes in size (see sd_send_scsi_cmd())
14307 14303 */
14308 14304 uscmd->uscsi_rqstatus = xp->xb_sense_status;
14309 14305 uscmd->uscsi_rqresid = xp->xb_sense_resid;
14310 14306 if (uscmd->uscsi_rqlen > SENSE_LENGTH) {
14311 14307 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
14312 14308 MAX_SENSE_LENGTH);
14313 14309 } else {
14314 14310 bcopy(xp->xb_sense_data, uscmd->uscsi_rqbuf,
14315 14311 SENSE_LENGTH);
14316 14312 }
14317 14313 }
14318 14314 /*
14319 14315 * The following assignments are for SCSI FMA.
14320 14316 */
14321 14317 ASSERT(xp->xb_private != NULL);
14322 14318 suip = (struct sd_uscsi_info *)xp->xb_private;
14323 14319 suip->ui_pkt_reason = pktp->pkt_reason;
14324 14320 suip->ui_pkt_state = pktp->pkt_state;
14325 14321 suip->ui_pkt_statistics = pktp->pkt_statistics;
14326 14322 suip->ui_lba = (uint64_t)SD_GET_BLKNO(bp);
14327 14323
14328 14324 /* We are done with the scsi_pkt; free it now */
14329 14325 ASSERT(SD_GET_PKTP(bp) != NULL);
14330 14326 scsi_destroy_pkt(SD_GET_PKTP(bp));
14331 14327
14332 14328 SD_TRACE(SD_LOG_IO_CORE, un,
14333 14329 "sd_destroypkt_for_uscsi: exit: buf:0x%p\n", bp);
14334 14330 }
14335 14331
14336 14332
14337 14333 /*
14338 14334 * Function: sd_bioclone_alloc
14339 14335 *
14340 14336 * Description: Allocate a buf(9S) and init it as per the given buf
14341 14337 * and the various arguments. The associated sd_xbuf
14342 14338 * struct is (nearly) duplicated. The struct buf *bp
14343 14339 * argument is saved in new_xp->xb_private.
14344 14340 *
14345 14341 * Arguments: bp - ptr to the buf(9S) to be "shadowed"
14346 14342 * datalen - size of data area for the shadow bp
14347 14343 * blkno - starting LBA
14348 14344 * func - function pointer for b_iodone in the shadow buf. (May
14349 14345 * be NULL if none.)
14350 14346 *
14351 14347 * Return Code: Pointer to the allocated buf(9S) struct
14352 14348 *
14353 14349 * Context: Can sleep.
14354 14350 */
14355 14351
14356 14352 static struct buf *
14357 14353 sd_bioclone_alloc(struct buf *bp, size_t datalen,
14358 14354 daddr_t blkno, int (*func)(struct buf *))
14359 14355 {
14360 14356 struct sd_lun *un;
14361 14357 struct sd_xbuf *xp;
14362 14358 struct sd_xbuf *new_xp;
14363 14359 struct buf *new_bp;
14364 14360
14365 14361 ASSERT(bp != NULL);
14366 14362 xp = SD_GET_XBUF(bp);
14367 14363 ASSERT(xp != NULL);
14368 14364 un = SD_GET_UN(bp);
14369 14365 ASSERT(un != NULL);
14370 14366 ASSERT(!mutex_owned(SD_MUTEX(un)));
14371 14367
14372 14368 new_bp = bioclone(bp, 0, datalen, SD_GET_DEV(un), blkno, func,
14373 14369 NULL, KM_SLEEP);
14374 14370
14375 14371 new_bp->b_lblkno = blkno;
14376 14372
14377 14373 /*
14378 14374 * Allocate an xbuf for the shadow bp and copy the contents of the
14379 14375 * original xbuf into it.
14380 14376 */
14381 14377 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
14382 14378 bcopy(xp, new_xp, sizeof (struct sd_xbuf));
14383 14379
14384 14380 /*
14385 14381 * The given bp is automatically saved in the xb_private member
14386 14382 * of the new xbuf. Callers are allowed to depend on this.
14387 14383 */
14388 14384 new_xp->xb_private = bp;
14389 14385
14390 14386 new_bp->b_private = new_xp;
14391 14387
14392 14388 return (new_bp);
14393 14389 }
14394 14390
14395 14391 /*
14396 14392 * Function: sd_shadow_buf_alloc
14397 14393 *
14398 14394 * Description: Allocate a buf(9S) and init it as per the given buf
14399 14395 * and the various arguments. The associated sd_xbuf
14400 14396 * struct is (nearly) duplicated. The struct buf *bp
14401 14397 * argument is saved in new_xp->xb_private.
14402 14398 *
14403 14399 * Arguments: bp - ptr to the buf(9S) to be "shadowed"
14404 14400 * datalen - size of data area for the shadow bp
14405 14401 * bflags - B_READ or B_WRITE (pseudo flag)
14406 14402 * blkno - starting LBA
14407 14403 * func - function pointer for b_iodone in the shadow buf. (May
14408 14404 * be NULL if none.)
14409 14405 *
14410 14406 * Return Code: Pointer to the allocated buf(9S) struct
14411 14407 *
14412 14408 * Context: Can sleep.
14413 14409 */
14414 14410
14415 14411 static struct buf *
14416 14412 sd_shadow_buf_alloc(struct buf *bp, size_t datalen, uint_t bflags,
14417 14413 daddr_t blkno, int (*func)(struct buf *))
14418 14414 {
14419 14415 struct sd_lun *un;
14420 14416 struct sd_xbuf *xp;
14421 14417 struct sd_xbuf *new_xp;
14422 14418 struct buf *new_bp;
14423 14419
14424 14420 ASSERT(bp != NULL);
14425 14421 xp = SD_GET_XBUF(bp);
14426 14422 ASSERT(xp != NULL);
14427 14423 un = SD_GET_UN(bp);
14428 14424 ASSERT(un != NULL);
14429 14425 ASSERT(!mutex_owned(SD_MUTEX(un)));
14430 14426
14431 14427 if (bp->b_flags & (B_PAGEIO | B_PHYS)) {
14432 14428 bp_mapin(bp);
14433 14429 }
14434 14430
14435 14431 bflags &= (B_READ | B_WRITE);
14436 14432 #if defined(__i386) || defined(__amd64)
14437 14433 new_bp = getrbuf(KM_SLEEP);
14438 14434 new_bp->b_un.b_addr = kmem_zalloc(datalen, KM_SLEEP);
14439 14435 new_bp->b_bcount = datalen;
14440 14436 new_bp->b_flags = bflags |
14441 14437 (bp->b_flags & ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW));
14442 14438 #else
14443 14439 new_bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), NULL,
14444 14440 datalen, bflags, SLEEP_FUNC, NULL);
14445 14441 #endif
14446 14442 new_bp->av_forw = NULL;
14447 14443 new_bp->av_back = NULL;
14448 14444 new_bp->b_dev = bp->b_dev;
14449 14445 new_bp->b_blkno = blkno;
14450 14446 new_bp->b_iodone = func;
14451 14447 new_bp->b_edev = bp->b_edev;
14452 14448 new_bp->b_resid = 0;
14453 14449
14454 14450 /* We need to preserve the B_FAILFAST flag */
14455 14451 if (bp->b_flags & B_FAILFAST) {
14456 14452 new_bp->b_flags |= B_FAILFAST;
14457 14453 }
14458 14454
14459 14455 /*
14460 14456 * Allocate an xbuf for the shadow bp and copy the contents of the
14461 14457 * original xbuf into it.
14462 14458 */
14463 14459 new_xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
14464 14460 bcopy(xp, new_xp, sizeof (struct sd_xbuf));
14465 14461
14466 14462	/* Needed later to copy data between the shadow buf & original buf */
14467 14463 new_xp->xb_pkt_flags |= PKT_CONSISTENT;
14468 14464
14469 14465 /*
14470 14466 * The given bp is automatically saved in the xb_private member
14471 14467 * of the new xbuf. Callers are allowed to depend on this.
14472 14468 */
14473 14469 new_xp->xb_private = bp;
14474 14470
14475 14471 new_bp->b_private = new_xp;
14476 14472
14477 14473 return (new_bp);
14478 14474 }
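
A shadow buf exists so that a request which is not aligned to the device's native block size can be carried out as whole-block I/O (read-modify-write on the write path). The helper below is an illustrative sketch, not from sd.c, of the rounding that determines a shadow buffer's starting block and length; all names are hypothetical:

#include <stdint.h>

static void
rmw_extent(uint64_t req_off, uint64_t req_len, uint32_t tgt_blksz,
    uint64_t *start_blk, uint64_t *nblks)
{
	uint64_t first = req_off / tgt_blksz;	/* round start down */
	uint64_t last = (req_off + req_len + tgt_blksz - 1) / tgt_blksz;

	*start_blk = first;
	*nblks = last - first;	/* whole blocks the shadow must cover */
}
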
14479 14475
14480 14476 /*
14481 14477 * Function: sd_bioclone_free
14482 14478 *
14483 14479 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations
14484 14480 * in the larger than partition operation.
14485 14481 *
14486 14482 * Context: May be called under interrupt context
14487 14483 */
14488 14484
14489 14485 static void
14490 14486 sd_bioclone_free(struct buf *bp)
14491 14487 {
14492 14488 struct sd_xbuf *xp;
14493 14489
14494 14490 ASSERT(bp != NULL);
14495 14491 xp = SD_GET_XBUF(bp);
14496 14492 ASSERT(xp != NULL);
14497 14493
14498 14494 /*
14499 14495 * Call bp_mapout() before freeing the buf, in case a lower
14500 14496	 * layer or HBA had done a bp_mapin(). We must do this here
14501 14497 * as we are the "originator" of the shadow buf.
14502 14498 */
14503 14499 bp_mapout(bp);
14504 14500
14505 14501 /*
14506 14502 * Null out b_iodone before freeing the bp, to ensure that the driver
14507 14503 * never gets confused by a stale value in this field. (Just a little
14508 14504 * extra defensiveness here.)
14509 14505 */
14510 14506 bp->b_iodone = NULL;
14511 14507
14512 14508 freerbuf(bp);
14513 14509
14514 14510 kmem_free(xp, sizeof (struct sd_xbuf));
14515 14511 }
14516 14512
14517 14513 /*
14518 14514 * Function: sd_shadow_buf_free
14519 14515 *
14520 14516 * Description: Deallocate a buf(9S) that was used for 'shadow' IO operations.
14521 14517 *
14522 14518 * Context: May be called under interrupt context
14523 14519 */
14524 14520
14525 14521 static void
14526 14522 sd_shadow_buf_free(struct buf *bp)
14527 14523 {
14528 14524 struct sd_xbuf *xp;
14529 14525
14530 14526 ASSERT(bp != NULL);
14531 14527 xp = SD_GET_XBUF(bp);
14532 14528 ASSERT(xp != NULL);
14533 14529
14534 14530 #if defined(__sparc)
14535 14531 /*
14536 14532 * Call bp_mapout() before freeing the buf, in case a lower
14537 14533	 * layer or HBA had done a bp_mapin(). We must do this here
14538 14534 * as we are the "originator" of the shadow buf.
14539 14535 */
14540 14536 bp_mapout(bp);
14541 14537 #endif
14542 14538
14543 14539 /*
14544 14540 * Null out b_iodone before freeing the bp, to ensure that the driver
14545 14541 * never gets confused by a stale value in this field. (Just a little
14546 14542 * extra defensiveness here.)
14547 14543 */
14548 14544 bp->b_iodone = NULL;
14549 14545
14550 14546 #if defined(__i386) || defined(__amd64)
14551 14547 kmem_free(bp->b_un.b_addr, bp->b_bcount);
14552 14548 freerbuf(bp);
14553 14549 #else
14554 14550 scsi_free_consistent_buf(bp);
14555 14551 #endif
14556 14552
14557 14553 kmem_free(xp, sizeof (struct sd_xbuf));
14558 14554 }
14559 14555
14560 14556
14561 14557 /*
14562 14558 * Function: sd_print_transport_rejected_message
14563 14559 *
14564 14560 * Description: This implements the ludicrously complex rules for printing
14565 14561 * a "transport rejected" message. This is to address the
14566 14562 * specific problem of having a flood of this error message
14567 14563 * produced when a failover occurs.
14568 14564 *
14569 14565 * Context: Any.
14570 14566 */
14571 14567
14572 14568 static void
14573 14569 sd_print_transport_rejected_message(struct sd_lun *un, struct sd_xbuf *xp,
14574 14570 int code)
14575 14571 {
14576 14572 ASSERT(un != NULL);
14577 14573 ASSERT(mutex_owned(SD_MUTEX(un)));
14578 14574 ASSERT(xp != NULL);
14579 14575
14580 14576 /*
14581 14577 * Print the "transport rejected" message under the following
14582 14578 * conditions:
14583 14579 *
14584 14580 * - Whenever the SD_LOGMASK_DIAG bit of sd_level_mask is set
14585 14581 * - The error code from scsi_transport() is NOT a TRAN_FATAL_ERROR.
14586 14582 * - If the error code IS a TRAN_FATAL_ERROR, then the message is
14587 14583 * printed the FIRST time a TRAN_FATAL_ERROR is returned from
14588 14584 * scsi_transport(9F) (which indicates that the target might have
14589 14585 * gone off-line). This uses the un->un_tran_fatal_count
14590 14586 * count, which is incremented whenever a TRAN_FATAL_ERROR is
14591 14587 *		received, and reset to zero whenever a TRAN_ACCEPT is returned
14592 14588 * from scsi_transport().
14593 14589 *
14594 14590 * The FLAG_SILENT in the scsi_pkt must be CLEARED in ALL of
14595 14591 *		the preceding cases in order for the message to be printed.
14596 14592 */
14597 14593 if (((xp->xb_pktp->pkt_flags & FLAG_SILENT) == 0) &&
14598 14594 (SD_FM_LOG(un) == SD_FM_LOG_NSUP)) {
14599 14595 if ((sd_level_mask & SD_LOGMASK_DIAG) ||
14600 14596 (code != TRAN_FATAL_ERROR) ||
14601 14597 (un->un_tran_fatal_count == 1)) {
14602 14598 switch (code) {
14603 14599 case TRAN_BADPKT:
14604 14600 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14605 14601 "transport rejected bad packet\n");
14606 14602 break;
14607 14603 case TRAN_FATAL_ERROR:
14608 14604 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14609 14605 "transport rejected fatal error\n");
14610 14606 break;
14611 14607 default:
14612 14608 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
14613 14609 "transport rejected (%d)\n", code);
14614 14610 break;
14615 14611 }
14616 14612 }
14617 14613 }
14618 14614 }
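
The conditions above condense into a single predicate. An equivalent restatement, as an illustrative helper rather than actual sd.c code:

static boolean_t
sd_tran_reject_should_print(struct sd_lun *un, struct sd_xbuf *xp, int code)
{
	/* FLAG_SILENT must be clear and FM logging unsupported... */
	if ((xp->xb_pktp->pkt_flags & FLAG_SILENT) != 0 ||
	    SD_FM_LOG(un) != SD_FM_LOG_NSUP)
		return (B_FALSE);

	/* ...then: diagnostics forced, a non-fatal code, or the first fatal. */
	return (((sd_level_mask & SD_LOGMASK_DIAG) != 0 ||
	    code != TRAN_FATAL_ERROR ||
	    un->un_tran_fatal_count == 1) ? B_TRUE : B_FALSE);
}
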
14619 14615
14620 14616
14621 14617 /*
14622 14618 * Function: sd_add_buf_to_waitq
14623 14619 *
14624 14620 * Description: Add the given buf(9S) struct to the wait queue for the
14625 14621 * instance. If sorting is enabled, then the buf is added
14626 14622 * to the queue via an elevator sort algorithm (a la
14627 14623 * disksort(9F)). The SD_GET_BLKNO(bp) is used as the sort key.
14628 14624 * If sorting is not enabled, then the buf is just added
14629 14625 * to the end of the wait queue.
14630 14626 *
14631 14627 * Return Code: void
14632 14628 *
14633 14629 * Context: Does not sleep/block, therefore technically can be called
14634 14630 * from any context. However if sorting is enabled then the
14635 14631 *		execution time is indeterminate, and may take a long time if
14636 14632 * the wait queue grows large.
14637 14633 */
14638 14634
14639 14635 static void
14640 14636 sd_add_buf_to_waitq(struct sd_lun *un, struct buf *bp)
14641 14637 {
14642 14638 struct buf *ap;
14643 14639
14644 14640 ASSERT(bp != NULL);
14645 14641 ASSERT(un != NULL);
14646 14642 ASSERT(mutex_owned(SD_MUTEX(un)));
14647 14643
14648 14644 /* If the queue is empty, add the buf as the only entry & return. */
14649 14645 if (un->un_waitq_headp == NULL) {
14650 14646 ASSERT(un->un_waitq_tailp == NULL);
14651 14647 un->un_waitq_headp = un->un_waitq_tailp = bp;
14652 14648 bp->av_forw = NULL;
14653 14649 return;
14654 14650 }
14655 14651
14656 14652 ASSERT(un->un_waitq_tailp != NULL);
14657 14653
14658 14654 /*
14659 14655 * If sorting is disabled, just add the buf to the tail end of
14660 14656 * the wait queue and return.
14661 14657 */
14662 14658 if (un->un_f_disksort_disabled || un->un_f_enable_rmw) {
14663 14659 un->un_waitq_tailp->av_forw = bp;
14664 14660 un->un_waitq_tailp = bp;
14665 14661 bp->av_forw = NULL;
14666 14662 return;
14667 14663 }
14668 14664
14669 14665 /*
14670 14666	 * Sort through the list of requests currently on the wait queue
14671 14667 * and add the new buf request at the appropriate position.
14672 14668 *
14673 14669 * The un->un_waitq_headp is an activity chain pointer on which
14674 14670 * we keep two queues, sorted in ascending SD_GET_BLKNO() order. The
14675 14671 * first queue holds those requests which are positioned after
14676 14672 * the current SD_GET_BLKNO() (in the first request); the second holds
14677 14673 * requests which came in after their SD_GET_BLKNO() number was passed.
14678 14674 * Thus we implement a one way scan, retracting after reaching
14679 14675 * the end of the drive to the first request on the second
14680 14676 * queue, at which time it becomes the first queue.
14681 14677 * A one-way scan is natural because of the way UNIX read-ahead
14682 14678 * blocks are allocated.
14683 14679 *
14684 14680 * If we lie after the first request, then we must locate the
14685 14681 * second request list and add ourselves to it.
14686 14682 */
14687 14683 ap = un->un_waitq_headp;
14688 14684 if (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap)) {
14689 14685 while (ap->av_forw != NULL) {
14690 14686 /*
14691 14687 * Look for an "inversion" in the (normally
14692 14688 * ascending) block numbers. This indicates
14693 14689 * the start of the second request list.
14694 14690 */
14695 14691 if (SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) {
14696 14692 /*
14697 14693 * Search the second request list for the
14698 14694 * first request at a larger block number.
14699 14695 * We go before that; however if there is
14700 14696 * no such request, we go at the end.
14701 14697 */
14702 14698 do {
14703 14699 if (SD_GET_BLKNO(bp) <
14704 14700 SD_GET_BLKNO(ap->av_forw)) {
14705 14701 goto insert;
14706 14702 }
14707 14703 ap = ap->av_forw;
14708 14704 } while (ap->av_forw != NULL);
14709 14705 goto insert; /* after last */
14710 14706 }
14711 14707 ap = ap->av_forw;
14712 14708 }
14713 14709
14714 14710 /*
14715 14711 * No inversions... we will go after the last, and
14716 14712 * be the first request in the second request list.
14717 14713 */
14718 14714 goto insert;
14719 14715 }
14720 14716
14721 14717 /*
14722 14718 * Request is at/after the current request...
14723 14719 * sort in the first request list.
14724 14720 */
14725 14721 while (ap->av_forw != NULL) {
14726 14722 /*
14727 14723 * We want to go after the current request (1) if
14728 14724 * there is an inversion after it (i.e. it is the end
14729 14725 * of the first request list), or (2) if the next
14730 14726 * request is a larger block no. than our request.
14731 14727 */
14732 14728 if ((SD_GET_BLKNO(ap->av_forw) < SD_GET_BLKNO(ap)) ||
14733 14729 (SD_GET_BLKNO(bp) < SD_GET_BLKNO(ap->av_forw))) {
14734 14730 goto insert;
14735 14731 }
14736 14732 ap = ap->av_forw;
14737 14733 }
14738 14734
14739 14735 /*
14740 14736 * Neither a second list nor a larger request, therefore
14741 14737 * we go at the end of the first list (which is the same
14742 14738	 * as the end of the whole shebang).
14743 14739 */
14744 14740 insert:
14745 14741 bp->av_forw = ap->av_forw;
14746 14742 ap->av_forw = bp;
14747 14743
14748 14744 /*
14749 14745 * If we inserted onto the tail end of the waitq, make sure the
14750 14746 * tail pointer is updated.
14751 14747 */
14752 14748 if (ap == un->un_waitq_tailp) {
14753 14749 un->un_waitq_tailp = bp;
14754 14750 }
14755 14751 }
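
Stripped of the driver types, the sort above is a classic one-way elevator over a singly linked list held as two ascending runs. A self-contained sketch follows (types and names are illustrative, and the tail-pointer bookkeeping is omitted):

#include <stdint.h>
#include <stddef.h>

struct req {
	struct req	*next;
	uint64_t	blkno;
};

static void
elevator_insert(struct req **headp, struct req *nrp)
{
	struct req *ap = *headp;

	if (ap == NULL) {			/* empty queue */
		nrp->next = NULL;
		*headp = nrp;
		return;
	}
	if (nrp->blkno < ap->blkno) {
		/* Belongs in the second run: skip ahead to the inversion. */
		while (ap->next != NULL && ap->next->blkno >= ap->blkno)
			ap = ap->next;
		/* Then walk the second run to the first larger block. */
		while (ap->next != NULL && nrp->blkno >= ap->next->blkno)
			ap = ap->next;
	} else {
		/* First run: stop at an inversion or a larger block. */
		while (ap->next != NULL && ap->next->blkno >= ap->blkno &&
		    nrp->blkno >= ap->next->blkno)
			ap = ap->next;
	}
	nrp->next = ap->next;
	ap->next = nrp;
}
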
14756 14752
14757 14753
14758 14754 /*
14759 14755 * Function: sd_start_cmds
14760 14756 *
14761 14757 * Description: Remove and transport cmds from the driver queues.
14762 14758 *
14763 14759 * Arguments: un - pointer to the unit (soft state) struct for the target.
14764 14760 *
14765 14761 * immed_bp - ptr to a buf to be transported immediately. Only
14766 14762 * the immed_bp is transported; bufs on the waitq are not
14767 14763 * processed and the un_retry_bp is not checked. If immed_bp is
14768 14764 * NULL, then normal queue processing is performed.
14769 14765 *
14770 14766 * Context: May be called from kernel thread context, interrupt context,
14771 14767 * or runout callback context. This function may not block or
14772 14768 * call routines that block.
14773 14769 */
14774 14770
14775 14771 static void
14776 14772 sd_start_cmds(struct sd_lun *un, struct buf *immed_bp)
14777 14773 {
14778 14774 struct sd_xbuf *xp;
14779 14775 struct buf *bp;
14780 14776 void (*statp)(kstat_io_t *);
14781 14777 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14782 14778 void (*saved_statp)(kstat_io_t *);
14783 14779 #endif
14784 14780 int rval;
14785 14781 struct sd_fm_internal *sfip = NULL;
14786 14782
14787 14783 ASSERT(un != NULL);
14788 14784 ASSERT(mutex_owned(SD_MUTEX(un)));
14789 14785 ASSERT(un->un_ncmds_in_transport >= 0);
14790 14786 ASSERT(un->un_throttle >= 0);
14791 14787
14792 14788 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: entry\n");
14793 14789
14794 14790 do {
14795 14791 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14796 14792 saved_statp = NULL;
14797 14793 #endif
14798 14794
14799 14795 /*
14800 14796 * If we are syncing or dumping, fail the command to
14801 14797 * avoid recursively calling back into scsi_transport().
14802 14798 * The dump I/O itself uses a separate code path so this
14803 14799 * only prevents non-dump I/O from being sent while dumping.
14804 14800 * File system sync takes place before dumping begins.
14805 14801 * During panic, filesystem I/O is allowed provided
14806 14802 * un_in_callback is <= 1. This is to prevent recursion
14807 14803 * such as sd_start_cmds -> scsi_transport -> sdintr ->
14808 14804 * sd_start_cmds and so on. See panic.c for more information
14809 14805 * about the states the system can be in during panic.
14810 14806 */
14811 14807 if ((un->un_state == SD_STATE_DUMPING) ||
14812 14808 (ddi_in_panic() && (un->un_in_callback > 1))) {
14813 14809 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14814 14810 "sd_start_cmds: panicking\n");
14815 14811 goto exit;
14816 14812 }
14817 14813
14818 14814 if ((bp = immed_bp) != NULL) {
14819 14815 /*
14820 14816 * We have a bp that must be transported immediately.
14821 14817 * It's OK to transport the immed_bp here without doing
14822 14818 * the throttle limit check because the immed_bp is
14823 14819 * always used in a retry/recovery case. This means
14824 14820 * that we know we are not at the throttle limit by
14825 14821 * virtue of the fact that to get here we must have
14826 14822 * already gotten a command back via sdintr(). This also
14827 14823 * relies on (1) the command on un_retry_bp preventing
14828 14824 * further commands from the waitq from being issued;
14829 14825 * and (2) the code in sd_retry_command checking the
14830 14826 * throttle limit before issuing a delayed or immediate
14831 14827 * retry. This holds even if the throttle limit is
14832 14828 * currently ratcheted down from its maximum value.
14833 14829 */
14834 14830 statp = kstat_runq_enter;
14835 14831 if (bp == un->un_retry_bp) {
14836 14832 ASSERT((un->un_retry_statp == NULL) ||
14837 14833 (un->un_retry_statp == kstat_waitq_enter) ||
14838 14834 (un->un_retry_statp ==
14839 14835 kstat_runq_back_to_waitq));
14840 14836 /*
14841 14837 * If the waitq kstat was incremented when
14842 14838 * sd_set_retry_bp() queued this bp for a retry,
14843 14839 * then we must set up statp so that the waitq
14844 14840 * count will get decremented correctly below.
14845 14841 * Also we must clear un->un_retry_statp to
14846 14842 * ensure that we do not act on a stale value
14847 14843 * in this field.
14848 14844 */
14849 14845 if ((un->un_retry_statp == kstat_waitq_enter) ||
14850 14846 (un->un_retry_statp ==
14851 14847 kstat_runq_back_to_waitq)) {
14852 14848 statp = kstat_waitq_to_runq;
14853 14849 }
14854 14850 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14855 14851 saved_statp = un->un_retry_statp;
14856 14852 #endif
14857 14853 un->un_retry_statp = NULL;
14858 14854
14859 14855 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
14860 14856 "sd_start_cmds: un:0x%p: GOT retry_bp:0x%p "
14861 14857 "un_throttle:%d un_ncmds_in_transport:%d\n",
14862 14858 un, un->un_retry_bp, un->un_throttle,
14863 14859 un->un_ncmds_in_transport);
14864 14860 } else {
14865 14861 SD_TRACE(SD_LOG_IO_CORE, un, "sd_start_cmds: "
14866 14862 "processing priority bp:0x%p\n", bp);
14867 14863 }
14868 14864
14869 14865 } else if ((bp = un->un_waitq_headp) != NULL) {
14870 14866 /*
14871 14867 * A command on the waitq is ready to go, but do not
14872 14868 * send it if:
14873 14869 *
14874 14870 * (1) the throttle limit has been reached, or
14875 14871 * (2) a retry is pending, or
14876 14872			 * (3) a START_STOP_UNIT callback is pending, or
14877 14873 * (4) a callback for a SD_PATH_DIRECT_PRIORITY
14878 14874 * command is pending.
14879 14875 *
14880 14876 * For all of these conditions, IO processing will
14881 14877 * restart after the condition is cleared.
14882 14878 */
14883 14879 if (un->un_ncmds_in_transport >= un->un_throttle) {
14884 14880 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14885 14881 "sd_start_cmds: exiting, "
14886 14882 "throttle limit reached!\n");
14887 14883 goto exit;
14888 14884 }
14889 14885 if (un->un_retry_bp != NULL) {
14890 14886 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14891 14887 "sd_start_cmds: exiting, retry pending!\n");
14892 14888 goto exit;
14893 14889 }
14894 14890 if (un->un_startstop_timeid != NULL) {
14895 14891 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14896 14892 "sd_start_cmds: exiting, "
14897 14893 "START_STOP pending!\n");
14898 14894 goto exit;
14899 14895 }
14900 14896 if (un->un_direct_priority_timeid != NULL) {
14901 14897 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
14902 14898 "sd_start_cmds: exiting, "
14903 14899 "SD_PATH_DIRECT_PRIORITY cmd. pending!\n");
14904 14900 goto exit;
14905 14901 }
14906 14902
14907 14903 /* Dequeue the command */
14908 14904 un->un_waitq_headp = bp->av_forw;
14909 14905 if (un->un_waitq_headp == NULL) {
14910 14906 un->un_waitq_tailp = NULL;
14911 14907 }
14912 14908 bp->av_forw = NULL;
14913 14909 statp = kstat_waitq_to_runq;
14914 14910 SD_TRACE(SD_LOG_IO_CORE, un,
14915 14911 "sd_start_cmds: processing waitq bp:0x%p\n", bp);
14916 14912
14917 14913 } else {
14918 14914 /* No work to do so bail out now */
14919 14915 SD_TRACE(SD_LOG_IO_CORE, un,
14920 14916 "sd_start_cmds: no more work, exiting!\n");
14921 14917 goto exit;
14922 14918 }
14923 14919
14924 14920 /*
14925 14921 * Reset the state to normal. This is the mechanism by which
14926 14922 * the state transitions from either SD_STATE_RWAIT or
14927 14923 * SD_STATE_OFFLINE to SD_STATE_NORMAL.
14928 14924 * If state is SD_STATE_PM_CHANGING then this command is
14929 14925 * part of the device power control and the state must
14930 14926		 * not be put back to normal. Doing so would
14931 14927		 * allow new commands to proceed when they shouldn't;
14932 14928		 * the device may be going off.
14933 14929 */
14934 14930 if ((un->un_state != SD_STATE_SUSPENDED) &&
14935 14931 (un->un_state != SD_STATE_PM_CHANGING)) {
14936 14932 New_state(un, SD_STATE_NORMAL);
14937 14933 }
14938 14934
14939 14935 xp = SD_GET_XBUF(bp);
14940 14936 ASSERT(xp != NULL);
14941 14937
14942 14938 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
14943 14939 /*
14944 14940 * Allocate the scsi_pkt if we need one, or attach DMA
14945 14941 * resources if we have a scsi_pkt that needs them. The
14946 14942 * latter should only occur for commands that are being
14947 14943 * retried.
14948 14944 */
14949 14945 if ((xp->xb_pktp == NULL) ||
14950 14946 ((xp->xb_pkt_flags & SD_XB_DMA_FREED) != 0)) {
14951 14947 #else
14952 14948 if (xp->xb_pktp == NULL) {
14953 14949 #endif
14954 14950 /*
14955 14951 * There is no scsi_pkt allocated for this buf. Call
14956 14952 * the initpkt function to allocate & init one.
14957 14953 *
14958 14954 * The scsi_init_pkt runout callback functionality is
14959 14955 * implemented as follows:
14960 14956 *
14961 14957 * 1) The initpkt function always calls
14962 14958 * scsi_init_pkt(9F) with sdrunout specified as the
14963 14959 * callback routine.
14964 14960 * 2) A successful packet allocation is initialized and
14965 14961 * the I/O is transported.
14966 14962 * 3) The I/O associated with an allocation resource
14967 14963 * failure is left on its queue to be retried via
14968 14964 * runout or the next I/O.
14969 14965 * 4) The I/O associated with a DMA error is removed
14970 14966 * from the queue and failed with EIO. Processing of
14971 14967 * the transport queues is also halted to be
14972 14968 * restarted via runout or the next I/O.
14973 14969 * 5) The I/O associated with a CDB size or packet
14974 14970 * size error is removed from the queue and failed
14975 14971 * with EIO. Processing of the transport queues is
14976 14972 * continued.
14977 14973 *
14978 14974 * Note: there is no interface for canceling a runout
14979 14975 * callback. To prevent the driver from detaching or
14980 14976 * suspending while a runout is pending the driver
14981 14977 * state is set to SD_STATE_RWAIT
14982 14978 *
14983 14979 * Note: using the scsi_init_pkt callback facility can
14984 14980 * result in an I/O request persisting at the head of
14985 14981 * the list which cannot be satisfied even after
14986 14982 * multiple retries. In the future the driver may
14987 14983 * implement some kind of maximum runout count before
14988 14984 * failing an I/O.
14989 14985 *
14990 14986 * Note: the use of funcp below may seem superfluous,
14991 14987 * but it helps warlock figure out the correct
14992 14988 * initpkt function calls (see [s]sd.wlcmd).
14993 14989 */
14994 14990 struct scsi_pkt *pktp;
14995 14991 int (*funcp)(struct buf *bp, struct scsi_pkt **pktp);
14996 14992
14997 14993 ASSERT(bp != un->un_rqs_bp);
14998 14994
14999 14995 funcp = sd_initpkt_map[xp->xb_chain_iostart];
15000 14996 switch ((*funcp)(bp, &pktp)) {
15001 14997 case SD_PKT_ALLOC_SUCCESS:
15002 14998 xp->xb_pktp = pktp;
15003 14999 SD_TRACE(SD_LOG_IO_CORE, un,
15004 15000 "sd_start_cmd: SD_PKT_ALLOC_SUCCESS 0x%p\n",
15005 15001 pktp);
15006 15002 goto got_pkt;
15007 15003
15008 15004 case SD_PKT_ALLOC_FAILURE:
15009 15005 /*
15010 15006 * Temporary (hopefully) resource depletion.
15011 15007 * Since retries and RQS commands always have a
15012 15008 * scsi_pkt allocated, these cases should never
15013 15009 * get here. So the only cases this needs to
15014 15010 * handle is a bp from the waitq (which we put
15015 15011 * back onto the waitq for sdrunout), or a bp
15016 15012 * sent as an immed_bp (which we just fail).
15017 15013 */
15018 15014 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15019 15015 "sd_start_cmds: SD_PKT_ALLOC_FAILURE\n");
15020 15016
15021 15017 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
15022 15018
15023 15019 if (bp == immed_bp) {
15024 15020 /*
15025 15021 * If SD_XB_DMA_FREED is clear, then
15026 15022 * this is a failure to allocate a
15027 15023 * scsi_pkt, and we must fail the
15028 15024 * command.
15029 15025 */
15030 15026 if ((xp->xb_pkt_flags &
15031 15027 SD_XB_DMA_FREED) == 0) {
15032 15028 break;
15033 15029 }
15034 15030
15035 15031 /*
15036 15032 * If this immediate command is NOT our
15037 15033 * un_retry_bp, then we must fail it.
15038 15034 */
15039 15035 if (bp != un->un_retry_bp) {
15040 15036 break;
15041 15037 }
15042 15038
15043 15039 /*
15044 15040 * We get here if this cmd is our
15045 15041 * un_retry_bp that was DMAFREED, but
15046 15042 * scsi_init_pkt() failed to reallocate
15047 15043 * DMA resources when we attempted to
15048 15044 * retry it. This can happen when an
15049 15045 * mpxio failover is in progress, but
15050 15046 * we don't want to just fail the
15051 15047 * command in this case.
15052 15048 *
15053 15049 * Use timeout(9F) to restart it after
15054 15050 * a 100ms delay. We don't want to
15055 15051 * let sdrunout() restart it, because
15056 15052 * sdrunout() is just supposed to start
15057 15053 * commands that are sitting on the
15058 15054 * wait queue. The un_retry_bp stays
15059 15055 * set until the command completes, but
15060 15056 * sdrunout can be called many times
15061 15057 * before that happens. Since sdrunout
15062 15058 * cannot tell if the un_retry_bp is
15063 15059 * already in the transport, it could
15064 15060 * end up calling scsi_transport() for
15065 15061 * the un_retry_bp multiple times.
15066 15062 *
15067 15063 * Also: don't schedule the callback
15068 15064 * if some other callback is already
15069 15065 * pending.
15070 15066 */
15071 15067 if (un->un_retry_statp == NULL) {
15072 15068 /*
15073 15069 * restore the kstat pointer to
15074 15070 * keep kstat counts coherent
15075 15071 * when we do retry the command.
15076 15072 */
15077 15073 un->un_retry_statp =
15078 15074 saved_statp;
15079 15075 }
15080 15076
15081 15077 if ((un->un_startstop_timeid == NULL) &&
15082 15078 (un->un_retry_timeid == NULL) &&
15083 15079 (un->un_direct_priority_timeid ==
15084 15080 NULL)) {
15085 15081
15086 15082 un->un_retry_timeid =
15087 15083 timeout(
15088 15084 sd_start_retry_command,
15089 15085 un, SD_RESTART_TIMEOUT);
15090 15086 }
15091 15087 goto exit;
15092 15088 }
15093 15089
15094 15090 #else
15095 15091 if (bp == immed_bp) {
15096 15092 break; /* Just fail the command */
15097 15093 }
15098 15094 #endif
15099 15095
15100 15096 /* Add the buf back to the head of the waitq */
15101 15097 bp->av_forw = un->un_waitq_headp;
15102 15098 un->un_waitq_headp = bp;
15103 15099 if (un->un_waitq_tailp == NULL) {
15104 15100 un->un_waitq_tailp = bp;
15105 15101 }
15106 15102 goto exit;
15107 15103
15108 15104 case SD_PKT_ALLOC_FAILURE_NO_DMA:
15109 15105 /*
15110 15106 * HBA DMA resource failure. Fail the command
15111 15107 * and continue processing of the queues.
15112 15108 */
15113 15109 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15114 15110 "sd_start_cmds: "
15115 15111 "SD_PKT_ALLOC_FAILURE_NO_DMA\n");
15116 15112 break;
15117 15113
15118 15114 case SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL:
15119 15115 /*
15120 15116 * Note:x86: Partial DMA mapping not supported
15121 15117 * for USCSI commands, and all the needed DMA
15122 15118 * resources were not allocated.
15123 15119 */
15124 15120 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15125 15121 "sd_start_cmds: "
15126 15122 "SD_PKT_ALLOC_FAILURE_PKT_TOO_SMALL\n");
15127 15123 break;
15128 15124
15129 15125 case SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL:
15130 15126 /*
15131 15127 * Note:x86: Request cannot fit into CDB based
15132 15128 * on lba and len.
15133 15129 */
15134 15130 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15135 15131 "sd_start_cmds: "
15136 15132 "SD_PKT_ALLOC_FAILURE_CDB_TOO_SMALL\n");
15137 15133 break;
15138 15134
15139 15135 default:
15140 15136 /* Should NEVER get here! */
15141 15137 panic("scsi_initpkt error");
15142 15138 /*NOTREACHED*/
15143 15139 }
15144 15140
15145 15141 /*
15146 15142 * Fatal error in allocating a scsi_pkt for this buf.
15147 15143 * Update kstats & return the buf with an error code.
15148 15144 * We must use sd_return_failed_command_no_restart() to
15149 15145 * avoid a recursive call back into sd_start_cmds().
15150 15146 * However this also means that we must keep processing
15151 15147 * the waitq here in order to avoid stalling.
15152 15148 */
15153 15149 if (statp == kstat_waitq_to_runq) {
15154 15150 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
15155 15151 }
15156 15152 sd_return_failed_command_no_restart(un, bp, EIO);
15157 15153 if (bp == immed_bp) {
15158 15154 /* immed_bp is gone by now, so clear this */
15159 15155 immed_bp = NULL;
15160 15156 }
15161 15157 continue;
15162 15158 }
15163 15159 got_pkt:
15164 15160 if (bp == immed_bp) {
15165 15161 /* goto the head of the class.... */
15166 15162 xp->xb_pktp->pkt_flags |= FLAG_HEAD;
15167 15163 }
15168 15164
15169 15165 un->un_ncmds_in_transport++;
15170 15166 SD_UPDATE_KSTATS(un, statp, bp);
15171 15167
15172 15168 /*
15173 15169 * Call scsi_transport() to send the command to the target.
15174 15170 * According to SCSA architecture, we must drop the mutex here
15175 15171 * before calling scsi_transport() in order to avoid deadlock.
15176 15172 * Note that the scsi_pkt's completion routine can be executed
15177 15173 * (from interrupt context) even before the call to
15178 15174 * scsi_transport() returns.
15179 15175 */
15180 15176 SD_TRACE(SD_LOG_IO_CORE, un,
15181 15177 "sd_start_cmds: calling scsi_transport()\n");
15182 15178 DTRACE_PROBE1(scsi__transport__dispatch, struct buf *, bp);
15183 15179
15184 15180 mutex_exit(SD_MUTEX(un));
15185 15181 rval = scsi_transport(xp->xb_pktp);
15186 15182 mutex_enter(SD_MUTEX(un));
15187 15183
15188 15184 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15189 15185 "sd_start_cmds: scsi_transport() returned %d\n", rval);
15190 15186
15191 15187 switch (rval) {
15192 15188 case TRAN_ACCEPT:
15193 15189 /* Clear this with every pkt accepted by the HBA */
15194 15190 un->un_tran_fatal_count = 0;
15195 15191 break; /* Success; try the next cmd (if any) */
15196 15192
15197 15193 case TRAN_BUSY:
15198 15194 un->un_ncmds_in_transport--;
15199 15195 ASSERT(un->un_ncmds_in_transport >= 0);
15200 15196
15201 15197 /*
15202 15198 * Don't retry request sense, the sense data
15203 15199 * is lost when another request is sent.
15204 15200 * Free up the rqs buf and retry
15205 15201 * the original failed cmd. Update kstat.
15206 15202 */
15207 15203 if (bp == un->un_rqs_bp) {
15208 15204 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
15209 15205 bp = sd_mark_rqs_idle(un, xp);
15210 15206 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
15211 15207 NULL, NULL, EIO, un->un_busy_timeout / 500,
15212 15208 kstat_waitq_enter);
15213 15209 goto exit;
15214 15210 }
15215 15211
15216 15212 #if defined(__i386) || defined(__amd64) /* DMAFREE for x86 only */
15217 15213 /*
15218 15214 * Free the DMA resources for the scsi_pkt. This will
15219 15215 * allow mpxio to select another path the next time
15220 15216 * we call scsi_transport() with this scsi_pkt.
15221 15217 * See sdintr() for the rationale behind this.
15222 15218 */
15223 15219 if ((un->un_f_is_fibre == TRUE) &&
15224 15220 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
15225 15221 ((xp->xb_pktp->pkt_flags & FLAG_SENSING) == 0)) {
15226 15222 scsi_dmafree(xp->xb_pktp);
15227 15223 xp->xb_pkt_flags |= SD_XB_DMA_FREED;
15228 15224 }
15229 15225 #endif
15230 15226
15231 15227 if (SD_IS_DIRECT_PRIORITY(SD_GET_XBUF(bp))) {
15232 15228 /*
15233 15229 * Commands that are SD_PATH_DIRECT_PRIORITY
15234 15230 * are for error recovery situations. These do
15235 15231 * not use the normal command waitq, so if they
15236 15232 * get a TRAN_BUSY we cannot put them back onto
15237 15233 * the waitq for later retry. One possible
15238 15234 * problem is that there could already be some
15239 15235 * other command on un_retry_bp that is waiting
15240 15236 * for this one to complete, so we would be
15241 15237 * deadlocked if we put this command back onto
15242 15238 * the waitq for later retry (since un_retry_bp
15243 15239 * must complete before the driver gets back to
15244 15240 * commands on the waitq).
15245 15241 *
15246 15242 * To avoid deadlock we must schedule a callback
15247 15243 * that will restart this command after a set
15248 15244 * interval. This should keep retrying for as
15249 15245 * long as the underlying transport keeps
15250 15246 * returning TRAN_BUSY (just like for other
15251 15247 * commands). Use the same timeout interval as
15252 15248 * for the ordinary TRAN_BUSY retry.
15253 15249 */
15254 15250 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15255 15251 "sd_start_cmds: scsi_transport() returned "
15256 15252 "TRAN_BUSY for DIRECT_PRIORITY cmd!\n");
15257 15253
15258 15254 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
15259 15255 un->un_direct_priority_timeid =
15260 15256 timeout(sd_start_direct_priority_command,
15261 15257 bp, un->un_busy_timeout / 500);
15262 15258
15263 15259 goto exit;
15264 15260 }
15265 15261
15266 15262 /*
15267 15263 * For TRAN_BUSY, we want to reduce the throttle value,
15268 15264 * unless we are retrying a command.
15269 15265 */
15270 15266 if (bp != un->un_retry_bp) {
15271 15267 sd_reduce_throttle(un, SD_THROTTLE_TRAN_BUSY);
15272 15268 }
15273 15269
15274 15270 /*
15275 15271 * Set up the bp to be tried again 10 ms later.
15276 15272 * Note:x86: Is there a timeout value in the sd_lun
15277 15273 * for this condition?
15278 15274 */
15279 15275 sd_set_retry_bp(un, bp, un->un_busy_timeout / 500,
15280 15276 kstat_runq_back_to_waitq);
15281 15277 goto exit;
15282 15278
15283 15279 case TRAN_FATAL_ERROR:
15284 15280 un->un_tran_fatal_count++;
15285 15281 /* FALLTHRU */
15286 15282
15287 15283 case TRAN_BADPKT:
15288 15284 default:
15289 15285 un->un_ncmds_in_transport--;
15290 15286 ASSERT(un->un_ncmds_in_transport >= 0);
15291 15287
15292 15288 /*
15293 15289 * If this is our REQUEST SENSE command with a
15294 15290 * transport error, we must get back the pointers
15295 15291 * to the original buf, and mark the REQUEST
15296 15292 * SENSE command as "available".
15297 15293 */
15298 15294 if (bp == un->un_rqs_bp) {
15299 15295 bp = sd_mark_rqs_idle(un, xp);
15300 15296 xp = SD_GET_XBUF(bp);
15301 15297 } else {
15302 15298 /*
15303 15299 * Legacy behavior: do not update transport
15304 15300 * error count for request sense commands.
15305 15301 */
15306 15302 SD_UPDATE_ERRSTATS(un, sd_transerrs);
15307 15303 }
15308 15304
15309 15305 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
15310 15306 sd_print_transport_rejected_message(un, xp, rval);
15311 15307
15312 15308 /*
15313 15309 * This command will be terminated by the SD driver due
15314 15310 * to a fatal transport error. We should post
15315 15311 * ereport.io.scsi.cmd.disk.tran with driver-assessment
15316 15312 * of "fail" for any command to indicate this
15317 15313 * situation.
15318 15314 */
15319 15315 if (xp->xb_ena > 0) {
15320 15316 ASSERT(un->un_fm_private != NULL);
15321 15317 sfip = un->un_fm_private;
15322 15318 sfip->fm_ssc.ssc_flags |= SSC_FLAGS_TRAN_ABORT;
15323 15319 sd_ssc_extract_info(&sfip->fm_ssc, un,
15324 15320 xp->xb_pktp, bp, xp);
15325 15321 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
15326 15322 }
15327 15323
15328 15324 /*
15329 15325 * We must use sd_return_failed_command_no_restart() to
15330 15326 * avoid a recursive call back into sd_start_cmds().
15331 15327 * However this also means that we must keep processing
15332 15328 * the waitq here in order to avoid stalling.
15333 15329 */
15334 15330 sd_return_failed_command_no_restart(un, bp, EIO);
15335 15331
15336 15332 /*
15337 15333 * Notify any threads waiting in sd_ddi_suspend() that
15338 15334 * a command completion has occurred.
15339 15335 */
15340 15336 if (un->un_state == SD_STATE_SUSPENDED) {
15341 15337 cv_broadcast(&un->un_disk_busy_cv);
15342 15338 }
15343 15339
15344 15340 if (bp == immed_bp) {
15345 15341 /* immed_bp is gone by now, so clear this */
15346 15342 immed_bp = NULL;
15347 15343 }
15348 15344 break;
15349 15345 }
15350 15346
15351 15347 } while (immed_bp == NULL);
15352 15348
15353 15349 exit:
15354 15350 ASSERT(mutex_owned(SD_MUTEX(un)));
15355 15351 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_start_cmds: exit\n");
15356 15352 }
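
/*
 * Illustrative sketch, not part of the driver: the head-of-waitq
 * requeue pattern used above when SD_PKT_ALLOC_FAILURE is returned
 * for a waitq bp, factored into a helper for clarity. It assumes the
 * caller holds SD_MUTEX(un) and that the waitq is the singly-linked
 * av_forw list headed by un_waitq_headp, as in the surrounding code.
 */
static void
sd_requeue_cmd_at_head(struct sd_lun *un, struct buf *bp)
{
	ASSERT(mutex_owned(SD_MUTEX(un)));

	/* Link the buf in front of the current head of the waitq. */
	bp->av_forw = un->un_waitq_headp;
	un->un_waitq_headp = bp;

	/* If the queue was empty, this buf is also the new tail. */
	if (un->un_waitq_tailp == NULL) {
		un->un_waitq_tailp = bp;
	}
}
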
15357 15353
15358 15354
15359 15355 /*
15360 15356 * Function: sd_return_command
15361 15357 *
15362 15358 * Description: Returns a command to its originator (with or without an
15363 15359 * error). Also starts commands waiting to be transported
15364 15360 * to the target.
15365 15361 *
15366 15362 * Context: May be called from interrupt, kernel, or timeout context
15367 15363 */
15368 15364
15369 15365 static void
15370 15366 sd_return_command(struct sd_lun *un, struct buf *bp)
15371 15367 {
15372 15368 struct sd_xbuf *xp;
15373 15369 struct scsi_pkt *pktp;
15374 15370 struct sd_fm_internal *sfip;
15375 15371
15376 15372 ASSERT(bp != NULL);
15377 15373 ASSERT(un != NULL);
15378 15374 ASSERT(mutex_owned(SD_MUTEX(un)));
15379 15375 ASSERT(bp != un->un_rqs_bp);
15380 15376 xp = SD_GET_XBUF(bp);
15381 15377 ASSERT(xp != NULL);
15382 15378
15383 15379 pktp = SD_GET_PKTP(bp);
15384 15380 sfip = (struct sd_fm_internal *)un->un_fm_private;
15385 15381 ASSERT(sfip != NULL);
15386 15382
15387 15383 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: entry\n");
15388 15384
15389 15385 /*
15390 15386 * Note: check for the "sdrestart failed" case.
15391 15387 */
15392 15388 if ((un->un_partial_dma_supported == 1) &&
15393 15389 ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) &&
15394 15390 (geterror(bp) == 0) && (xp->xb_dma_resid != 0) &&
15395 15391 (xp->xb_pktp->pkt_resid == 0)) {
15396 15392
15397 15393 if (sd_setup_next_xfer(un, bp, pktp, xp) != 0) {
15398 15394 /*
15399 15395 * Successfully set up next portion of cmd
15400 15396 * transfer, try sending it
15401 15397 */
15402 15398 sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
15403 15399 NULL, NULL, 0, (clock_t)0, NULL);
15404 15400 sd_start_cmds(un, NULL);
15405 15401 return; /* Note:x86: need a return here? */
15406 15402 }
15407 15403 }
15408 15404
15409 15405 /*
15410 15406 * If this is the failfast bp, clear it from un_failfast_bp. This
15411 15407 * can happen if upon being re-tried the failfast bp either
15412 15408 * succeeded or encountered another error (possibly even a different
15413 15409 * error than the one that precipitated the failfast state, but in
15414 15410 * that case it would have had to exhaust retries as well). Regardless,
15415 15411 * this should not occur while the instance is in the active
15416 15412 * failfast state.
15417 15413 */
15418 15414 if (bp == un->un_failfast_bp) {
15419 15415 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
15420 15416 un->un_failfast_bp = NULL;
15421 15417 }
15422 15418
15423 15419 /*
15424 15420 * Clear the failfast state upon successful completion of ANY cmd.
15425 15421 */
15426 15422 if (bp->b_error == 0) {
15427 15423 un->un_failfast_state = SD_FAILFAST_INACTIVE;
15428 15424 /*
15429 15425 * If this command succeeded but had been retried one or
15430 15426 * more times, we treat it as a recovered command and post an
15431 15427 * ereport with a driver-assessment of "recovered".
15432 15428 */
15433 15429 if (xp->xb_ena > 0) {
15434 15430 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
15435 15431 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RECOVERY);
15436 15432 }
15437 15433 } else {
15438 15434 /*
15439 15435 * If this is a failed non-USCSI command, we will post an
15440 15436 * ereport with the driver-assessment set accordingly ("fail" or
15441 15437 * "fatal").
15442 15438 */
15443 15439 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
15444 15440 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
15445 15441 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_FATAL);
15446 15442 }
15447 15443 }
15448 15444
15449 15445 /*
15450 15446 * This is used if the command was retried one or more times. Show that
15451 15447 * we are done with it, and allow processing of the waitq to resume.
15452 15448 */
15453 15449 if (bp == un->un_retry_bp) {
15454 15450 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15455 15451 "sd_return_command: un:0x%p: "
15456 15452 "RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
15457 15453 un->un_retry_bp = NULL;
15458 15454 un->un_retry_statp = NULL;
15459 15455 }
15460 15456
15461 15457 SD_UPDATE_RDWR_STATS(un, bp);
15462 15458 SD_UPDATE_PARTITION_STATS(un, bp);
15463 15459
15464 15460 switch (un->un_state) {
15465 15461 case SD_STATE_SUSPENDED:
15466 15462 /*
15467 15463 * Notify any threads waiting in sd_ddi_suspend() that
15468 15464 * a command completion has occurred.
15469 15465 */
15470 15466 cv_broadcast(&un->un_disk_busy_cv);
15471 15467 break;
15472 15468 default:
15473 15469 sd_start_cmds(un, NULL);
15474 15470 break;
15475 15471 }
15476 15472
15477 15473 /* Return this command up the iodone chain to its originator. */
15478 15474 mutex_exit(SD_MUTEX(un));
15479 15475
15480 15476 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
15481 15477 xp->xb_pktp = NULL;
15482 15478
15483 15479 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
15484 15480
15485 15481 ASSERT(!mutex_owned(SD_MUTEX(un)));
15486 15482 mutex_enter(SD_MUTEX(un));
15487 15483
15488 15484 SD_TRACE(SD_LOG_IO_CORE, un, "sd_return_command: exit\n");
15489 15485 }
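
/*
 * Illustrative sketch, not part of the driver: the partial-DMA
 * continuation test from the top of sd_return_command() expressed as
 * a predicate. A command needs another transfer leg only when partial
 * DMA is in use, it is not a USCSI command, no error has been set,
 * some DMA residual remains, and the last leg itself completed fully.
 */
static boolean_t
sd_cmd_needs_next_xfer(struct sd_lun *un, struct buf *bp,
    struct sd_xbuf *xp)
{
	return ((un->un_partial_dma_supported == 1) &&
	    ((xp->xb_pkt_flags & SD_XB_USCSICMD) != SD_XB_USCSICMD) &&
	    (geterror(bp) == 0) && (xp->xb_dma_resid != 0) &&
	    (xp->xb_pktp->pkt_resid == 0));
}
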
15490 15486
15491 15487
15492 15488 /*
15493 15489 * Function: sd_return_failed_command
15494 15490 *
15495 15491 * Description: Command completion when an error occurred.
15496 15492 *
15497 15493 * Context: May be called from interrupt context
15498 15494 */
15499 15495
15500 15496 static void
15501 15497 sd_return_failed_command(struct sd_lun *un, struct buf *bp, int errcode)
15502 15498 {
15503 15499 ASSERT(bp != NULL);
15504 15500 ASSERT(un != NULL);
15505 15501 ASSERT(mutex_owned(SD_MUTEX(un)));
15506 15502
15507 15503 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15508 15504 "sd_return_failed_command: entry\n");
15509 15505
15510 15506 /*
15511 15507 * b_resid could already be nonzero due to a partial data
15512 15508 * transfer, so do not change it here.
15513 15509 */
15514 15510 SD_BIOERROR(bp, errcode);
15515 15511
15516 15512 sd_return_command(un, bp);
15517 15513 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15518 15514 "sd_return_failed_command: exit\n");
15519 15515 }
15520 15516
15521 15517
15522 15518 /*
15523 15519 * Function: sd_return_failed_command_no_restart
15524 15520 *
15525 15521 * Description: Same as sd_return_failed_command, but ensures that no
15526 15522 * call back into sd_start_cmds will be issued.
15527 15523 *
15528 15524 * Context: May be called from interrupt context
15529 15525 */
15530 15526
15531 15527 static void
15532 15528 sd_return_failed_command_no_restart(struct sd_lun *un, struct buf *bp,
15533 15529 int errcode)
15534 15530 {
15535 15531 struct sd_xbuf *xp;
15536 15532
15537 15533 ASSERT(bp != NULL);
15538 15534 ASSERT(un != NULL);
15539 15535 ASSERT(mutex_owned(SD_MUTEX(un)));
15540 15536 xp = SD_GET_XBUF(bp);
15541 15537 ASSERT(xp != NULL);
15542 15538 ASSERT(errcode != 0);
15543 15539
15544 15540 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15545 15541 "sd_return_failed_command_no_restart: entry\n");
15546 15542
15547 15543 /*
15548 15544 * b_resid could already be nonzero due to a partial data
15549 15545 * transfer, so do not change it here.
15550 15546 */
15551 15547 SD_BIOERROR(bp, errcode);
15552 15548
15553 15549 /*
15554 15550 * If this is the failfast bp, clear it. This can happen if the
15555 15551 * failfast bp encountered a fatal error when we attempted to
15556 15552 * re-try it (such as a scsi_transport(9F) failure). However
15557 15553 * we should NOT be in an active failfast state if the failfast
15558 15554 * bp is not NULL.
15559 15555 */
15560 15556 if (bp == un->un_failfast_bp) {
15561 15557 ASSERT(un->un_failfast_state == SD_FAILFAST_INACTIVE);
15562 15558 un->un_failfast_bp = NULL;
15563 15559 }
15564 15560
15565 15561 if (bp == un->un_retry_bp) {
15566 15562 /*
15567 15563 * This command was retried one or more times. Show that we are
15568 15564 * done with it, and allow processing of the waitq to resume.
15569 15565 */
15570 15566 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15571 15567 "sd_return_failed_command_no_restart: "
15572 15568 " un:0x%p: RETURNING retry_bp:0x%p\n", un, un->un_retry_bp);
15573 15569 un->un_retry_bp = NULL;
15574 15570 un->un_retry_statp = NULL;
15575 15571 }
15576 15572
15577 15573 SD_UPDATE_RDWR_STATS(un, bp);
15578 15574 SD_UPDATE_PARTITION_STATS(un, bp);
15579 15575
15580 15576 mutex_exit(SD_MUTEX(un));
15581 15577
15582 15578 if (xp->xb_pktp != NULL) {
15583 15579 (*(sd_destroypkt_map[xp->xb_chain_iodone]))(bp);
15584 15580 xp->xb_pktp = NULL;
15585 15581 }
15586 15582
15587 15583 SD_BEGIN_IODONE(xp->xb_chain_iodone, un, bp);
15588 15584
15589 15585 mutex_enter(SD_MUTEX(un));
15590 15586
15591 15587 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15592 15588 "sd_return_failed_command_no_restart: exit\n");
15593 15589 }
15594 15590
15595 15591
15596 15592 /*
15597 15593 * Function: sd_retry_command
15598 15594 *
15599 15595 * Description: Queue up a command for retry, or (optionally) fail it
15600 15596 * if retry counts are exhausted.
15601 15597 *
15602 15598 * Arguments: un - Pointer to the sd_lun struct for the target.
15603 15599 *
15604 15600 * bp - Pointer to the buf for the command to be retried.
15605 15601 *
15606 15602 * retry_check_flag - Flag to see which (if any) of the retry
15607 15603 * counts should be decremented/checked. If the indicated
15608 15604 * retry count is exhausted, then the command will not be
15609 15605 * retried; it will be failed instead. This should use a
15610 15606 * value equal to one of the following:
15611 15607 *
15612 15608 * SD_RETRIES_NOCHECK
15613 15609 * SD_RETRIES_STANDARD
15614 15610 * SD_RETRIES_VICTIM
15615 15611 *
15616 15612 * Optionally may be bitwise-OR'ed with SD_RETRIES_ISOLATE
15617 15613 * if the check should be made to see if FLAG_ISOLATE is set
15618 15614 * in the pkt. If FLAG_ISOLATE is set, then the command is
15619 15615 * not retried, it is simply failed.
15620 15616 *
15621 15617 * user_funcp - Ptr to function to call before dispatching the
15622 15618 * command. May be NULL if no action needs to be performed.
15623 15619 * (Primarily intended for printing messages.)
15624 15620 *
15625 15621 * user_arg - Optional argument to be passed along to
15626 15622 * the user_funcp call.
15627 15623 *
15628 15624 * failure_code - errno return code to set in the bp if the
15629 15625 * command is going to be failed.
15630 15626 *
15631 15627 * retry_delay - Retry delay interval in (clock_t) units. May
15632 15628 * be zero, which indicates that the command should be retried
15633 15629 * immediately (i.e., without an intervening delay).
15634 15630 *
15635 15631 * statp - Ptr to kstat function to be updated if the command
15636 15632 * is queued for a delayed retry. May be NULL if no kstat
15637 15633 * update is desired.
15638 15634 *
15639 15635 * Context: May be called from interrupt context.
15640 15636 */
15641 15637
15642 15638 static void
15643 15639 sd_retry_command(struct sd_lun *un, struct buf *bp, int retry_check_flag,
15644 15640 void (*user_funcp)(struct sd_lun *un, struct buf *bp, void *argp, int
15645 15641 code), void *user_arg, int failure_code, clock_t retry_delay,
15646 15642 void (*statp)(kstat_io_t *))
15647 15643 {
15648 15644 struct sd_xbuf *xp;
15649 15645 struct scsi_pkt *pktp;
15650 15646 struct sd_fm_internal *sfip;
15651 15647
15652 15648 ASSERT(un != NULL);
15653 15649 ASSERT(mutex_owned(SD_MUTEX(un)));
15654 15650 ASSERT(bp != NULL);
15655 15651 xp = SD_GET_XBUF(bp);
15656 15652 ASSERT(xp != NULL);
15657 15653 pktp = SD_GET_PKTP(bp);
15658 15654 ASSERT(pktp != NULL);
15659 15655
15660 15656 sfip = (struct sd_fm_internal *)un->un_fm_private;
15661 15657 ASSERT(sfip != NULL);
15662 15658
15663 15659 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
15664 15660 "sd_retry_command: entry: bp:0x%p xp:0x%p\n", bp, xp);
15665 15661
15666 15662 /*
15667 15663 * If we are syncing or dumping, fail the command to avoid
15668 15664 * recursively calling back into scsi_transport().
15669 15665 */
15670 15666 if (ddi_in_panic()) {
15671 15667 goto fail_command_no_log;
15672 15668 }
15673 15669
15674 15670 /*
15675 15671 * We should never be retrying a command with FLAG_DIAGNOSE set, so
15676 15672 * log an error and fail the command.
15677 15673 */
15678 15674 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
15679 15675 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
15680 15676 "ERROR, retrying FLAG_DIAGNOSE command.\n");
15681 15677 sd_dump_memory(un, SD_LOG_IO, "CDB",
15682 15678 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
15683 15679 sd_dump_memory(un, SD_LOG_IO, "Sense Data",
15684 15680 (uchar_t *)xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
15685 15681 goto fail_command;
15686 15682 }
15687 15683
15688 15684 /*
15689 15685 * If we are suspended, then put the command onto the head of
15690 15686 * the wait queue since we don't want to start any more commands,
15691 15687 * and clear the un_retry_bp. When we are resumed, the commands
15692 15688 * on the wait queue will be handled.
15693 15689 */
15694 15690 switch (un->un_state) {
15695 15691 case SD_STATE_SUSPENDED:
15696 15692 case SD_STATE_DUMPING:
15697 15693 bp->av_forw = un->un_waitq_headp;
15698 15694 un->un_waitq_headp = bp;
15699 15695 if (un->un_waitq_tailp == NULL) {
15700 15696 un->un_waitq_tailp = bp;
15701 15697 }
15702 15698 if (bp == un->un_retry_bp) {
15703 15699 un->un_retry_bp = NULL;
15704 15700 un->un_retry_statp = NULL;
15705 15701 }
15706 15702 SD_UPDATE_KSTATS(un, kstat_waitq_enter, bp);
15707 15703 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: "
15708 15704 "exiting; cmd bp:0x%p requeued for SUSPEND/DUMP\n", bp);
15709 15705 return;
15710 15706 default:
15711 15707 break;
15712 15708 }
15713 15709
15714 15710 /*
15715 15711 * If the caller wants us to check FLAG_ISOLATE, then see if that
15716 15712 * is set; if it is then we do not want to retry the command.
15717 15713 * Normally, FLAG_ISOLATE is only used with USCSI cmds.
15718 15714 */
15719 15715 if ((retry_check_flag & SD_RETRIES_ISOLATE) != 0) {
15720 15716 if ((pktp->pkt_flags & FLAG_ISOLATE) != 0) {
15721 15717 goto fail_command;
15722 15718 }
15723 15719 }
15724 15720
15725 15721
15726 15722 /*
15727 15723 * If SD_RETRIES_FAILFAST is set, it indicates that either a
15728 15724 * command timeout or a selection timeout has occurred. This means
15729 15725 * that we were unable to establish any kind of communication with
15730 15726 * the target, and subsequent retries and/or commands are likely
15731 15727 * to encounter similar results and take a long time to complete.
15732 15728 *
15733 15729 * If this is a failfast error condition, we need to update the
15734 15730 * failfast state, even if this bp does not have B_FAILFAST set.
15735 15731 */
15736 15732 if (retry_check_flag & SD_RETRIES_FAILFAST) {
15737 15733 if (un->un_failfast_state == SD_FAILFAST_ACTIVE) {
15738 15734 ASSERT(un->un_failfast_bp == NULL);
15739 15735 /*
15740 15736 * If we are already in the active failfast state, and
15741 15737 * another failfast error condition has been detected,
15742 15738 * then fail this command if it has B_FAILFAST set.
15743 15739 * If B_FAILFAST is clear, then maintain the legacy
15744 15740 * behavior of retrying heroically, even though this will
15745 15741 * take a lot more time to fail the command.
15746 15742 */
15747 15743 if (bp->b_flags & B_FAILFAST) {
15748 15744 goto fail_command;
15749 15745 }
15750 15746 } else {
15751 15747 /*
15752 15748 * We're not in the active failfast state, but we
15753 15749 * have a failfast error condition, so we must begin
15754 15750 * transition to the next state. We do this regardless
15755 15751 * of whether or not this bp has B_FAILFAST set.
15756 15752 */
15757 15753 if (un->un_failfast_bp == NULL) {
15758 15754 /*
15759 15755 * This is the first bp to meet a failfast
15760 15756 * condition so save it on un_failfast_bp &
15761 15757 * do normal retry processing. Do not enter
15762 15758 * active failfast state yet. This marks
15763 15759 * entry into the "failfast pending" state.
15764 15760 */
15765 15761 un->un_failfast_bp = bp;
15766 15762
15767 15763 } else if (un->un_failfast_bp == bp) {
15768 15764 /*
15769 15765 * This is the second time *this* bp has
15770 15766 * encountered a failfast error condition,
15771 15767 * so enter active failfast state & flush
15772 15768 * queues as appropriate.
15773 15769 */
15774 15770 un->un_failfast_state = SD_FAILFAST_ACTIVE;
15775 15771 un->un_failfast_bp = NULL;
15776 15772 sd_failfast_flushq(un);
15777 15773
15778 15774 /*
15779 15775 * Fail this bp now if B_FAILFAST set;
15780 15776 * otherwise continue with retries. (It would
15781 15777 * be pretty ironic if this bp succeeded on a
15782 15778 * subsequent retry after we just flushed all
15783 15779 * the queues).
15784 15780 */
15785 15781 if (bp->b_flags & B_FAILFAST) {
15786 15782 goto fail_command;
15787 15783 }
15788 15784
15789 15785 #if !defined(lint) && !defined(__lint)
15790 15786 } else {
15791 15787 /*
15792 15788 * If neither of the preceding conditionals
15793 15789 * was true, it means that there is some
15794 15790 * *other* bp that has met an initial failfast
15795 15791 * condition and is currently either being
15796 15792 * retried or is waiting to be retried. In
15797 15793 * that case we should perform normal retry
15798 15794 * processing on *this* bp, since there is a
15799 15795 * chance that the current failfast condition
15800 15796 * is transient and recoverable. If that does
15801 15797 * not turn out to be the case, then retries
15802 15798 * will be cleared when the wait queue is
15803 15799 * flushed anyway.
15804 15800 */
15805 15801 #endif
15806 15802 }
15807 15803 }
15808 15804 } else {
15809 15805 /*
15810 15806 * SD_RETRIES_FAILFAST is clear, which indicates that we
15811 15807 * likely were able to at least establish some level of
15812 15808 * communication with the target and subsequent commands
15813 15809 * and/or retries are likely to get through to the target.
15814 15810 * In this case we want to be aggressive about clearing
15815 15811 * the failfast state. Note that this does not affect
15816 15812 * the "failfast pending" condition.
15817 15813 */
15818 15814 un->un_failfast_state = SD_FAILFAST_INACTIVE;
15819 15815 }
15820 15816
15821 15817
15822 15818 /*
15823 15819 * Check the specified retry count to see if we can still do
15824 15820 * any retries with this pkt before we should fail it.
15825 15821 */
15826 15822 switch (retry_check_flag & SD_RETRIES_MASK) {
15827 15823 case SD_RETRIES_VICTIM:
15828 15824 /*
15829 15825 * Check the victim retry count. If exhausted, then fall
15830 15826 * thru & check against the standard retry count.
15831 15827 */
15832 15828 if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
15833 15829 /* Increment count & proceed with the retry */
15834 15830 xp->xb_victim_retry_count++;
15835 15831 break;
15836 15832 }
15837 15833 /* Victim retries exhausted, fall back to std. retries... */
15838 15834 /* FALLTHRU */
15839 15835
15840 15836 case SD_RETRIES_STANDARD:
15841 15837 if (xp->xb_retry_count >= un->un_retry_count) {
15842 15838 /* Retries exhausted, fail the command */
15843 15839 SD_TRACE(SD_LOG_IO_CORE, un,
15844 15840 "sd_retry_command: retries exhausted!\n");
15845 15841 /*
15846 15842 * update b_resid for failed SCMD_READ & SCMD_WRITE
15847 15843 * commands with nonzero pkt_resid.
15848 15844 */
15849 15845 if ((pktp->pkt_reason == CMD_CMPLT) &&
15850 15846 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD) &&
15851 15847 (pktp->pkt_resid != 0)) {
15852 15848 uchar_t op = SD_GET_PKT_OPCODE(pktp) & 0x1F;
15853 15849 if ((op == SCMD_READ) || (op == SCMD_WRITE)) {
15854 15850 SD_UPDATE_B_RESID(bp, pktp);
15855 15851 }
15856 15852 }
15857 15853 goto fail_command;
15858 15854 }
15859 15855 xp->xb_retry_count++;
15860 15856 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15861 15857 "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
15862 15858 break;
15863 15859
15864 15860 case SD_RETRIES_UA:
15865 15861 if (xp->xb_ua_retry_count >= sd_ua_retry_count) {
15866 15862 /* Retries exhausted, fail the command */
15867 15863 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
15868 15864 "Unit Attention retries exhausted. "
15869 15865 "Check the target.\n");
15870 15866 goto fail_command;
15871 15867 }
15872 15868 xp->xb_ua_retry_count++;
15873 15869 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15874 15870 "sd_retry_command: retry count:%d\n",
15875 15871 xp->xb_ua_retry_count);
15876 15872 break;
15877 15873
15878 15874 case SD_RETRIES_BUSY:
15879 15875 if (xp->xb_retry_count >= un->un_busy_retry_count) {
15880 15876 /* Retries exhausted, fail the command */
15881 15877 SD_TRACE(SD_LOG_IO_CORE, un,
15882 15878 "sd_retry_command: retries exhausted!\n");
15883 15879 goto fail_command;
15884 15880 }
15885 15881 xp->xb_retry_count++;
15886 15882 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15887 15883 "sd_retry_command: retry count:%d\n", xp->xb_retry_count);
15888 15884 break;
15889 15885
15890 15886 case SD_RETRIES_NOCHECK:
15891 15887 default:
15892 15888 /* No retry count to check. Just proceed with the retry */
15893 15889 break;
15894 15890 }
15895 15891
15896 15892 xp->xb_pktp->pkt_flags |= FLAG_HEAD;
15897 15893
15898 15894 /*
15899 15895 * If this is a non-USCSI command that failed on its previous
15900 15896 * execution and is now being retried, we should post an ereport
15901 15897 * with a driver-assessment of "retry".
15902 15898 * For partial DMA, request sense, and STATUS_QFULL there is no
15903 15899 * hardware error, so we bypass ereport posting.
15904 15900 */
15905 15901 if (failure_code != 0) {
15906 15902 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
15907 15903 sd_ssc_extract_info(&sfip->fm_ssc, un, pktp, bp, xp);
15908 15904 sd_ssc_post(&sfip->fm_ssc, SD_FM_DRV_RETRY);
15909 15905 }
15910 15906 }
15911 15907
15912 15908 /*
15913 15909 * If we were given a zero timeout, we must attempt to retry the
15914 15910 * command immediately (ie, without a delay).
15915 15911 */
15916 15912 if (retry_delay == 0) {
15917 15913 /*
15918 15914 * Check some limiting conditions to see if we can actually
15919 15915 * do the immediate retry. If we cannot, then we must
15920 15916 * fall back to queueing up a delayed retry.
15921 15917 */
15922 15918 if (un->un_ncmds_in_transport >= un->un_throttle) {
15923 15919 /*
15924 15920 * We are at the throttle limit for the target,
15925 15921 * fall back to delayed retry.
15926 15922 */
15927 15923 retry_delay = un->un_busy_timeout;
15928 15924 statp = kstat_waitq_enter;
15929 15925 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15930 15926 "sd_retry_command: immed. retry hit "
15931 15927 "throttle!\n");
15932 15928 } else {
15933 15929 /*
15934 15930 * We're clear to proceed with the immediate retry.
15935 15931 * First call the user-provided function (if any)
15936 15932 */
15937 15933 if (user_funcp != NULL) {
15938 15934 (*user_funcp)(un, bp, user_arg,
15939 15935 SD_IMMEDIATE_RETRY_ISSUED);
15940 15936 #ifdef __lock_lint
15941 15937 sd_print_incomplete_msg(un, bp, user_arg,
15942 15938 SD_IMMEDIATE_RETRY_ISSUED);
15943 15939 sd_print_cmd_incomplete_msg(un, bp, user_arg,
15944 15940 SD_IMMEDIATE_RETRY_ISSUED);
15945 15941 sd_print_sense_failed_msg(un, bp, user_arg,
15946 15942 SD_IMMEDIATE_RETRY_ISSUED);
15947 15943 #endif
15948 15944 }
15949 15945
15950 15946 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15951 15947 "sd_retry_command: issuing immediate retry\n");
15952 15948
15953 15949 /*
15954 15950 * Call sd_start_cmds() to transport the command to
15955 15951 * the target.
15956 15952 */
15957 15953 sd_start_cmds(un, bp);
15958 15954
15959 15955 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15960 15956 "sd_retry_command exit\n");
15961 15957 return;
15962 15958 }
15963 15959 }
15964 15960
15965 15961 /*
15966 15962 * Set up to retry the command after a delay.
15967 15963 * First call the user-provided function (if any)
15968 15964 */
15969 15965 if (user_funcp != NULL) {
15970 15966 (*user_funcp)(un, bp, user_arg, SD_DELAYED_RETRY_ISSUED);
15971 15967 }
15972 15968
15973 15969 sd_set_retry_bp(un, bp, retry_delay, statp);
15974 15970
15975 15971 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
15976 15972 return;
15977 15973
15978 15974 fail_command:
15979 15975
15980 15976 if (user_funcp != NULL) {
15981 15977 (*user_funcp)(un, bp, user_arg, SD_NO_RETRY_ISSUED);
15982 15978 }
15983 15979
15984 15980 fail_command_no_log:
15985 15981
15986 15982 SD_INFO(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
15987 15983 "sd_retry_command: returning failed command\n");
15988 15984
15989 15985 sd_return_failed_command(un, bp, failure_code);
15990 15986
15991 15987 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_retry_command: exit\n");
15992 15988 }
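
/*
 * Illustrative sketch, not part of the driver: the victim-retry
 * accounting from sd_retry_command() above, showing how exhausted
 * victim retries fall through to the standard retry count. (The UA
 * and BUSY counts are handled analogously.) Returns B_TRUE if the
 * command may be retried, charging the retry against the appropriate
 * count; B_FALSE if the relevant count is exhausted.
 */
static boolean_t
sd_charge_retry(struct sd_lun *un, struct sd_xbuf *xp, int flag)
{
	switch (flag & SD_RETRIES_MASK) {
	case SD_RETRIES_VICTIM:
		if (xp->xb_victim_retry_count < un->un_victim_retry_count) {
			xp->xb_victim_retry_count++;
			return (B_TRUE);
		}
		/* Victim retries exhausted; fall back to standard. */
		/* FALLTHRU */
	case SD_RETRIES_STANDARD:
		if (xp->xb_retry_count >= un->un_retry_count) {
			return (B_FALSE);
		}
		xp->xb_retry_count++;
		return (B_TRUE);
	default:
		/* SD_RETRIES_NOCHECK: no count to check. */
		return (B_TRUE);
	}
}
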
15993 15989
15994 15990
15995 15991 /*
15996 15992 * Function: sd_set_retry_bp
15997 15993 *
15998 15994 * Description: Set up the given bp for retry.
15999 15995 *
16000 15996 * Arguments: un - ptr to associated softstate
16001 15997 * bp - ptr to buf(9S) for the command
16002 15998 * retry_delay - time interval before issuing retry (may be 0)
16003 15999 * statp - optional pointer to kstat function
16004 16000 *
16005 16001 * Context: May be called under interrupt context
16006 16002 */
16007 16003
16008 16004 static void
16009 16005 sd_set_retry_bp(struct sd_lun *un, struct buf *bp, clock_t retry_delay,
16010 16006 void (*statp)(kstat_io_t *))
16011 16007 {
16012 16008 ASSERT(un != NULL);
16013 16009 ASSERT(mutex_owned(SD_MUTEX(un)));
16014 16010 ASSERT(bp != NULL);
16015 16011
16016 16012 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
16017 16013 "sd_set_retry_bp: entry: un:0x%p bp:0x%p\n", un, bp);
16018 16014
16019 16015 /*
16020 16016 * Indicate that the command is being retried. This will not allow any
16021 16017 * other commands on the wait queue to be transported to the target
16022 16018 * until this command has been completed (success or failure). The
16023 16019 * "retry command" is not transported to the target until the given
16024 16020 * time delay expires, unless the user specified a 0 retry_delay.
16025 16021 *
16026 16022 * Note: the timeout(9F) callback routine is what actually calls
16027 16023 * sd_start_cmds() to transport the command, with the exception of a
16028 16024 * zero retry_delay. The only current implementor of a zero retry delay
16029 16025 * is the case where a START_STOP_UNIT is sent to spin-up a device.
16030 16026 */
16031 16027 if (un->un_retry_bp == NULL) {
16032 16028 ASSERT(un->un_retry_statp == NULL);
16033 16029 un->un_retry_bp = bp;
16034 16030
16035 16031 /*
16036 16032 * If the user has not specified a delay the command should
16037 16033 * be queued and no timeout should be scheduled.
16038 16034 */
16039 16035 if (retry_delay == 0) {
16040 16036 /*
16041 16037 * Save the kstat pointer that will be used in the
16042 16038 * call to SD_UPDATE_KSTATS() below, so that
16043 16039 * sd_start_cmds() can correctly decrement the waitq
16044 16040 * count when it is time to transport this command.
16045 16041 */
16046 16042 un->un_retry_statp = statp;
16047 16043 goto done;
16048 16044 }
16049 16045 }
16050 16046
16051 16047 if (un->un_retry_bp == bp) {
16052 16048 /*
16053 16049 * Save the kstat pointer that will be used in the call to
16054 16050 * SD_UPDATE_KSTATS() below, so that sd_start_cmds() can
16055 16051 * correctly decrement the waitq count when it is time to
16056 16052 * transport this command.
16057 16053 */
16058 16054 un->un_retry_statp = statp;
16059 16055
16060 16056 /*
16061 16057 * Schedule a timeout if:
16062 16058 * 1) The user has specified a delay.
16063 16059 * 2) There is not a START_STOP_UNIT callback pending.
16064 16060 *
16065 16061 * If no delay has been specified, then it is up to the caller
16066 16062 * to ensure that IO processing continues without stalling.
16067 16063 * Effectively, this means that the caller will issue the
16068 16064 * required call to sd_start_cmds(). The START_STOP_UNIT
16069 16065 * callback does this after the START STOP UNIT command has
16070 16066 * completed. In either of these cases we should not schedule
16071 16067 * a timeout callback here. Also don't schedule the timeout if
16072 16068 * an SD_PATH_DIRECT_PRIORITY command is waiting to restart.
16073 16069 */
16074 16070 if ((retry_delay != 0) && (un->un_startstop_timeid == NULL) &&
16075 16071 (un->un_direct_priority_timeid == NULL)) {
16076 16072 un->un_retry_timeid =
16077 16073 timeout(sd_start_retry_command, un, retry_delay);
16078 16074 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16079 16075 "sd_set_retry_bp: setting timeout: un: 0x%p"
16080 16076 " bp:0x%p un_retry_timeid:0x%p\n",
16081 16077 un, bp, un->un_retry_timeid);
16082 16078 }
16083 16079 } else {
16084 16080 /*
16085 16081 * We only get in here if there is already another command
16086 16082 * waiting to be retried. In this case, we just put the
16087 16083 * given command onto the wait queue, so it can be transported
16088 16084 * after the current retry command has completed.
16089 16085 *
16090 16086 * Also we have to make sure that if the command at the head
16091 16087 * of the wait queue is the un_failfast_bp, we do not put
16092 16088 * any other commands that are to be retried ahead of it.
16093 16089 */
16094 16090 if ((un->un_failfast_bp != NULL) &&
16095 16091 (un->un_failfast_bp == un->un_waitq_headp)) {
16096 16092 /*
16097 16093 * Enqueue this command AFTER the first command on
16098 16094 * the wait queue (which is also un_failfast_bp).
16099 16095 */
16100 16096 bp->av_forw = un->un_waitq_headp->av_forw;
16101 16097 un->un_waitq_headp->av_forw = bp;
16102 16098 if (un->un_waitq_headp == un->un_waitq_tailp) {
16103 16099 un->un_waitq_tailp = bp;
16104 16100 }
16105 16101 } else {
16106 16102 /* Enqueue this command at the head of the waitq. */
16107 16103 bp->av_forw = un->un_waitq_headp;
16108 16104 un->un_waitq_headp = bp;
16109 16105 if (un->un_waitq_tailp == NULL) {
16110 16106 un->un_waitq_tailp = bp;
16111 16107 }
16112 16108 }
16113 16109
16114 16110 if (statp == NULL) {
16115 16111 statp = kstat_waitq_enter;
16116 16112 }
16117 16113 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16118 16114 "sd_set_retry_bp: un:0x%p already delayed retry\n", un);
16119 16115 }
16120 16116
16121 16117 done:
16122 16118 if (statp != NULL) {
16123 16119 SD_UPDATE_KSTATS(un, statp, bp);
16124 16120 }
16125 16121
16126 16122 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16127 16123 "sd_set_retry_bp: exit un:0x%p\n", un);
16128 16124 }
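
/*
 * Illustrative sketch, not part of the driver: the waitq ordering
 * rule from sd_set_retry_bp() above. A retry bp normally goes to the
 * head of the waitq, but it must never be placed ahead of
 * un_failfast_bp when that command is already at the head, so in
 * that case it is enqueued in the second position instead. Assumes
 * SD_MUTEX(un) is held.
 */
static void
sd_enqueue_retry_bp(struct sd_lun *un, struct buf *bp)
{
	ASSERT(mutex_owned(SD_MUTEX(un)));

	if ((un->un_failfast_bp != NULL) &&
	    (un->un_failfast_bp == un->un_waitq_headp)) {
		/* Enqueue AFTER the failfast bp at the head. */
		bp->av_forw = un->un_waitq_headp->av_forw;
		un->un_waitq_headp->av_forw = bp;
		if (un->un_waitq_headp == un->un_waitq_tailp) {
			un->un_waitq_tailp = bp;
		}
	} else {
		/* Enqueue at the head of the waitq. */
		bp->av_forw = un->un_waitq_headp;
		un->un_waitq_headp = bp;
		if (un->un_waitq_tailp == NULL) {
			un->un_waitq_tailp = bp;
		}
	}
}
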
16129 16125
16130 16126
16131 16127 /*
16132 16128 * Function: sd_start_retry_command
16133 16129 *
16134 16130 * Description: Start the command that has been waiting on the target's
16135 16131 * retry queue. Called from timeout(9F) context after the
16136 16132 * retry delay interval has expired.
16137 16133 *
16138 16134 * Arguments: arg - pointer to associated softstate for the device.
16139 16135 *
16140 16136 * Context: timeout(9F) thread context. May not sleep.
16141 16137 */
16142 16138
16143 16139 static void
16144 16140 sd_start_retry_command(void *arg)
16145 16141 {
16146 16142 struct sd_lun *un = arg;
16147 16143
16148 16144 ASSERT(un != NULL);
16149 16145 ASSERT(!mutex_owned(SD_MUTEX(un)));
16150 16146
16151 16147 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16152 16148 "sd_start_retry_command: entry\n");
16153 16149
16154 16150 mutex_enter(SD_MUTEX(un));
16155 16151
16156 16152 un->un_retry_timeid = NULL;
16157 16153
16158 16154 if (un->un_retry_bp != NULL) {
16159 16155 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16160 16156 "sd_start_retry_command: un:0x%p STARTING bp:0x%p\n",
16161 16157 un, un->un_retry_bp);
16162 16158 sd_start_cmds(un, un->un_retry_bp);
16163 16159 }
16164 16160
16165 16161 mutex_exit(SD_MUTEX(un));
16166 16162
16167 16163 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16168 16164 "sd_start_retry_command: exit\n");
16169 16165 }
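
/*
 * Illustrative sketch, assuming only the documented timeout(9F) and
 * untimeout(9F) semantics: the cancel side of the un_retry_timeid
 * discipline used above. The callback clears the id under the mutex
 * before doing any work, and a canceller must likewise snapshot and
 * clear the id under the mutex, then call untimeout(9F) with the
 * mutex dropped to avoid deadlocking against a running callback.
 */
static void
sd_cancel_retry_timeout(struct sd_lun *un)
{
	timeout_id_t tid;

	mutex_enter(SD_MUTEX(un));
	tid = un->un_retry_timeid;
	un->un_retry_timeid = NULL;
	mutex_exit(SD_MUTEX(un));

	if (tid != NULL) {
		(void) untimeout(tid);
	}
}
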
16170 16166
16171 16167 /*
16172 16168 * Function: sd_rmw_msg_print_handler
16173 16169 *
16174 16170 * Description: If RMW mode is enabled and the warning message has been
16175 16171 * triggered, print the I/O count for the fixed interval.
16176 16172 *
16177 16173 * Arguments: arg - pointer to associated softstate for the device.
16178 16174 *
16179 16175 * Context: timeout(9F) thread context. May not sleep.
16180 16176 */
16181 16177 static void
16182 16178 sd_rmw_msg_print_handler(void *arg)
16183 16179 {
16184 16180 struct sd_lun *un = arg;
16185 16181
16186 16182 ASSERT(un != NULL);
16187 16183 ASSERT(!mutex_owned(SD_MUTEX(un)));
16188 16184
16189 16185 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16190 16186 "sd_rmw_msg_print_handler: entry\n");
16191 16187
16192 16188 mutex_enter(SD_MUTEX(un));
16193 16189
16194 16190 if (un->un_rmw_incre_count > 0) {
16195 16191 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16196 16192 "%"PRIu64" I/O requests are not aligned with %d disk "
16197 16193 "sector size in %ld seconds. They are handled through "
16198 16194 "Read Modify Write but the performance is very low!\n",
16199 16195 un->un_rmw_incre_count, un->un_tgt_blocksize,
16200 16196 drv_hztousec(SD_RMW_MSG_PRINT_TIMEOUT) / 1000000);
16201 16197 un->un_rmw_incre_count = 0;
16202 16198 un->un_rmw_msg_timeid = timeout(sd_rmw_msg_print_handler,
16203 16199 un, SD_RMW_MSG_PRINT_TIMEOUT);
16204 16200 } else {
16205 16201 un->un_rmw_msg_timeid = NULL;
16206 16202 }
16207 16203
16208 16204 mutex_exit(SD_MUTEX(un));
16209 16205
16210 16206 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16211 16207 "sd_rmw_msg_print_handler: exit\n");
16212 16208 }
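
/*
 * Illustrative sketch; the arming site lives elsewhere in the I/O
 * path and may differ in detail, so treat this as an assumption:
 * when an I/O is seen that is not aligned to un_tgt_blocksize, the
 * count is bumped and the print handler above is scheduled once;
 * further misaligned I/Os within the interval only increment the
 * count, so at most one warning is logged per
 * SD_RMW_MSG_PRINT_TIMEOUT.
 */
static void
sd_note_rmw_io(struct sd_lun *un)
{
	ASSERT(mutex_owned(SD_MUTEX(un)));

	un->un_rmw_incre_count++;
	if (un->un_rmw_msg_timeid == NULL) {
		un->un_rmw_msg_timeid = timeout(sd_rmw_msg_print_handler,
		    un, SD_RMW_MSG_PRINT_TIMEOUT);
	}
}
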
16213 16209
16214 16210 /*
16215 16211 * Function: sd_start_direct_priority_command
16216 16212 *
16217 16213 * Description: Used to re-start an SD_PATH_DIRECT_PRIORITY command that had
16218 16214 * received TRAN_BUSY when we called scsi_transport() to send it
16219 16215 * to the underlying HBA. This function is called from timeout(9F)
16220 16216 * context after the delay interval has expired.
16221 16217 *
16222 16218 * Arguments: arg - pointer to associated buf(9S) to be restarted.
16223 16219 *
16224 16220 * Context: timeout(9F) thread context. May not sleep.
16225 16221 */
16226 16222
16227 16223 static void
16228 16224 sd_start_direct_priority_command(void *arg)
16229 16225 {
16230 16226 struct buf *priority_bp = arg;
16231 16227 struct sd_lun *un;
16232 16228
16233 16229 ASSERT(priority_bp != NULL);
16234 16230 un = SD_GET_UN(priority_bp);
16235 16231 ASSERT(un != NULL);
16236 16232 ASSERT(!mutex_owned(SD_MUTEX(un)));
16237 16233
16238 16234 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16239 16235 "sd_start_direct_priority_command: entry\n");
16240 16236
16241 16237 mutex_enter(SD_MUTEX(un));
16242 16238 un->un_direct_priority_timeid = NULL;
16243 16239 sd_start_cmds(un, priority_bp);
16244 16240 mutex_exit(SD_MUTEX(un));
16245 16241
16246 16242 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16247 16243 "sd_start_direct_priority_command: exit\n");
16248 16244 }
16249 16245
16250 16246
16251 16247 /*
16252 16248 * Function: sd_send_request_sense_command
16253 16249 *
16254 16250 * Description: Sends a REQUEST SENSE command to the target
16255 16251 *
16256 16252 * Context: May be called from interrupt context.
16257 16253 */
16258 16254
16259 16255 static void
16260 16256 sd_send_request_sense_command(struct sd_lun *un, struct buf *bp,
16261 16257 struct scsi_pkt *pktp)
16262 16258 {
16263 16259 ASSERT(bp != NULL);
16264 16260 ASSERT(un != NULL);
16265 16261 ASSERT(mutex_owned(SD_MUTEX(un)));
16266 16262
16267 16263 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_send_request_sense_command: "
16268 16264 "entry: buf:0x%p\n", bp);
16269 16265
16270 16266 /*
16271 16267 * If we are syncing or dumping, then fail the command to avoid a
16272 16268 * recursive callback into scsi_transport(). Also fail the command
16273 16269 * if we are suspended (legacy behavior).
16274 16270 */
16275 16271 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
16276 16272 (un->un_state == SD_STATE_DUMPING)) {
16277 16273 sd_return_failed_command(un, bp, EIO);
16278 16274 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16279 16275 "sd_send_request_sense_command: syncing/dumping, exit\n");
16280 16276 return;
16281 16277 }
16282 16278
16283 16279 /*
16284 16280 * Retry the failed command and don't issue the request sense if:
16285 16281 * 1) the sense buf is busy
16286 16282 * 2) we have 1 or more outstanding commands on the target
16287 16283 * (the sense data will be cleared or invalidated anyway)
16288 16284 *
16289 16285 * Note: There could be an issue with not checking a retry limit here;
16290 16286 * the problem is determining which retry limit to check.
16291 16287 */
16292 16288 if ((un->un_sense_isbusy != 0) || (un->un_ncmds_in_transport > 0)) {
16293 16289 /* Don't retry if the command is flagged as non-retryable */
16294 16290 if ((pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
16295 16291 sd_retry_command(un, bp, SD_RETRIES_NOCHECK,
16296 16292 NULL, NULL, 0, un->un_busy_timeout,
16297 16293 kstat_waitq_enter);
16298 16294 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16299 16295 "sd_send_request_sense_command: "
16300 16296 "at full throttle, retrying exit\n");
16301 16297 } else {
16302 16298 sd_return_failed_command(un, bp, EIO);
16303 16299 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16304 16300 "sd_send_request_sense_command: "
16305 16301 "at full throttle, non-retryable exit\n");
16306 16302 }
16307 16303 return;
16308 16304 }
16309 16305
16310 16306 sd_mark_rqs_busy(un, bp);
16311 16307 sd_start_cmds(un, un->un_rqs_bp);
16312 16308
16313 16309 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16314 16310 "sd_send_request_sense_command: exit\n");
16315 16311 }
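
/*
 * Illustrative sketch, not part of the driver: the "defer the
 * REQUEST SENSE" test from sd_send_request_sense_command() above.
 * The RQS command is held back while the sense buf is in use or
 * while other commands are outstanding, since any intervening
 * command would clear or invalidate the pending sense data.
 */
static boolean_t
sd_rqs_must_wait(struct sd_lun *un)
{
	ASSERT(mutex_owned(SD_MUTEX(un)));

	return ((un->un_sense_isbusy != 0) ||
	    (un->un_ncmds_in_transport > 0));
}
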
16316 16312
16317 16313
16318 16314 /*
16319 16315 * Function: sd_mark_rqs_busy
16320 16316 *
16321 16317 * Description: Indicate that the request sense bp for this instance is
16322 16318 * in use.
16323 16319 *
16324 16320 * Context: May be called under interrupt context
16325 16321 */
16326 16322
16327 16323 static void
16328 16324 sd_mark_rqs_busy(struct sd_lun *un, struct buf *bp)
16329 16325 {
16330 16326 struct sd_xbuf *sense_xp;
16331 16327
16332 16328 ASSERT(un != NULL);
16333 16329 ASSERT(bp != NULL);
16334 16330 ASSERT(mutex_owned(SD_MUTEX(un)));
16335 16331 ASSERT(un->un_sense_isbusy == 0);
16336 16332
16337 16333 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: entry: "
16338 16334 "buf:0x%p xp:0x%p un:0x%p\n", bp, SD_GET_XBUF(bp), un);
16339 16335
16340 16336 sense_xp = SD_GET_XBUF(un->un_rqs_bp);
16341 16337 ASSERT(sense_xp != NULL);
16342 16338
16343 16339 SD_INFO(SD_LOG_IO, un,
16344 16340 "sd_mark_rqs_busy: entry: sense_xp:0x%p\n", sense_xp);
16345 16341
16346 16342 ASSERT(sense_xp->xb_pktp != NULL);
16347 16343 ASSERT((sense_xp->xb_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD))
16348 16344 == (FLAG_SENSING | FLAG_HEAD));
16349 16345
16350 16346 un->un_sense_isbusy = 1;
16351 16347 un->un_rqs_bp->b_resid = 0;
16352 16348 sense_xp->xb_pktp->pkt_resid = 0;
16353 16349 sense_xp->xb_pktp->pkt_reason = 0;
16354 16350
16355 16351 /* So we can get back the bp at interrupt time! */
16356 16352 sense_xp->xb_sense_bp = bp;
16357 16353
16358 16354 bzero(un->un_rqs_bp->b_un.b_addr, SENSE_LENGTH);
16359 16355
16360 16356 /*
16361 16357 * Mark this buf as awaiting sense data. (This is already set in
16362 16358 * the pkt_flags for the RQS packet.)
16363 16359 */
16364 16360 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags |= FLAG_SENSING;
16365 16361
16366 16362 /* Request sense down same path */
16367 16363 if (scsi_pkt_allocated_correctly((SD_GET_XBUF(bp))->xb_pktp) &&
16368 16364 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance)
16369 16365 sense_xp->xb_pktp->pkt_path_instance =
16370 16366 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_path_instance;
16371 16367
16372 16368 sense_xp->xb_retry_count = 0;
16373 16369 sense_xp->xb_victim_retry_count = 0;
16374 16370 sense_xp->xb_ua_retry_count = 0;
16375 16371 sense_xp->xb_nr_retry_count = 0;
16376 16372 sense_xp->xb_dma_resid = 0;
16377 16373
16378 16374 /* Clean up the fields for auto-request sense */
16379 16375 sense_xp->xb_sense_status = 0;
16380 16376 sense_xp->xb_sense_state = 0;
16381 16377 sense_xp->xb_sense_resid = 0;
16382 16378 bzero(sense_xp->xb_sense_data, sizeof (sense_xp->xb_sense_data));
16383 16379
16384 16380 SD_TRACE(SD_LOG_IO_CORE, un, "sd_mark_rqs_busy: exit\n");
16385 16381 }
16386 16382
16387 16383
16388 16384 /*
16389 16385 * Function: sd_mark_rqs_idle
16390 16386 *
16391 16387 * Description: SD_MUTEX must be held continuously through this routine
16392 16388 * to prevent reuse of the rqs struct before the caller can
16393 16389 * complete its processing.
16394 16390 *
16395 16391 * Return Code: Pointer to the RQS buf
16396 16392 *
16397 16393 * Context: May be called under interrupt context
16398 16394 */
16399 16395
16400 16396 static struct buf *
16401 16397 sd_mark_rqs_idle(struct sd_lun *un, struct sd_xbuf *sense_xp)
16402 16398 {
16403 16399 struct buf *bp;
16404 16400 ASSERT(un != NULL);
16405 16401 ASSERT(sense_xp != NULL);
16406 16402 ASSERT(mutex_owned(SD_MUTEX(un)));
16407 16403 ASSERT(un->un_sense_isbusy != 0);
16408 16404
16409 16405 un->un_sense_isbusy = 0;
16410 16406 bp = sense_xp->xb_sense_bp;
16411 16407 sense_xp->xb_sense_bp = NULL;
16412 16408
16413 16409 /* This pkt is no longer interested in getting sense data */
16414 16410 ((SD_GET_XBUF(bp))->xb_pktp)->pkt_flags &= ~FLAG_SENSING;
16415 16411
16416 16412 return (bp);
16417 16413 }
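
/*
 * Usage sketch of the RQS busy/idle protocol above: the failed
 * command's bp is stashed in xb_sense_bp by sd_mark_rqs_busy() and
 * recovered by sd_mark_rqs_idle() when the sense data arrives, all
 * under SD_MUTEX(un):
 *
 *	sd_mark_rqs_busy(un, bp);		failed command's bp
 *	sd_start_cmds(un, un->un_rqs_bp);	transport the RQS pkt
 *	...later, in sdintr(), once sense data is back...
 *	bp = sd_mark_rqs_idle(un, sense_xp);	original bp returned
 */
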
16418 16414
16419 16415
16420 16416
16421 16417 /*
16422 16418 * Function: sd_alloc_rqs
16423 16419 *
16424 16420 * Description: Set up the unit to receive auto request sense data
16425 16421 *
16426 16422 * Return Code: DDI_SUCCESS or DDI_FAILURE
16427 16423 *
16428 16424 * Context: Called under attach(9E) context
16429 16425 */
16430 16426
16431 16427 static int
16432 16428 sd_alloc_rqs(struct scsi_device *devp, struct sd_lun *un)
16433 16429 {
16434 16430 struct sd_xbuf *xp;
16435 16431
16436 16432 ASSERT(un != NULL);
16437 16433 ASSERT(!mutex_owned(SD_MUTEX(un)));
16438 16434 ASSERT(un->un_rqs_bp == NULL);
16439 16435 ASSERT(un->un_rqs_pktp == NULL);
16440 16436
16441 16437 /*
16442 16438 * First allocate the required buf and scsi_pkt structs, then set up
16443 16439 * the CDB in the scsi_pkt for a REQUEST SENSE command.
16444 16440 */
16445 16441 un->un_rqs_bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
16446 16442 MAX_SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
16447 16443 if (un->un_rqs_bp == NULL) {
16448 16444 return (DDI_FAILURE);
16449 16445 }
16450 16446
16451 16447 un->un_rqs_pktp = scsi_init_pkt(&devp->sd_address, NULL, un->un_rqs_bp,
16452 16448 CDB_GROUP0, 1, 0, PKT_CONSISTENT, SLEEP_FUNC, NULL);
16453 16449
16454 16450 if (un->un_rqs_pktp == NULL) {
16455 16451 sd_free_rqs(un);
16456 16452 return (DDI_FAILURE);
16457 16453 }
16458 16454
16459 16455 /* Set up the CDB in the scsi_pkt for a REQUEST SENSE command. */
16460 16456 (void) scsi_setup_cdb((union scsi_cdb *)un->un_rqs_pktp->pkt_cdbp,
16461 16457 SCMD_REQUEST_SENSE, 0, MAX_SENSE_LENGTH, 0);
16462 16458
16463 16459 SD_FILL_SCSI1_LUN(un, un->un_rqs_pktp);
16464 16460
16465 16461 /* Set up the other needed members in the ARQ scsi_pkt. */
16466 16462 un->un_rqs_pktp->pkt_comp = sdintr;
16467 16463 un->un_rqs_pktp->pkt_time = sd_io_time;
16468 16464 un->un_rqs_pktp->pkt_flags |=
16469 16465 (FLAG_SENSING | FLAG_HEAD); /* (1222170) */
16470 16466
16471 16467 /*
16472 16468 * Allocate & init the sd_xbuf struct for the RQS command. Do not
16473 16469 * provide any intpkt, destroypkt routines as we take care of
16474 16470 * scsi_pkt allocation/freeing here and in sd_free_rqs().
16475 16471 */
16476 16472 xp = kmem_alloc(sizeof (struct sd_xbuf), KM_SLEEP);
16477 16473 sd_xbuf_init(un, un->un_rqs_bp, xp, SD_CHAIN_NULL, NULL);
16478 16474 xp->xb_pktp = un->un_rqs_pktp;
16479 16475 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16480 16476 "sd_alloc_rqs: un 0x%p, rqs xp 0x%p, pkt 0x%p, buf 0x%p\n",
16481 16477 un, xp, un->un_rqs_pktp, un->un_rqs_bp);
16482 16478
16483 16479 /*
16484 16480 * Save the pointer to the request sense private bp so it can
16485 16481 * be retrieved in sdintr.
16486 16482 */
16487 16483 un->un_rqs_pktp->pkt_private = un->un_rqs_bp;
16488 16484 ASSERT(un->un_rqs_bp->b_private == xp);
16489 16485
16490 16486 /*
16491 16487 * See if the HBA supports auto-request sense for the specified
16492 16488 * target/lun. If it does, then try to enable it (if not already
16493 16489 * enabled).
16494 16490 *
16495 16491 * Note: For some HBAs (ifp & sf), scsi_ifsetcap will always return
16496 16492 * failure, while for other HBAs (pln) scsi_ifsetcap will always
16497 16493 * return success. However, in both of these cases ARQ is always
16498 16494 * enabled and scsi_ifgetcap will always return true. The best approach
16499 16495 * is to issue the scsi_ifgetcap() first, then try the scsi_ifsetcap().
16500 16496 *
16501 16497 * The 3rd case is an HBA (adp) that always returns enabled on
16502 16498 * scsi_ifgetcap even when ARQ is not enabled; there the best
16503 16499 * approach is to issue a scsi_ifsetcap() and then a scsi_ifgetcap().
16504 16500 * Note: this case is to circumvent the Adaptec bug. (x86 only)
16505 16501 */
16506 16502
16507 16503 if (un->un_f_is_fibre == TRUE) {
16508 16504 un->un_f_arq_enabled = TRUE;
16509 16505 } else {
16510 16506 #if defined(__i386) || defined(__amd64)
16511 16507 /*
16512 16508 * Circumvent the Adaptec bug, remove this code when
16513 16509 * the bug is fixed
16514 16510 */
16515 16511 (void) scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1);
16516 16512 #endif
16517 16513 switch (scsi_ifgetcap(SD_ADDRESS(un), "auto-rqsense", 1)) {
16518 16514 case 0:
16519 16515 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16520 16516 "sd_alloc_rqs: HBA supports ARQ\n");
16521 16517 /*
16522 16518 * ARQ is supported by this HBA but currently is not
16523 16519 * enabled. Attempt to enable it and if successful then
16524 16520 * mark this instance as ARQ enabled.
16525 16521 */
16526 16522 if (scsi_ifsetcap(SD_ADDRESS(un), "auto-rqsense", 1, 1)
16527 16523 == 1) {
16528 16524 /* Successfully enabled ARQ in the HBA */
16529 16525 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16530 16526 "sd_alloc_rqs: ARQ enabled\n");
16531 16527 un->un_f_arq_enabled = TRUE;
16532 16528 } else {
16533 16529 /* Could not enable ARQ in the HBA */
16534 16530 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16535 16531 "sd_alloc_rqs: failed ARQ enable\n");
16536 16532 un->un_f_arq_enabled = FALSE;
16537 16533 }
16538 16534 break;
16539 16535 case 1:
16540 16536 /*
16541 16537 * ARQ is supported by this HBA and is already enabled.
16542 16538 * Just mark ARQ as enabled for this instance.
16543 16539 */
16544 16540 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16545 16541 "sd_alloc_rqs: ARQ already enabled\n");
16546 16542 un->un_f_arq_enabled = TRUE;
16547 16543 break;
16548 16544 default:
16549 16545 /*
16550 16546 * ARQ is not supported by this HBA; disable it for this
16551 16547 * instance.
16552 16548 */
16553 16549 SD_INFO(SD_LOG_ATTACH_DETACH, un,
16554 16550 "sd_alloc_rqs: HBA does not support ARQ\n");
16555 16551 un->un_f_arq_enabled = FALSE;
16556 16552 break;
16557 16553 }
16558 16554 }
16559 16555
16560 16556 return (DDI_SUCCESS);
16561 16557 }
16562 16558
16563 16559
16564 16560 /*
16565 16561 * Function: sd_free_rqs
16566 16562 *
16567 16563 * Description: Cleanup for the pre-instance RQS command.
16568 16564 *
16569 16565 * Context: Kernel thread context
16570 16566 */
16571 16567
16572 16568 static void
16573 16569 sd_free_rqs(struct sd_lun *un)
16574 16570 {
16575 16571 ASSERT(un != NULL);
16576 16572
16577 16573 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: entry\n");
16578 16574
16579 16575 /*
16580 16576 * If consistent memory is bound to a scsi_pkt, the pkt
16581 16577 * has to be destroyed *before* freeing the consistent memory.
16582 16578  * Do not change the order of these operations:
16583 16579  * scsi_destroy_pkt() might otherwise access memory that was
16584 16580  * already freed by scsi_free_consistent_buf(), which is not allowed.
16585 16581 */
16586 16582 if (un->un_rqs_pktp != NULL) {
16587 16583 scsi_destroy_pkt(un->un_rqs_pktp);
16588 16584 un->un_rqs_pktp = NULL;
16589 16585 }
16590 16586
16591 16587 if (un->un_rqs_bp != NULL) {
16592 16588 struct sd_xbuf *xp = SD_GET_XBUF(un->un_rqs_bp);
16593 16589 if (xp != NULL) {
16594 16590 kmem_free(xp, sizeof (struct sd_xbuf));
16595 16591 }
16596 16592 scsi_free_consistent_buf(un->un_rqs_bp);
16597 16593 un->un_rqs_bp = NULL;
16598 16594 }
16599 16595 SD_TRACE(SD_LOG_IO_CORE, un, "sd_free_rqs: exit\n");
16600 16596 }
16601 16597
16602 16598
16603 16599
16604 16600 /*
16605 16601 * Function: sd_reduce_throttle
16606 16602 *
16607 16603 * Description: Reduces the maximum # of outstanding commands on a
16608 16604 * target to the current number of outstanding commands.
16609 16605  * Queues a timeout(9F) callback to restore the limit
16610 16606 * after a specified interval has elapsed.
16611 16607 * Typically used when we get a TRAN_BUSY return code
16612 16608 * back from scsi_transport().
16613 16609 *
16614 16610 * Arguments: un - ptr to the sd_lun softstate struct
16615 16611 * throttle_type: SD_THROTTLE_TRAN_BUSY or SD_THROTTLE_QFULL
16616 16612 *
16617 16613 * Context: May be called from interrupt context
16618 16614 */
16619 16615
16620 16616 static void
16621 16617 sd_reduce_throttle(struct sd_lun *un, int throttle_type)
16622 16618 {
16623 16619 ASSERT(un != NULL);
16624 16620 ASSERT(mutex_owned(SD_MUTEX(un)));
16625 16621 ASSERT(un->un_ncmds_in_transport >= 0);
16626 16622
16627 16623 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
16628 16624 "entry: un:0x%p un_throttle:%d un_ncmds_in_transport:%d\n",
16629 16625 un, un->un_throttle, un->un_ncmds_in_transport);
16630 16626
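	/*
	 * Illustrative example (hypothetical values): if un_throttle is
	 * 256 and only 4 commands are in transport when a TRAN_BUSY comes
	 * back, the adaptive path below saves 256 in un_busy_throttle and
	 * drops un_throttle to 4; sd_restore_throttle() later reverts to
	 * the saved value.  A QFULL instead clears un_busy_throttle, so
	 * the limit is restored gradually rather than all at once.
	 */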
16631 16627 if (un->un_throttle > 1) {
16632 16628 if (un->un_f_use_adaptive_throttle == TRUE) {
16633 16629 switch (throttle_type) {
16634 16630 case SD_THROTTLE_TRAN_BUSY:
16635 16631 if (un->un_busy_throttle == 0) {
16636 16632 un->un_busy_throttle = un->un_throttle;
16637 16633 }
16638 16634 break;
16639 16635 case SD_THROTTLE_QFULL:
16640 16636 un->un_busy_throttle = 0;
16641 16637 break;
16642 16638 default:
16643 16639 ASSERT(FALSE);
16644 16640 }
16645 16641
16646 16642 if (un->un_ncmds_in_transport > 0) {
16647 16643 un->un_throttle = un->un_ncmds_in_transport;
16648 16644 }
16649 16645
16650 16646 } else {
16651 16647 if (un->un_ncmds_in_transport == 0) {
16652 16648 un->un_throttle = 1;
16653 16649 } else {
16654 16650 un->un_throttle = un->un_ncmds_in_transport;
16655 16651 }
16656 16652 }
16657 16653 }
16658 16654
16659 16655 /* Reschedule the timeout if none is currently active */
16660 16656 if (un->un_reset_throttle_timeid == NULL) {
16661 16657 un->un_reset_throttle_timeid = timeout(sd_restore_throttle,
16662 16658 un, SD_THROTTLE_RESET_INTERVAL);
16663 16659 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16664 16660 "sd_reduce_throttle: timeout scheduled!\n");
16665 16661 }
16666 16662
16667 16663 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reduce_throttle: "
16668 16664 "exit: un:0x%p un_throttle:%d\n", un, un->un_throttle);
16669 16665 }
16670 16666
16671 16667
16672 16668
16673 16669 /*
16674 16670 * Function: sd_restore_throttle
16675 16671 *
16676 16672 * Description: Callback function for timeout(9F). Resets the current
16677 16673 * value of un->un_throttle to its default.
16678 16674 *
16679 16675 * Arguments: arg - pointer to associated softstate for the device.
16680 16676 *
16681 16677 * Context: May be called from interrupt context
16682 16678 */
16683 16679
16684 16680 static void
16685 16681 sd_restore_throttle(void *arg)
16686 16682 {
16687 16683 struct sd_lun *un = arg;
16688 16684
16689 16685 ASSERT(un != NULL);
16690 16686 ASSERT(!mutex_owned(SD_MUTEX(un)));
16691 16687
16692 16688 mutex_enter(SD_MUTEX(un));
16693 16689
16694 16690 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
16695 16691 "entry: un:0x%p un_throttle:%d\n", un, un->un_throttle);
16696 16692
16697 16693 un->un_reset_throttle_timeid = NULL;
16698 16694
16699 16695 if (un->un_f_use_adaptive_throttle == TRUE) {
16700 16696 /*
16701 16697 * If un_busy_throttle is nonzero, then it contains the
16702 16698 * value that un_throttle was when we got a TRAN_BUSY back
16703 16699 * from scsi_transport(). We want to revert back to this
16704 16700 * value.
16705 16701 *
16706 16702 * In the QFULL case, the throttle limit will incrementally
16707 16703 * increase until it reaches max throttle.
16708 16704 */
16709 16705 if (un->un_busy_throttle > 0) {
16710 16706 un->un_throttle = un->un_busy_throttle;
16711 16707 un->un_busy_throttle = 0;
16712 16708 } else {
16713 16709 /*
16714 16710  * Increase the throttle by 10% to open the gate slowly;
16715 16711  * schedule another restore if the saved throttle has not
16716 16712  * yet been reached.
16717 16713 */
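				/*
				 * Example (hypothetical values): with
				 * un_throttle == 50 and un_saved_throttle ==
				 * 256, the next step is 50 + max(50 / 10, 1)
				 * == 55, and another restore is scheduled
				 * because 55 < 256.
				 */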
16718 16714 short throttle;
16719 16715 if (sd_qfull_throttle_enable) {
16720 16716 throttle = un->un_throttle +
16721 16717 max((un->un_throttle / 10), 1);
16722 16718 un->un_throttle =
16723 16719 (throttle < un->un_saved_throttle) ?
16724 16720 throttle : un->un_saved_throttle;
16725 16721 if (un->un_throttle < un->un_saved_throttle) {
16726 16722 un->un_reset_throttle_timeid =
16727 16723 timeout(sd_restore_throttle,
16728 16724 un,
16729 16725 SD_QFULL_THROTTLE_RESET_INTERVAL);
16730 16726 }
16731 16727 }
16732 16728 }
16733 16729
16734 16730 /*
16735 16731 * If un_throttle has fallen below the low-water mark, we
16736 16732 * restore the maximum value here (and allow it to ratchet
16737 16733 * down again if necessary).
16738 16734 */
16739 16735 if (un->un_throttle < un->un_min_throttle) {
16740 16736 un->un_throttle = un->un_saved_throttle;
16741 16737 }
16742 16738 } else {
16743 16739 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: "
16744 16740 "restoring limit from 0x%x to 0x%x\n",
16745 16741 un->un_throttle, un->un_saved_throttle);
16746 16742 un->un_throttle = un->un_saved_throttle;
16747 16743 }
16748 16744
16749 16745 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
16750 16746 "sd_restore_throttle: calling sd_start_cmds!\n");
16751 16747
16752 16748 sd_start_cmds(un, NULL);
16753 16749
16754 16750 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un,
16755 16751 "sd_restore_throttle: exit: un:0x%p un_throttle:%d\n",
16756 16752 un, un->un_throttle);
16757 16753
16758 16754 mutex_exit(SD_MUTEX(un));
16759 16755
16760 16756 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sd_restore_throttle: exit\n");
16761 16757 }
16762 16758
16763 16759 /*
16764 16760 * Function: sdrunout
16765 16761 *
16766 16762 * Description: Callback routine for scsi_init_pkt when a resource allocation
16767 16763 * fails.
16768 16764 *
16769 16765 * Arguments: arg - a pointer to the sd_lun unit struct for the particular
16770 16766 * soft state instance.
16771 16767 *
16772 16768 * Return Code: The scsi_init_pkt routine allows for the callback function to
16773 16769 * return a 0 indicating the callback should be rescheduled or a 1
16774 16770 * indicating not to reschedule. This routine always returns 1
16775 16771 * because the driver always provides a callback function to
16776 16772 * scsi_init_pkt. This results in a callback always being scheduled
16777 16773 * (via the scsi_init_pkt callback implementation) if a resource
16778 16774 * failure occurs.
16779 16775 *
16780 16776 * Context: This callback function may not block or call routines that block
16781 16777 *
16782 16778 * Note: Using the scsi_init_pkt callback facility can result in an I/O
16783 16779 * request persisting at the head of the list which cannot be
16784 16780 * satisfied even after multiple retries. In the future the driver
16785 16781  * may implement some type of maximum runout count before failing
16786 16782 * an I/O.
16787 16783 */
16788 16784
16789 16785 static int
16790 16786 sdrunout(caddr_t arg)
16791 16787 {
16792 16788 struct sd_lun *un = (struct sd_lun *)arg;
16793 16789
16794 16790 ASSERT(un != NULL);
16795 16791 ASSERT(!mutex_owned(SD_MUTEX(un)));
16796 16792
16797 16793 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: entry\n");
16798 16794
16799 16795 mutex_enter(SD_MUTEX(un));
16800 16796 sd_start_cmds(un, NULL);
16801 16797 mutex_exit(SD_MUTEX(un));
16802 16798 /*
16803 16799 * This callback routine always returns 1 (i.e. do not reschedule)
16804 16800 * because we always specify sdrunout as the callback handler for
16805 16801 * scsi_init_pkt inside the call to sd_start_cmds.
16806 16802 */
16807 16803 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdrunout: exit\n");
16808 16804 return (1);
16809 16805 }
16810 16806
16811 16807
16812 16808 /*
16813 16809 * Function: sdintr
16814 16810 *
16815 16811 * Description: Completion callback routine for scsi_pkt(9S) structs
16816 16812 * sent to the HBA driver via scsi_transport(9F).
16817 16813 *
16818 16814 * Context: Interrupt context
16819 16815 */
16820 16816
16821 16817 static void
16822 16818 sdintr(struct scsi_pkt *pktp)
16823 16819 {
16824 16820 struct buf *bp;
16825 16821 struct sd_xbuf *xp;
16826 16822 struct sd_lun *un;
16827 16823 size_t actual_len;
16828 16824 sd_ssc_t *sscp;
16829 16825
16830 16826 ASSERT(pktp != NULL);
16831 16827 bp = (struct buf *)pktp->pkt_private;
16832 16828 ASSERT(bp != NULL);
16833 16829 xp = SD_GET_XBUF(bp);
16834 16830 ASSERT(xp != NULL);
16835 16831 ASSERT(xp->xb_pktp != NULL);
16836 16832 un = SD_GET_UN(bp);
16837 16833 ASSERT(un != NULL);
16838 16834 ASSERT(!mutex_owned(SD_MUTEX(un)));
16839 16835
16840 16836 #ifdef SD_FAULT_INJECTION
16841 16837
16842 16838 SD_INFO(SD_LOG_IOERR, un, "sdintr: sdintr calling Fault injection\n");
16843 16839 /* SD FaultInjection */
16844 16840 sd_faultinjection(pktp);
16845 16841
16846 16842 #endif /* SD_FAULT_INJECTION */
16847 16843
16848 16844 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: entry: buf:0x%p,"
16849 16845 " xp:0x%p, un:0x%p\n", bp, xp, un);
16850 16846
16851 16847 mutex_enter(SD_MUTEX(un));
16852 16848
16853 16849 ASSERT(un->un_fm_private != NULL);
16854 16850 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
16855 16851 ASSERT(sscp != NULL);
16856 16852
16857 16853 /* Reduce the count of the #commands currently in transport */
16858 16854 un->un_ncmds_in_transport--;
16859 16855 ASSERT(un->un_ncmds_in_transport >= 0);
16860 16856
16861 16857 /* Increment counter to indicate that the callback routine is active */
16862 16858 un->un_in_callback++;
16863 16859
16864 16860 SD_UPDATE_KSTATS(un, kstat_runq_exit, bp);
16865 16861
16866 16862 #ifdef SDDEBUG
16867 16863 if (bp == un->un_retry_bp) {
16868 16864 SD_TRACE(SD_LOG_IO | SD_LOG_ERROR, un, "sdintr: "
16869 16865 "un:0x%p: GOT retry_bp:0x%p un_ncmds_in_transport:%d\n",
16870 16866 un, un->un_retry_bp, un->un_ncmds_in_transport);
16871 16867 }
16872 16868 #endif
16873 16869
16874 16870 /*
16875 16871 * If pkt_reason is CMD_DEV_GONE, fail the command, and update the media
16876 16872 * state if needed.
16877 16873 */
16878 16874 if (pktp->pkt_reason == CMD_DEV_GONE) {
16879 16875 /* Prevent multiple console messages for the same failure. */
16880 16876 if (un->un_last_pkt_reason != CMD_DEV_GONE) {
16881 16877 un->un_last_pkt_reason = CMD_DEV_GONE;
16882 16878 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
16883 16879 "Command failed to complete...Device is gone\n");
16884 16880 }
16885 16881 if (un->un_mediastate != DKIO_DEV_GONE) {
16886 16882 un->un_mediastate = DKIO_DEV_GONE;
16887 16883 cv_broadcast(&un->un_state_cv);
16888 16884 }
16889 16885 /*
16890 16886 * If the command happens to be the REQUEST SENSE command,
16891 16887 * free up the rqs buf and fail the original command.
16892 16888 */
16893 16889 if (bp == un->un_rqs_bp) {
16894 16890 bp = sd_mark_rqs_idle(un, xp);
16895 16891 }
16896 16892 sd_return_failed_command(un, bp, EIO);
16897 16893 goto exit;
16898 16894 }
16899 16895
16900 16896 if (pktp->pkt_state & STATE_XARQ_DONE) {
16901 16897 SD_TRACE(SD_LOG_COMMON, un,
16902 16898 "sdintr: extra sense data received. pkt=%p\n", pktp);
16903 16899 }
16904 16900
16905 16901 /*
16906 16902 * First see if the pkt has auto-request sense data with it....
16907 16903 * Look at the packet state first so we don't take a performance
16908 16904 * hit looking at the arq enabled flag unless absolutely necessary.
16909 16905 */
16910 16906 if ((pktp->pkt_state & STATE_ARQ_DONE) &&
16911 16907 (un->un_f_arq_enabled == TRUE)) {
16912 16908 /*
16913 16909 * The HBA did an auto request sense for this command so check
16914 16910  * for FLAG_DIAGNOSE. If set, this indicates a uscsi or internal
16915 16911 * driver command that should not be retried.
16916 16912 */
16917 16913 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
16918 16914 /*
16919 16915 * Save the relevant sense info into the xp for the
16920 16916 * original cmd.
16921 16917 */
16922 16918 struct scsi_arq_status *asp;
16923 16919 asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
16924 16920 xp->xb_sense_status =
16925 16921 *((uchar_t *)(&(asp->sts_rqpkt_status)));
16926 16922 xp->xb_sense_state = asp->sts_rqpkt_state;
16927 16923 xp->xb_sense_resid = asp->sts_rqpkt_resid;
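			/*
			 * The reported residual is interpreted against the
			 * sense buffer actually used: the full
			 * MAX_SENSE_LENGTH buffer when extended sense was
			 * transferred (STATE_XARQ_DONE, or a residual larger
			 * than SENSE_LENGTH), and SENSE_LENGTH otherwise.
			 */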
16928 16924 if (pktp->pkt_state & STATE_XARQ_DONE) {
16929 16925 actual_len = MAX_SENSE_LENGTH -
16930 16926 xp->xb_sense_resid;
16931 16927 bcopy(&asp->sts_sensedata, xp->xb_sense_data,
16932 16928 MAX_SENSE_LENGTH);
16933 16929 } else {
16934 16930 if (xp->xb_sense_resid > SENSE_LENGTH) {
16935 16931 actual_len = MAX_SENSE_LENGTH -
16936 16932 xp->xb_sense_resid;
16937 16933 } else {
16938 16934 actual_len = SENSE_LENGTH -
16939 16935 xp->xb_sense_resid;
16940 16936 }
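				/*
				 * For a USCSI command the caller's residual
				 * must be expressed relative to the length it
				 * requested (uscsi_rqlen), not to our
				 * internal sense buffer size, so recompute
				 * it here.
				 */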
16941 16937 if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
16942 16938 if ((((struct uscsi_cmd *)
16943 16939 (xp->xb_pktinfo))->uscsi_rqlen) >
16944 16940 actual_len) {
16945 16941 xp->xb_sense_resid =
16946 16942 (((struct uscsi_cmd *)
16947 16943 (xp->xb_pktinfo))->
16948 16944 uscsi_rqlen) - actual_len;
16949 16945 } else {
16950 16946 xp->xb_sense_resid = 0;
16951 16947 }
16952 16948 }
16953 16949 bcopy(&asp->sts_sensedata, xp->xb_sense_data,
16954 16950 SENSE_LENGTH);
16955 16951 }
16956 16952
16957 16953 /* fail the command */
16958 16954 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16959 16955 "sdintr: arq done and FLAG_DIAGNOSE set\n");
16960 16956 sd_return_failed_command(un, bp, EIO);
16961 16957 goto exit;
16962 16958 }
16963 16959
16964 16960 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */
16965 16961 /*
16966 16962 * We want to either retry or fail this command, so free
16967 16963 * the DMA resources here. If we retry the command then
16968 16964 * the DMA resources will be reallocated in sd_start_cmds().
16969 16965 * Note that when PKT_DMA_PARTIAL is used, this reallocation
16970 16966 * causes the *entire* transfer to start over again from the
16971 16967 * beginning of the request, even for PARTIAL chunks that
16972 16968 * have already transferred successfully.
16973 16969 */
16974 16970 if ((un->un_f_is_fibre == TRUE) &&
16975 16971 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
16976 16972 ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
16977 16973 scsi_dmafree(pktp);
16978 16974 xp->xb_pkt_flags |= SD_XB_DMA_FREED;
16979 16975 }
16980 16976 #endif
16981 16977
16982 16978 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16983 16979 "sdintr: arq done, sd_handle_auto_request_sense\n");
16984 16980
16985 16981 sd_handle_auto_request_sense(un, bp, xp, pktp);
16986 16982 goto exit;
16987 16983 }
16988 16984
16989 16985 /* Next see if this is the REQUEST SENSE pkt for the instance */
16990 16986 if (pktp->pkt_flags & FLAG_SENSING) {
16991 16987 /* This pktp is from the unit's REQUEST_SENSE command */
16992 16988 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
16993 16989 "sdintr: sd_handle_request_sense\n");
16994 16990 sd_handle_request_sense(un, bp, xp, pktp);
16995 16991 goto exit;
16996 16992 }
16997 16993
16998 16994 /*
16999 16995 * Check to see if the command successfully completed as requested;
17000 16996 * this is the most common case (and also the hot performance path).
17001 16997 *
17002 16998 * Requirements for successful completion are:
17003 16999 * pkt_reason is CMD_CMPLT and packet status is status good.
17004 17000 * In addition:
17005 17001 * - A residual of zero indicates successful completion no matter what
17006 17002 * the command is.
17007 17003 * - If the residual is not zero and the command is not a read or
17008 17004 * write, then it's still defined as successful completion. In other
17009 17005 * words, if the command is a read or write the residual must be
17010 17006 * zero for successful completion.
17011 17007 * - If the residual is not zero and the command is a read or
17012 17008 * write, and it's a USCSICMD, then it's still defined as
17013 17009 * successful completion.
17014 17010 */
17015 17011 if ((pktp->pkt_reason == CMD_CMPLT) &&
17016 17012 (SD_GET_PKT_STATUS(pktp) == STATUS_GOOD)) {
17017 17013
17018 17014 /*
17019 17015 * Since this command is returned with a good status, we
17020 17016 * can reset the count for Sonoma failover.
17021 17017 */
17022 17018 un->un_sonoma_failure_count = 0;
17023 17019
17024 17020 /*
17025 17021 * Return all USCSI commands on good status
17026 17022 */
17027 17023 if (pktp->pkt_resid == 0) {
17028 17024 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17029 17025 "sdintr: returning command for resid == 0\n");
17030 17026 } else if (((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_READ) &&
17031 17027 ((SD_GET_PKT_OPCODE(pktp) & 0x1F) != SCMD_WRITE)) {
17032 17028 SD_UPDATE_B_RESID(bp, pktp);
17033 17029 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17034 17030 "sdintr: returning command for resid != 0\n");
17035 17031 } else if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
17036 17032 SD_UPDATE_B_RESID(bp, pktp);
17037 17033 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17038 17034 "sdintr: returning uscsi command\n");
17039 17035 } else {
17040 17036 goto not_successful;
17041 17037 }
17042 17038 sd_return_command(un, bp);
17043 17039
17044 17040 /*
17045 17041 * Decrement counter to indicate that the callback routine
17046 17042 * is done.
17047 17043 */
17048 17044 un->un_in_callback--;
17049 17045 ASSERT(un->un_in_callback >= 0);
17050 17046 mutex_exit(SD_MUTEX(un));
17051 17047
17052 17048 return;
17053 17049 }
17054 17050
17055 17051 not_successful:
17056 17052
17057 17053 #if (defined(__i386) || defined(__amd64)) /* DMAFREE for x86 only */
17058 17054 /*
17059 17055 * The following is based upon knowledge of the underlying transport
17060 17056 * and its use of DMA resources. This code should be removed when
17061 17057 * PKT_DMA_PARTIAL support is taken out of the disk driver in favor
17062 17058 * of the new PKT_CMD_BREAKUP protocol. See also sd_initpkt_for_buf()
17063 17059 * and sd_start_cmds().
17064 17060 *
17065 17061 * Free any DMA resources associated with this command if there
17066 17062 * is a chance it could be retried or enqueued for later retry.
17067 17063 * If we keep the DMA binding then mpxio cannot reissue the
17068 17064 * command on another path whenever a path failure occurs.
17069 17065 *
17070 17066 * Note that when PKT_DMA_PARTIAL is used, free/reallocation
17071 17067 * causes the *entire* transfer to start over again from the
17072 17068 * beginning of the request, even for PARTIAL chunks that
17073 17069 * have already transferred successfully.
17074 17070 *
17075 17071 * This is only done for non-uscsi commands (and also skipped for the
17076 17072 * driver's internal RQS command). Also just do this for Fibre Channel
17077 17073 * devices as these are the only ones that support mpxio.
17078 17074 */
17079 17075 if ((un->un_f_is_fibre == TRUE) &&
17080 17076 ((xp->xb_pkt_flags & SD_XB_USCSICMD) == 0) &&
17081 17077 ((pktp->pkt_flags & FLAG_SENSING) == 0)) {
17082 17078 scsi_dmafree(pktp);
17083 17079 xp->xb_pkt_flags |= SD_XB_DMA_FREED;
17084 17080 }
17085 17081 #endif
17086 17082
17087 17083 /*
17088 17084 * The command did not successfully complete as requested so check
17089 17085  * for FLAG_DIAGNOSE. If set, this indicates a uscsi or internal
17090 17086 * driver command that should not be retried so just return. If
17091 17087 * FLAG_DIAGNOSE is not set the error will be processed below.
17092 17088 */
17093 17089 if ((pktp->pkt_flags & FLAG_DIAGNOSE) != 0) {
17094 17090 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17095 17091 "sdintr: FLAG_DIAGNOSE: sd_return_failed_command\n");
17096 17092 /*
17097 17093 * Issue a request sense if a check condition caused the error
17098 17094 * (we handle the auto request sense case above), otherwise
17099 17095 * just fail the command.
17100 17096 */
17101 17097 if ((pktp->pkt_reason == CMD_CMPLT) &&
17102 17098 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK)) {
17103 17099 sd_send_request_sense_command(un, bp, pktp);
17104 17100 } else {
17105 17101 sd_return_failed_command(un, bp, EIO);
17106 17102 }
17107 17103 goto exit;
17108 17104 }
17109 17105
17110 17106 /*
17111 17107 * The command did not successfully complete as requested so process
17112 17108 * the error, retry, and/or attempt recovery.
17113 17109 */
17114 17110 switch (pktp->pkt_reason) {
17115 17111 case CMD_CMPLT:
17116 17112 switch (SD_GET_PKT_STATUS(pktp)) {
17117 17113 case STATUS_GOOD:
17118 17114 /*
17119 17115 * The command completed successfully with a non-zero
17120 17116 * residual
17121 17117 */
17122 17118 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17123 17119 "sdintr: STATUS_GOOD \n");
17124 17120 sd_pkt_status_good(un, bp, xp, pktp);
17125 17121 break;
17126 17122
17127 17123 case STATUS_CHECK:
17128 17124 case STATUS_TERMINATED:
17129 17125 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17130 17126 "sdintr: STATUS_TERMINATED | STATUS_CHECK\n");
17131 17127 sd_pkt_status_check_condition(un, bp, xp, pktp);
17132 17128 break;
17133 17129
17134 17130 case STATUS_BUSY:
17135 17131 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17136 17132 "sdintr: STATUS_BUSY\n");
17137 17133 sd_pkt_status_busy(un, bp, xp, pktp);
17138 17134 break;
17139 17135
17140 17136 case STATUS_RESERVATION_CONFLICT:
17141 17137 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17142 17138 "sdintr: STATUS_RESERVATION_CONFLICT\n");
17143 17139 sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
17144 17140 break;
17145 17141
17146 17142 case STATUS_QFULL:
17147 17143 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17148 17144 "sdintr: STATUS_QFULL\n");
17149 17145 sd_pkt_status_qfull(un, bp, xp, pktp);
17150 17146 break;
17151 17147
17152 17148 case STATUS_MET:
17153 17149 case STATUS_INTERMEDIATE:
17154 17150 case STATUS_SCSI2:
17155 17151 case STATUS_INTERMEDIATE_MET:
17156 17152 case STATUS_ACA_ACTIVE:
17157 17153 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17158 17154 "Unexpected SCSI status received: 0x%x\n",
17159 17155 SD_GET_PKT_STATUS(pktp));
17160 17156 /*
17161 17157  * Mark the ssc_flags when an invalid status code is
17162 17158  * detected for a non-USCSI command.
17163 17159 */
17164 17160 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17165 17161 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
17166 17162 0, "stat-code");
17167 17163 }
17168 17164 sd_return_failed_command(un, bp, EIO);
17169 17165 break;
17170 17166
17171 17167 default:
17172 17168 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17173 17169 "Invalid SCSI status received: 0x%x\n",
17174 17170 SD_GET_PKT_STATUS(pktp));
17175 17171 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17176 17172 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_STATUS,
17177 17173 0, "stat-code");
17178 17174 }
17179 17175 sd_return_failed_command(un, bp, EIO);
17180 17176 break;
17181 17177
17182 17178 }
17183 17179 break;
17184 17180
17185 17181 case CMD_INCOMPLETE:
17186 17182 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17187 17183 "sdintr: CMD_INCOMPLETE\n");
17188 17184 sd_pkt_reason_cmd_incomplete(un, bp, xp, pktp);
17189 17185 break;
17190 17186 case CMD_TRAN_ERR:
17191 17187 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17192 17188 "sdintr: CMD_TRAN_ERR\n");
17193 17189 sd_pkt_reason_cmd_tran_err(un, bp, xp, pktp);
17194 17190 break;
17195 17191 case CMD_RESET:
17196 17192 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17197 17193 "sdintr: CMD_RESET \n");
17198 17194 sd_pkt_reason_cmd_reset(un, bp, xp, pktp);
17199 17195 break;
17200 17196 case CMD_ABORTED:
17201 17197 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17202 17198 "sdintr: CMD_ABORTED \n");
17203 17199 sd_pkt_reason_cmd_aborted(un, bp, xp, pktp);
17204 17200 break;
17205 17201 case CMD_TIMEOUT:
17206 17202 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17207 17203 "sdintr: CMD_TIMEOUT\n");
17208 17204 sd_pkt_reason_cmd_timeout(un, bp, xp, pktp);
17209 17205 break;
17210 17206 case CMD_UNX_BUS_FREE:
17211 17207 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17212 17208 "sdintr: CMD_UNX_BUS_FREE \n");
17213 17209 sd_pkt_reason_cmd_unx_bus_free(un, bp, xp, pktp);
17214 17210 break;
17215 17211 case CMD_TAG_REJECT:
17216 17212 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17217 17213 "sdintr: CMD_TAG_REJECT\n");
17218 17214 sd_pkt_reason_cmd_tag_reject(un, bp, xp, pktp);
17219 17215 break;
17220 17216 default:
17221 17217 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
17222 17218 "sdintr: default\n");
17223 17219 /*
17224 17220  * Mark the ssc_flags for detecting an invalid pkt_reason.
17225 17221 */
17226 17222 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17227 17223 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_PKT_REASON,
17228 17224 0, "pkt-reason");
17229 17225 }
17230 17226 sd_pkt_reason_default(un, bp, xp, pktp);
17231 17227 break;
17232 17228 }
17233 17229
17234 17230 exit:
17235 17231 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sdintr: exit\n");
17236 17232
17237 17233 /* Decrement counter to indicate that the callback routine is done. */
17238 17234 un->un_in_callback--;
17239 17235 ASSERT(un->un_in_callback >= 0);
17240 17236
17241 17237 /*
17242 17238  * At this point, the pkt has been dispatched, i.e., it is either
17243 17239 * being re-tried or has been returned to its caller and should
17244 17240 * not be referenced.
17245 17241 */
17246 17242
17247 17243 mutex_exit(SD_MUTEX(un));
17248 17244 }
17249 17245
17250 17246
17251 17247 /*
17252 17248 * Function: sd_print_incomplete_msg
17253 17249 *
17254 17250 * Description: Prints the error message for a CMD_INCOMPLETE error.
17255 17251 *
17256 17252 * Arguments: un - ptr to associated softstate for the device.
17257 17253 * bp - ptr to the buf(9S) for the command.
17258 17254 * arg - message string ptr
17259 17255 * code - SD_DELAYED_RETRY_ISSUED, SD_IMMEDIATE_RETRY_ISSUED,
17260 17256 * or SD_NO_RETRY_ISSUED.
17261 17257 *
17262 17258 * Context: May be called under interrupt context
17263 17259 */
17264 17260
17265 17261 static void
17266 17262 sd_print_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
17267 17263 {
17268 17264 struct scsi_pkt *pktp;
17269 17265 char *msgp;
17270 17266 char *cmdp = arg;
17271 17267
17272 17268 ASSERT(un != NULL);
17273 17269 ASSERT(mutex_owned(SD_MUTEX(un)));
17274 17270 ASSERT(bp != NULL);
17275 17271 ASSERT(arg != NULL);
17276 17272 pktp = SD_GET_PKTP(bp);
17277 17273 ASSERT(pktp != NULL);
17278 17274
17279 17275 switch (code) {
17280 17276 case SD_DELAYED_RETRY_ISSUED:
17281 17277 case SD_IMMEDIATE_RETRY_ISSUED:
17282 17278 msgp = "retrying";
17283 17279 break;
17284 17280 case SD_NO_RETRY_ISSUED:
17285 17281 default:
17286 17282 msgp = "giving up";
17287 17283 break;
17288 17284 }
17289 17285
17290 17286 if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
17291 17287 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17292 17288 "incomplete %s- %s\n", cmdp, msgp);
17293 17289 }
17294 17290 }
17295 17291
17296 17292
17297 17293
17298 17294 /*
17299 17295 * Function: sd_pkt_status_good
17300 17296 *
17301 17297 * Description: Processing for a STATUS_GOOD code in pkt_status.
17302 17298 *
17303 17299 * Context: May be called under interrupt context
17304 17300 */
17305 17301
17306 17302 static void
17307 17303 sd_pkt_status_good(struct sd_lun *un, struct buf *bp,
17308 17304 struct sd_xbuf *xp, struct scsi_pkt *pktp)
17309 17305 {
17310 17306 char *cmdp;
17311 17307
17312 17308 ASSERT(un != NULL);
17313 17309 ASSERT(mutex_owned(SD_MUTEX(un)));
17314 17310 ASSERT(bp != NULL);
17315 17311 ASSERT(xp != NULL);
17316 17312 ASSERT(pktp != NULL);
17317 17313 ASSERT(pktp->pkt_reason == CMD_CMPLT);
17318 17314 ASSERT(SD_GET_PKT_STATUS(pktp) == STATUS_GOOD);
17319 17315 ASSERT(pktp->pkt_resid != 0);
17320 17316
17321 17317 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: entry\n");
17322 17318
17323 17319 SD_UPDATE_ERRSTATS(un, sd_harderrs);
17324 17320 switch (SD_GET_PKT_OPCODE(pktp) & 0x1F) {
17325 17321 case SCMD_READ:
17326 17322 cmdp = "read";
17327 17323 break;
17328 17324 case SCMD_WRITE:
17329 17325 cmdp = "write";
17330 17326 break;
17331 17327 default:
17332 17328 SD_UPDATE_B_RESID(bp, pktp);
17333 17329 sd_return_command(un, bp);
17334 17330 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
17335 17331 return;
17336 17332 }
17337 17333
17338 17334 /*
17339 17335  * See if we can retry the read/write, preferably immediately.
17340 17336  * If retries are exhausted, then sd_retry_command() will update
17341 17337 * the b_resid count.
17342 17338 */
17343 17339 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_incomplete_msg,
17344 17340 cmdp, EIO, (clock_t)0, NULL);
17345 17341
17346 17342 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_good: exit\n");
17347 17343 }
17348 17344
17349 17345
17350 17346
17351 17347
17352 17348
17353 17349 /*
17354 17350 * Function: sd_handle_request_sense
17355 17351 *
17356 17352 * Description: Processing for non-auto Request Sense command.
17357 17353 *
17358 17354 * Arguments: un - ptr to associated softstate
17359 17355 * sense_bp - ptr to buf(9S) for the RQS command
17360 17356 * sense_xp - ptr to the sd_xbuf for the RQS command
17361 17357 * sense_pktp - ptr to the scsi_pkt(9S) for the RQS command
17362 17358 *
17363 17359 * Context: May be called under interrupt context
17364 17360 */
17365 17361
17366 17362 static void
17367 17363 sd_handle_request_sense(struct sd_lun *un, struct buf *sense_bp,
17368 17364 struct sd_xbuf *sense_xp, struct scsi_pkt *sense_pktp)
17369 17365 {
17370 17366 struct buf *cmd_bp; /* buf for the original command */
17371 17367 struct sd_xbuf *cmd_xp; /* sd_xbuf for the original command */
17372 17368 struct scsi_pkt *cmd_pktp; /* pkt for the original command */
17373 17369 size_t actual_len; /* actual sense data length */
17374 17370
17375 17371 ASSERT(un != NULL);
17376 17372 ASSERT(mutex_owned(SD_MUTEX(un)));
17377 17373 ASSERT(sense_bp != NULL);
17378 17374 ASSERT(sense_xp != NULL);
17379 17375 ASSERT(sense_pktp != NULL);
17380 17376
17381 17377 /*
17382 17378 * Note the sense_bp, sense_xp, and sense_pktp here are for the
17383 17379 * RQS command and not the original command.
17384 17380 */
17385 17381 ASSERT(sense_pktp == un->un_rqs_pktp);
17386 17382 ASSERT(sense_bp == un->un_rqs_bp);
17387 17383 ASSERT((sense_pktp->pkt_flags & (FLAG_SENSING | FLAG_HEAD)) ==
17388 17384 (FLAG_SENSING | FLAG_HEAD));
17389 17385 ASSERT((((SD_GET_XBUF(sense_xp->xb_sense_bp))->xb_pktp->pkt_flags) &
17390 17386 FLAG_SENSING) == FLAG_SENSING);
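	/*
	 * I.e., the original command's packet must still be marked as the
	 * command currently being sensed.
	 */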
17391 17387
17392 17388 /* These are the bp, xp, and pktp for the original command */
17393 17389 cmd_bp = sense_xp->xb_sense_bp;
17394 17390 cmd_xp = SD_GET_XBUF(cmd_bp);
17395 17391 cmd_pktp = SD_GET_PKTP(cmd_bp);
17396 17392
17397 17393 if (sense_pktp->pkt_reason != CMD_CMPLT) {
17398 17394 /*
17399 17395 * The REQUEST SENSE command failed. Release the REQUEST
17400 17396 * SENSE command for re-use, get back the bp for the original
17401 17397 * command, and attempt to re-try the original command if
17402 17398 * FLAG_DIAGNOSE is not set in the original packet.
17403 17399 */
17404 17400 SD_UPDATE_ERRSTATS(un, sd_harderrs);
17405 17401 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
17406 17402 cmd_bp = sd_mark_rqs_idle(un, sense_xp);
17407 17403 sd_retry_command(un, cmd_bp, SD_RETRIES_STANDARD,
17408 17404 NULL, NULL, EIO, (clock_t)0, NULL);
17409 17405 return;
17410 17406 }
17411 17407 }
17412 17408
17413 17409 /*
17414 17410 * Save the relevant sense info into the xp for the original cmd.
17415 17411 *
17416 17412 * Note: if the request sense failed the state info will be zero
17417 17413 * as set in sd_mark_rqs_busy()
17418 17414 */
17419 17415 cmd_xp->xb_sense_status = *(sense_pktp->pkt_scbp);
17420 17416 cmd_xp->xb_sense_state = sense_pktp->pkt_state;
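	/*
	 * The RQS buffer is allocated with MAX_SENSE_LENGTH bytes (see
	 * sd_alloc_rqs()), so the amount of sense data actually
	 * transferred is that full size minus the residual.
	 */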
17421 17417 actual_len = MAX_SENSE_LENGTH - sense_pktp->pkt_resid;
17422 17418 if ((cmd_xp->xb_pkt_flags & SD_XB_USCSICMD) &&
17423 17419 (((struct uscsi_cmd *)cmd_xp->xb_pktinfo)->uscsi_rqlen >
17424 17420 SENSE_LENGTH)) {
17425 17421 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
17426 17422 MAX_SENSE_LENGTH);
17427 17423 cmd_xp->xb_sense_resid = sense_pktp->pkt_resid;
17428 17424 } else {
17429 17425 bcopy(sense_bp->b_un.b_addr, cmd_xp->xb_sense_data,
17430 17426 SENSE_LENGTH);
17431 17427 if (actual_len < SENSE_LENGTH) {
17432 17428 cmd_xp->xb_sense_resid = SENSE_LENGTH - actual_len;
17433 17429 } else {
17434 17430 cmd_xp->xb_sense_resid = 0;
17435 17431 }
17436 17432 }
17437 17433
17438 17434 /*
17439 17435 * Free up the RQS command....
17440 17436 * NOTE:
17441 17437 * Must do this BEFORE calling sd_validate_sense_data!
17442 17438 * sd_validate_sense_data may return the original command in
17443 17439 * which case the pkt will be freed and the flags can no
17444 17440 * longer be touched.
17445 17441 * SD_MUTEX is held through this process until the command
17446 17442 * is dispatched based upon the sense data, so there are
17447 17443 * no race conditions.
17448 17444 */
17449 17445 (void) sd_mark_rqs_idle(un, sense_xp);
17450 17446
17451 17447 /*
17452 17448 * For a retryable command see if we have valid sense data, if so then
17453 17449 * turn it over to sd_decode_sense() to figure out the right course of
17454 17450 * action. Just fail a non-retryable command.
17455 17451 */
17456 17452 if ((cmd_pktp->pkt_flags & FLAG_DIAGNOSE) == 0) {
17457 17453 if (sd_validate_sense_data(un, cmd_bp, cmd_xp, actual_len) ==
17458 17454 SD_SENSE_DATA_IS_VALID) {
17459 17455 sd_decode_sense(un, cmd_bp, cmd_xp, cmd_pktp);
17460 17456 }
17461 17457 } else {
17462 17458 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Failed CDB",
17463 17459 (uchar_t *)cmd_pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
17464 17460 SD_DUMP_MEMORY(un, SD_LOG_IO_CORE, "Sense Data",
17465 17461 (uchar_t *)cmd_xp->xb_sense_data, SENSE_LENGTH, SD_LOG_HEX);
17466 17462 sd_return_failed_command(un, cmd_bp, EIO);
17467 17463 }
17468 17464 }
17469 17465
17470 17466
17471 17467
17472 17468
17473 17469 /*
17474 17470 * Function: sd_handle_auto_request_sense
17475 17471 *
17476 17472 * Description: Processing for auto-request sense information.
17477 17473 *
17478 17474 * Arguments: un - ptr to associated softstate
17479 17475 * bp - ptr to buf(9S) for the command
17480 17476 * xp - ptr to the sd_xbuf for the command
17481 17477 * pktp - ptr to the scsi_pkt(9S) for the command
17482 17478 *
17483 17479 * Context: May be called under interrupt context
17484 17480 */
17485 17481
17486 17482 static void
17487 17483 sd_handle_auto_request_sense(struct sd_lun *un, struct buf *bp,
17488 17484 struct sd_xbuf *xp, struct scsi_pkt *pktp)
17489 17485 {
17490 17486 struct scsi_arq_status *asp;
17491 17487 size_t actual_len;
17492 17488
17493 17489 ASSERT(un != NULL);
17494 17490 ASSERT(mutex_owned(SD_MUTEX(un)));
17495 17491 ASSERT(bp != NULL);
17496 17492 ASSERT(xp != NULL);
17497 17493 ASSERT(pktp != NULL);
17498 17494 ASSERT(pktp != un->un_rqs_pktp);
17499 17495 ASSERT(bp != un->un_rqs_bp);
17500 17496
17501 17497 /*
17502 17498 * For auto-request sense, we get a scsi_arq_status back from
17503 17499 * the HBA, with the sense data in the sts_sensedata member.
17504 17500 * The pkt_scbp of the packet points to this scsi_arq_status.
17505 17501 */
17506 17502 asp = (struct scsi_arq_status *)(pktp->pkt_scbp);
17507 17503
17508 17504 if (asp->sts_rqpkt_reason != CMD_CMPLT) {
17509 17505 /*
17510 17506 * The auto REQUEST SENSE failed; see if we can re-try
17511 17507 * the original command.
17512 17508 */
17513 17509 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17514 17510 "auto request sense failed (reason=%s)\n",
17515 17511 scsi_rname(asp->sts_rqpkt_reason));
17516 17512
17517 17513 sd_reset_target(un, pktp);
17518 17514
17519 17515 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17520 17516 NULL, NULL, EIO, (clock_t)0, NULL);
17521 17517 return;
17522 17518 }
17523 17519
17524 17520 /* Save the relevant sense info into the xp for the original cmd. */
17525 17521 xp->xb_sense_status = *((uchar_t *)(&(asp->sts_rqpkt_status)));
17526 17522 xp->xb_sense_state = asp->sts_rqpkt_state;
17527 17523 xp->xb_sense_resid = asp->sts_rqpkt_resid;
17528 17524 if (xp->xb_sense_state & STATE_XARQ_DONE) {
17529 17525 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
17530 17526 bcopy(&asp->sts_sensedata, xp->xb_sense_data,
17531 17527 MAX_SENSE_LENGTH);
17532 17528 } else {
17533 17529 if (xp->xb_sense_resid > SENSE_LENGTH) {
17534 17530 actual_len = MAX_SENSE_LENGTH - xp->xb_sense_resid;
17535 17531 } else {
17536 17532 actual_len = SENSE_LENGTH - xp->xb_sense_resid;
17537 17533 }
17538 17534 if (xp->xb_pkt_flags & SD_XB_USCSICMD) {
17539 17535 if ((((struct uscsi_cmd *)
17540 17536 (xp->xb_pktinfo))->uscsi_rqlen) > actual_len) {
17541 17537 xp->xb_sense_resid = (((struct uscsi_cmd *)
17542 17538 (xp->xb_pktinfo))->uscsi_rqlen) -
17543 17539 actual_len;
17544 17540 } else {
17545 17541 xp->xb_sense_resid = 0;
17546 17542 }
17547 17543 }
17548 17544 bcopy(&asp->sts_sensedata, xp->xb_sense_data, SENSE_LENGTH);
17549 17545 }
17550 17546
17551 17547 /*
17552 17548 * See if we have valid sense data, if so then turn it over to
17553 17549 * sd_decode_sense() to figure out the right course of action.
17554 17550 */
17555 17551 if (sd_validate_sense_data(un, bp, xp, actual_len) ==
17556 17552 SD_SENSE_DATA_IS_VALID) {
17557 17553 sd_decode_sense(un, bp, xp, pktp);
17558 17554 }
17559 17555 }
17560 17556
17561 17557
17562 17558 /*
17563 17559 * Function: sd_print_sense_failed_msg
17564 17560 *
17565 17561 * Description: Print log message when RQS has failed.
17566 17562 *
17567 17563 * Arguments: un - ptr to associated softstate
17568 17564 * bp - ptr to buf(9S) for the command
17569 17565 * arg - generic message string ptr
17570 17566 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
17571 17567 * or SD_NO_RETRY_ISSUED
17572 17568 *
17573 17569 * Context: May be called from interrupt context
17574 17570 */
17575 17571
17576 17572 static void
17577 17573 sd_print_sense_failed_msg(struct sd_lun *un, struct buf *bp, void *arg,
17578 17574 int code)
17579 17575 {
17580 17576 char *msgp = arg;
17581 17577
17582 17578 ASSERT(un != NULL);
17583 17579 ASSERT(mutex_owned(SD_MUTEX(un)));
17584 17580 ASSERT(bp != NULL);
17585 17581
17586 17582 if ((code == SD_NO_RETRY_ISSUED) && (msgp != NULL)) {
17587 17583 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, msgp);
17588 17584 }
17589 17585 }
17590 17586
17591 17587
17592 17588 /*
17593 17589 * Function: sd_validate_sense_data
17594 17590 *
17595 17591 * Description: Check the given sense data for validity.
17596 17592 * If the sense data is not valid, the command will
17597 17593 * be either failed or retried!
17598 17594 *
17599 17595 * Return Code: SD_SENSE_DATA_IS_INVALID
17600 17596 * SD_SENSE_DATA_IS_VALID
17601 17597 *
17602 17598 * Context: May be called from interrupt context
17603 17599 */
17604 17600
17605 17601 static int
17606 17602 sd_validate_sense_data(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
17607 17603 size_t actual_len)
17608 17604 {
17609 17605 struct scsi_extended_sense *esp;
17610 17606 struct scsi_pkt *pktp;
17611 17607 char *msgp = NULL;
17612 17608 sd_ssc_t *sscp;
17613 17609
17614 17610 ASSERT(un != NULL);
17615 17611 ASSERT(mutex_owned(SD_MUTEX(un)));
17616 17612 ASSERT(bp != NULL);
17617 17613 ASSERT(bp != un->un_rqs_bp);
17618 17614 ASSERT(xp != NULL);
17619 17615 ASSERT(un->un_fm_private != NULL);
17620 17616
17621 17617 pktp = SD_GET_PKTP(bp);
17622 17618 ASSERT(pktp != NULL);
17623 17619
17624 17620 sscp = &((struct sd_fm_internal *)(un->un_fm_private))->fm_ssc;
17625 17621 ASSERT(sscp != NULL);
17626 17622
17627 17623 /*
17628 17624 * Check the status of the RQS command (auto or manual).
17629 17625 */
17630 17626 switch (xp->xb_sense_status & STATUS_MASK) {
17631 17627 case STATUS_GOOD:
17632 17628 break;
17633 17629
17634 17630 case STATUS_RESERVATION_CONFLICT:
17635 17631 sd_pkt_status_reservation_conflict(un, bp, xp, pktp);
17636 17632 return (SD_SENSE_DATA_IS_INVALID);
17637 17633
17638 17634 case STATUS_BUSY:
17639 17635 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17640 17636 "Busy Status on REQUEST SENSE\n");
17641 17637 sd_retry_command(un, bp, SD_RETRIES_BUSY, NULL,
17642 17638 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter);
17643 17639 return (SD_SENSE_DATA_IS_INVALID);
17644 17640
17645 17641 case STATUS_QFULL:
17646 17642 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
17647 17643 "QFULL Status on REQUEST SENSE\n");
17648 17644 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL,
17649 17645 NULL, EIO, un->un_busy_timeout / 500, kstat_waitq_enter);
17650 17646 return (SD_SENSE_DATA_IS_INVALID);
17651 17647
17652 17648 case STATUS_CHECK:
17653 17649 case STATUS_TERMINATED:
17654 17650 msgp = "Check Condition on REQUEST SENSE\n";
17655 17651 goto sense_failed;
17656 17652
17657 17653 default:
17658 17654 msgp = "Not STATUS_GOOD on REQUEST_SENSE\n";
17659 17655 goto sense_failed;
17660 17656 }
17661 17657
17662 17658 /*
17663 17659 * See if we got the minimum required amount of sense data.
17664 17660 * Note: We are assuming the returned sense data is SENSE_LENGTH bytes
17665 17661 * or less.
17666 17662 */
17667 17663 if (((xp->xb_sense_state & STATE_XFERRED_DATA) == 0) ||
17668 17664 (actual_len == 0)) {
17669 17665 msgp = "Request Sense couldn't get sense data\n";
17670 17666 goto sense_failed;
17671 17667 }
17672 17668
17673 17669 if (actual_len < SUN_MIN_SENSE_LENGTH) {
17674 17670 msgp = "Not enough sense information\n";
17675 17671 /* Mark the ssc_flags for detecting invalid sense data */
17676 17672 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17677 17673 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17678 17674 "sense-data");
17679 17675 }
17680 17676 goto sense_failed;
17681 17677 }
17682 17678
17683 17679 /*
17684 17680 * We require the extended sense data
17685 17681 */
17686 17682 esp = (struct scsi_extended_sense *)xp->xb_sense_data;
17687 17683 if (esp->es_class != CLASS_EXTENDED_SENSE) {
17688 17684 if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
17689 17685 static char tmp[8];
17690 17686 static char buf[148];
17691 17687 char *p = (char *)(xp->xb_sense_data);
17692 17688 int i;
17693 17689
17694 17690 mutex_enter(&sd_sense_mutex);
17695 17691 (void) strcpy(buf, "undecodable sense information:");
17696 17692 for (i = 0; i < actual_len; i++) {
17697 17693 (void) sprintf(tmp, " 0x%x", *(p++)&0xff);
17698 17694 (void) strcpy(&buf[strlen(buf)], tmp);
17699 17695 }
17700 17696 i = strlen(buf);
17701 17697 (void) strcpy(&buf[i], "-(assumed fatal)\n");
17702 17698
17703 17699 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
17704 17700 scsi_log(SD_DEVINFO(un), sd_label,
17705 17701 CE_WARN, buf);
17706 17702 }
17707 17703 mutex_exit(&sd_sense_mutex);
17708 17704 }
17709 17705
17710 17706 /* Mark the ssc_flags for detecting invalid sense data */
17711 17707 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17712 17708 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17713 17709 "sense-data");
17714 17710 }
17715 17711
17716 17712 /* Note: Legacy behavior, fail the command with no retry */
17717 17713 sd_return_failed_command(un, bp, EIO);
17718 17714 return (SD_SENSE_DATA_IS_INVALID);
17719 17715 }
17720 17716
17721 17717 /*
17722 17718  * Check that es_code is valid (es_class concatenated with es_code
17723 17719  * makes up the "response code" field). es_class will always be 7,
17724 17720  * so make sure es_code is 0, 1, 2, 3 or 0xf; es_code indicates the
17725 17721  * sense data format.
17726 17722 */
17727 17723 if ((esp->es_code != CODE_FMT_FIXED_CURRENT) &&
17728 17724 (esp->es_code != CODE_FMT_FIXED_DEFERRED) &&
17729 17725 (esp->es_code != CODE_FMT_DESCR_CURRENT) &&
17730 17726 (esp->es_code != CODE_FMT_DESCR_DEFERRED) &&
17731 17727 (esp->es_code != CODE_FMT_VENDOR_SPECIFIC)) {
17732 17728 /* Mark the ssc_flags for detecting invalid sense data */
17733 17729 if (!(xp->xb_pkt_flags & SD_XB_USCSICMD)) {
17734 17730 sd_ssc_set_info(sscp, SSC_FLAGS_INVALID_SENSE, 0,
17735 17731 "sense-data");
17736 17732 }
17737 17733 goto sense_failed;
17738 17734 }
17739 17735
17740 17736 return (SD_SENSE_DATA_IS_VALID);
17741 17737
17742 17738 sense_failed:
17743 17739 /*
17744 17740 * If the request sense failed (for whatever reason), attempt
17745 17741 * to retry the original command.
17746 17742 */
17747 17743 #if defined(__i386) || defined(__amd64)
17748 17744 /*
17749 17745  * SD_RETRY_DELAY is conditionally compiled (#if fibre) in
17750 17746  * sddef.h for the SPARC platform, while x86 uses one binary
17751 17747  * for both SCSI and FC.
17752 17748  * The SD_RETRY_DELAY value needs to be adjusted here
17753 17749  * whenever SD_RETRY_DELAY changes in sddef.h.
17754 17750 */
17755 17751 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17756 17752 sd_print_sense_failed_msg, msgp, EIO,
17757 17753 	    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0, NULL);
17758 17754 #else
17759 17755 sd_retry_command(un, bp, SD_RETRIES_STANDARD,
17760 17756 sd_print_sense_failed_msg, msgp, EIO, SD_RETRY_DELAY, NULL);
17761 17757 #endif
17762 17758
17763 17759 return (SD_SENSE_DATA_IS_INVALID);
17764 17760 }
17765 17761
17766 17762 /*
17767 17763 * Function: sd_decode_sense
17768 17764 *
17769 17765 * Description: Take recovery action(s) when SCSI Sense Data is received.
17770 17766 *
17771 17767 * Context: Interrupt context.
17772 17768 */
17773 17769
17774 17770 static void
17775 17771 sd_decode_sense(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
17776 17772 struct scsi_pkt *pktp)
17777 17773 {
17778 17774 uint8_t sense_key;
17779 17775
17780 17776 ASSERT(un != NULL);
17781 17777 ASSERT(mutex_owned(SD_MUTEX(un)));
17782 17778 ASSERT(bp != NULL);
17783 17779 ASSERT(bp != un->un_rqs_bp);
17784 17780 ASSERT(xp != NULL);
17785 17781 ASSERT(pktp != NULL);
17786 17782
17787 17783 sense_key = scsi_sense_key(xp->xb_sense_data);
17788 17784
17789 17785 switch (sense_key) {
17790 17786 case KEY_NO_SENSE:
17791 17787 sd_sense_key_no_sense(un, bp, xp, pktp);
17792 17788 break;
17793 17789 case KEY_RECOVERABLE_ERROR:
17794 17790 sd_sense_key_recoverable_error(un, xp->xb_sense_data,
17795 17791 bp, xp, pktp);
17796 17792 break;
17797 17793 case KEY_NOT_READY:
17798 17794 sd_sense_key_not_ready(un, xp->xb_sense_data,
17799 17795 bp, xp, pktp);
17800 17796 break;
17801 17797 case KEY_MEDIUM_ERROR:
17802 17798 case KEY_HARDWARE_ERROR:
17803 17799 sd_sense_key_medium_or_hardware_error(un,
17804 17800 xp->xb_sense_data, bp, xp, pktp);
17805 17801 break;
17806 17802 case KEY_ILLEGAL_REQUEST:
17807 17803 sd_sense_key_illegal_request(un, bp, xp, pktp);
17808 17804 break;
17809 17805 case KEY_UNIT_ATTENTION:
17810 17806 sd_sense_key_unit_attention(un, xp->xb_sense_data,
17811 17807 bp, xp, pktp);
17812 17808 break;
17813 17809 case KEY_WRITE_PROTECT:
17814 17810 case KEY_VOLUME_OVERFLOW:
17815 17811 case KEY_MISCOMPARE:
17816 17812 sd_sense_key_fail_command(un, bp, xp, pktp);
17817 17813 break;
17818 17814 case KEY_BLANK_CHECK:
17819 17815 sd_sense_key_blank_check(un, bp, xp, pktp);
17820 17816 break;
17821 17817 case KEY_ABORTED_COMMAND:
17822 17818 sd_sense_key_aborted_command(un, bp, xp, pktp);
17823 17819 break;
17824 17820 case KEY_VENDOR_UNIQUE:
17825 17821 case KEY_COPY_ABORTED:
17826 17822 case KEY_EQUAL:
17827 17823 case KEY_RESERVED:
17828 17824 default:
17829 17825 sd_sense_key_default(un, xp->xb_sense_data,
17830 17826 bp, xp, pktp);
17831 17827 break;
17832 17828 }
17833 17829 }
17834 17830
17835 17831
17836 17832 /*
17837 17833 * Function: sd_dump_memory
17838 17834 *
17839 17835 * Description: Debug logging routine to print the contents of a user provided
17840 17836 * buffer. The output of the buffer is broken up into 256 byte
17841 17837  * segments due to a size constraint of the scsi_log
17842 17838  * implementation.
17843 17839 *
17844 17840 * Arguments: un - ptr to softstate
17845 17841 * comp - component mask
17846 17842  * title - "title" string to precede data when printed
17847 17843 * data - ptr to data block to be printed
17848 17844 * len - size of data block to be printed
17849 17845 * fmt - SD_LOG_HEX (use 0x%02x format) or SD_LOG_CHAR (use %c)
17850 17846 *
17851 17847 * Context: May be called from interrupt context
17852 17848 */
17853 17849
17854 17850 #define SD_DUMP_MEMORY_BUF_SIZE 256
17855 17851
17856 17852 static char *sd_dump_format_string[] = {
17857 17853 " 0x%02x",
17858 17854 " %c"
17859 17855 };
17860 17856
17861 17857 static void
17862 17858 sd_dump_memory(struct sd_lun *un, uint_t comp, char *title, uchar_t *data,
17863 17859 int len, int fmt)
17864 17860 {
17865 17861 int i, j;
17866 17862 int avail_count;
17867 17863 int start_offset;
17868 17864 int end_offset;
17869 17865 size_t entry_len;
17870 17866 char *bufp;
17871 17867 char *local_buf;
17872 17868 char *format_string;
17873 17869
17874 17870 ASSERT((fmt == SD_LOG_HEX) || (fmt == SD_LOG_CHAR));
17875 17871
17876 17872 /*
17877 17873 * In the debug version of the driver, this function is called from a
17878 17874 * number of places which are NOPs in the release driver.
17879 17875 * The debug driver therefore has additional methods of filtering
17880 17876 * debug output.
17881 17877 */
17882 17878 #ifdef SDDEBUG
17883 17879 /*
17884 17880 * In the debug version of the driver we can reduce the amount of debug
17885 17881 * messages by setting sd_error_level to something other than
17886 17882 * SCSI_ERR_ALL and clearing bits in sd_level_mask and
17887 17883 * sd_component_mask.
17888 17884 */
17889 17885 if (((sd_level_mask & (SD_LOGMASK_DUMP_MEM | SD_LOGMASK_DIAG)) == 0) ||
17890 17886 (sd_error_level != SCSI_ERR_ALL)) {
17891 17887 return;
17892 17888 }
17893 17889 if (((sd_component_mask & comp) == 0) ||
17894 17890 (sd_error_level != SCSI_ERR_ALL)) {
17895 17891 return;
17896 17892 }
17897 17893 #else
17898 17894 if (sd_error_level != SCSI_ERR_ALL) {
17899 17895 return;
17900 17896 }
17901 17897 #endif
17902 17898
17903 17899 local_buf = kmem_zalloc(SD_DUMP_MEMORY_BUF_SIZE, KM_SLEEP);
17904 17900 bufp = local_buf;
17905 17901 /*
17906 17902 * Available length is the length of local_buf[], minus the
17907 17903 * length of the title string, minus one for the ":", minus
17908 17904 * one for the newline, minus one for the NULL terminator.
17909 17905 * This gives the #bytes available for holding the printed
17910 17906 * values from the given data buffer.
17911 17907 */
17912 17908 if (fmt == SD_LOG_HEX) {
17913 17909 format_string = sd_dump_format_string[0];
17914 17910 } else /* SD_LOG_CHAR */ {
17915 17911 format_string = sd_dump_format_string[1];
17916 17912 }
17917 17913 /*
17918 17914 * Available count is the number of elements from the given
17919 17915 * data buffer that we can fit into the available length.
17920 17916 * This is based upon the size of the format string used.
17921 17917 * Make one entry and find it's size.
17922 17918  * Make one entry and find its size.
17923 17919 (void) sprintf(bufp, format_string, data[0]);
17924 17920 entry_len = strlen(bufp);
17925 17921 avail_count = (SD_DUMP_MEMORY_BUF_SIZE - strlen(title) - 3) / entry_len;
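	/*
	 * Worked example (illustrative): with the SD_LOG_HEX format each
	 * entry prints as " 0xNN" (5 chars), so a title such as
	 * "Sense Data" (10 chars) gives avail_count =
	 * (256 - 10 - 3) / 5 = 48 data bytes per log line.
	 */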
17926 17922
17927 17923 j = 0;
17928 17924 while (j < len) {
17929 17925 bufp = local_buf;
17930 17926 bzero(bufp, SD_DUMP_MEMORY_BUF_SIZE);
17931 17927 start_offset = j;
17932 17928
17933 17929 end_offset = start_offset + avail_count;
17934 17930
17935 17931 (void) sprintf(bufp, "%s:", title);
17936 17932 bufp += strlen(bufp);
17937 17933 for (i = start_offset; ((i < end_offset) && (j < len));
17938 17934 i++, j++) {
17939 17935 (void) sprintf(bufp, format_string, data[i]);
17940 17936 bufp += entry_len;
17941 17937 }
17942 17938 (void) sprintf(bufp, "\n");
17943 17939
17944 17940 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE, "%s", local_buf);
17945 17941 }
17946 17942 kmem_free(local_buf, SD_DUMP_MEMORY_BUF_SIZE);
17947 17943 }
17948 17944
17949 17945 /*
17950 17946 * Function: sd_print_sense_msg
17951 17947 *
17952 17948 * Description: Log a message based upon the given sense data.
17953 17949 *
17954 17950 * Arguments: un - ptr to associated softstate
17955 17951 * bp - ptr to buf(9S) for the command
17956 17952 * arg - ptr to associate sd_sense_info struct
17957 17953 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
17958 17954 * or SD_NO_RETRY_ISSUED
17959 17955 *
17960 17956 * Context: May be called from interrupt context
17961 17957 */
17962 17958
17963 17959 static void
17964 17960 sd_print_sense_msg(struct sd_lun *un, struct buf *bp, void *arg, int code)
17965 17961 {
17966 17962 struct sd_xbuf *xp;
17967 17963 struct scsi_pkt *pktp;
17968 17964 uint8_t *sensep;
17969 17965 daddr_t request_blkno;
17970 17966 diskaddr_t err_blkno;
17971 17967 int severity;
17972 17968 int pfa_flag;
17973 17969 extern struct scsi_key_strings scsi_cmds[];
17974 17970
17975 17971 ASSERT(un != NULL);
17976 17972 ASSERT(mutex_owned(SD_MUTEX(un)));
17977 17973 ASSERT(bp != NULL);
17978 17974 xp = SD_GET_XBUF(bp);
17979 17975 ASSERT(xp != NULL);
17980 17976 pktp = SD_GET_PKTP(bp);
17981 17977 ASSERT(pktp != NULL);
17982 17978 ASSERT(arg != NULL);
17983 17979
17984 17980 severity = ((struct sd_sense_info *)(arg))->ssi_severity;
17985 17981 pfa_flag = ((struct sd_sense_info *)(arg))->ssi_pfa_flag;
17986 17982
17987 17983 if ((code == SD_DELAYED_RETRY_ISSUED) ||
17988 17984 (code == SD_IMMEDIATE_RETRY_ISSUED)) {
17989 17985 severity = SCSI_ERR_RETRYABLE;
17990 17986 }
17991 17987
17992 17988 /* Use absolute block number for the request block number */
17993 17989 request_blkno = xp->xb_blkno;
17994 17990
17995 17991 /*
17996 17992 * Now try to get the error block number from the sense data
17997 17993 */
17998 17994 sensep = xp->xb_sense_data;
17999 17995
18000 17996 if (scsi_sense_info_uint64(sensep, SENSE_LENGTH,
18001 17997 (uint64_t *)&err_blkno)) {
18002 17998 /*
18003 17999 * We retrieved the error block number from the information
18004 18000 * portion of the sense data.
18005 18001 *
18006 18002 * For USCSI commands we are better off using the error
18007 18003 * block no. as the requested block no. (This is the best
18008 18004 * we can estimate.)
18009 18005 */
18010 18006 if ((SD_IS_BUFIO(xp) == FALSE) &&
18011 18007 ((pktp->pkt_flags & FLAG_SILENT) == 0)) {
18012 18008 request_blkno = err_blkno;
18013 18009 }
18014 18010 } else {
18015 18011 /*
18016 18012 * Without the es_valid bit set (for fixed format) or an
18017 18013 * information descriptor (for descriptor format) we cannot
18018 18014 * be certain of the error blkno, so just use the
18019 18015 * request_blkno.
18020 18016 */
18021 18017 err_blkno = (diskaddr_t)request_blkno;
18022 18018 }
18023 18019
18024 18020 /*
18025 18021 * The following will log the buffer contents for the release driver
18026 18022 * if the SD_LOGMASK_DIAG bit of sd_level_mask is set, or the error
18027 18023 * level is set to verbose.
18028 18024 */
18029 18025 sd_dump_memory(un, SD_LOG_IO, "Failed CDB",
18030 18026 (uchar_t *)pktp->pkt_cdbp, CDB_SIZE, SD_LOG_HEX);
18031 18027 sd_dump_memory(un, SD_LOG_IO, "Sense Data",
18032 18028 (uchar_t *)sensep, SENSE_LENGTH, SD_LOG_HEX);
18033 18029
18034 18030 if (pfa_flag == FALSE) {
18035 18031 /* This is normally only set for USCSI */
18036 18032 if ((pktp->pkt_flags & FLAG_SILENT) != 0) {
18037 18033 return;
18038 18034 }
18039 18035
18040 18036 if ((SD_IS_BUFIO(xp) == TRUE) &&
18041 18037 (((sd_level_mask & SD_LOGMASK_DIAG) == 0) &&
18042 18038 (severity < sd_error_level))) {
18043 18039 return;
18044 18040 }
18045 18041 }
18046 18042 /*
18047 18043 	 * Check for Sonoma failover and keep a count of how many I/Os have failed.
18048 18044 */
18049 18045 if ((SD_IS_LSI(un)) &&
18050 18046 (scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) &&
18051 18047 (scsi_sense_asc(sensep) == 0x94) &&
18052 18048 (scsi_sense_ascq(sensep) == 0x01)) {
18053 18049 un->un_sonoma_failure_count++;
18054 18050 if (un->un_sonoma_failure_count > 1) {
18055 18051 return;
18056 18052 }
18057 18053 }
18058 18054
18059 18055 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP ||
18060 18056 ((scsi_sense_key(sensep) == KEY_RECOVERABLE_ERROR) &&
18061 18057 (pktp->pkt_resid == 0))) {
18062 18058 scsi_vu_errmsg(SD_SCSI_DEVP(un), pktp, sd_label, severity,
18063 18059 request_blkno, err_blkno, scsi_cmds,
18064 18060 (struct scsi_extended_sense *)sensep,
18065 18061 un->un_additional_codes, NULL);
18066 18062 }
18067 18063 }
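
/*
 * A minimal userland sketch (outside the diff) of the fixed-format decode
 * that scsi_sense_info_uint64() performs for the err_blkno lookup above.
 * Offsets follow the SPC-3 fixed sense format; sense_info_uint64() and the
 * sample buffer are hypothetical, not sd.c identifiers.
 */
#include <stdint.h>
#include <stdio.h>

static int
sense_info_uint64(const uint8_t *sp, int len, uint64_t *info)
{
	if (len < 7)
		return (0);
	/* Fixed format: response code 0x70/0x71, bit 7 is the VALID bit. */
	if (((sp[0] & 0x7f) == 0x70 || (sp[0] & 0x7f) == 0x71) &&
	    (sp[0] & 0x80) != 0) {
		/* Bytes 3-6 hold the big-endian INFORMATION field. */
		*info = ((uint64_t)sp[3] << 24) | ((uint64_t)sp[4] << 16) |
		    ((uint64_t)sp[5] << 8) | (uint64_t)sp[6];
		return (1);
	}
	return (0);	/* descriptor-format decode omitted in this sketch */
}

int
main(void)
{
	/* MEDIUM ERROR at block 123456: 0xf0 = fixed format + VALID. */
	uint8_t sense[18] = { 0xf0, 0, 0x03, 0x00, 0x01, 0xe2, 0x40 };
	uint64_t blkno;

	if (sense_info_uint64(sense, sizeof (sense), &blkno))
		(void) printf("error block: %llu\n",
		    (unsigned long long)blkno);
	return (0);
}
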
18068 18064
18069 18065 /*
18070 18066 * Function: sd_sense_key_no_sense
18071 18067 *
18072 18068 * Description: Recovery action when sense data was not received.
18073 18069 *
18074 18070 * Context: May be called from interrupt context
18075 18071 */
18076 18072
18077 18073 static void
18078 18074 sd_sense_key_no_sense(struct sd_lun *un, struct buf *bp,
18079 18075 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18080 18076 {
18081 18077 struct sd_sense_info si;
18082 18078
18083 18079 ASSERT(un != NULL);
18084 18080 ASSERT(mutex_owned(SD_MUTEX(un)));
18085 18081 ASSERT(bp != NULL);
18086 18082 ASSERT(xp != NULL);
18087 18083 ASSERT(pktp != NULL);
18088 18084
18089 18085 si.ssi_severity = SCSI_ERR_FATAL;
18090 18086 si.ssi_pfa_flag = FALSE;
18091 18087
18092 18088 SD_UPDATE_ERRSTATS(un, sd_softerrs);
18093 18089
18094 18090 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18095 18091 &si, EIO, (clock_t)0, NULL);
18096 18092 }
18097 18093
18098 18094
18099 18095 /*
18100 18096 * Function: sd_sense_key_recoverable_error
18101 18097 *
18102 18098 * Description: Recovery actions for a SCSI "Recovered Error" sense key.
18103 18099 *
18104 18100 * Context: May be called from interrupt context
18105 18101 */
18106 18102
18107 18103 static void
18108 18104 sd_sense_key_recoverable_error(struct sd_lun *un,
18109 18105 uint8_t *sense_datap,
18110 18106 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18111 18107 {
18112 18108 struct sd_sense_info si;
18113 18109 uint8_t asc = scsi_sense_asc(sense_datap);
18114 18110
18115 18111 ASSERT(un != NULL);
18116 18112 ASSERT(mutex_owned(SD_MUTEX(un)));
18117 18113 ASSERT(bp != NULL);
18118 18114 ASSERT(xp != NULL);
18119 18115 ASSERT(pktp != NULL);
18120 18116
18121 18117 /*
18122 18118 * 0x5D: FAILURE PREDICTION THRESHOLD EXCEEDED
18123 18119 */
18124 18120 if ((asc == 0x5D) && (sd_report_pfa != 0)) {
18125 18121 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
18126 18122 si.ssi_severity = SCSI_ERR_INFO;
18127 18123 si.ssi_pfa_flag = TRUE;
18128 18124 } else {
18129 18125 SD_UPDATE_ERRSTATS(un, sd_softerrs);
18130 18126 SD_UPDATE_ERRSTATS(un, sd_rq_recov_err);
18131 18127 si.ssi_severity = SCSI_ERR_RECOVERED;
18132 18128 si.ssi_pfa_flag = FALSE;
18133 18129 }
18134 18130
18135 18131 if (pktp->pkt_resid == 0) {
18136 18132 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18137 18133 sd_return_command(un, bp);
18138 18134 return;
18139 18135 }
18140 18136
18141 18137 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18142 18138 &si, EIO, (clock_t)0, NULL);
18143 18139 }
18144 18140
18145 18141
18146 18142
18147 18143
18148 18144 /*
18149 18145 * Function: sd_sense_key_not_ready
18150 18146 *
18151 18147 * Description: Recovery actions for a SCSI "Not Ready" sense key.
18152 18148 *
18153 18149 * Context: May be called from interrupt context
18154 18150 */
18155 18151
18156 18152 static void
18157 18153 sd_sense_key_not_ready(struct sd_lun *un,
18158 18154 uint8_t *sense_datap,
18159 18155 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18160 18156 {
18161 18157 struct sd_sense_info si;
18162 18158 uint8_t asc = scsi_sense_asc(sense_datap);
18163 18159 uint8_t ascq = scsi_sense_ascq(sense_datap);
18164 18160
18165 18161 ASSERT(un != NULL);
18166 18162 ASSERT(mutex_owned(SD_MUTEX(un)));
18167 18163 ASSERT(bp != NULL);
18168 18164 ASSERT(xp != NULL);
18169 18165 ASSERT(pktp != NULL);
18170 18166
18171 18167 si.ssi_severity = SCSI_ERR_FATAL;
18172 18168 si.ssi_pfa_flag = FALSE;
18173 18169
18174 18170 /*
18175 18171 * Update error stats after first NOT READY error. Disks may have
18176 18172 * been powered down and may need to be restarted. For CDROMs,
18177 18173 * report NOT READY errors only if media is present.
18178 18174 */
18179 18175 if ((ISCD(un) && (asc == 0x3A)) ||
18180 18176 (xp->xb_nr_retry_count > 0)) {
18181 18177 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18182 18178 SD_UPDATE_ERRSTATS(un, sd_rq_ntrdy_err);
18183 18179 }
18184 18180
18185 18181 /*
18186 18182 * Just fail if the "not ready" retry limit has been reached.
18187 18183 */
18188 18184 if (xp->xb_nr_retry_count >= un->un_notready_retry_count) {
18189 18185 /* Special check for error message printing for removables. */
18190 18186 if (un->un_f_has_removable_media && (asc == 0x04) &&
18191 18187 (ascq >= 0x04)) {
18192 18188 si.ssi_severity = SCSI_ERR_ALL;
18193 18189 }
18194 18190 goto fail_command;
18195 18191 }
18196 18192
18197 18193 /*
18198 18194 * Check the ASC and ASCQ in the sense data as needed, to determine
18199 18195 * what to do.
18200 18196 */
18201 18197 switch (asc) {
18202 18198 case 0x04: /* LOGICAL UNIT NOT READY */
18203 18199 /*
18204 18200 		 * Disk drives that don't spin up result in a very long delay
18205 18201 		 * in format with no warning messages. We will log a message
18206 18202 		 * if the error level is set to verbose.
18207 18203 */
18208 18204 if (sd_error_level < SCSI_ERR_RETRYABLE) {
18209 18205 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18210 18206 "logical unit not ready, resetting disk\n");
18211 18207 }
18212 18208
18213 18209 /*
18214 18210 * There are different requirements for CDROMs and disks for
18215 18211 		 * the number of retries. If a CD-ROM reports this, it is
18216 18212 		 * probably reading the TOC and is in the process of getting
18217 18213 		 * ready, so we should keep trying for a long time to make
18218 18214 		 * sure that all types of media are taken into account (for
18219 18215 		 * some media the drive takes a long time to read the TOC). For
18220 18216 * disks we do not want to retry this too many times as this
18221 18217 * can cause a long hang in format when the drive refuses to
18222 18218 * spin up (a very common failure).
18223 18219 */
18224 18220 switch (ascq) {
18225 18221 case 0x00: /* LUN NOT READY, CAUSE NOT REPORTABLE */
18226 18222 /*
18227 18223 * Disk drives frequently refuse to spin up which
18228 18224 * results in a very long hang in format without
18229 18225 * warning messages.
18230 18226 *
18231 18227 * Note: This code preserves the legacy behavior of
18232 18228 * comparing xb_nr_retry_count against zero for fibre
18233 18229 * channel targets instead of comparing against the
18234 18230 * un_reset_retry_count value. The reason for this
18235 18231 * discrepancy has been so utterly lost beneath the
18236 18232 * Sands of Time that even Indiana Jones could not
18237 18233 * find it.
18238 18234 */
18239 18235 if (un->un_f_is_fibre == TRUE) {
18240 18236 if (((sd_level_mask & SD_LOGMASK_DIAG) ||
18241 18237 (xp->xb_nr_retry_count > 0)) &&
18242 18238 (un->un_startstop_timeid == NULL)) {
18243 18239 scsi_log(SD_DEVINFO(un), sd_label,
18244 18240 CE_WARN, "logical unit not ready, "
18245 18241 "resetting disk\n");
18246 18242 sd_reset_target(un, pktp);
18247 18243 }
18248 18244 } else {
18249 18245 if (((sd_level_mask & SD_LOGMASK_DIAG) ||
18250 18246 (xp->xb_nr_retry_count >
18251 18247 un->un_reset_retry_count)) &&
18252 18248 (un->un_startstop_timeid == NULL)) {
18253 18249 scsi_log(SD_DEVINFO(un), sd_label,
18254 18250 CE_WARN, "logical unit not ready, "
18255 18251 "resetting disk\n");
18256 18252 sd_reset_target(un, pktp);
18257 18253 }
18258 18254 }
18259 18255 break;
18260 18256
18261 18257 case 0x01: /* LUN IS IN PROCESS OF BECOMING READY */
18262 18258 /*
18263 18259 * If the target is in the process of becoming
18264 18260 * ready, just proceed with the retry. This can
18265 18261 * happen with CD-ROMs that take a long time to
18266 18262 * read TOC after a power cycle or reset.
18267 18263 */
18268 18264 goto do_retry;
18269 18265
18270 18266 	case 0x02:  /* LUN NOT READY, INITIALIZING CMD REQUIRED */
18271 18267 break;
18272 18268
18273 18269 case 0x03: /* LUN NOT READY, MANUAL INTERVENTION REQUIRED */
18274 18270 /*
18275 18271 * Retries cannot help here so just fail right away.
18276 18272 */
18277 18273 goto fail_command;
18278 18274
18279 18275 case 0x88:
18280 18276 /*
18281 18277 * Vendor-unique code for T3/T4: it indicates a
18282 18278 		 * path problem in a multipathed config, but as far as
18283 18279 * the target driver is concerned it equates to a fatal
18284 18280 * error, so we should just fail the command right away
18285 18281 * (without printing anything to the console). If this
18286 18282 * is not a T3/T4, fall thru to the default recovery
18287 18283 * action.
18288 18284 		 * T3/T4 is FC only; no need to check is_fibre.
18289 18285 */
18290 18286 if (SD_IS_T3(un) || SD_IS_T4(un)) {
18291 18287 sd_return_failed_command(un, bp, EIO);
18292 18288 return;
18293 18289 }
18294 18290 /* FALLTHRU */
18295 18291
18296 18292 case 0x04: /* LUN NOT READY, FORMAT IN PROGRESS */
18297 18293 case 0x05: /* LUN NOT READY, REBUILD IN PROGRESS */
18298 18294 case 0x06: /* LUN NOT READY, RECALCULATION IN PROGRESS */
18299 18295 case 0x07: /* LUN NOT READY, OPERATION IN PROGRESS */
18300 18296 case 0x08: /* LUN NOT READY, LONG WRITE IN PROGRESS */
18301 18297 default: /* Possible future codes in SCSI spec? */
18302 18298 /*
18303 18299 * For removable-media devices, do not retry if
18304 18300 * ASCQ > 2 as these result mostly from USCSI commands
18305 18301 * on MMC devices issued to check status of an
18306 18302 * operation initiated in immediate mode. Also for
18307 18303 * ASCQ >= 4 do not print console messages as these
18308 18304 * mainly represent a user-initiated operation
18309 18305 * instead of a system failure.
18310 18306 */
18311 18307 if (un->un_f_has_removable_media) {
18312 18308 si.ssi_severity = SCSI_ERR_ALL;
18313 18309 goto fail_command;
18314 18310 }
18315 18311 break;
18316 18312 }
18317 18313
18318 18314 /*
18319 18315 * As part of our recovery attempt for the NOT READY
18320 18316 * condition, we issue a START STOP UNIT command. However
18321 18317 * we want to wait for a short delay before attempting this
18322 18318 * as there may still be more commands coming back from the
18323 18319 * target with the check condition. To do this we use
18324 18320 * timeout(9F) to call sd_start_stop_unit_callback() after
18325 18321 * the delay interval expires. (sd_start_stop_unit_callback()
18326 18322 * dispatches sd_start_stop_unit_task(), which will issue
18327 18323 	 * the actual START STOP UNIT command.) The delay interval
18328 18324 * is one-half of the delay that we will use to retry the
18329 18325 * command that generated the NOT READY condition.
18330 18326 *
18331 18327 * Note that we could just dispatch sd_start_stop_unit_task()
18332 18328 * from here and allow it to sleep for the delay interval,
18333 18329 * but then we would be tying up the taskq thread
18334 18330 	 * unnecessarily for the duration of the delay.
18335 18331 *
18336 18332 * Do not issue the START STOP UNIT if the current command
18337 18333 * is already a START STOP UNIT.
18338 18334 */
18339 18335 if (pktp->pkt_cdbp[0] == SCMD_START_STOP) {
18340 18336 break;
18341 18337 }
18342 18338
18343 18339 /*
18344 18340 * Do not schedule the timeout if one is already pending.
18345 18341 */
18346 18342 if (un->un_startstop_timeid != NULL) {
18347 18343 SD_INFO(SD_LOG_ERROR, un,
18348 18344 "sd_sense_key_not_ready: restart already issued to"
18349 18345 " %s%d\n", ddi_driver_name(SD_DEVINFO(un)),
18350 18346 ddi_get_instance(SD_DEVINFO(un)));
18351 18347 break;
18352 18348 }
18353 18349
18354 18350 /*
18355 18351 * Schedule the START STOP UNIT command, then queue the command
18356 18352 * for a retry.
18357 18353 *
18358 18354 * Note: A timeout is not scheduled for this retry because we
18359 18355 * want the retry to be serial with the START_STOP_UNIT. The
18360 18356 * retry will be started when the START_STOP_UNIT is completed
18361 18357 * in sd_start_stop_unit_task.
18362 18358 */
18363 18359 un->un_startstop_timeid = timeout(sd_start_stop_unit_callback,
18364 18360 un, un->un_busy_timeout / 2);
18365 18361 xp->xb_nr_retry_count++;
18366 18362 sd_set_retry_bp(un, bp, 0, kstat_waitq_enter);
18367 18363 return;
18368 18364
18369 18365 case 0x05: /* LOGICAL UNIT DOES NOT RESPOND TO SELECTION */
18370 18366 if (sd_error_level < SCSI_ERR_RETRYABLE) {
18371 18367 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18372 18368 "unit does not respond to selection\n");
18373 18369 }
18374 18370 break;
18375 18371
18376 18372 case 0x3A: /* MEDIUM NOT PRESENT */
18377 18373 if (sd_error_level >= SCSI_ERR_FATAL) {
18378 18374 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18379 18375 "Caddy not inserted in drive\n");
18380 18376 }
18381 18377
18382 18378 sr_ejected(un);
18383 18379 un->un_mediastate = DKIO_EJECTED;
18384 18380 /* The state has changed, inform the media watch routines */
18385 18381 cv_broadcast(&un->un_state_cv);
18386 18382 /* Just fail if no media is present in the drive. */
18387 18383 goto fail_command;
18388 18384
18389 18385 default:
18390 18386 if (sd_error_level < SCSI_ERR_RETRYABLE) {
18391 18387 scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
18392 18388 "Unit not Ready. Additional sense code 0x%x\n",
18393 18389 asc);
18394 18390 }
18395 18391 break;
18396 18392 }
18397 18393
18398 18394 do_retry:
18399 18395
18400 18396 /*
18401 18397 * Retry the command, as some targets may report NOT READY for
18402 18398 * several seconds after being reset.
18403 18399 */
18404 18400 xp->xb_nr_retry_count++;
18405 18401 si.ssi_severity = SCSI_ERR_RETRYABLE;
18406 18402 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, sd_print_sense_msg,
18407 18403 &si, EIO, un->un_busy_timeout, NULL);
18408 18404
18409 18405 return;
18410 18406
18411 18407 fail_command:
18412 18408 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18413 18409 sd_return_failed_command(un, bp, EIO);
18414 18410 }
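
/*
 * A minimal sketch (outside the diff) of the deferred START STOP UNIT
 * pattern described above, assuming hypothetical my_* names. timeout(9F)
 * is cheap to arm; a guard field in the style of un_startstop_timeid
 * prevents double-scheduling, and the callback later dispatches the
 * sleepable work from a friendlier context.
 */
#include <sys/types.h>
#include <sys/ksynch.h>
#include <sys/debug.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

struct my_soft {
	kmutex_t	ms_mutex;
	timeout_id_t	ms_timeid;	/* non-NULL while a restart is pending */
};

static void
my_start_unit_cb(void *arg)
{
	struct my_soft *ms = arg;

	mutex_enter(&ms->ms_mutex);
	ms->ms_timeid = NULL;	/* allow a future restart to be scheduled */
	mutex_exit(&ms->ms_mutex);
	/* ... dispatch the real START STOP UNIT work to a taskq here ... */
}

static void
my_schedule_restart(struct my_soft *ms, clock_t busy_ticks)
{
	ASSERT(mutex_owned(&ms->ms_mutex));
	if (ms->ms_timeid == NULL) {
		/* Half the retry delay, per the comment above. */
		ms->ms_timeid = timeout(my_start_unit_cb, ms,
		    busy_ticks / 2);
	}
}
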
18415 18411
18416 18412
18417 18413
18418 18414 /*
18419 18415 * Function: sd_sense_key_medium_or_hardware_error
18420 18416 *
18421 18417 * Description: Recovery actions for a SCSI "Medium Error" or "Hardware Error"
18422 18418 * sense key.
18423 18419 *
18424 18420 * Context: May be called from interrupt context
18425 18421 */
18426 18422
18427 18423 static void
18428 18424 sd_sense_key_medium_or_hardware_error(struct sd_lun *un,
18429 18425 uint8_t *sense_datap,
18430 18426 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18431 18427 {
18432 18428 struct sd_sense_info si;
18433 18429 uint8_t sense_key = scsi_sense_key(sense_datap);
18434 18430 uint8_t asc = scsi_sense_asc(sense_datap);
18435 18431
18436 18432 ASSERT(un != NULL);
18437 18433 ASSERT(mutex_owned(SD_MUTEX(un)));
18438 18434 ASSERT(bp != NULL);
18439 18435 ASSERT(xp != NULL);
18440 18436 ASSERT(pktp != NULL);
18441 18437
18442 18438 si.ssi_severity = SCSI_ERR_FATAL;
18443 18439 si.ssi_pfa_flag = FALSE;
18444 18440
18445 18441 if (sense_key == KEY_MEDIUM_ERROR) {
18446 18442 SD_UPDATE_ERRSTATS(un, sd_rq_media_err);
18447 18443 }
18448 18444
18449 18445 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18450 18446
18451 18447 if ((un->un_reset_retry_count != 0) &&
18452 18448 (xp->xb_retry_count == un->un_reset_retry_count)) {
18453 18449 mutex_exit(SD_MUTEX(un));
18454 18450 /* Do NOT do a RESET_ALL here: too intrusive. (4112858) */
18455 18451 if (un->un_f_allow_bus_device_reset == TRUE) {
18456 18452
18457 18453 boolean_t try_resetting_target = B_TRUE;
18458 18454
18459 18455 /*
18460 18456 			 * We need to handle specific ASC values when we are
18461 18457 			 * handling a KEY_HARDWARE_ERROR. In particular,
18462 18458 			 * taking the default action of resetting the target may
18463 18459 			 * not be the appropriate way to attempt recovery.
18464 18460 			 * Resetting a target because of a single LUN failure
18465 18461 			 * victimizes all LUNs on that target.
18466 18462 			 *
18467 18463 			 * This is true for LSI arrays: if an LSI array
18468 18464 			 * controller returns an ASC of 0x84 (LUN Dead), we
18469 18465 			 * should trust it.
18470 18466 */
18471 18467
18472 18468 if (sense_key == KEY_HARDWARE_ERROR) {
18473 18469 switch (asc) {
18474 18470 case 0x84:
18475 18471 if (SD_IS_LSI(un)) {
18476 18472 try_resetting_target = B_FALSE;
18477 18473 }
18478 18474 break;
18479 18475 default:
18480 18476 break;
18481 18477 }
18482 18478 }
18483 18479
18484 18480 if (try_resetting_target == B_TRUE) {
18485 18481 int reset_retval = 0;
18486 18482 if (un->un_f_lun_reset_enabled == TRUE) {
18487 18483 SD_TRACE(SD_LOG_IO_CORE, un,
18488 18484 "sd_sense_key_medium_or_hardware_"
18489 18485 "error: issuing RESET_LUN\n");
18490 18486 reset_retval =
18491 18487 scsi_reset(SD_ADDRESS(un),
18492 18488 RESET_LUN);
18493 18489 }
18494 18490 if (reset_retval == 0) {
18495 18491 SD_TRACE(SD_LOG_IO_CORE, un,
18496 18492 "sd_sense_key_medium_or_hardware_"
18497 18493 "error: issuing RESET_TARGET\n");
18498 18494 (void) scsi_reset(SD_ADDRESS(un),
18499 18495 RESET_TARGET);
18500 18496 }
18501 18497 }
18502 18498 }
18503 18499 mutex_enter(SD_MUTEX(un));
18504 18500 }
18505 18501
18506 18502 /*
18507 18503 * This really ought to be a fatal error, but we will retry anyway
18508 18504 * as some drives report this as a spurious error.
18509 18505 */
18510 18506 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18511 18507 &si, EIO, (clock_t)0, NULL);
18512 18508 }
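
/*
 * A minimal sketch (outside the diff) of the lock-drop pattern used above.
 * scsi_reset(9F) can block, so the softstate mutex is released around it
 * and reacquired afterward; my_reset_lun_unlocked and its parameters are
 * hypothetical stand-ins, not sd.c identifiers.
 */
#include <sys/ksynch.h>
#include <sys/debug.h>
#include <sys/scsi/scsi.h>

static void
my_reset_lun_unlocked(kmutex_t *mp, struct scsi_address *ap)
{
	ASSERT(mutex_owned(mp));
	mutex_exit(mp);
	(void) scsi_reset(ap, RESET_LUN);	/* may block; no locks held */
	mutex_enter(mp);
	/* Re-validate any state sampled before the drop; it may have changed. */
}
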
18513 18509
18514 18510
18515 18511
18516 18512 /*
18517 18513 * Function: sd_sense_key_illegal_request
18518 18514 *
18519 18515 * Description: Recovery actions for a SCSI "Illegal Request" sense key.
18520 18516 *
18521 18517 * Context: May be called from interrupt context
18522 18518 */
18523 18519
18524 18520 static void
18525 18521 sd_sense_key_illegal_request(struct sd_lun *un, struct buf *bp,
18526 18522 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18527 18523 {
18528 18524 struct sd_sense_info si;
18529 18525
18530 18526 ASSERT(un != NULL);
18531 18527 ASSERT(mutex_owned(SD_MUTEX(un)));
18532 18528 ASSERT(bp != NULL);
18533 18529 ASSERT(xp != NULL);
18534 18530 ASSERT(pktp != NULL);
18535 18531
18536 18532 SD_UPDATE_ERRSTATS(un, sd_rq_illrq_err);
18537 18533
18538 18534 si.ssi_severity = SCSI_ERR_INFO;
18539 18535 si.ssi_pfa_flag = FALSE;
18540 18536
18541 18537 /* Pointless to retry if the target thinks it's an illegal request */
18542 18538 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18543 18539 sd_return_failed_command(un, bp, EIO);
18544 18540 }
18545 18541
18546 18542
18547 18543
18548 18544
18549 18545 /*
18550 18546 * Function: sd_sense_key_unit_attention
18551 18547 *
18552 18548 * Description: Recovery actions for a SCSI "Unit Attention" sense key.
18553 18549 *
18554 18550 * Context: May be called from interrupt context
18555 18551 */
18556 18552
18557 18553 static void
18558 18554 sd_sense_key_unit_attention(struct sd_lun *un,
18559 18555 uint8_t *sense_datap,
18560 18556 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18561 18557 {
18562 18558 /*
18563 18559 * For UNIT ATTENTION we allow retries for one minute. Devices
18564 18560 	 * like Sonoma can keep returning UNIT ATTENTION for close to
18565 18561 	 * a minute under certain conditions.
18566 18562 */
18567 18563 int retry_check_flag = SD_RETRIES_UA;
18568 18564 boolean_t kstat_updated = B_FALSE;
18569 18565 struct sd_sense_info si;
18570 18566 uint8_t asc = scsi_sense_asc(sense_datap);
18571 18567 uint8_t ascq = scsi_sense_ascq(sense_datap);
18572 18568
18573 18569 ASSERT(un != NULL);
18574 18570 ASSERT(mutex_owned(SD_MUTEX(un)));
18575 18571 ASSERT(bp != NULL);
18576 18572 ASSERT(xp != NULL);
18577 18573 ASSERT(pktp != NULL);
18578 18574
18579 18575 si.ssi_severity = SCSI_ERR_INFO;
18580 18576 si.ssi_pfa_flag = FALSE;
18581 18577
18582 18578
18583 18579 switch (asc) {
18584 18580 case 0x5D: /* FAILURE PREDICTION THRESHOLD EXCEEDED */
18585 18581 if (sd_report_pfa != 0) {
18586 18582 SD_UPDATE_ERRSTATS(un, sd_rq_pfa_err);
18587 18583 si.ssi_pfa_flag = TRUE;
18588 18584 retry_check_flag = SD_RETRIES_STANDARD;
18589 18585 goto do_retry;
18590 18586 }
18591 18587
18592 18588 break;
18593 18589
18594 18590 case 0x29: /* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED */
18595 18591 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
18596 18592 un->un_resvd_status |=
18597 18593 (SD_LOST_RESERVE | SD_WANT_RESERVE);
18598 18594 }
18599 18595 #ifdef _LP64
18600 18596 if (un->un_blockcount + 1 > SD_GROUP1_MAX_ADDRESS) {
18601 18597 if (taskq_dispatch(sd_tq, sd_reenable_dsense_task,
18602 18598 un, KM_NOSLEEP) == 0) {
18603 18599 /*
18604 18600 * If we can't dispatch the task we'll just
18605 18601 * live without descriptor sense. We can
18606 18602 * try again on the next "unit attention"
18607 18603 */
18608 18604 SD_ERROR(SD_LOG_ERROR, un,
18609 18605 "sd_sense_key_unit_attention: "
18610 18606 "Could not dispatch "
18611 18607 "sd_reenable_dsense_task\n");
18612 18608 }
18613 18609 }
18614 18610 #endif /* _LP64 */
18615 18611 /* FALLTHRU */
18616 18612
18617 18613 case 0x28: /* NOT READY TO READY CHANGE, MEDIUM MAY HAVE CHANGED */
18618 18614 if (!un->un_f_has_removable_media) {
18619 18615 break;
18620 18616 }
18621 18617
18622 18618 /*
18623 18619 * When we get a unit attention from a removable-media device,
18624 18620 * it may be in a state that will take a long time to recover
18625 18621 * (e.g., from a reset). Since we are executing in interrupt
18626 18622 * context here, we cannot wait around for the device to come
18627 18623 * back. So hand this command off to sd_media_change_task()
18628 18624 * for deferred processing under taskq thread context. (Note
18629 18625 * that the command still may be failed if a problem is
18630 18626 * encountered at a later time.)
18631 18627 */
18632 18628 if (taskq_dispatch(sd_tq, sd_media_change_task, pktp,
18633 18629 KM_NOSLEEP) == 0) {
18634 18630 /*
18635 18631 * Cannot dispatch the request so fail the command.
18636 18632 */
18637 18633 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18638 18634 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
18639 18635 si.ssi_severity = SCSI_ERR_FATAL;
18640 18636 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18641 18637 sd_return_failed_command(un, bp, EIO);
18642 18638 }
18643 18639
18644 18640 /*
18645 18641 		 * If we failed to dispatch sd_media_change_task(), the kstats
18646 18642 		 * were already updated above. If the dispatch succeeded, the
18647 18643 		 * kstats will be updated later if an error is encountered. In
18648 18644 		 * either case, set the kstat_updated flag here.
18649 18645 */
18650 18646 kstat_updated = B_TRUE;
18651 18647
18652 18648 /*
18653 18649 * Either the command has been successfully dispatched to a
18654 18650 * task Q for retrying, or the dispatch failed. In either case
18655 18651 		 * do NOT retry again by calling sd_retry_command. Doing so would
18656 18652 		 * set up two retries of the same command; when one completed and
18657 18653 		 * freed its resources, the other would access freed memory,
18658 18654 		 * a bad thing.
18659 18655 */
18660 18656 return;
18661 18657
18662 18658 default:
18663 18659 break;
18664 18660 }
18665 18661
18666 18662 /*
18667 18663 * ASC ASCQ
18668 18664 * 2A 09 Capacity data has changed
18669 18665 * 2A 01 Mode parameters changed
18670 18666 * 3F 0E Reported luns data has changed
18671 18667 * Arrays that support logical unit expansion should report
18672 18668 	 * capacity changes (2Ah/09). "Mode parameters changed" and
18673 18669 	 * "Reported LUNs data has changed" serve as approximations.
18674 18670 */
18675 18671 if (((asc == 0x2a) && (ascq == 0x09)) ||
18676 18672 ((asc == 0x2a) && (ascq == 0x01)) ||
18677 18673 ((asc == 0x3f) && (ascq == 0x0e))) {
18678 18674 if (taskq_dispatch(sd_tq, sd_target_change_task, un,
18679 18675 KM_NOSLEEP) == 0) {
18680 18676 SD_ERROR(SD_LOG_ERROR, un,
18681 18677 "sd_sense_key_unit_attention: "
18682 18678 "Could not dispatch sd_target_change_task\n");
18683 18679 }
18684 18680 }
18685 18681
18686 18682 /*
18687 18683 * Update kstat if we haven't done that.
18688 18684 	 * Update the kstats if we haven't done so already.
18689 18685 if (!kstat_updated) {
18690 18686 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18691 18687 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
18692 18688 }
18693 18689
18694 18690 do_retry:
18695 18691 sd_retry_command(un, bp, retry_check_flag, sd_print_sense_msg, &si,
18696 18692 EIO, SD_UA_RETRY_DELAY, NULL);
18697 18693 }
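
/*
 * A minimal sketch (outside the diff) of the dispatch-or-fail pattern used
 * above for sd_media_change_task(). my_change_task and my_fail_cmd are
 * hypothetical; the key invariant is that exactly one path (the taskq or
 * the failure path) completes the command, never both.
 */
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/taskq.h>

static void
my_change_task(void *arg)
{
	/* Sleepable, slow recovery work runs here in taskq thread context. */
}

static void
my_defer_or_fail(taskq_t *tq, void *arg, void (*my_fail_cmd)(void *))
{
	if (taskq_dispatch(tq, my_change_task, arg, KM_NOSLEEP) == 0) {
		/*
		 * No taskq entry available in interrupt context; fail the
		 * command now. Never also queue a retry here, or two
		 * completions would race and one would touch freed memory.
		 */
		my_fail_cmd(arg);
	}
}
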
18698 18694
18699 18695
18700 18696
18701 18697 /*
18702 18698 * Function: sd_sense_key_fail_command
18703 18699 *
18704 18700  * Description: Used to fail a command when we don't like the sense key that
18705 18701 * was returned.
18706 18702 *
18707 18703 * Context: May be called from interrupt context
18708 18704 */
18709 18705
18710 18706 static void
18711 18707 sd_sense_key_fail_command(struct sd_lun *un, struct buf *bp,
18712 18708 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18713 18709 {
18714 18710 struct sd_sense_info si;
18715 18711
18716 18712 ASSERT(un != NULL);
18717 18713 ASSERT(mutex_owned(SD_MUTEX(un)));
18718 18714 ASSERT(bp != NULL);
18719 18715 ASSERT(xp != NULL);
18720 18716 ASSERT(pktp != NULL);
18721 18717
18722 18718 si.ssi_severity = SCSI_ERR_FATAL;
18723 18719 si.ssi_pfa_flag = FALSE;
18724 18720
18725 18721 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18726 18722 sd_return_failed_command(un, bp, EIO);
18727 18723 }
18728 18724
18729 18725
18730 18726
18731 18727 /*
18732 18728 * Function: sd_sense_key_blank_check
18733 18729 *
18734 18730 * Description: Recovery actions for a SCSI "Blank Check" sense key.
18735 18731 * Has no monetary connotation.
18736 18732 *
18737 18733 * Context: May be called from interrupt context
18738 18734 */
18739 18735
18740 18736 static void
18741 18737 sd_sense_key_blank_check(struct sd_lun *un, struct buf *bp,
18742 18738 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18743 18739 {
18744 18740 struct sd_sense_info si;
18745 18741
18746 18742 ASSERT(un != NULL);
18747 18743 ASSERT(mutex_owned(SD_MUTEX(un)));
18748 18744 ASSERT(bp != NULL);
18749 18745 ASSERT(xp != NULL);
18750 18746 ASSERT(pktp != NULL);
18751 18747
18752 18748 /*
18753 18749 	 * Blank check is not fatal for removable devices and therefore
18754 18750 	 * does not require a console message.
18755 18751 */
18756 18752 si.ssi_severity = (un->un_f_has_removable_media) ? SCSI_ERR_ALL :
18757 18753 SCSI_ERR_FATAL;
18758 18754 si.ssi_pfa_flag = FALSE;
18759 18755
18760 18756 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
18761 18757 sd_return_failed_command(un, bp, EIO);
18762 18758 }
18763 18759
18764 18760
18765 18761
18766 18762
18767 18763 /*
18768 18764 * Function: sd_sense_key_aborted_command
18769 18765 *
18770 18766 * Description: Recovery actions for a SCSI "Aborted Command" sense key.
18771 18767 *
18772 18768 * Context: May be called from interrupt context
18773 18769 */
18774 18770
18775 18771 static void
18776 18772 sd_sense_key_aborted_command(struct sd_lun *un, struct buf *bp,
18777 18773 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18778 18774 {
18779 18775 struct sd_sense_info si;
18780 18776
18781 18777 ASSERT(un != NULL);
18782 18778 ASSERT(mutex_owned(SD_MUTEX(un)));
18783 18779 ASSERT(bp != NULL);
18784 18780 ASSERT(xp != NULL);
18785 18781 ASSERT(pktp != NULL);
18786 18782
18787 18783 si.ssi_severity = SCSI_ERR_FATAL;
18788 18784 si.ssi_pfa_flag = FALSE;
18789 18785
18790 18786 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18791 18787
18792 18788 /*
18793 18789 * This really ought to be a fatal error, but we will retry anyway
18794 18790 * as some drives report this as a spurious error.
18795 18791 */
18796 18792 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18797 18793 &si, EIO, drv_usectohz(100000), NULL);
18798 18794 }
18799 18795
18800 18796
18801 18797
18802 18798 /*
18803 18799 * Function: sd_sense_key_default
18804 18800 *
18805 18801 * Description: Default recovery action for several SCSI sense keys (basically
18806 18802 * attempts a retry).
18807 18803 *
18808 18804 * Context: May be called from interrupt context
18809 18805 */
18810 18806
18811 18807 static void
18812 18808 sd_sense_key_default(struct sd_lun *un,
18813 18809 uint8_t *sense_datap,
18814 18810 struct buf *bp, struct sd_xbuf *xp, struct scsi_pkt *pktp)
18815 18811 {
18816 18812 struct sd_sense_info si;
18817 18813 uint8_t sense_key = scsi_sense_key(sense_datap);
18818 18814
18819 18815 ASSERT(un != NULL);
18820 18816 ASSERT(mutex_owned(SD_MUTEX(un)));
18821 18817 ASSERT(bp != NULL);
18822 18818 ASSERT(xp != NULL);
18823 18819 ASSERT(pktp != NULL);
18824 18820
18825 18821 SD_UPDATE_ERRSTATS(un, sd_harderrs);
18826 18822
18827 18823 /*
18828 18824 * Undecoded sense key. Attempt retries and hope that will fix
18829 18825 * the problem. Otherwise, we're dead.
18830 18826 */
18831 18827 if ((pktp->pkt_flags & FLAG_SILENT) == 0) {
18832 18828 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18833 18829 "Unhandled Sense Key '%s'\n", sense_keys[sense_key]);
18834 18830 }
18835 18831
18836 18832 si.ssi_severity = SCSI_ERR_FATAL;
18837 18833 si.ssi_pfa_flag = FALSE;
18838 18834
18839 18835 sd_retry_command(un, bp, SD_RETRIES_STANDARD, sd_print_sense_msg,
18840 18836 &si, EIO, (clock_t)0, NULL);
18841 18837 }
18842 18838
18843 18839
18844 18840
18845 18841 /*
18846 18842 * Function: sd_print_retry_msg
18847 18843 *
18848 18844 * Description: Print a message indicating the retry action being taken.
18849 18845 *
18850 18846 * Arguments: un - ptr to associated softstate
18851 18847 * bp - ptr to buf(9S) for the command
18852 18848 * arg - not used.
18853 18849 * flag - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
18854 18850 * or SD_NO_RETRY_ISSUED
18855 18851 *
18856 18852 * Context: May be called from interrupt context
18857 18853 */
18858 18854 /* ARGSUSED */
18859 18855 static void
18860 18856 sd_print_retry_msg(struct sd_lun *un, struct buf *bp, void *arg, int flag)
18861 18857 {
18862 18858 struct sd_xbuf *xp;
18863 18859 struct scsi_pkt *pktp;
18864 18860 char *reasonp;
18865 18861 char *msgp;
18866 18862
18867 18863 ASSERT(un != NULL);
18868 18864 ASSERT(mutex_owned(SD_MUTEX(un)));
18869 18865 ASSERT(bp != NULL);
18870 18866 pktp = SD_GET_PKTP(bp);
18871 18867 ASSERT(pktp != NULL);
18872 18868 xp = SD_GET_XBUF(bp);
18873 18869 ASSERT(xp != NULL);
18874 18870
18875 18871 ASSERT(!mutex_owned(&un->un_pm_mutex));
18876 18872 mutex_enter(&un->un_pm_mutex);
18877 18873 if ((un->un_state == SD_STATE_SUSPENDED) ||
18878 18874 (SD_DEVICE_IS_IN_LOW_POWER(un)) ||
18879 18875 (pktp->pkt_flags & FLAG_SILENT)) {
18880 18876 mutex_exit(&un->un_pm_mutex);
18881 18877 goto update_pkt_reason;
18882 18878 }
18883 18879 mutex_exit(&un->un_pm_mutex);
18884 18880
18885 18881 /*
18886 18882 	 * Suppress messages if they all have the same pkt_reason; with tagged
18887 18883 	 * queueing (TQ), many (up to 256) are returned with the same pkt_reason.
18888 18884 * If we are in panic, then suppress the retry messages.
18889 18885 */
18890 18886 switch (flag) {
18891 18887 case SD_NO_RETRY_ISSUED:
18892 18888 msgp = "giving up";
18893 18889 break;
18894 18890 case SD_IMMEDIATE_RETRY_ISSUED:
18895 18891 case SD_DELAYED_RETRY_ISSUED:
18896 18892 if (ddi_in_panic() || (un->un_state == SD_STATE_OFFLINE) ||
18897 18893 ((pktp->pkt_reason == un->un_last_pkt_reason) &&
18898 18894 (sd_error_level != SCSI_ERR_ALL))) {
18899 18895 return;
18900 18896 }
18901 18897 msgp = "retrying command";
18902 18898 break;
18903 18899 default:
18904 18900 goto update_pkt_reason;
18905 18901 }
18906 18902
18907 18903 reasonp = (((pktp->pkt_statistics & STAT_PERR) != 0) ? "parity error" :
18908 18904 scsi_rname(pktp->pkt_reason));
18909 18905
18910 18906 if (SD_FM_LOG(un) == SD_FM_LOG_NSUP) {
18911 18907 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18912 18908 "SCSI transport failed: reason '%s': %s\n", reasonp, msgp);
18913 18909 }
18914 18910
18915 18911 update_pkt_reason:
18916 18912 /*
18917 18913 * Update un->un_last_pkt_reason with the value in pktp->pkt_reason.
18918 18914 * This is to prevent multiple console messages for the same failure
18919 18915 * condition. Note that un->un_last_pkt_reason is NOT restored if &
18920 18916 * when the command is retried successfully because there still may be
18921 18917 * more commands coming back with the same value of pktp->pkt_reason.
18922 18918 */
18923 18919 if ((pktp->pkt_reason != CMD_CMPLT) || (xp->xb_retry_count == 0)) {
18924 18920 un->un_last_pkt_reason = pktp->pkt_reason;
18925 18921 }
18926 18922 }
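
/*
 * A minimal sketch (outside the diff) of the message-suppression idea
 * above: cache the last pkt_reason logged and stay quiet while it repeats,
 * so a burst of identical transport failures yields a single line. The
 * my_* names are hypothetical, not sd.c identifiers.
 */
#include <stdio.h>

static void
my_log_reason_once(int *my_last_reason, int reason, const char *msgp)
{
	if (reason != *my_last_reason) {
		*my_last_reason = reason;	/* remember what we logged */
		(void) printf("SCSI transport failed: %s\n", msgp);
	}
}
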
18927 18923
18928 18924
18929 18925 /*
18930 18926 * Function: sd_print_cmd_incomplete_msg
18931 18927 *
18932 18928 * Description: Message logging fn. for a SCSA "CMD_INCOMPLETE" pkt_reason.
18933 18929 *
18934 18930 * Arguments: un - ptr to associated softstate
18935 18931 * bp - ptr to buf(9S) for the command
18936 18932 * arg - passed to sd_print_retry_msg()
18937 18933 * code - SD_IMMEDIATE_RETRY_ISSUED, SD_DELAYED_RETRY_ISSUED,
18938 18934 * or SD_NO_RETRY_ISSUED
18939 18935 *
18940 18936 * Context: May be called from interrupt context
18941 18937 */
18942 18938
18943 18939 static void
18944 18940 sd_print_cmd_incomplete_msg(struct sd_lun *un, struct buf *bp, void *arg,
18945 18941 int code)
18946 18942 {
18947 18943 dev_info_t *dip;
18948 18944
18949 18945 ASSERT(un != NULL);
18950 18946 ASSERT(mutex_owned(SD_MUTEX(un)));
18951 18947 ASSERT(bp != NULL);
18952 18948
18953 18949 switch (code) {
18954 18950 case SD_NO_RETRY_ISSUED:
18955 18951 		/* The command failed. Someone turned off this target? */
18956 18952 if (un->un_state != SD_STATE_OFFLINE) {
18957 18953 /*
18958 18954 			 * Suppress the message if we are detaching and the
18959 18955 			 * device has been disconnected.
18960 18956 			 * Note that DEVI_IS_DEVICE_REMOVED is a consolidation-
18961 18957 			 * private interface and not part of the DDI.
18962 18958 */
18963 18959 dip = un->un_sd->sd_dev;
18964 18960 if (!(DEVI_IS_DETACHING(dip) &&
18965 18961 DEVI_IS_DEVICE_REMOVED(dip))) {
18966 18962 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
18967 18963 "disk not responding to selection\n");
18968 18964 }
18969 18965 New_state(un, SD_STATE_OFFLINE);
18970 18966 }
18971 18967 break;
18972 18968
18973 18969 case SD_DELAYED_RETRY_ISSUED:
18974 18970 case SD_IMMEDIATE_RETRY_ISSUED:
18975 18971 default:
18976 18972 /* Command was successfully queued for retry */
18977 18973 sd_print_retry_msg(un, bp, arg, code);
18978 18974 break;
18979 18975 }
18980 18976 }
18981 18977
18982 18978
18983 18979 /*
18984 18980 * Function: sd_pkt_reason_cmd_incomplete
18985 18981 *
18986 18982 * Description: Recovery actions for a SCSA "CMD_INCOMPLETE" pkt_reason.
18987 18983 *
18988 18984 * Context: May be called from interrupt context
18989 18985 */
18990 18986
18991 18987 static void
18992 18988 sd_pkt_reason_cmd_incomplete(struct sd_lun *un, struct buf *bp,
18993 18989 struct sd_xbuf *xp, struct scsi_pkt *pktp)
18994 18990 {
18995 18991 int flag = SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE;
18996 18992
18997 18993 ASSERT(un != NULL);
18998 18994 ASSERT(mutex_owned(SD_MUTEX(un)));
18999 18995 ASSERT(bp != NULL);
19000 18996 ASSERT(xp != NULL);
19001 18997 ASSERT(pktp != NULL);
19002 18998
19003 18999 /* Do not do a reset if selection did not complete */
19004 19000 /* Note: Should this not just check the bit? */
19005 19001 if (pktp->pkt_state != STATE_GOT_BUS) {
19006 19002 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19007 19003 sd_reset_target(un, pktp);
19008 19004 }
19009 19005
19010 19006 /*
19011 19007 * If the target was not successfully selected, then set
19012 19008 * SD_RETRIES_FAILFAST to indicate that we lost communication
19013 19009 * with the target, and further retries and/or commands are
19014 19010 * likely to take a long time.
19015 19011 */
19016 19012 if ((pktp->pkt_state & STATE_GOT_TARGET) == 0) {
19017 19013 flag |= SD_RETRIES_FAILFAST;
19018 19014 }
19019 19015
19020 19016 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19021 19017
19022 19018 sd_retry_command(un, bp, flag,
19023 19019 sd_print_cmd_incomplete_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19024 19020 }
19025 19021
19026 19022
19027 19023
19028 19024 /*
19029 19025 * Function: sd_pkt_reason_cmd_tran_err
19030 19026 *
19031 19027 * Description: Recovery actions for a SCSA "CMD_TRAN_ERR" pkt_reason.
19032 19028 *
19033 19029 * Context: May be called from interrupt context
19034 19030 */
19035 19031
19036 19032 static void
19037 19033 sd_pkt_reason_cmd_tran_err(struct sd_lun *un, struct buf *bp,
19038 19034 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19039 19035 {
19040 19036 ASSERT(un != NULL);
19041 19037 ASSERT(mutex_owned(SD_MUTEX(un)));
19042 19038 ASSERT(bp != NULL);
19043 19039 ASSERT(xp != NULL);
19044 19040 ASSERT(pktp != NULL);
19045 19041
19046 19042 /*
19047 19043 * Do not reset if we got a parity error, or if
19048 19044 * selection did not complete.
19049 19045 */
19050 19046 SD_UPDATE_ERRSTATS(un, sd_harderrs);
19051 19047 /* Note: Should this not just check the bit for pkt_state? */
19052 19048 if (((pktp->pkt_statistics & STAT_PERR) == 0) &&
19053 19049 (pktp->pkt_state != STATE_GOT_BUS)) {
19054 19050 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19055 19051 sd_reset_target(un, pktp);
19056 19052 }
19057 19053
19058 19054 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19059 19055
19060 19056 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
19061 19057 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19062 19058 }
19063 19059
19064 19060
19065 19061
19066 19062 /*
19067 19063 * Function: sd_pkt_reason_cmd_reset
19068 19064 *
19069 19065 * Description: Recovery actions for a SCSA "CMD_RESET" pkt_reason.
19070 19066 *
19071 19067 * Context: May be called from interrupt context
19072 19068 */
19073 19069
19074 19070 static void
19075 19071 sd_pkt_reason_cmd_reset(struct sd_lun *un, struct buf *bp,
19076 19072 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19077 19073 {
19078 19074 ASSERT(un != NULL);
19079 19075 ASSERT(mutex_owned(SD_MUTEX(un)));
19080 19076 ASSERT(bp != NULL);
19081 19077 ASSERT(xp != NULL);
19082 19078 ASSERT(pktp != NULL);
19083 19079
19084 19080 /* The target may still be running the command, so try to reset. */
19085 19081 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19086 19082 sd_reset_target(un, pktp);
19087 19083
19088 19084 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19089 19085
19090 19086 /*
19091 19087 	 * If pkt_reason is CMD_RESET, chances are that this pkt got
19092 19088 * reset because another target on this bus caused it. The target
19093 19089 * that caused it should get CMD_TIMEOUT with pkt_statistics
19094 19090 * of STAT_TIMEOUT/STAT_DEV_RESET.
19095 19091 */
19096 19092
19097 19093 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
19098 19094 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19099 19095 }
19100 19096
19101 19097
19102 19098
19103 19099
19104 19100 /*
19105 19101 * Function: sd_pkt_reason_cmd_aborted
19106 19102 *
19107 19103 * Description: Recovery actions for a SCSA "CMD_ABORTED" pkt_reason.
19108 19104 *
19109 19105 * Context: May be called from interrupt context
19110 19106 */
19111 19107
19112 19108 static void
19113 19109 sd_pkt_reason_cmd_aborted(struct sd_lun *un, struct buf *bp,
19114 19110 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19115 19111 {
19116 19112 ASSERT(un != NULL);
19117 19113 ASSERT(mutex_owned(SD_MUTEX(un)));
19118 19114 ASSERT(bp != NULL);
19119 19115 ASSERT(xp != NULL);
19120 19116 ASSERT(pktp != NULL);
19121 19117
19122 19118 /* The target may still be running the command, so try to reset. */
19123 19119 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19124 19120 sd_reset_target(un, pktp);
19125 19121
19126 19122 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19127 19123
19128 19124 /*
19129 19125 	 * If pkt_reason is CMD_ABORTED, chances are that this pkt got
19130 19126 * aborted because another target on this bus caused it. The target
19131 19127 * that caused it should get CMD_TIMEOUT with pkt_statistics
19132 19128 * of STAT_TIMEOUT/STAT_DEV_RESET.
19133 19129 */
19134 19130
19135 19131 sd_retry_command(un, bp, (SD_RETRIES_VICTIM | SD_RETRIES_ISOLATE),
19136 19132 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19137 19133 }
19138 19134
19139 19135
19140 19136
19141 19137 /*
19142 19138 * Function: sd_pkt_reason_cmd_timeout
19143 19139 *
19144 19140 * Description: Recovery actions for a SCSA "CMD_TIMEOUT" pkt_reason.
19145 19141 *
19146 19142 * Context: May be called from interrupt context
19147 19143 */
19148 19144
19149 19145 static void
19150 19146 sd_pkt_reason_cmd_timeout(struct sd_lun *un, struct buf *bp,
19151 19147 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19152 19148 {
19153 19149 ASSERT(un != NULL);
19154 19150 ASSERT(mutex_owned(SD_MUTEX(un)));
19155 19151 ASSERT(bp != NULL);
19156 19152 ASSERT(xp != NULL);
19157 19153 ASSERT(pktp != NULL);
19158 19154
19159 19155
19160 19156 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19161 19157 sd_reset_target(un, pktp);
19162 19158
19163 19159 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19164 19160
19165 19161 /*
19166 19162 * A command timeout indicates that we could not establish
19167 19163 * communication with the target, so set SD_RETRIES_FAILFAST
19168 19164 * as further retries/commands are likely to take a long time.
19169 19165 */
19170 19166 sd_retry_command(un, bp,
19171 19167 (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE | SD_RETRIES_FAILFAST),
19172 19168 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19173 19169 }
19174 19170
19175 19171
19176 19172
19177 19173 /*
19178 19174 * Function: sd_pkt_reason_cmd_unx_bus_free
19179 19175 *
19180 19176 * Description: Recovery actions for a SCSA "CMD_UNX_BUS_FREE" pkt_reason.
19181 19177 *
19182 19178 * Context: May be called from interrupt context
19183 19179 */
19184 19180
19185 19181 static void
19186 19182 sd_pkt_reason_cmd_unx_bus_free(struct sd_lun *un, struct buf *bp,
19187 19183 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19188 19184 {
19189 19185 void (*funcp)(struct sd_lun *un, struct buf *bp, void *arg, int code);
19190 19186
19191 19187 ASSERT(un != NULL);
19192 19188 ASSERT(mutex_owned(SD_MUTEX(un)));
19193 19189 ASSERT(bp != NULL);
19194 19190 ASSERT(xp != NULL);
19195 19191 ASSERT(pktp != NULL);
19196 19192
19197 19193 SD_UPDATE_ERRSTATS(un, sd_harderrs);
19198 19194 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19199 19195
19200 19196 funcp = ((pktp->pkt_statistics & STAT_PERR) == 0) ?
19201 19197 sd_print_retry_msg : NULL;
19202 19198
19203 19199 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
19204 19200 funcp, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19205 19201 }
19206 19202
19207 19203
19208 19204 /*
19209 19205 * Function: sd_pkt_reason_cmd_tag_reject
19210 19206 *
19211 19207 * Description: Recovery actions for a SCSA "CMD_TAG_REJECT" pkt_reason.
19212 19208 *
19213 19209 * Context: May be called from interrupt context
19214 19210 */
19215 19211
19216 19212 static void
19217 19213 sd_pkt_reason_cmd_tag_reject(struct sd_lun *un, struct buf *bp,
19218 19214 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19219 19215 {
19220 19216 ASSERT(un != NULL);
19221 19217 ASSERT(mutex_owned(SD_MUTEX(un)));
19222 19218 ASSERT(bp != NULL);
19223 19219 ASSERT(xp != NULL);
19224 19220 ASSERT(pktp != NULL);
19225 19221
19226 19222 SD_UPDATE_ERRSTATS(un, sd_harderrs);
19227 19223 pktp->pkt_flags = 0;
19228 19224 un->un_tagflags = 0;
19229 19225 if (un->un_f_opt_queueing == TRUE) {
19230 19226 un->un_throttle = min(un->un_throttle, 3);
19231 19227 } else {
19232 19228 un->un_throttle = 1;
19233 19229 }
19234 19230 mutex_exit(SD_MUTEX(un));
19235 19231 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
19236 19232 mutex_enter(SD_MUTEX(un));
19237 19233
19238 19234 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19239 19235
19240 19236 	/* Legacy behavior: do not check retry counts here. */
19241 19237 sd_retry_command(un, bp, (SD_RETRIES_NOCHECK | SD_RETRIES_ISOLATE),
19242 19238 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19243 19239 }
19244 19240
19245 19241
19246 19242 /*
19247 19243 * Function: sd_pkt_reason_default
19248 19244 *
19249 19245 * Description: Default recovery actions for SCSA pkt_reason values that
19250 19246 * do not have more explicit recovery actions.
19251 19247 *
19252 19248 * Context: May be called from interrupt context
19253 19249 */
19254 19250
19255 19251 static void
19256 19252 sd_pkt_reason_default(struct sd_lun *un, struct buf *bp,
19257 19253 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19258 19254 {
19259 19255 ASSERT(un != NULL);
19260 19256 ASSERT(mutex_owned(SD_MUTEX(un)));
19261 19257 ASSERT(bp != NULL);
19262 19258 ASSERT(xp != NULL);
19263 19259 ASSERT(pktp != NULL);
19264 19260
19265 19261 SD_UPDATE_ERRSTATS(un, sd_transerrs);
19266 19262 sd_reset_target(un, pktp);
19267 19263
19268 19264 SD_UPDATE_RESERVATION_STATUS(un, pktp);
19269 19265
19270 19266 sd_retry_command(un, bp, (SD_RETRIES_STANDARD | SD_RETRIES_ISOLATE),
19271 19267 sd_print_retry_msg, NULL, EIO, SD_RESTART_TIMEOUT, NULL);
19272 19268 }
19273 19269
19274 19270
19275 19271
19276 19272 /*
19277 19273 * Function: sd_pkt_status_check_condition
19278 19274 *
19279 19275 * Description: Recovery actions for a "STATUS_CHECK" SCSI command status.
19280 19276 *
19281 19277 * Context: May be called from interrupt context
19282 19278 */
19283 19279
19284 19280 static void
19285 19281 sd_pkt_status_check_condition(struct sd_lun *un, struct buf *bp,
19286 19282 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19287 19283 {
19288 19284 ASSERT(un != NULL);
19289 19285 ASSERT(mutex_owned(SD_MUTEX(un)));
19290 19286 ASSERT(bp != NULL);
19291 19287 ASSERT(xp != NULL);
19292 19288 ASSERT(pktp != NULL);
19293 19289
19294 19290 SD_TRACE(SD_LOG_IO, un, "sd_pkt_status_check_condition: "
19295 19291 "entry: buf:0x%p xp:0x%p\n", bp, xp);
19296 19292
19297 19293 /*
19298 19294 * If ARQ is NOT enabled, then issue a REQUEST SENSE command (the
19299 19295 * command will be retried after the request sense). Otherwise, retry
19300 19296 * the command. Note: we are issuing the request sense even though the
19301 19297 * retry limit may have been reached for the failed command.
19302 19298 */
19303 19299 if (un->un_f_arq_enabled == FALSE) {
19304 19300 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
19305 19301 "no ARQ, sending request sense command\n");
19306 19302 sd_send_request_sense_command(un, bp, pktp);
19307 19303 } else {
19308 19304 SD_INFO(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: "
19309 19305 		    "ARQ, retrying request sense command\n");
19310 19306 #if defined(__i386) || defined(__amd64)
19311 19307 /*
19312 19308 		 * The SD_RETRY_DELAY value needs to be adjusted here
19313 19309 		 * whenever SD_RETRY_DELAY changes in sddef.h.
19314 19310 */
19315 19311 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
19316 19312 		    un->un_f_is_fibre ? drv_usectohz(100000) : (clock_t)0,
19317 19313 NULL);
19318 19314 #else
19319 19315 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL,
19320 19316 EIO, SD_RETRY_DELAY, NULL);
19321 19317 #endif
19322 19318 }
19323 19319
19324 19320 SD_TRACE(SD_LOG_IO_CORE, un, "sd_pkt_status_check_condition: exit\n");
19325 19321 }
19326 19322
19327 19323
19328 19324 /*
19329 19325 * Function: sd_pkt_status_busy
19330 19326 *
19331 19327 * Description: Recovery actions for a "STATUS_BUSY" SCSI command status.
19332 19328 *
19333 19329 * Context: May be called from interrupt context
19334 19330 */
19335 19331
19336 19332 static void
19337 19333 sd_pkt_status_busy(struct sd_lun *un, struct buf *bp, struct sd_xbuf *xp,
19338 19334 struct scsi_pkt *pktp)
19339 19335 {
19340 19336 ASSERT(un != NULL);
19341 19337 ASSERT(mutex_owned(SD_MUTEX(un)));
19342 19338 ASSERT(bp != NULL);
19343 19339 ASSERT(xp != NULL);
19344 19340 ASSERT(pktp != NULL);
19345 19341
19346 19342 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19347 19343 "sd_pkt_status_busy: entry\n");
19348 19344
19349 19345 /* If retries are exhausted, just fail the command. */
19350 19346 if (xp->xb_retry_count >= un->un_busy_retry_count) {
19351 19347 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
19352 19348 "device busy too long\n");
19353 19349 sd_return_failed_command(un, bp, EIO);
19354 19350 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19355 19351 "sd_pkt_status_busy: exit\n");
19356 19352 return;
19357 19353 }
19358 19354 xp->xb_retry_count++;
19359 19355
19360 19356 /*
19361 19357 * Try to reset the target. However, we do not want to perform
19362 19358 * more than one reset if the device continues to fail. The reset
19363 19359 * will be performed when the retry count reaches the reset
19364 19360 * threshold. This threshold should be set such that at least
19365 19361 * one retry is issued before the reset is performed.
19366 19362 */
19367 19363 if (xp->xb_retry_count ==
19368 19364 ((un->un_reset_retry_count < 2) ? 2 : un->un_reset_retry_count)) {
19369 19365 int rval = 0;
19370 19366 mutex_exit(SD_MUTEX(un));
19371 19367 if (un->un_f_allow_bus_device_reset == TRUE) {
19372 19368 /*
19373 19369 * First try to reset the LUN; if we cannot then
19374 19370 * try to reset the target.
19375 19371 */
19376 19372 if (un->un_f_lun_reset_enabled == TRUE) {
19377 19373 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19378 19374 "sd_pkt_status_busy: RESET_LUN\n");
19379 19375 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
19380 19376 }
19381 19377 if (rval == 0) {
19382 19378 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19383 19379 "sd_pkt_status_busy: RESET_TARGET\n");
19384 19380 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
19385 19381 }
19386 19382 }
19387 19383 if (rval == 0) {
19388 19384 /*
19389 19385 * If the RESET_LUN and/or RESET_TARGET failed,
19390 19386 * try RESET_ALL
19391 19387 */
19392 19388 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19393 19389 "sd_pkt_status_busy: RESET_ALL\n");
19394 19390 rval = scsi_reset(SD_ADDRESS(un), RESET_ALL);
19395 19391 }
19396 19392 mutex_enter(SD_MUTEX(un));
19397 19393 if (rval == 0) {
19398 19394 /*
19399 19395 * The RESET_LUN, RESET_TARGET, and/or RESET_ALL failed.
19400 19396 * At this point we give up & fail the command.
19401 19397 */
19402 19398 sd_return_failed_command(un, bp, EIO);
19403 19399 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19404 19400 "sd_pkt_status_busy: exit (failed cmd)\n");
19405 19401 return;
19406 19402 }
19407 19403 }
19408 19404
19409 19405 /*
19410 19406 * Retry the command. Be sure to specify SD_RETRIES_NOCHECK as
19411 19407 * we have already checked the retry counts above.
19412 19408 */
19413 19409 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL,
19414 19410 EIO, un->un_busy_timeout, NULL);
19415 19411
19416 19412 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19417 19413 "sd_pkt_status_busy: exit\n");
19418 19414 }
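
/*
 * A minimal sketch (outside the diff) of the one-reset-per-failure guard
 * above: the reset fires only when the retry count equals the threshold,
 * and the floor of 2 guarantees at least one plain retry happens first.
 * my_should_reset is a hypothetical helper, not an sd.c function.
 */
static int
my_should_reset(int retry_count, int reset_retry_count)
{
	int threshold = (reset_retry_count < 2) ? 2 : reset_retry_count;

	/* Called after the retry count has already been incremented. */
	return (retry_count == threshold);
}
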
19419 19415
19420 19416
19421 19417 /*
19422 19418 * Function: sd_pkt_status_reservation_conflict
19423 19419 *
19424 19420 * Description: Recovery actions for a "STATUS_RESERVATION_CONFLICT" SCSI
19425 19421 * command status.
19426 19422 *
19427 19423 * Context: May be called from interrupt context
19428 19424 */
19429 19425
19430 19426 static void
19431 19427 sd_pkt_status_reservation_conflict(struct sd_lun *un, struct buf *bp,
19432 19428 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19433 19429 {
19434 19430 ASSERT(un != NULL);
19435 19431 ASSERT(mutex_owned(SD_MUTEX(un)));
19436 19432 ASSERT(bp != NULL);
19437 19433 ASSERT(xp != NULL);
19438 19434 ASSERT(pktp != NULL);
19439 19435
19440 19436 /*
19441 19437 	 * If the command was PERSISTENT_RESERVE_[IN|OUT], then the reservation
19442 19438 	 * conflict could be due to various reasons, such as incorrect keys or
19443 19439 	 * not being registered or reserved. So we return EACCES to the caller.
19444 19440 */
19445 19441 if (un->un_reservation_type == SD_SCSI3_RESERVATION) {
19446 19442 int cmd = SD_GET_PKT_OPCODE(pktp);
19447 19443 if ((cmd == SCMD_PERSISTENT_RESERVE_IN) ||
19448 19444 (cmd == SCMD_PERSISTENT_RESERVE_OUT)) {
19449 19445 sd_return_failed_command(un, bp, EACCES);
19450 19446 return;
19451 19447 }
19452 19448 }
19453 19449
19454 19450 un->un_resvd_status |= SD_RESERVATION_CONFLICT;
19455 19451
19456 19452 if ((un->un_resvd_status & SD_FAILFAST) != 0) {
19457 19453 if (sd_failfast_enable != 0) {
19458 19454 /* By definition, we must panic here.... */
19459 19455 sd_panic_for_res_conflict(un);
19460 19456 /*NOTREACHED*/
19461 19457 }
19462 19458 SD_ERROR(SD_LOG_IO, un,
19463 19459 "sd_handle_resv_conflict: Disk Reserved\n");
19464 19460 sd_return_failed_command(un, bp, EACCES);
19465 19461 return;
19466 19462 }
19467 19463
19468 19464 /*
19469 19465 * 1147670: retry only if sd_retry_on_reservation_conflict
19470 19466 * property is set (default is 1). Retries will not succeed
19471 19467 * on a disk reserved by another initiator. HA systems
19472 19468 * may reset this via sd.conf to avoid these retries.
19473 19469 *
19474 19470 * Note: The legacy return code for this failure is EIO, however EACCES
19475 19471 * seems more appropriate for a reservation conflict.
19476 19472 */
19477 19473 if (sd_retry_on_reservation_conflict == 0) {
19478 19474 SD_ERROR(SD_LOG_IO, un,
19479 19475 "sd_handle_resv_conflict: Device Reserved\n");
19480 19476 sd_return_failed_command(un, bp, EIO);
19481 19477 return;
19482 19478 }
19483 19479
19484 19480 /*
19485 19481 * Retry the command if we can.
19486 19482 *
19487 19483 * Note: The legacy return code for this failure is EIO, however EACCES
19488 19484 * seems more appropriate for a reservation conflict.
19489 19485 */
19490 19486 sd_retry_command(un, bp, SD_RETRIES_STANDARD, NULL, NULL, EIO,
19491 19487 (clock_t)2, NULL);
19492 19488 }
19493 19489
19494 19490
19495 19491
19496 19492 /*
19497 19493 * Function: sd_pkt_status_qfull
19498 19494 *
19499 19495 * Description: Handle a QUEUE FULL condition from the target. This can
19500 19496 * occur if the HBA does not handle the queue full condition.
19501 19497 * (Basically this means third-party HBAs as Sun HBAs will
19502 19498  *		(Basically this means third-party HBAs, as Sun HBAs will
19503 19499 * some commands already in the transport, then the queue full
19504 19500 * has occurred because the queue for this nexus is actually
19505 19501 * full. If there are no commands in the transport, then the
19506 19502 * queue full is resulting from some other initiator or lun
19507 19503  *		queue full results from some other initiator or LUN
19508 19504 *
19509 19505 * Context: May be called from interrupt context
19510 19506 */
19511 19507
19512 19508 static void
19513 19509 sd_pkt_status_qfull(struct sd_lun *un, struct buf *bp,
19514 19510 struct sd_xbuf *xp, struct scsi_pkt *pktp)
19515 19511 {
19516 19512 ASSERT(un != NULL);
19517 19513 ASSERT(mutex_owned(SD_MUTEX(un)));
19518 19514 ASSERT(bp != NULL);
19519 19515 ASSERT(xp != NULL);
19520 19516 ASSERT(pktp != NULL);
19521 19517
19522 19518 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19523 19519 "sd_pkt_status_qfull: entry\n");
19524 19520
19525 19521 /*
19526 19522 * Just lower the QFULL throttle and retry the command. Note that
19527 19523 * we do not limit the number of retries here.
19528 19524 */
19529 19525 sd_reduce_throttle(un, SD_THROTTLE_QFULL);
19530 19526 sd_retry_command(un, bp, SD_RETRIES_NOCHECK, NULL, NULL, 0,
19531 19527 SD_RESTART_TIMEOUT, NULL);
19532 19528
19533 19529 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19534 19530 "sd_pkt_status_qfull: exit\n");
19535 19531 }
19536 19532
19537 19533
19538 19534 /*
19539 19535 * Function: sd_reset_target
19540 19536 *
19541 19537 * Description: Issue a scsi_reset(9F), with either RESET_LUN,
19542 19538 * RESET_TARGET, or RESET_ALL.
19543 19539 *
19544 19540 * Context: May be called under interrupt context.
19545 19541 */
19546 19542
19547 19543 static void
19548 19544 sd_reset_target(struct sd_lun *un, struct scsi_pkt *pktp)
19549 19545 {
19550 19546 int rval = 0;
19551 19547
19552 19548 ASSERT(un != NULL);
19553 19549 ASSERT(mutex_owned(SD_MUTEX(un)));
19554 19550 ASSERT(pktp != NULL);
19555 19551
19556 19552 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: entry\n");
19557 19553
19558 19554 /*
19559 19555 * No need to reset if the transport layer has already done so.
19560 19556 */
19561 19557 if ((pktp->pkt_statistics &
19562 19558 (STAT_BUS_RESET | STAT_DEV_RESET | STAT_ABORTED)) != 0) {
19563 19559 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19564 19560 "sd_reset_target: no reset\n");
19565 19561 return;
19566 19562 }
19567 19563
19568 19564 mutex_exit(SD_MUTEX(un));
19569 19565
19570 19566 if (un->un_f_allow_bus_device_reset == TRUE) {
19571 19567 if (un->un_f_lun_reset_enabled == TRUE) {
19572 19568 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19573 19569 "sd_reset_target: RESET_LUN\n");
19574 19570 rval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
19575 19571 }
19576 19572 if (rval == 0) {
19577 19573 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19578 19574 "sd_reset_target: RESET_TARGET\n");
19579 19575 rval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
19580 19576 }
19581 19577 }
19582 19578
19583 19579 if (rval == 0) {
19584 19580 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
19585 19581 "sd_reset_target: RESET_ALL\n");
19586 19582 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
19587 19583 }
19588 19584
19589 19585 mutex_enter(SD_MUTEX(un));
19590 19586
19591 19587 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un, "sd_reset_target: exit\n");
19592 19588 }
19593 19589
19594 19590 /*
19595 19591 * Function: sd_target_change_task
19596 19592 *
19597 19593 * Description: Handle dynamic target change
19598 19594 *
19599 19595 * Context: Executes in a taskq() thread context
19600 19596 */
19601 19597 static void
19602 19598 sd_target_change_task(void *arg)
19603 19599 {
19604 19600 struct sd_lun *un = arg;
19605 19601 uint64_t capacity;
19606 19602 diskaddr_t label_cap;
19607 19603 uint_t lbasize;
19608 19604 sd_ssc_t *ssc;
19609 19605
19610 19606 ASSERT(un != NULL);
19611 19607 ASSERT(!mutex_owned(SD_MUTEX(un)));
19612 19608
19613 19609 if ((un->un_f_blockcount_is_valid == FALSE) ||
19614 19610 (un->un_f_tgt_blocksize_is_valid == FALSE)) {
19615 19611 return;
19616 19612 }
19617 19613
19618 19614 ssc = sd_ssc_init(un);
19619 19615
19620 19616 if (sd_send_scsi_READ_CAPACITY(ssc, &capacity,
19621 19617 &lbasize, SD_PATH_DIRECT) != 0) {
19622 19618 SD_ERROR(SD_LOG_ERROR, un,
19623 19619 "sd_target_change_task: fail to read capacity\n");
19624 19620 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19625 19621 goto task_exit;
19626 19622 }
19627 19623
19628 19624 mutex_enter(SD_MUTEX(un));
19629 19625 if (capacity <= un->un_blockcount) {
19630 19626 mutex_exit(SD_MUTEX(un));
19631 19627 goto task_exit;
19632 19628 }
19633 19629
19634 19630 sd_update_block_info(un, lbasize, capacity);
19635 19631 mutex_exit(SD_MUTEX(un));
19636 19632
19637 19633 /*
19638 19634 * If lun is EFI labeled and lun capacity is greater than the
19639 19635 * capacity contained in the label, log a sys event.
19640 19636 */
19641 19637 if (cmlb_efi_label_capacity(un->un_cmlbhandle, &label_cap,
19642 19638 (void*)SD_PATH_DIRECT) == 0) {
19643 19639 mutex_enter(SD_MUTEX(un));
19644 19640 if (un->un_f_blockcount_is_valid &&
19645 19641 un->un_blockcount > label_cap) {
19646 19642 mutex_exit(SD_MUTEX(un));
19647 19643 sd_log_lun_expansion_event(un, KM_SLEEP);
19648 19644 } else {
19649 19645 mutex_exit(SD_MUTEX(un));
19650 19646 }
19651 19647 }
19652 19648
19653 19649 task_exit:
19654 19650 sd_ssc_fini(ssc);
19655 19651 }
19656 19652
19657 19653
19658 19654 /*
19659 19655 * Function: sd_log_dev_status_event
19660 19656 *
19661 19657 * Description: Log EC_dev_status sysevent
19662 19658 *
19663 19659 * Context: Never called from interrupt context
19664 19660 */
19665 19661 static void
19666 19662 sd_log_dev_status_event(struct sd_lun *un, char *esc, int km_flag)
19667 19663 {
19668 19664 int err;
19669 19665 char *path;
19670 19666 nvlist_t *attr_list;
19671 19667
19672 19668 /* Allocate and build sysevent attribute list */
19673 19669 err = nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, km_flag);
19674 19670 if (err != 0) {
19675 19671 SD_ERROR(SD_LOG_ERROR, un,
19676 19672 "sd_log_dev_status_event: fail to allocate space\n");
19677 19673 return;
19678 19674 }
19679 19675
19680 19676 path = kmem_alloc(MAXPATHLEN, km_flag);
19681 19677 if (path == NULL) {
19682 19678 nvlist_free(attr_list);
19683 19679 SD_ERROR(SD_LOG_ERROR, un,
19684 19680 "sd_log_dev_status_event: fail to allocate space\n");
19685 19681 return;
19686 19682 }
19687 19683 /*
19688 19684 * Add path attribute to identify the lun.
19689 19685 * We are using minor node 'a' as the sysevent attribute.
19690 19686 */
19691 19687 (void) snprintf(path, MAXPATHLEN, "/devices");
19692 19688 (void) ddi_pathname(SD_DEVINFO(un), path + strlen(path));
19693 19689 (void) snprintf(path + strlen(path), MAXPATHLEN - strlen(path),
19694 19690 ":a");
19695 19691
19696 19692 err = nvlist_add_string(attr_list, DEV_PHYS_PATH, path);
19697 19693 if (err != 0) {
19698 19694 nvlist_free(attr_list);
19699 19695 kmem_free(path, MAXPATHLEN);
19700 19696 SD_ERROR(SD_LOG_ERROR, un,
19701 19697 "sd_log_dev_status_event: fail to add attribute\n");
19702 19698 return;
19703 19699 }
19704 19700
19705 19701 /* Log dynamic lun expansion sysevent */
19706 19702 err = ddi_log_sysevent(SD_DEVINFO(un), SUNW_VENDOR, EC_DEV_STATUS,
19707 19703 esc, attr_list, NULL, km_flag);
19708 19704 if (err != DDI_SUCCESS) {
19709 19705 SD_ERROR(SD_LOG_ERROR, un,
19710 19706 "sd_log_dev_status_event: fail to log sysevent\n");
19711 19707 }
19712 19708
19713 19709 nvlist_free(attr_list);
19714 19710 kmem_free(path, MAXPATHLEN);
19715 19711 }
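/*
 * For example (path and instance assumed, illustrative only), the posted
 * sysevent carries a single attribute such as
 *
 *	DEV_PHYS_PATH = "/devices/pci@0,0/scsi@1/sd@0,0:a"
 *
 * under class EC_DEV_STATUS, with the subclass passed in via esc.
 */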
19716 19712
19717 19713
19718 19714 /*
19719 19715 * Function: sd_log_lun_expansion_event
19720 19716 *
19721 19717 * Description: Log lun expansion sys event
19722 19718 *
19723 19719 * Context: Never called from interrupt context
19724 19720 */
19725 19721 static void
19726 19722 sd_log_lun_expansion_event(struct sd_lun *un, int km_flag)
19727 19723 {
19728 19724 sd_log_dev_status_event(un, ESC_DEV_DLE, km_flag);
19729 19725 }
19730 19726
19731 19727
19732 19728 /*
19733 19729 * Function: sd_log_eject_request_event
19734 19730 *
19735 19731 * Description: Log eject request sysevent
19736 19732 *
19737 19733 * Context: Never called from interrupt context
19738 19734 */
19739 19735 static void
19740 19736 sd_log_eject_request_event(struct sd_lun *un, int km_flag)
19741 19737 {
19742 19738 sd_log_dev_status_event(un, ESC_DEV_EJECT_REQUEST, km_flag);
19743 19739 }
19744 19740
19745 19741
19746 19742 /*
19747 19743 * Function: sd_media_change_task
19748 19744 *
19749 19745 * Description: Recovery action for CDROM to become available.
19750 19746 *
19751 19747 * Context: Executes in a taskq() thread context
19752 19748 */
19753 19749
19754 19750 static void
19755 19751 sd_media_change_task(void *arg)
19756 19752 {
19757 19753 struct scsi_pkt *pktp = arg;
19758 19754 struct sd_lun *un;
19759 19755 struct buf *bp;
19760 19756 struct sd_xbuf *xp;
19761 19757 int err = 0;
19762 19758 int retry_count = 0;
19763 19759 int retry_limit = SD_UNIT_ATTENTION_RETRY/10;
19764 19760 struct sd_sense_info si;
19765 19761
19766 19762 ASSERT(pktp != NULL);
19767 19763 bp = (struct buf *)pktp->pkt_private;
19768 19764 ASSERT(bp != NULL);
19769 19765 xp = SD_GET_XBUF(bp);
19770 19766 ASSERT(xp != NULL);
19771 19767 un = SD_GET_UN(bp);
19772 19768 ASSERT(un != NULL);
19773 19769 ASSERT(!mutex_owned(SD_MUTEX(un)));
19774 19770 ASSERT(un->un_f_monitor_media_state);
19775 19771
19776 19772 si.ssi_severity = SCSI_ERR_INFO;
19777 19773 si.ssi_pfa_flag = FALSE;
19778 19774
19779 19775 /*
19780 19776 * When a reset is issued on a CDROM, it takes a long time to
19781 19777 * recover. First few attempts to read capacity and other things
19782 19778 	 * related to handling unit attention fail (with an ASC 0x4 and
19783 19779 	 * ASCQ 0x1). In that case we want to do enough retries, while
19784 19780 	 * limiting the retries in other cases of genuine failure, such
19785 19781 	 * as no media in the drive.
19786 19782 */
19787 19783 while (retry_count++ < retry_limit) {
19788 19784 if ((err = sd_handle_mchange(un)) == 0) {
19789 19785 break;
19790 19786 }
19791 19787 if (err == EAGAIN) {
19792 19788 retry_limit = SD_UNIT_ATTENTION_RETRY;
19793 19789 }
19794 19790 /* Sleep for 0.5 sec. & try again */
19795 19791 delay(drv_usectohz(500000));
19796 19792 }
19797 19793
19798 19794 /*
19799 19795 * Dispatch (retry or fail) the original command here,
19800 19796 * along with appropriate console messages....
19801 19797 *
19802 19798 * Must grab the mutex before calling sd_retry_command,
19803 19799 * sd_print_sense_msg and sd_return_failed_command.
19804 19800 */
19805 19801 mutex_enter(SD_MUTEX(un));
19806 19802 if (err != SD_CMD_SUCCESS) {
19807 19803 SD_UPDATE_ERRSTATS(un, sd_harderrs);
19808 19804 SD_UPDATE_ERRSTATS(un, sd_rq_nodev_err);
19809 19805 si.ssi_severity = SCSI_ERR_FATAL;
19810 19806 sd_print_sense_msg(un, bp, &si, SD_NO_RETRY_ISSUED);
19811 19807 sd_return_failed_command(un, bp, EIO);
19812 19808 } else {
19813 19809 sd_retry_command(un, bp, SD_RETRIES_UA, sd_print_sense_msg,
19814 19810 &si, EIO, (clock_t)0, NULL);
19815 19811 }
19816 19812 mutex_exit(SD_MUTEX(un));
19817 19813 }
19818 19814
19819 19815
19820 19816
19821 19817 /*
19822 19818 * Function: sd_handle_mchange
19823 19819 *
19824 19820 * Description: Perform geometry validation & other recovery when CDROM
19825 19821 * has been removed from drive.
19826 19822 *
19827 19823 * Return Code: 0 for success
19828 19824 * errno-type return code of either sd_send_scsi_DOORLOCK() or
19829 19825 * sd_send_scsi_READ_CAPACITY()
19830 19826 *
19831 19827 * Context: Executes in a taskq() thread context
19832 19828 */
19833 19829
19834 19830 static int
19835 19831 sd_handle_mchange(struct sd_lun *un)
19836 19832 {
19837 19833 uint64_t capacity;
19838 19834 uint32_t lbasize;
19839 19835 int rval;
19840 19836 sd_ssc_t *ssc;
19841 19837
19842 19838 ASSERT(!mutex_owned(SD_MUTEX(un)));
19843 19839 ASSERT(un->un_f_monitor_media_state);
19844 19840
19845 19841 ssc = sd_ssc_init(un);
19846 19842 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
19847 19843 SD_PATH_DIRECT_PRIORITY);
19848 19844
19849 19845 if (rval != 0)
19850 19846 goto failed;
19851 19847
19852 19848 mutex_enter(SD_MUTEX(un));
19853 19849 sd_update_block_info(un, lbasize, capacity);
19854 19850
19855 19851 if (un->un_errstats != NULL) {
19856 19852 struct sd_errstats *stp =
19857 19853 (struct sd_errstats *)un->un_errstats->ks_data;
19858 19854 stp->sd_capacity.value.ui64 = (uint64_t)
19859 19855 ((uint64_t)un->un_blockcount *
19860 19856 (uint64_t)un->un_tgt_blocksize);
19861 19857 }
19862 19858
19863 19859 /*
19864 19860 * Check if the media in the device is writable or not
19865 19861 */
19866 19862 if (ISCD(un)) {
19867 19863 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT_PRIORITY);
19868 19864 }
19869 19865
19870 19866 /*
19871 19867 * Note: Maybe let the strategy/partitioning chain worry about getting
19872 19868 * valid geometry.
19873 19869 */
19874 19870 mutex_exit(SD_MUTEX(un));
19875 19871 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
19876 19872
19877 19873
19878 19874 if (cmlb_validate(un->un_cmlbhandle, 0,
19879 19875 (void *)SD_PATH_DIRECT_PRIORITY) != 0) {
19880 19876 sd_ssc_fini(ssc);
19881 19877 return (EIO);
19882 19878 } else {
19883 19879 if (un->un_f_pkstats_enabled) {
19884 19880 sd_set_pstats(un);
19885 19881 SD_TRACE(SD_LOG_IO_PARTITION, un,
19886 19882 "sd_handle_mchange: un:0x%p pstats created and "
19887 19883 "set\n", un);
19888 19884 }
19889 19885 }
19890 19886
19891 19887 /*
19892 19888 * Try to lock the door
19893 19889 */
19894 19890 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
19895 19891 SD_PATH_DIRECT_PRIORITY);
19896 19892 failed:
19897 19893 if (rval != 0)
19898 19894 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19899 19895 sd_ssc_fini(ssc);
19900 19896 return (rval);
19901 19897 }
19902 19898
19903 19899
19904 19900 /*
19905 19901 * Function: sd_send_scsi_DOORLOCK
19906 19902 *
19907 19903 * Description: Issue the scsi DOOR LOCK command
19908 19904 *
19909 19905 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
19910 19906 * structure for this target.
19911 19907 * flag - SD_REMOVAL_ALLOW
19912 19908 * SD_REMOVAL_PREVENT
19913 19909 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
19914 19910 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
19915 19911 * to use the USCSI "direct" chain and bypass the normal
19916 19912 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
19917 19913 * command is issued as part of an error recovery action.
19918 19914 *
19919 19915 * Return Code: 0 - Success
19920 19916 * errno return code from sd_ssc_send()
19921 19917 *
19922 19918 * Context: Can sleep.
19923 19919 */
19924 19920
19925 19921 static int
19926 19922 sd_send_scsi_DOORLOCK(sd_ssc_t *ssc, int flag, int path_flag)
19927 19923 {
19928 19924 struct scsi_extended_sense sense_buf;
19929 19925 union scsi_cdb cdb;
19930 19926 struct uscsi_cmd ucmd_buf;
19931 19927 int status;
19932 19928 struct sd_lun *un;
19933 19929
19934 19930 ASSERT(ssc != NULL);
19935 19931 un = ssc->ssc_un;
19936 19932 ASSERT(un != NULL);
19937 19933 ASSERT(!mutex_owned(SD_MUTEX(un)));
19938 19934
19939 19935 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_DOORLOCK: entry: un:0x%p\n", un);
19940 19936
19941 19937 /* already determined doorlock is not supported, fake success */
19942 19938 if (un->un_f_doorlock_supported == FALSE) {
19943 19939 return (0);
19944 19940 }
19945 19941
19946 19942 /*
19947 19943 * If we are ejecting and see an SD_REMOVAL_PREVENT
19948 19944 * ignore the command so we can complete the eject
19949 19945 * operation.
19950 19946 */
19951 19947 if (flag == SD_REMOVAL_PREVENT) {
19952 19948 mutex_enter(SD_MUTEX(un));
19953 19949 if (un->un_f_ejecting == TRUE) {
19954 19950 mutex_exit(SD_MUTEX(un));
19955 19951 return (EAGAIN);
19956 19952 }
19957 19953 mutex_exit(SD_MUTEX(un));
19958 19954 }
19959 19955
19960 19956 bzero(&cdb, sizeof (cdb));
19961 19957 bzero(&ucmd_buf, sizeof (ucmd_buf));
19962 19958
19963 19959 cdb.scc_cmd = SCMD_DOORLOCK;
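	/*
	 * Byte 4 of the PREVENT ALLOW MEDIUM REMOVAL CDB carries the
	 * Prevent field: SD_REMOVAL_ALLOW (0) unlocks the door and
	 * SD_REMOVAL_PREVENT (1) locks it.
	 */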
19964 19960 cdb.cdb_opaque[4] = (uchar_t)flag;
19965 19961
19966 19962 ucmd_buf.uscsi_cdb = (char *)&cdb;
19967 19963 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
19968 19964 ucmd_buf.uscsi_bufaddr = NULL;
19969 19965 ucmd_buf.uscsi_buflen = 0;
19970 19966 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
19971 19967 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
19972 19968 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
19973 19969 ucmd_buf.uscsi_timeout = 15;
19974 19970
19975 19971 SD_TRACE(SD_LOG_IO, un,
19976 19972 "sd_send_scsi_DOORLOCK: returning sd_ssc_send\n");
19977 19973
19978 19974 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
19979 19975 UIO_SYSSPACE, path_flag);
19980 19976
19981 19977 if (status == 0)
19982 19978 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
19983 19979
19984 19980 if ((status == EIO) && (ucmd_buf.uscsi_status == STATUS_CHECK) &&
19985 19981 (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
19986 19982 (scsi_sense_key((uint8_t *)&sense_buf) == KEY_ILLEGAL_REQUEST)) {
19987 19983 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
19988 19984
19989 19985 /* fake success and skip subsequent doorlock commands */
19990 19986 un->un_f_doorlock_supported = FALSE;
19991 19987 return (0);
19992 19988 }
19993 19989
19994 19990 return (status);
19995 19991 }
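/*
 * A minimal caller sketch (illustrative only; mirrors the pattern used by
 * sd_handle_mchange() above):
 *
 *	sd_ssc_t *ssc = sd_ssc_init(un);
 *	if (sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
 *	    SD_PATH_DIRECT) != 0)
 *		sd_ssc_assessment(ssc, SD_FMT_IGNORE);
 *	sd_ssc_fini(ssc);
 */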
19996 19992
19997 19993 /*
19998 19994 * Function: sd_send_scsi_READ_CAPACITY
19999 19995 *
20000 19996 * Description: This routine uses the scsi READ CAPACITY command to determine
20001 19997 * the device capacity in number of blocks and the device native
20002 19998 * block size. If this function returns a failure, then the
20003 19999 * values in *capp and *lbap are undefined. If the capacity
20004 20000 * returned is 0xffffffff then the lun is too large for a
20005 20001 * normal READ CAPACITY command and the results of a
20006 20002 * READ CAPACITY 16 will be used instead.
20007 20003 *
20008 20004 * Arguments: ssc - ssc contains ptr to soft state struct for the target
20009 20005 * capp - ptr to unsigned 64-bit variable to receive the
20010 20006 * capacity value from the command.
20011 20007 	 *		lbap - ptr to unsigned 32-bit variable to receive the
20012 20008 * block size value from the command
20013 20009 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20014 20010 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20015 20011 * to use the USCSI "direct" chain and bypass the normal
20016 20012 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
20017 20013 * command is issued as part of an error recovery action.
20018 20014 *
20019 20015 * Return Code: 0 - Success
20020 20016 * EIO - IO error
20021 20017 * EACCES - Reservation conflict detected
20022 20018 * EAGAIN - Device is becoming ready
20023 20019 * errno return code from sd_ssc_send()
20024 20020 *
20025 20021 * Context: Can sleep. Blocks until command completes.
20026 20022 */
20027 20023
20028 20024 #define SD_CAPACITY_SIZE sizeof (struct scsi_capacity)
20029 20025
20030 20026 static int
20031 20027 sd_send_scsi_READ_CAPACITY(sd_ssc_t *ssc, uint64_t *capp, uint32_t *lbap,
20032 20028 int path_flag)
20033 20029 {
20034 20030 struct scsi_extended_sense sense_buf;
20035 20031 struct uscsi_cmd ucmd_buf;
20036 20032 union scsi_cdb cdb;
20037 20033 uint32_t *capacity_buf;
20038 20034 uint64_t capacity;
20039 20035 uint32_t lbasize;
20040 20036 uint32_t pbsize;
20041 20037 int status;
20042 20038 struct sd_lun *un;
20043 20039
20044 20040 ASSERT(ssc != NULL);
20045 20041
20046 20042 un = ssc->ssc_un;
20047 20043 ASSERT(un != NULL);
20048 20044 ASSERT(!mutex_owned(SD_MUTEX(un)));
20049 20045 ASSERT(capp != NULL);
20050 20046 ASSERT(lbap != NULL);
20051 20047
20052 20048 SD_TRACE(SD_LOG_IO, un,
20053 20049 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
20054 20050
20055 20051 /*
20056 20052 * First send a READ_CAPACITY command to the target.
20057 20053 * (This command is mandatory under SCSI-2.)
20058 20054 *
20059 20055 * Set up the CDB for the READ_CAPACITY command. The Partial
20060 20056 * Medium Indicator bit is cleared. The address field must be
20061 20057 * zero if the PMI bit is zero.
20062 20058 */
20063 20059 bzero(&cdb, sizeof (cdb));
20064 20060 bzero(&ucmd_buf, sizeof (ucmd_buf));
20065 20061
20066 20062 capacity_buf = kmem_zalloc(SD_CAPACITY_SIZE, KM_SLEEP);
20067 20063
20068 20064 cdb.scc_cmd = SCMD_READ_CAPACITY;
20069 20065
20070 20066 ucmd_buf.uscsi_cdb = (char *)&cdb;
20071 20067 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
20072 20068 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity_buf;
20073 20069 ucmd_buf.uscsi_buflen = SD_CAPACITY_SIZE;
20074 20070 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20075 20071 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
20076 20072 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20077 20073 ucmd_buf.uscsi_timeout = 60;
20078 20074
20079 20075 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20080 20076 UIO_SYSSPACE, path_flag);
20081 20077
20082 20078 switch (status) {
20083 20079 case 0:
20084 20080 /* Return failure if we did not get valid capacity data. */
20085 20081 if (ucmd_buf.uscsi_resid != 0) {
20086 20082 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20087 20083 "sd_send_scsi_READ_CAPACITY received invalid "
20088 20084 "capacity data");
20089 20085 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
20090 20086 return (EIO);
20091 20087 }
20092 20088 /*
20093 20089 * Read capacity and block size from the READ CAPACITY 10 data.
20094 20090 * This data may be adjusted later due to device specific
20095 20091 * issues.
20096 20092 *
20097 20093 * According to the SCSI spec, the READ CAPACITY 10
20098 20094 * command returns the following:
20099 20095 *
20100 20096 * bytes 0-3: Maximum logical block address available.
20101 20097 * (MSB in byte:0 & LSB in byte:3)
20102 20098 *
20103 20099 * bytes 4-7: Block length in bytes
20104 20100 * (MSB in byte:4 & LSB in byte:7)
20105 20101 *
20106 20102 */
20107 20103 capacity = BE_32(capacity_buf[0]);
20108 20104 lbasize = BE_32(capacity_buf[1]);
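		/*
		 * Worked example (values assumed): a 500 GB disk with
		 * 512-byte blocks returns a maximum LBA of 976773167 here;
		 * after the "capacity += 1" below, the block count is
		 * 976773168 (976773168 * 512 = 500107862016 bytes).
		 */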
20109 20105
20110 20106 /*
20111 20107 * Done with capacity_buf
20112 20108 */
20113 20109 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
20114 20110
20115 20111 /*
20116 20112 * if the reported capacity is set to all 0xf's, then
20117 20113 * this disk is too large and requires SBC-2 commands.
20118 20114 * Reissue the request using READ CAPACITY 16.
20119 20115 */
20120 20116 if (capacity == 0xffffffff) {
20121 20117 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
20122 20118 status = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity,
20123 20119 &lbasize, &pbsize, path_flag);
20124 20120 if (status != 0) {
20125 20121 return (status);
20126 20122 } else {
20127 20123 goto rc16_done;
20128 20124 }
20129 20125 }
20130 20126 break; /* Success! */
20131 20127 case EIO:
20132 20128 switch (ucmd_buf.uscsi_status) {
20133 20129 case STATUS_RESERVATION_CONFLICT:
20134 20130 status = EACCES;
20135 20131 break;
20136 20132 case STATUS_CHECK:
20137 20133 /*
20138 20134 * Check condition; look for ASC/ASCQ of 0x04/0x01
20139 20135 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
20140 20136 */
20141 20137 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20142 20138 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
20143 20139 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
20144 20140 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
20145 20141 return (EAGAIN);
20146 20142 }
20147 20143 break;
20148 20144 default:
20149 20145 break;
20150 20146 }
20151 20147 /* FALLTHRU */
20152 20148 default:
20153 20149 kmem_free(capacity_buf, SD_CAPACITY_SIZE);
20154 20150 return (status);
20155 20151 }
20156 20152
20157 20153 /*
20158 20154 * Some ATAPI CD-ROM drives report inaccurate LBA size values
20159 20155 * (2352 and 0 are common) so for these devices always force the value
20160 20156 * to 2048 as required by the ATAPI specs.
20161 20157 */
20162 20158 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
20163 20159 lbasize = 2048;
20164 20160 }
20165 20161
20166 20162 /*
20167 20163 * Get the maximum LBA value from the READ CAPACITY data.
20168 20164 * Here we assume that the Partial Medium Indicator (PMI) bit
20169 20165 * was cleared when issuing the command. This means that the LBA
20170 20166 * returned from the device is the LBA of the last logical block
20171 20167 * on the logical unit. The actual logical block count will be
20172 20168 * this value plus one.
20173 20169 */
20174 20170 capacity += 1;
20175 20171
20176 20172 /*
20177 20173 * Currently, for removable media, the capacity is saved in terms
20178 20174 * of un->un_sys_blocksize, so scale the capacity value to reflect this.
20179 20175 */
20180 20176 if (un->un_f_has_removable_media)
20181 20177 capacity *= (lbasize / un->un_sys_blocksize);
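	/*
	 * Example (values assumed): a CD-ROM with lbasize 2048 and a
	 * 512-byte un_sys_blocksize scales capacity by 4, so the count
	 * is expressed in system blocks rather than media blocks.
	 */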
20182 20178
20183 20179 rc16_done:
20184 20180
20185 20181 /*
20186 20182 * Copy the values from the READ CAPACITY command into the space
20187 20183 * provided by the caller.
20188 20184 */
20189 20185 *capp = capacity;
20190 20186 *lbap = lbasize;
20191 20187
20192 20188 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY: "
20193 20189 "capacity:0x%llx lbasize:0x%x\n", capacity, lbasize);
20194 20190
20195 20191 /*
20196 20192 * Both the lbasize and capacity from the device must be nonzero,
20197 20193 * otherwise we assume that the values are not valid and return
20198 20194 * failure to the caller. (4203735)
20199 20195 */
20200 20196 if ((capacity == 0) || (lbasize == 0)) {
20201 20197 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20202 20198 "sd_send_scsi_READ_CAPACITY received invalid value "
20203 20199 "capacity %llu lbasize %d", capacity, lbasize);
20204 20200 return (EIO);
20205 20201 }
20206 20202 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20207 20203 return (0);
20208 20204 }
20209 20205
20210 20206 /*
20211 20207 * Function: sd_send_scsi_READ_CAPACITY_16
20212 20208 *
20213 20209 * Description: This routine uses the scsi READ CAPACITY 16 command to
20214 20210 * determine the device capacity in number of blocks and the
20215 20211 * device native block size. If this function returns a failure,
20216 20212 * then the values in *capp and *lbap are undefined.
20217 20213 * This routine should be called by sd_send_scsi_READ_CAPACITY
20218 20214 * which will apply any device specific adjustments to capacity
20219 20215 	 *		and lbasize. One exception is that it is also called by
20220 20216 * sd_get_media_info_ext. In that function, there is no need to
20221 20217 * adjust the capacity and lbasize.
20222 20218 *
20223 20219 * Arguments: ssc - ssc contains ptr to soft state struct for the target
20224 20220 * capp - ptr to unsigned 64-bit variable to receive the
20225 20221 * capacity value from the command.
20226 20222 	 *		lbap - ptr to unsigned 32-bit variable to receive the
20227 20223 * block size value from the command
20228 20224 * psp - ptr to unsigned 32-bit variable to receive the
20229 20225 * physical block size value from the command
20230 20226 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20231 20227 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20232 20228 * to use the USCSI "direct" chain and bypass the normal
20233 20229 * command waitq. SD_PATH_DIRECT_PRIORITY is used when
20234 20230 * this command is issued as part of an error recovery
20235 20231 * action.
20236 20232 *
20237 20233 * Return Code: 0 - Success
20238 20234 * EIO - IO error
20239 20235 * EACCES - Reservation conflict detected
20240 20236 * EAGAIN - Device is becoming ready
20241 20237 * errno return code from sd_ssc_send()
20242 20238 *
20243 20239 * Context: Can sleep. Blocks until command completes.
20244 20240 */
20245 20241
20246 20242 #define SD_CAPACITY_16_SIZE sizeof (struct scsi_capacity_16)
20247 20243
20248 20244 static int
20249 20245 sd_send_scsi_READ_CAPACITY_16(sd_ssc_t *ssc, uint64_t *capp,
20250 20246 uint32_t *lbap, uint32_t *psp, int path_flag)
20251 20247 {
20252 20248 struct scsi_extended_sense sense_buf;
20253 20249 struct uscsi_cmd ucmd_buf;
20254 20250 union scsi_cdb cdb;
20255 20251 uint64_t *capacity16_buf;
20256 20252 uint64_t capacity;
20257 20253 uint32_t lbasize;
20258 20254 uint32_t pbsize;
20259 20255 uint32_t lbpb_exp;
20260 20256 int status;
20261 20257 struct sd_lun *un;
20262 20258
20263 20259 ASSERT(ssc != NULL);
20264 20260
20265 20261 un = ssc->ssc_un;
20266 20262 ASSERT(un != NULL);
20267 20263 ASSERT(!mutex_owned(SD_MUTEX(un)));
20268 20264 ASSERT(capp != NULL);
20269 20265 ASSERT(lbap != NULL);
20270 20266
20271 20267 SD_TRACE(SD_LOG_IO, un,
20272 20268 "sd_send_scsi_READ_CAPACITY: entry: un:0x%p\n", un);
20273 20269
20274 20270 /*
20275 20271 * First send a READ_CAPACITY_16 command to the target.
20276 20272 *
20277 20273 * Set up the CDB for the READ_CAPACITY_16 command. The Partial
20278 20274 * Medium Indicator bit is cleared. The address field must be
20279 20275 * zero if the PMI bit is zero.
20280 20276 */
20281 20277 bzero(&cdb, sizeof (cdb));
20282 20278 bzero(&ucmd_buf, sizeof (ucmd_buf));
20283 20279
20284 20280 capacity16_buf = kmem_zalloc(SD_CAPACITY_16_SIZE, KM_SLEEP);
20285 20281
20286 20282 ucmd_buf.uscsi_cdb = (char *)&cdb;
20287 20283 ucmd_buf.uscsi_cdblen = CDB_GROUP4;
20288 20284 ucmd_buf.uscsi_bufaddr = (caddr_t)capacity16_buf;
20289 20285 ucmd_buf.uscsi_buflen = SD_CAPACITY_16_SIZE;
20290 20286 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20291 20287 ucmd_buf.uscsi_rqlen = sizeof (sense_buf);
20292 20288 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20293 20289 ucmd_buf.uscsi_timeout = 60;
20294 20290
20295 20291 /*
20296 20292 * Read Capacity (16) is a Service Action In command. One
20297 20293 * command byte (0x9E) is overloaded for multiple operations,
20298 20294 * with the second CDB byte specifying the desired operation
20299 20295 */
20300 20296 cdb.scc_cmd = SCMD_SVC_ACTION_IN_G4;
20301 20297 cdb.cdb_opaque[1] = SSVC_ACTION_READ_CAPACITY_G4;
20302 20298
20303 20299 /*
20304 20300 * Fill in allocation length field
20305 20301 */
20306 20302 FORMG4COUNT(&cdb, ucmd_buf.uscsi_buflen);
20307 20303
20308 20304 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20309 20305 UIO_SYSSPACE, path_flag);
20310 20306
20311 20307 switch (status) {
20312 20308 case 0:
20313 20309 /* Return failure if we did not get valid capacity data. */
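		/*
		 * (SD_CAPACITY_16_SIZE bytes were requested; a residual
		 * greater than 20 means fewer than 12 bytes arrived, too
		 * short for the 8-byte capacity and 4-byte block length
		 * fields.)
		 */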
20314 20310 if (ucmd_buf.uscsi_resid > 20) {
20315 20311 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20316 20312 "sd_send_scsi_READ_CAPACITY_16 received invalid "
20317 20313 "capacity data");
20318 20314 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20319 20315 return (EIO);
20320 20316 }
20321 20317
20322 20318 /*
20323 20319 * Read capacity and block size from the READ CAPACITY 16 data.
20324 20320 * This data may be adjusted later due to device specific
20325 20321 * issues.
20326 20322 *
20327 20323 * According to the SCSI spec, the READ CAPACITY 16
20328 20324 * command returns the following:
20329 20325 *
20330 20326 * bytes 0-7: Maximum logical block address available.
20331 20327 * (MSB in byte:0 & LSB in byte:7)
20332 20328 *
20333 20329 * bytes 8-11: Block length in bytes
20334 20330 * (MSB in byte:8 & LSB in byte:11)
20335 20331 *
20336 20332 * byte 13: LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT
20337 20333 */
20338 20334 capacity = BE_64(capacity16_buf[0]);
20339 20335 lbasize = BE_32(*(uint32_t *)&capacity16_buf[1]);
20340 20336 lbpb_exp = (BE_64(capacity16_buf[1]) >> 16) & 0x0f;
20341 20337
20342 20338 pbsize = lbasize << lbpb_exp;
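		/*
		 * capacity16_buf[1] holds bytes 8-15 of the returned data;
		 * after BE_64(), byte 13 sits at bits 23:16, and its low
		 * nibble is the LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT.
		 * Example (values assumed): a 512e drive reports lbasize 512
		 * with an exponent of 3, so pbsize = 512 << 3 = 4096, and
		 * I/O that is not aligned to pbsize makes the drive
		 * read-modify-write the containing physical block.
		 */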
20343 20339
20344 20340 /*
20345 20341 * Done with capacity16_buf
20346 20342 */
20347 20343 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20348 20344
20349 20345 /*
20350 20346 * if the reported capacity is set to all 0xf's, then
20351 20347 * this disk is too large. This could only happen with
20352 20348 	 * a device that supports LBAs larger than 64 bits, which
20353 20349 	 * are not defined by any current T10 standard.
20354 20350 */
20355 20351 if (capacity == 0xffffffffffffffff) {
20356 20352 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20357 20353 "disk is too large");
20358 20354 return (EIO);
20359 20355 }
20360 20356 break; /* Success! */
20361 20357 case EIO:
20362 20358 switch (ucmd_buf.uscsi_status) {
20363 20359 case STATUS_RESERVATION_CONFLICT:
20364 20360 status = EACCES;
20365 20361 break;
20366 20362 case STATUS_CHECK:
20367 20363 /*
20368 20364 * Check condition; look for ASC/ASCQ of 0x04/0x01
20369 20365 * (LOGICAL UNIT IS IN PROCESS OF BECOMING READY)
20370 20366 */
20371 20367 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20372 20368 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x04) &&
20373 20369 (scsi_sense_ascq((uint8_t *)&sense_buf) == 0x01)) {
20374 20370 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20375 20371 return (EAGAIN);
20376 20372 }
20377 20373 break;
20378 20374 default:
20379 20375 break;
20380 20376 }
20381 20377 /* FALLTHRU */
20382 20378 default:
20383 20379 kmem_free(capacity16_buf, SD_CAPACITY_16_SIZE);
20384 20380 return (status);
20385 20381 }
20386 20382
20387 20383 /*
20388 20384 * Some ATAPI CD-ROM drives report inaccurate LBA size values
20389 20385 * (2352 and 0 are common) so for these devices always force the value
20390 20386 * to 2048 as required by the ATAPI specs.
20391 20387 */
20392 20388 if ((un->un_f_cfg_is_atapi == TRUE) && (ISCD(un))) {
20393 20389 lbasize = 2048;
20394 20390 }
20395 20391
20396 20392 /*
20397 20393 * Get the maximum LBA value from the READ CAPACITY 16 data.
20398 20394 * Here we assume that the Partial Medium Indicator (PMI) bit
20399 20395 * was cleared when issuing the command. This means that the LBA
20400 20396 * returned from the device is the LBA of the last logical block
20401 20397 * on the logical unit. The actual logical block count will be
20402 20398 * this value plus one.
20403 20399 */
20404 20400 capacity += 1;
20405 20401
20406 20402 /*
20407 20403 * Currently, for removable media, the capacity is saved in terms
20408 20404 * of un->un_sys_blocksize, so scale the capacity value to reflect this.
20409 20405 */
20410 20406 if (un->un_f_has_removable_media)
20411 20407 capacity *= (lbasize / un->un_sys_blocksize);
20412 20408
20413 20409 *capp = capacity;
20414 20410 *lbap = lbasize;
20415 20411 *psp = pbsize;
20416 20412
20417 20413 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_READ_CAPACITY_16: "
20418 20414 "capacity:0x%llx lbasize:0x%x, pbsize: 0x%x\n",
20419 20415 capacity, lbasize, pbsize);
20420 20416
20421 20417 if ((capacity == 0) || (lbasize == 0) || (pbsize == 0)) {
20422 20418 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
20423 20419 "sd_send_scsi_READ_CAPACITY_16 received invalid value "
20424 20420 	    "capacity %llu lbasize %d pbsize %d", capacity, lbasize, pbsize);
20425 20421 return (EIO);
20426 20422 }
20427 20423
20428 20424 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20429 20425 return (0);
20430 20426 }
20431 20427
20432 20428
20433 20429 /*
20434 20430 * Function: sd_send_scsi_START_STOP_UNIT
20435 20431 *
20436 20432 * Description: Issue a scsi START STOP UNIT command to the target.
20437 20433 *
20438 20434 	 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20439 20435 * structure for this target.
20440 20436 * pc_flag - SD_POWER_CONDITION
20441 20437 * SD_START_STOP
20442 20438 * flag - SD_TARGET_START
20443 20439 * SD_TARGET_STOP
20444 20440 * SD_TARGET_EJECT
20445 20441 * SD_TARGET_CLOSE
20446 20442 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
20447 20443 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
20448 20444 * to use the USCSI "direct" chain and bypass the normal
20449 20445 * command waitq. SD_PATH_DIRECT_PRIORITY is used when this
20450 20446 * command is issued as part of an error recovery action.
20451 20447 *
20452 20448 * Return Code: 0 - Success
20453 20449 * EIO - IO error
20454 20450 * EACCES - Reservation conflict detected
20455 20451 * ENXIO - Not Ready, medium not present
20456 20452 * errno return code from sd_ssc_send()
20457 20453 *
20458 20454 * Context: Can sleep.
20459 20455 */
20460 20456
20461 20457 static int
20462 20458 sd_send_scsi_START_STOP_UNIT(sd_ssc_t *ssc, int pc_flag, int flag,
20463 20459 int path_flag)
20464 20460 {
20465 20461 struct scsi_extended_sense sense_buf;
20466 20462 union scsi_cdb cdb;
20467 20463 struct uscsi_cmd ucmd_buf;
20468 20464 int status;
20469 20465 struct sd_lun *un;
20470 20466
20471 20467 ASSERT(ssc != NULL);
20472 20468 un = ssc->ssc_un;
20473 20469 ASSERT(un != NULL);
20474 20470 ASSERT(!mutex_owned(SD_MUTEX(un)));
20475 20471
20476 20472 SD_TRACE(SD_LOG_IO, un,
20477 20473 "sd_send_scsi_START_STOP_UNIT: entry: un:0x%p\n", un);
20478 20474
20479 20475 if (un->un_f_check_start_stop &&
20480 20476 (pc_flag == SD_START_STOP) &&
20481 20477 ((flag == SD_TARGET_START) || (flag == SD_TARGET_STOP)) &&
20482 20478 (un->un_f_start_stop_supported != TRUE)) {
20483 20479 return (0);
20484 20480 }
20485 20481
20486 20482 /*
20487 20483 * If we are performing an eject operation and
20488 20484 * we receive any command other than SD_TARGET_EJECT
20489 20485 * we should immediately return.
20490 20486 */
20491 20487 if (flag != SD_TARGET_EJECT) {
20492 20488 mutex_enter(SD_MUTEX(un));
20493 20489 if (un->un_f_ejecting == TRUE) {
20494 20490 mutex_exit(SD_MUTEX(un));
20495 20491 return (EAGAIN);
20496 20492 }
20497 20493 mutex_exit(SD_MUTEX(un));
20498 20494 }
20499 20495
20500 20496 bzero(&cdb, sizeof (cdb));
20501 20497 bzero(&ucmd_buf, sizeof (ucmd_buf));
20502 20498 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20503 20499
20504 20500 cdb.scc_cmd = SCMD_START_STOP;
20505 20501 cdb.cdb_opaque[4] = (pc_flag == SD_POWER_CONDITION) ?
20506 20502 (uchar_t)(flag << 4) : (uchar_t)flag;
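	/*
	 * Byte 4 of the START STOP UNIT CDB holds the POWER CONDITION code
	 * in its upper nibble and the START/LOEJ bits in its lower nibble.
	 * For example, SD_TARGET_EJECT (LOEJ set, START clear) is 0x02,
	 * while an ACTIVE power condition (0x1) becomes 0x10.
	 */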
20507 20503
20508 20504 ucmd_buf.uscsi_cdb = (char *)&cdb;
20509 20505 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
20510 20506 ucmd_buf.uscsi_bufaddr = NULL;
20511 20507 ucmd_buf.uscsi_buflen = 0;
20512 20508 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20513 20509 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20514 20510 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
20515 20511 ucmd_buf.uscsi_timeout = 200;
20516 20512
20517 20513 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20518 20514 UIO_SYSSPACE, path_flag);
20519 20515
20520 20516 switch (status) {
20521 20517 case 0:
20522 20518 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20523 20519 break; /* Success! */
20524 20520 case EIO:
20525 20521 switch (ucmd_buf.uscsi_status) {
20526 20522 case STATUS_RESERVATION_CONFLICT:
20527 20523 status = EACCES;
20528 20524 break;
20529 20525 case STATUS_CHECK:
20530 20526 if (ucmd_buf.uscsi_rqstatus == STATUS_GOOD) {
20531 20527 switch (scsi_sense_key(
20532 20528 (uint8_t *)&sense_buf)) {
20533 20529 case KEY_ILLEGAL_REQUEST:
20534 20530 status = ENOTSUP;
20535 20531 break;
20536 20532 case KEY_NOT_READY:
20537 20533 if (scsi_sense_asc(
20538 20534 (uint8_t *)&sense_buf)
20539 20535 == 0x3A) {
20540 20536 status = ENXIO;
20541 20537 }
20542 20538 break;
20543 20539 default:
20544 20540 break;
20545 20541 }
20546 20542 }
20547 20543 break;
20548 20544 default:
20549 20545 break;
20550 20546 }
20551 20547 break;
20552 20548 default:
20553 20549 break;
20554 20550 }
20555 20551
20556 20552 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_START_STOP_UNIT: exit\n");
20557 20553
20558 20554 return (status);
20559 20555 }
20560 20556
20561 20557
20562 20558 /*
20563 20559 * Function: sd_start_stop_unit_callback
20564 20560 *
20565 20561 * Description: timeout(9F) callback to begin recovery process for a
20566 20562 * device that has spun down.
20567 20563 *
20568 20564 * Arguments: arg - pointer to associated softstate struct.
20569 20565 *
20570 20566 * Context: Executes in a timeout(9F) thread context
20571 20567 */
20572 20568
20573 20569 static void
20574 20570 sd_start_stop_unit_callback(void *arg)
20575 20571 {
20576 20572 struct sd_lun *un = arg;
20577 20573 ASSERT(un != NULL);
20578 20574 ASSERT(!mutex_owned(SD_MUTEX(un)));
20579 20575
20580 20576 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_callback: entry\n");
20581 20577
20582 20578 (void) taskq_dispatch(sd_tq, sd_start_stop_unit_task, un, KM_NOSLEEP);
20583 20579 }
20584 20580
20585 20581
20586 20582 /*
20587 20583 * Function: sd_start_stop_unit_task
20588 20584 *
20589 20585 * Description: Recovery procedure when a drive is spun down.
20590 20586 *
20591 20587 * Arguments: arg - pointer to associated softstate struct.
20592 20588 *
20593 20589 * Context: Executes in a taskq() thread context
20594 20590 */
20595 20591
20596 20592 static void
20597 20593 sd_start_stop_unit_task(void *arg)
20598 20594 {
20599 20595 struct sd_lun *un = arg;
20600 20596 sd_ssc_t *ssc;
20601 20597 int power_level;
20602 20598 int rval;
20603 20599
20604 20600 ASSERT(un != NULL);
20605 20601 ASSERT(!mutex_owned(SD_MUTEX(un)));
20606 20602
20607 20603 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: entry\n");
20608 20604
20609 20605 /*
20610 20606 * Some unformatted drives report not ready error, no need to
20611 20607 * restart if format has been initiated.
20612 20608 */
20613 20609 mutex_enter(SD_MUTEX(un));
20614 20610 if (un->un_f_format_in_progress == TRUE) {
20615 20611 mutex_exit(SD_MUTEX(un));
20616 20612 return;
20617 20613 }
20618 20614 mutex_exit(SD_MUTEX(un));
20619 20615
20620 20616 ssc = sd_ssc_init(un);
20621 20617 /*
20622 20618 * When a START STOP command is issued from here, it is part of a
20623 20619 * failure recovery operation and must be issued before any other
20624 20620 * commands, including any pending retries. Thus it must be sent
20625 20621 * using SD_PATH_DIRECT_PRIORITY. It doesn't matter if the spin up
20626 20622 * succeeds or not, we will start I/O after the attempt.
20627 20623 * If power condition is supported and the current power level
20628 20624 * is capable of performing I/O, we should set the power condition
20629 20625 * to that level. Otherwise, set the power condition to ACTIVE.
20630 20626 */
20631 20627 if (un->un_f_power_condition_supported) {
20632 20628 mutex_enter(SD_MUTEX(un));
20633 20629 ASSERT(SD_PM_IS_LEVEL_VALID(un, un->un_power_level));
20634 20630 power_level = sd_pwr_pc.ran_perf[un->un_power_level]
20635 20631 > 0 ? un->un_power_level : SD_SPINDLE_ACTIVE;
20636 20632 mutex_exit(SD_MUTEX(un));
20637 20633 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_POWER_CONDITION,
20638 20634 sd_pl2pc[power_level], SD_PATH_DIRECT_PRIORITY);
20639 20635 } else {
20640 20636 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
20641 20637 SD_TARGET_START, SD_PATH_DIRECT_PRIORITY);
20642 20638 }
20643 20639
20644 20640 if (rval != 0)
20645 20641 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
20646 20642 sd_ssc_fini(ssc);
20647 20643 /*
20648 20644 * The above call blocks until the START_STOP_UNIT command completes.
20649 20645 * Now that it has completed, we must re-try the original IO that
20650 20646 * received the NOT READY condition in the first place. There are
20651 20647 * three possible conditions here:
20652 20648 *
20653 20649 * (1) The original IO is on un_retry_bp.
20654 20650 * (2) The original IO is on the regular wait queue, and un_retry_bp
20655 20651 * is NULL.
20656 20652 * (3) The original IO is on the regular wait queue, and un_retry_bp
20657 20653 * points to some other, unrelated bp.
20658 20654 *
20659 20655 * For each case, we must call sd_start_cmds() with un_retry_bp
20660 20656 * as the argument. If un_retry_bp is NULL, this will initiate
20661 20657 * processing of the regular wait queue. If un_retry_bp is not NULL,
20662 20658 * then this will process the bp on un_retry_bp. That may or may not
20663 20659 * be the original IO, but that does not matter: the important thing
20664 20660 * is to keep the IO processing going at this point.
20665 20661 *
20666 20662 * Note: This is a very specific error recovery sequence associated
20667 20663 * with a drive that is not spun up. We attempt a START_STOP_UNIT and
20668 20664 * serialize the I/O with completion of the spin-up.
20669 20665 */
20670 20666 mutex_enter(SD_MUTEX(un));
20671 20667 SD_TRACE(SD_LOG_IO_CORE | SD_LOG_ERROR, un,
20672 20668 "sd_start_stop_unit_task: un:0x%p starting bp:0x%p\n",
20673 20669 un, un->un_retry_bp);
20674 20670 un->un_startstop_timeid = NULL; /* Timeout is no longer pending */
20675 20671 sd_start_cmds(un, un->un_retry_bp);
20676 20672 mutex_exit(SD_MUTEX(un));
20677 20673
20678 20674 SD_TRACE(SD_LOG_IO, un, "sd_start_stop_unit_task: exit\n");
20679 20675 }
20680 20676
20681 20677
20682 20678 /*
20683 20679 * Function: sd_send_scsi_INQUIRY
20684 20680 *
20685 20681 * Description: Issue the scsi INQUIRY command.
20686 20682 *
20687 20683 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20688 20684 * structure for this target.
20689 20685 	 *		bufaddr - buffer to receive the INQUIRY data
20690 20686 	 *		buflen - length of bufaddr in bytes
20691 20687 	 *		evpd - EVPD bit for the CDB; set to request a VPD page
20692 20688 	 *		page_code - VPD page to return when evpd is set
20693 20689 	 *		residp - optional ptr to receive the residual count
20694 20690 *
20695 20691 * Return Code: 0 - Success
20696 20692 * errno return code from sd_ssc_send()
20697 20693 *
20698 20694 * Context: Can sleep. Does not return until command is completed.
20699 20695 */
20700 20696
20701 20697 static int
20702 20698 sd_send_scsi_INQUIRY(sd_ssc_t *ssc, uchar_t *bufaddr, size_t buflen,
20703 20699 uchar_t evpd, uchar_t page_code, size_t *residp)
20704 20700 {
20705 20701 union scsi_cdb cdb;
20706 20702 struct uscsi_cmd ucmd_buf;
20707 20703 int status;
20708 20704 struct sd_lun *un;
20709 20705
20710 20706 ASSERT(ssc != NULL);
20711 20707 un = ssc->ssc_un;
20712 20708 ASSERT(un != NULL);
20713 20709 ASSERT(!mutex_owned(SD_MUTEX(un)));
20714 20710 ASSERT(bufaddr != NULL);
20715 20711
20716 20712 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: entry: un:0x%p\n", un);
20717 20713
20718 20714 bzero(&cdb, sizeof (cdb));
20719 20715 bzero(&ucmd_buf, sizeof (ucmd_buf));
20720 20716 bzero(bufaddr, buflen);
20721 20717
20722 20718 cdb.scc_cmd = SCMD_INQUIRY;
20723 20719 cdb.cdb_opaque[1] = evpd;
20724 20720 cdb.cdb_opaque[2] = page_code;
20725 20721 FORMG0COUNT(&cdb, buflen);
20726 20722
20727 20723 ucmd_buf.uscsi_cdb = (char *)&cdb;
20728 20724 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
20729 20725 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
20730 20726 ucmd_buf.uscsi_buflen = buflen;
20731 20727 ucmd_buf.uscsi_rqbuf = NULL;
20732 20728 ucmd_buf.uscsi_rqlen = 0;
20733 20729 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
20734 20730 ucmd_buf.uscsi_timeout = 200; /* Excessive legacy value */
20735 20731
20736 20732 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20737 20733 UIO_SYSSPACE, SD_PATH_DIRECT);
20738 20734
20739 20735 /*
20740 20736 * Only handle status == 0, the upper-level caller
20741 20737 * will put different assessment based on the context.
20742 20738 */
20743 20739 if (status == 0)
20744 20740 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20745 20741
20746 20742 if ((status == 0) && (residp != NULL)) {
20747 20743 *residp = ucmd_buf.uscsi_resid;
20748 20744 }
20749 20745
20750 20746 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_INQUIRY: exit\n");
20751 20747
20752 20748 return (status);
20753 20749 }
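/*
 * A minimal caller sketch (illustrative only; buffer size assumed):
 * fetching the Unit Serial Number VPD page (0x80):
 *
 *	uchar_t page80[0xff];
 *	size_t resid;
 *
 *	if (sd_send_scsi_INQUIRY(ssc, page80, sizeof (page80), 0x01,
 *	    0x80, &resid) == 0) {
 *		... page80 holds the VPD page, minus resid bytes ...
 *	}
 */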
20754 20750
20755 20751
20756 20752 /*
20757 20753 * Function: sd_send_scsi_TEST_UNIT_READY
20758 20754 *
20759 20755 * Description: Issue the scsi TEST UNIT READY command.
20760 20756 * This routine can be told to set the flag USCSI_DIAGNOSE to
20761 20757 * prevent retrying failed commands. Use this when the intent
20762 20758 * is either to check for device readiness, to clear a Unit
20763 20759 * Attention, or to clear any outstanding sense data.
20764 20760 	 * However, under specific conditions the expected behavior
20765 20761 * is for retries to bring a device ready, so use the flag
20766 20762 * with caution.
20767 20763 *
20768 20764 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20769 20765 * structure for this target.
20770 20766 * flag: SD_CHECK_FOR_MEDIA: return ENXIO if no media present
20771 20767 * SD_DONT_RETRY_TUR: include uscsi flag USCSI_DIAGNOSE.
20772 20768 	 *		0: don't check for media present; do retries on cmd.
20773 20769 *
20774 20770 * Return Code: 0 - Success
20775 20771 * EIO - IO error
20776 20772 * EACCES - Reservation conflict detected
20777 20773 * ENXIO - Not Ready, medium not present
20778 20774 * errno return code from sd_ssc_send()
20779 20775 *
20780 20776 * Context: Can sleep. Does not return until command is completed.
20781 20777 */
20782 20778
20783 20779 static int
20784 20780 sd_send_scsi_TEST_UNIT_READY(sd_ssc_t *ssc, int flag)
20785 20781 {
20786 20782 struct scsi_extended_sense sense_buf;
20787 20783 union scsi_cdb cdb;
20788 20784 struct uscsi_cmd ucmd_buf;
20789 20785 int status;
20790 20786 struct sd_lun *un;
20791 20787
20792 20788 ASSERT(ssc != NULL);
20793 20789 un = ssc->ssc_un;
20794 20790 ASSERT(un != NULL);
20795 20791 ASSERT(!mutex_owned(SD_MUTEX(un)));
20796 20792
20797 20793 SD_TRACE(SD_LOG_IO, un,
20798 20794 "sd_send_scsi_TEST_UNIT_READY: entry: un:0x%p\n", un);
20799 20795
20800 20796 /*
20801 20797 * Some Seagate elite1 TQ devices get hung with disconnect/reconnect
20802 20798 * timeouts when they receive a TUR and the queue is not empty. Check
20803 20799 * the configuration flag set during attach (indicating the drive has
20804 20800 * this firmware bug) and un_ncmds_in_transport before issuing the
20805 20801 	 * TUR. If there are pending commands, return success; this is a bit
20806 20802 	 * arbitrary, but it is OK for non-removables (i.e. the eliteI disks)
20807 20803 	 * and non-clustering configurations.
20809 20805 */
20810 20806 if (un->un_f_cfg_tur_check == TRUE) {
20811 20807 mutex_enter(SD_MUTEX(un));
20812 20808 if (un->un_ncmds_in_transport != 0) {
20813 20809 mutex_exit(SD_MUTEX(un));
20814 20810 return (0);
20815 20811 }
20816 20812 mutex_exit(SD_MUTEX(un));
20817 20813 }
20818 20814
20819 20815 bzero(&cdb, sizeof (cdb));
20820 20816 bzero(&ucmd_buf, sizeof (ucmd_buf));
20821 20817 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20822 20818
20823 20819 cdb.scc_cmd = SCMD_TEST_UNIT_READY;
20824 20820
20825 20821 ucmd_buf.uscsi_cdb = (char *)&cdb;
20826 20822 ucmd_buf.uscsi_cdblen = CDB_GROUP0;
20827 20823 ucmd_buf.uscsi_bufaddr = NULL;
20828 20824 ucmd_buf.uscsi_buflen = 0;
20829 20825 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20830 20826 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20831 20827 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
20832 20828
20833 20829 /* Use flag USCSI_DIAGNOSE to prevent retries if it fails. */
20834 20830 if ((flag & SD_DONT_RETRY_TUR) != 0) {
20835 20831 ucmd_buf.uscsi_flags |= USCSI_DIAGNOSE;
20836 20832 }
20837 20833 ucmd_buf.uscsi_timeout = 60;
20838 20834
20839 20835 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20840 20836 UIO_SYSSPACE, ((flag & SD_BYPASS_PM) ? SD_PATH_DIRECT :
20841 20837 SD_PATH_STANDARD));
20842 20838
20843 20839 switch (status) {
20844 20840 case 0:
20845 20841 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20846 20842 break; /* Success! */
20847 20843 case EIO:
20848 20844 switch (ucmd_buf.uscsi_status) {
20849 20845 case STATUS_RESERVATION_CONFLICT:
20850 20846 status = EACCES;
20851 20847 break;
20852 20848 case STATUS_CHECK:
20853 20849 if ((flag & SD_CHECK_FOR_MEDIA) == 0) {
20854 20850 break;
20855 20851 }
20856 20852 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20857 20853 (scsi_sense_key((uint8_t *)&sense_buf) ==
20858 20854 KEY_NOT_READY) &&
20859 20855 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x3A)) {
20860 20856 status = ENXIO;
20861 20857 }
20862 20858 break;
20863 20859 default:
20864 20860 break;
20865 20861 }
20866 20862 break;
20867 20863 default:
20868 20864 break;
20869 20865 }
20870 20866
20871 20867 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_TEST_UNIT_READY: exit\n");
20872 20868
20873 20869 return (status);
20874 20870 }
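/*
 * A minimal caller sketch (illustrative only): probing for media while
 * still allowing the command itself to be retried:
 *
 *	if (sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA) == ENXIO) {
 *		... no medium present ...
 *	}
 */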
20875 20871
20876 20872 /*
20877 20873 * Function: sd_send_scsi_PERSISTENT_RESERVE_IN
20878 20874 *
20879 20875 * Description: Issue the scsi PERSISTENT RESERVE IN command.
20880 20876 *
20881 20877 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
20882 20878 * structure for this target.
20883 20879 *
20884 20880 * Return Code: 0 - Success
20885 20881 * EACCES
20886 20882 * ENOTSUP
20887 20883 * errno return code from sd_ssc_send()
20888 20884 *
20889 20885 * Context: Can sleep. Does not return until command is completed.
20890 20886 */
20891 20887
20892 20888 static int
20893 20889 sd_send_scsi_PERSISTENT_RESERVE_IN(sd_ssc_t *ssc, uchar_t usr_cmd,
20894 20890 uint16_t data_len, uchar_t *data_bufp)
20895 20891 {
20896 20892 struct scsi_extended_sense sense_buf;
20897 20893 union scsi_cdb cdb;
20898 20894 struct uscsi_cmd ucmd_buf;
20899 20895 int status;
20900 20896 int no_caller_buf = FALSE;
20901 20897 struct sd_lun *un;
20902 20898
20903 20899 ASSERT(ssc != NULL);
20904 20900 un = ssc->ssc_un;
20905 20901 ASSERT(un != NULL);
20906 20902 ASSERT(!mutex_owned(SD_MUTEX(un)));
20907 20903 ASSERT((usr_cmd == SD_READ_KEYS) || (usr_cmd == SD_READ_RESV));
20908 20904
20909 20905 SD_TRACE(SD_LOG_IO, un,
20910 20906 "sd_send_scsi_PERSISTENT_RESERVE_IN: entry: un:0x%p\n", un);
20911 20907
20912 20908 bzero(&cdb, sizeof (cdb));
20913 20909 bzero(&ucmd_buf, sizeof (ucmd_buf));
20914 20910 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
20915 20911 if (data_bufp == NULL) {
20916 20912 /* Allocate a default buf if the caller did not give one */
20917 20913 ASSERT(data_len == 0);
20918 20914 data_len = MHIOC_RESV_KEY_SIZE;
20919 20915 data_bufp = kmem_zalloc(MHIOC_RESV_KEY_SIZE, KM_SLEEP);
20920 20916 no_caller_buf = TRUE;
20921 20917 }
20922 20918
20923 20919 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_IN;
20924 20920 cdb.cdb_opaque[1] = usr_cmd;
20925 20921 FORMG1COUNT(&cdb, data_len);
20926 20922
20927 20923 ucmd_buf.uscsi_cdb = (char *)&cdb;
20928 20924 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
20929 20925 ucmd_buf.uscsi_bufaddr = (caddr_t)data_bufp;
20930 20926 ucmd_buf.uscsi_buflen = data_len;
20931 20927 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
20932 20928 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
20933 20929 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
20934 20930 ucmd_buf.uscsi_timeout = 60;
20935 20931
20936 20932 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
20937 20933 UIO_SYSSPACE, SD_PATH_STANDARD);
20938 20934
20939 20935 switch (status) {
20940 20936 case 0:
20941 20937 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
20942 20938
20943 20939 break; /* Success! */
20944 20940 case EIO:
20945 20941 switch (ucmd_buf.uscsi_status) {
20946 20942 case STATUS_RESERVATION_CONFLICT:
20947 20943 status = EACCES;
20948 20944 break;
20949 20945 case STATUS_CHECK:
20950 20946 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
20951 20947 (scsi_sense_key((uint8_t *)&sense_buf) ==
20952 20948 KEY_ILLEGAL_REQUEST)) {
20953 20949 status = ENOTSUP;
20954 20950 }
20955 20951 break;
20956 20952 default:
20957 20953 break;
20958 20954 }
20959 20955 break;
20960 20956 default:
20961 20957 break;
20962 20958 }
20963 20959
20964 20960 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_IN: exit\n");
20965 20961
20966 20962 if (no_caller_buf == TRUE) {
20967 20963 kmem_free(data_bufp, data_len);
20968 20964 }
20969 20965
20970 20966 return (status);
20971 20967 }
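/*
 * A minimal caller sketch (illustrative only): passing a NULL data_bufp
 * makes the routine allocate a single-key (MHIOC_RESV_KEY_SIZE) buffer
 * internally, which is enough to probe for SCSI-3 support:
 *
 *	if (sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS,
 *	    0, NULL) == ENOTSUP) {
 *		... target does not support persistent reservations ...
 *	}
 */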
20972 20968
20973 20969
20974 20970 /*
20975 20971 * Function: sd_send_scsi_PERSISTENT_RESERVE_OUT
20976 20972 *
20977 20973 * Description: This routine is the driver entry point for handling CD-ROM
20978 20974 	 *		multi-host persistent reservation requests (MHIOCGRP_REGISTER,
20979 20975 	 *		MHIOCGRP_RESERVE, and the like) by sending the SCSI-3 PROUT
20980 20976 	 *		commands to the device.
20981 20977 *
20982 20978 * Arguments: ssc - ssc contains un - pointer to soft state struct
20983 20979 * for the target.
20984 20980 * usr_cmd SCSI-3 reservation facility command (one of
20985 20981 * SD_SCSI3_REGISTER, SD_SCSI3_RESERVE, SD_SCSI3_RELEASE,
20986 20982 * SD_SCSI3_PREEMPTANDABORT, SD_SCSI3_CLEAR)
20987 20983 * usr_bufp - user provided pointer register, reserve descriptor or
20988 20984 * preempt and abort structure (mhioc_register_t,
20989 20985 * mhioc_resv_desc_t, mhioc_preemptandabort_t)
20990 20986 *
20991 20987 * Return Code: 0 - Success
20992 20988 * EACCES
20993 20989 * ENOTSUP
20994 20990 * errno return code from sd_ssc_send()
20995 20991 *
20996 20992 * Context: Can sleep. Does not return until command is completed.
20997 20993 */
20998 20994
20999 20995 static int
21000 20996 sd_send_scsi_PERSISTENT_RESERVE_OUT(sd_ssc_t *ssc, uchar_t usr_cmd,
21001 20997 uchar_t *usr_bufp)
21002 20998 {
21003 20999 struct scsi_extended_sense sense_buf;
21004 21000 union scsi_cdb cdb;
21005 21001 struct uscsi_cmd ucmd_buf;
21006 21002 int status;
21007 21003 uchar_t data_len = sizeof (sd_prout_t);
21008 21004 sd_prout_t *prp;
21009 21005 struct sd_lun *un;
21010 21006
21011 21007 ASSERT(ssc != NULL);
21012 21008 un = ssc->ssc_un;
21013 21009 ASSERT(un != NULL);
21014 21010 ASSERT(!mutex_owned(SD_MUTEX(un)));
21015 21011 ASSERT(data_len == 24); /* required by scsi spec */
21016 21012
21017 21013 SD_TRACE(SD_LOG_IO, un,
21018 21014 "sd_send_scsi_PERSISTENT_RESERVE_OUT: entry: un:0x%p\n", un);
21019 21015
21020 21016 if (usr_bufp == NULL) {
21021 21017 return (EINVAL);
21022 21018 }
21023 21019
21024 21020 bzero(&cdb, sizeof (cdb));
21025 21021 bzero(&ucmd_buf, sizeof (ucmd_buf));
21026 21022 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21027 21023 prp = kmem_zalloc(data_len, KM_SLEEP);
21028 21024
21029 21025 cdb.scc_cmd = SCMD_PERSISTENT_RESERVE_OUT;
21030 21026 cdb.cdb_opaque[1] = usr_cmd;
21031 21027 FORMG1COUNT(&cdb, data_len);
21032 21028
21033 21029 ucmd_buf.uscsi_cdb = (char *)&cdb;
21034 21030 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
21035 21031 ucmd_buf.uscsi_bufaddr = (caddr_t)prp;
21036 21032 ucmd_buf.uscsi_buflen = data_len;
21037 21033 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21038 21034 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21039 21035 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
21040 21036 ucmd_buf.uscsi_timeout = 60;
21041 21037
21042 21038 switch (usr_cmd) {
21043 21039 case SD_SCSI3_REGISTER: {
21044 21040 mhioc_register_t *ptr = (mhioc_register_t *)usr_bufp;
21045 21041
21046 21042 bcopy(ptr->oldkey.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
21047 21043 bcopy(ptr->newkey.key, prp->service_key,
21048 21044 MHIOC_RESV_KEY_SIZE);
21049 21045 prp->aptpl = ptr->aptpl;
21050 21046 break;
21051 21047 }
21052 21048 case SD_SCSI3_CLEAR: {
21053 21049 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp;
21054 21050
21055 21051 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
21056 21052 break;
21057 21053 }
21058 21054 case SD_SCSI3_RESERVE:
21059 21055 case SD_SCSI3_RELEASE: {
21060 21056 mhioc_resv_desc_t *ptr = (mhioc_resv_desc_t *)usr_bufp;
21061 21057
21062 21058 bcopy(ptr->key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
21063 21059 prp->scope_address = BE_32(ptr->scope_specific_addr);
21064 21060 cdb.cdb_opaque[2] = ptr->type;
21065 21061 break;
21066 21062 }
21067 21063 case SD_SCSI3_PREEMPTANDABORT: {
21068 21064 mhioc_preemptandabort_t *ptr =
21069 21065 (mhioc_preemptandabort_t *)usr_bufp;
21070 21066
21071 21067 bcopy(ptr->resvdesc.key.key, prp->res_key, MHIOC_RESV_KEY_SIZE);
21072 21068 bcopy(ptr->victim_key.key, prp->service_key,
21073 21069 MHIOC_RESV_KEY_SIZE);
21074 21070 prp->scope_address = BE_32(ptr->resvdesc.scope_specific_addr);
21075 21071 cdb.cdb_opaque[2] = ptr->resvdesc.type;
21076 21072 ucmd_buf.uscsi_flags |= USCSI_HEAD;
21077 21073 break;
21078 21074 }
21079 21075 case SD_SCSI3_REGISTERANDIGNOREKEY:
21080 21076 {
21081 21077 mhioc_registerandignorekey_t *ptr;
21082 21078 ptr = (mhioc_registerandignorekey_t *)usr_bufp;
21083 21079 bcopy(ptr->newkey.key,
21084 21080 prp->service_key, MHIOC_RESV_KEY_SIZE);
21085 21081 prp->aptpl = ptr->aptpl;
21086 21082 break;
21087 21083 }
21088 21084 default:
21089 21085 ASSERT(FALSE);
21090 21086 break;
21091 21087 }
21092 21088
21093 21089 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21094 21090 UIO_SYSSPACE, SD_PATH_STANDARD);
21095 21091
21096 21092 switch (status) {
21097 21093 case 0:
21098 21094 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21099 21095 break; /* Success! */
21100 21096 case EIO:
21101 21097 switch (ucmd_buf.uscsi_status) {
21102 21098 case STATUS_RESERVATION_CONFLICT:
21103 21099 status = EACCES;
21104 21100 break;
21105 21101 case STATUS_CHECK:
21106 21102 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
21107 21103 (scsi_sense_key((uint8_t *)&sense_buf) ==
21108 21104 KEY_ILLEGAL_REQUEST)) {
21109 21105 status = ENOTSUP;
21110 21106 }
21111 21107 break;
21112 21108 default:
21113 21109 break;
21114 21110 }
21115 21111 break;
21116 21112 default:
21117 21113 break;
21118 21114 }
21119 21115
21120 21116 kmem_free(prp, data_len);
21121 21117 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_PERSISTENT_RESERVE_OUT: exit\n");
21122 21118 return (status);
21123 21119 }
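
/*
 * Illustrative sketch only (not part of the driver): how the
 * MHIOCGRP_REGISTER path builds the mhioc_register_t payload that the
 * routine above consumes. The function name and key value are made up;
 * a zero oldkey registers a brand-new key.
 */
static int
example_prout_register(sd_ssc_t *ssc)
{
	mhioc_register_t	reg;

	bzero(&reg, sizeof (reg));
	/* install key ...01; oldkey stays zero for an initial registration */
	reg.newkey.key[MHIOC_RESV_KEY_SIZE - 1] = 0x01;
	reg.aptpl = 0;	/* registration need not persist across power loss */

	return (sd_send_scsi_PERSISTENT_RESERVE_OUT(ssc, SD_SCSI3_REGISTER,
	    (uchar_t *)&reg));
}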
21124 21120
21125 21121
21126 21122 /*
21127 21123 * Function: sd_send_scsi_SYNCHRONIZE_CACHE
21128 21124 *
21129 21125 * Description: Issues a scsi SYNCHRONIZE CACHE command to the target
21130 21126 *
21131 21127 * Arguments: un - pointer to the target's soft state struct
21132 21128 * dkc - pointer to the callback structure
21133 21129 *
21134 21130 * Return Code: 0 - success
21135 21131 * errno-type error code
21136 21132 *
21137 21133 * Context: kernel thread context only.
21138 21134 *
21139 21135 * _______________________________________________________________
21140 21136 * | dkc_flag & | dkc_callback | DKIOCFLUSHWRITECACHE |
21141 21137 * |FLUSH_VOLATILE| | operation |
21142 21138 * |______________|______________|_________________________________|
21143 21139 * | 0 | NULL | Synchronous flush on both |
21144 21140 * | | | volatile and non-volatile cache |
21145 21141 * |______________|______________|_________________________________|
21146 21142 * | 1 | NULL | Synchronous flush on volatile |
21147 21143 * | | | cache; disk drivers may suppress|
21148 21144 * | | | flush if disk table indicates |
21149 21145 * | | | non-volatile cache |
21150 21146 * |______________|______________|_________________________________|
21151 21147 * | 0 | !NULL | Asynchronous flush on both |
21152 21148 * | | | volatile and non-volatile cache;|
21153 21149 * |______________|______________|_________________________________|
21154 21150 * | 1 | !NULL | Asynchronous flush on volatile |
21155 21151 * | | | cache; disk drivers may suppress|
21156 21152 * | | | flush if disk table indicates |
21157 21153 * | | | non-volatile cache |
21158 21154 * |______________|______________|_________________________________|
21159 21155 *
21160 21156 */
21161 21157
21162 21158 static int
21163 21159 sd_send_scsi_SYNCHRONIZE_CACHE(struct sd_lun *un, struct dk_callback *dkc)
21164 21160 {
21165 21161 struct sd_uscsi_info *uip;
21166 21162 struct uscsi_cmd *uscmd;
21167 21163 union scsi_cdb *cdb;
21168 21164 struct buf *bp;
21169 21165 int rval = 0;
21170 21166 int is_async;
21171 21167
21172 21168 SD_TRACE(SD_LOG_IO, un,
21173 21169 "sd_send_scsi_SYNCHRONIZE_CACHE: entry: un:0x%p\n", un);
21174 21170
21175 21171 ASSERT(un != NULL);
21176 21172 ASSERT(!mutex_owned(SD_MUTEX(un)));
21177 21173
21178 21174 if (dkc == NULL || dkc->dkc_callback == NULL) {
21179 21175 is_async = FALSE;
21180 21176 } else {
21181 21177 is_async = TRUE;
21182 21178 }
21183 21179
21184 21180 mutex_enter(SD_MUTEX(un));
21185 21181 /* check whether cache flush should be suppressed */
21186 21182 if (un->un_f_suppress_cache_flush == TRUE) {
21187 21183 mutex_exit(SD_MUTEX(un));
21188 21184 /*
21189 21185 * suppress the cache flush if the device is told to do
21190 21186 * so by sd.conf or disk table
21191 21187 */
21192 21188 		SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_SYNCHRONIZE_CACHE: "
21193 21189 		    "skip the cache flush since suppress_cache_flush is %d!\n",
21194 21190 		    un->un_f_suppress_cache_flush);
21195 21191
21196 21192 if (is_async == TRUE) {
21197 21193 /* invoke callback for asynchronous flush */
21198 21194 (*dkc->dkc_callback)(dkc->dkc_cookie, 0);
21199 21195 }
21200 21196 return (rval);
21201 21197 }
21202 21198 mutex_exit(SD_MUTEX(un));
21203 21199
21204 21200 /*
21205 21201 * check dkc_flag & FLUSH_VOLATILE so SYNC_NV bit can be
21206 21202 * set properly
21207 21203 */
21208 21204 cdb = kmem_zalloc(CDB_GROUP1, KM_SLEEP);
21209 21205 cdb->scc_cmd = SCMD_SYNCHRONIZE_CACHE;
21210 21206
21211 21207 mutex_enter(SD_MUTEX(un));
21212 21208 if (dkc != NULL && un->un_f_sync_nv_supported &&
21213 21209 (dkc->dkc_flag & FLUSH_VOLATILE)) {
21214 21210 /*
21215 21211 * if the device supports SYNC_NV bit, turn on
21216 21212 * the SYNC_NV bit to only flush volatile cache
21217 21213 */
21218 21214 cdb->cdb_un.tag |= SD_SYNC_NV_BIT;
21219 21215 }
21220 21216 mutex_exit(SD_MUTEX(un));
21221 21217
21222 21218 /*
21223 21219 * First get some memory for the uscsi_cmd struct and cdb
21224 21220 * and initialize for SYNCHRONIZE_CACHE cmd.
21225 21221 */
21226 21222 uscmd = kmem_zalloc(sizeof (struct uscsi_cmd), KM_SLEEP);
21227 21223 uscmd->uscsi_cdblen = CDB_GROUP1;
21228 21224 uscmd->uscsi_cdb = (caddr_t)cdb;
21229 21225 uscmd->uscsi_bufaddr = NULL;
21230 21226 uscmd->uscsi_buflen = 0;
21231 21227 uscmd->uscsi_rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
21232 21228 uscmd->uscsi_rqlen = SENSE_LENGTH;
21233 21229 uscmd->uscsi_rqresid = SENSE_LENGTH;
21234 21230 uscmd->uscsi_flags = USCSI_RQENABLE | USCSI_SILENT;
21235 21231 uscmd->uscsi_timeout = sd_io_time;
21236 21232
21237 21233 /*
21238 21234 * Allocate an sd_uscsi_info struct and fill it with the info
21239 21235 * needed by sd_initpkt_for_uscsi(). Then put the pointer into
21240 21236 * b_private in the buf for sd_initpkt_for_uscsi(). Note that
21241 21237 * since we allocate the buf here in this function, we do not
21242 21238 * need to preserve the prior contents of b_private.
21243 21239 * The sd_uscsi_info struct is also used by sd_uscsi_strategy()
21244 21240 */
21245 21241 uip = kmem_zalloc(sizeof (struct sd_uscsi_info), KM_SLEEP);
21246 21242 uip->ui_flags = SD_PATH_DIRECT;
21247 21243 uip->ui_cmdp = uscmd;
21248 21244
21249 21245 bp = getrbuf(KM_SLEEP);
21250 21246 bp->b_private = uip;
21251 21247
21252 21248 /*
21253 21249 * Setup buffer to carry uscsi request.
21254 21250 */
21255 21251 bp->b_flags = B_BUSY;
21256 21252 bp->b_bcount = 0;
21257 21253 bp->b_blkno = 0;
21258 21254
21259 21255 if (is_async == TRUE) {
21260 21256 bp->b_iodone = sd_send_scsi_SYNCHRONIZE_CACHE_biodone;
21261 21257 uip->ui_dkc = *dkc;
21262 21258 }
21263 21259
21264 21260 bp->b_edev = SD_GET_DEV(un);
21265 21261 bp->b_dev = cmpdev(bp->b_edev); /* maybe unnecessary? */
21266 21262
21267 21263 /*
21268 21264 * Unset un_f_sync_cache_required flag
21269 21265 */
21270 21266 mutex_enter(SD_MUTEX(un));
21271 21267 un->un_f_sync_cache_required = FALSE;
21272 21268 mutex_exit(SD_MUTEX(un));
21273 21269
21274 21270 (void) sd_uscsi_strategy(bp);
21275 21271
21276 21272 /*
21277 21273 * If synchronous request, wait for completion
21278 21274 * If async just return and let b_iodone callback
21279 21275 * cleanup.
21280 21276 * NOTE: On return, u_ncmds_in_driver will be decremented,
21281 21277 * but it was also incremented in sd_uscsi_strategy(), so
21282 21278 * we should be ok.
21283 21279 */
21284 21280 if (is_async == FALSE) {
21285 21281 (void) biowait(bp);
21286 21282 rval = sd_send_scsi_SYNCHRONIZE_CACHE_biodone(bp);
21287 21283 }
21288 21284
21289 21285 return (rval);
21290 21286 }
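
/*
 * Illustrative sketch only: an asynchronous flush of the volatile cache,
 * per the dkc_flag table above. example_flush_done() is a hypothetical
 * completion handler; the driver's real callers live elsewhere.
 */
static void
example_flush_done(void *cookie, int error)
{
	struct sd_lun	*un = cookie;

	if (error != 0)
		scsi_log(SD_DEVINFO(un), sd_label, CE_NOTE,
		    "example flush failed (%d)\n", error);
}

static int
example_async_flush(struct sd_lun *un)
{
	struct dk_callback	dkc;

	bzero(&dkc, sizeof (dkc));
	dkc.dkc_callback = example_flush_done;
	dkc.dkc_cookie = un;
	dkc.dkc_flag = FLUSH_VOLATILE;	/* volatile cache only */

	/* returns at once; example_flush_done() runs from biodone */
	return (sd_send_scsi_SYNCHRONIZE_CACHE(un, &dkc));
}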
21291 21287
21292 21288
21293 21289 static int
21294 21290 sd_send_scsi_SYNCHRONIZE_CACHE_biodone(struct buf *bp)
21295 21291 {
21296 21292 struct sd_uscsi_info *uip;
21297 21293 struct uscsi_cmd *uscmd;
21298 21294 uint8_t *sense_buf;
21299 21295 struct sd_lun *un;
21300 21296 int status;
21301 21297 union scsi_cdb *cdb;
21302 21298
21303 21299 uip = (struct sd_uscsi_info *)(bp->b_private);
21304 21300 ASSERT(uip != NULL);
21305 21301
21306 21302 uscmd = uip->ui_cmdp;
21307 21303 ASSERT(uscmd != NULL);
21308 21304
21309 21305 sense_buf = (uint8_t *)uscmd->uscsi_rqbuf;
21310 21306 ASSERT(sense_buf != NULL);
21311 21307
21312 21308 un = ddi_get_soft_state(sd_state, SD_GET_INSTANCE_FROM_BUF(bp));
21313 21309 ASSERT(un != NULL);
21314 21310
21315 21311 cdb = (union scsi_cdb *)uscmd->uscsi_cdb;
21316 21312
21317 21313 status = geterror(bp);
21318 21314 switch (status) {
21319 21315 case 0:
21320 21316 break; /* Success! */
21321 21317 case EIO:
21322 21318 switch (uscmd->uscsi_status) {
21323 21319 case STATUS_RESERVATION_CONFLICT:
21324 21320 /* Ignore reservation conflict */
21325 21321 status = 0;
21326 21322 goto done;
21327 21323
21328 21324 case STATUS_CHECK:
21329 21325 if ((uscmd->uscsi_rqstatus == STATUS_GOOD) &&
21330 21326 (scsi_sense_key(sense_buf) ==
21331 21327 KEY_ILLEGAL_REQUEST)) {
21332 21328 /* Ignore Illegal Request error */
21333 21329 				if (cdb->cdb_un.tag & SD_SYNC_NV_BIT) {
21334 21330 mutex_enter(SD_MUTEX(un));
21335 21331 un->un_f_sync_nv_supported = FALSE;
21336 21332 mutex_exit(SD_MUTEX(un));
21337 21333 status = 0;
21338 21334 SD_TRACE(SD_LOG_IO, un,
21339 21335 					    "un_f_sync_nv_supported "
21340 21336 					    "is set to false.\n");
21341 21337 goto done;
21342 21338 }
21343 21339
21344 21340 mutex_enter(SD_MUTEX(un));
21345 21341 un->un_f_sync_cache_supported = FALSE;
21346 21342 mutex_exit(SD_MUTEX(un));
21347 21343 SD_TRACE(SD_LOG_IO, un,
21348 21344 			    "sd_send_scsi_SYNCHRONIZE_CACHE_biodone: "
21349 21345 			    "un_f_sync_cache_supported set to false "
21350 21346 			    "with asc = %x, ascq = %x\n",
21351 21347 scsi_sense_asc(sense_buf),
21352 21348 scsi_sense_ascq(sense_buf));
21353 21349 status = ENOTSUP;
21354 21350 goto done;
21355 21351 }
21356 21352 break;
21357 21353 default:
21358 21354 break;
21359 21355 }
21360 21356 /* FALLTHRU */
21361 21357 default:
21362 21358 /*
21363 21359 * Turn on the un_f_sync_cache_required flag
21364 21360 * since the SYNC CACHE command failed
21365 21361 */
21366 21362 mutex_enter(SD_MUTEX(un));
21367 21363 un->un_f_sync_cache_required = TRUE;
21368 21364 mutex_exit(SD_MUTEX(un));
21369 21365
21370 21366 /*
21371 21367 * Don't log an error message if this device
21372 21368 * has removable media.
21373 21369 */
21374 21370 if (!un->un_f_has_removable_media) {
21375 21371 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
21376 21372 "SYNCHRONIZE CACHE command failed (%d)\n", status);
21377 21373 }
21378 21374 break;
21379 21375 }
21380 21376
21381 21377 done:
21382 21378 if (uip->ui_dkc.dkc_callback != NULL) {
21383 21379 (*uip->ui_dkc.dkc_callback)(uip->ui_dkc.dkc_cookie, status);
21384 21380 }
21385 21381
21386 21382 ASSERT((bp->b_flags & B_REMAPPED) == 0);
21387 21383 freerbuf(bp);
21388 21384 kmem_free(uip, sizeof (struct sd_uscsi_info));
21389 21385 kmem_free(uscmd->uscsi_rqbuf, SENSE_LENGTH);
21390 21386 kmem_free(uscmd->uscsi_cdb, (size_t)uscmd->uscsi_cdblen);
21391 21387 kmem_free(uscmd, sizeof (struct uscsi_cmd));
21392 21388
21393 21389 return (status);
21394 21390 }
21395 21391
21396 21392
21397 21393 /*
21398 21394 * Function: sd_send_scsi_GET_CONFIGURATION
21399 21395 *
21400 21396 * Description: Issues the get configuration command to the device.
21401 21397  *		Called from sd_check_for_writable_cd & sd_get_media_info;
21402 21398  *		the caller needs to ensure that buflen = SD_PROFILE_HEADER_LEN.
21403 21399 * Arguments: ssc
21404 21400 * ucmdbuf
21405 21401 * rqbuf
21406 21402 * rqbuflen
21407 21403 * bufaddr
21408 21404 * buflen
21409 21405 * path_flag
21410 21406 *
21411 21407 * Return Code: 0 - Success
21412 21408 * errno return code from sd_ssc_send()
21413 21409 *
21414 21410 * Context: Can sleep. Does not return until command is completed.
21415 21411 *
21416 21412 */
21417 21413
21418 21414 static int
21419 21415 sd_send_scsi_GET_CONFIGURATION(sd_ssc_t *ssc, struct uscsi_cmd *ucmdbuf,
21420 21416 uchar_t *rqbuf, uint_t rqbuflen, uchar_t *bufaddr, uint_t buflen,
21421 21417 int path_flag)
21422 21418 {
21423 21419 char cdb[CDB_GROUP1];
21424 21420 int status;
21425 21421 struct sd_lun *un;
21426 21422
21427 21423 ASSERT(ssc != NULL);
21428 21424 un = ssc->ssc_un;
21429 21425 ASSERT(un != NULL);
21430 21426 ASSERT(!mutex_owned(SD_MUTEX(un)));
21431 21427 ASSERT(bufaddr != NULL);
21432 21428 ASSERT(ucmdbuf != NULL);
21433 21429 ASSERT(rqbuf != NULL);
21434 21430
21435 21431 SD_TRACE(SD_LOG_IO, un,
21436 21432 "sd_send_scsi_GET_CONFIGURATION: entry: un:0x%p\n", un);
21437 21433
21438 21434 bzero(cdb, sizeof (cdb));
21439 21435 bzero(ucmdbuf, sizeof (struct uscsi_cmd));
21440 21436 bzero(rqbuf, rqbuflen);
21441 21437 bzero(bufaddr, buflen);
21442 21438
21443 21439 /*
21444 21440 * Set up cdb field for the get configuration command.
21445 21441 */
21446 21442 cdb[0] = SCMD_GET_CONFIGURATION;
21447 21443 cdb[1] = 0x02; /* Requested Type */
21448 21444 cdb[8] = SD_PROFILE_HEADER_LEN;
21449 21445 ucmdbuf->uscsi_cdb = cdb;
21450 21446 ucmdbuf->uscsi_cdblen = CDB_GROUP1;
21451 21447 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
21452 21448 ucmdbuf->uscsi_buflen = buflen;
21453 21449 ucmdbuf->uscsi_timeout = sd_io_time;
21454 21450 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
21455 21451 ucmdbuf->uscsi_rqlen = rqbuflen;
21456 21452 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;
21457 21453
21458 21454 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
21459 21455 UIO_SYSSPACE, path_flag);
21460 21456
21461 21457 switch (status) {
21462 21458 case 0:
21463 21459 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21464 21460 break; /* Success! */
21465 21461 case EIO:
21466 21462 switch (ucmdbuf->uscsi_status) {
21467 21463 case STATUS_RESERVATION_CONFLICT:
21468 21464 status = EACCES;
21469 21465 break;
21470 21466 default:
21471 21467 break;
21472 21468 }
21473 21469 break;
21474 21470 default:
21475 21471 break;
21476 21472 }
21477 21473
21478 21474 if (status == 0) {
21479 21475 SD_DUMP_MEMORY(un, SD_LOG_IO,
21480 21476 "sd_send_scsi_GET_CONFIGURATION: data",
21481 21477 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
21482 21478 }
21483 21479
21484 21480 SD_TRACE(SD_LOG_IO, un,
21485 21481 "sd_send_scsi_GET_CONFIGURATION: exit\n");
21486 21482
21487 21483 return (status);
21488 21484 }
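
/*
 * Illustrative sketch only: fetching the GET CONFIGURATION header. The
 * function name is hypothetical; the output buffer length follows the
 * caller contract noted above (buflen == SD_PROFILE_HEADER_LEN).
 */
static int
example_get_config_header(sd_ssc_t *ssc)
{
	struct uscsi_cmd	com;
	uchar_t			rqbuf[SENSE_LENGTH];
	uchar_t			hdr[SD_PROFILE_HEADER_LEN];

	return (sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf,
	    sizeof (rqbuf), hdr, sizeof (hdr), SD_PATH_STANDARD));
}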
21489 21485
21490 21486 /*
21491 21487 * Function: sd_send_scsi_feature_GET_CONFIGURATION
21492 21488 *
21493 21489 * Description: Issues the get configuration command to the device to
21494 21490 * retrieve a specific feature. Called from
21495 21491 * sd_check_for_writable_cd & sd_set_mmc_caps.
21496 21492 * Arguments: ssc
21497 21493 * ucmdbuf
21498 21494 * rqbuf
21499 21495 * rqbuflen
21500 21496 * bufaddr
21501 21497 * buflen
21502 21498 * feature
21503 21499 *
21504 21500 * Return Code: 0 - Success
21505 21501 * errno return code from sd_ssc_send()
21506 21502 *
21507 21503 * Context: Can sleep. Does not return until command is completed.
21508 21504 *
21509 21505 */
21510 21506 static int
21511 21507 sd_send_scsi_feature_GET_CONFIGURATION(sd_ssc_t *ssc,
21512 21508 struct uscsi_cmd *ucmdbuf, uchar_t *rqbuf, uint_t rqbuflen,
21513 21509 uchar_t *bufaddr, uint_t buflen, char feature, int path_flag)
21514 21510 {
21515 21511 char cdb[CDB_GROUP1];
21516 21512 int status;
21517 21513 struct sd_lun *un;
21518 21514
21519 21515 ASSERT(ssc != NULL);
21520 21516 un = ssc->ssc_un;
21521 21517 ASSERT(un != NULL);
21522 21518 ASSERT(!mutex_owned(SD_MUTEX(un)));
21523 21519 ASSERT(bufaddr != NULL);
21524 21520 ASSERT(ucmdbuf != NULL);
21525 21521 ASSERT(rqbuf != NULL);
21526 21522
21527 21523 SD_TRACE(SD_LOG_IO, un,
21528 21524 "sd_send_scsi_feature_GET_CONFIGURATION: entry: un:0x%p\n", un);
21529 21525
21530 21526 bzero(cdb, sizeof (cdb));
21531 21527 bzero(ucmdbuf, sizeof (struct uscsi_cmd));
21532 21528 bzero(rqbuf, rqbuflen);
21533 21529 bzero(bufaddr, buflen);
21534 21530
21535 21531 /*
21536 21532 * Set up cdb field for the get configuration command.
21537 21533 */
21538 21534 cdb[0] = SCMD_GET_CONFIGURATION;
21539 21535 cdb[1] = 0x02; /* Requested Type */
21540 21536 cdb[3] = feature;
21541 21537 cdb[8] = buflen;
21542 21538 ucmdbuf->uscsi_cdb = cdb;
21543 21539 ucmdbuf->uscsi_cdblen = CDB_GROUP1;
21544 21540 ucmdbuf->uscsi_bufaddr = (caddr_t)bufaddr;
21545 21541 ucmdbuf->uscsi_buflen = buflen;
21546 21542 ucmdbuf->uscsi_timeout = sd_io_time;
21547 21543 ucmdbuf->uscsi_rqbuf = (caddr_t)rqbuf;
21548 21544 ucmdbuf->uscsi_rqlen = rqbuflen;
21549 21545 ucmdbuf->uscsi_flags = USCSI_RQENABLE|USCSI_SILENT|USCSI_READ;
21550 21546
21551 21547 status = sd_ssc_send(ssc, ucmdbuf, FKIOCTL,
21552 21548 UIO_SYSSPACE, path_flag);
21553 21549
21554 21550 switch (status) {
21555 21551 case 0:
21556 21552
21557 21553 break; /* Success! */
21558 21554 case EIO:
21559 21555 switch (ucmdbuf->uscsi_status) {
21560 21556 case STATUS_RESERVATION_CONFLICT:
21561 21557 status = EACCES;
21562 21558 break;
21563 21559 default:
21564 21560 break;
21565 21561 }
21566 21562 break;
21567 21563 default:
21568 21564 break;
21569 21565 }
21570 21566
21571 21567 if (status == 0) {
21572 21568 SD_DUMP_MEMORY(un, SD_LOG_IO,
21573 21569 "sd_send_scsi_feature_GET_CONFIGURATION: data",
21574 21570 (uchar_t *)bufaddr, SD_PROFILE_HEADER_LEN, SD_LOG_HEX);
21575 21571 }
21576 21572
21577 21573 SD_TRACE(SD_LOG_IO, un,
21578 21574 "sd_send_scsi_feature_GET_CONFIGURATION: exit\n");
21579 21575
21580 21576 return (status);
21581 21577 }
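
/*
 * Illustrative sketch only: probing a single MMC feature. 0x20 is the
 * MMC "Random Writable" feature code; the function name and buffer
 * sizes are illustrative, not the driver's actual callers.
 */
static int
example_probe_random_writable(sd_ssc_t *ssc)
{
	struct uscsi_cmd	com;
	uchar_t			rqbuf[SENSE_LENGTH];
	uchar_t			out[SD_PROFILE_HEADER_LEN];

	/* 0x20: Random Writable feature (MMC) */
	return (sd_send_scsi_feature_GET_CONFIGURATION(ssc, &com, rqbuf,
	    sizeof (rqbuf), out, sizeof (out), 0x20, SD_PATH_STANDARD));
}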
21582 21578
21583 21579
21584 21580 /*
21585 21581 * Function: sd_send_scsi_MODE_SENSE
21586 21582 *
21587 21583 * Description: Utility function for issuing a scsi MODE SENSE command.
21588 21584 * Note: This routine uses a consistent implementation for Group0,
21589 21585 * Group1, and Group2 commands across all platforms. ATAPI devices
21590 21586 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select
21591 21587 *
21592 21588 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21593 21589 * structure for this target.
21594 21590  *		cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte) or
21595 21591  *			  CDB_GROUP[1|2] (10 byte)).
21596 21592 * bufaddr - buffer for page data retrieved from the target.
21597 21593 * buflen - size of page to be retrieved.
21598 21594 * page_code - page code of data to be retrieved from the target.
21599 21595 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21600 21596 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21601 21597 * to use the USCSI "direct" chain and bypass the normal
21602 21598 * command waitq.
21603 21599 *
21604 21600 * Return Code: 0 - Success
21605 21601 * errno return code from sd_ssc_send()
21606 21602 *
21607 21603 * Context: Can sleep. Does not return until command is completed.
21608 21604 */
21609 21605
21610 21606 static int
21611 21607 sd_send_scsi_MODE_SENSE(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
21612 21608 size_t buflen, uchar_t page_code, int path_flag)
21613 21609 {
21614 21610 struct scsi_extended_sense sense_buf;
21615 21611 union scsi_cdb cdb;
21616 21612 struct uscsi_cmd ucmd_buf;
21617 21613 int status;
21618 21614 int headlen;
21619 21615 struct sd_lun *un;
21620 21616
21621 21617 ASSERT(ssc != NULL);
21622 21618 un = ssc->ssc_un;
21623 21619 ASSERT(un != NULL);
21624 21620 ASSERT(!mutex_owned(SD_MUTEX(un)));
21625 21621 ASSERT(bufaddr != NULL);
21626 21622 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
21627 21623 (cdbsize == CDB_GROUP2));
21628 21624
21629 21625 SD_TRACE(SD_LOG_IO, un,
21630 21626 "sd_send_scsi_MODE_SENSE: entry: un:0x%p\n", un);
21631 21627
21632 21628 bzero(&cdb, sizeof (cdb));
21633 21629 bzero(&ucmd_buf, sizeof (ucmd_buf));
21634 21630 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21635 21631 bzero(bufaddr, buflen);
21636 21632
21637 21633 if (cdbsize == CDB_GROUP0) {
21638 21634 cdb.scc_cmd = SCMD_MODE_SENSE;
21639 21635 cdb.cdb_opaque[2] = page_code;
21640 21636 FORMG0COUNT(&cdb, buflen);
21641 21637 headlen = MODE_HEADER_LENGTH;
21642 21638 } else {
21643 21639 cdb.scc_cmd = SCMD_MODE_SENSE_G1;
21644 21640 cdb.cdb_opaque[2] = page_code;
21645 21641 FORMG1COUNT(&cdb, buflen);
21646 21642 headlen = MODE_HEADER_LENGTH_GRP2;
21647 21643 }
21648 21644
21649 21645 ASSERT(headlen <= buflen);
21650 21646 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
21651 21647
21652 21648 ucmd_buf.uscsi_cdb = (char *)&cdb;
21653 21649 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
21654 21650 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
21655 21651 ucmd_buf.uscsi_buflen = buflen;
21656 21652 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21657 21653 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21658 21654 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
21659 21655 ucmd_buf.uscsi_timeout = 60;
21660 21656
21661 21657 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21662 21658 UIO_SYSSPACE, path_flag);
21663 21659
21664 21660 switch (status) {
21665 21661 case 0:
21666 21662 /*
21667 21663 * sr_check_wp() uses 0x3f page code and check the header of
21668 21664 * mode page to determine if target device is write-protected.
21669 21665 		 * But some USB devices return 0 bytes for the 0x3f page code. In
21670 21666 		 * this case, make sure that at least the mode page header is
21671 21667 		 * returned.
21672 21668 */
21673 21669 if (buflen - ucmd_buf.uscsi_resid < headlen) {
21674 21670 status = EIO;
21675 21671 sd_ssc_set_info(ssc, SSC_FLAGS_INVALID_DATA, -1,
21676 21672 "mode page header is not returned");
21677 21673 }
21678 21674 break; /* Success! */
21679 21675 case EIO:
21680 21676 switch (ucmd_buf.uscsi_status) {
21681 21677 case STATUS_RESERVATION_CONFLICT:
21682 21678 status = EACCES;
21683 21679 break;
21684 21680 default:
21685 21681 break;
21686 21682 }
21687 21683 break;
21688 21684 default:
21689 21685 break;
21690 21686 }
21691 21687
21692 21688 if (status == 0) {
21693 21689 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SENSE: data",
21694 21690 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
21695 21691 }
21696 21692 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SENSE: exit\n");
21697 21693
21698 21694 return (status);
21699 21695 }
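
/*
 * Illustrative sketch only: requesting all mode pages with a 6-byte
 * CDB, the same 0x3f page code that sr_check_wp() (mentioned above)
 * relies on. The function name is hypothetical; a Group0 CDB caps the
 * allocation length at 255 bytes.
 */
static int
example_mode_sense_all(sd_ssc_t *ssc, uchar_t *buf, size_t len)
{
	ASSERT(len <= 0xFF);	/* Group0 allocation length limit */

	/* 0x3F requests all pages */
	return (sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, buf, len,
	    0x3F, SD_PATH_STANDARD));
}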
21700 21696
21701 21697
21702 21698 /*
21703 21699 * Function: sd_send_scsi_MODE_SELECT
21704 21700 *
21705 21701 * Description: Utility function for issuing a scsi MODE SELECT command.
21706 21702 * Note: This routine uses a consistent implementation for Group0,
21707 21703 * Group1, and Group2 commands across all platforms. ATAPI devices
21708 21704 * use Group 1 Read/Write commands and Group 2 Mode Sense/Select
21709 21705 *
21710 21706 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21711 21707 * structure for this target.
21712 21708  *		cdbsize - size of CDB to be used (CDB_GROUP0 (6 byte) or
21713 21709  *			  CDB_GROUP[1|2] (10 byte)).
21714 21710 * bufaddr - buffer for page data retrieved from the target.
21715 21711 * buflen - size of page to be retrieved.
21716 21712  *		save_page - boolean to determine whether the SP bit should be set.
21717 21713 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21718 21714 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21719 21715 * to use the USCSI "direct" chain and bypass the normal
21720 21716 * command waitq.
21721 21717 *
21722 21718 * Return Code: 0 - Success
21723 21719 * errno return code from sd_ssc_send()
21724 21720 *
21725 21721 * Context: Can sleep. Does not return until command is completed.
21726 21722 */
21727 21723
21728 21724 static int
21729 21725 sd_send_scsi_MODE_SELECT(sd_ssc_t *ssc, int cdbsize, uchar_t *bufaddr,
21730 21726 size_t buflen, uchar_t save_page, int path_flag)
21731 21727 {
21732 21728 struct scsi_extended_sense sense_buf;
21733 21729 union scsi_cdb cdb;
21734 21730 struct uscsi_cmd ucmd_buf;
21735 21731 int status;
21736 21732 struct sd_lun *un;
21737 21733
21738 21734 ASSERT(ssc != NULL);
21739 21735 un = ssc->ssc_un;
21740 21736 ASSERT(un != NULL);
21741 21737 ASSERT(!mutex_owned(SD_MUTEX(un)));
21742 21738 ASSERT(bufaddr != NULL);
21743 21739 ASSERT((cdbsize == CDB_GROUP0) || (cdbsize == CDB_GROUP1) ||
21744 21740 (cdbsize == CDB_GROUP2));
21745 21741
21746 21742 SD_TRACE(SD_LOG_IO, un,
21747 21743 "sd_send_scsi_MODE_SELECT: entry: un:0x%p\n", un);
21748 21744
21749 21745 bzero(&cdb, sizeof (cdb));
21750 21746 bzero(&ucmd_buf, sizeof (ucmd_buf));
21751 21747 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21752 21748
21753 21749 /* Set the PF bit for many third party drives */
21754 21750 cdb.cdb_opaque[1] = 0x10;
21755 21751
21756 21752 /* Set the savepage(SP) bit if given */
21757 21753 if (save_page == SD_SAVE_PAGE) {
21758 21754 cdb.cdb_opaque[1] |= 0x01;
21759 21755 }
21760 21756
21761 21757 if (cdbsize == CDB_GROUP0) {
21762 21758 cdb.scc_cmd = SCMD_MODE_SELECT;
21763 21759 FORMG0COUNT(&cdb, buflen);
21764 21760 } else {
21765 21761 cdb.scc_cmd = SCMD_MODE_SELECT_G1;
21766 21762 FORMG1COUNT(&cdb, buflen);
21767 21763 }
21768 21764
21769 21765 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
21770 21766
21771 21767 ucmd_buf.uscsi_cdb = (char *)&cdb;
21772 21768 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
21773 21769 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
21774 21770 ucmd_buf.uscsi_buflen = buflen;
21775 21771 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21776 21772 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21777 21773 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_WRITE | USCSI_SILENT;
21778 21774 ucmd_buf.uscsi_timeout = 60;
21779 21775
21780 21776 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21781 21777 UIO_SYSSPACE, path_flag);
21782 21778
21783 21779 switch (status) {
21784 21780 case 0:
21785 21781 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21786 21782 break; /* Success! */
21787 21783 case EIO:
21788 21784 switch (ucmd_buf.uscsi_status) {
21789 21785 case STATUS_RESERVATION_CONFLICT:
21790 21786 status = EACCES;
21791 21787 break;
21792 21788 default:
21793 21789 break;
21794 21790 }
21795 21791 break;
21796 21792 default:
21797 21793 break;
21798 21794 }
21799 21795
21800 21796 if (status == 0) {
21801 21797 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_MODE_SELECT: data",
21802 21798 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
21803 21799 }
21804 21800 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_MODE_SELECT: exit\n");
21805 21801
21806 21802 return (status);
21807 21803 }
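
/*
 * Illustrative sketch only: the usual read-modify-write pairing of the
 * two routines above. The function name is hypothetical, and real
 * callers typically also scrub mode-header fields (e.g. the mode data
 * length) that the target must not see echoed back; that step is
 * elided here.
 */
static int
example_mode_page_update(sd_ssc_t *ssc, uchar_t page, uchar_t *buf,
    size_t len)
{
	int	rval;

	rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, buf, len, page,
	    SD_PATH_STANDARD);
	if (rval != 0)
		return (rval);

	/* ... edit the page bytes past the mode header here ... */

	return (sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, buf, len,
	    SD_SAVE_PAGE, SD_PATH_STANDARD));
}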
21808 21804
21809 21805
21810 21806 /*
21811 21807 * Function: sd_send_scsi_RDWR
21812 21808 *
21813 21809 * Description: Issue a scsi READ or WRITE command with the given parameters.
21814 21810 *
21815 21811 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21816 21812 * structure for this target.
21817 21813 * cmd: SCMD_READ or SCMD_WRITE
21818 21814 * bufaddr: Address of caller's buffer to receive the RDWR data
21819 21815 * buflen: Length of caller's buffer receive the RDWR data.
21820 21816  *		buflen:  Length of caller's buffer to receive the RDWR data.
21821 21817 * (Assumes target-native block size.)
21822 21818  *		residp:  Pointer to variable to receive the residual of the
21823 21819  *			RDWR operation (may be NULL if no residual is requested).
21824 21820 * path_flag - SD_PATH_DIRECT to use the USCSI "direct" chain and
21825 21821 * the normal command waitq, or SD_PATH_DIRECT_PRIORITY
21826 21822 * to use the USCSI "direct" chain and bypass the normal
21827 21823 * command waitq.
21828 21824 *
21829 21825 * Return Code: 0 - Success
21830 21826 * errno return code from sd_ssc_send()
21831 21827 *
21832 21828 * Context: Can sleep. Does not return until command is completed.
21833 21829 */
21834 21830
21835 21831 static int
21836 21832 sd_send_scsi_RDWR(sd_ssc_t *ssc, uchar_t cmd, void *bufaddr,
21837 21833 size_t buflen, daddr_t start_block, int path_flag)
21838 21834 {
21839 21835 struct scsi_extended_sense sense_buf;
21840 21836 union scsi_cdb cdb;
21841 21837 struct uscsi_cmd ucmd_buf;
21842 21838 uint32_t block_count;
21843 21839 int status;
21844 21840 int cdbsize;
21845 21841 uchar_t flag;
21846 21842 struct sd_lun *un;
21847 21843
21848 21844 ASSERT(ssc != NULL);
21849 21845 un = ssc->ssc_un;
21850 21846 ASSERT(un != NULL);
21851 21847 ASSERT(!mutex_owned(SD_MUTEX(un)));
21852 21848 ASSERT(bufaddr != NULL);
21853 21849 ASSERT((cmd == SCMD_READ) || (cmd == SCMD_WRITE));
21854 21850
21855 21851 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: entry: un:0x%p\n", un);
21856 21852
21857 21853 if (un->un_f_tgt_blocksize_is_valid != TRUE) {
21858 21854 return (EINVAL);
21859 21855 }
21860 21856
21861 21857 mutex_enter(SD_MUTEX(un));
21862 21858 block_count = SD_BYTES2TGTBLOCKS(un, buflen);
21863 21859 mutex_exit(SD_MUTEX(un));
21864 21860
21865 21861 flag = (cmd == SCMD_READ) ? USCSI_READ : USCSI_WRITE;
21866 21862
21867 21863 SD_INFO(SD_LOG_IO, un, "sd_send_scsi_RDWR: "
21868 21864 "bufaddr:0x%p buflen:0x%x start_block:0x%p block_count:0x%x\n",
21869 21865 bufaddr, buflen, start_block, block_count);
21870 21866
21871 21867 bzero(&cdb, sizeof (cdb));
21872 21868 bzero(&ucmd_buf, sizeof (ucmd_buf));
21873 21869 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21874 21870
21875 21871 /* Compute CDB size to use */
21876 21872 if (start_block > 0xffffffff)
21877 21873 cdbsize = CDB_GROUP4;
21878 21874 else if ((start_block & 0xFFE00000) ||
21879 21875 (un->un_f_cfg_is_atapi == TRUE))
21880 21876 cdbsize = CDB_GROUP1;
21881 21877 else
21882 21878 cdbsize = CDB_GROUP0;
21883 21879
21884 21880 switch (cdbsize) {
21885 21881 case CDB_GROUP0: /* 6-byte CDBs */
21886 21882 cdb.scc_cmd = cmd;
21887 21883 FORMG0ADDR(&cdb, start_block);
21888 21884 FORMG0COUNT(&cdb, block_count);
21889 21885 break;
21890 21886 case CDB_GROUP1: /* 10-byte CDBs */
21891 21887 cdb.scc_cmd = cmd | SCMD_GROUP1;
21892 21888 FORMG1ADDR(&cdb, start_block);
21893 21889 FORMG1COUNT(&cdb, block_count);
21894 21890 break;
21895 21891 case CDB_GROUP4: /* 16-byte CDBs */
21896 21892 cdb.scc_cmd = cmd | SCMD_GROUP4;
21897 21893 FORMG4LONGADDR(&cdb, (uint64_t)start_block);
21898 21894 FORMG4COUNT(&cdb, block_count);
21899 21895 break;
21900 21896 case CDB_GROUP5: /* 12-byte CDBs (currently unsupported) */
21901 21897 default:
21902 21898 /* All others reserved */
21903 21899 return (EINVAL);
21904 21900 }
21905 21901
21906 21902 /* Set LUN bit(s) in CDB if this is a SCSI-1 device */
21907 21903 SD_FILL_SCSI1_LUN_CDB(un, &cdb);
21908 21904
21909 21905 ucmd_buf.uscsi_cdb = (char *)&cdb;
21910 21906 ucmd_buf.uscsi_cdblen = (uchar_t)cdbsize;
21911 21907 ucmd_buf.uscsi_bufaddr = bufaddr;
21912 21908 ucmd_buf.uscsi_buflen = buflen;
21913 21909 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21914 21910 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21915 21911 ucmd_buf.uscsi_flags = flag | USCSI_RQENABLE | USCSI_SILENT;
21916 21912 ucmd_buf.uscsi_timeout = 60;
21917 21913 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
21918 21914 UIO_SYSSPACE, path_flag);
21919 21915
21920 21916 switch (status) {
21921 21917 case 0:
21922 21918 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
21923 21919 break; /* Success! */
21924 21920 case EIO:
21925 21921 switch (ucmd_buf.uscsi_status) {
21926 21922 case STATUS_RESERVATION_CONFLICT:
21927 21923 status = EACCES;
21928 21924 break;
21929 21925 default:
21930 21926 break;
21931 21927 }
21932 21928 break;
21933 21929 default:
21934 21930 break;
21935 21931 }
21936 21932
21937 21933 if (status == 0) {
21938 21934 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_RDWR: data",
21939 21935 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
21940 21936 }
21941 21937
21942 21938 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_RDWR: exit\n");
21943 21939
21944 21940 return (status);
21945 21941 }
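
/*
 * Illustrative sketch only: reading the first target block through the
 * routine above. The function name is hypothetical, and a 512-byte
 * target block size is assumed; the CDB group (6-, 10- or 16-byte) is
 * picked automatically from start_block as shown above.
 */
static int
example_read_block_zero(sd_ssc_t *ssc, uchar_t *buf)
{
	/*
	 * buflen is converted to a block count internally, so it must
	 * be a multiple of the target block size (assumed 512 here).
	 */
	return (sd_send_scsi_RDWR(ssc, SCMD_READ, buf, 512,
	    (daddr_t)0, SD_PATH_STANDARD));
}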
21946 21942
21947 21943
21948 21944 /*
21949 21945 * Function: sd_send_scsi_LOG_SENSE
21950 21946 *
21951 21947 * Description: Issue a scsi LOG_SENSE command with the given parameters.
21952 21948 *
21953 21949 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
21954 21950 * structure for this target.
21955 21951 *
21956 21952 * Return Code: 0 - Success
21957 21953 * errno return code from sd_ssc_send()
21958 21954 *
21959 21955 * Context: Can sleep. Does not return until command is completed.
21960 21956 */
21961 21957
21962 21958 static int
21963 21959 sd_send_scsi_LOG_SENSE(sd_ssc_t *ssc, uchar_t *bufaddr, uint16_t buflen,
21964 21960 uchar_t page_code, uchar_t page_control, uint16_t param_ptr,
21965 21961 int path_flag)
21966 21962
21967 21963 {
21968 21964 struct scsi_extended_sense sense_buf;
21969 21965 union scsi_cdb cdb;
21970 21966 struct uscsi_cmd ucmd_buf;
21971 21967 int status;
21972 21968 struct sd_lun *un;
21973 21969
21974 21970 ASSERT(ssc != NULL);
21975 21971 un = ssc->ssc_un;
21976 21972 ASSERT(un != NULL);
21977 21973 ASSERT(!mutex_owned(SD_MUTEX(un)));
21978 21974
21979 21975 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: entry: un:0x%p\n", un);
21980 21976
21981 21977 bzero(&cdb, sizeof (cdb));
21982 21978 bzero(&ucmd_buf, sizeof (ucmd_buf));
21983 21979 bzero(&sense_buf, sizeof (struct scsi_extended_sense));
21984 21980
21985 21981 cdb.scc_cmd = SCMD_LOG_SENSE_G1;
21986 21982 cdb.cdb_opaque[2] = (page_control << 6) | page_code;
21987 21983 cdb.cdb_opaque[5] = (uchar_t)((param_ptr & 0xFF00) >> 8);
21988 21984 cdb.cdb_opaque[6] = (uchar_t)(param_ptr & 0x00FF);
21989 21985 FORMG1COUNT(&cdb, buflen);
21990 21986
21991 21987 ucmd_buf.uscsi_cdb = (char *)&cdb;
21992 21988 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
21993 21989 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
21994 21990 ucmd_buf.uscsi_buflen = buflen;
21995 21991 ucmd_buf.uscsi_rqbuf = (caddr_t)&sense_buf;
21996 21992 ucmd_buf.uscsi_rqlen = sizeof (struct scsi_extended_sense);
21997 21993 ucmd_buf.uscsi_flags = USCSI_RQENABLE | USCSI_READ | USCSI_SILENT;
21998 21994 ucmd_buf.uscsi_timeout = 60;
21999 21995
22000 21996 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
22001 21997 UIO_SYSSPACE, path_flag);
22002 21998
22003 21999 switch (status) {
22004 22000 case 0:
22005 22001 break;
22006 22002 case EIO:
22007 22003 switch (ucmd_buf.uscsi_status) {
22008 22004 case STATUS_RESERVATION_CONFLICT:
22009 22005 status = EACCES;
22010 22006 break;
22011 22007 case STATUS_CHECK:
22012 22008 if ((ucmd_buf.uscsi_rqstatus == STATUS_GOOD) &&
22013 22009 (scsi_sense_key((uint8_t *)&sense_buf) ==
22014 22010 KEY_ILLEGAL_REQUEST) &&
22015 22011 (scsi_sense_asc((uint8_t *)&sense_buf) == 0x24)) {
22016 22012 /*
22017 22013 * ASC 0x24: INVALID FIELD IN CDB
22018 22014 */
22019 22015 switch (page_code) {
22020 22016 case START_STOP_CYCLE_PAGE:
22021 22017 /*
22022 22018 * The start stop cycle counter is
22023 22019 * implemented as page 0x31 in earlier
22024 22020 * generation disks. In new generation
22025 22021 * disks the start stop cycle counter is
22026 22022 				 * implemented as page 0xE. To handle this
22027 22023 				 * case properly, if an attempt to read log
22028 22024 				 * page 0xE fails, we will try again using
22029 22025 				 * page 0x31.
22030 22026 *
22031 22027 * Network storage BU committed to
22032 22028 * maintain the page 0x31 for this
22033 22029 * purpose and will not have any other
22034 22030 * page implemented with page code 0x31
22035 22031 * until all disks transition to the
22036 22032 * standard page.
22037 22033 */
22038 22034 mutex_enter(SD_MUTEX(un));
22039 22035 un->un_start_stop_cycle_page =
22040 22036 START_STOP_CYCLE_VU_PAGE;
22041 22037 cdb.cdb_opaque[2] =
22042 22038 (char)(page_control << 6) |
22043 22039 un->un_start_stop_cycle_page;
22044 22040 mutex_exit(SD_MUTEX(un));
22045 22041 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
22046 22042 status = sd_ssc_send(
22047 22043 ssc, &ucmd_buf, FKIOCTL,
22048 22044 UIO_SYSSPACE, path_flag);
22049 22045
22050 22046 break;
22051 22047 case TEMPERATURE_PAGE:
22052 22048 status = ENOTTY;
22053 22049 break;
22054 22050 default:
22055 22051 break;
22056 22052 }
22057 22053 }
22058 22054 break;
22059 22055 default:
22060 22056 break;
22061 22057 }
22062 22058 break;
22063 22059 default:
22064 22060 break;
22065 22061 }
22066 22062
22067 22063 if (status == 0) {
22068 22064 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
22069 22065 SD_DUMP_MEMORY(un, SD_LOG_IO, "sd_send_scsi_LOG_SENSE: data",
22070 22066 (uchar_t *)bufaddr, buflen, SD_LOG_HEX);
22071 22067 }
22072 22068
22073 22069 SD_TRACE(SD_LOG_IO, un, "sd_send_scsi_LOG_SENSE: exit\n");
22074 22070
22075 22071 return (status);
22076 22072 }
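
/*
 * Illustrative sketch only: reading the temperature log page. The
 * function name is hypothetical; page_control 1 requests current
 * cumulative values (SPC) and param_ptr 0 starts at the first
 * parameter.
 */
static int
example_read_temperature(sd_ssc_t *ssc, uchar_t *buf, uint16_t len)
{
	return (sd_send_scsi_LOG_SENSE(ssc, buf, len, TEMPERATURE_PAGE,
	    1, 0, SD_PATH_DIRECT));
}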
22077 22073
22078 22074
22079 22075 /*
22080 22076 * Function: sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION
22081 22077 *
22082 22078 * Description: Issue the scsi GET EVENT STATUS NOTIFICATION command.
22083 22079 *
22084 22080 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
22085 22081 * structure for this target.
22086 22082 * bufaddr
22087 22083 * buflen
22088 22084 * class_req
22089 22085 *
22090 22086 * Return Code: 0 - Success
22091 22087 * errno return code from sd_ssc_send()
22092 22088 *
22093 22089 * Context: Can sleep. Does not return until command is completed.
22094 22090 */
22095 22091
22096 22092 static int
22097 22093 sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(sd_ssc_t *ssc, uchar_t *bufaddr,
22098 22094 size_t buflen, uchar_t class_req)
22099 22095 {
22100 22096 union scsi_cdb cdb;
22101 22097 struct uscsi_cmd ucmd_buf;
22102 22098 int status;
22103 22099 struct sd_lun *un;
22104 22100
22105 22101 ASSERT(ssc != NULL);
22106 22102 un = ssc->ssc_un;
22107 22103 ASSERT(un != NULL);
22108 22104 ASSERT(!mutex_owned(SD_MUTEX(un)));
22109 22105 ASSERT(bufaddr != NULL);
22110 22106
22111 22107 SD_TRACE(SD_LOG_IO, un,
22112 22108 "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: entry: un:0x%p\n", un);
22113 22109
22114 22110 bzero(&cdb, sizeof (cdb));
22115 22111 bzero(&ucmd_buf, sizeof (ucmd_buf));
22116 22112 bzero(bufaddr, buflen);
22117 22113
22118 22114 cdb.scc_cmd = SCMD_GET_EVENT_STATUS_NOTIFICATION;
22119 22115 cdb.cdb_opaque[1] = 1; /* polled */
22120 22116 cdb.cdb_opaque[4] = class_req;
22121 22117 FORMG1COUNT(&cdb, buflen);
22122 22118
22123 22119 ucmd_buf.uscsi_cdb = (char *)&cdb;
22124 22120 ucmd_buf.uscsi_cdblen = CDB_GROUP1;
22125 22121 ucmd_buf.uscsi_bufaddr = (caddr_t)bufaddr;
22126 22122 ucmd_buf.uscsi_buflen = buflen;
22127 22123 ucmd_buf.uscsi_rqbuf = NULL;
22128 22124 ucmd_buf.uscsi_rqlen = 0;
22129 22125 ucmd_buf.uscsi_flags = USCSI_READ | USCSI_SILENT;
22130 22126 ucmd_buf.uscsi_timeout = 60;
22131 22127
22132 22128 status = sd_ssc_send(ssc, &ucmd_buf, FKIOCTL,
22133 22129 UIO_SYSSPACE, SD_PATH_DIRECT);
22134 22130
22135 22131 /*
22136 22132 * Only handle status == 0, the upper-level caller
22137 22133 * will put different assessment based on the context.
22138 22134 */
22139 22135 if (status == 0) {
22140 22136 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
22141 22137
22142 22138 if (ucmd_buf.uscsi_resid != 0) {
22143 22139 status = EIO;
22144 22140 }
22145 22141 }
22146 22142
22147 22143 SD_TRACE(SD_LOG_IO, un,
22148 22144 "sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION: exit\n");
22149 22145
22150 22146 return (status);
22151 22147 }
22152 22148
22153 22149
22154 22150 static boolean_t
22155 22151 sd_gesn_media_data_valid(uchar_t *data)
22156 22152 {
22157 22153 uint16_t len;
22158 22154
22159 22155 	len = (data[0] << 8) | data[1];	/* length field is big-endian */
22160 22156 return ((len >= 6) &&
22161 22157 ((data[2] & SD_GESN_HEADER_NEA) == 0) &&
22162 22158 ((data[2] & SD_GESN_HEADER_CLASS) == SD_GESN_MEDIA_CLASS) &&
22163 22159 ((data[3] & (1 << SD_GESN_MEDIA_CLASS)) != 0));
22164 22160 }
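
/*
 * Illustrative sketch only: polling for a media-class event and vetting
 * the reply with sd_gesn_media_data_valid() above. The function name is
 * hypothetical; the 8-byte buffer holds the 4-byte event header plus
 * one 4-byte media event descriptor.
 */
static int
example_poll_media_event(sd_ssc_t *ssc)
{
	uchar_t	event[8];
	int	rval;

	rval = sd_send_scsi_GET_EVENT_STATUS_NOTIFICATION(ssc, event,
	    sizeof (event), 1 << SD_GESN_MEDIA_CLASS);
	if ((rval == 0) && !sd_gesn_media_data_valid(event))
		rval = EIO;
	return (rval);
}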
22165 22161
22166 22162
22167 22163 /*
22168 22164 * Function: sdioctl
22169 22165 *
22170 22166 * Description: Driver's ioctl(9e) entry point function.
22171 22167 *
22172 22168 * Arguments: dev - device number
22173 22169 * cmd - ioctl operation to be performed
22174 22170 * arg - user argument, contains data to be set or reference
22175 22171 * parameter for get
22176 22172 * flag - bit flag, indicating open settings, 32/64 bit type
22177 22173 * cred_p - user credential pointer
22178 22174 * rval_p - calling process return value (OPT)
22179 22175 *
22180 22176 * Return Code: EINVAL
22181 22177 * ENOTTY
22182 22178 * ENXIO
22183 22179 * EIO
22184 22180 * EFAULT
22185 22181 * ENOTSUP
22186 22182 * EPERM
22187 22183 *
22188 22184 * Context: Called from the device switch at normal priority.
22189 22185 */
22190 22186
22191 22187 static int
22192 22188 sdioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p, int *rval_p)
22193 22189 {
22194 22190 struct sd_lun *un = NULL;
22195 22191 int err = 0;
22196 22192 int i = 0;
22197 22193 cred_t *cr;
22198 22194 int tmprval = EINVAL;
22199 22195 boolean_t is_valid;
22200 22196 sd_ssc_t *ssc;
22201 22197
22202 22198 /*
22203 22199 	 * All device accesses go through sdstrategy, where we check the
22204 22200 	 * suspend status.
22205 22201 */
22206 22202 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
22207 22203 return (ENXIO);
22208 22204 }
22209 22205
22210 22206 ASSERT(!mutex_owned(SD_MUTEX(un)));
22211 22207
22212 22208 /* Initialize sd_ssc_t for internal uscsi commands */
22213 22209 ssc = sd_ssc_init(un);
22214 22210
22215 22211 is_valid = SD_IS_VALID_LABEL(un);
22216 22212
22217 22213 /*
22218 22214 * Moved this wait from sd_uscsi_strategy to here for
22219 22215 * reasons of deadlock prevention. Internal driver commands,
22220 22216 	 * specifically those to change a device's power level, result
22221 22217 * in a call to sd_uscsi_strategy.
22222 22218 */
22223 22219 mutex_enter(SD_MUTEX(un));
22224 22220 while ((un->un_state == SD_STATE_SUSPENDED) ||
22225 22221 (un->un_state == SD_STATE_PM_CHANGING)) {
22226 22222 cv_wait(&un->un_suspend_cv, SD_MUTEX(un));
22227 22223 }
22228 22224 /*
22229 22225 * Twiddling the counter here protects commands from now
22230 22226 * through to the top of sd_uscsi_strategy. Without the
22231 22227 	 * counter increment, a power down, for example, could get in
22232 22228 * after the above check for state is made and before
22233 22229 * execution gets to the top of sd_uscsi_strategy.
22234 22230 * That would cause problems.
22235 22231 */
22236 22232 un->un_ncmds_in_driver++;
22237 22233
22238 22234 if (!is_valid &&
22239 22235 (flag & (FNDELAY | FNONBLOCK))) {
22240 22236 switch (cmd) {
22241 22237 case DKIOCGGEOM: /* SD_PATH_DIRECT */
22242 22238 case DKIOCGVTOC:
22243 22239 case DKIOCGEXTVTOC:
22244 22240 case DKIOCGAPART:
22245 22241 case DKIOCPARTINFO:
22246 22242 case DKIOCEXTPARTINFO:
22247 22243 case DKIOCSGEOM:
22248 22244 case DKIOCSAPART:
22249 22245 case DKIOCGETEFI:
22250 22246 case DKIOCPARTITION:
22251 22247 case DKIOCSVTOC:
22252 22248 case DKIOCSEXTVTOC:
22253 22249 case DKIOCSETEFI:
22254 22250 case DKIOCGMBOOT:
22255 22251 case DKIOCSMBOOT:
22256 22252 case DKIOCG_PHYGEOM:
22257 22253 case DKIOCG_VIRTGEOM:
22258 22254 #if defined(__i386) || defined(__amd64)
22259 22255 case DKIOCSETEXTPART:
22260 22256 #endif
22261 22257 /* let cmlb handle it */
22262 22258 goto skip_ready_valid;
22263 22259
22264 22260 case CDROMPAUSE:
22265 22261 case CDROMRESUME:
22266 22262 case CDROMPLAYMSF:
22267 22263 case CDROMPLAYTRKIND:
22268 22264 case CDROMREADTOCHDR:
22269 22265 case CDROMREADTOCENTRY:
22270 22266 case CDROMSTOP:
22271 22267 case CDROMSTART:
22272 22268 case CDROMVOLCTRL:
22273 22269 case CDROMSUBCHNL:
22274 22270 case CDROMREADMODE2:
22275 22271 case CDROMREADMODE1:
22276 22272 case CDROMREADOFFSET:
22277 22273 case CDROMSBLKMODE:
22278 22274 case CDROMGBLKMODE:
22279 22275 case CDROMGDRVSPEED:
22280 22276 case CDROMSDRVSPEED:
22281 22277 case CDROMCDDA:
22282 22278 case CDROMCDXA:
22283 22279 case CDROMSUBCODE:
22284 22280 if (!ISCD(un)) {
22285 22281 un->un_ncmds_in_driver--;
22286 22282 ASSERT(un->un_ncmds_in_driver >= 0);
22287 22283 mutex_exit(SD_MUTEX(un));
22288 22284 err = ENOTTY;
22289 22285 goto done_without_assess;
22290 22286 }
22291 22287 break;
22292 22288 case FDEJECT:
22293 22289 case DKIOCEJECT:
22294 22290 case CDROMEJECT:
22295 22291 if (!un->un_f_eject_media_supported) {
22296 22292 un->un_ncmds_in_driver--;
22297 22293 ASSERT(un->un_ncmds_in_driver >= 0);
22298 22294 mutex_exit(SD_MUTEX(un));
22299 22295 err = ENOTTY;
22300 22296 goto done_without_assess;
22301 22297 }
22302 22298 break;
22303 22299 case DKIOCFLUSHWRITECACHE:
22304 22300 mutex_exit(SD_MUTEX(un));
22305 22301 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
22306 22302 if (err != 0) {
22307 22303 mutex_enter(SD_MUTEX(un));
22308 22304 un->un_ncmds_in_driver--;
22309 22305 ASSERT(un->un_ncmds_in_driver >= 0);
22310 22306 mutex_exit(SD_MUTEX(un));
22311 22307 err = EIO;
22312 22308 goto done_quick_assess;
22313 22309 }
22314 22310 mutex_enter(SD_MUTEX(un));
22315 22311 /* FALLTHROUGH */
22316 22312 case DKIOCREMOVABLE:
22317 22313 case DKIOCHOTPLUGGABLE:
22318 22314 case DKIOCINFO:
22319 22315 case DKIOCGMEDIAINFO:
22320 22316 case DKIOCGMEDIAINFOEXT:
22321 22317 case MHIOCENFAILFAST:
22322 22318 case MHIOCSTATUS:
22323 22319 case MHIOCTKOWN:
22324 22320 case MHIOCRELEASE:
22325 22321 case MHIOCGRP_INKEYS:
22326 22322 case MHIOCGRP_INRESV:
22327 22323 case MHIOCGRP_REGISTER:
22328 22324 case MHIOCGRP_CLEAR:
22329 22325 case MHIOCGRP_RESERVE:
22330 22326 case MHIOCGRP_PREEMPTANDABORT:
22331 22327 case MHIOCGRP_REGISTERANDIGNOREKEY:
22332 22328 case CDROMCLOSETRAY:
22333 22329 case USCSICMD:
22334 22330 goto skip_ready_valid;
22335 22331 default:
22336 22332 break;
22337 22333 }
22338 22334
22339 22335 mutex_exit(SD_MUTEX(un));
22340 22336 err = sd_ready_and_valid(ssc, SDPART(dev));
22341 22337 mutex_enter(SD_MUTEX(un));
22342 22338
22343 22339 if (err != SD_READY_VALID) {
22344 22340 switch (cmd) {
22345 22341 case DKIOCSTATE:
22346 22342 case CDROMGDRVSPEED:
22347 22343 case CDROMSDRVSPEED:
22348 22344 case FDEJECT: /* for eject command */
22349 22345 case DKIOCEJECT:
22350 22346 case CDROMEJECT:
22351 22347 case DKIOCREMOVABLE:
22352 22348 case DKIOCHOTPLUGGABLE:
22353 22349 break;
22354 22350 default:
22355 22351 if (un->un_f_has_removable_media) {
22356 22352 err = ENXIO;
22357 22353 } else {
22358 22354 /* Do not map SD_RESERVED_BY_OTHERS to EIO */
22359 22355 if (err == SD_RESERVED_BY_OTHERS) {
22360 22356 err = EACCES;
22361 22357 } else {
22362 22358 err = EIO;
22363 22359 }
22364 22360 }
22365 22361 un->un_ncmds_in_driver--;
22366 22362 ASSERT(un->un_ncmds_in_driver >= 0);
22367 22363 mutex_exit(SD_MUTEX(un));
22368 22364
22369 22365 goto done_without_assess;
22370 22366 }
22371 22367 }
22372 22368 }
22373 22369
22374 22370 skip_ready_valid:
22375 22371 mutex_exit(SD_MUTEX(un));
22376 22372
22377 22373 switch (cmd) {
22378 22374 case DKIOCINFO:
22379 22375 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCINFO\n");
22380 22376 err = sd_dkio_ctrl_info(dev, (caddr_t)arg, flag);
22381 22377 break;
22382 22378
22383 22379 case DKIOCGMEDIAINFO:
22384 22380 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFO\n");
22385 22381 err = sd_get_media_info(dev, (caddr_t)arg, flag);
22386 22382 break;
22387 22383
22388 22384 case DKIOCGMEDIAINFOEXT:
22389 22385 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGMEDIAINFOEXT\n");
22390 22386 err = sd_get_media_info_ext(dev, (caddr_t)arg, flag);
22391 22387 break;
22392 22388
22393 22389 case DKIOCGGEOM:
22394 22390 case DKIOCGVTOC:
22395 22391 case DKIOCGEXTVTOC:
22396 22392 case DKIOCGAPART:
22397 22393 case DKIOCPARTINFO:
22398 22394 case DKIOCEXTPARTINFO:
22399 22395 case DKIOCSGEOM:
22400 22396 case DKIOCSAPART:
22401 22397 case DKIOCGETEFI:
22402 22398 case DKIOCPARTITION:
22403 22399 case DKIOCSVTOC:
22404 22400 case DKIOCSEXTVTOC:
22405 22401 case DKIOCSETEFI:
22406 22402 case DKIOCGMBOOT:
22407 22403 case DKIOCSMBOOT:
22408 22404 case DKIOCG_PHYGEOM:
22409 22405 case DKIOCG_VIRTGEOM:
22410 22406 #if defined(__i386) || defined(__amd64)
22411 22407 case DKIOCSETEXTPART:
22412 22408 #endif
22413 22409 SD_TRACE(SD_LOG_IOCTL, un, "DKIOC %d\n", cmd);
22414 22410
22415 22411 /* TUR should spin up */
22416 22412
22417 22413 if (un->un_f_has_removable_media)
22418 22414 err = sd_send_scsi_TEST_UNIT_READY(ssc,
22419 22415 SD_CHECK_FOR_MEDIA);
22420 22416
22421 22417 else
22422 22418 err = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
22423 22419
22424 22420 if (err != 0)
22425 22421 goto done_with_assess;
22426 22422
22427 22423 err = cmlb_ioctl(un->un_cmlbhandle, dev,
22428 22424 cmd, arg, flag, cred_p, rval_p, (void *)SD_PATH_DIRECT);
22429 22425
22430 22426 if ((err == 0) &&
22431 22427 ((cmd == DKIOCSETEFI) ||
22432 22428 		    ((un->un_f_pkstats_enabled) &&
22433 22429 		    (cmd == DKIOCSAPART || cmd == DKIOCSVTOC ||
22434 22430 		    cmd == DKIOCSEXTVTOC)))) {
22435 22431
22436 22432 tmprval = cmlb_validate(un->un_cmlbhandle, CMLB_SILENT,
22437 22433 (void *)SD_PATH_DIRECT);
22438 22434 if ((tmprval == 0) && un->un_f_pkstats_enabled) {
22439 22435 sd_set_pstats(un);
22440 22436 SD_TRACE(SD_LOG_IO_PARTITION, un,
22441 22437 "sd_ioctl: un:0x%p pstats created and "
22442 22438 "set\n", un);
22443 22439 }
22444 22440 }
22445 22441
22446 22442 if ((cmd == DKIOCSVTOC || cmd == DKIOCSEXTVTOC) ||
22447 22443 ((cmd == DKIOCSETEFI) && (tmprval == 0))) {
22448 22444
22449 22445 mutex_enter(SD_MUTEX(un));
22450 22446 if (un->un_f_devid_supported &&
22451 22447 (un->un_f_opt_fab_devid == TRUE)) {
22452 22448 if (un->un_devid == NULL) {
22453 22449 sd_register_devid(ssc, SD_DEVINFO(un),
22454 22450 SD_TARGET_IS_UNRESERVED);
22455 22451 } else {
22456 22452 /*
22457 22453 * The device id for this disk
22458 22454 * has been fabricated. The
22459 22455 * device id must be preserved
22460 22456 * by writing it back out to
22461 22457 * disk.
22462 22458 */
22463 22459 if (sd_write_deviceid(ssc) != 0) {
22464 22460 ddi_devid_free(un->un_devid);
22465 22461 un->un_devid = NULL;
22466 22462 }
22467 22463 }
22468 22464 }
22469 22465 mutex_exit(SD_MUTEX(un));
22470 22466 }
22471 22467
22472 22468 break;
22473 22469
22474 22470 case DKIOCLOCK:
22475 22471 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCLOCK\n");
22476 22472 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
22477 22473 SD_PATH_STANDARD);
22478 22474 goto done_with_assess;
22479 22475
22480 22476 case DKIOCUNLOCK:
22481 22477 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCUNLOCK\n");
22482 22478 err = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
22483 22479 SD_PATH_STANDARD);
22484 22480 goto done_with_assess;
22485 22481
22486 22482 case DKIOCSTATE: {
22487 22483 enum dkio_state state;
22488 22484 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCSTATE\n");
22489 22485
22490 22486 if (ddi_copyin((void *)arg, &state, sizeof (int), flag) != 0) {
22491 22487 err = EFAULT;
22492 22488 } else {
22493 22489 err = sd_check_media(dev, state);
22494 22490 if (err == 0) {
22495 22491 if (ddi_copyout(&un->un_mediastate, (void *)arg,
22496 22492 sizeof (int), flag) != 0)
22497 22493 err = EFAULT;
22498 22494 }
22499 22495 }
22500 22496 break;
22501 22497 }
22502 22498
22503 22499 case DKIOCREMOVABLE:
22504 22500 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREMOVABLE\n");
22505 22501 i = un->un_f_has_removable_media ? 1 : 0;
22506 22502 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
22507 22503 err = EFAULT;
22508 22504 } else {
22509 22505 err = 0;
22510 22506 }
22511 22507 break;
22512 22508
22513 22509 case DKIOCHOTPLUGGABLE:
22514 22510 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCHOTPLUGGABLE\n");
22515 22511 i = un->un_f_is_hotpluggable ? 1 : 0;
22516 22512 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
22517 22513 err = EFAULT;
22518 22514 } else {
22519 22515 err = 0;
22520 22516 }
22521 22517 break;
22522 22518
22523 22519 case DKIOCREADONLY:
22524 22520 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCREADONLY\n");
22525 22521 i = 0;
22526 22522 if ((ISCD(un) && !un->un_f_mmc_writable_media) ||
22527 22523 (sr_check_wp(dev) != 0)) {
22528 22524 i = 1;
22529 22525 }
22530 22526 if (ddi_copyout(&i, (void *)arg, sizeof (int), flag) != 0) {
22531 22527 err = EFAULT;
22532 22528 } else {
22533 22529 err = 0;
22534 22530 }
22535 22531 break;
22536 22532
22537 22533 case DKIOCGTEMPERATURE:
22538 22534 SD_TRACE(SD_LOG_IOCTL, un, "DKIOCGTEMPERATURE\n");
22539 22535 err = sd_dkio_get_temp(dev, (caddr_t)arg, flag);
22540 22536 break;
22541 22537
22542 22538 case MHIOCENFAILFAST:
22543 22539 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCENFAILFAST\n");
22544 22540 if ((err = drv_priv(cred_p)) == 0) {
22545 22541 err = sd_mhdioc_failfast(dev, (caddr_t)arg, flag);
22546 22542 }
22547 22543 break;
22548 22544
22549 22545 case MHIOCTKOWN:
22550 22546 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCTKOWN\n");
22551 22547 if ((err = drv_priv(cred_p)) == 0) {
22552 22548 err = sd_mhdioc_takeown(dev, (caddr_t)arg, flag);
22553 22549 }
22554 22550 break;
22555 22551
22556 22552 case MHIOCRELEASE:
22557 22553 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCRELEASE\n");
22558 22554 if ((err = drv_priv(cred_p)) == 0) {
22559 22555 err = sd_mhdioc_release(dev);
22560 22556 }
22561 22557 break;
22562 22558
22563 22559 case MHIOCSTATUS:
22564 22560 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCSTATUS\n");
22565 22561 if ((err = drv_priv(cred_p)) == 0) {
22566 22562 switch (sd_send_scsi_TEST_UNIT_READY(ssc, 0)) {
22567 22563 case 0:
22568 22564 err = 0;
22569 22565 break;
22570 22566 case EACCES:
22571 22567 *rval_p = 1;
22572 22568 err = 0;
22573 22569 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
22574 22570 break;
22575 22571 default:
22576 22572 err = EIO;
22577 22573 goto done_with_assess;
22578 22574 }
22579 22575 }
22580 22576 break;
22581 22577
22582 22578 case MHIOCQRESERVE:
22583 22579 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCQRESERVE\n");
22584 22580 if ((err = drv_priv(cred_p)) == 0) {
22585 22581 err = sd_reserve_release(dev, SD_RESERVE);
22586 22582 }
22587 22583 break;
22588 22584
22589 22585 case MHIOCREREGISTERDEVID:
22590 22586 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCREREGISTERDEVID\n");
22591 22587 if (drv_priv(cred_p) == EPERM) {
22592 22588 err = EPERM;
22593 22589 } else if (!un->un_f_devid_supported) {
22594 22590 err = ENOTTY;
22595 22591 } else {
22596 22592 err = sd_mhdioc_register_devid(dev);
22597 22593 }
22598 22594 break;
22599 22595
22600 22596 case MHIOCGRP_INKEYS:
22601 22597 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INKEYS\n");
22602 22598 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
22603 22599 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22604 22600 err = ENOTSUP;
22605 22601 } else {
22606 22602 err = sd_mhdioc_inkeys(dev, (caddr_t)arg,
22607 22603 flag);
22608 22604 }
22609 22605 }
22610 22606 break;
22611 22607
22612 22608 case MHIOCGRP_INRESV:
22613 22609 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_INRESV\n");
22614 22610 if (((err = drv_priv(cred_p)) != EPERM) && arg != NULL) {
22615 22611 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22616 22612 err = ENOTSUP;
22617 22613 } else {
22618 22614 err = sd_mhdioc_inresv(dev, (caddr_t)arg, flag);
22619 22615 }
22620 22616 }
22621 22617 break;
22622 22618
22623 22619 case MHIOCGRP_REGISTER:
22624 22620 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTER\n");
22625 22621 if ((err = drv_priv(cred_p)) != EPERM) {
22626 22622 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22627 22623 err = ENOTSUP;
22628 22624 } else if (arg != NULL) {
22629 22625 mhioc_register_t reg;
22630 22626 if (ddi_copyin((void *)arg, ®,
22631 22627 sizeof (mhioc_register_t), flag) != 0) {
22632 22628 err = EFAULT;
22633 22629 } else {
22634 22630 err =
22635 22631 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22636 22632 ssc, SD_SCSI3_REGISTER,
22637 22633 (uchar_t *)®);
22638 22634 if (err != 0)
22639 22635 goto done_with_assess;
22640 22636 }
22641 22637 }
22642 22638 }
22643 22639 break;
22644 22640
22645 22641 case MHIOCGRP_CLEAR:
22646 22642 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_CLEAR\n");
22647 22643 if ((err = drv_priv(cred_p)) != EPERM) {
22648 22644 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22649 22645 err = ENOTSUP;
22650 22646 } else if (arg != NULL) {
22651 22647 mhioc_register_t reg;
22652 22648 if (ddi_copyin((void *)arg, ®,
22653 22649 sizeof (mhioc_register_t), flag) != 0) {
22654 22650 err = EFAULT;
22655 22651 } else {
22656 22652 err =
22657 22653 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22658 22654 ssc, SD_SCSI3_CLEAR,
22659 22655 (uchar_t *)®);
22660 22656 if (err != 0)
22661 22657 goto done_with_assess;
22662 22658 }
22663 22659 }
22664 22660 }
22665 22661 break;
22666 22662
22667 22663 case MHIOCGRP_RESERVE:
22668 22664 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_RESERVE\n");
22669 22665 if ((err = drv_priv(cred_p)) != EPERM) {
22670 22666 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22671 22667 err = ENOTSUP;
22672 22668 } else if (arg != NULL) {
22673 22669 mhioc_resv_desc_t resv_desc;
22674 22670 if (ddi_copyin((void *)arg, &resv_desc,
22675 22671 sizeof (mhioc_resv_desc_t), flag) != 0) {
22676 22672 err = EFAULT;
22677 22673 } else {
22678 22674 err =
22679 22675 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22680 22676 ssc, SD_SCSI3_RESERVE,
22681 22677 (uchar_t *)&resv_desc);
22682 22678 if (err != 0)
22683 22679 goto done_with_assess;
22684 22680 }
22685 22681 }
22686 22682 }
22687 22683 break;
22688 22684
22689 22685 case MHIOCGRP_PREEMPTANDABORT:
22690 22686 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_PREEMPTANDABORT\n");
22691 22687 if ((err = drv_priv(cred_p)) != EPERM) {
22692 22688 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22693 22689 err = ENOTSUP;
22694 22690 } else if (arg != NULL) {
22695 22691 mhioc_preemptandabort_t preempt_abort;
22696 22692 if (ddi_copyin((void *)arg, &preempt_abort,
22697 22693 sizeof (mhioc_preemptandabort_t),
22698 22694 flag) != 0) {
22699 22695 err = EFAULT;
22700 22696 } else {
22701 22697 err =
22702 22698 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22703 22699 ssc, SD_SCSI3_PREEMPTANDABORT,
22704 22700 (uchar_t *)&preempt_abort);
22705 22701 if (err != 0)
22706 22702 goto done_with_assess;
22707 22703 }
22708 22704 }
22709 22705 }
22710 22706 break;
22711 22707
22712 22708 case MHIOCGRP_REGISTERANDIGNOREKEY:
22713 22709 SD_TRACE(SD_LOG_IOCTL, un, "MHIOCGRP_REGISTERANDIGNOREKEY\n");
22714 22710 if ((err = drv_priv(cred_p)) != EPERM) {
22715 22711 if (un->un_reservation_type == SD_SCSI2_RESERVATION) {
22716 22712 err = ENOTSUP;
22717 22713 } else if (arg != NULL) {
22718 22714 mhioc_registerandignorekey_t r_and_i;
22719 22715 if (ddi_copyin((void *)arg, (void *)&r_and_i,
22720 22716 sizeof (mhioc_registerandignorekey_t),
22721 22717 flag) != 0) {
22722 22718 err = EFAULT;
22723 22719 } else {
22724 22720 err =
22725 22721 sd_send_scsi_PERSISTENT_RESERVE_OUT(
22726 22722 ssc, SD_SCSI3_REGISTERANDIGNOREKEY,
22727 22723 (uchar_t *)&r_and_i);
22728 22724 if (err != 0)
22729 22725 goto done_with_assess;
22730 22726 }
22731 22727 }
22732 22728 }
22733 22729 break;
22734 22730
22735 22731 case USCSICMD:
22736 22732 SD_TRACE(SD_LOG_IOCTL, un, "USCSICMD\n");
22737 22733 cr = ddi_get_cred();
22738 22734 if ((drv_priv(cred_p) != 0) && (drv_priv(cr) != 0)) {
22739 22735 err = EPERM;
22740 22736 } else {
22741 22737 enum uio_seg uioseg;
22742 22738
22743 22739 uioseg = (flag & FKIOCTL) ? UIO_SYSSPACE :
22744 22740 UIO_USERSPACE;
22745 22741 if (un->un_f_format_in_progress == TRUE) {
22746 22742 err = EAGAIN;
22747 22743 break;
22748 22744 }
22749 22745
22750 22746 err = sd_ssc_send(ssc,
22751 22747 (struct uscsi_cmd *)arg,
22752 22748 flag, uioseg, SD_PATH_STANDARD);
22753 22749 if (err != 0)
22754 22750 goto done_with_assess;
22755 22751 else
22756 22752 sd_ssc_assessment(ssc, SD_FMT_STANDARD);
22757 22753 }
22758 22754 break;
22759 22755
22760 22756 case CDROMPAUSE:
22761 22757 case CDROMRESUME:
22762 22758 SD_TRACE(SD_LOG_IOCTL, un, "PAUSE-RESUME\n");
22763 22759 if (!ISCD(un)) {
22764 22760 err = ENOTTY;
22765 22761 } else {
22766 22762 err = sr_pause_resume(dev, cmd);
22767 22763 }
22768 22764 break;
22769 22765
22770 22766 case CDROMPLAYMSF:
22771 22767 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYMSF\n");
22772 22768 if (!ISCD(un)) {
22773 22769 err = ENOTTY;
22774 22770 } else {
22775 22771 err = sr_play_msf(dev, (caddr_t)arg, flag);
22776 22772 }
22777 22773 break;
22778 22774
22779 22775 case CDROMPLAYTRKIND:
22780 22776 SD_TRACE(SD_LOG_IOCTL, un, "CDROMPLAYTRKIND\n");
22781 22777 #if defined(__i386) || defined(__amd64)
22782 22778 /*
22783 22779 * not supported on ATAPI CD drives, use CDROMPLAYMSF instead
22784 22780 */
22785 22781 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
22786 22782 #else
22787 22783 if (!ISCD(un)) {
22788 22784 #endif
22789 22785 err = ENOTTY;
22790 22786 } else {
22791 22787 err = sr_play_trkind(dev, (caddr_t)arg, flag);
22792 22788 }
22793 22789 break;
22794 22790
22795 22791 case CDROMREADTOCHDR:
22796 22792 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCHDR\n");
22797 22793 if (!ISCD(un)) {
22798 22794 err = ENOTTY;
22799 22795 } else {
22800 22796 err = sr_read_tochdr(dev, (caddr_t)arg, flag);
22801 22797 }
22802 22798 break;
22803 22799
22804 22800 case CDROMREADTOCENTRY:
22805 22801 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADTOCENTRY\n");
22806 22802 if (!ISCD(un)) {
22807 22803 err = ENOTTY;
22808 22804 } else {
22809 22805 err = sr_read_tocentry(dev, (caddr_t)arg, flag);
22810 22806 }
22811 22807 break;
22812 22808
22813 22809 case CDROMSTOP:
22814 22810 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTOP\n");
22815 22811 if (!ISCD(un)) {
22816 22812 err = ENOTTY;
22817 22813 } else {
22818 22814 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
22819 22815 SD_TARGET_STOP, SD_PATH_STANDARD);
22820 22816 goto done_with_assess;
22821 22817 }
22822 22818 break;
22823 22819
22824 22820 case CDROMSTART:
22825 22821 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSTART\n");
22826 22822 if (!ISCD(un)) {
22827 22823 err = ENOTTY;
22828 22824 } else {
22829 22825 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
22830 22826 SD_TARGET_START, SD_PATH_STANDARD);
22831 22827 goto done_with_assess;
22832 22828 }
22833 22829 break;
22834 22830
22835 22831 case CDROMCLOSETRAY:
22836 22832 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCLOSETRAY\n");
22837 22833 if (!ISCD(un)) {
22838 22834 err = ENOTTY;
22839 22835 } else {
22840 22836 err = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
22841 22837 SD_TARGET_CLOSE, SD_PATH_STANDARD);
22842 22838 goto done_with_assess;
22843 22839 }
22844 22840 break;
22845 22841
22846 22842 case FDEJECT: /* for eject command */
22847 22843 case DKIOCEJECT:
22848 22844 case CDROMEJECT:
22849 22845 SD_TRACE(SD_LOG_IOCTL, un, "EJECT\n");
22850 22846 if (!un->un_f_eject_media_supported) {
22851 22847 err = ENOTTY;
22852 22848 } else {
22853 22849 err = sr_eject(dev);
22854 22850 }
22855 22851 break;
22856 22852
22857 22853 case CDROMVOLCTRL:
22858 22854 SD_TRACE(SD_LOG_IOCTL, un, "CDROMVOLCTRL\n");
22859 22855 if (!ISCD(un)) {
22860 22856 err = ENOTTY;
22861 22857 } else {
22862 22858 err = sr_volume_ctrl(dev, (caddr_t)arg, flag);
22863 22859 }
22864 22860 break;
22865 22861
22866 22862 case CDROMSUBCHNL:
22867 22863 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCHNL\n");
22868 22864 if (!ISCD(un)) {
22869 22865 err = ENOTTY;
22870 22866 } else {
22871 22867 err = sr_read_subchannel(dev, (caddr_t)arg, flag);
22872 22868 }
22873 22869 break;
22874 22870
22875 22871 case CDROMREADMODE2:
22876 22872 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE2\n");
22877 22873 if (!ISCD(un)) {
22878 22874 err = ENOTTY;
22879 22875 } else if (un->un_f_cfg_is_atapi == TRUE) {
22880 22876 /*
22881 22877 * If the drive supports READ CD, use that instead of
22882 22878 * switching the LBA size via a MODE SELECT
22883 22879 * Block Descriptor
22884 22880 */
22885 22881 err = sr_read_cd_mode2(dev, (caddr_t)arg, flag);
22886 22882 } else {
22887 22883 err = sr_read_mode2(dev, (caddr_t)arg, flag);
22888 22884 }
22889 22885 break;
22890 22886
22891 22887 case CDROMREADMODE1:
22892 22888 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADMODE1\n");
22893 22889 if (!ISCD(un)) {
22894 22890 err = ENOTTY;
22895 22891 } else {
22896 22892 err = sr_read_mode1(dev, (caddr_t)arg, flag);
22897 22893 }
22898 22894 break;
22899 22895
22900 22896 case CDROMREADOFFSET:
22901 22897 SD_TRACE(SD_LOG_IOCTL, un, "CDROMREADOFFSET\n");
22902 22898 if (!ISCD(un)) {
22903 22899 err = ENOTTY;
22904 22900 } else {
22905 22901 err = sr_read_sony_session_offset(dev, (caddr_t)arg,
22906 22902 flag);
22907 22903 }
22908 22904 break;
22909 22905
22910 22906 case CDROMSBLKMODE:
22911 22907 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSBLKMODE\n");
22912 22908 /*
22913 22909 		 * There is no means of changing the block size on ATAPI
22914 22910 		 * drives, so return ENOTTY if the drive type is ATAPI.
22915 22911 */
22916 22912 if (!ISCD(un) || (un->un_f_cfg_is_atapi == TRUE)) {
22917 22913 err = ENOTTY;
22918 22914 } else if (un->un_f_mmc_cap == TRUE) {
22919 22915
22920 22916 /*
22921 22917 * MMC Devices do not support changing the
22922 22918 * logical block size
22923 22919 *
22924 22920 * Note: EINVAL is being returned instead of ENOTTY to
22925 22921 			 * maintain consistency with the original mmc
22926 22922 * driver update.
22927 22923 */
22928 22924 err = EINVAL;
22929 22925 } else {
22930 22926 mutex_enter(SD_MUTEX(un));
22931 22927 if ((!(un->un_exclopen & (1<<SDPART(dev)))) ||
22932 22928 (un->un_ncmds_in_transport > 0)) {
22933 22929 mutex_exit(SD_MUTEX(un));
22934 22930 err = EINVAL;
22935 22931 } else {
22936 22932 mutex_exit(SD_MUTEX(un));
22937 22933 err = sr_change_blkmode(dev, cmd, arg, flag);
22938 22934 }
22939 22935 }
22940 22936 break;
22941 22937
22942 22938 case CDROMGBLKMODE:
22943 22939 SD_TRACE(SD_LOG_IOCTL, un, "CDROMGBLKMODE\n");
22944 22940 if (!ISCD(un)) {
22945 22941 err = ENOTTY;
22946 22942 } else if ((un->un_f_cfg_is_atapi != FALSE) &&
22947 22943 (un->un_f_blockcount_is_valid != FALSE)) {
22948 22944 /*
22949 22945 			 * The drive is an ATAPI drive, so return the target
22950 22946 			 * block size, since we cannot change the blocksize
22951 22947 			 * on ATAPI drives. Used primarily to detect whether
22952 22948 			 * an ATAPI cdrom is present.
22953 22949 */
22954 22950 if (ddi_copyout(&un->un_tgt_blocksize, (void *)arg,
22955 22951 sizeof (int), flag) != 0) {
22956 22952 err = EFAULT;
22957 22953 } else {
22958 22954 err = 0;
22959 22955 }
22960 22956
22961 22957 } else {
22962 22958 /*
22963 22959 * Drive supports changing block sizes via a Mode
22964 22960 * Select.
22965 22961 */
22966 22962 err = sr_change_blkmode(dev, cmd, arg, flag);
22967 22963 }
22968 22964 break;
22969 22965
22970 22966 case CDROMGDRVSPEED:
22971 22967 case CDROMSDRVSPEED:
22972 22968 SD_TRACE(SD_LOG_IOCTL, un, "CDROMXDRVSPEED\n");
22973 22969 if (!ISCD(un)) {
22974 22970 err = ENOTTY;
22975 22971 } else if (un->un_f_mmc_cap == TRUE) {
22976 22972 /*
22977 22973 * Note: In the future the driver implementation
22978 22974 * for getting and
22979 22975 * setting cd speed should entail:
22980 22976 * 1) If non-mmc try the Toshiba mode page
22981 22977 * (sr_change_speed)
22982 22978 * 2) If mmc but no support for Real Time Streaming try
22983 22979 * the SET CD SPEED (0xBB) command
22984 22980 * (sr_atapi_change_speed)
22985 22981 * 3) If mmc and support for Real Time Streaming
22986 22982 * try the GET PERFORMANCE and SET STREAMING
22987 22983 * commands (not yet implemented, 4380808)
22988 22984 */
22989 22985 /*
22990 22986 * As per recent MMC spec, CD-ROM speed is variable
22991 22987 			 * and changes with LBA. Since there is no such
22992 22988 			 * thing as drive speed now, fail this ioctl.
22993 22989 			 *
22994 22990 			 * Note: EINVAL is returned for consistency with the
22995 22991 			 * original implementation, which included support for
22996 22992 			 * getting the drive speed of mmc devices but not for
22997 22993 			 * setting the drive speed. Thus EINVAL would be returned
22998 22994 * if a set request was made for an mmc device.
22999 22995 * We no longer support get or set speed for
23000 22996 * mmc but need to remain consistent with regard
23001 22997 * to the error code returned.
23002 22998 */
23003 22999 err = EINVAL;
23004 23000 } else if (un->un_f_cfg_is_atapi == TRUE) {
23005 23001 err = sr_atapi_change_speed(dev, cmd, arg, flag);
23006 23002 } else {
23007 23003 err = sr_change_speed(dev, cmd, arg, flag);
23008 23004 }
23009 23005 break;
23010 23006
23011 23007 case CDROMCDDA:
23012 23008 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDDA\n");
23013 23009 if (!ISCD(un)) {
23014 23010 err = ENOTTY;
23015 23011 } else {
23016 23012 err = sr_read_cdda(dev, (void *)arg, flag);
23017 23013 }
23018 23014 break;
23019 23015
23020 23016 case CDROMCDXA:
23021 23017 SD_TRACE(SD_LOG_IOCTL, un, "CDROMCDXA\n");
23022 23018 if (!ISCD(un)) {
23023 23019 err = ENOTTY;
23024 23020 } else {
23025 23021 err = sr_read_cdxa(dev, (caddr_t)arg, flag);
23026 23022 }
23027 23023 break;
23028 23024
23029 23025 case CDROMSUBCODE:
23030 23026 SD_TRACE(SD_LOG_IOCTL, un, "CDROMSUBCODE\n");
23031 23027 if (!ISCD(un)) {
23032 23028 err = ENOTTY;
23033 23029 } else {
23034 23030 err = sr_read_all_subcodes(dev, (caddr_t)arg, flag);
23035 23031 }
23036 23032 break;
23037 23033
23038 23034
23039 23035 #ifdef SDDEBUG
23040 23036 /* RESET/ABORTS testing ioctls */
23041 23037 case DKIOCRESET: {
23042 23038 int reset_level;
23043 23039
23044 23040 if (ddi_copyin((void *)arg, &reset_level, sizeof (int), flag)) {
23045 23041 err = EFAULT;
23046 23042 } else {
23047 23043 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCRESET: "
23048 23044 			    "reset_level = 0x%x\n", reset_level);
23049 23045 if (scsi_reset(SD_ADDRESS(un), reset_level)) {
23050 23046 err = 0;
23051 23047 } else {
23052 23048 err = EIO;
23053 23049 }
23054 23050 }
23055 23051 break;
23056 23052 }
23057 23053
23058 23054 case DKIOCABORT:
23059 23055 SD_INFO(SD_LOG_IOCTL, un, "sdioctl: DKIOCABORT:\n");
23060 23056 if (scsi_abort(SD_ADDRESS(un), NULL)) {
23061 23057 err = 0;
23062 23058 } else {
23063 23059 err = EIO;
23064 23060 }
23065 23061 break;
23066 23062 #endif
23067 23063
23068 23064 #ifdef SD_FAULT_INJECTION
23069 23065 /* SDIOC FaultInjection testing ioctls */
23070 23066 case SDIOCSTART:
23071 23067 case SDIOCSTOP:
23072 23068 case SDIOCINSERTPKT:
23073 23069 case SDIOCINSERTXB:
23074 23070 case SDIOCINSERTUN:
23075 23071 case SDIOCINSERTARQ:
23076 23072 case SDIOCPUSH:
23077 23073 case SDIOCRETRIEVE:
23078 23074 case SDIOCRUN:
23079 23075 		SD_INFO(SD_LOG_SDTEST, un, "sdioctl: "
23080 23076 "SDIOC detected cmd:0x%X:\n", cmd);
23081 23077 /* call error generator */
23082 23078 sd_faultinjection_ioctl(cmd, arg, un);
23083 23079 err = 0;
23084 23080 break;
23085 23081
23086 23082 #endif /* SD_FAULT_INJECTION */
23087 23083
23088 23084 case DKIOCFLUSHWRITECACHE:
23089 23085 {
23090 23086 struct dk_callback *dkc = (struct dk_callback *)arg;
23091 23087
23092 23088 mutex_enter(SD_MUTEX(un));
23093 23089 if (!un->un_f_sync_cache_supported ||
23094 23090 !un->un_f_write_cache_enabled) {
23095 23091 err = un->un_f_sync_cache_supported ?
23096 23092 0 : ENOTSUP;
23097 23093 mutex_exit(SD_MUTEX(un));
23098 23094 if ((flag & FKIOCTL) && dkc != NULL &&
23099 23095 dkc->dkc_callback != NULL) {
23100 23096 (*dkc->dkc_callback)(dkc->dkc_cookie,
23101 23097 err);
23102 23098 /*
23103 23099 * Did callback and reported error.
23104 23100 * Since we did a callback, ioctl
23105 23101 * should return 0.
23106 23102 */
23107 23103 err = 0;
23108 23104 }
23109 23105 break;
23110 23106 }
23111 23107 mutex_exit(SD_MUTEX(un));
23112 23108
23113 23109 if ((flag & FKIOCTL) && dkc != NULL &&
23114 23110 dkc->dkc_callback != NULL) {
23115 23111 /* async SYNC CACHE request */
23116 23112 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, dkc);
23117 23113 } else {
23118 23114 /* synchronous SYNC CACHE request */
23119 23115 err = sd_send_scsi_SYNCHRONIZE_CACHE(un, NULL);
23120 23116 }
23121 23117 }
23122 23118 break;
23123 23119
23124 23120 case DKIOCGETWCE: {
23125 23121
23126 23122 int wce;
23127 23123
23128 23124 if ((err = sd_get_write_cache_enabled(ssc, &wce)) != 0) {
23129 23125 break;
23130 23126 }
23131 23127
23132 23128 if (ddi_copyout(&wce, (void *)arg, sizeof (wce), flag)) {
23133 23129 err = EFAULT;
23134 23130 }
23135 23131 break;
23136 23132 }
23137 23133
23138 23134 case DKIOCSETWCE: {
23139 23135
23140 23136 int wce, sync_supported;
23141 23137 int cur_wce = 0;
23142 23138
23143 23139 if (ddi_copyin((void *)arg, &wce, sizeof (wce), flag)) {
23144 23140 err = EFAULT;
23145 23141 break;
23146 23142 }
23147 23143
23148 23144 /*
23149 23145 * Synchronize multiple threads trying to enable
23150 23146 * or disable the cache via the un_f_wcc_cv
23151 23147 * condition variable.
23152 23148 */
23153 23149 mutex_enter(SD_MUTEX(un));
23154 23150
23155 23151 /*
23156 23152 * Don't allow the cache to be enabled if the
23157 23153 * config file has it disabled.
23158 23154 */
23159 23155 if (un->un_f_opt_disable_cache && wce) {
23160 23156 mutex_exit(SD_MUTEX(un));
23161 23157 err = EINVAL;
23162 23158 break;
23163 23159 }
23164 23160
23165 23161 /*
23166 23162 * Wait for write cache change in progress
23167 23163 * bit to be clear before proceeding.
23168 23164 */
23169 23165 while (un->un_f_wcc_inprog)
23170 23166 cv_wait(&un->un_wcc_cv, SD_MUTEX(un));
23171 23167
23172 23168 un->un_f_wcc_inprog = 1;
23173 23169
23174 23170 mutex_exit(SD_MUTEX(un));
23175 23171
23176 23172 /*
23177 23173 * Get the current write cache state
23178 23174 */
23179 23175 if ((err = sd_get_write_cache_enabled(ssc, &cur_wce)) != 0) {
23180 23176 mutex_enter(SD_MUTEX(un));
23181 23177 un->un_f_wcc_inprog = 0;
23182 23178 cv_broadcast(&un->un_wcc_cv);
23183 23179 mutex_exit(SD_MUTEX(un));
23184 23180 break;
23185 23181 }
23186 23182
23187 23183 mutex_enter(SD_MUTEX(un));
23188 23184 un->un_f_write_cache_enabled = (cur_wce != 0);
23189 23185
23190 23186 if (un->un_f_write_cache_enabled && wce == 0) {
23191 23187 /*
23192 23188 * Disable the write cache. Don't clear
23193 23189 * un_f_write_cache_enabled until after
23194 23190 * the mode select and flush are complete.
23195 23191 */
23196 23192 sync_supported = un->un_f_sync_cache_supported;
23197 23193
23198 23194 /*
23199 23195 * If cache flush is suppressed, we assume that the
23200 23196 * controller firmware will take care of managing the
23201 23197 * write cache for us: no need to explicitly
23202 23198 * disable it.
23203 23199 */
23204 23200 if (!un->un_f_suppress_cache_flush) {
23205 23201 mutex_exit(SD_MUTEX(un));
23206 23202 if ((err = sd_cache_control(ssc,
23207 23203 SD_CACHE_NOCHANGE,
23208 23204 SD_CACHE_DISABLE)) == 0 &&
23209 23205 sync_supported) {
23210 23206 err = sd_send_scsi_SYNCHRONIZE_CACHE(un,
23211 23207 NULL);
23212 23208 }
23213 23209 } else {
23214 23210 mutex_exit(SD_MUTEX(un));
23215 23211 }
23216 23212
23217 23213 mutex_enter(SD_MUTEX(un));
23218 23214 if (err == 0) {
23219 23215 un->un_f_write_cache_enabled = 0;
23220 23216 }
23221 23217
23222 23218 } else if (!un->un_f_write_cache_enabled && wce != 0) {
23223 23219 /*
23224 23220 * Set un_f_write_cache_enabled first, so there is
23225 23221 * no window where the cache is enabled, but the
23226 23222 * bit says it isn't.
23227 23223 */
23228 23224 un->un_f_write_cache_enabled = 1;
23229 23225
23230 23226 /*
23231 23227 * If cache flush is suppressed, we assume that the
23232 23228 * controller firmware will take care of managing the
23233 23229 * write cache for us: no need to explicitly
23234 23230 * enable it.
23235 23231 */
23236 23232 if (!un->un_f_suppress_cache_flush) {
23237 23233 mutex_exit(SD_MUTEX(un));
23238 23234 err = sd_cache_control(ssc, SD_CACHE_NOCHANGE,
23239 23235 SD_CACHE_ENABLE);
23240 23236 } else {
23241 23237 mutex_exit(SD_MUTEX(un));
23242 23238 }
23243 23239
23244 23240 mutex_enter(SD_MUTEX(un));
23245 23241
23246 23242 if (err) {
23247 23243 un->un_f_write_cache_enabled = 0;
23248 23244 }
23249 23245 }
23250 23246
23251 23247 un->un_f_wcc_inprog = 0;
23252 23248 cv_broadcast(&un->un_wcc_cv);
23253 23249 mutex_exit(SD_MUTEX(un));
23254 23250 break;
23255 23251 }
23256 23252
23257 23253 default:
23258 23254 err = ENOTTY;
23259 23255 break;
23260 23256 }
23261 23257 mutex_enter(SD_MUTEX(un));
23262 23258 un->un_ncmds_in_driver--;
23263 23259 ASSERT(un->un_ncmds_in_driver >= 0);
23264 23260 mutex_exit(SD_MUTEX(un));
23265 23261
23266 23262
23267 23263 done_without_assess:
23268 23264 sd_ssc_fini(ssc);
23269 23265
23270 23266 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err);
23271 23267 return (err);
23272 23268
23273 23269 done_with_assess:
23274 23270 mutex_enter(SD_MUTEX(un));
23275 23271 un->un_ncmds_in_driver--;
23276 23272 ASSERT(un->un_ncmds_in_driver >= 0);
23277 23273 mutex_exit(SD_MUTEX(un));
23278 23274
23279 23275 done_quick_assess:
23280 23276 if (err != 0)
23281 23277 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23282 23278 /* Uninitialize sd_ssc_t pointer */
23283 23279 sd_ssc_fini(ssc);
23284 23280
23285 23281 SD_TRACE(SD_LOG_IOCTL, un, "sdioctl: exit: %d\n", err);
23286 23282 return (err);
23287 23283 }
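
/*
 * Illustrative sketch only, not part of this driver: a minimal userland
 * caller exercising the DKIOCFLUSHWRITECACHE case handled in sdioctl()
 * above. For a non-FKIOCTL caller the dk_callback argument is not used,
 * so passing NULL requests a synchronous flush. The function name and
 * device path are hypothetical examples.
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/dkio.h>
#include <fcntl.h>
#include <stropts.h>
#include <unistd.h>
#include <stdio.h>

int
flush_write_cache_example(const char *path)
{
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open");
		return (-1);
	}
	/* Blocks until the SYNCHRONIZE CACHE command completes. */
	if (ioctl(fd, DKIOCFLUSHWRITECACHE, NULL) != 0) {
		perror("DKIOCFLUSHWRITECACHE");
		(void) close(fd);
		return (-1);
	}
	(void) close(fd);
	return (0);
}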
23288 23284
23289 23285
23290 23286 /*
23291 23287 * Function: sd_dkio_ctrl_info
23292 23288 *
23293 23289 * Description: This routine is the driver entry point for handling controller
23294 23290 * information ioctl requests (DKIOCINFO).
23295 23291 *
23296 23292 * Arguments: dev - the device number
23297 23293 * arg - pointer to user provided dk_cinfo structure
23298 23294 * specifying the controller type and attributes.
23299 23295 * flag - this argument is a pass through to ddi_copyxxx()
23300 23296 * directly from the mode argument of ioctl().
23301 23297 *
23302 23298 * Return Code: 0
23303 23299 * EFAULT
23304 23300 * ENXIO
23305 23301 */
23306 23302
23307 23303 static int
23308 23304 sd_dkio_ctrl_info(dev_t dev, caddr_t arg, int flag)
23309 23305 {
23310 23306 struct sd_lun *un = NULL;
23311 23307 struct dk_cinfo *info;
23312 23308 dev_info_t *pdip;
23313 23309 int lun, tgt;
23314 23310
23315 23311 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23316 23312 return (ENXIO);
23317 23313 }
23318 23314
23319 23315 info = (struct dk_cinfo *)
23320 23316 kmem_zalloc(sizeof (struct dk_cinfo), KM_SLEEP);
23321 23317
23322 23318 switch (un->un_ctype) {
23323 23319 case CTYPE_CDROM:
23324 23320 info->dki_ctype = DKC_CDROM;
23325 23321 break;
23326 23322 default:
23327 23323 info->dki_ctype = DKC_SCSI_CCS;
23328 23324 break;
23329 23325 }
23330 23326 pdip = ddi_get_parent(SD_DEVINFO(un));
23331 23327 info->dki_cnum = ddi_get_instance(pdip);
23332 23328 if (strlen(ddi_get_name(pdip)) < DK_DEVLEN) {
23333 23329 (void) strcpy(info->dki_cname, ddi_get_name(pdip));
23334 23330 } else {
23335 23331 (void) strncpy(info->dki_cname, ddi_node_name(pdip),
23336 23332 DK_DEVLEN - 1);
23337 23333 }
23338 23334
23339 23335 lun = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
23340 23336 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_LUN, 0);
23341 23337 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, SD_DEVINFO(un),
23342 23338 DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET, 0);
23343 23339
23344 23340 /* Unit Information */
23345 23341 info->dki_unit = ddi_get_instance(SD_DEVINFO(un));
23346 23342 info->dki_slave = ((tgt << 3) | lun);
23347 23343 (void) strncpy(info->dki_dname, ddi_driver_name(SD_DEVINFO(un)),
23348 23344 DK_DEVLEN - 1);
23349 23345 info->dki_flags = DKI_FMTVOL;
23350 23346 info->dki_partition = SDPART(dev);
23351 23347
23352 23348 /* Max Transfer size of this device in blocks */
23353 23349 info->dki_maxtransfer = un->un_max_xfer_size / un->un_sys_blocksize;
23354 23350 info->dki_addr = 0;
23355 23351 info->dki_space = 0;
23356 23352 info->dki_prio = 0;
23357 23353 info->dki_vec = 0;
23358 23354
23359 23355 if (ddi_copyout(info, arg, sizeof (struct dk_cinfo), flag) != 0) {
23360 23356 kmem_free(info, sizeof (struct dk_cinfo));
23361 23357 return (EFAULT);
23362 23358 } else {
23363 23359 kmem_free(info, sizeof (struct dk_cinfo));
23364 23360 return (0);
23365 23361 }
23366 23362 }
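
/*
 * Illustrative sketch only, not part of this driver: fetching the
 * dk_cinfo structure that sd_dkio_ctrl_info() fills in above. The
 * function name and device path are hypothetical examples.
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/dkio.h>
#include <fcntl.h>
#include <stropts.h>
#include <unistd.h>
#include <stdio.h>

int
print_ctrl_info_example(const char *path)
{
	struct dk_cinfo ci;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return (-1);
	}
	if (ioctl(fd, DKIOCINFO, &ci) != 0) {
		perror("DKIOCINFO");
		(void) close(fd);
		return (-1);
	}
	/* dki_maxtransfer is in un_sys_blocksize blocks, per the code above. */
	(void) printf("ctype=%d cname=%s unit=%d maxtransfer=%d\n",
	    (int)ci.dki_ctype, ci.dki_cname, (int)ci.dki_unit,
	    (int)ci.dki_maxtransfer);
	(void) close(fd);
	return (0);
}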
23367 23363
23368 23364 /*
23369 23365 * Function: sd_get_media_info_com
23370 23366 *
23371 23367 * Description: This routine returns the information required to populate
23372 23368 * the fields for the dk_minfo/dk_minfo_ext structures.
23373 23369 *
23374 23370 * Arguments: dev - the device number
23375 23371 * dki_media_type - media_type
23376 23372 * dki_lbsize - logical block size
23377 23373 * dki_capacity - capacity in blocks
23378 23374 * dki_pbsize - physical block size (if requested)
23379 23375 *
23380 23376 * Return Code: 0
23381 23377  *			EACCES
23382 23378 * EFAULT
23383 23379 * ENXIO
23384 23380 * EIO
23385 23381 */
23386 23382 static int
23387 23383 sd_get_media_info_com(dev_t dev, uint_t *dki_media_type, uint_t *dki_lbsize,
23388 23384 diskaddr_t *dki_capacity, uint_t *dki_pbsize)
23389 23385 {
23390 23386 struct sd_lun *un = NULL;
23391 23387 struct uscsi_cmd com;
23392 23388 struct scsi_inquiry *sinq;
23393 23389 u_longlong_t media_capacity;
23394 23390 uint64_t capacity;
23395 23391 uint_t lbasize;
23396 23392 uint_t pbsize;
23397 23393 uchar_t *out_data;
23398 23394 uchar_t *rqbuf;
23399 23395 int rval = 0;
23400 23396 int rtn;
23401 23397 sd_ssc_t *ssc;
23402 23398
23403 23399 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
23404 23400 (un->un_state == SD_STATE_OFFLINE)) {
23405 23401 return (ENXIO);
23406 23402 }
23407 23403
23408 23404 SD_TRACE(SD_LOG_IOCTL_DKIO, un, "sd_get_media_info_com: entry\n");
23409 23405
23410 23406 out_data = kmem_zalloc(SD_PROFILE_HEADER_LEN, KM_SLEEP);
23411 23407 rqbuf = kmem_zalloc(SENSE_LENGTH, KM_SLEEP);
23412 23408 ssc = sd_ssc_init(un);
23413 23409
23414 23410 /* Issue a TUR to determine if the drive is ready with media present */
23415 23411 rval = sd_send_scsi_TEST_UNIT_READY(ssc, SD_CHECK_FOR_MEDIA);
23416 23412 if (rval == ENXIO) {
23417 23413 goto done;
23418 23414 } else if (rval != 0) {
23419 23415 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23420 23416 }
23421 23417
23422 23418 /* Now get configuration data */
23423 23419 if (ISCD(un)) {
23424 23420 *dki_media_type = DK_CDROM;
23425 23421
23426 23422 /* Allow SCMD_GET_CONFIGURATION to MMC devices only */
23427 23423 if (un->un_f_mmc_cap == TRUE) {
23428 23424 rtn = sd_send_scsi_GET_CONFIGURATION(ssc, &com, rqbuf,
23429 23425 SENSE_LENGTH, out_data, SD_PROFILE_HEADER_LEN,
23430 23426 SD_PATH_STANDARD);
23431 23427
23432 23428 if (rtn) {
23433 23429 /*
23434 23430 				 * We ignore all failures for CD, and need to make
23435 23431 				 * the assessment before the processing code so
23436 23432 				 * that no FMA assessment is missed.
23437 23433 */
23438 23434 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23439 23435 /*
23440 23436 * Failed for other than an illegal request
23441 23437 * or command not supported
23442 23438 */
23443 23439 if ((com.uscsi_status == STATUS_CHECK) &&
23444 23440 (com.uscsi_rqstatus == STATUS_GOOD)) {
23445 23441 if ((rqbuf[2] != KEY_ILLEGAL_REQUEST) ||
23446 23442 (rqbuf[12] != 0x20)) {
23447 23443 rval = EIO;
23448 23444 goto no_assessment;
23449 23445 }
23450 23446 }
23451 23447 } else {
23452 23448 /*
23453 23449 * The GET CONFIGURATION command succeeded
23454 23450 * so set the media type according to the
23455 23451 * returned data
23456 23452 */
23457 23453 *dki_media_type = out_data[6];
23458 23454 *dki_media_type <<= 8;
23459 23455 *dki_media_type |= out_data[7];
23460 23456 }
23461 23457 }
23462 23458 } else {
23463 23459 /*
23464 23460 * The profile list is not available, so we attempt to identify
23465 23461 * the media type based on the inquiry data
23466 23462 */
23467 23463 sinq = un->un_sd->sd_inq;
23468 23464 if ((sinq->inq_dtype == DTYPE_DIRECT) ||
23469 23465 (sinq->inq_dtype == DTYPE_OPTICAL)) {
23470 23466 /* This is a direct access device or optical disk */
23471 23467 *dki_media_type = DK_FIXED_DISK;
23472 23468
23473 23469 if ((bcmp(sinq->inq_vid, "IOMEGA", 6) == 0) ||
23474 23470 (bcmp(sinq->inq_vid, "iomega", 6) == 0)) {
23475 23471 if ((bcmp(sinq->inq_pid, "ZIP", 3) == 0)) {
23476 23472 *dki_media_type = DK_ZIP;
23477 23473 } else if (
23478 23474 (bcmp(sinq->inq_pid, "jaz", 3) == 0)) {
23479 23475 *dki_media_type = DK_JAZ;
23480 23476 }
23481 23477 }
23482 23478 } else {
23483 23479 /*
23484 23480 * Not a CD, direct access or optical disk so return
23485 23481 * unknown media
23486 23482 */
23487 23483 *dki_media_type = DK_UNKNOWN;
23488 23484 }
23489 23485 }
23490 23486
23491 23487 /*
23492 23488 * Now read the capacity so we can provide the lbasize,
23493 23489 * pbsize and capacity.
23494 23490 */
23495 23491 if (dki_pbsize && un->un_f_descr_format_supported) {
23496 23492 rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize,
23497 23493 &pbsize, SD_PATH_DIRECT);
23498 23494
23499 23495 /*
23500 23496 * Override the physical blocksize if the instance already
23501 23497 * has a larger value.
23502 23498 */
23503 23499 pbsize = MAX(pbsize, un->un_phy_blocksize);
23504 23500 }
23505 23501
23506 23502 if (dki_pbsize == NULL || rval != 0 ||
23507 23503 !un->un_f_descr_format_supported) {
23508 23504 rval = sd_send_scsi_READ_CAPACITY(ssc, &capacity, &lbasize,
23509 23505 SD_PATH_DIRECT);
23510 23506
23511 23507 switch (rval) {
23512 23508 case 0:
23513 23509 if (un->un_f_enable_rmw &&
23514 23510 un->un_phy_blocksize != 0) {
23515 23511 pbsize = un->un_phy_blocksize;
23516 23512 } else {
23517 23513 pbsize = lbasize;
23518 23514 }
23519 23515 media_capacity = capacity;
23520 23516
23521 23517 /*
23522 23518 * sd_send_scsi_READ_CAPACITY() reports capacity in
23523 23519 * un->un_sys_blocksize chunks. So we need to convert
23524 23520 * it into cap.lbsize chunks.
23525 23521 */
23526 23522 if (un->un_f_has_removable_media) {
23527 23523 media_capacity *= un->un_sys_blocksize;
23528 23524 media_capacity /= lbasize;
23529 23525 }
23530 23526 break;
23531 23527 case EACCES:
23532 23528 rval = EACCES;
23533 23529 goto done;
23534 23530 default:
23535 23531 rval = EIO;
23536 23532 goto done;
23537 23533 }
23538 23534 } else {
23539 23535 if (un->un_f_enable_rmw &&
23540 23536 !ISP2(pbsize % DEV_BSIZE)) {
23541 23537 pbsize = SSD_SECSIZE;
23542 23538 } else if (!ISP2(lbasize % DEV_BSIZE) ||
23543 23539 !ISP2(pbsize % DEV_BSIZE)) {
23544 23540 pbsize = lbasize = DEV_BSIZE;
23545 23541 }
23546 23542 media_capacity = capacity;
23547 23543 }
23548 23544
23549 23545 /*
23550 23546 * If lun is expanded dynamically, update the un structure.
23551 23547 */
23552 23548 mutex_enter(SD_MUTEX(un));
23553 23549 if ((un->un_f_blockcount_is_valid == TRUE) &&
23554 23550 (un->un_f_tgt_blocksize_is_valid == TRUE) &&
23555 23551 (capacity > un->un_blockcount)) {
23556 23552 un->un_f_expnevent = B_FALSE;
23557 23553 sd_update_block_info(un, lbasize, capacity);
23558 23554 }
23559 23555 mutex_exit(SD_MUTEX(un));
23560 23556
23561 23557 *dki_lbsize = lbasize;
23562 23558 *dki_capacity = media_capacity;
23563 23559 if (dki_pbsize)
23564 23560 *dki_pbsize = pbsize;
23565 23561
23566 23562 done:
23567 23563 if (rval != 0) {
23568 23564 if (rval == EIO)
23569 23565 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
23570 23566 else
23571 23567 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23572 23568 }
23573 23569 no_assessment:
23574 23570 sd_ssc_fini(ssc);
23575 23571 kmem_free(out_data, SD_PROFILE_HEADER_LEN);
23576 23572 kmem_free(rqbuf, SENSE_LENGTH);
23577 23573 return (rval);
23578 23574 }
23579 23575
23580 23576 /*
23581 23577 * Function: sd_get_media_info
23582 23578 *
23583 23579 * Description: This routine is the driver entry point for handling ioctl
23584 23580 * requests for the media type or command set profile used by the
23585 23581 * drive to operate on the media (DKIOCGMEDIAINFO).
23586 23582 *
23587 23583 * Arguments: dev - the device number
23588 23584 * arg - pointer to user provided dk_minfo structure
23589 23585 * specifying the media type, logical block size and
23590 23586 * drive capacity.
23591 23587 * flag - this argument is a pass through to ddi_copyxxx()
23592 23588 * directly from the mode argument of ioctl().
23593 23589 *
23594 23590 * Return Code: returns the value from sd_get_media_info_com
23595 23591 */
23596 23592 static int
23597 23593 sd_get_media_info(dev_t dev, caddr_t arg, int flag)
23598 23594 {
23599 23595 struct dk_minfo mi;
23600 23596 int rval;
23601 23597
23602 23598 rval = sd_get_media_info_com(dev, &mi.dki_media_type,
23603 23599 &mi.dki_lbsize, &mi.dki_capacity, NULL);
23604 23600
23605 23601 if (rval)
23606 23602 return (rval);
23607 23603 if (ddi_copyout(&mi, arg, sizeof (struct dk_minfo), flag))
23608 23604 rval = EFAULT;
23609 23605 return (rval);
23610 23606 }
23611 23607
23612 23608 /*
23613 23609 * Function: sd_get_media_info_ext
23614 23610 *
23615 23611 * Description: This routine is the driver entry point for handling ioctl
23616 23612 * requests for the media type or command set profile used by the
23617 23613 * drive to operate on the media (DKIOCGMEDIAINFOEXT). The
23618 23614  *		difference between this ioctl and DKIOCGMEDIAINFO is that the
23619 23615  *		return value of this ioctl contains both the logical block
23620 23616  *		size and the physical block size.
23621 23617 *
23622 23618 *
23623 23619 * Arguments: dev - the device number
23624 23620 * arg - pointer to user provided dk_minfo_ext structure
23625 23621 * specifying the media type, logical block size,
23626 23622 * physical block size and disk capacity.
23627 23623 * flag - this argument is a pass through to ddi_copyxxx()
23628 23624 * directly from the mode argument of ioctl().
23629 23625 *
23630 23626 * Return Code: returns the value from sd_get_media_info_com
23631 23627 */
23632 23628 static int
23633 23629 sd_get_media_info_ext(dev_t dev, caddr_t arg, int flag)
23634 23630 {
23635 23631 struct dk_minfo_ext mie;
23636 23632 int rval = 0;
23637 23633
23638 23634 rval = sd_get_media_info_com(dev, &mie.dki_media_type,
23639 23635 &mie.dki_lbsize, &mie.dki_capacity, &mie.dki_pbsize);
23640 23636
23641 23637 if (rval)
23642 23638 return (rval);
23643 23639 if (ddi_copyout(&mie, arg, sizeof (struct dk_minfo_ext), flag))
23644 23640 rval = EFAULT;
23645 23641 return (rval);
23646 23642
23647 23643 }
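
/*
 * Illustrative sketch only, not part of this driver: reading the
 * extended media info that sd_get_media_info_ext() returns above.
 * DKIOCGMEDIAINFO is handled the same way except that struct dk_minfo
 * omits dki_pbsize. The function name and device path are hypothetical
 * examples.
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/dkio.h>
#include <fcntl.h>
#include <stropts.h>
#include <unistd.h>
#include <stdio.h>

int
print_media_info_example(const char *path)
{
	struct dk_minfo_ext mie;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return (-1);
	}
	if (ioctl(fd, DKIOCGMEDIAINFOEXT, &mie) != 0) {
		perror("DKIOCGMEDIAINFOEXT");
		(void) close(fd);
		return (-1);
	}
	/* A pbsize larger than lbsize implies RMW for sub-pbsize writes. */
	(void) printf("lbsize=%u pbsize=%u capacity=%llu blocks\n",
	    mie.dki_lbsize, mie.dki_pbsize,
	    (unsigned long long)mie.dki_capacity);
	(void) close(fd);
	return (0);
}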
23648 23644
23649 23645 /*
23650 23646 * Function: sd_watch_request_submit
23651 23647 *
23652 23648 * Description: Call scsi_watch_request_submit or scsi_mmc_watch_request_submit
23653 23649 * depending on which is supported by device.
23654 23650 */
23655 23651 static opaque_t
23656 23652 sd_watch_request_submit(struct sd_lun *un)
23657 23653 {
23658 23654 dev_t dev;
23659 23655
23660 23656 	/* All submissions are unified to use the same device number */
23661 23657 dev = sd_make_device(SD_DEVINFO(un));
23662 23658
23663 23659 if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) {
23664 23660 return (scsi_mmc_watch_request_submit(SD_SCSI_DEVP(un),
23665 23661 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
23666 23662 (caddr_t)dev));
23667 23663 } else {
23668 23664 return (scsi_watch_request_submit(SD_SCSI_DEVP(un),
23669 23665 sd_check_media_time, SENSE_LENGTH, sd_media_watch_cb,
23670 23666 (caddr_t)dev));
23671 23667 }
23672 23668 }
23673 23669
23674 23670
23675 23671 /*
23676 23672 * Function: sd_check_media
23677 23673 *
23678 23674 * Description: This utility routine implements the functionality for the
23679 23675 * DKIOCSTATE ioctl. This ioctl blocks the user thread until the
23680 23676 * driver state changes from that specified by the user
23681 23677 * (inserted or ejected). For example, if the user specifies
23682 23678 * DKIO_EJECTED and the current media state is inserted this
23683 23679 * routine will immediately return DKIO_INSERTED. However, if the
23684 23680 * current media state is not inserted the user thread will be
23685 23681 * blocked until the drive state changes. If DKIO_NONE is specified
23686 23682 * the user thread will block until a drive state change occurs.
23687 23683 *
23688 23684 * Arguments: dev - the device number
23689 23685 * state - user pointer to a dkio_state, updated with the current
23690 23686 * drive state at return.
23691 23687 *
23692 23688 * Return Code: ENXIO
23693 23689 * EIO
23694 23690 * EAGAIN
23695 23691 * EINTR
23696 23692 */
23697 23693
23698 23694 static int
23699 23695 sd_check_media(dev_t dev, enum dkio_state state)
23700 23696 {
23701 23697 struct sd_lun *un = NULL;
23702 23698 enum dkio_state prev_state;
23703 23699 opaque_t token = NULL;
23704 23700 int rval = 0;
23705 23701 sd_ssc_t *ssc;
23706 23702
23707 23703 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23708 23704 return (ENXIO);
23709 23705 }
23710 23706
23711 23707 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: entry\n");
23712 23708
23713 23709 ssc = sd_ssc_init(un);
23714 23710
23715 23711 mutex_enter(SD_MUTEX(un));
23716 23712
23717 23713 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: "
23718 23714 "state=%x, mediastate=%x\n", state, un->un_mediastate);
23719 23715
23720 23716 prev_state = un->un_mediastate;
23721 23717
23722 23718 /* is there anything to do? */
23723 23719 if (state == un->un_mediastate || un->un_mediastate == DKIO_NONE) {
23724 23720 /*
23725 23721 * submit the request to the scsi_watch service;
23726 23722 * scsi_media_watch_cb() does the real work
23727 23723 */
23728 23724 mutex_exit(SD_MUTEX(un));
23729 23725
23730 23726 /*
23731 23727 * This change handles the case where a scsi watch request is
23732 23728 * added to a device that is powered down. To accomplish this
23733 23729 * we power up the device before adding the scsi watch request,
23734 23730 * since the scsi watch sends a TUR directly to the device
23735 23731 * which the device cannot handle if it is powered down.
23736 23732 */
23737 23733 if (sd_pm_entry(un) != DDI_SUCCESS) {
23738 23734 mutex_enter(SD_MUTEX(un));
23739 23735 goto done;
23740 23736 }
23741 23737
23742 23738 token = sd_watch_request_submit(un);
23743 23739
23744 23740 sd_pm_exit(un);
23745 23741
23746 23742 mutex_enter(SD_MUTEX(un));
23747 23743 if (token == NULL) {
23748 23744 rval = EAGAIN;
23749 23745 goto done;
23750 23746 }
23751 23747
23752 23748 /*
23753 23749 * This is a special case IOCTL that doesn't return
23754 23750 * until the media state changes. Routine sdpower
23755 23751 * knows about and handles this so don't count it
23756 23752 * as an active cmd in the driver, which would
23757 23753 * keep the device busy to the pm framework.
23758 23754 * If the count isn't decremented the device can't
23759 23755 * be powered down.
23760 23756 */
23761 23757 un->un_ncmds_in_driver--;
23762 23758 ASSERT(un->un_ncmds_in_driver >= 0);
23763 23759
23764 23760 /*
23765 23761 * if a prior request had been made, this will be the same
23766 23762 * token, as scsi_watch was designed that way.
23767 23763 */
23768 23764 un->un_swr_token = token;
23769 23765 un->un_specified_mediastate = state;
23770 23766
23771 23767 /*
23772 23768 * now wait for media change
23773 23769 * we will not be signalled unless mediastate == state but it is
23774 23770 * still better to test for this condition, since there is a
23775 23771 * 2 sec cv_broadcast delay when mediastate == DKIO_INSERTED
23776 23772 */
23777 23773 SD_TRACE(SD_LOG_COMMON, un,
23778 23774 "sd_check_media: waiting for media state change\n");
23779 23775 while (un->un_mediastate == state) {
23780 23776 if (cv_wait_sig(&un->un_state_cv, SD_MUTEX(un)) == 0) {
23781 23777 SD_TRACE(SD_LOG_COMMON, un,
23782 23778 "sd_check_media: waiting for media state "
23783 23779 "was interrupted\n");
23784 23780 un->un_ncmds_in_driver++;
23785 23781 rval = EINTR;
23786 23782 goto done;
23787 23783 }
23788 23784 SD_TRACE(SD_LOG_COMMON, un,
23789 23785 "sd_check_media: received signal, state=%x\n",
23790 23786 un->un_mediastate);
23791 23787 }
23792 23788 /*
23793 23789 * Inc the counter to indicate the device once again
23794 23790 * has an active outstanding cmd.
23795 23791 */
23796 23792 un->un_ncmds_in_driver++;
23797 23793 }
23798 23794
23799 23795 /* invalidate geometry */
23800 23796 if (prev_state == DKIO_INSERTED && un->un_mediastate == DKIO_EJECTED) {
23801 23797 sr_ejected(un);
23802 23798 }
23803 23799
23804 23800 if (un->un_mediastate == DKIO_INSERTED && prev_state != DKIO_INSERTED) {
23805 23801 uint64_t capacity;
23806 23802 uint_t lbasize;
23807 23803
23808 23804 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: media inserted\n");
23809 23805 mutex_exit(SD_MUTEX(un));
23810 23806 /*
23811 23807 * Since the following routines use SD_PATH_DIRECT, we must
23812 23808 * call PM directly before the upcoming disk accesses. This
23813 23809 		 * may cause the disk to be powered up and spun up.
23814 23810 */
23815 23811
23816 23812 if (sd_pm_entry(un) == DDI_SUCCESS) {
23817 23813 rval = sd_send_scsi_READ_CAPACITY(ssc,
23818 23814 &capacity, &lbasize, SD_PATH_DIRECT);
23819 23815 if (rval != 0) {
23820 23816 sd_pm_exit(un);
23821 23817 if (rval == EIO)
23822 23818 sd_ssc_assessment(ssc,
23823 23819 SD_FMT_STATUS_CHECK);
23824 23820 else
23825 23821 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23826 23822 mutex_enter(SD_MUTEX(un));
23827 23823 goto done;
23828 23824 }
23829 23825 } else {
23830 23826 rval = EIO;
23831 23827 mutex_enter(SD_MUTEX(un));
23832 23828 goto done;
23833 23829 }
23834 23830 mutex_enter(SD_MUTEX(un));
23835 23831
23836 23832 sd_update_block_info(un, lbasize, capacity);
23837 23833
23838 23834 /*
23839 23835 * Check if the media in the device is writable or not
23840 23836 */
23841 23837 if (ISCD(un)) {
23842 23838 sd_check_for_writable_cd(ssc, SD_PATH_DIRECT);
23843 23839 }
23844 23840
23845 23841 mutex_exit(SD_MUTEX(un));
23846 23842 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT);
23847 23843 if ((cmlb_validate(un->un_cmlbhandle, 0,
23848 23844 (void *)SD_PATH_DIRECT) == 0) && un->un_f_pkstats_enabled) {
23849 23845 sd_set_pstats(un);
23850 23846 SD_TRACE(SD_LOG_IO_PARTITION, un,
23851 23847 "sd_check_media: un:0x%p pstats created and "
23852 23848 "set\n", un);
23853 23849 }
23854 23850
23855 23851 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_PREVENT,
23856 23852 SD_PATH_DIRECT);
23857 23853
23858 23854 sd_pm_exit(un);
23859 23855
23860 23856 if (rval != 0) {
23861 23857 if (rval == EIO)
23862 23858 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
23863 23859 else
23864 23860 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
23865 23861 }
23866 23862
23867 23863 mutex_enter(SD_MUTEX(un));
23868 23864 }
23869 23865 done:
23870 23866 sd_ssc_fini(ssc);
23871 23867 un->un_f_watcht_stopped = FALSE;
23872 23868 if (token != NULL && un->un_swr_token != NULL) {
23873 23869 /*
23874 23870 * Use of this local token and the mutex ensures that we avoid
23875 23871 * some race conditions associated with terminating the
23876 23872 * scsi watch.
23877 23873 */
23878 23874 token = un->un_swr_token;
23879 23875 mutex_exit(SD_MUTEX(un));
23880 23876 (void) scsi_watch_request_terminate(token,
23881 23877 SCSI_WATCH_TERMINATE_WAIT);
23882 23878 if (scsi_watch_get_ref_count(token) == 0) {
23883 23879 mutex_enter(SD_MUTEX(un));
23884 23880 un->un_swr_token = (opaque_t)NULL;
23885 23881 } else {
23886 23882 mutex_enter(SD_MUTEX(un));
23887 23883 }
23888 23884 }
23889 23885
23890 23886 /*
23891 23887 	 * Update the capacity kstat value if there was no media
23892 23888 	 * previously (capacity kstat is 0) and media has been
23893 23889 	 * inserted (un_f_blockcount_is_valid == TRUE).
23894 23890 */
23895 23891 if (un->un_errstats) {
23896 23892 struct sd_errstats *stp = NULL;
23897 23893
23898 23894 stp = (struct sd_errstats *)un->un_errstats->ks_data;
23899 23895 if ((stp->sd_capacity.value.ui64 == 0) &&
23900 23896 (un->un_f_blockcount_is_valid == TRUE)) {
23901 23897 stp->sd_capacity.value.ui64 =
23902 23898 (uint64_t)((uint64_t)un->un_blockcount *
23903 23899 un->un_sys_blocksize);
23904 23900 }
23905 23901 }
23906 23902 mutex_exit(SD_MUTEX(un));
23907 23903 SD_TRACE(SD_LOG_COMMON, un, "sd_check_media: done\n");
23908 23904 return (rval);
23909 23905 }
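
/*
 * Illustrative sketch only, not part of this driver: the canonical
 * userland DKIOCSTATE loop served by sd_check_media() above. The caller
 * passes in the last state it saw; the ioctl blocks until the state
 * differs and returns the new state. The function name and device path
 * are hypothetical examples.
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/dkio.h>
#include <fcntl.h>
#include <stropts.h>
#include <unistd.h>
#include <stdio.h>

int
wait_for_media_example(const char *path)
{
	enum dkio_state state = DKIO_NONE;
	int fd = open(path, O_RDONLY | O_NDELAY);

	if (fd < 0) {
		perror("open");
		return (-1);
	}
	for (;;) {
		/* Blocks until the media state changes from 'state'. */
		if (ioctl(fd, DKIOCSTATE, &state) != 0) {
			perror("DKIOCSTATE");
			break;
		}
		if (state == DKIO_INSERTED) {
			(void) printf("media inserted\n");
			break;
		}
	}
	(void) close(fd);
	return (state == DKIO_INSERTED ? 0 : -1);
}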
23910 23906
23911 23907
23912 23908 /*
23913 23909 * Function: sd_delayed_cv_broadcast
23914 23910 *
23915 23911  * Description: Delayed cv_broadcast to allow the target to recover from media
23916 23912 * insertion.
23917 23913 *
23918 23914 * Arguments: arg - driver soft state (unit) structure
23919 23915 */
23920 23916
23921 23917 static void
23922 23918 sd_delayed_cv_broadcast(void *arg)
23923 23919 {
23924 23920 struct sd_lun *un = arg;
23925 23921
23926 23922 SD_TRACE(SD_LOG_COMMON, un, "sd_delayed_cv_broadcast\n");
23927 23923
23928 23924 mutex_enter(SD_MUTEX(un));
23929 23925 un->un_dcvb_timeid = NULL;
23930 23926 cv_broadcast(&un->un_state_cv);
23931 23927 mutex_exit(SD_MUTEX(un));
23932 23928 }
23933 23929
23934 23930
23935 23931 /*
23936 23932 * Function: sd_media_watch_cb
23937 23933 *
23938 23934 * Description: Callback routine used for support of the DKIOCSTATE ioctl. This
23939 23935 * routine processes the TUR sense data and updates the driver
23940 23936 * state if a transition has occurred. The user thread
23941 23937 * (sd_check_media) is then signalled.
23942 23938 *
23943 23939 * Arguments: arg - the device 'dev_t' is used for context to discriminate
23944 23940 * among multiple watches that share this callback function
23945 23941 * resultp - scsi watch facility result packet containing scsi
23946 23942 * packet, status byte and sense data
23947 23943 *
23948 23944 * Return Code: 0 for success, -1 for failure
23949 23945 */
23950 23946
23951 23947 static int
23952 23948 sd_media_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
23953 23949 {
23954 23950 struct sd_lun *un;
23955 23951 struct scsi_status *statusp = resultp->statusp;
23956 23952 uint8_t *sensep = (uint8_t *)resultp->sensep;
23957 23953 enum dkio_state state = DKIO_NONE;
23958 23954 dev_t dev = (dev_t)arg;
23959 23955 uchar_t actual_sense_length;
23960 23956 uint8_t skey, asc, ascq;
23961 23957
23962 23958 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
23963 23959 return (-1);
23964 23960 }
23965 23961 actual_sense_length = resultp->actual_sense_length;
23966 23962
23967 23963 mutex_enter(SD_MUTEX(un));
23968 23964 SD_TRACE(SD_LOG_COMMON, un,
23969 23965 "sd_media_watch_cb: status=%x, sensep=%p, len=%x\n",
23970 23966 *((char *)statusp), (void *)sensep, actual_sense_length);
23971 23967
23972 23968 if (resultp->pkt->pkt_reason == CMD_DEV_GONE) {
23973 23969 un->un_mediastate = DKIO_DEV_GONE;
23974 23970 cv_broadcast(&un->un_state_cv);
23975 23971 mutex_exit(SD_MUTEX(un));
23976 23972
23977 23973 return (0);
23978 23974 }
23979 23975
23980 23976 if (un->un_f_mmc_cap && un->un_f_mmc_gesn_polling) {
23981 23977 if (sd_gesn_media_data_valid(resultp->mmc_data)) {
23982 23978 if ((resultp->mmc_data[5] &
23983 23979 SD_GESN_MEDIA_EVENT_STATUS_PRESENT) != 0) {
23984 23980 state = DKIO_INSERTED;
23985 23981 } else {
23986 23982 state = DKIO_EJECTED;
23987 23983 }
23988 23984 if ((resultp->mmc_data[4] & SD_GESN_MEDIA_EVENT_CODE) ==
23989 23985 SD_GESN_MEDIA_EVENT_EJECTREQUEST) {
23990 23986 sd_log_eject_request_event(un, KM_NOSLEEP);
23991 23987 }
23992 23988 }
23993 23989 } else if (sensep != NULL) {
23994 23990 /*
23995 23991 * If there was a check condition then sensep points to valid
23996 23992 * sense data. If status was not a check condition but a
23997 23993 * reservation or busy status then the new state is DKIO_NONE.
23998 23994 */
23999 23995 skey = scsi_sense_key(sensep);
24000 23996 asc = scsi_sense_asc(sensep);
24001 23997 ascq = scsi_sense_ascq(sensep);
24002 23998
24003 23999 SD_INFO(SD_LOG_COMMON, un,
24004 24000 "sd_media_watch_cb: sense KEY=%x, ASC=%x, ASCQ=%x\n",
24005 24001 skey, asc, ascq);
24006 24002 /* This routine only uses up to 13 bytes of sense data. */
24007 24003 if (actual_sense_length >= 13) {
24008 24004 if (skey == KEY_UNIT_ATTENTION) {
24009 24005 if (asc == 0x28) {
24010 24006 state = DKIO_INSERTED;
24011 24007 }
24012 24008 } else if (skey == KEY_NOT_READY) {
24013 24009 /*
24014 24010 * Sense data of 02/06/00 means that the
24015 24011 * drive could not read the media (No
24016 24012 * reference position found). In this case
24017 24013 * to prevent a hang on the DKIOCSTATE IOCTL
24018 24014 * we set the media state to DKIO_INSERTED.
24019 24015 */
24020 24016 if (asc == 0x06 && ascq == 0x00)
24021 24017 state = DKIO_INSERTED;
24022 24018
24023 24019 /*
24024 24020 				 * Sense data of 02/04/02 means that the
24025 24021 				 * host should send a start command.
24026 24022 				 * Explicitly leave the media state as
24027 24023 				 * is (inserted), since the media is
24028 24024 				 * present and the host has stopped the
24029 24025 				 * device for PM reasons. The next true
24030 24026 				 * read/write to this media will bring
24031 24027 				 * the device back to the right state
24032 24028 				 * for media access.
24033 24029 */
24034 24030 if (asc == 0x3a) {
24035 24031 state = DKIO_EJECTED;
24036 24032 } else {
24037 24033 /*
24038 24034 * If the drive is busy with an
24039 24035 * operation or long write, keep the
24040 24036 * media in an inserted state.
24041 24037 */
24042 24038
24043 24039 if ((asc == 0x04) &&
24044 24040 ((ascq == 0x02) ||
24045 24041 (ascq == 0x07) ||
24046 24042 (ascq == 0x08))) {
24047 24043 state = DKIO_INSERTED;
24048 24044 }
24049 24045 }
24050 24046 } else if (skey == KEY_NO_SENSE) {
24051 24047 if ((asc == 0x00) && (ascq == 0x00)) {
24052 24048 /*
24053 24049 * Sense Data 00/00/00 does not provide
24054 24050 * any information about the state of
24055 24051 * the media. Ignore it.
24056 24052 */
24057 24053 mutex_exit(SD_MUTEX(un));
24058 24054 return (0);
24059 24055 }
24060 24056 }
24061 24057 }
24062 24058 } else if ((*((char *)statusp) == STATUS_GOOD) &&
24063 24059 (resultp->pkt->pkt_reason == CMD_CMPLT)) {
24064 24060 state = DKIO_INSERTED;
24065 24061 }
24066 24062
24067 24063 SD_TRACE(SD_LOG_COMMON, un,
24068 24064 "sd_media_watch_cb: state=%x, specified=%x\n",
24069 24065 state, un->un_specified_mediastate);
24070 24066
24071 24067 /*
24072 24068 * now signal the waiting thread if this is *not* the specified state;
24073 24069 * delay the signal if the state is DKIO_INSERTED to allow the target
24074 24070 * to recover
24075 24071 */
24076 24072 if (state != un->un_specified_mediastate) {
24077 24073 un->un_mediastate = state;
24078 24074 if (state == DKIO_INSERTED) {
24079 24075 /*
24080 24076 * delay the signal to give the drive a chance
24081 24077 * to do what it apparently needs to do
24082 24078 */
24083 24079 SD_TRACE(SD_LOG_COMMON, un,
24084 24080 "sd_media_watch_cb: delayed cv_broadcast\n");
24085 24081 if (un->un_dcvb_timeid == NULL) {
24086 24082 un->un_dcvb_timeid =
24087 24083 timeout(sd_delayed_cv_broadcast, un,
24088 24084 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
24089 24085 }
24090 24086 } else {
24091 24087 SD_TRACE(SD_LOG_COMMON, un,
24092 24088 "sd_media_watch_cb: immediate cv_broadcast\n");
24093 24089 cv_broadcast(&un->un_state_cv);
24094 24090 }
24095 24091 }
24096 24092 mutex_exit(SD_MUTEX(un));
24097 24093 return (0);
24098 24094 }
24099 24095
24100 24096
24101 24097 /*
24102 24098 * Function: sd_dkio_get_temp
24103 24099 *
24104 24100 * Description: This routine is the driver entry point for handling ioctl
24105 24101 * requests to get the disk temperature.
24106 24102 *
24107 24103 * Arguments: dev - the device number
24108 24104 * arg - pointer to user provided dk_temperature structure.
24109 24105 * flag - this argument is a pass through to ddi_copyxxx()
24110 24106 * directly from the mode argument of ioctl().
24111 24107 *
24112 24108 * Return Code: 0
24113 24109 * EFAULT
24114 24110 * ENXIO
24115 24111 * EAGAIN
24116 24112 */
24117 24113
24118 24114 static int
24119 24115 sd_dkio_get_temp(dev_t dev, caddr_t arg, int flag)
24120 24116 {
24121 24117 struct sd_lun *un = NULL;
24122 24118 struct dk_temperature *dktemp = NULL;
24123 24119 uchar_t *temperature_page;
24124 24120 int rval = 0;
24125 24121 int path_flag = SD_PATH_STANDARD;
24126 24122 sd_ssc_t *ssc;
24127 24123
24128 24124 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24129 24125 return (ENXIO);
24130 24126 }
24131 24127
24132 24128 ssc = sd_ssc_init(un);
24133 24129 dktemp = kmem_zalloc(sizeof (struct dk_temperature), KM_SLEEP);
24134 24130
24135 24131 /* copyin the disk temp argument to get the user flags */
24136 24132 if (ddi_copyin((void *)arg, dktemp,
24137 24133 sizeof (struct dk_temperature), flag) != 0) {
24138 24134 rval = EFAULT;
24139 24135 goto done;
24140 24136 }
24141 24137
24142 24138 /* Initialize the temperature to invalid. */
24143 24139 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
24144 24140 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
24145 24141
24146 24142 /*
24147 24143 * Note: Investigate removing the "bypass pm" semantic.
24148 24144 * Can we just bypass PM always?
24149 24145 */
24150 24146 if (dktemp->dkt_flags & DKT_BYPASS_PM) {
24151 24147 path_flag = SD_PATH_DIRECT;
24152 24148 ASSERT(!mutex_owned(&un->un_pm_mutex));
24153 24149 mutex_enter(&un->un_pm_mutex);
24154 24150 if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
24155 24151 /*
24156 24152 * If DKT_BYPASS_PM is set, and the drive happens to be
24157 24153 			 * in low power mode, we cannot wake it up; we need
24158 24154 			 * to return EAGAIN.
24159 24155 */
24160 24156 mutex_exit(&un->un_pm_mutex);
24161 24157 rval = EAGAIN;
24162 24158 goto done;
24163 24159 } else {
24164 24160 /*
24165 24161 * Indicate to PM the device is busy. This is required
24166 24162 * to avoid a race - i.e. the ioctl is issuing a
24167 24163 * command and the pm framework brings down the device
24168 24164 * to low power mode (possible power cut-off on some
24169 24165 * platforms).
24170 24166 */
24171 24167 mutex_exit(&un->un_pm_mutex);
24172 24168 if (sd_pm_entry(un) != DDI_SUCCESS) {
24173 24169 rval = EAGAIN;
24174 24170 goto done;
24175 24171 }
24176 24172 }
24177 24173 }
24178 24174
24179 24175 temperature_page = kmem_zalloc(TEMPERATURE_PAGE_SIZE, KM_SLEEP);
24180 24176
24181 24177 rval = sd_send_scsi_LOG_SENSE(ssc, temperature_page,
24182 24178 TEMPERATURE_PAGE_SIZE, TEMPERATURE_PAGE, 1, 0, path_flag);
24183 24179 if (rval != 0)
24184 24180 goto done2;
24185 24181
24186 24182 /*
24187 24183 * For the current temperature verify that the parameter length is 0x02
24188 24184 * and the parameter code is 0x00
24189 24185 */
24190 24186 if ((temperature_page[7] == 0x02) && (temperature_page[4] == 0x00) &&
24191 24187 (temperature_page[5] == 0x00)) {
24192 24188 if (temperature_page[9] == 0xFF) {
24193 24189 dktemp->dkt_cur_temp = (short)DKT_INVALID_TEMP;
24194 24190 } else {
24195 24191 dktemp->dkt_cur_temp = (short)(temperature_page[9]);
24196 24192 }
24197 24193 }
24198 24194
24199 24195 /*
24200 24196 * For the reference temperature verify that the parameter
24201 24197 * length is 0x02 and the parameter code is 0x01
24202 24198 */
24203 24199 if ((temperature_page[13] == 0x02) && (temperature_page[10] == 0x00) &&
24204 24200 (temperature_page[11] == 0x01)) {
24205 24201 if (temperature_page[15] == 0xFF) {
24206 24202 dktemp->dkt_ref_temp = (short)DKT_INVALID_TEMP;
24207 24203 } else {
24208 24204 dktemp->dkt_ref_temp = (short)(temperature_page[15]);
24209 24205 }
24210 24206 }
24211 24207
24212 24208 	/* Do the copyout regardless of the temperature command's status. */
24213 24209 if (ddi_copyout(dktemp, (void *)arg, sizeof (struct dk_temperature),
24214 24210 flag) != 0) {
24215 24211 rval = EFAULT;
24216 24212 goto done1;
24217 24213 }
24218 24214
24219 24215 done2:
24220 24216 if (rval != 0) {
24221 24217 if (rval == EIO)
24222 24218 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
24223 24219 else
24224 24220 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
24225 24221 }
24226 24222 done1:
24227 24223 if (path_flag == SD_PATH_DIRECT) {
24228 24224 sd_pm_exit(un);
24229 24225 }
24230 24226
24231 24227 kmem_free(temperature_page, TEMPERATURE_PAGE_SIZE);
24232 24228 done:
24233 24229 sd_ssc_fini(ssc);
24234 24230 if (dktemp != NULL) {
24235 24231 kmem_free(dktemp, sizeof (struct dk_temperature));
24236 24232 }
24237 24233
24238 24234 return (rval);
24239 24235 }
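
/*
 * Illustrative sketch only, not part of this driver: querying the
 * temperature log page through the DKIOCGTEMPERATURE path above.
 * dkt_flags may be 0, or DKT_BYPASS_PM to avoid spinning up a drive
 * that is in low power (which fails with EAGAIN as coded above). The
 * function name and device path are hypothetical examples.
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/dkio.h>
#include <fcntl.h>
#include <stropts.h>
#include <unistd.h>
#include <stdio.h>

int
print_disk_temp_example(const char *path)
{
	struct dk_temperature dkt = { 0 };
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return (-1);
	}
	dkt.dkt_flags = DKT_BYPASS_PM;
	if (ioctl(fd, DKIOCGTEMPERATURE, &dkt) != 0) {
		perror("DKIOCGTEMPERATURE");
		(void) close(fd);
		return (-1);
	}
	if (dkt.dkt_cur_temp == DKT_INVALID_TEMP)
		(void) printf("temperature not reported\n");
	else
		(void) printf("current %d C, reference %d C\n",
		    dkt.dkt_cur_temp, dkt.dkt_ref_temp);
	(void) close(fd);
	return (0);
}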
24240 24236
24241 24237
24242 24238 /*
24243 24239 * Function: sd_log_page_supported
24244 24240 *
24245 24241 * Description: This routine uses sd_send_scsi_LOG_SENSE to find the list of
24246 24242 * supported log pages.
24247 24243 *
24248 24244 * Arguments: ssc - ssc contains pointer to driver soft state (unit)
24249 24245 * structure for this target.
24250 24246  *		log_page - the log page code to search for.
24251 24247 *
24252 24248 * Return Code: -1 - on error (log sense is optional and may not be supported).
24253 24249 * 0 - log page not found.
24254 24250 * 1 - log page found.
24255 24251 */
24256 24252
24257 24253 static int
24258 24254 sd_log_page_supported(sd_ssc_t *ssc, int log_page)
24259 24255 {
24260 24256 uchar_t *log_page_data;
24261 24257 int i;
24262 24258 int match = 0;
24263 24259 int log_size;
24264 24260 int status = 0;
24265 24261 struct sd_lun *un;
24266 24262
24267 24263 ASSERT(ssc != NULL);
24268 24264 un = ssc->ssc_un;
24269 24265 ASSERT(un != NULL);
24270 24266
24271 24267 log_page_data = kmem_zalloc(0xFF, KM_SLEEP);
24272 24268
24273 24269 status = sd_send_scsi_LOG_SENSE(ssc, log_page_data, 0xFF, 0, 0x01, 0,
24274 24270 SD_PATH_DIRECT);
24275 24271
24276 24272 if (status != 0) {
24277 24273 if (status == EIO) {
24278 24274 /*
24279 24275 			 * Some disks do not support log sense; we
24280 24276 			 * should ignore this kind of error (sense key is
24281 24277 			 * 0x5 - illegal request).
24282 24278 */
24283 24279 uint8_t *sensep;
24284 24280 int senlen;
24285 24281
24286 24282 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
24287 24283 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
24288 24284 ssc->ssc_uscsi_cmd->uscsi_rqresid);
24289 24285
24290 24286 if (senlen > 0 &&
24291 24287 scsi_sense_key(sensep) == KEY_ILLEGAL_REQUEST) {
24292 24288 sd_ssc_assessment(ssc,
24293 24289 SD_FMT_IGNORE_COMPROMISE);
24294 24290 } else {
24295 24291 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
24296 24292 }
24297 24293 } else {
24298 24294 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
24299 24295 }
24300 24296
24301 24297 SD_ERROR(SD_LOG_COMMON, un,
24302 24298 "sd_log_page_supported: failed log page retrieval\n");
24303 24299 kmem_free(log_page_data, 0xFF);
24304 24300 return (-1);
24305 24301 }
24306 24302
24307 24303 log_size = log_page_data[3];
24308 24304
24309 24305 /*
24310 24306 	 * The list of supported log pages starts from the fourth byte. Check
24311 24307 * until we run out of log pages or a match is found.
24312 24308 */
24313 24309 for (i = 4; (i < (log_size + 4)) && !match; i++) {
24314 24310 if (log_page_data[i] == log_page) {
24315 24311 match++;
24316 24312 }
24317 24313 }
24318 24314 kmem_free(log_page_data, 0xFF);
24319 24315 return (match);
24320 24316 }
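
/*
 * Illustrative sketch (editorial, not driver code): the supported-pages
 * scan performed by sd_log_page_supported() above, isolated. Byte 3 of
 * the LOG SENSE page 0x00 response holds the page-list length and the
 * list itself begins at byte 4. The helper name is hypothetical.
 */
static boolean_t
sd_example_page_in_list(const uchar_t *buf, uchar_t page)
{
	int len = buf[3];	/* page-list length, as used above */
	int i;

	for (i = 0; i < len; i++) {
		if (buf[4 + i] == page)
			return (B_TRUE);
	}
	return (B_FALSE);
}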
24321 24317
24322 24318
24323 24319 /*
24324 24320 * Function: sd_mhdioc_failfast
24325 24321 *
24326 24322 * Description: This routine is the driver entry point for handling ioctl
24327 24323 * requests to enable/disable the multihost failfast option.
24328 24324 * (MHIOCENFAILFAST)
24329 24325 *
24330 24326 * Arguments: dev - the device number
24331 24327 * arg - user specified probing interval.
24332 24328 * flag - this argument is a pass through to ddi_copyxxx()
24333 24329 * directly from the mode argument of ioctl().
24334 24330 *
24335 24331 * Return Code: 0
24336 24332 * EFAULT
24337 24333 * ENXIO
24338 24334 */
24339 24335
24340 24336 static int
24341 24337 sd_mhdioc_failfast(dev_t dev, caddr_t arg, int flag)
24342 24338 {
24343 24339 struct sd_lun *un = NULL;
24344 24340 int mh_time;
24345 24341 int rval = 0;
24346 24342
24347 24343 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24348 24344 return (ENXIO);
24349 24345 }
24350 24346
24351 24347 if (ddi_copyin((void *)arg, &mh_time, sizeof (int), flag))
24352 24348 return (EFAULT);
24353 24349
24354 24350 if (mh_time) {
24355 24351 mutex_enter(SD_MUTEX(un));
24356 24352 un->un_resvd_status |= SD_FAILFAST;
24357 24353 mutex_exit(SD_MUTEX(un));
24358 24354 /*
24359 24355 * If mh_time is INT_MAX, then this ioctl is being used for
24360 24356 		 * SCSI-3 PGR purposes, and we don't need to spawn a watch thread.
24361 24357 */
24362 24358 if (mh_time != INT_MAX) {
24363 24359 rval = sd_check_mhd(dev, mh_time);
24364 24360 }
24365 24361 } else {
24366 24362 (void) sd_check_mhd(dev, 0);
24367 24363 mutex_enter(SD_MUTEX(un));
24368 24364 un->un_resvd_status &= ~SD_FAILFAST;
24369 24365 mutex_exit(SD_MUTEX(un));
24370 24366 }
24371 24367 return (rval);
24372 24368 }
24373 24369
24374 24370
24375 24371 /*
24376 24372 * Function: sd_mhdioc_takeown
24377 24373 *
24378 24374 * Description: This routine is the driver entry point for handling ioctl
24379 24375 * requests to forcefully acquire exclusive access rights to the
24380 24376 * multihost disk (MHIOCTKOWN).
24381 24377 *
24382 24378 * Arguments: dev - the device number
24383 24379 * arg - user provided structure specifying the delay
24384 24380 * parameters in milliseconds
24385 24381 * flag - this argument is a pass through to ddi_copyxxx()
24386 24382 * directly from the mode argument of ioctl().
24387 24383 *
24388 24384 * Return Code: 0
24389 24385 * EFAULT
24390 24386 * ENXIO
24391 24387 */
24392 24388
24393 24389 static int
24394 24390 sd_mhdioc_takeown(dev_t dev, caddr_t arg, int flag)
24395 24391 {
24396 24392 struct sd_lun *un = NULL;
24397 24393 struct mhioctkown *tkown = NULL;
24398 24394 int rval = 0;
24399 24395
24400 24396 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24401 24397 return (ENXIO);
24402 24398 }
24403 24399
24404 24400 if (arg != NULL) {
24405 24401 tkown = (struct mhioctkown *)
24406 24402 kmem_zalloc(sizeof (struct mhioctkown), KM_SLEEP);
24407 24403 rval = ddi_copyin(arg, tkown, sizeof (struct mhioctkown), flag);
24408 24404 if (rval != 0) {
24409 24405 rval = EFAULT;
24410 24406 goto error;
24411 24407 }
24412 24408 }
24413 24409
24414 24410 rval = sd_take_ownership(dev, tkown);
24415 24411 mutex_enter(SD_MUTEX(un));
24416 24412 if (rval == 0) {
24417 24413 un->un_resvd_status |= SD_RESERVE;
24418 24414 if (tkown != NULL && tkown->reinstate_resv_delay != 0) {
24419 24415 sd_reinstate_resv_delay =
24420 24416 tkown->reinstate_resv_delay * 1000;
24421 24417 } else {
24422 24418 sd_reinstate_resv_delay = SD_REINSTATE_RESV_DELAY;
24423 24419 }
24424 24420 /*
24425 24421 * Give the scsi_watch routine interval set by
24426 24422 * the MHIOCENFAILFAST ioctl precedence here.
24427 24423 */
24428 24424 if ((un->un_resvd_status & SD_FAILFAST) == 0) {
24429 24425 mutex_exit(SD_MUTEX(un));
24430 24426 (void) sd_check_mhd(dev, sd_reinstate_resv_delay/1000);
24431 24427 SD_TRACE(SD_LOG_IOCTL_MHD, un,
24432 24428 "sd_mhdioc_takeown : %d\n",
24433 24429 sd_reinstate_resv_delay);
24434 24430 } else {
24435 24431 mutex_exit(SD_MUTEX(un));
24436 24432 }
24437 24433 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_NOTIFY,
24438 24434 sd_mhd_reset_notify_cb, (caddr_t)un);
24439 24435 } else {
24440 24436 un->un_resvd_status &= ~SD_RESERVE;
24441 24437 mutex_exit(SD_MUTEX(un));
24442 24438 }
24443 24439
24444 24440 error:
24445 24441 if (tkown != NULL) {
24446 24442 kmem_free(tkown, sizeof (struct mhioctkown));
24447 24443 }
24448 24444 return (rval);
24449 24445 }
24450 24446
24451 24447
24452 24448 /*
24453 24449 * Function: sd_mhdioc_release
24454 24450 *
24455 24451 * Description: This routine is the driver entry point for handling ioctl
24456 24452 * requests to release exclusive access rights to the multihost
24457 24453 * disk (MHIOCRELEASE).
24458 24454 *
24459 24455 * Arguments: dev - the device number
24460 24456 *
24461 24457 * Return Code: 0
24462 24458 * ENXIO
24463 24459 */
24464 24460
24465 24461 static int
24466 24462 sd_mhdioc_release(dev_t dev)
24467 24463 {
24468 24464 struct sd_lun *un = NULL;
24469 24465 timeout_id_t resvd_timeid_save;
24470 24466 int resvd_status_save;
24471 24467 int rval = 0;
24472 24468
24473 24469 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24474 24470 return (ENXIO);
24475 24471 }
24476 24472
24477 24473 mutex_enter(SD_MUTEX(un));
24478 24474 resvd_status_save = un->un_resvd_status;
24479 24475 un->un_resvd_status &=
24480 24476 ~(SD_RESERVE | SD_LOST_RESERVE | SD_WANT_RESERVE);
24481 24477 if (un->un_resvd_timeid) {
24482 24478 resvd_timeid_save = un->un_resvd_timeid;
24483 24479 un->un_resvd_timeid = NULL;
24484 24480 mutex_exit(SD_MUTEX(un));
24485 24481 (void) untimeout(resvd_timeid_save);
24486 24482 } else {
24487 24483 mutex_exit(SD_MUTEX(un));
24488 24484 }
24489 24485
24490 24486 /*
24491 24487 * destroy any pending timeout thread that may be attempting to
24492 24488 * reinstate reservation on this device.
24493 24489 */
24494 24490 sd_rmv_resv_reclaim_req(dev);
24495 24491
24496 24492 if ((rval = sd_reserve_release(dev, SD_RELEASE)) == 0) {
24497 24493 mutex_enter(SD_MUTEX(un));
24498 24494 if ((un->un_mhd_token) &&
24499 24495 ((un->un_resvd_status & SD_FAILFAST) == 0)) {
24500 24496 mutex_exit(SD_MUTEX(un));
24501 24497 (void) sd_check_mhd(dev, 0);
24502 24498 } else {
24503 24499 mutex_exit(SD_MUTEX(un));
24504 24500 }
24505 24501 (void) scsi_reset_notify(SD_ADDRESS(un), SCSI_RESET_CANCEL,
24506 24502 sd_mhd_reset_notify_cb, (caddr_t)un);
24507 24503 } else {
24508 24504 /*
24509 24505 * sd_mhd_watch_cb will restart the resvd recover timeout thread
24510 24506 */
24511 24507 mutex_enter(SD_MUTEX(un));
24512 24508 un->un_resvd_status = resvd_status_save;
24513 24509 mutex_exit(SD_MUTEX(un));
24514 24510 }
24515 24511 return (rval);
24516 24512 }
24517 24513
24518 24514
24519 24515 /*
24520 24516 * Function: sd_mhdioc_register_devid
24521 24517 *
24522 24518 * Description: This routine is the driver entry point for handling ioctl
24523 24519 * requests to register the device id (MHIOCREREGISTERDEVID).
24524 24520 *
24525 24521 * Note: The implementation for this ioctl has been updated to
24526 24522 * be consistent with the original PSARC case (1999/357)
24527 24523 * (4375899, 4241671, 4220005)
24528 24524 *
24529 24525 * Arguments: dev - the device number
24530 24526 *
24531 24527 * Return Code: 0
24532 24528 * ENXIO
24533 24529 */
24534 24530
24535 24531 static int
24536 24532 sd_mhdioc_register_devid(dev_t dev)
24537 24533 {
24538 24534 struct sd_lun *un = NULL;
24539 24535 int rval = 0;
24540 24536 sd_ssc_t *ssc;
24541 24537
24542 24538 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24543 24539 return (ENXIO);
24544 24540 }
24545 24541
24546 24542 ASSERT(!mutex_owned(SD_MUTEX(un)));
24547 24543
24548 24544 mutex_enter(SD_MUTEX(un));
24549 24545
24550 24546 /* If a devid already exists, de-register it */
24551 24547 if (un->un_devid != NULL) {
24552 24548 ddi_devid_unregister(SD_DEVINFO(un));
24553 24549 /*
24554 24550 		 * After unregistering the devid, free the devid memory.
24555 24551 */
24556 24552 ddi_devid_free(un->un_devid);
24557 24553 un->un_devid = NULL;
24558 24554 }
24559 24555
24560 24556 /* Check for reservation conflict */
24561 24557 mutex_exit(SD_MUTEX(un));
24562 24558 ssc = sd_ssc_init(un);
24563 24559 rval = sd_send_scsi_TEST_UNIT_READY(ssc, 0);
24564 24560 mutex_enter(SD_MUTEX(un));
24565 24561
24566 24562 switch (rval) {
24567 24563 case 0:
24568 24564 sd_register_devid(ssc, SD_DEVINFO(un), SD_TARGET_IS_UNRESERVED);
24569 24565 break;
24570 24566 case EACCES:
24571 24567 break;
24572 24568 default:
24573 24569 rval = EIO;
24574 24570 }
24575 24571
24576 24572 mutex_exit(SD_MUTEX(un));
24577 24573 if (rval != 0) {
24578 24574 if (rval == EIO)
24579 24575 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
24580 24576 else
24581 24577 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
24582 24578 }
24583 24579 sd_ssc_fini(ssc);
24584 24580 return (rval);
24585 24581 }
24586 24582
24587 24583
24588 24584 /*
24589 24585 * Function: sd_mhdioc_inkeys
24590 24586 *
24591 24587 * Description: This routine is the driver entry point for handling ioctl
24592 24588 * requests to issue the SCSI-3 Persistent In Read Keys command
24593 24589 * to the device (MHIOCGRP_INKEYS).
24594 24590 *
24595 24591 * Arguments: dev - the device number
24596 24592 * arg - user provided in_keys structure
24597 24593 * flag - this argument is a pass through to ddi_copyxxx()
24598 24594 * directly from the mode argument of ioctl().
24599 24595 *
24600 24596 * Return Code: code returned by sd_persistent_reservation_in_read_keys()
24601 24597 * ENXIO
24602 24598 * EFAULT
24603 24599 */
24604 24600
24605 24601 static int
24606 24602 sd_mhdioc_inkeys(dev_t dev, caddr_t arg, int flag)
24607 24603 {
24608 24604 struct sd_lun *un;
24609 24605 mhioc_inkeys_t inkeys;
24610 24606 int rval = 0;
24611 24607
24612 24608 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24613 24609 return (ENXIO);
24614 24610 }
24615 24611
24616 24612 #ifdef _MULTI_DATAMODEL
24617 24613 switch (ddi_model_convert_from(flag & FMODELS)) {
24618 24614 case DDI_MODEL_ILP32: {
24619 24615 struct mhioc_inkeys32 inkeys32;
24620 24616
24621 24617 if (ddi_copyin(arg, &inkeys32,
24622 24618 sizeof (struct mhioc_inkeys32), flag) != 0) {
24623 24619 return (EFAULT);
24624 24620 }
24625 24621 inkeys.li = (mhioc_key_list_t *)(uintptr_t)inkeys32.li;
24626 24622 if ((rval = sd_persistent_reservation_in_read_keys(un,
24627 24623 &inkeys, flag)) != 0) {
24628 24624 return (rval);
24629 24625 }
24630 24626 inkeys32.generation = inkeys.generation;
24631 24627 if (ddi_copyout(&inkeys32, arg, sizeof (struct mhioc_inkeys32),
24632 24628 flag) != 0) {
24633 24629 return (EFAULT);
24634 24630 }
24635 24631 break;
24636 24632 }
24637 24633 case DDI_MODEL_NONE:
24638 24634 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t),
24639 24635 flag) != 0) {
24640 24636 return (EFAULT);
24641 24637 }
24642 24638 if ((rval = sd_persistent_reservation_in_read_keys(un,
24643 24639 &inkeys, flag)) != 0) {
24644 24640 return (rval);
24645 24641 }
24646 24642 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t),
24647 24643 flag) != 0) {
24648 24644 return (EFAULT);
24649 24645 }
24650 24646 break;
24651 24647 }
24652 24648
24653 24649 #else /* ! _MULTI_DATAMODEL */
24654 24650
24655 24651 if (ddi_copyin(arg, &inkeys, sizeof (mhioc_inkeys_t), flag) != 0) {
24656 24652 return (EFAULT);
24657 24653 }
24658 24654 rval = sd_persistent_reservation_in_read_keys(un, &inkeys, flag);
24659 24655 if (rval != 0) {
24660 24656 return (rval);
24661 24657 }
24662 24658 if (ddi_copyout(&inkeys, arg, sizeof (mhioc_inkeys_t), flag) != 0) {
24663 24659 return (EFAULT);
24664 24660 }
24665 24661
24666 24662 #endif /* _MULTI_DATAMODEL */
24667 24663
24668 24664 return (rval);
24669 24665 }
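
/*
 * Illustrative userland sketch (editorial, not part of the driver): one
 * way a caller might issue MHIOCGRP_INKEYS against an open raw-disk
 * descriptor. This assumes the mhioc_inkeys_t and mhioc_key_list_t
 * layouts (generation/li and listsize/listlen/list) from <sys/mhd.h>;
 * the helper name and the 32-key buffer size are arbitrary choices for
 * the example.
 */
#include <sys/types.h>
#include <sys/mhd.h>
#include <unistd.h>

static int
example_read_keys(int fd)
{
	mhioc_resv_key_t keys[32];
	mhioc_key_list_t kl;
	mhioc_inkeys_t ik;

	kl.listsize = 32;	/* capacity of keys[] */
	kl.listlen = 0;		/* filled in by the driver */
	kl.list = keys;
	ik.generation = 0;
	ik.li = &kl;
	if (ioctl(fd, MHIOCGRP_INKEYS, &ik) != 0)
		return (-1);
	/* kl.listlen may exceed listsize if more keys are registered */
	return ((int)kl.listlen);
}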
24670 24666
24671 24667
24672 24668 /*
24673 24669 * Function: sd_mhdioc_inresv
24674 24670 *
24675 24671 * Description: This routine is the driver entry point for handling ioctl
24676 24672 * requests to issue the SCSI-3 Persistent In Read Reservations
24677 24673  *		command to the device (MHIOCGRP_INRESV).
24678 24674 *
24679 24675 * Arguments: dev - the device number
24680 24676 * arg - user provided in_resv structure
24681 24677 * flag - this argument is a pass through to ddi_copyxxx()
24682 24678 * directly from the mode argument of ioctl().
24683 24679 *
24684 24680 * Return Code: code returned by sd_persistent_reservation_in_read_resv()
24685 24681 * ENXIO
24686 24682 * EFAULT
24687 24683 */
24688 24684
24689 24685 static int
24690 24686 sd_mhdioc_inresv(dev_t dev, caddr_t arg, int flag)
24691 24687 {
24692 24688 struct sd_lun *un;
24693 24689 mhioc_inresvs_t inresvs;
24694 24690 int rval = 0;
24695 24691
24696 24692 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24697 24693 return (ENXIO);
24698 24694 }
24699 24695
24700 24696 #ifdef _MULTI_DATAMODEL
24701 24697
24702 24698 switch (ddi_model_convert_from(flag & FMODELS)) {
24703 24699 case DDI_MODEL_ILP32: {
24704 24700 struct mhioc_inresvs32 inresvs32;
24705 24701
24706 24702 if (ddi_copyin(arg, &inresvs32,
24707 24703 sizeof (struct mhioc_inresvs32), flag) != 0) {
24708 24704 return (EFAULT);
24709 24705 }
24710 24706 inresvs.li = (mhioc_resv_desc_list_t *)(uintptr_t)inresvs32.li;
24711 24707 if ((rval = sd_persistent_reservation_in_read_resv(un,
24712 24708 &inresvs, flag)) != 0) {
24713 24709 return (rval);
24714 24710 }
24715 24711 inresvs32.generation = inresvs.generation;
24716 24712 if (ddi_copyout(&inresvs32, arg,
24717 24713 sizeof (struct mhioc_inresvs32), flag) != 0) {
24718 24714 return (EFAULT);
24719 24715 }
24720 24716 break;
24721 24717 }
24722 24718 case DDI_MODEL_NONE:
24723 24719 if (ddi_copyin(arg, &inresvs,
24724 24720 sizeof (mhioc_inresvs_t), flag) != 0) {
24725 24721 return (EFAULT);
24726 24722 }
24727 24723 if ((rval = sd_persistent_reservation_in_read_resv(un,
24728 24724 &inresvs, flag)) != 0) {
24729 24725 return (rval);
24730 24726 }
24731 24727 if (ddi_copyout(&inresvs, arg,
24732 24728 sizeof (mhioc_inresvs_t), flag) != 0) {
24733 24729 return (EFAULT);
24734 24730 }
24735 24731 break;
24736 24732 }
24737 24733
24738 24734 #else /* ! _MULTI_DATAMODEL */
24739 24735
24740 24736 if (ddi_copyin(arg, &inresvs, sizeof (mhioc_inresvs_t), flag) != 0) {
24741 24737 return (EFAULT);
24742 24738 }
24743 24739 rval = sd_persistent_reservation_in_read_resv(un, &inresvs, flag);
24744 24740 if (rval != 0) {
24745 24741 return (rval);
24746 24742 }
24747 24743 if (ddi_copyout(&inresvs, arg, sizeof (mhioc_inresvs_t), flag)) {
24748 24744 return (EFAULT);
24749 24745 }
24750 24746
24751 24747 #endif /* ! _MULTI_DATAMODEL */
24752 24748
24753 24749 return (rval);
24754 24750 }
24755 24751
24756 24752
24757 24753 /*
24758 24754 * The following routines support the clustering functionality described below
24759 24755 * and implement lost reservation reclaim functionality.
24760 24756 *
24761 24757 * Clustering
24762 24758 * ----------
24763 24759 * The clustering code uses two different, independent forms of SCSI
24764 24760 * reservation. Traditional SCSI-2 Reserve/Release and the newer SCSI-3
24765 24761 * Persistent Group Reservations. For any particular disk, it will use either
24766 24762 * SCSI-2 or SCSI-3 PGR but never both at the same time for the same disk.
24767 24763 *
24768 24764 * SCSI-2
24769 24765 * The cluster software takes ownership of a multi-hosted disk by issuing the
24770 24766 * MHIOCTKOWN ioctl to the disk driver. It releases ownership by issuing the
24771 24767 * MHIOCRELEASE ioctl. Closely related is the MHIOCENFAILFAST ioctl -- a
24772 24768  * cluster, just after taking ownership of the disk with the MHIOCTKOWN ioctl,
24773 24769 * then issues the MHIOCENFAILFAST ioctl. This ioctl "enables failfast" in the
24774 24770 * driver. The meaning of failfast is that if the driver (on this host) ever
24775 24771 * encounters the scsi error return code RESERVATION_CONFLICT from the device,
24776 24772 * it should immediately panic the host. The motivation for this ioctl is that
24777 24773 * if this host does encounter reservation conflict, the underlying cause is
24778 24774 * that some other host of the cluster has decided that this host is no longer
24779 24775 * in the cluster and has seized control of the disks for itself. Since this
24780 24776 * host is no longer in the cluster, it ought to panic itself. The
24781 24777 * MHIOCENFAILFAST ioctl does two things:
24782 24778 * (a) it sets a flag that will cause any returned RESERVATION_CONFLICT
24783 24779 * error to panic the host
24784 24780 * (b) it sets up a periodic timer to test whether this host still has
24785 24781 * "access" (in that no other host has reserved the device): if the
24786 24782 * periodic timer gets RESERVATION_CONFLICT, the host is panicked. The
24787 24783 * purpose of that periodic timer is to handle scenarios where the host is
24788 24784 * otherwise temporarily quiescent, temporarily doing no real i/o.
24789 24785 * The MHIOCTKOWN ioctl will "break" a reservation that is held by another host,
24790 24786 * by issuing a SCSI Bus Device Reset. It will then issue a SCSI Reserve for
24791 24787 * the device itself.
24792 24788 *
24793 24789 * SCSI-3 PGR
24794 24790 * A direct semantic implementation of the SCSI-3 Persistent Reservation
24795 24791 * facility is supported through the shared multihost disk ioctls
24796 24792 * (MHIOCGRP_INKEYS, MHIOCGRP_INRESV, MHIOCGRP_REGISTER, MHIOCGRP_RESERVE,
24797 24793 * MHIOCGRP_PREEMPTANDABORT, MHIOCGRP_CLEAR)
24798 24794 *
24799 24795 * Reservation Reclaim:
24800 24796 * --------------------
24801 24797  * To support the lost reservation reclaim operations, this driver creates a
24802 24798  * single thread to handle reinstating reservations on all devices that have
24803 24799  * lost reservations. sd_resv_reclaim_requests are logged for all devices that
24804 24800  * have LOST RESERVATIONS when the scsi watch facility calls back
24805 24801  * sd_mhd_watch_cb, and the reservation reclaim thread loops through the
24806 24802  * requests to regain the lost reservations.
24807 24803 */
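
/*
 * Illustrative userland sketch (editorial, not part of the driver): the
 * SCSI-2 take-ownership sequence described above. The mhioctkown field
 * names match the driver code below; the two-second failfast probe
 * interval mirrors the clustering products mentioned above. The helper
 * name and path handling are hypothetical, and error reporting is
 * elided.
 */
#include <sys/types.h>
#include <sys/mhd.h>
#include <fcntl.h>
#include <unistd.h>

static int
example_take_ownership(const char *rdsk_path)
{
	struct mhioctkown tkown;
	int probe_ms = 2000;	/* MHIOCENFAILFAST interval, milliseconds */
	int fd;

	if ((fd = open(rdsk_path, O_RDWR)) < 0)
		return (-1);
	tkown.min_ownership_delay = 0;	/* zeros select driver defaults */
	tkown.max_ownership_delay = 0;
	tkown.reinstate_resv_delay = 0;
	if (ioctl(fd, MHIOCTKOWN, &tkown) != 0 ||
	    ioctl(fd, MHIOCENFAILFAST, &probe_ms) != 0) {
		(void) close(fd);
		return (-1);
	}
	return (fd);	/* caller later issues MHIOCRELEASE, then close */
}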
24808 24804
24809 24805 /*
24810 24806 * Function: sd_check_mhd()
24811 24807 *
24812 24808 * Description: This function sets up and submits a scsi watch request or
24813 24809 * terminates an existing watch request. This routine is used in
24814 24810 * support of reservation reclaim.
24815 24811 *
24816 24812 * Arguments: dev - the device 'dev_t' is used for context to discriminate
24817 24813 * among multiple watches that share the callback function
24818 24814  * 		interval - the number of milliseconds specifying the watch
24819 24815 * interval for issuing TEST UNIT READY commands. If
24820 24816 * set to 0 the watch should be terminated. If the
24821 24817 * interval is set to 0 and if the device is required
24822 24818 * to hold reservation while disabling failfast, the
24823 24819 * watch is restarted with an interval of
24824 24820 * reinstate_resv_delay.
24825 24821 *
24826 24822 * Return Code: 0 - Successful submit/terminate of scsi watch request
24827 24823 * ENXIO - Indicates an invalid device was specified
24828 24824 * EAGAIN - Unable to submit the scsi watch request
24829 24825 */
24830 24826
24831 24827 static int
24832 24828 sd_check_mhd(dev_t dev, int interval)
24833 24829 {
24834 24830 struct sd_lun *un;
24835 24831 opaque_t token;
24836 24832
24837 24833 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24838 24834 return (ENXIO);
24839 24835 }
24840 24836
24841 24837 /* is this a watch termination request? */
24842 24838 if (interval == 0) {
24843 24839 mutex_enter(SD_MUTEX(un));
24844 24840 /* if there is an existing watch task then terminate it */
24845 24841 if (un->un_mhd_token) {
24846 24842 token = un->un_mhd_token;
24847 24843 un->un_mhd_token = NULL;
24848 24844 mutex_exit(SD_MUTEX(un));
24849 24845 (void) scsi_watch_request_terminate(token,
24850 24846 SCSI_WATCH_TERMINATE_ALL_WAIT);
24851 24847 mutex_enter(SD_MUTEX(un));
24852 24848 } else {
24853 24849 mutex_exit(SD_MUTEX(un));
24854 24850 /*
24855 24851 * Note: If we return here we don't check for the
24856 24852 * failfast case. This is the original legacy
24857 24853 * implementation but perhaps we should be checking
24858 24854 * the failfast case.
24859 24855 */
24860 24856 return (0);
24861 24857 }
24862 24858 /*
24863 24859 * If the device is required to hold reservation while
24864 24860 * disabling failfast, we need to restart the scsi_watch
24865 24861 * routine with an interval of reinstate_resv_delay.
24866 24862 */
24867 24863 if (un->un_resvd_status & SD_RESERVE) {
24868 24864 interval = sd_reinstate_resv_delay/1000;
24869 24865 } else {
24870 24866 /* no failfast so bail */
24871 24867 mutex_exit(SD_MUTEX(un));
24872 24868 return (0);
24873 24869 }
24874 24870 mutex_exit(SD_MUTEX(un));
24875 24871 }
24876 24872
24877 24873 /*
24878 24874 * adjust minimum time interval to 1 second,
24879 24875 * and convert from msecs to usecs
24880 24876 */
24881 24877 if (interval > 0 && interval < 1000) {
24882 24878 interval = 1000;
24883 24879 }
24884 24880 interval *= 1000;
24885 24881
24886 24882 /*
24887 24883 * submit the request to the scsi_watch service
24888 24884 */
24889 24885 token = scsi_watch_request_submit(SD_SCSI_DEVP(un), interval,
24890 24886 SENSE_LENGTH, sd_mhd_watch_cb, (caddr_t)dev);
24891 24887 if (token == NULL) {
24892 24888 return (EAGAIN);
24893 24889 }
24894 24890
24895 24891 /*
24896 24892 * save token for termination later on
24897 24893 */
24898 24894 mutex_enter(SD_MUTEX(un));
24899 24895 un->un_mhd_token = token;
24900 24896 mutex_exit(SD_MUTEX(un));
24901 24897 return (0);
24902 24898 }
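
/*
 * Illustrative sketch (editorial, not driver code): the interval
 * normalization sd_check_mhd() applies above before calling
 * scsi_watch_request_submit(): milliseconds in, clamped to a one-second
 * floor, microseconds out. The helper name is hypothetical.
 */
static int
sd_example_mhd_interval_usecs(int interval_ms)
{
	if (interval_ms > 0 && interval_ms < 1000)
		interval_ms = 1000;	/* enforce the 1-second minimum */
	return (interval_ms * 1000);	/* convert msecs to usecs */
}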
24903 24899
24904 24900
24905 24901 /*
24906 24902 * Function: sd_mhd_watch_cb()
24907 24903 *
24908 24904  * Description: This function is the callback function used by the scsi watch
24909 24905 * facility. The scsi watch facility sends the "Test Unit Ready"
24910 24906 * and processes the status. If applicable (i.e. a "Unit Attention"
24911 24907 * status and automatic "Request Sense" not used) the scsi watch
24912 24908 * facility will send a "Request Sense" and retrieve the sense data
24913 24909  * 		to be passed to this callback function. In either case,
24914 24910  * 		whether the automatic "Request Sense" was used or the facility
24915 24911  * 		submitted one, this callback is passed the status and sense data.
24916 24912 *
24917 24913 * Arguments: arg - the device 'dev_t' is used for context to discriminate
24918 24914 * among multiple watches that share this callback function
24919 24915 * resultp - scsi watch facility result packet containing scsi
24920 24916 * packet, status byte and sense data
24921 24917 *
24922 24918 * Return Code: 0 - continue the watch task
24923 24919 * non-zero - terminate the watch task
24924 24920 */
24925 24921
24926 24922 static int
24927 24923 sd_mhd_watch_cb(caddr_t arg, struct scsi_watch_result *resultp)
24928 24924 {
24929 24925 struct sd_lun *un;
24930 24926 struct scsi_status *statusp;
24931 24927 uint8_t *sensep;
24932 24928 struct scsi_pkt *pkt;
24933 24929 uchar_t actual_sense_length;
24934 24930 dev_t dev = (dev_t)arg;
24935 24931
24936 24932 ASSERT(resultp != NULL);
24937 24933 statusp = resultp->statusp;
24938 24934 sensep = (uint8_t *)resultp->sensep;
24939 24935 pkt = resultp->pkt;
24940 24936 actual_sense_length = resultp->actual_sense_length;
24941 24937
24942 24938 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
24943 24939 return (ENXIO);
24944 24940 }
24945 24941
24946 24942 SD_TRACE(SD_LOG_IOCTL_MHD, un,
24947 24943 "sd_mhd_watch_cb: reason '%s', status '%s'\n",
24948 24944 scsi_rname(pkt->pkt_reason), sd_sname(*((unsigned char *)statusp)));
24949 24945
24950 24946 /* Begin processing of the status and/or sense data */
24951 24947 if (pkt->pkt_reason != CMD_CMPLT) {
24952 24948 /* Handle the incomplete packet */
24953 24949 sd_mhd_watch_incomplete(un, pkt);
24954 24950 return (0);
24955 24951 } else if (*((unsigned char *)statusp) != STATUS_GOOD) {
24956 24952 if (*((unsigned char *)statusp)
24957 24953 == STATUS_RESERVATION_CONFLICT) {
24958 24954 /*
24959 24955 * Handle a reservation conflict by panicking if
24960 24956 * configured for failfast or by logging the conflict
24961 24957 * and updating the reservation status
24962 24958 */
24963 24959 mutex_enter(SD_MUTEX(un));
24964 24960 if ((un->un_resvd_status & SD_FAILFAST) &&
24965 24961 (sd_failfast_enable)) {
24966 24962 sd_panic_for_res_conflict(un);
24967 24963 /*NOTREACHED*/
24968 24964 }
24969 24965 SD_INFO(SD_LOG_IOCTL_MHD, un,
24970 24966 "sd_mhd_watch_cb: Reservation Conflict\n");
24971 24967 un->un_resvd_status |= SD_RESERVATION_CONFLICT;
24972 24968 mutex_exit(SD_MUTEX(un));
24973 24969 }
24974 24970 }
24975 24971
24976 24972 if (sensep != NULL) {
24977 24973 if (actual_sense_length >= (SENSE_LENGTH - 2)) {
24978 24974 mutex_enter(SD_MUTEX(un));
24979 24975 if ((scsi_sense_asc(sensep) ==
24980 24976 SD_SCSI_RESET_SENSE_CODE) &&
24981 24977 (un->un_resvd_status & SD_RESERVE)) {
24982 24978 /*
24983 24979 * The additional sense code indicates a power
24984 24980 * on or bus device reset has occurred; update
24985 24981 * the reservation status.
24986 24982 */
24987 24983 un->un_resvd_status |=
24988 24984 (SD_LOST_RESERVE | SD_WANT_RESERVE);
24989 24985 SD_INFO(SD_LOG_IOCTL_MHD, un,
24990 24986 "sd_mhd_watch_cb: Lost Reservation\n");
24991 24987 }
24992 24988 } else {
24993 24989 return (0);
24994 24990 }
24995 24991 } else {
24996 24992 mutex_enter(SD_MUTEX(un));
24997 24993 }
24998 24994
24999 24995 if ((un->un_resvd_status & SD_RESERVE) &&
25000 24996 (un->un_resvd_status & SD_LOST_RESERVE)) {
25001 24997 if (un->un_resvd_status & SD_WANT_RESERVE) {
25002 24998 /*
25003 24999 * A reset occurred in between the last probe and this
25004 25000 * one so if a timeout is pending cancel it.
25005 25001 */
25006 25002 if (un->un_resvd_timeid) {
25007 25003 timeout_id_t temp_id = un->un_resvd_timeid;
25008 25004 un->un_resvd_timeid = NULL;
25009 25005 mutex_exit(SD_MUTEX(un));
25010 25006 (void) untimeout(temp_id);
25011 25007 mutex_enter(SD_MUTEX(un));
25012 25008 }
25013 25009 un->un_resvd_status &= ~SD_WANT_RESERVE;
25014 25010 }
25015 25011 if (un->un_resvd_timeid == 0) {
25016 25012 /* Schedule a timeout to handle the lost reservation */
25017 25013 un->un_resvd_timeid = timeout(sd_mhd_resvd_recover,
25018 25014 (void *)dev,
25019 25015 drv_usectohz(sd_reinstate_resv_delay));
25020 25016 }
25021 25017 }
25022 25018 mutex_exit(SD_MUTEX(un));
25023 25019 return (0);
25024 25020 }
25025 25021
25026 25022
25027 25023 /*
25028 25024 * Function: sd_mhd_watch_incomplete()
25029 25025 *
25030 25026 * Description: This function is used to find out why a scsi pkt sent by the
25031 25027  * 		scsi watch facility was not completed. In some scenarios this
25032 25028  * 		routine simply returns; otherwise it sends a bus reset to see
25033 25029  * 		if the drive is still online.
25034 25030 *
25035 25031 * Arguments: un - driver soft state (unit) structure
25036 25032 * pkt - incomplete scsi pkt
25037 25033 */
25038 25034
25039 25035 static void
25040 25036 sd_mhd_watch_incomplete(struct sd_lun *un, struct scsi_pkt *pkt)
25041 25037 {
25042 25038 int be_chatty;
25043 25039 int perr;
25044 25040
25045 25041 ASSERT(pkt != NULL);
25046 25042 ASSERT(un != NULL);
25047 25043 be_chatty = (!(pkt->pkt_flags & FLAG_SILENT));
25048 25044 perr = (pkt->pkt_statistics & STAT_PERR);
25049 25045
25050 25046 mutex_enter(SD_MUTEX(un));
25051 25047 if (un->un_state == SD_STATE_DUMPING) {
25052 25048 mutex_exit(SD_MUTEX(un));
25053 25049 return;
25054 25050 }
25055 25051
25056 25052 switch (pkt->pkt_reason) {
25057 25053 case CMD_UNX_BUS_FREE:
25058 25054 /*
25059 25055 * If we had a parity error that caused the target to drop BSY*,
25060 25056 * don't be chatty about it.
25061 25057 */
25062 25058 if (perr && be_chatty) {
25063 25059 be_chatty = 0;
25064 25060 }
25065 25061 break;
25066 25062 case CMD_TAG_REJECT:
25067 25063 /*
25068 25064 * The SCSI-2 spec states that a tag reject will be sent by the
25069 25065 * target if tagged queuing is not supported. A tag reject may
25070 25066 * also be sent during certain initialization periods or to
25071 25067 * control internal resources. For the latter case the target
25072 25068 * may also return Queue Full.
25073 25069 *
25074 25070 * If this driver receives a tag reject from a target that is
25075 25071 * going through an init period or controlling internal
25076 25072 	 * resources, tagged queuing will be disabled. This is less than
25077 25073 	 * optimal behavior, but the driver is unable to determine the
25078 25074 	 * target state and assumes tagged queuing is not supported.
25079 25075 */
25080 25076 pkt->pkt_flags = 0;
25081 25077 un->un_tagflags = 0;
25082 25078
25083 25079 if (un->un_f_opt_queueing == TRUE) {
25084 25080 un->un_throttle = min(un->un_throttle, 3);
25085 25081 } else {
25086 25082 un->un_throttle = 1;
25087 25083 }
25088 25084 mutex_exit(SD_MUTEX(un));
25089 25085 (void) scsi_ifsetcap(SD_ADDRESS(un), "tagged-qing", 0, 1);
25090 25086 mutex_enter(SD_MUTEX(un));
25091 25087 break;
25092 25088 case CMD_INCOMPLETE:
25093 25089 /*
25094 25090 * The transport stopped with an abnormal state, fallthrough and
25095 25091 * reset the target and/or bus unless selection did not complete
25096 25092 * (indicated by STATE_GOT_BUS) in which case we don't want to
25097 25093 * go through a target/bus reset
25098 25094 */
25099 25095 if (pkt->pkt_state == STATE_GOT_BUS) {
25100 25096 break;
25101 25097 }
25102 25098 /*FALLTHROUGH*/
25103 25099
25104 25100 case CMD_TIMEOUT:
25105 25101 default:
25106 25102 /*
25107 25103 * The lun may still be running the command, so a lun reset
25108 25104 * should be attempted. If the lun reset fails or cannot be
25109 25105 	 * issued, then try a target reset. Lastly, try a bus reset.
25110 25106 */
25111 25107 if ((pkt->pkt_statistics &
25112 25108 (STAT_BUS_RESET|STAT_DEV_RESET|STAT_ABORTED)) == 0) {
25113 25109 int reset_retval = 0;
25114 25110 mutex_exit(SD_MUTEX(un));
25115 25111 if (un->un_f_allow_bus_device_reset == TRUE) {
25116 25112 if (un->un_f_lun_reset_enabled == TRUE) {
25117 25113 reset_retval =
25118 25114 scsi_reset(SD_ADDRESS(un),
25119 25115 RESET_LUN);
25120 25116 }
25121 25117 if (reset_retval == 0) {
25122 25118 reset_retval =
25123 25119 scsi_reset(SD_ADDRESS(un),
25124 25120 RESET_TARGET);
25125 25121 }
25126 25122 }
25127 25123 if (reset_retval == 0) {
25128 25124 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
25129 25125 }
25130 25126 mutex_enter(SD_MUTEX(un));
25131 25127 }
25132 25128 break;
25133 25129 }
25134 25130
25135 25131 /* A device/bus reset has occurred; update the reservation status. */
25136 25132 if ((pkt->pkt_reason == CMD_RESET) || (pkt->pkt_statistics &
25137 25133 (STAT_BUS_RESET | STAT_DEV_RESET))) {
25138 25134 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25139 25135 un->un_resvd_status |=
25140 25136 (SD_LOST_RESERVE | SD_WANT_RESERVE);
25141 25137 SD_INFO(SD_LOG_IOCTL_MHD, un,
25142 25138 "sd_mhd_watch_incomplete: Lost Reservation\n");
25143 25139 }
25144 25140 }
25145 25141
25146 25142 /*
25147 25143 	 * The disk has been turned off; update the device state.
25148 25144 *
25149 25145 * Note: Should we be offlining the disk here?
25150 25146 */
25151 25147 if (pkt->pkt_state == STATE_GOT_BUS) {
25152 25148 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_watch_incomplete: "
25153 25149 "Disk not responding to selection\n");
25154 25150 if (un->un_state != SD_STATE_OFFLINE) {
25155 25151 New_state(un, SD_STATE_OFFLINE);
25156 25152 }
25157 25153 } else if (be_chatty) {
25158 25154 /*
25159 25155 * suppress messages if they are all the same pkt reason;
25160 25156 * with TQ, many (up to 256) are returned with the same
25161 25157 * pkt_reason
25162 25158 */
25163 25159 if (pkt->pkt_reason != un->un_last_pkt_reason) {
25164 25160 SD_ERROR(SD_LOG_IOCTL_MHD, un,
25165 25161 "sd_mhd_watch_incomplete: "
25166 25162 "SCSI transport failed: reason '%s'\n",
25167 25163 scsi_rname(pkt->pkt_reason));
25168 25164 }
25169 25165 }
25170 25166 un->un_last_pkt_reason = pkt->pkt_reason;
25171 25167 mutex_exit(SD_MUTEX(un));
25172 25168 }
25173 25169
25174 25170
25175 25171 /*
25176 25172 * Function: sd_sname()
25177 25173 *
25178 25174 * Description: This is a simple little routine to return a string containing
25179 25175 * a printable description of command status byte for use in
25180 25176 * logging.
25181 25177 *
25182 25178 * Arguments: status - pointer to a status byte
25183 25179 *
25184 25180 * Return Code: char * - string containing status description.
25185 25181 */
25186 25182
25187 25183 static char *
25188 25184 sd_sname(uchar_t status)
25189 25185 {
25190 25186 switch (status & STATUS_MASK) {
25191 25187 case STATUS_GOOD:
25192 25188 return ("good status");
25193 25189 case STATUS_CHECK:
25194 25190 return ("check condition");
25195 25191 case STATUS_MET:
25196 25192 return ("condition met");
25197 25193 case STATUS_BUSY:
25198 25194 return ("busy");
25199 25195 case STATUS_INTERMEDIATE:
25200 25196 return ("intermediate");
25201 25197 case STATUS_INTERMEDIATE_MET:
25202 25198 return ("intermediate - condition met");
25203 25199 case STATUS_RESERVATION_CONFLICT:
25204 25200 return ("reservation_conflict");
25205 25201 case STATUS_TERMINATED:
25206 25202 return ("command terminated");
25207 25203 case STATUS_QFULL:
25208 25204 return ("queue full");
25209 25205 default:
25210 25206 return ("<unknown status>");
25211 25207 }
25212 25208 }
25213 25209
25214 25210
25215 25211 /*
25216 25212 * Function: sd_mhd_resvd_recover()
25217 25213 *
25218 25214 * Description: This function adds a reservation entry to the
25219 25215 * sd_resv_reclaim_request list and signals the reservation
25220 25216 * reclaim thread that there is work pending. If the reservation
25221 25217 * reclaim thread has not been previously created this function
25222 25218 * will kick it off.
25223 25219 *
25224 25220 * Arguments: arg - the device 'dev_t' is used for context to discriminate
25225 25221 * among multiple watches that share this callback function
25226 25222 *
25227 25223 * Context: This routine is called by timeout() and is run in interrupt
25228 25224 * context. It must not sleep or call other functions which may
25229 25225 * sleep.
25230 25226 */
25231 25227
25232 25228 static void
25233 25229 sd_mhd_resvd_recover(void *arg)
25234 25230 {
25235 25231 dev_t dev = (dev_t)arg;
25236 25232 struct sd_lun *un;
25237 25233 struct sd_thr_request *sd_treq = NULL;
25238 25234 struct sd_thr_request *sd_cur = NULL;
25239 25235 struct sd_thr_request *sd_prev = NULL;
25240 25236 int already_there = 0;
25241 25237
25242 25238 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25243 25239 return;
25244 25240 }
25245 25241
25246 25242 mutex_enter(SD_MUTEX(un));
25247 25243 un->un_resvd_timeid = NULL;
25248 25244 if (un->un_resvd_status & SD_WANT_RESERVE) {
25249 25245 /*
25250 25246 * There was a reset so don't issue the reserve, allow the
25251 25247 * sd_mhd_watch_cb callback function to notice this and
25252 25248 * reschedule the timeout for reservation.
25253 25249 */
25254 25250 mutex_exit(SD_MUTEX(un));
25255 25251 return;
25256 25252 }
25257 25253 mutex_exit(SD_MUTEX(un));
25258 25254
25259 25255 /*
25260 25256 * Add this device to the sd_resv_reclaim_request list and the
25261 25257 * sd_resv_reclaim_thread should take care of the rest.
25262 25258 *
25263 25259 	 * Note: We can't sleep in this context, so if the memory allocation
25264 25260 	 * fails, allow the sd_mhd_watch_cb callback function to notice this and
25265 25261 * reschedule the timeout for reservation. (4378460)
25266 25262 */
25267 25263 sd_treq = (struct sd_thr_request *)
25268 25264 kmem_zalloc(sizeof (struct sd_thr_request), KM_NOSLEEP);
25269 25265 if (sd_treq == NULL) {
25270 25266 return;
25271 25267 }
25272 25268
25273 25269 sd_treq->sd_thr_req_next = NULL;
25274 25270 sd_treq->dev = dev;
25275 25271 mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25276 25272 if (sd_tr.srq_thr_req_head == NULL) {
25277 25273 sd_tr.srq_thr_req_head = sd_treq;
25278 25274 } else {
25279 25275 sd_cur = sd_prev = sd_tr.srq_thr_req_head;
25280 25276 for (; sd_cur != NULL; sd_cur = sd_cur->sd_thr_req_next) {
25281 25277 if (sd_cur->dev == dev) {
25282 25278 /*
25283 25279 * already in Queue so don't log
25284 25280 * another request for the device
25285 25281 */
25286 25282 already_there = 1;
25287 25283 break;
25288 25284 }
25289 25285 sd_prev = sd_cur;
25290 25286 }
25291 25287 if (!already_there) {
25292 25288 SD_INFO(SD_LOG_IOCTL_MHD, un, "sd_mhd_resvd_recover: "
25293 25289 "logging request for %lx\n", dev);
25294 25290 sd_prev->sd_thr_req_next = sd_treq;
25295 25291 } else {
25296 25292 kmem_free(sd_treq, sizeof (struct sd_thr_request));
25297 25293 }
25298 25294 }
25299 25295
25300 25296 /*
25301 25297 * Create a kernel thread to do the reservation reclaim and free up this
25302 25298 * thread. We cannot block this thread while we go away to do the
25303 25299 	 * reservation reclaim.
25304 25300 */
25305 25301 if (sd_tr.srq_resv_reclaim_thread == NULL)
25306 25302 sd_tr.srq_resv_reclaim_thread = thread_create(NULL, 0,
25307 25303 sd_resv_reclaim_thread, NULL,
25308 25304 0, &p0, TS_RUN, v.v_maxsyspri - 2);
25309 25305
25310 25306 /* Tell the reservation reclaim thread that it has work to do */
25311 25307 cv_signal(&sd_tr.srq_resv_reclaim_cv);
25312 25308 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25313 25309 }
25314 25310
25315 25311 /*
25316 25312 * Function: sd_resv_reclaim_thread()
25317 25313 *
25318 25314 * Description: This function implements the reservation reclaim operations
25319 25315 *
25320 25316  * Arguments: none. Pending requests are dequeued from the global
25321 25317  *	sd_tr.srq_thr_req_head list.
25322 25318 */
25323 25319
25324 25320 static void
25325 25321 sd_resv_reclaim_thread()
25326 25322 {
25327 25323 struct sd_lun *un;
25328 25324 struct sd_thr_request *sd_mhreq;
25329 25325
25330 25326 /* Wait for work */
25331 25327 mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25332 25328 if (sd_tr.srq_thr_req_head == NULL) {
25333 25329 cv_wait(&sd_tr.srq_resv_reclaim_cv,
25334 25330 &sd_tr.srq_resv_reclaim_mutex);
25335 25331 }
25336 25332
25337 25333 /* Loop while we have work */
25338 25334 while ((sd_tr.srq_thr_cur_req = sd_tr.srq_thr_req_head) != NULL) {
25339 25335 un = ddi_get_soft_state(sd_state,
25340 25336 SDUNIT(sd_tr.srq_thr_cur_req->dev));
25341 25337 if (un == NULL) {
25342 25338 /*
25343 25339 * softstate structure is NULL so just
25344 25340 * dequeue the request and continue
25345 25341 */
25346 25342 sd_tr.srq_thr_req_head =
25347 25343 sd_tr.srq_thr_cur_req->sd_thr_req_next;
25348 25344 kmem_free(sd_tr.srq_thr_cur_req,
25349 25345 sizeof (struct sd_thr_request));
25350 25346 continue;
25351 25347 }
25352 25348
25353 25349 /* dequeue the request */
25354 25350 sd_mhreq = sd_tr.srq_thr_cur_req;
25355 25351 sd_tr.srq_thr_req_head =
25356 25352 sd_tr.srq_thr_cur_req->sd_thr_req_next;
25357 25353 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25358 25354
25359 25355 /*
25360 25356 * Reclaim reservation only if SD_RESERVE is still set. There
25361 25357 * may have been a call to MHIOCRELEASE before we got here.
25362 25358 */
25363 25359 mutex_enter(SD_MUTEX(un));
25364 25360 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25365 25361 /*
25366 25362 * Note: The SD_LOST_RESERVE flag is cleared before
25367 25363 * reclaiming the reservation. If this is done after the
25368 25364 * call to sd_reserve_release a reservation loss in the
25369 25365 * window between pkt completion of reserve cmd and
25370 25366 * mutex_enter below may not be recognized
25371 25367 */
25372 25368 un->un_resvd_status &= ~SD_LOST_RESERVE;
25373 25369 mutex_exit(SD_MUTEX(un));
25374 25370
25375 25371 if (sd_reserve_release(sd_mhreq->dev,
25376 25372 SD_RESERVE) == 0) {
25377 25373 mutex_enter(SD_MUTEX(un));
25378 25374 un->un_resvd_status |= SD_RESERVE;
25379 25375 mutex_exit(SD_MUTEX(un));
25380 25376 SD_INFO(SD_LOG_IOCTL_MHD, un,
25381 25377 "sd_resv_reclaim_thread: "
25382 25378 "Reservation Recovered\n");
25383 25379 } else {
25384 25380 mutex_enter(SD_MUTEX(un));
25385 25381 un->un_resvd_status |= SD_LOST_RESERVE;
25386 25382 mutex_exit(SD_MUTEX(un));
25387 25383 SD_INFO(SD_LOG_IOCTL_MHD, un,
25388 25384 "sd_resv_reclaim_thread: Failed "
25389 25385 "Reservation Recovery\n");
25390 25386 }
25391 25387 } else {
25392 25388 mutex_exit(SD_MUTEX(un));
25393 25389 }
25394 25390 mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25395 25391 ASSERT(sd_mhreq == sd_tr.srq_thr_cur_req);
25396 25392 kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
25397 25393 sd_mhreq = sd_tr.srq_thr_cur_req = NULL;
25398 25394 /*
25399 25395 * wakeup the destroy thread if anyone is waiting on
25400 25396 * us to complete.
25401 25397 */
25402 25398 cv_signal(&sd_tr.srq_inprocess_cv);
25403 25399 SD_TRACE(SD_LOG_IOCTL_MHD, un,
25404 25400 "sd_resv_reclaim_thread: cv_signalling current request \n");
25405 25401 }
25406 25402
25407 25403 /*
25408 25404 	 * clean up the sd_tr structure now that this thread will not exist.
25409 25405 */
25410 25406 ASSERT(sd_tr.srq_thr_req_head == NULL);
25411 25407 ASSERT(sd_tr.srq_thr_cur_req == NULL);
25412 25408 sd_tr.srq_resv_reclaim_thread = NULL;
25413 25409 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25414 25410 thread_exit();
25415 25411 }
25416 25412
25417 25413
25418 25414 /*
25419 25415 * Function: sd_rmv_resv_reclaim_req()
25420 25416 *
25421 25417 * Description: This function removes any pending reservation reclaim requests
25422 25418 * for the specified device.
25423 25419 *
25424 25420 * Arguments: dev - the device 'dev_t'
25425 25421 */
25426 25422
25427 25423 static void
25428 25424 sd_rmv_resv_reclaim_req(dev_t dev)
25429 25425 {
25430 25426 struct sd_thr_request *sd_mhreq;
25431 25427 struct sd_thr_request *sd_prev;
25432 25428
25433 25429 /* Remove a reservation reclaim request from the list */
25434 25430 mutex_enter(&sd_tr.srq_resv_reclaim_mutex);
25435 25431 if (sd_tr.srq_thr_cur_req && sd_tr.srq_thr_cur_req->dev == dev) {
25436 25432 /*
25437 25433 * We are attempting to reinstate reservation for
25438 25434 * this device. We wait for sd_reserve_release()
25439 25435 * to return before we return.
25440 25436 */
25441 25437 cv_wait(&sd_tr.srq_inprocess_cv,
25442 25438 &sd_tr.srq_resv_reclaim_mutex);
25443 25439 } else {
25444 25440 sd_prev = sd_mhreq = sd_tr.srq_thr_req_head;
25445 25441 if (sd_mhreq && sd_mhreq->dev == dev) {
25446 25442 sd_tr.srq_thr_req_head = sd_mhreq->sd_thr_req_next;
25447 25443 kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
25448 25444 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25449 25445 return;
25450 25446 }
25451 25447 for (; sd_mhreq != NULL; sd_mhreq = sd_mhreq->sd_thr_req_next) {
25452 25448 if (sd_mhreq && sd_mhreq->dev == dev) {
25453 25449 break;
25454 25450 }
25455 25451 sd_prev = sd_mhreq;
25456 25452 }
25457 25453 if (sd_mhreq != NULL) {
25458 25454 sd_prev->sd_thr_req_next = sd_mhreq->sd_thr_req_next;
25459 25455 kmem_free(sd_mhreq, sizeof (struct sd_thr_request));
25460 25456 }
25461 25457 }
25462 25458 mutex_exit(&sd_tr.srq_resv_reclaim_mutex);
25463 25459 }
25464 25460
25465 25461
25466 25462 /*
25467 25463 * Function: sd_mhd_reset_notify_cb()
25468 25464 *
25469 25465  * Description: This is a callback function for scsi_reset_notify. This
25470 25466 * function updates the softstate reserved status and logs the
25471 25467 * reset. The driver scsi watch facility callback function
25472 25468 * (sd_mhd_watch_cb) and reservation reclaim thread functionality
25473 25469 * will reclaim the reservation.
25474 25470 *
25475 25471 * Arguments: arg - driver soft state (unit) structure
25476 25472 */
25477 25473
25478 25474 static void
25479 25475 sd_mhd_reset_notify_cb(caddr_t arg)
25480 25476 {
25481 25477 struct sd_lun *un = (struct sd_lun *)arg;
25482 25478
25483 25479 mutex_enter(SD_MUTEX(un));
25484 25480 if ((un->un_resvd_status & SD_RESERVE) == SD_RESERVE) {
25485 25481 un->un_resvd_status |= (SD_LOST_RESERVE | SD_WANT_RESERVE);
25486 25482 SD_INFO(SD_LOG_IOCTL_MHD, un,
25487 25483 "sd_mhd_reset_notify_cb: Lost Reservation\n");
25488 25484 }
25489 25485 mutex_exit(SD_MUTEX(un));
25490 25486 }
25491 25487
25492 25488
25493 25489 /*
25494 25490 * Function: sd_take_ownership()
25495 25491 *
25496 25492 * Description: This routine implements an algorithm to achieve a stable
25497 25493 * reservation on disks which don't implement priority reserve,
25498 25494  * and makes sure that other hosts lose re-reservation attempts.
25499 25495  * This algorithm consists of a loop that keeps issuing the RESERVE
25500 25496  * for some period of time (min_ownership_delay, default 6 seconds).
25501 25497  * During that loop, it looks to see if there has been a bus device
25502 25498  * reset or bus reset (both of which cause an existing reservation
25503 25499  * to be lost). If the reservation is lost, issue RESERVE until a
25504 25500 * period of min_ownership_delay with no resets has gone by, or
25505 25501 * until max_ownership_delay has expired. This loop ensures that
25506 25502 * the host really did manage to reserve the device, in spite of
25507 25503 * resets. The looping for min_ownership_delay (default six
25508 25504 * seconds) is important to early generation clustering products,
25509 25505 * Solstice HA 1.x and Sun Cluster 2.x. Those products use an
25510 25506 * MHIOCENFAILFAST periodic timer of two seconds. By having
25511 25507 * MHIOCTKOWN issue Reserves in a loop for six seconds, and having
25512 25508 * MHIOCENFAILFAST poll every two seconds, the idea is that by the
25513 25509 * time the MHIOCTKOWN ioctl returns, the other host (if any) will
25514 25510 * have already noticed, via the MHIOCENFAILFAST polling, that it
25515 25511 * no longer "owns" the disk and will have panicked itself. Thus,
25516 25512 * the host issuing the MHIOCTKOWN is assured (with timing
25517 25513 * dependencies) that by the time it actually starts to use the
25518 25514 * disk for real work, the old owner is no longer accessing it.
25519 25515 *
25520 25516 * min_ownership_delay is the minimum amount of time for which the
25521 25517 * disk must be reserved continuously devoid of resets before the
25522 25518 * MHIOCTKOWN ioctl will return success.
25523 25519 *
25524 25520 * max_ownership_delay indicates the amount of time by which the
25525 25521 * take ownership should succeed or timeout with an error.
25526 25522 *
25527 25523 * Arguments: dev - the device 'dev_t'
25528 25524 * *p - struct containing timing info.
25529 25525 *
25530 25526 * Return Code: 0 for success or error code
25531 25527 */
25532 25528
25533 25529 static int
25534 25530 sd_take_ownership(dev_t dev, struct mhioctkown *p)
25535 25531 {
25536 25532 struct sd_lun *un;
25537 25533 int rval;
25538 25534 int err;
25539 25535 int reservation_count = 0;
25540 25536 int min_ownership_delay = 6000000; /* in usec */
25541 25537 int max_ownership_delay = 30000000; /* in usec */
25542 25538 clock_t start_time; /* starting time of this algorithm */
25543 25539 clock_t end_time; /* time limit for giving up */
25544 25540 clock_t ownership_time; /* time limit for stable ownership */
25545 25541 clock_t current_time;
25546 25542 clock_t previous_current_time;
25547 25543
25548 25544 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25549 25545 return (ENXIO);
25550 25546 }
25551 25547
25552 25548 /*
25553 25549 * Attempt a device reservation. A priority reservation is requested.
25554 25550 */
25555 25551 if ((rval = sd_reserve_release(dev, SD_PRIORITY_RESERVE))
25556 25552 != SD_SUCCESS) {
25557 25553 SD_ERROR(SD_LOG_IOCTL_MHD, un,
25558 25554 "sd_take_ownership: return(1)=%d\n", rval);
25559 25555 return (rval);
25560 25556 }
25561 25557
25562 25558 /* Update the softstate reserved status to indicate the reservation */
25563 25559 mutex_enter(SD_MUTEX(un));
25564 25560 un->un_resvd_status |= SD_RESERVE;
25565 25561 un->un_resvd_status &=
25566 25562 ~(SD_LOST_RESERVE | SD_WANT_RESERVE | SD_RESERVATION_CONFLICT);
25567 25563 mutex_exit(SD_MUTEX(un));
25568 25564
25569 25565 if (p != NULL) {
25570 25566 if (p->min_ownership_delay != 0) {
25571 25567 min_ownership_delay = p->min_ownership_delay * 1000;
25572 25568 }
25573 25569 if (p->max_ownership_delay != 0) {
25574 25570 max_ownership_delay = p->max_ownership_delay * 1000;
25575 25571 }
25576 25572 }
25577 25573 SD_INFO(SD_LOG_IOCTL_MHD, un,
25578 25574 "sd_take_ownership: min, max delays: %d, %d\n",
25579 25575 min_ownership_delay, max_ownership_delay);
25580 25576
25581 25577 start_time = ddi_get_lbolt();
25582 25578 current_time = start_time;
25583 25579 ownership_time = current_time + drv_usectohz(min_ownership_delay);
25584 25580 end_time = start_time + drv_usectohz(max_ownership_delay);
25585 25581
25586 25582 while (current_time - end_time < 0) {
25587 25583 delay(drv_usectohz(500000));
25588 25584
25589 25585 if ((err = sd_reserve_release(dev, SD_RESERVE)) != 0) {
25590 25586 if ((sd_reserve_release(dev, SD_RESERVE)) != 0) {
25591 25587 mutex_enter(SD_MUTEX(un));
25592 25588 rval = (un->un_resvd_status &
25593 25589 SD_RESERVATION_CONFLICT) ? EACCES : EIO;
25594 25590 mutex_exit(SD_MUTEX(un));
25595 25591 break;
25596 25592 }
25597 25593 }
25598 25594 previous_current_time = current_time;
25599 25595 current_time = ddi_get_lbolt();
25600 25596 mutex_enter(SD_MUTEX(un));
25601 25597 if (err || (un->un_resvd_status & SD_LOST_RESERVE)) {
25602 25598 ownership_time = ddi_get_lbolt() +
25603 25599 drv_usectohz(min_ownership_delay);
25604 25600 reservation_count = 0;
25605 25601 } else {
25606 25602 reservation_count++;
25607 25603 }
25608 25604 un->un_resvd_status |= SD_RESERVE;
25609 25605 un->un_resvd_status &= ~(SD_LOST_RESERVE | SD_WANT_RESERVE);
25610 25606 mutex_exit(SD_MUTEX(un));
25611 25607
25612 25608 SD_INFO(SD_LOG_IOCTL_MHD, un,
25613 25609 "sd_take_ownership: ticks for loop iteration=%ld, "
25614 25610 "reservation=%s\n", (current_time - previous_current_time),
25615 25611 reservation_count ? "ok" : "reclaimed");
25616 25612
25617 25613 if (current_time - ownership_time >= 0 &&
25618 25614 reservation_count >= 4) {
25619 25615 rval = 0; /* Achieved a stable ownership */
25620 25616 break;
25621 25617 }
25622 25618 if (current_time - end_time >= 0) {
25623 25619 rval = EACCES; /* No ownership in max possible time */
25624 25620 break;
25625 25621 }
25626 25622 }
25627 25623 SD_TRACE(SD_LOG_IOCTL_MHD, un,
25628 25624 "sd_take_ownership: return(2)=%d\n", rval);
25629 25625 return (rval);
25630 25626 }
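
/*
 * Illustrative sketch (editorial, not driver code): how the caller's
 * mhioctkown delays map onto the loop above. Values arrive in
 * milliseconds and are scaled to microseconds; zeros select the
 * 6-second/30-second defaults. The helper name is hypothetical.
 */
static void
sd_example_tkown_delays(const struct mhioctkown *p, int *min_us, int *max_us)
{
	*min_us = 6000000;	/* default min_ownership_delay: 6 seconds */
	*max_us = 30000000;	/* default max_ownership_delay: 30 seconds */
	if (p != NULL && p->min_ownership_delay != 0)
		*min_us = p->min_ownership_delay * 1000;
	if (p != NULL && p->max_ownership_delay != 0)
		*max_us = p->max_ownership_delay * 1000;
}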
25631 25627
25632 25628
25633 25629 /*
25634 25630 * Function: sd_reserve_release()
25635 25631 *
25636 25632 * Description: This function builds and sends scsi RESERVE, RELEASE, and
25637 25633 * PRIORITY RESERVE commands based on a user specified command type
25638 25634 *
25639 25635 * Arguments: dev - the device 'dev_t'
25640 25636 * cmd - user specified command type; one of SD_PRIORITY_RESERVE,
25641 25637 * SD_RESERVE, SD_RELEASE
25642 25638 *
25643 25639 * Return Code: 0 or Error Code
25644 25640 */
25645 25641
25646 25642 static int
25647 25643 sd_reserve_release(dev_t dev, int cmd)
25648 25644 {
25649 25645 struct uscsi_cmd *com = NULL;
25650 25646 struct sd_lun *un = NULL;
25651 25647 char cdb[CDB_GROUP0];
25652 25648 int rval;
25653 25649
25654 25650 ASSERT((cmd == SD_RELEASE) || (cmd == SD_RESERVE) ||
25655 25651 (cmd == SD_PRIORITY_RESERVE));
25656 25652
25657 25653 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
25658 25654 return (ENXIO);
25659 25655 }
25660 25656
25661 25657 /* instantiate and initialize the command and cdb */
25662 25658 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
25663 25659 bzero(cdb, CDB_GROUP0);
25664 25660 com->uscsi_flags = USCSI_SILENT;
25665 25661 com->uscsi_timeout = un->un_reserve_release_time;
25666 25662 com->uscsi_cdblen = CDB_GROUP0;
25667 25663 com->uscsi_cdb = cdb;
25668 25664 if (cmd == SD_RELEASE) {
25669 25665 cdb[0] = SCMD_RELEASE;
25670 25666 } else {
25671 25667 cdb[0] = SCMD_RESERVE;
25672 25668 }
25673 25669
25674 25670 /* Send the command. */
25675 25671 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
25676 25672 SD_PATH_STANDARD);
25677 25673
25678 25674 /*
25679 25675 * "break" a reservation that is held by another host, by issuing a
25680 25676 	 * reset if priority reserve is desired and we could not get the
25681 25677 * device.
25682 25678 */
25683 25679 if ((cmd == SD_PRIORITY_RESERVE) &&
25684 25680 (rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
25685 25681 /*
25686 25682 * First try to reset the LUN. If we cannot, then try a target
25687 25683 * reset, followed by a bus reset if the target reset fails.
25688 25684 */
25689 25685 int reset_retval = 0;
25690 25686 if (un->un_f_lun_reset_enabled == TRUE) {
25691 25687 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_LUN);
25692 25688 }
25693 25689 if (reset_retval == 0) {
25694 25690 /* The LUN reset either failed or was not issued */
25695 25691 reset_retval = scsi_reset(SD_ADDRESS(un), RESET_TARGET);
25696 25692 }
25697 25693 if ((reset_retval == 0) &&
25698 25694 (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0)) {
25699 25695 rval = EIO;
25700 25696 kmem_free(com, sizeof (*com));
25701 25697 return (rval);
25702 25698 }
25703 25699
25704 25700 bzero(com, sizeof (struct uscsi_cmd));
25705 25701 com->uscsi_flags = USCSI_SILENT;
25706 25702 com->uscsi_cdb = cdb;
25707 25703 com->uscsi_cdblen = CDB_GROUP0;
25708 25704 com->uscsi_timeout = 5;
25709 25705
25710 25706 /*
25711 25707 * Reissue the last reserve command, this time without request
25712 25708 * sense. Assume that it is just a regular reserve command.
25713 25709 */
25714 25710 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
25715 25711 SD_PATH_STANDARD);
25716 25712 }
25717 25713
25718 25714 /* Return an error if still getting a reservation conflict. */
25719 25715 if ((rval != 0) && (com->uscsi_status == STATUS_RESERVATION_CONFLICT)) {
25720 25716 rval = EACCES;
25721 25717 }
25722 25718
25723 25719 kmem_free(com, sizeof (*com));
25724 25720 return (rval);
25725 25721 }
25726 25722
25727 25723
25728 25724 #define SD_NDUMP_RETRIES 12
25729 25725 /*
25730 25726 * System Crash Dump routine
25731 25727 */
25732 25728
25733 25729 static int
25734 25730 sddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
25735 25731 {
25736 25732 int instance;
25737 25733 int partition;
25738 25734 int i;
25739 25735 int err;
25740 25736 struct sd_lun *un;
25741 25737 struct scsi_pkt *wr_pktp;
25742 25738 struct buf *wr_bp;
25743 25739 struct buf wr_buf;
25744 25740 daddr_t tgt_byte_offset; /* rmw - byte offset for target */
25745 25741 daddr_t tgt_blkno; /* rmw - blkno for target */
25746 25742 size_t tgt_byte_count; /* rmw - # of bytes to xfer */
25747 25743 size_t tgt_nblk; /* rmw - # of tgt blks to xfer */
25748 25744 size_t io_start_offset;
25749 25745 int doing_rmw = FALSE;
25750 25746 int rval;
25751 25747 ssize_t dma_resid;
25752 25748 daddr_t oblkno;
25753 25749 diskaddr_t nblks = 0;
25754 25750 diskaddr_t start_block;
25755 25751
25756 25752 instance = SDUNIT(dev);
25757 25753 if (((un = ddi_get_soft_state(sd_state, instance)) == NULL) ||
25758 25754 !SD_IS_VALID_LABEL(un) || ISCD(un)) {
25759 25755 return (ENXIO);
25760 25756 }
25761 25757
25762 25758 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))
25763 25759
25764 25760 SD_TRACE(SD_LOG_DUMP, un, "sddump: entry\n");
25765 25761
25766 25762 partition = SDPART(dev);
25767 25763 SD_INFO(SD_LOG_DUMP, un, "sddump: partition = %d\n", partition);
25768 25764
25769 25765 if (!(NOT_DEVBSIZE(un))) {
25770 25766 int secmask = 0;
25771 25767 int blknomask = 0;
25772 25768
25773 25769 blknomask = (un->un_tgt_blocksize / DEV_BSIZE) - 1;
25774 25770 secmask = un->un_tgt_blocksize - 1;
25775 25771
25776 25772 if (blkno & blknomask) {
25777 25773 SD_TRACE(SD_LOG_DUMP, un,
25778 25774 "sddump: dump start block not modulo %d\n",
25779 25775 un->un_tgt_blocksize);
25780 25776 return (EINVAL);
25781 25777 }
25782 25778
25783 25779 if ((nblk * DEV_BSIZE) & secmask) {
25784 25780 SD_TRACE(SD_LOG_DUMP, un,
25785 25781 "sddump: dump length not modulo %d\n",
25786 25782 un->un_tgt_blocksize);
25787 25783 return (EINVAL);
25788 25784 }
25789 25785
25790 25786 }
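	/*
	 * As a sketch of the masks above, assume a hypothetical target
	 * block size of 4096 bytes (8 DEV_BSIZE units of 512):
	 *
	 *	blknomask = (4096 / 512) - 1 = 7
	 *	secmask   = 4096 - 1 = 0xFFF
	 *
	 * A dump starting at blkno 24 is accepted (24 & 7 == 0) while
	 * blkno 21 is rejected, and the byte length (nblk * DEV_BSIZE)
	 * must likewise be a multiple of 4096 or EINVAL is returned.
	 */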
25791 25787
25792 25788 	/* Validate the blocks to dump against the partition size. */
25793 25789
25794 25790 (void) cmlb_partinfo(un->un_cmlbhandle, partition,
25795 25791 &nblks, &start_block, NULL, NULL, (void *)SD_PATH_DIRECT);
25796 25792
25797 25793 if (NOT_DEVBSIZE(un)) {
25798 25794 if ((blkno + nblk) > nblks) {
25799 25795 SD_TRACE(SD_LOG_DUMP, un,
25800 25796 "sddump: dump range larger than partition: "
25801 25797 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
25802 25798 blkno, nblk, nblks);
25803 25799 return (EINVAL);
25804 25800 }
25805 25801 } else {
25806 25802 if (((blkno / (un->un_tgt_blocksize / DEV_BSIZE)) +
25807 25803 (nblk / (un->un_tgt_blocksize / DEV_BSIZE))) > nblks) {
25808 25804 SD_TRACE(SD_LOG_DUMP, un,
25809 25805 "sddump: dump range larger than partition: "
25810 25806 "blkno = 0x%x, nblk = 0x%x, dkl_nblk = 0x%x\n",
25811 25807 blkno, nblk, nblks);
25812 25808 return (EINVAL);
25813 25809 }
25814 25810 }
25815 25811
25816 25812 mutex_enter(&un->un_pm_mutex);
25817 25813 if (SD_DEVICE_IS_IN_LOW_POWER(un)) {
25818 25814 struct scsi_pkt *start_pktp;
25819 25815
25820 25816 mutex_exit(&un->un_pm_mutex);
25821 25817
25822 25818 /*
25823 25819 * use pm framework to power on HBA 1st
25824 25820 */
25825 25821 (void) pm_raise_power(SD_DEVINFO(un), 0,
25826 25822 SD_PM_STATE_ACTIVE(un));
25827 25823
25828 25824 /*
25829 25825 	 * Dump no longer uses sdpower to power on a device; it's
25830 25826 * in-line here so it can be done in polled mode.
25831 25827 */
25832 25828
25833 25829 SD_INFO(SD_LOG_DUMP, un, "sddump: starting device\n");
25834 25830
25835 25831 start_pktp = scsi_init_pkt(SD_ADDRESS(un), NULL, NULL,
25836 25832 CDB_GROUP0, un->un_status_len, 0, 0, NULL_FUNC, NULL);
25837 25833
25838 25834 if (start_pktp == NULL) {
25839 25835 /* We were not given a SCSI packet, fail. */
25840 25836 return (EIO);
25841 25837 }
25842 25838 bzero(start_pktp->pkt_cdbp, CDB_GROUP0);
25843 25839 start_pktp->pkt_cdbp[0] = SCMD_START_STOP;
25844 25840 start_pktp->pkt_cdbp[4] = SD_TARGET_START;
25845 25841 start_pktp->pkt_flags = FLAG_NOINTR;
25846 25842
25847 25843 mutex_enter(SD_MUTEX(un));
25848 25844 SD_FILL_SCSI1_LUN(un, start_pktp);
25849 25845 mutex_exit(SD_MUTEX(un));
25850 25846 /*
25851 25847 * Scsi_poll returns 0 (success) if the command completes and
25852 25848 * the status block is STATUS_GOOD.
25853 25849 */
25854 25850 if (sd_scsi_poll(un, start_pktp) != 0) {
25855 25851 scsi_destroy_pkt(start_pktp);
25856 25852 return (EIO);
25857 25853 }
25858 25854 scsi_destroy_pkt(start_pktp);
25859 25855 (void) sd_pm_state_change(un, SD_PM_STATE_ACTIVE(un),
25860 25856 SD_PM_STATE_CHANGE);
25861 25857 } else {
25862 25858 mutex_exit(&un->un_pm_mutex);
25863 25859 }
25864 25860
25865 25861 mutex_enter(SD_MUTEX(un));
25866 25862 un->un_throttle = 0;
25867 25863
25868 25864 /*
25869 25865 * The first time through, reset the specific target device.
25870 25866 	 * However, when cpr calls sddump we know that sd is in
25871 25867 	 * a good state, so no bus reset is required.
25872 25868 	 * Clear sense data via Request Sense cmd.
25873 25869 	 * In sddump we don't care about allow_bus_device_reset anymore.
25874 25870 */
25875 25871
25876 25872 if ((un->un_state != SD_STATE_SUSPENDED) &&
25877 25873 (un->un_state != SD_STATE_DUMPING)) {
25878 25874
25879 25875 New_state(un, SD_STATE_DUMPING);
25880 25876
25881 25877 if (un->un_f_is_fibre == FALSE) {
25882 25878 mutex_exit(SD_MUTEX(un));
25883 25879 /*
25884 25880 * Attempt a bus reset for parallel scsi.
25885 25881 *
25886 25882 * Note: A bus reset is required because on some host
25887 25883 * systems (i.e. E420R) a bus device reset is
25888 25884 * insufficient to reset the state of the target.
25889 25885 *
25890 25886 * Note: Don't issue the reset for fibre-channel,
25891 25887 * because this tends to hang the bus (loop) for
25892 25888 * too long while everyone is logging out and in
25893 25889 * and the deadman timer for dumping will fire
25894 25890 * before the dump is complete.
25895 25891 */
25896 25892 if (scsi_reset(SD_ADDRESS(un), RESET_ALL) == 0) {
25897 25893 mutex_enter(SD_MUTEX(un));
25898 25894 Restore_state(un);
25899 25895 mutex_exit(SD_MUTEX(un));
25900 25896 return (EIO);
25901 25897 }
25902 25898
25903 25899 /* Delay to give the device some recovery time. */
25904 25900 drv_usecwait(10000);
25905 25901
25906 25902 if (sd_send_polled_RQS(un) == SD_FAILURE) {
25907 25903 SD_INFO(SD_LOG_DUMP, un,
25908 25904 "sddump: sd_send_polled_RQS failed\n");
25909 25905 }
25910 25906 mutex_enter(SD_MUTEX(un));
25911 25907 }
25912 25908 }
25913 25909
25914 25910 /*
25915 25911 * Convert the partition-relative block number to a
25916 25912 * disk physical block number.
25917 25913 */
25918 25914 if (NOT_DEVBSIZE(un)) {
25919 25915 blkno += start_block;
25920 25916 } else {
25921 25917 blkno = blkno / (un->un_tgt_blocksize / DEV_BSIZE);
25922 25918 blkno += start_block;
25923 25919 }
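	/*
	 * For example, in the DEVBSIZE path with a hypothetical 4096-byte
	 * target block (8 DEV_BSIZE units), a partition-relative blkno of
	 * 64 becomes 64 / 8 = 8 target blocks before start_block is added.
	 */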
25924 25920
25925 25921 SD_INFO(SD_LOG_DUMP, un, "sddump: disk blkno = 0x%x\n", blkno);
25926 25922
25927 25923
25928 25924 /*
25929 25925 * Check if the device has a non-512 block size.
25930 25926 */
25931 25927 wr_bp = NULL;
25932 25928 if (NOT_DEVBSIZE(un)) {
25933 25929 tgt_byte_offset = blkno * un->un_sys_blocksize;
25934 25930 tgt_byte_count = nblk * un->un_sys_blocksize;
25935 25931 if ((tgt_byte_offset % un->un_tgt_blocksize) ||
25936 25932 (tgt_byte_count % un->un_tgt_blocksize)) {
25937 25933 doing_rmw = TRUE;
25938 25934 /*
25939 25935 			 * Calculate the block number and number of blocks
25940 25936 * in terms of the media block size.
25941 25937 */
25942 25938 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
25943 25939 tgt_nblk =
25944 25940 ((tgt_byte_offset + tgt_byte_count +
25945 25941 (un->un_tgt_blocksize - 1)) /
25946 25942 un->un_tgt_blocksize) - tgt_blkno;
25947 25943
25948 25944 /*
25949 25945 * Invoke the routine which is going to do read part
25950 25946 * of read-modify-write.
25951 25947 * Note that this routine returns a pointer to
25952 25948 * a valid bp in wr_bp.
25953 25949 */
25954 25950 err = sddump_do_read_of_rmw(un, tgt_blkno, tgt_nblk,
25955 25951 &wr_bp);
25956 25952 if (err) {
25957 25953 mutex_exit(SD_MUTEX(un));
25958 25954 return (err);
25959 25955 }
25960 25956 /*
25961 25957 			 * The offset is calculated as:
25962 25958 			 * (original block # * system block size) -
25963 25959 			 * (new block # * target block size)
25964 25960 */
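			/*
			 * For instance, with a hypothetical 512-byte system
			 * block size and a 4096-byte target block size, a
			 * write of nblk = 2 at blkno = 9 gives:
			 *
			 *	tgt_byte_offset = 9 * 512 = 4608
			 *	tgt_byte_count  = 2 * 512 = 1024
			 *	tgt_blkno       = 4608 / 4096 = 1
			 *	tgt_nblk        = ((4608 + 1024 + 4095) /
			 *	    4096) - 1 = 1
			 *	io_start_offset = 4608 - (1 * 4096) = 512
			 *
			 * so the two system blocks are copied 512 bytes into
			 * the single target block that was read back by
			 * sddump_do_read_of_rmw().
			 */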
25965 25961 io_start_offset =
25966 25962 ((uint64_t)(blkno * un->un_sys_blocksize)) -
25967 25963 ((uint64_t)(tgt_blkno * un->un_tgt_blocksize));
25968 25964
25969 25965 ASSERT((io_start_offset >= 0) &&
25970 25966 (io_start_offset < un->un_tgt_blocksize));
25971 25967 /*
25972 25968 * Do the modify portion of read modify write.
25973 25969 */
25974 25970 bcopy(addr, &wr_bp->b_un.b_addr[io_start_offset],
25975 25971 (size_t)nblk * un->un_sys_blocksize);
25976 25972 } else {
25977 25973 doing_rmw = FALSE;
25978 25974 tgt_blkno = tgt_byte_offset / un->un_tgt_blocksize;
25979 25975 tgt_nblk = tgt_byte_count / un->un_tgt_blocksize;
25980 25976 }
25981 25977
25982 25978 /* Convert blkno and nblk to target blocks */
25983 25979 blkno = tgt_blkno;
25984 25980 nblk = tgt_nblk;
25985 25981 } else {
25986 25982 wr_bp = &wr_buf;
25987 25983 bzero(wr_bp, sizeof (struct buf));
25988 25984 wr_bp->b_flags = B_BUSY;
25989 25985 wr_bp->b_un.b_addr = addr;
25990 25986 wr_bp->b_bcount = nblk << DEV_BSHIFT;
25991 25987 wr_bp->b_resid = 0;
25992 25988 }
25993 25989
25994 25990 mutex_exit(SD_MUTEX(un));
25995 25991
25996 25992 /*
25997 25993 * Obtain a SCSI packet for the write command.
25998 25994 * It should be safe to call the allocator here without
25999 25995 * worrying about being locked for DVMA mapping because
26000 25996 	 * the address we're passed is already a DVMA mapping.
26001 25997 *
26002 25998 * We are also not going to worry about semaphore ownership
26003 25999 * in the dump buffer. Dumping is single threaded at present.
26004 26000 */
26005 26001
26006 26002 wr_pktp = NULL;
26007 26003
26008 26004 dma_resid = wr_bp->b_bcount;
26009 26005 oblkno = blkno;
26010 26006
26011 26007 if (!(NOT_DEVBSIZE(un))) {
26012 26008 nblk = nblk / (un->un_tgt_blocksize / DEV_BSIZE);
26013 26009 }
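	/*
	 * E.g. with a hypothetical 4096-byte target block (8 DEV_BSIZE
	 * units), an nblk of 16 system blocks becomes 16 / 8 = 2 target
	 * blocks here.
	 */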
26014 26010
26015 26011 while (dma_resid != 0) {
26016 26012
26017 26013 for (i = 0; i < SD_NDUMP_RETRIES; i++) {
26018 26014 wr_bp->b_flags &= ~B_ERROR;
26019 26015
26020 26016 if (un->un_partial_dma_supported == 1) {
26021 26017 blkno = oblkno +
26022 26018 ((wr_bp->b_bcount - dma_resid) /
26023 26019 un->un_tgt_blocksize);
26024 26020 nblk = dma_resid / un->un_tgt_blocksize;
26025 26021
26026 26022 if (wr_pktp) {
26027 26023 /*
26028 26024 * Partial DMA transfers after initial transfer
26029 26025 */
26030 26026 rval = sd_setup_next_rw_pkt(un, wr_pktp, wr_bp,
26031 26027 blkno, nblk);
26032 26028 } else {
26033 26029 /* Initial transfer */
26034 26030 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp,
26035 26031 un->un_pkt_flags, NULL_FUNC, NULL,
26036 26032 blkno, nblk);
26037 26033 }
26038 26034 } else {
26039 26035 rval = sd_setup_rw_pkt(un, &wr_pktp, wr_bp,
26040 26036 0, NULL_FUNC, NULL, blkno, nblk);
26041 26037 }
26042 26038
26043 26039 if (rval == 0) {
26044 26040 /* We were given a SCSI packet, continue. */
26045 26041 break;
26046 26042 }
26047 26043
26048 26044 if (i == 0) {
26049 26045 if (wr_bp->b_flags & B_ERROR) {
26050 26046 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26051 26047 "no resources for dumping; "
26052 26048 "error code: 0x%x, retrying",
26053 26049 geterror(wr_bp));
26054 26050 } else {
26055 26051 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26056 26052 "no resources for dumping; retrying");
26057 26053 }
26058 26054 } else if (i != (SD_NDUMP_RETRIES - 1)) {
26059 26055 if (wr_bp->b_flags & B_ERROR) {
26060 26056 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
26061 26057 "no resources for dumping; error code: "
26062 26058 "0x%x, retrying\n", geterror(wr_bp));
26063 26059 }
26064 26060 } else {
26065 26061 if (wr_bp->b_flags & B_ERROR) {
26066 26062 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
26067 26063 "no resources for dumping; "
26068 26064 "error code: 0x%x, retries failed, "
26069 26065 "giving up.\n", geterror(wr_bp));
26070 26066 } else {
26071 26067 scsi_log(SD_DEVINFO(un), sd_label, CE_CONT,
26072 26068 "no resources for dumping; "
26073 26069 "retries failed, giving up.\n");
26074 26070 }
26075 26071 mutex_enter(SD_MUTEX(un));
26076 26072 Restore_state(un);
26077 26073 if (NOT_DEVBSIZE(un) && (doing_rmw == TRUE)) {
26078 26074 mutex_exit(SD_MUTEX(un));
26079 26075 scsi_free_consistent_buf(wr_bp);
26080 26076 } else {
26081 26077 mutex_exit(SD_MUTEX(un));
26082 26078 }
26083 26079 return (EIO);
26084 26080 }
26085 26081 drv_usecwait(10000);
26086 26082 }
26087 26083
26088 26084 if (un->un_partial_dma_supported == 1) {
26089 26085 /*
26090 26086 * save the resid from PARTIAL_DMA
26091 26087 */
26092 26088 dma_resid = wr_pktp->pkt_resid;
26093 26089 if (dma_resid != 0)
26094 26090 nblk -= SD_BYTES2TGTBLOCKS(un, dma_resid);
26095 26091 wr_pktp->pkt_resid = 0;
26096 26092 } else {
26097 26093 dma_resid = 0;
26098 26094 }
26099 26095
26100 26096 /* SunBug 1222170 */
26101 26097 wr_pktp->pkt_flags = FLAG_NOINTR;
26102 26098
26103 26099 err = EIO;
26104 26100 for (i = 0; i < SD_NDUMP_RETRIES; i++) {
26105 26101
26106 26102 /*
26107 26103 * Scsi_poll returns 0 (success) if the command completes and
26108 26104 * the status block is STATUS_GOOD. We should only check
26109 26105 * errors if this condition is not true. Even then we should
26110 26106 * send our own request sense packet only if we have a check
26111 26107 * condition and auto request sense has not been performed by
26112 26108 * the hba.
26113 26109 */
26114 26110 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending write\n");
26115 26111
26116 26112 if ((sd_scsi_poll(un, wr_pktp) == 0) &&
26117 26113 (wr_pktp->pkt_resid == 0)) {
26118 26114 err = SD_SUCCESS;
26119 26115 break;
26120 26116 }
26121 26117
26122 26118 /*
26123 26119 * Check CMD_DEV_GONE 1st, give up if device is gone.
26124 26120 */
26125 26121 if (wr_pktp->pkt_reason == CMD_DEV_GONE) {
26126 26122 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26127 26123 "Error while dumping state...Device is gone\n");
26128 26124 break;
26129 26125 }
26130 26126
26131 26127 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_CHECK) {
26132 26128 SD_INFO(SD_LOG_DUMP, un,
26133 26129 "sddump: write failed with CHECK, try # %d\n", i);
26134 26130 if (((wr_pktp->pkt_state & STATE_ARQ_DONE) == 0)) {
26135 26131 (void) sd_send_polled_RQS(un);
26136 26132 }
26137 26133
26138 26134 continue;
26139 26135 }
26140 26136
26141 26137 if (SD_GET_PKT_STATUS(wr_pktp) == STATUS_BUSY) {
26142 26138 int reset_retval = 0;
26143 26139
26144 26140 SD_INFO(SD_LOG_DUMP, un,
26145 26141 "sddump: write failed with BUSY, try # %d\n", i);
26146 26142
26147 26143 if (un->un_f_lun_reset_enabled == TRUE) {
26148 26144 reset_retval = scsi_reset(SD_ADDRESS(un),
26149 26145 RESET_LUN);
26150 26146 }
26151 26147 if (reset_retval == 0) {
26152 26148 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
26153 26149 }
26154 26150 (void) sd_send_polled_RQS(un);
26155 26151
26156 26152 } else {
26157 26153 SD_INFO(SD_LOG_DUMP, un,
26158 26154 "sddump: write failed with 0x%x, try # %d\n",
26159 26155 SD_GET_PKT_STATUS(wr_pktp), i);
26160 26156 mutex_enter(SD_MUTEX(un));
26161 26157 sd_reset_target(un, wr_pktp);
26162 26158 mutex_exit(SD_MUTEX(un));
26163 26159 }
26164 26160
26165 26161 /*
26166 26162 * If we are not getting anywhere with lun/target resets,
26167 26163 * let's reset the bus.
26168 26164 */
26169 26165 if (i == SD_NDUMP_RETRIES/2) {
26170 26166 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
26171 26167 (void) sd_send_polled_RQS(un);
26172 26168 }
26173 26169 }
26174 26170 }
26175 26171
26176 26172 scsi_destroy_pkt(wr_pktp);
26177 26173 mutex_enter(SD_MUTEX(un));
26178 26174 if ((NOT_DEVBSIZE(un)) && (doing_rmw == TRUE)) {
26179 26175 mutex_exit(SD_MUTEX(un));
26180 26176 scsi_free_consistent_buf(wr_bp);
26181 26177 } else {
26182 26178 mutex_exit(SD_MUTEX(un));
26183 26179 }
26184 26180 SD_TRACE(SD_LOG_DUMP, un, "sddump: exit: err = %d\n", err);
26185 26181 return (err);
26186 26182 }
26187 26183
26188 26184 /*
26189 26185 * Function: sd_scsi_poll()
26190 26186 *
26191 26187 * Description: This is a wrapper for the scsi_poll call.
26192 26188 *
26193 26189 * Arguments: sd_lun - The unit structure
26194 26190 * scsi_pkt - The scsi packet being sent to the device.
26195 26191 *
26196 26192 * Return Code: 0 - Command completed successfully with good status
26197 26193 * -1 - Command failed. This could indicate a check condition
26198 26194 * or other status value requiring recovery action.
26199 26195 *
26200 26196  * NOTE: This code is only called from sddump().
26201 26197 */
26202 26198
26203 26199 static int
26204 26200 sd_scsi_poll(struct sd_lun *un, struct scsi_pkt *pktp)
26205 26201 {
26206 26202 int status;
26207 26203
26208 26204 ASSERT(un != NULL);
26209 26205 ASSERT(!mutex_owned(SD_MUTEX(un)));
26210 26206 ASSERT(pktp != NULL);
26211 26207
26212 26208 status = SD_SUCCESS;
26213 26209
26214 26210 if (scsi_ifgetcap(&pktp->pkt_address, "tagged-qing", 1) == 1) {
26215 26211 pktp->pkt_flags |= un->un_tagflags;
26216 26212 pktp->pkt_flags &= ~FLAG_NODISCON;
26217 26213 }
26218 26214
26219 26215 status = sd_ddi_scsi_poll(pktp);
26220 26216 /*
26221 26217 * Scsi_poll returns 0 (success) if the command completes and the
26222 26218 * status block is STATUS_GOOD. We should only check errors if this
26223 26219 * condition is not true. Even then we should send our own request
26224 26220 * sense packet only if we have a check condition and auto
26225 26221 * request sense has not been performed by the hba.
26226 26222 * Don't get RQS data if pkt_reason is CMD_DEV_GONE.
26227 26223 */
26228 26224 if ((status != SD_SUCCESS) &&
26229 26225 (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK) &&
26230 26226 (pktp->pkt_state & STATE_ARQ_DONE) == 0 &&
26231 26227 (pktp->pkt_reason != CMD_DEV_GONE))
26232 26228 (void) sd_send_polled_RQS(un);
26233 26229
26234 26230 return (status);
26235 26231 }
26236 26232
26237 26233 /*
26238 26234 * Function: sd_send_polled_RQS()
26239 26235 *
26240 26236 * Description: This sends the request sense command to a device.
26241 26237 *
26242 26238 * Arguments: sd_lun - The unit structure
26243 26239 *
26244 26240 * Return Code: 0 - Command completed successfully with good status
26245 26241 * -1 - Command failed.
26246 26242 *
26247 26243 */
26248 26244
26249 26245 static int
26250 26246 sd_send_polled_RQS(struct sd_lun *un)
26251 26247 {
26252 26248 int ret_val;
26253 26249 struct scsi_pkt *rqs_pktp;
26254 26250 struct buf *rqs_bp;
26255 26251
26256 26252 ASSERT(un != NULL);
26257 26253 ASSERT(!mutex_owned(SD_MUTEX(un)));
26258 26254
26259 26255 ret_val = SD_SUCCESS;
26260 26256
26261 26257 rqs_pktp = un->un_rqs_pktp;
26262 26258 rqs_bp = un->un_rqs_bp;
26263 26259
26264 26260 mutex_enter(SD_MUTEX(un));
26265 26261
26266 26262 if (un->un_sense_isbusy) {
26267 26263 ret_val = SD_FAILURE;
26268 26264 mutex_exit(SD_MUTEX(un));
26269 26265 return (ret_val);
26270 26266 }
26271 26267
26272 26268 /*
26273 26269 * If the request sense buffer (and packet) is not in use,
26274 26270 	 * let's set un_sense_isbusy and send our packet.
26275 26271 */
26276 26272 un->un_sense_isbusy = 1;
26277 26273 rqs_pktp->pkt_resid = 0;
26278 26274 rqs_pktp->pkt_reason = 0;
26279 26275 rqs_pktp->pkt_flags |= FLAG_NOINTR;
26280 26276 bzero(rqs_bp->b_un.b_addr, SENSE_LENGTH);
26281 26277
26282 26278 mutex_exit(SD_MUTEX(un));
26283 26279
26284 26280 SD_INFO(SD_LOG_COMMON, un, "sd_send_polled_RQS: req sense buf at"
26285 26281 " 0x%p\n", rqs_bp->b_un.b_addr);
26286 26282
26287 26283 /*
26288 26284 * Can't send this to sd_scsi_poll, we wrap ourselves around the
26289 26285 * axle - it has a call into us!
26290 26286 */
26291 26287 if ((ret_val = sd_ddi_scsi_poll(rqs_pktp)) != 0) {
26292 26288 SD_INFO(SD_LOG_COMMON, un,
26293 26289 "sd_send_polled_RQS: RQS failed\n");
26294 26290 }
26295 26291
26296 26292 SD_DUMP_MEMORY(un, SD_LOG_COMMON, "sd_send_polled_RQS:",
26297 26293 (uchar_t *)rqs_bp->b_un.b_addr, SENSE_LENGTH, SD_LOG_HEX);
26298 26294
26299 26295 mutex_enter(SD_MUTEX(un));
26300 26296 un->un_sense_isbusy = 0;
26301 26297 mutex_exit(SD_MUTEX(un));
26302 26298
26303 26299 return (ret_val);
26304 26300 }
26305 26301
26306 26302 /*
26307 26303 * Defines needed for localized version of the scsi_poll routine.
26308 26304 */
26309 26305 #define CSEC 10000 /* usecs */
26310 26306 #define SEC_TO_CSEC (1000000/CSEC)
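/*
 * With CSEC at 10000 usec (10 msec), SEC_TO_CSEC is 100 polling intervals
 * per second. A pkt_time of 60 seconds thus becomes a budget of 6000 loop
 * iterations below; retries that sleep a full second charge themselves
 * (SEC_TO_CSEC - 1) extra iterations, so the budget drains at roughly the
 * same wall-clock rate regardless of the delay used.
 */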
26311 26307
26312 26308 /*
26313 26309 * Function: sd_ddi_scsi_poll()
26314 26310 *
26315 26311 * Description: Localized version of the scsi_poll routine. The purpose is to
26316 26312 * send a scsi_pkt to a device as a polled command. This version
26317 26313 * is to ensure more robust handling of transport errors.
26318 26314 * Specifically this routine cures not ready, coming ready
26319 26315  * 		Specifically, this routine cures the not-ready to ready
26320 26316  * 		transition for power-up and reset of Sonomas. This can take
26321 26317  * 		up to 45 seconds for power-on and 20 seconds for reset of a
26322 26318  * 		Sonoma LUN.
26323 26319 * Arguments: scsi_pkt - The scsi_pkt being sent to a device
26324 26320 *
26325 26321 * Return Code: 0 - Command completed successfully with good status
26326 26322 * -1 - Command failed.
26327 26323 *
26328 26324 * NOTE: This code is almost identical to scsi_poll, however before 6668774 can
26329 26325 * be fixed (removing this code), we need to determine how to handle the
26330 26326 * KEY_UNIT_ATTENTION condition below in conditions not as limited as sddump().
26331 26327 *
26332 26328  * NOTE: This code is only called from sddump().
26333 26329 */
26334 26330 static int
26335 26331 sd_ddi_scsi_poll(struct scsi_pkt *pkt)
26336 26332 {
26337 26333 int rval = -1;
26338 26334 int savef;
26339 26335 long savet;
26340 26336 void (*savec)();
26341 26337 int timeout;
26342 26338 int busy_count;
26343 26339 int poll_delay;
26344 26340 int rc;
26345 26341 uint8_t *sensep;
26346 26342 struct scsi_arq_status *arqstat;
26347 26343 extern int do_polled_io;
26348 26344
26349 26345 ASSERT(pkt->pkt_scbp);
26350 26346
26351 26347 /*
26352 26348 * save old flags..
26353 26349 */
26354 26350 savef = pkt->pkt_flags;
26355 26351 savec = pkt->pkt_comp;
26356 26352 savet = pkt->pkt_time;
26357 26353
26358 26354 pkt->pkt_flags |= FLAG_NOINTR;
26359 26355
26360 26356 /*
26361 26357 * XXX there is nothing in the SCSA spec that states that we should not
26362 26358 * do a callback for polled cmds; however, removing this will break sd
26363 26359 * and probably other target drivers
26364 26360 */
26365 26361 pkt->pkt_comp = NULL;
26366 26362
26367 26363 /*
26368 26364 * we don't like a polled command without timeout.
26369 26365 * 60 seconds seems long enough.
26370 26366 */
26371 26367 if (pkt->pkt_time == 0)
26372 26368 pkt->pkt_time = SCSI_POLL_TIMEOUT;
26373 26369
26374 26370 /*
26375 26371 * Send polled cmd.
26376 26372 *
26377 26373 * We do some error recovery for various errors. Tran_busy,
26378 26374 	 * queue full, and non-dispatched commands are retried every 10 msec,
26379 26375 	 * as they are typically transient failures. Busy status and Not
26380 26376 	 * Ready are retried every second, as these take a while to
26381 26377 * change.
26382 26378 */
26383 26379 timeout = pkt->pkt_time * SEC_TO_CSEC;
26384 26380
26385 26381 for (busy_count = 0; busy_count < timeout; busy_count++) {
26386 26382 /*
26387 26383 * Initialize pkt status variables.
26388 26384 */
26389 26385 *pkt->pkt_scbp = pkt->pkt_reason = pkt->pkt_state = 0;
26390 26386
26391 26387 if ((rc = scsi_transport(pkt)) != TRAN_ACCEPT) {
26392 26388 if (rc != TRAN_BUSY) {
26393 26389 /* Transport failed - give up. */
26394 26390 break;
26395 26391 } else {
26396 26392 /* Transport busy - try again. */
26397 26393 poll_delay = 1 * CSEC; /* 10 msec. */
26398 26394 }
26399 26395 } else {
26400 26396 /*
26401 26397 * Transport accepted - check pkt status.
26402 26398 */
26403 26399 rc = (*pkt->pkt_scbp) & STATUS_MASK;
26404 26400 if ((pkt->pkt_reason == CMD_CMPLT) &&
26405 26401 (rc == STATUS_CHECK) &&
26406 26402 (pkt->pkt_state & STATE_ARQ_DONE)) {
26407 26403 arqstat =
26408 26404 (struct scsi_arq_status *)(pkt->pkt_scbp);
26409 26405 sensep = (uint8_t *)&arqstat->sts_sensedata;
26410 26406 } else {
26411 26407 sensep = NULL;
26412 26408 }
26413 26409
26414 26410 if ((pkt->pkt_reason == CMD_CMPLT) &&
26415 26411 (rc == STATUS_GOOD)) {
26416 26412 /* No error - we're done */
26417 26413 rval = 0;
26418 26414 break;
26419 26415
26420 26416 } else if (pkt->pkt_reason == CMD_DEV_GONE) {
26421 26417 /* Lost connection - give up */
26422 26418 break;
26423 26419
26424 26420 } else if ((pkt->pkt_reason == CMD_INCOMPLETE) &&
26425 26421 (pkt->pkt_state == 0)) {
26426 26422 /* Pkt not dispatched - try again. */
26427 26423 poll_delay = 1 * CSEC; /* 10 msec. */
26428 26424
26429 26425 } else if ((pkt->pkt_reason == CMD_CMPLT) &&
26430 26426 (rc == STATUS_QFULL)) {
26431 26427 /* Queue full - try again. */
26432 26428 poll_delay = 1 * CSEC; /* 10 msec. */
26433 26429
26434 26430 } else if ((pkt->pkt_reason == CMD_CMPLT) &&
26435 26431 (rc == STATUS_BUSY)) {
26436 26432 /* Busy - try again. */
26437 26433 poll_delay = 100 * CSEC; /* 1 sec. */
26438 26434 busy_count += (SEC_TO_CSEC - 1);
26439 26435
26440 26436 } else if ((sensep != NULL) &&
26441 26437 (scsi_sense_key(sensep) == KEY_UNIT_ATTENTION)) {
26442 26438 /*
26443 26439 * Unit Attention - try again.
26444 26440 * Pretend it took 1 sec.
26445 26441 * NOTE: 'continue' avoids poll_delay
26446 26442 */
26447 26443 busy_count += (SEC_TO_CSEC - 1);
26448 26444 continue;
26449 26445
26450 26446 } else if ((sensep != NULL) &&
26451 26447 (scsi_sense_key(sensep) == KEY_NOT_READY) &&
26452 26448 (scsi_sense_asc(sensep) == 0x04) &&
26453 26449 (scsi_sense_ascq(sensep) == 0x01)) {
26454 26450 /*
26455 26451 * Not ready -> ready - try again.
26456 26452 * 04h/01h: LUN IS IN PROCESS OF BECOMING READY
26457 26453 * ...same as STATUS_BUSY
26458 26454 */
26459 26455 poll_delay = 100 * CSEC; /* 1 sec. */
26460 26456 busy_count += (SEC_TO_CSEC - 1);
26461 26457
26462 26458 } else {
26463 26459 /* BAD status - give up. */
26464 26460 break;
26465 26461 }
26466 26462 }
26467 26463
26468 26464 if (((curthread->t_flag & T_INTR_THREAD) == 0) &&
26469 26465 !do_polled_io) {
26470 26466 delay(drv_usectohz(poll_delay));
26471 26467 } else {
26472 26468 /* we busy wait during cpr_dump or interrupt threads */
26473 26469 drv_usecwait(poll_delay);
26474 26470 }
26475 26471 }
26476 26472
26477 26473 pkt->pkt_flags = savef;
26478 26474 pkt->pkt_comp = savec;
26479 26475 pkt->pkt_time = savet;
26480 26476
26481 26477 /* return on error */
26482 26478 if (rval)
26483 26479 return (rval);
26484 26480
26485 26481 /*
26486 26482 * This is not a performance critical code path.
26487 26483 *
26488 26484 * As an accommodation for scsi_poll callers, to avoid ddi_dma_sync()
26489 26485 * issues associated with looking at DMA memory prior to
26490 26486 * scsi_pkt_destroy(), we scsi_sync_pkt() prior to return.
26491 26487 */
26492 26488 scsi_sync_pkt(pkt);
26493 26489 return (0);
26494 26490 }
26495 26491
26496 26492
26497 26493
26498 26494 /*
26499 26495 * Function: sd_persistent_reservation_in_read_keys
26500 26496 *
26501 26497 * Description: This routine is the driver entry point for handling CD-ROM
26502 26498 * multi-host persistent reservation requests (MHIOCGRP_INKEYS)
26503 26499 * by sending the SCSI-3 PRIN commands to the device.
26504 26500 * Processes the read keys command response by copying the
26505 26501 * reservation key information into the user provided buffer.
26506 26502 * Support for the 32/64 bit _MULTI_DATAMODEL is implemented.
26507 26503 *
26508 26504 * Arguments: un - Pointer to soft state struct for the target.
26509 26505 * usrp - user provided pointer to multihost Persistent In Read
26510 26506 * Keys structure (mhioc_inkeys_t)
26511 26507 * flag - this argument is a pass through to ddi_copyxxx()
26512 26508 * directly from the mode argument of ioctl().
26513 26509 *
26514 26510 * Return Code: 0 - Success
26515 26511 * EACCES
26516 26512 * ENOTSUP
26517 26513 * errno return code from sd_send_scsi_cmd()
26518 26514 *
26519 26515 * Context: Can sleep. Does not return until command is completed.
26520 26516 */
26521 26517
26522 26518 static int
26523 26519 sd_persistent_reservation_in_read_keys(struct sd_lun *un,
26524 26520 mhioc_inkeys_t *usrp, int flag)
26525 26521 {
26526 26522 #ifdef _MULTI_DATAMODEL
26527 26523 struct mhioc_key_list32 li32;
26528 26524 #endif
26529 26525 sd_prin_readkeys_t *in;
26530 26526 mhioc_inkeys_t *ptr;
26531 26527 mhioc_key_list_t li;
26532 26528 uchar_t *data_bufp;
26533 26529 int data_len;
26534 26530 int rval = 0;
26535 26531 size_t copysz;
26536 26532 sd_ssc_t *ssc;
26537 26533
26538 26534 if ((ptr = (mhioc_inkeys_t *)usrp) == NULL) {
26539 26535 return (EINVAL);
26540 26536 }
26541 26537 bzero(&li, sizeof (mhioc_key_list_t));
26542 26538
26543 26539 ssc = sd_ssc_init(un);
26544 26540
26545 26541 /*
26546 26542 * Get the listsize from user
26547 26543 */
26548 26544 #ifdef _MULTI_DATAMODEL
26549 26545
26550 26546 switch (ddi_model_convert_from(flag & FMODELS)) {
26551 26547 case DDI_MODEL_ILP32:
26552 26548 copysz = sizeof (struct mhioc_key_list32);
26553 26549 if (ddi_copyin(ptr->li, &li32, copysz, flag)) {
26554 26550 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26555 26551 "sd_persistent_reservation_in_read_keys: "
26556 26552 "failed ddi_copyin: mhioc_key_list32_t\n");
26557 26553 rval = EFAULT;
26558 26554 goto done;
26559 26555 }
26560 26556 li.listsize = li32.listsize;
26561 26557 li.list = (mhioc_resv_key_t *)(uintptr_t)li32.list;
26562 26558 break;
26563 26559
26564 26560 case DDI_MODEL_NONE:
26565 26561 copysz = sizeof (mhioc_key_list_t);
26566 26562 if (ddi_copyin(ptr->li, &li, copysz, flag)) {
26567 26563 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26568 26564 "sd_persistent_reservation_in_read_keys: "
26569 26565 "failed ddi_copyin: mhioc_key_list_t\n");
26570 26566 rval = EFAULT;
26571 26567 goto done;
26572 26568 }
26573 26569 break;
26574 26570 }
26575 26571
26576 26572 #else /* ! _MULTI_DATAMODEL */
26577 26573 copysz = sizeof (mhioc_key_list_t);
26578 26574 if (ddi_copyin(ptr->li, &li, copysz, flag)) {
26579 26575 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26580 26576 "sd_persistent_reservation_in_read_keys: "
26581 26577 "failed ddi_copyin: mhioc_key_list_t\n");
26582 26578 rval = EFAULT;
26583 26579 goto done;
26584 26580 }
26585 26581 #endif
26586 26582
26587 26583 data_len = li.listsize * MHIOC_RESV_KEY_SIZE;
26588 26584 data_len += (sizeof (sd_prin_readkeys_t) - sizeof (caddr_t));
26589 26585 data_bufp = kmem_zalloc(data_len, KM_SLEEP);
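	/*
	 * As a sketch of the sizing above: with MHIOC_RESV_KEY_SIZE of 8,
	 * a caller-supplied listsize of 4 requests 4 * 8 = 32 bytes of key
	 * data plus the fixed PRIN header (the generation and length words
	 * that precede the key list in sd_prin_readkeys_t).
	 */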
26590 26586
26591 26587 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_KEYS,
26592 26588 data_len, data_bufp);
26593 26589 if (rval != 0) {
26594 26590 if (rval == EIO)
26595 26591 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
26596 26592 else
26597 26593 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
26598 26594 goto done;
26599 26595 }
26600 26596 in = (sd_prin_readkeys_t *)data_bufp;
26601 26597 ptr->generation = BE_32(in->generation);
26602 26598 li.listlen = BE_32(in->len) / MHIOC_RESV_KEY_SIZE;
26603 26599
26604 26600 /*
26605 26601 * Return the min(listsize, listlen) keys
26606 26602 */
26607 26603 #ifdef _MULTI_DATAMODEL
26608 26604
26609 26605 switch (ddi_model_convert_from(flag & FMODELS)) {
26610 26606 case DDI_MODEL_ILP32:
26611 26607 li32.listlen = li.listlen;
26612 26608 if (ddi_copyout(&li32, ptr->li, copysz, flag)) {
26613 26609 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26614 26610 "sd_persistent_reservation_in_read_keys: "
26615 26611 "failed ddi_copyout: mhioc_key_list32_t\n");
26616 26612 rval = EFAULT;
26617 26613 goto done;
26618 26614 }
26619 26615 break;
26620 26616
26621 26617 case DDI_MODEL_NONE:
26622 26618 if (ddi_copyout(&li, ptr->li, copysz, flag)) {
26623 26619 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26624 26620 "sd_persistent_reservation_in_read_keys: "
26625 26621 "failed ddi_copyout: mhioc_key_list_t\n");
26626 26622 rval = EFAULT;
26627 26623 goto done;
26628 26624 }
26629 26625 break;
26630 26626 }
26631 26627
26632 26628 #else /* ! _MULTI_DATAMODEL */
26633 26629
26634 26630 if (ddi_copyout(&li, ptr->li, copysz, flag)) {
26635 26631 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26636 26632 "sd_persistent_reservation_in_read_keys: "
26637 26633 "failed ddi_copyout: mhioc_key_list_t\n");
26638 26634 rval = EFAULT;
26639 26635 goto done;
26640 26636 }
26641 26637
26642 26638 #endif /* _MULTI_DATAMODEL */
26643 26639
26644 26640 copysz = min(li.listlen * MHIOC_RESV_KEY_SIZE,
26645 26641 li.listsize * MHIOC_RESV_KEY_SIZE);
26646 26642 if (ddi_copyout(&in->keylist, li.list, copysz, flag)) {
26647 26643 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26648 26644 "sd_persistent_reservation_in_read_keys: "
26649 26645 "failed ddi_copyout: keylist\n");
26650 26646 rval = EFAULT;
26651 26647 }
26652 26648 done:
26653 26649 sd_ssc_fini(ssc);
26654 26650 kmem_free(data_bufp, data_len);
26655 26651 return (rval);
26656 26652 }
26657 26653
26658 26654
26659 26655 /*
26660 26656 * Function: sd_persistent_reservation_in_read_resv
26661 26657 *
26662 26658 * Description: This routine is the driver entry point for handling CD-ROM
26663 26659 * multi-host persistent reservation requests (MHIOCGRP_INRESV)
26664 26660 * by sending the SCSI-3 PRIN commands to the device.
26665 26661 * Process the read persistent reservations command response by
26666 26662 * copying the reservation information into the user provided
26667 26663 * buffer. Support for the 32/64 _MULTI_DATAMODEL is implemented.
26668 26664 *
26669 26665 * Arguments: un - Pointer to soft state struct for the target.
26670 26666 * usrp - user provided pointer to multihost Persistent In Read
26671 26667 * Keys structure (mhioc_inkeys_t)
26672 26668 * flag - this argument is a pass through to ddi_copyxxx()
26673 26669 * directly from the mode argument of ioctl().
26674 26670 *
26675 26671 * Return Code: 0 - Success
26676 26672 * EACCES
26677 26673 * ENOTSUP
26678 26674 * errno return code from sd_send_scsi_cmd()
26679 26675 *
26680 26676 * Context: Can sleep. Does not return until command is completed.
26681 26677 */
26682 26678
26683 26679 static int
26684 26680 sd_persistent_reservation_in_read_resv(struct sd_lun *un,
26685 26681 mhioc_inresvs_t *usrp, int flag)
26686 26682 {
26687 26683 #ifdef _MULTI_DATAMODEL
26688 26684 struct mhioc_resv_desc_list32 resvlist32;
26689 26685 #endif
26690 26686 sd_prin_readresv_t *in;
26691 26687 mhioc_inresvs_t *ptr;
26692 26688 sd_readresv_desc_t *readresv_ptr;
26693 26689 mhioc_resv_desc_list_t resvlist;
26694 26690 mhioc_resv_desc_t resvdesc;
26695 26691 uchar_t *data_bufp = NULL;
26696 26692 int data_len;
26697 26693 int rval = 0;
26698 26694 int i;
26699 26695 size_t copysz;
26700 26696 mhioc_resv_desc_t *bufp;
26701 26697 sd_ssc_t *ssc;
26702 26698
26703 26699 if ((ptr = usrp) == NULL) {
26704 26700 return (EINVAL);
26705 26701 }
26706 26702
26707 26703 ssc = sd_ssc_init(un);
26708 26704
26709 26705 /*
26710 26706 * Get the listsize from user
26711 26707 */
26712 26708 #ifdef _MULTI_DATAMODEL
26713 26709 switch (ddi_model_convert_from(flag & FMODELS)) {
26714 26710 case DDI_MODEL_ILP32:
26715 26711 copysz = sizeof (struct mhioc_resv_desc_list32);
26716 26712 if (ddi_copyin(ptr->li, &resvlist32, copysz, flag)) {
26717 26713 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26718 26714 "sd_persistent_reservation_in_read_resv: "
26719 26715 "failed ddi_copyin: mhioc_resv_desc_list_t\n");
26720 26716 rval = EFAULT;
26721 26717 goto done;
26722 26718 }
26723 26719 resvlist.listsize = resvlist32.listsize;
26724 26720 resvlist.list = (mhioc_resv_desc_t *)(uintptr_t)resvlist32.list;
26725 26721 break;
26726 26722
26727 26723 case DDI_MODEL_NONE:
26728 26724 copysz = sizeof (mhioc_resv_desc_list_t);
26729 26725 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
26730 26726 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26731 26727 "sd_persistent_reservation_in_read_resv: "
26732 26728 "failed ddi_copyin: mhioc_resv_desc_list_t\n");
26733 26729 rval = EFAULT;
26734 26730 goto done;
26735 26731 }
26736 26732 break;
26737 26733 }
26738 26734 #else /* ! _MULTI_DATAMODEL */
26739 26735 copysz = sizeof (mhioc_resv_desc_list_t);
26740 26736 if (ddi_copyin(ptr->li, &resvlist, copysz, flag)) {
26741 26737 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26742 26738 "sd_persistent_reservation_in_read_resv: "
26743 26739 "failed ddi_copyin: mhioc_resv_desc_list_t\n");
26744 26740 rval = EFAULT;
26745 26741 goto done;
26746 26742 }
26747 26743 #endif /* ! _MULTI_DATAMODEL */
26748 26744
26749 26745 data_len = resvlist.listsize * SCSI3_RESV_DESC_LEN;
26750 26746 data_len += (sizeof (sd_prin_readresv_t) - sizeof (caddr_t));
26751 26747 data_bufp = kmem_zalloc(data_len, KM_SLEEP);
26752 26748
26753 26749 rval = sd_send_scsi_PERSISTENT_RESERVE_IN(ssc, SD_READ_RESV,
26754 26750 data_len, data_bufp);
26755 26751 if (rval != 0) {
26756 26752 if (rval == EIO)
26757 26753 sd_ssc_assessment(ssc, SD_FMT_IGNORE_COMPROMISE);
26758 26754 else
26759 26755 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
26760 26756 goto done;
26761 26757 }
26762 26758 in = (sd_prin_readresv_t *)data_bufp;
26763 26759 ptr->generation = BE_32(in->generation);
26764 26760 resvlist.listlen = BE_32(in->len) / SCSI3_RESV_DESC_LEN;
26765 26761
26766 26762 /*
26767 26763 	 * Return the min(listsize, listlen) keys
26768 26764 */
26769 26765 #ifdef _MULTI_DATAMODEL
26770 26766
26771 26767 switch (ddi_model_convert_from(flag & FMODELS)) {
26772 26768 case DDI_MODEL_ILP32:
26773 26769 resvlist32.listlen = resvlist.listlen;
26774 26770 if (ddi_copyout(&resvlist32, ptr->li, copysz, flag)) {
26775 26771 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26776 26772 "sd_persistent_reservation_in_read_resv: "
26777 26773 "failed ddi_copyout: mhioc_resv_desc_list_t\n");
26778 26774 rval = EFAULT;
26779 26775 goto done;
26780 26776 }
26781 26777 break;
26782 26778
26783 26779 case DDI_MODEL_NONE:
26784 26780 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
26785 26781 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26786 26782 "sd_persistent_reservation_in_read_resv: "
26787 26783 "failed ddi_copyout: mhioc_resv_desc_list_t\n");
26788 26784 rval = EFAULT;
26789 26785 goto done;
26790 26786 }
26791 26787 break;
26792 26788 }
26793 26789
26794 26790 #else /* ! _MULTI_DATAMODEL */
26795 26791
26796 26792 if (ddi_copyout(&resvlist, ptr->li, copysz, flag)) {
26797 26793 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26798 26794 "sd_persistent_reservation_in_read_resv: "
26799 26795 "failed ddi_copyout: mhioc_resv_desc_list_t\n");
26800 26796 rval = EFAULT;
26801 26797 goto done;
26802 26798 }
26803 26799
26804 26800 #endif /* ! _MULTI_DATAMODEL */
26805 26801
26806 26802 readresv_ptr = (sd_readresv_desc_t *)&in->readresv_desc;
26807 26803 bufp = resvlist.list;
26808 26804 copysz = sizeof (mhioc_resv_desc_t);
26809 26805 for (i = 0; i < min(resvlist.listlen, resvlist.listsize);
26810 26806 i++, readresv_ptr++, bufp++) {
26811 26807
26812 26808 bcopy(&readresv_ptr->resvkey, &resvdesc.key,
26813 26809 MHIOC_RESV_KEY_SIZE);
26814 26810 resvdesc.type = readresv_ptr->type;
26815 26811 resvdesc.scope = readresv_ptr->scope;
26816 26812 resvdesc.scope_specific_addr =
26817 26813 BE_32(readresv_ptr->scope_specific_addr);
26818 26814
26819 26815 if (ddi_copyout(&resvdesc, bufp, copysz, flag)) {
26820 26816 SD_ERROR(SD_LOG_IOCTL_MHD, un,
26821 26817 "sd_persistent_reservation_in_read_resv: "
26822 26818 "failed ddi_copyout: resvlist\n");
26823 26819 rval = EFAULT;
26824 26820 goto done;
26825 26821 }
26826 26822 }
26827 26823 done:
26828 26824 sd_ssc_fini(ssc);
26829 26825 /* only if data_bufp is allocated, we need to free it */
26830 26826 if (data_bufp) {
26831 26827 kmem_free(data_bufp, data_len);
26832 26828 }
26833 26829 return (rval);
26834 26830 }
26835 26831
26836 26832
26837 26833 /*
26838 26834 * Function: sr_change_blkmode()
26839 26835 *
26840 26836 * Description: This routine is the driver entry point for handling CD-ROM
26841 26837 * block mode ioctl requests. Support for returning and changing
26842 26838 * the current block size in use by the device is implemented. The
26843 26839 * LBA size is changed via a MODE SELECT Block Descriptor.
26844 26840 *
26845 26841 * This routine issues a mode sense with an allocation length of
26846 26842 * 12 bytes for the mode page header and a single block descriptor.
26847 26843 *
26848 26844 * Arguments: dev - the device 'dev_t'
26849 26845 * cmd - the request type; one of CDROMGBLKMODE (get) or
26850 26846 * CDROMSBLKMODE (set)
26851 26847 * data - current block size or requested block size
26852 26848 * flag - this argument is a pass through to ddi_copyxxx() directly
26853 26849 * from the mode argument of ioctl().
26854 26850 *
26855 26851 * Return Code: the code returned by sd_send_scsi_cmd()
26856 26852 * EINVAL if invalid arguments are provided
26857 26853 * EFAULT if ddi_copyxxx() fails
26858 26854 * ENXIO if fail ddi_get_soft_state
26859 26855 * EIO if invalid mode sense block descriptor length
26860 26856 *
26861 26857 */
26862 26858
26863 26859 static int
26864 26860 sr_change_blkmode(dev_t dev, int cmd, intptr_t data, int flag)
26865 26861 {
26866 26862 struct sd_lun *un = NULL;
26867 26863 struct mode_header *sense_mhp, *select_mhp;
26868 26864 struct block_descriptor *sense_desc, *select_desc;
26869 26865 int current_bsize;
26870 26866 int rval = EINVAL;
26871 26867 uchar_t *sense = NULL;
26872 26868 uchar_t *select = NULL;
26873 26869 sd_ssc_t *ssc;
26874 26870
26875 26871 ASSERT((cmd == CDROMGBLKMODE) || (cmd == CDROMSBLKMODE));
26876 26872
26877 26873 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
26878 26874 return (ENXIO);
26879 26875 }
26880 26876
26881 26877 /*
26882 26878 	 * The block length is changed via the Mode Select block descriptor; the
26883 26879 	 * "Read/Write Error Recovery" mode page (0x1) contents are not actually
26884 26880 	 * required by this routine. Therefore the mode sense allocation
26885 26881 * length is specified to be the length of a mode page header and a
26886 26882 * block descriptor.
26887 26883 */
26888 26884 sense = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
26889 26885
26890 26886 ssc = sd_ssc_init(un);
26891 26887 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
26892 26888 BUFLEN_CHG_BLK_MODE, MODEPAGE_ERR_RECOV, SD_PATH_STANDARD);
26893 26889 sd_ssc_fini(ssc);
26894 26890 if (rval != 0) {
26895 26891 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26896 26892 "sr_change_blkmode: Mode Sense Failed\n");
26897 26893 kmem_free(sense, BUFLEN_CHG_BLK_MODE);
26898 26894 return (rval);
26899 26895 }
26900 26896
26901 26897 /* Check the block descriptor len to handle only 1 block descriptor */
26902 26898 sense_mhp = (struct mode_header *)sense;
26903 26899 if ((sense_mhp->bdesc_length == 0) ||
26904 26900 (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH)) {
26905 26901 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26906 26902 "sr_change_blkmode: Mode Sense returned invalid block"
26907 26903 " descriptor length\n");
26908 26904 kmem_free(sense, BUFLEN_CHG_BLK_MODE);
26909 26905 return (EIO);
26910 26906 }
26911 26907 sense_desc = (struct block_descriptor *)(sense + MODE_HEADER_LENGTH);
26912 26908 current_bsize = ((sense_desc->blksize_hi << 16) |
26913 26909 (sense_desc->blksize_mid << 8) | sense_desc->blksize_lo);
26914 26910
26915 26911 /* Process command */
26916 26912 switch (cmd) {
26917 26913 case CDROMGBLKMODE:
26918 26914 /* Return the block size obtained during the mode sense */
26919 26915 		if (ddi_copyout(&current_bsize, (void *)data,
26920 26916 sizeof (int), flag) != 0)
26921 26917 rval = EFAULT;
26922 26918 break;
26923 26919 case CDROMSBLKMODE:
26924 26920 /* Validate the requested block size */
26925 26921 switch (data) {
26926 26922 case CDROM_BLK_512:
26927 26923 case CDROM_BLK_1024:
26928 26924 case CDROM_BLK_2048:
26929 26925 case CDROM_BLK_2056:
26930 26926 case CDROM_BLK_2336:
26931 26927 case CDROM_BLK_2340:
26932 26928 case CDROM_BLK_2352:
26933 26929 case CDROM_BLK_2368:
26934 26930 case CDROM_BLK_2448:
26935 26931 case CDROM_BLK_2646:
26936 26932 case CDROM_BLK_2647:
26937 26933 break;
26938 26934 default:
26939 26935 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26940 26936 "sr_change_blkmode: "
26941 26937 "Block Size '%ld' Not Supported\n", data);
26942 26938 kmem_free(sense, BUFLEN_CHG_BLK_MODE);
26943 26939 return (EINVAL);
26944 26940 }
26945 26941
26946 26942 /*
26947 26943 * The current block size matches the requested block size so
26948 26944 * there is no need to send the mode select to change the size
26949 26945 */
26950 26946 if (current_bsize == data) {
26951 26947 break;
26952 26948 }
26953 26949
26954 26950 /* Build the select data for the requested block size */
26955 26951 select = kmem_zalloc(BUFLEN_CHG_BLK_MODE, KM_SLEEP);
26956 26952 select_mhp = (struct mode_header *)select;
26957 26953 select_desc =
26958 26954 (struct block_descriptor *)(select + MODE_HEADER_LENGTH);
26959 26955 /*
26960 26956 * The LBA size is changed via the block descriptor, so the
26961 26957 * descriptor is built according to the user data
26962 26958 */
26963 26959 select_mhp->bdesc_length = MODE_BLK_DESC_LENGTH;
26964 26960 select_desc->blksize_hi = (char)(((data) & 0x00ff0000) >> 16);
26965 26961 select_desc->blksize_mid = (char)(((data) & 0x0000ff00) >> 8);
26966 26962 select_desc->blksize_lo = (char)((data) & 0x000000ff);
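		/*
		 * For example, a requested size of CDROM_BLK_2048 (0x000800)
		 * splits across the descriptor as blksize_hi = 0x00,
		 * blksize_mid = 0x08 and blksize_lo = 0x00.
		 */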
26967 26963
26968 26964 /* Send the mode select for the requested block size */
26969 26965 ssc = sd_ssc_init(un);
26970 26966 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
26971 26967 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
26972 26968 SD_PATH_STANDARD);
26973 26969 sd_ssc_fini(ssc);
26974 26970 if (rval != 0) {
26975 26971 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
26976 26972 "sr_change_blkmode: Mode Select Failed\n");
26977 26973 /*
26978 26974 * The mode select failed for the requested block size,
26979 26975 * so reset the data for the original block size and
26980 26976 * send it to the target. The error is indicated by the
26981 26977 * return value for the failed mode select.
26982 26978 */
26983 26979 select_desc->blksize_hi = sense_desc->blksize_hi;
26984 26980 select_desc->blksize_mid = sense_desc->blksize_mid;
26985 26981 select_desc->blksize_lo = sense_desc->blksize_lo;
26986 26982 ssc = sd_ssc_init(un);
26987 26983 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0,
26988 26984 select, BUFLEN_CHG_BLK_MODE, SD_DONTSAVE_PAGE,
26989 26985 SD_PATH_STANDARD);
26990 26986 sd_ssc_fini(ssc);
26991 26987 } else {
26992 26988 ASSERT(!mutex_owned(SD_MUTEX(un)));
26993 26989 mutex_enter(SD_MUTEX(un));
26994 26990 sd_update_block_info(un, (uint32_t)data, 0);
26995 26991 mutex_exit(SD_MUTEX(un));
26996 26992 }
26997 26993 break;
26998 26994 default:
26999 26995 /* should not reach here, but check anyway */
27000 26996 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27001 26997 "sr_change_blkmode: Command '%x' Not Supported\n", cmd);
27002 26998 rval = EINVAL;
27003 26999 break;
27004 27000 }
27005 27001
27006 27002 if (select) {
27007 27003 kmem_free(select, BUFLEN_CHG_BLK_MODE);
27008 27004 }
27009 27005 if (sense) {
27010 27006 kmem_free(sense, BUFLEN_CHG_BLK_MODE);
27011 27007 }
27012 27008 return (rval);
27013 27009 }
27014 27010
27015 27011
27016 27012 /*
27017 27013 * Note: The following sr_change_speed() and sr_atapi_change_speed() routines
27018 27014 * implement driver support for getting and setting the CD speed. The command
27019 27015 * set used will be based on the device type. If the device has not been
27020 27016  * identified as MMC, the Toshiba vendor-specific mode page will be used. If
27021 27017  * the device is MMC but does not support the Real Time Streaming feature,
27022 27018  * the SET CD SPEED command will be used to set the speed and mode page
27023 27019  * 0x2A will be used to read the speed.
27024 27020 */
27025 27021
27026 27022 /*
27027 27023 * Function: sr_change_speed()
27028 27024 *
27029 27025 * Description: This routine is the driver entry point for handling CD-ROM
27030 27026 * drive speed ioctl requests for devices supporting the Toshiba
27031 27027 * vendor specific drive speed mode page. Support for returning
27032 27028 * and changing the current drive speed in use by the device is
27033 27029 * implemented.
27034 27030 *
27035 27031 * Arguments: dev - the device 'dev_t'
27036 27032 * cmd - the request type; one of CDROMGDRVSPEED (get) or
27037 27033 * CDROMSDRVSPEED (set)
27038 27034 * data - current drive speed or requested drive speed
27039 27035 * flag - this argument is a pass through to ddi_copyxxx() directly
27040 27036 * from the mode argument of ioctl().
27041 27037 *
27042 27038 * Return Code: the code returned by sd_send_scsi_cmd()
27043 27039 * EINVAL if invalid arguments are provided
27044 27040 * EFAULT if ddi_copyxxx() fails
27045 27041 * ENXIO if fail ddi_get_soft_state
27046 27042 * EIO if invalid mode sense block descriptor length
27047 27043 */
27048 27044
27049 27045 static int
27050 27046 sr_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
27051 27047 {
27052 27048 struct sd_lun *un = NULL;
27053 27049 struct mode_header *sense_mhp, *select_mhp;
27054 27050 struct mode_speed *sense_page, *select_page;
27055 27051 int current_speed;
27056 27052 int rval = EINVAL;
27057 27053 int bd_len;
27058 27054 uchar_t *sense = NULL;
27059 27055 uchar_t *select = NULL;
27060 27056 sd_ssc_t *ssc;
27061 27057
27062 27058 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));
27063 27059 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27064 27060 return (ENXIO);
27065 27061 }
27066 27062
27067 27063 /*
27068 27064 * Note: The drive speed is being modified here according to a Toshiba
27069 27065 * vendor specific mode page (0x31).
27070 27066 */
27071 27067 sense = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
27072 27068
27073 27069 ssc = sd_ssc_init(un);
27074 27070 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
27075 27071 BUFLEN_MODE_CDROM_SPEED, CDROM_MODE_SPEED,
27076 27072 SD_PATH_STANDARD);
27077 27073 sd_ssc_fini(ssc);
27078 27074 if (rval != 0) {
27079 27075 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27080 27076 "sr_change_speed: Mode Sense Failed\n");
27081 27077 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27082 27078 return (rval);
27083 27079 }
27084 27080 sense_mhp = (struct mode_header *)sense;
27085 27081
27086 27082 /* Check the block descriptor len to handle only 1 block descriptor */
27087 27083 bd_len = sense_mhp->bdesc_length;
27088 27084 if (bd_len > MODE_BLK_DESC_LENGTH) {
27089 27085 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27090 27086 "sr_change_speed: Mode Sense returned invalid block "
27091 27087 "descriptor length\n");
27092 27088 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27093 27089 return (EIO);
27094 27090 }
27095 27091
27096 27092 sense_page = (struct mode_speed *)
27097 27093 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
27098 27094 current_speed = sense_page->speed;
27099 27095
27100 27096 /* Process command */
27101 27097 switch (cmd) {
27102 27098 case CDROMGDRVSPEED:
27103 27099 /* Return the drive speed obtained during the mode sense */
27104 27100 if (current_speed == 0x2) {
27105 27101 current_speed = CDROM_TWELVE_SPEED;
27106 27102 }
27107 27103 		if (ddi_copyout(&current_speed, (void *)data,
27108 27104 sizeof (int), flag) != 0) {
27109 27105 rval = EFAULT;
27110 27106 }
27111 27107 break;
27112 27108 case CDROMSDRVSPEED:
27113 27109 /* Validate the requested drive speed */
27114 27110 switch ((uchar_t)data) {
27115 27111 case CDROM_TWELVE_SPEED:
27116 27112 data = 0x2;
27117 27113 /*FALLTHROUGH*/
27118 27114 case CDROM_NORMAL_SPEED:
27119 27115 case CDROM_DOUBLE_SPEED:
27120 27116 case CDROM_QUAD_SPEED:
27121 27117 case CDROM_MAXIMUM_SPEED:
27122 27118 break;
27123 27119 default:
27124 27120 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27125 27121 "sr_change_speed: "
27126 27122 "Drive Speed '%d' Not Supported\n", (uchar_t)data);
27127 27123 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27128 27124 return (EINVAL);
27129 27125 }
27130 27126
27131 27127 /*
27132 27128 * The current drive speed matches the requested drive speed so
27133 27129 * there is no need to send the mode select to change the speed
27134 27130 */
27135 27131 if (current_speed == data) {
27136 27132 break;
27137 27133 }
27138 27134
27139 27135 /* Build the select data for the requested drive speed */
27140 27136 select = kmem_zalloc(BUFLEN_MODE_CDROM_SPEED, KM_SLEEP);
27141 27137 select_mhp = (struct mode_header *)select;
27142 27138 select_mhp->bdesc_length = 0;
27143 27139 		select_page =
27144 27140 		    (struct mode_speed *)(select + MODE_HEADER_LENGTH);
27147 27143 select_page->mode_page.code = CDROM_MODE_SPEED;
27148 27144 select_page->mode_page.length = 2;
27149 27145 select_page->speed = (uchar_t)data;
27150 27146
27151 27147 		/* Send the mode select for the requested drive speed */
27152 27148 ssc = sd_ssc_init(un);
27153 27149 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
27154 27150 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
27155 27151 SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
27156 27152 sd_ssc_fini(ssc);
27157 27153 if (rval != 0) {
27158 27154 /*
27159 27155 * The mode select failed for the requested drive speed,
27160 27156 * so reset the data for the original drive speed and
27161 27157 * send it to the target. The error is indicated by the
27162 27158 * return value for the failed mode select.
27163 27159 */
27164 27160 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27165 27161 			    "sr_change_speed: Mode Select Failed\n");
27166 27162 select_page->speed = sense_page->speed;
27167 27163 ssc = sd_ssc_init(un);
27168 27164 (void) sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
27169 27165 MODEPAGE_CDROM_SPEED_LEN + MODE_HEADER_LENGTH,
27170 27166 SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
27171 27167 sd_ssc_fini(ssc);
27172 27168 }
27173 27169 break;
27174 27170 default:
27175 27171 /* should not reach here, but check anyway */
27176 27172 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27177 27173 "sr_change_speed: Command '%x' Not Supported\n", cmd);
27178 27174 rval = EINVAL;
27179 27175 break;
27180 27176 }
27181 27177
27182 27178 if (select) {
27183 27179 kmem_free(select, BUFLEN_MODE_CDROM_SPEED);
27184 27180 }
27185 27181 if (sense) {
27186 27182 kmem_free(sense, BUFLEN_MODE_CDROM_SPEED);
27187 27183 }
27188 27184
27189 27185 return (rval);
27190 27186 }
27191 27187
27192 27188
27193 27189 /*
27194 27190 * Function: sr_atapi_change_speed()
27195 27191 *
27196 27192 * Description: This routine is the driver entry point for handling CD-ROM
27197 27193 * drive speed ioctl requests for MMC devices that do not support
27198 27194 * the Real Time Streaming feature (0x107).
27199 27195 *
27200 27196 * Note: This routine will use the SET SPEED command which may not
27201 27197 * be supported by all devices.
27202 27198 *
27203 27199  * Arguments: dev - the device 'dev_t'
27204 27200  * cmd - the request type; one of CDROMGDRVSPEED (get) or
27205 27201  * CDROMSDRVSPEED (set)
27206 27202  * data - current drive speed or requested drive speed
27207 27203  * flag - this argument is a pass through to ddi_copyxxx() directly
27208 27204 * from the mode argument of ioctl().
27209 27205 *
27210 27206 * Return Code: the code returned by sd_send_scsi_cmd()
27211 27207 * EINVAL if invalid arguments are provided
27212 27208 * EFAULT if ddi_copyxxx() fails
27213 27209 * ENXIO if fail ddi_get_soft_state
27214 27210 * EIO if invalid mode sense block descriptor length
27215 27211 */
27216 27212
27217 27213 static int
27218 27214 sr_atapi_change_speed(dev_t dev, int cmd, intptr_t data, int flag)
27219 27215 {
27220 27216 struct sd_lun *un;
27221 27217 struct uscsi_cmd *com = NULL;
27222 27218 struct mode_header_grp2 *sense_mhp;
27223 27219 uchar_t *sense_page;
27224 27220 uchar_t *sense = NULL;
27225 27221 char cdb[CDB_GROUP5];
27226 27222 int bd_len;
27227 27223 int current_speed = 0;
27228 27224 int max_speed = 0;
27229 27225 int rval;
27230 27226 sd_ssc_t *ssc;
27231 27227
27232 27228 ASSERT((cmd == CDROMGDRVSPEED) || (cmd == CDROMSDRVSPEED));
27233 27229
27234 27230 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27235 27231 return (ENXIO);
27236 27232 }
27237 27233
27238 27234 sense = kmem_zalloc(BUFLEN_MODE_CDROM_CAP, KM_SLEEP);
27239 27235
27240 27236 ssc = sd_ssc_init(un);
27241 27237 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
27242 27238 BUFLEN_MODE_CDROM_CAP, MODEPAGE_CDROM_CAP,
27243 27239 SD_PATH_STANDARD);
27244 27240 sd_ssc_fini(ssc);
27245 27241 if (rval != 0) {
27246 27242 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27247 27243 "sr_atapi_change_speed: Mode Sense Failed\n");
27248 27244 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27249 27245 return (rval);
27250 27246 }
27251 27247
27252 27248 /* Check the block descriptor len to handle only 1 block descriptor */
27253 27249 sense_mhp = (struct mode_header_grp2 *)sense;
27254 27250 bd_len = (sense_mhp->bdesc_length_hi << 8) | sense_mhp->bdesc_length_lo;
27255 27251 if (bd_len > MODE_BLK_DESC_LENGTH) {
27256 27252 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27257 27253 "sr_atapi_change_speed: Mode Sense returned invalid "
27258 27254 "block descriptor length\n");
27259 27255 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27260 27256 return (EIO);
27261 27257 }
27262 27258
27263 27259 /* Calculate the current and maximum drive speeds */
27264 27260 sense_page = (uchar_t *)(sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
27265 27261 current_speed = (sense_page[14] << 8) | sense_page[15];
27266 27262 max_speed = (sense_page[8] << 8) | sense_page[9];
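	/*
	 * For example, assuming SD_SPEED_1X is 176 (KB/sec at 1x), a 12x
	 * drive reporting 2112 in bytes 14-15 of the capabilities page
	 * returns 2112 / 176 = 12 for CDROMGDRVSPEED, while a
	 * CDROMSDRVSPEED request of CDROM_QUAD_SPEED is converted below to
	 * 4 * 176 = 704 KB/sec before being compared against max_speed.
	 */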
27267 27263
27268 27264 /* Process the command */
27269 27265 switch (cmd) {
27270 27266 case CDROMGDRVSPEED:
27271 27267 current_speed /= SD_SPEED_1X;
27272 27268 		if (ddi_copyout(&current_speed, (void *)data,
27273 27269 sizeof (int), flag) != 0)
27274 27270 rval = EFAULT;
27275 27271 break;
27276 27272 case CDROMSDRVSPEED:
27277 27273 /* Convert the speed code to KB/sec */
27278 27274 switch ((uchar_t)data) {
27279 27275 case CDROM_NORMAL_SPEED:
27280 27276 current_speed = SD_SPEED_1X;
27281 27277 break;
27282 27278 case CDROM_DOUBLE_SPEED:
27283 27279 current_speed = 2 * SD_SPEED_1X;
27284 27280 break;
27285 27281 case CDROM_QUAD_SPEED:
27286 27282 current_speed = 4 * SD_SPEED_1X;
27287 27283 break;
27288 27284 case CDROM_TWELVE_SPEED:
27289 27285 current_speed = 12 * SD_SPEED_1X;
27290 27286 break;
27291 27287 case CDROM_MAXIMUM_SPEED:
27292 27288 current_speed = 0xffff;
27293 27289 break;
27294 27290 default:
27295 27291 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27296 27292 "sr_atapi_change_speed: invalid drive speed %d\n",
27297 27293 (uchar_t)data);
27298 27294 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27299 27295 return (EINVAL);
27300 27296 }
27301 27297
27302 27298 /* Check the request against the drive's max speed. */
27303 27299 if (current_speed != 0xffff) {
27304 27300 if (current_speed > max_speed) {
27305 27301 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27306 27302 return (EINVAL);
27307 27303 }
27308 27304 }
27309 27305
27310 27306 /*
27311 27307 * Build and send the SET SPEED command
27312 27308 *
27313 27309 * Note: The SET SPEED (0xBB) command used in this routine is
27314 27310 * obsolete per the SCSI MMC spec but still supported in the
27315 27311 		 * MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
27316 27312 * therefore the command is still implemented in this routine.
27317 27313 */
27318 27314 bzero(cdb, sizeof (cdb));
27319 27315 cdb[0] = (char)SCMD_SET_CDROM_SPEED;
27320 27316 cdb[2] = (uchar_t)(current_speed >> 8);
27321 27317 cdb[3] = (uchar_t)current_speed;
27322 27318 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27323 27319 com->uscsi_cdb = (caddr_t)cdb;
27324 27320 com->uscsi_cdblen = CDB_GROUP5;
27325 27321 com->uscsi_bufaddr = NULL;
27326 27322 com->uscsi_buflen = 0;
27327 27323 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27328 27324 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, 0, SD_PATH_STANDARD);
27329 27325 break;
27330 27326 default:
27331 27327 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27332 27328 "sr_atapi_change_speed: Command '%x' Not Supported\n", cmd);
27333 27329 rval = EINVAL;
27334 27330 }
27335 27331
27336 27332 if (sense) {
27337 27333 kmem_free(sense, BUFLEN_MODE_CDROM_CAP);
27338 27334 }
27339 27335 if (com) {
27340 27336 kmem_free(com, sizeof (*com));
27341 27337 }
27342 27338 return (rval);
27343 27339 }
27344 27340
27345 27341
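The speed math above is worth spelling out: both speed fields in the CD capabilities mode page are 16-bit big-endian KB/sec values, and CDROMGDRVSPEED divides by SD_SPEED_1X to report the familiar "Nx" rating. A minimal userland sketch, assuming the conventional 1x CD rate of 176 KB/sec for SD_SPEED_1X:

#include <stdint.h>

#define SD_SPEED_1X	176	/* KB/sec at 1x; assumed to match the driver's value */

/*
 * Decode the big-endian "maximum speed" (bytes 8-9) and "current
 * speed" (bytes 14-15) fields of the CD capabilities page, as
 * sr_atapi_change_speed() does, and convert KB/sec to an Nx rating.
 */
static void
decode_drive_speeds(const uint8_t *sense_page, int *cur_x, int *max_x)
{
	int current_speed = (sense_page[14] << 8) | sense_page[15];
	int max_speed = (sense_page[8] << 8) | sense_page[9];

	*cur_x = current_speed / SD_SPEED_1X;
	*max_x = max_speed / SD_SPEED_1X;
}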
27346 27342 /*
27347 27343 * Function: sr_pause_resume()
27348 27344 *
27349 27345 * Description: This routine is the driver entry point for handling CD-ROM
27350 27346 * pause/resume ioctl requests. This only affects the audio play
27351 27347 * operation.
27352 27348 *
27353 27349 * Arguments: dev - the device 'dev_t'
27354 27350 * cmd - the request type; one of CDROMPAUSE or CDROMRESUME, used
27355 27351 * for setting the resume bit of the cdb.
27356 27352 *
27357 27353 * Return Code: the code returned by sd_send_scsi_cmd()
27358 27354 * EINVAL if invalid mode specified
27359 27355 *
27360 27356 */
27361 27357
27362 27358 static int
27363 27359 sr_pause_resume(dev_t dev, int cmd)
27364 27360 {
27365 27361 struct sd_lun *un;
27366 27362 struct uscsi_cmd *com;
27367 27363 char cdb[CDB_GROUP1];
27368 27364 int rval;
27369 27365
27370 27366 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27371 27367 return (ENXIO);
27372 27368 }
27373 27369
27374 27370 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27375 27371 bzero(cdb, CDB_GROUP1);
27376 27372 cdb[0] = SCMD_PAUSE_RESUME;
27377 27373 switch (cmd) {
27378 27374 case CDROMRESUME:
27379 27375 cdb[8] = 1;
27380 27376 break;
27381 27377 case CDROMPAUSE:
27382 27378 cdb[8] = 0;
27383 27379 break;
27384 27380 default:
27385 27381 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_pause_resume:"
27386 27382 " Command '%x' Not Supported\n", cmd);
27387 27383 rval = EINVAL;
27388 27384 goto done;
27389 27385 }
27390 27386
27391 27387 com->uscsi_cdb = cdb;
27392 27388 com->uscsi_cdblen = CDB_GROUP1;
27393 27389 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27394 27390
27395 27391 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27396 27392 SD_PATH_STANDARD);
27397 27393
27398 27394 done:
27399 27395 kmem_free(com, sizeof (*com));
27400 27396 return (rval);
27401 27397 }
27402 27398
27403 27399
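For reference, the only variable part of the PAUSE/RESUME CDB built above is the Resume bit in byte 8. A sketch of the layout, with the opcode value as defined by the MMC spec:

#include <string.h>

#define SCMD_PAUSE_RESUME	0x4B	/* MMC PAUSE/RESUME opcode */
#define CDB_GROUP1		10	/* 10-byte CDB */

/* Byte 8 bit 0 is the Resume flag: 1 resumes audio play, 0 pauses it. */
static void
build_pause_resume_cdb(unsigned char cdb[CDB_GROUP1], int resume)
{
	(void) memset(cdb, 0, CDB_GROUP1);
	cdb[0] = SCMD_PAUSE_RESUME;
	cdb[8] = resume ? 1 : 0;
}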
27404 27400 /*
27405 27401 * Function: sr_play_msf()
27406 27402 *
27407 27403 * Description: This routine is the driver entry point for handling CD-ROM
27408 27404 * ioctl requests to output the audio signals at the specified
27409 27405 * starting address and continue the audio play until the specified
27410 27406  *		ending address (CDROMPLAYMSF). The address is in Minute Second
27411 27407 * Frame (MSF) format.
27412 27408 *
27413 27409 * Arguments: dev - the device 'dev_t'
27414 27410 * data - pointer to user provided audio msf structure,
27415 27411 * specifying start/end addresses.
27416 27412 * flag - this argument is a pass through to ddi_copyxxx()
27417 27413 * directly from the mode argument of ioctl().
27418 27414 *
27419 27415 * Return Code: the code returned by sd_send_scsi_cmd()
27420 27416 * EFAULT if ddi_copyxxx() fails
27421 27417 * ENXIO if fail ddi_get_soft_state
27422 27418 * EINVAL if data pointer is NULL
27423 27419 */
27424 27420
27425 27421 static int
27426 27422 sr_play_msf(dev_t dev, caddr_t data, int flag)
27427 27423 {
27428 27424 struct sd_lun *un;
27429 27425 struct uscsi_cmd *com;
27430 27426 struct cdrom_msf msf_struct;
27431 27427 struct cdrom_msf *msf = &msf_struct;
27432 27428 char cdb[CDB_GROUP1];
27433 27429 int rval;
27434 27430
27435 27431 if (data == NULL) {
27436 27432 return (EINVAL);
27437 27433 }
27438 27434
27439 27435 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27440 27436 return (ENXIO);
27441 27437 }
27442 27438
27443 27439 if (ddi_copyin(data, msf, sizeof (struct cdrom_msf), flag)) {
27444 27440 return (EFAULT);
27445 27441 }
27446 27442
27447 27443 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27448 27444 bzero(cdb, CDB_GROUP1);
27449 27445 cdb[0] = SCMD_PLAYAUDIO_MSF;
27450 27446 if (un->un_f_cfg_playmsf_bcd == TRUE) {
27451 27447 cdb[3] = BYTE_TO_BCD(msf->cdmsf_min0);
27452 27448 cdb[4] = BYTE_TO_BCD(msf->cdmsf_sec0);
27453 27449 cdb[5] = BYTE_TO_BCD(msf->cdmsf_frame0);
27454 27450 cdb[6] = BYTE_TO_BCD(msf->cdmsf_min1);
27455 27451 cdb[7] = BYTE_TO_BCD(msf->cdmsf_sec1);
27456 27452 cdb[8] = BYTE_TO_BCD(msf->cdmsf_frame1);
27457 27453 } else {
27458 27454 cdb[3] = msf->cdmsf_min0;
27459 27455 cdb[4] = msf->cdmsf_sec0;
27460 27456 cdb[5] = msf->cdmsf_frame0;
27461 27457 cdb[6] = msf->cdmsf_min1;
27462 27458 cdb[7] = msf->cdmsf_sec1;
27463 27459 cdb[8] = msf->cdmsf_frame1;
27464 27460 }
27465 27461 com->uscsi_cdb = cdb;
27466 27462 com->uscsi_cdblen = CDB_GROUP1;
27467 27463 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27468 27464 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27469 27465 SD_PATH_STANDARD);
27470 27466 kmem_free(com, sizeof (*com));
27471 27467 return (rval);
27472 27468 }
27473 27469
27474 27470
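The BCD branch above packs each decimal digit of the minute/second/frame values into a nibble, which some legacy drives expect. A one-line sketch mirroring the conventional definition of the macro (the real one lives in a system header):

/* Binary to packed BCD: 59 -> 0x59, 7 -> 0x07 */
#define BYTE_TO_BCD(x)	((((x) / 10) << 4) | ((x) % 10))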
27475 27471 /*
27476 27472 * Function: sr_play_trkind()
27477 27473 *
27478 27474 * Description: This routine is the driver entry point for handling CD-ROM
27479 27475 * ioctl requests to output the audio signals at the specified
27480 27476 * starting address and continue the audio play until the specified
27481 27477 * ending address (CDROMPLAYTRKIND). The address is in Track Index
27482 27478 * format.
27483 27479 *
27484 27480 * Arguments: dev - the device 'dev_t'
27485 27481 * data - pointer to user provided audio track/index structure,
27486 27482 * specifying start/end addresses.
27487 27483 * flag - this argument is a pass through to ddi_copyxxx()
27488 27484 * directly from the mode argument of ioctl().
27489 27485 *
27490 27486 * Return Code: the code returned by sd_send_scsi_cmd()
27491 27487 * EFAULT if ddi_copyxxx() fails
27492 27488 * ENXIO if fail ddi_get_soft_state
27493 27489 * EINVAL if data pointer is NULL
27494 27490 */
27495 27491
27496 27492 static int
27497 27493 sr_play_trkind(dev_t dev, caddr_t data, int flag)
27498 27494 {
27499 27495 struct cdrom_ti ti_struct;
27500 27496 struct cdrom_ti *ti = &ti_struct;
27501 27497 struct uscsi_cmd *com = NULL;
27502 27498 char cdb[CDB_GROUP1];
27503 27499 int rval;
27504 27500
27505 27501 if (data == NULL) {
27506 27502 return (EINVAL);
27507 27503 }
27508 27504
27509 27505 if (ddi_copyin(data, ti, sizeof (struct cdrom_ti), flag)) {
27510 27506 return (EFAULT);
27511 27507 }
27512 27508
27513 27509 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27514 27510 bzero(cdb, CDB_GROUP1);
27515 27511 cdb[0] = SCMD_PLAYAUDIO_TI;
27516 27512 cdb[4] = ti->cdti_trk0;
27517 27513 cdb[5] = ti->cdti_ind0;
27518 27514 cdb[7] = ti->cdti_trk1;
27519 27515 cdb[8] = ti->cdti_ind1;
27520 27516 com->uscsi_cdb = cdb;
27521 27517 com->uscsi_cdblen = CDB_GROUP1;
27522 27518 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT;
27523 27519 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27524 27520 SD_PATH_STANDARD);
27525 27521 kmem_free(com, sizeof (*com));
27526 27522 return (rval);
27527 27523 }
27528 27524
27529 27525
27530 27526 /*
27531 27527 * Function: sr_read_all_subcodes()
27532 27528 *
27533 27529 * Description: This routine is the driver entry point for handling CD-ROM
27534 27530 * ioctl requests to return raw subcode data while the target is
27535 27531 * playing audio (CDROMSUBCODE).
27536 27532 *
27537 27533 * Arguments: dev - the device 'dev_t'
27538 27534 * data - pointer to user provided cdrom subcode structure,
27539 27535 * specifying the transfer length and address.
27540 27536 * flag - this argument is a pass through to ddi_copyxxx()
27541 27537 * directly from the mode argument of ioctl().
27542 27538 *
27543 27539 * Return Code: the code returned by sd_send_scsi_cmd()
27544 27540 * EFAULT if ddi_copyxxx() fails
27545 27541 * ENXIO if fail ddi_get_soft_state
27546 27542 * EINVAL if data pointer is NULL
27547 27543 */
27548 27544
27549 27545 static int
27550 27546 sr_read_all_subcodes(dev_t dev, caddr_t data, int flag)
27551 27547 {
27552 27548 struct sd_lun *un = NULL;
27553 27549 struct uscsi_cmd *com = NULL;
27554 27550 struct cdrom_subcode *subcode = NULL;
27555 27551 int rval;
27556 27552 size_t buflen;
27557 27553 char cdb[CDB_GROUP5];
27558 27554
27559 27555 #ifdef _MULTI_DATAMODEL
27560 27556 /* To support ILP32 applications in an LP64 world */
27561 27557 struct cdrom_subcode32 cdrom_subcode32;
27562 27558 struct cdrom_subcode32 *cdsc32 = &cdrom_subcode32;
27563 27559 #endif
27564 27560 if (data == NULL) {
27565 27561 return (EINVAL);
27566 27562 }
27567 27563
27568 27564 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
27569 27565 return (ENXIO);
27570 27566 }
27571 27567
27572 27568 subcode = kmem_zalloc(sizeof (struct cdrom_subcode), KM_SLEEP);
27573 27569
27574 27570 #ifdef _MULTI_DATAMODEL
27575 27571 switch (ddi_model_convert_from(flag & FMODELS)) {
27576 27572 case DDI_MODEL_ILP32:
27577 27573 if (ddi_copyin(data, cdsc32, sizeof (*cdsc32), flag)) {
27578 27574 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27579 27575 "sr_read_all_subcodes: ddi_copyin Failed\n");
27580 27576 kmem_free(subcode, sizeof (struct cdrom_subcode));
27581 27577 return (EFAULT);
27582 27578 }
27583 27579 /* Convert the ILP32 uscsi data from the application to LP64 */
27584 27580 cdrom_subcode32tocdrom_subcode(cdsc32, subcode);
27585 27581 break;
27586 27582 case DDI_MODEL_NONE:
27587 27583 if (ddi_copyin(data, subcode,
27588 27584 sizeof (struct cdrom_subcode), flag)) {
27589 27585 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27590 27586 "sr_read_all_subcodes: ddi_copyin Failed\n");
27591 27587 kmem_free(subcode, sizeof (struct cdrom_subcode));
27592 27588 return (EFAULT);
27593 27589 }
27594 27590 break;
27595 27591 }
27596 27592 #else /* ! _MULTI_DATAMODEL */
27597 27593 if (ddi_copyin(data, subcode, sizeof (struct cdrom_subcode), flag)) {
27598 27594 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27599 27595 "sr_read_all_subcodes: ddi_copyin Failed\n");
27600 27596 kmem_free(subcode, sizeof (struct cdrom_subcode));
27601 27597 return (EFAULT);
27602 27598 }
27603 27599 #endif /* _MULTI_DATAMODEL */
27604 27600
27605 27601 /*
27606 27602 	 * Since MMC-2 allows at most 3 bytes for the transfer length,
27607 27603 	 * reject any length that does not fit in 3 bytes.
27608 27604 */
27609 27605 if ((subcode->cdsc_length & 0xFF000000) != 0) {
27610 27606 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
27611 27607 "sr_read_all_subcodes: "
27612 27608 "cdrom transfer length too large: %d (limit %d)\n",
27613 27609 subcode->cdsc_length, 0xFFFFFF);
27614 27610 kmem_free(subcode, sizeof (struct cdrom_subcode));
27615 27611 return (EINVAL);
27616 27612 }
27617 27613
27618 27614 buflen = CDROM_BLK_SUBCODE * subcode->cdsc_length;
27619 27615 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27620 27616 bzero(cdb, CDB_GROUP5);
27621 27617
27622 27618 if (un->un_f_mmc_cap == TRUE) {
27623 27619 cdb[0] = (char)SCMD_READ_CD;
27624 27620 cdb[2] = (char)0xff;
27625 27621 cdb[3] = (char)0xff;
27626 27622 cdb[4] = (char)0xff;
27627 27623 cdb[5] = (char)0xff;
27628 27624 cdb[6] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
27629 27625 cdb[7] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
27630 27626 cdb[8] = ((subcode->cdsc_length) & 0x000000ff);
27631 27627 cdb[10] = 1;
27632 27628 } else {
27633 27629 /*
27634 27630 		 * Note: A vendor specific command (0xDF) is being used here to
27635 27631 * request a read of all subcodes.
27636 27632 */
27637 27633 cdb[0] = (char)SCMD_READ_ALL_SUBCODES;
27638 27634 cdb[6] = (((subcode->cdsc_length) & 0xff000000) >> 24);
27639 27635 cdb[7] = (((subcode->cdsc_length) & 0x00ff0000) >> 16);
27640 27636 cdb[8] = (((subcode->cdsc_length) & 0x0000ff00) >> 8);
27641 27637 cdb[9] = ((subcode->cdsc_length) & 0x000000ff);
27642 27638 }
27643 27639 com->uscsi_cdb = cdb;
27644 27640 com->uscsi_cdblen = CDB_GROUP5;
27645 27641 com->uscsi_bufaddr = (caddr_t)subcode->cdsc_addr;
27646 27642 com->uscsi_buflen = buflen;
27647 27643 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27648 27644 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
27649 27645 SD_PATH_STANDARD);
27650 27646 kmem_free(subcode, sizeof (struct cdrom_subcode));
27651 27647 kmem_free(com, sizeof (*com));
27652 27648 return (rval);
27653 27649 }
27654 27650
27655 27651
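The 0xFF000000 test above exists because the READ CD transfer length is a 3-byte field; a block count with any of the top 8 bits set simply cannot be encoded. A sketch of the check and the resulting buffer sizing (96 raw subcode bytes per block):

#include <stddef.h>
#include <stdint.h>

#define CDROM_BLK_SUBCODE	96	/* raw subcode bytes per block */

static int
subcode_buflen(uint32_t nblks, size_t *buflen)
{
	/* The CDB length field is only 3 bytes wide */
	if ((nblks & 0xFF000000) != 0)
		return (-1);
	*buflen = (size_t)CDROM_BLK_SUBCODE * nblks;
	return (0);
}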
27656 27652 /*
27657 27653 * Function: sr_read_subchannel()
27658 27654 *
27659 27655 * Description: This routine is the driver entry point for handling CD-ROM
27660 27656 * ioctl requests to return the Q sub-channel data of the CD
27661 27657 * current position block. (CDROMSUBCHNL) The data includes the
27662 27658  *		current position block (CDROMSUBCHNL). The data includes the
27663 27659 * format per the user) , track relative CD-ROM address (LBA or MSF
27664 27660  *		format per the user), track relative CD-ROM address (LBA or MSF
27665 27661 *
27666 27662 * Arguments: dev - the device 'dev_t'
27667 27663 * data - pointer to user provided cdrom sub-channel structure
27668 27664 * flag - this argument is a pass through to ddi_copyxxx()
27669 27665 * directly from the mode argument of ioctl().
27670 27666 *
27671 27667 * Return Code: the code returned by sd_send_scsi_cmd()
27672 27668 * EFAULT if ddi_copyxxx() fails
27673 27669 * ENXIO if fail ddi_get_soft_state
27674 27670 * EINVAL if data pointer is NULL
27675 27671 */
27676 27672
27677 27673 static int
27678 27674 sr_read_subchannel(dev_t dev, caddr_t data, int flag)
27679 27675 {
27680 27676 struct sd_lun *un;
27681 27677 struct uscsi_cmd *com;
27682 27678 struct cdrom_subchnl subchanel;
27683 27679 struct cdrom_subchnl *subchnl = &subchanel;
27684 27680 char cdb[CDB_GROUP1];
27685 27681 caddr_t buffer;
27686 27682 int rval;
27687 27683
27688 27684 if (data == NULL) {
27689 27685 return (EINVAL);
27690 27686 }
27691 27687
27692 27688 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27693 27689 (un->un_state == SD_STATE_OFFLINE)) {
27694 27690 return (ENXIO);
27695 27691 }
27696 27692
27697 27693 if (ddi_copyin(data, subchnl, sizeof (struct cdrom_subchnl), flag)) {
27698 27694 return (EFAULT);
27699 27695 }
27700 27696
27701 27697 buffer = kmem_zalloc((size_t)16, KM_SLEEP);
27702 27698 bzero(cdb, CDB_GROUP1);
27703 27699 cdb[0] = SCMD_READ_SUBCHANNEL;
27704 27700 /* Set the MSF bit based on the user requested address format */
27705 27701 cdb[1] = (subchnl->cdsc_format & CDROM_LBA) ? 0 : 0x02;
27706 27702 /*
27707 27703 * Set the Q bit in byte 2 to indicate that Q sub-channel data be
27708 27704 * returned
27709 27705 */
27710 27706 cdb[2] = 0x40;
27711 27707 /*
27712 27708 * Set byte 3 to specify the return data format. A value of 0x01
27713 27709 * indicates that the CD-ROM current position should be returned.
27714 27710 */
27715 27711 cdb[3] = 0x01;
27716 27712 cdb[8] = 0x10;
27717 27713 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27718 27714 com->uscsi_cdb = cdb;
27719 27715 com->uscsi_cdblen = CDB_GROUP1;
27720 27716 com->uscsi_bufaddr = buffer;
27721 27717 com->uscsi_buflen = 16;
27722 27718 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
27723 27719 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27724 27720 SD_PATH_STANDARD);
27725 27721 if (rval != 0) {
27726 27722 kmem_free(buffer, 16);
27727 27723 kmem_free(com, sizeof (*com));
27728 27724 return (rval);
27729 27725 }
27730 27726
27731 27727 /* Process the returned Q sub-channel data */
27732 27728 subchnl->cdsc_audiostatus = buffer[1];
27733 27729 subchnl->cdsc_adr = (buffer[5] & 0xF0) >> 4;
27734 27730 subchnl->cdsc_ctrl = (buffer[5] & 0x0F);
27735 27731 subchnl->cdsc_trk = buffer[6];
27736 27732 subchnl->cdsc_ind = buffer[7];
27737 27733 if (subchnl->cdsc_format & CDROM_LBA) {
27738 27734 subchnl->cdsc_absaddr.lba =
27739 27735 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
27740 27736 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
27741 27737 subchnl->cdsc_reladdr.lba =
27742 27738 ((uchar_t)buffer[12] << 24) + ((uchar_t)buffer[13] << 16) +
27743 27739 ((uchar_t)buffer[14] << 8) + ((uchar_t)buffer[15]);
27744 27740 } else if (un->un_f_cfg_readsub_bcd == TRUE) {
27745 27741 subchnl->cdsc_absaddr.msf.minute = BCD_TO_BYTE(buffer[9]);
27746 27742 subchnl->cdsc_absaddr.msf.second = BCD_TO_BYTE(buffer[10]);
27747 27743 subchnl->cdsc_absaddr.msf.frame = BCD_TO_BYTE(buffer[11]);
27748 27744 subchnl->cdsc_reladdr.msf.minute = BCD_TO_BYTE(buffer[13]);
27749 27745 subchnl->cdsc_reladdr.msf.second = BCD_TO_BYTE(buffer[14]);
27750 27746 subchnl->cdsc_reladdr.msf.frame = BCD_TO_BYTE(buffer[15]);
27751 27747 } else {
27752 27748 subchnl->cdsc_absaddr.msf.minute = buffer[9];
27753 27749 subchnl->cdsc_absaddr.msf.second = buffer[10];
27754 27750 subchnl->cdsc_absaddr.msf.frame = buffer[11];
27755 27751 subchnl->cdsc_reladdr.msf.minute = buffer[13];
27756 27752 subchnl->cdsc_reladdr.msf.second = buffer[14];
27757 27753 subchnl->cdsc_reladdr.msf.frame = buffer[15];
27758 27754 }
27759 27755 kmem_free(buffer, 16);
27760 27756 kmem_free(com, sizeof (*com));
27761 27757 if (ddi_copyout(subchnl, data, sizeof (struct cdrom_subchnl), flag)
27762 27758 != 0) {
27763 27759 return (EFAULT);
27764 27760 }
27765 27761 return (rval);
27766 27762 }
27767 27763
27768 27764
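The LBA fields in the READ SUBCHANNEL response are plain 32-bit big-endian values; the inline shifts above over buffer[8..11] and buffer[12..15] are equivalent to this helper:

#include <stdint.h>

static uint32_t
be32_at(const unsigned char *p)
{
	return (((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	    ((uint32_t)p[2] << 8) | (uint32_t)p[3]);
}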
27769 27765 /*
27770 27766 * Function: sr_read_tocentry()
27771 27767 *
27772 27768 * Description: This routine is the driver entry point for handling CD-ROM
27773 27769 * ioctl requests to read from the Table of Contents (TOC)
27774 27770 * (CDROMREADTOCENTRY). This routine provides the ADR and CTRL
27775 27771 * fields, the starting address (LBA or MSF format per the user)
27776 27772 * and the data mode if the user specified track is a data track.
27777 27773 *
27778 27774 * Note: The READ HEADER (0x44) command used in this routine is
27779 27775 * obsolete per the SCSI MMC spec but still supported in the
27780 27776  *		MT FUJI vendor spec. Most equipment is adhering to MT FUJI,
27781 27777 * therefore the command is still implemented in this routine.
27782 27778 *
27783 27779 * Arguments: dev - the device 'dev_t'
27784 27780 * data - pointer to user provided toc entry structure,
27785 27781 * specifying the track # and the address format
27786 27782 * (LBA or MSF).
27787 27783 * flag - this argument is a pass through to ddi_copyxxx()
27788 27784 * directly from the mode argument of ioctl().
27789 27785 *
27790 27786 * Return Code: the code returned by sd_send_scsi_cmd()
27791 27787 * EFAULT if ddi_copyxxx() fails
27792 27788 * ENXIO if fail ddi_get_soft_state
27793 27789 * EINVAL if data pointer is NULL
27794 27790 */
27795 27791
27796 27792 static int
27797 27793 sr_read_tocentry(dev_t dev, caddr_t data, int flag)
27798 27794 {
27799 27795 struct sd_lun *un = NULL;
27800 27796 struct uscsi_cmd *com;
27801 27797 struct cdrom_tocentry toc_entry;
27802 27798 struct cdrom_tocentry *entry = &toc_entry;
27803 27799 caddr_t buffer;
27804 27800 int rval;
27805 27801 char cdb[CDB_GROUP1];
27806 27802
27807 27803 if (data == NULL) {
27808 27804 return (EINVAL);
27809 27805 }
27810 27806
27811 27807 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27812 27808 (un->un_state == SD_STATE_OFFLINE)) {
27813 27809 return (ENXIO);
27814 27810 }
27815 27811
27816 27812 if (ddi_copyin(data, entry, sizeof (struct cdrom_tocentry), flag)) {
27817 27813 return (EFAULT);
27818 27814 }
27819 27815
27820 27816 /* Validate the requested track and address format */
27821 27817 if (!(entry->cdte_format & (CDROM_LBA | CDROM_MSF))) {
27822 27818 return (EINVAL);
27823 27819 }
27824 27820
27825 27821 if (entry->cdte_track == 0) {
27826 27822 return (EINVAL);
27827 27823 }
27828 27824
27829 27825 buffer = kmem_zalloc((size_t)12, KM_SLEEP);
27830 27826 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
27831 27827 bzero(cdb, CDB_GROUP1);
27832 27828
27833 27829 cdb[0] = SCMD_READ_TOC;
27834 27830 /* Set the MSF bit based on the user requested address format */
27835 27831 cdb[1] = ((entry->cdte_format & CDROM_LBA) ? 0 : 2);
27836 27832 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
27837 27833 cdb[6] = BYTE_TO_BCD(entry->cdte_track);
27838 27834 } else {
27839 27835 cdb[6] = entry->cdte_track;
27840 27836 }
27841 27837
27842 27838 /*
27843 27839 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
27844 27840 * (4 byte TOC response header + 8 byte track descriptor)
27845 27841 */
27846 27842 cdb[8] = 12;
27847 27843 com->uscsi_cdb = cdb;
27848 27844 com->uscsi_cdblen = CDB_GROUP1;
27849 27845 com->uscsi_bufaddr = buffer;
27850 27846 com->uscsi_buflen = 0x0C;
27851 27847 com->uscsi_flags = (USCSI_DIAGNOSE | USCSI_SILENT | USCSI_READ);
27852 27848 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27853 27849 SD_PATH_STANDARD);
27854 27850 if (rval != 0) {
27855 27851 kmem_free(buffer, 12);
27856 27852 kmem_free(com, sizeof (*com));
27857 27853 return (rval);
27858 27854 }
27859 27855
27860 27856 /* Process the toc entry */
27861 27857 entry->cdte_adr = (buffer[5] & 0xF0) >> 4;
27862 27858 entry->cdte_ctrl = (buffer[5] & 0x0F);
27863 27859 if (entry->cdte_format & CDROM_LBA) {
27864 27860 entry->cdte_addr.lba =
27865 27861 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
27866 27862 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
27867 27863 } else if (un->un_f_cfg_read_toc_addr_bcd == TRUE) {
27868 27864 entry->cdte_addr.msf.minute = BCD_TO_BYTE(buffer[9]);
27869 27865 entry->cdte_addr.msf.second = BCD_TO_BYTE(buffer[10]);
27870 27866 entry->cdte_addr.msf.frame = BCD_TO_BYTE(buffer[11]);
27871 27867 /*
27872 27868 * Send a READ TOC command using the LBA address format to get
27873 27869 * the LBA for the track requested so it can be used in the
27874 27870 * READ HEADER request
27875 27871 *
27876 27872 * Note: The MSF bit of the READ HEADER command specifies the
27877 27873 * output format. The block address specified in that command
27878 27874 * must be in LBA format.
27879 27875 */
27880 27876 cdb[1] = 0;
27881 27877 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27882 27878 SD_PATH_STANDARD);
27883 27879 if (rval != 0) {
27884 27880 kmem_free(buffer, 12);
27885 27881 kmem_free(com, sizeof (*com));
27886 27882 return (rval);
27887 27883 }
27888 27884 } else {
27889 27885 entry->cdte_addr.msf.minute = buffer[9];
27890 27886 entry->cdte_addr.msf.second = buffer[10];
27891 27887 entry->cdte_addr.msf.frame = buffer[11];
27892 27888 /*
27893 27889 * Send a READ TOC command using the LBA address format to get
27894 27890 * the LBA for the track requested so it can be used in the
27895 27891 * READ HEADER request
27896 27892 *
27897 27893 * Note: The MSF bit of the READ HEADER command specifies the
27898 27894 * output format. The block address specified in that command
27899 27895 * must be in LBA format.
27900 27896 */
27901 27897 cdb[1] = 0;
27902 27898 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27903 27899 SD_PATH_STANDARD);
27904 27900 if (rval != 0) {
27905 27901 kmem_free(buffer, 12);
27906 27902 kmem_free(com, sizeof (*com));
27907 27903 return (rval);
27908 27904 }
27909 27905 }
27910 27906
27911 27907 /*
27912 27908 * Build and send the READ HEADER command to determine the data mode of
27913 27909 * the user specified track.
27914 27910 */
27915 27911 if ((entry->cdte_ctrl & CDROM_DATA_TRACK) &&
27916 27912 (entry->cdte_track != CDROM_LEADOUT)) {
27917 27913 bzero(cdb, CDB_GROUP1);
27918 27914 cdb[0] = SCMD_READ_HEADER;
27919 27915 cdb[2] = buffer[8];
27920 27916 cdb[3] = buffer[9];
27921 27917 cdb[4] = buffer[10];
27922 27918 cdb[5] = buffer[11];
27923 27919 cdb[8] = 0x08;
27924 27920 com->uscsi_buflen = 0x08;
27925 27921 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
27926 27922 SD_PATH_STANDARD);
27927 27923 if (rval == 0) {
27928 27924 entry->cdte_datamode = buffer[0];
27929 27925 } else {
27930 27926 /*
27931 27927 			 * The READ HEADER command failed; since it is
27932 27928 			 * obsoleted in one spec, it's better to return
27933 27929 			 * -1 for an invalid track so that we can still
27934 27930 			 * receive the rest of the TOC data.
27935 27931 */
27936 27932 entry->cdte_datamode = (uchar_t)-1;
27937 27933 }
27938 27934 } else {
27939 27935 entry->cdte_datamode = (uchar_t)-1;
27940 27936 }
27941 27937
27942 27938 kmem_free(buffer, 12);
27943 27939 kmem_free(com, sizeof (*com));
27944 27940 if (ddi_copyout(entry, data, sizeof (struct cdrom_tocentry), flag) != 0)
27945 27941 return (EFAULT);
27946 27942
27947 27943 return (rval);
27948 27944 }
27949 27945
27950 27946
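The 12-byte allocation length used above corresponds to the 4-byte TOC response header plus a single 8-byte track descriptor. A sketch of the layout being indexed (field names here are illustrative, not from a system header):

#include <stdint.h>

struct toc_entry_resp {
	uint8_t	data_len_hi;	/* [0] TOC data length, MSB */
	uint8_t	data_len_lo;	/* [1] TOC data length, LSB */
	uint8_t	first_track;	/* [2] */
	uint8_t	last_track;	/* [3] */
	uint8_t	reserved;	/* [4] */
	uint8_t	adr_ctrl;	/* [5] ADR in the high nibble, CTRL in the low */
	uint8_t	track;		/* [6] */
	uint8_t	reserved2;	/* [7] */
	uint8_t	addr[4];	/* [8-11] LBA, or 00:MM:SS:FF when the MSF bit is set */
};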
27951 27947 /*
27952 27948 * Function: sr_read_tochdr()
27953 27949 *
27954 27950 * Description: This routine is the driver entry point for handling CD-ROM
27955 27951 * ioctl requests to read the Table of Contents (TOC) header
27956 27952  *		(CDROMREADTOCHDR). The TOC header consists of the disk starting
27957 27953  *		and ending track numbers.
27958 27954 *
27959 27955 * Arguments: dev - the device 'dev_t'
27960 27956 * data - pointer to user provided toc header structure,
27961 27957 * specifying the starting and ending track numbers.
27962 27958 * flag - this argument is a pass through to ddi_copyxxx()
27963 27959 * directly from the mode argument of ioctl().
27964 27960 *
27965 27961 * Return Code: the code returned by sd_send_scsi_cmd()
27966 27962 * EFAULT if ddi_copyxxx() fails
27967 27963 * ENXIO if fail ddi_get_soft_state
27968 27964 * EINVAL if data pointer is NULL
27969 27965 */
27970 27966
27971 27967 static int
27972 27968 sr_read_tochdr(dev_t dev, caddr_t data, int flag)
27973 27969 {
27974 27970 struct sd_lun *un;
27975 27971 struct uscsi_cmd *com;
27976 27972 struct cdrom_tochdr toc_header;
27977 27973 struct cdrom_tochdr *hdr = &toc_header;
27978 27974 char cdb[CDB_GROUP1];
27979 27975 int rval;
27980 27976 caddr_t buffer;
27981 27977
27982 27978 if (data == NULL) {
27983 27979 return (EINVAL);
27984 27980 }
27985 27981
27986 27982 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
27987 27983 (un->un_state == SD_STATE_OFFLINE)) {
27988 27984 return (ENXIO);
27989 27985 }
27990 27986
27991 27987 buffer = kmem_zalloc(4, KM_SLEEP);
27992 27988 bzero(cdb, CDB_GROUP1);
27993 27989 cdb[0] = SCMD_READ_TOC;
27994 27990 /*
27995 27991 * Specifying a track number of 0x00 in the READ TOC command indicates
27996 27992 * that the TOC header should be returned
27997 27993 */
27998 27994 cdb[6] = 0x00;
27999 27995 /*
28000 27996 * Bytes 7 & 8 are the 4 byte allocation length for TOC header.
28001 27997 * (2 byte data len + 1 byte starting track # + 1 byte ending track #)
28002 27998 */
28003 27999 cdb[8] = 0x04;
28004 28000 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28005 28001 com->uscsi_cdb = cdb;
28006 28002 com->uscsi_cdblen = CDB_GROUP1;
28007 28003 com->uscsi_bufaddr = buffer;
28008 28004 com->uscsi_buflen = 0x04;
28009 28005 com->uscsi_timeout = 300;
28010 28006 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28011 28007
28012 28008 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
28013 28009 SD_PATH_STANDARD);
28014 28010 if (un->un_f_cfg_read_toc_trk_bcd == TRUE) {
28015 28011 hdr->cdth_trk0 = BCD_TO_BYTE(buffer[2]);
28016 28012 hdr->cdth_trk1 = BCD_TO_BYTE(buffer[3]);
28017 28013 } else {
28018 28014 hdr->cdth_trk0 = buffer[2];
28019 28015 hdr->cdth_trk1 = buffer[3];
28020 28016 }
28021 28017 kmem_free(buffer, 4);
28022 28018 kmem_free(com, sizeof (*com));
28023 28019 if (ddi_copyout(hdr, data, sizeof (struct cdrom_tochdr), flag) != 0) {
28024 28020 return (EFAULT);
28025 28021 }
28026 28022 return (rval);
28027 28023 }
28028 28024
28029 28025
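The 4-byte TOC header parsed above is just a 2-byte data length followed by the first and last track numbers; drives flagged with un_f_cfg_read_toc_trk_bcd report the track numbers in BCD. A sketch:

/* Packed BCD to binary: 0x59 -> 59 */
#define BCD_TO_BYTE(x)	((((x) >> 4) & 0x0F) * 10 + ((x) & 0x0F))

static void
parse_toc_header(const unsigned char buf[4], int trk_bcd,
    unsigned char *trk0, unsigned char *trk1)
{
	*trk0 = trk_bcd ? BCD_TO_BYTE(buf[2]) : buf[2];
	*trk1 = trk_bcd ? BCD_TO_BYTE(buf[3]) : buf[3];
}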
28030 28026 /*
28031 28027 * Note: The following sr_read_mode1(), sr_read_cd_mode2(), sr_read_mode2(),
28032 28028  * sr_read_cdda(), and sr_read_cdxa() routines implement driver support for
28033 28029 * handling CDROMREAD ioctl requests for mode 1 user data, mode 2 user data,
28034 28030 * digital audio and extended architecture digital audio. These modes are
28035 28031 * defined in the IEC908 (Red Book), ISO10149 (Yellow Book), and the SCSI3
28036 28032 * MMC specs.
28037 28033 *
28038 28034  * In addition to support for the various data formats, these routines also
28039 28035 * include support for devices that implement only the direct access READ
28040 28036 * commands (0x08, 0x28), devices that implement the READ_CD commands
28041 28037 * (0xBE, 0xD4), and devices that implement the vendor unique READ CDDA and
28042 28038  * READ CDXA commands (0xD8, 0xDB).
28043 28039 */
28044 28040
28045 28041 /*
28046 28042 * Function: sr_read_mode1()
28047 28043 *
28048 28044 * Description: This routine is the driver entry point for handling CD-ROM
28049 28045 * ioctl read mode1 requests (CDROMREADMODE1).
28050 28046 *
28051 28047 * Arguments: dev - the device 'dev_t'
28052 28048 * data - pointer to user provided cd read structure specifying
28053 28049 * the lba buffer address and length.
28054 28050 * flag - this argument is a pass through to ddi_copyxxx()
28055 28051 * directly from the mode argument of ioctl().
28056 28052 *
28057 28053 * Return Code: the code returned by sd_send_scsi_cmd()
28058 28054 * EFAULT if ddi_copyxxx() fails
28059 28055 * ENXIO if fail ddi_get_soft_state
28060 28056 * EINVAL if data pointer is NULL
28061 28057 */
28062 28058
28063 28059 static int
28064 28060 sr_read_mode1(dev_t dev, caddr_t data, int flag)
28065 28061 {
28066 28062 struct sd_lun *un;
28067 28063 struct cdrom_read mode1_struct;
28068 28064 struct cdrom_read *mode1 = &mode1_struct;
28069 28065 int rval;
28070 28066 sd_ssc_t *ssc;
28071 28067
28072 28068 #ifdef _MULTI_DATAMODEL
28073 28069 /* To support ILP32 applications in an LP64 world */
28074 28070 struct cdrom_read32 cdrom_read32;
28075 28071 struct cdrom_read32 *cdrd32 = &cdrom_read32;
28076 28072 #endif /* _MULTI_DATAMODEL */
28077 28073
28078 28074 if (data == NULL) {
28079 28075 return (EINVAL);
28080 28076 }
28081 28077
28082 28078 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28083 28079 (un->un_state == SD_STATE_OFFLINE)) {
28084 28080 return (ENXIO);
28085 28081 }
28086 28082
28087 28083 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
28088 28084 "sd_read_mode1: entry: un:0x%p\n", un);
28089 28085
28090 28086 #ifdef _MULTI_DATAMODEL
28091 28087 switch (ddi_model_convert_from(flag & FMODELS)) {
28092 28088 case DDI_MODEL_ILP32:
28093 28089 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
28094 28090 return (EFAULT);
28095 28091 }
28096 28092 /* Convert the ILP32 uscsi data from the application to LP64 */
28097 28093 cdrom_read32tocdrom_read(cdrd32, mode1);
28098 28094 break;
28099 28095 case DDI_MODEL_NONE:
28100 28096 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) {
28101 28097 return (EFAULT);
28102 28098 }
28103 28099 }
28104 28100 #else /* ! _MULTI_DATAMODEL */
28105 28101 if (ddi_copyin(data, mode1, sizeof (struct cdrom_read), flag)) {
28106 28102 return (EFAULT);
28107 28103 }
28108 28104 #endif /* _MULTI_DATAMODEL */
28109 28105
28110 28106 ssc = sd_ssc_init(un);
28111 28107 rval = sd_send_scsi_READ(ssc, mode1->cdread_bufaddr,
28112 28108 mode1->cdread_buflen, mode1->cdread_lba, SD_PATH_STANDARD);
28113 28109 sd_ssc_fini(ssc);
28114 28110
28115 28111 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
28116 28112 "sd_read_mode1: exit: un:0x%p\n", un);
28117 28113
28118 28114 return (rval);
28119 28115 }
28120 28116
28121 28117
28122 28118 /*
28123 28119 * Function: sr_read_cd_mode2()
28124 28120 *
28125 28121 * Description: This routine is the driver entry point for handling CD-ROM
28126 28122 * ioctl read mode2 requests (CDROMREADMODE2) for devices that
28127 28123 * support the READ CD (0xBE) command or the 1st generation
28128 28124 * READ CD (0xD4) command.
28129 28125 *
28130 28126 * Arguments: dev - the device 'dev_t'
28131 28127 * data - pointer to user provided cd read structure specifying
28132 28128 * the lba buffer address and length.
28133 28129 * flag - this argument is a pass through to ddi_copyxxx()
28134 28130 * directly from the mode argument of ioctl().
28135 28131 *
28136 28132 * Return Code: the code returned by sd_send_scsi_cmd()
28137 28133 * EFAULT if ddi_copyxxx() fails
28138 28134 * ENXIO if fail ddi_get_soft_state
28139 28135 * EINVAL if data pointer is NULL
28140 28136 */
28141 28137
28142 28138 static int
28143 28139 sr_read_cd_mode2(dev_t dev, caddr_t data, int flag)
28144 28140 {
28145 28141 struct sd_lun *un;
28146 28142 struct uscsi_cmd *com;
28147 28143 struct cdrom_read mode2_struct;
28148 28144 struct cdrom_read *mode2 = &mode2_struct;
28149 28145 uchar_t cdb[CDB_GROUP5];
28150 28146 int nblocks;
28151 28147 int rval;
28152 28148 #ifdef _MULTI_DATAMODEL
28153 28149 /* To support ILP32 applications in an LP64 world */
28154 28150 struct cdrom_read32 cdrom_read32;
28155 28151 struct cdrom_read32 *cdrd32 = &cdrom_read32;
28156 28152 #endif /* _MULTI_DATAMODEL */
28157 28153
28158 28154 if (data == NULL) {
28159 28155 return (EINVAL);
28160 28156 }
28161 28157
28162 28158 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28163 28159 (un->un_state == SD_STATE_OFFLINE)) {
28164 28160 return (ENXIO);
28165 28161 }
28166 28162
28167 28163 #ifdef _MULTI_DATAMODEL
28168 28164 switch (ddi_model_convert_from(flag & FMODELS)) {
28169 28165 case DDI_MODEL_ILP32:
28170 28166 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
28171 28167 return (EFAULT);
28172 28168 }
28173 28169 /* Convert the ILP32 uscsi data from the application to LP64 */
28174 28170 cdrom_read32tocdrom_read(cdrd32, mode2);
28175 28171 break;
28176 28172 case DDI_MODEL_NONE:
28177 28173 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
28178 28174 return (EFAULT);
28179 28175 }
28180 28176 break;
28181 28177 }
28182 28178
28183 28179 #else /* ! _MULTI_DATAMODEL */
28184 28180 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
28185 28181 return (EFAULT);
28186 28182 }
28187 28183 #endif /* _MULTI_DATAMODEL */
28188 28184
28189 28185 bzero(cdb, sizeof (cdb));
28190 28186 if (un->un_f_cfg_read_cd_xd4 == TRUE) {
28191 28187 /* Read command supported by 1st generation atapi drives */
28192 28188 cdb[0] = SCMD_READ_CDD4;
28193 28189 } else {
28194 28190 /* Universal CD Access Command */
28195 28191 cdb[0] = SCMD_READ_CD;
28196 28192 }
28197 28193
28198 28194 /*
28199 28195 	 * Set expected sector type to: 2336 byte, Mode 2 Yellow Book
28200 28196 */
28201 28197 cdb[1] = CDROM_SECTOR_TYPE_MODE2;
28202 28198
28203 28199 /* set the start address */
28204 28200 cdb[2] = (uchar_t)((mode2->cdread_lba >> 24) & 0XFF);
28205 28201 cdb[3] = (uchar_t)((mode2->cdread_lba >> 16) & 0XFF);
28206 28202 cdb[4] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
28207 28203 cdb[5] = (uchar_t)(mode2->cdread_lba & 0xFF);
28208 28204
28209 28205 /* set the transfer length */
28210 28206 nblocks = mode2->cdread_buflen / 2336;
28211 28207 cdb[6] = (uchar_t)(nblocks >> 16);
28212 28208 cdb[7] = (uchar_t)(nblocks >> 8);
28213 28209 cdb[8] = (uchar_t)nblocks;
28214 28210
28215 28211 /* set the filter bits */
28216 28212 cdb[9] = CDROM_READ_CD_USERDATA;
28217 28213
28218 28214 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28219 28215 com->uscsi_cdb = (caddr_t)cdb;
28220 28216 com->uscsi_cdblen = sizeof (cdb);
28221 28217 com->uscsi_bufaddr = mode2->cdread_bufaddr;
28222 28218 com->uscsi_buflen = mode2->cdread_buflen;
28223 28219 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28224 28220
28225 28221 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
28226 28222 SD_PATH_STANDARD);
28227 28223 kmem_free(com, sizeof (*com));
28228 28224 return (rval);
28229 28225 }
28230 28226
28231 28227
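Note how the transfer length above is derived: a CDROMREADMODE2 buffer is treated as a whole number of 2336-byte Mode 2 sectors, and the resulting block count is packed big-endian into the 3-byte length field of the READ CD CDB (bytes 6-8). A sketch:

#include <stdint.h>

static void
set_read_cd_length(uint8_t *cdb, uint32_t buflen)
{
	uint32_t nblocks = buflen / 2336;	/* whole Mode 2 sectors */

	cdb[6] = (uint8_t)(nblocks >> 16);
	cdb[7] = (uint8_t)(nblocks >> 8);
	cdb[8] = (uint8_t)nblocks;
}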
28232 28228 /*
28233 28229 * Function: sr_read_mode2()
28234 28230 *
28235 28231 * Description: This routine is the driver entry point for handling CD-ROM
28236 28232 * ioctl read mode2 requests (CDROMREADMODE2) for devices that
28237 28233 * do not support the READ CD (0xBE) command.
28238 28234 *
28239 28235 * Arguments: dev - the device 'dev_t'
28240 28236 * data - pointer to user provided cd read structure specifying
28241 28237 * the lba buffer address and length.
28242 28238 * flag - this argument is a pass through to ddi_copyxxx()
28243 28239 * directly from the mode argument of ioctl().
28244 28240 *
28245 28241 * Return Code: the code returned by sd_send_scsi_cmd()
28246 28242 * EFAULT if ddi_copyxxx() fails
28247 28243 * ENXIO if fail ddi_get_soft_state
28248 28244 * EINVAL if data pointer is NULL
28249 28245 * EIO if fail to reset block size
28250 28246 * EAGAIN if commands are in progress in the driver
28251 28247 */
28252 28248
28253 28249 static int
28254 28250 sr_read_mode2(dev_t dev, caddr_t data, int flag)
28255 28251 {
28256 28252 struct sd_lun *un;
28257 28253 struct cdrom_read mode2_struct;
28258 28254 struct cdrom_read *mode2 = &mode2_struct;
28259 28255 int rval;
28260 28256 uint32_t restore_blksize;
28261 28257 struct uscsi_cmd *com;
28262 28258 uchar_t cdb[CDB_GROUP0];
28263 28259 int nblocks;
28264 28260
28265 28261 #ifdef _MULTI_DATAMODEL
28266 28262 /* To support ILP32 applications in an LP64 world */
28267 28263 struct cdrom_read32 cdrom_read32;
28268 28264 struct cdrom_read32 *cdrd32 = &cdrom_read32;
28269 28265 #endif /* _MULTI_DATAMODEL */
28270 28266
28271 28267 if (data == NULL) {
28272 28268 return (EINVAL);
28273 28269 }
28274 28270
28275 28271 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28276 28272 (un->un_state == SD_STATE_OFFLINE)) {
28277 28273 return (ENXIO);
28278 28274 }
28279 28275
28280 28276 /*
28281 28277 * Because this routine will update the device and driver block size
28282 28278 * being used we want to make sure there are no commands in progress.
28283 28279 * If commands are in progress the user will have to try again.
28284 28280 *
28285 28281 * We check for 1 instead of 0 because we increment un_ncmds_in_driver
28286 28282 * in sdioctl to protect commands from sdioctl through to the top of
28287 28283 * sd_uscsi_strategy. See sdioctl for details.
28288 28284 */
28289 28285 mutex_enter(SD_MUTEX(un));
28290 28286 if (un->un_ncmds_in_driver != 1) {
28291 28287 mutex_exit(SD_MUTEX(un));
28292 28288 return (EAGAIN);
28293 28289 }
28294 28290 mutex_exit(SD_MUTEX(un));
28295 28291
28296 28292 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
28297 28293 "sd_read_mode2: entry: un:0x%p\n", un);
28298 28294
28299 28295 #ifdef _MULTI_DATAMODEL
28300 28296 switch (ddi_model_convert_from(flag & FMODELS)) {
28301 28297 case DDI_MODEL_ILP32:
28302 28298 if (ddi_copyin(data, cdrd32, sizeof (*cdrd32), flag) != 0) {
28303 28299 return (EFAULT);
28304 28300 }
28305 28301 /* Convert the ILP32 uscsi data from the application to LP64 */
28306 28302 cdrom_read32tocdrom_read(cdrd32, mode2);
28307 28303 break;
28308 28304 case DDI_MODEL_NONE:
28309 28305 if (ddi_copyin(data, mode2, sizeof (*mode2), flag) != 0) {
28310 28306 return (EFAULT);
28311 28307 }
28312 28308 break;
28313 28309 }
28314 28310 #else /* ! _MULTI_DATAMODEL */
28315 28311 if (ddi_copyin(data, mode2, sizeof (*mode2), flag)) {
28316 28312 return (EFAULT);
28317 28313 }
28318 28314 #endif /* _MULTI_DATAMODEL */
28319 28315
28320 28316 /* Store the current target block size for restoration later */
28321 28317 restore_blksize = un->un_tgt_blocksize;
28322 28318
28323 28319 /* Change the device and soft state target block size to 2336 */
28324 28320 if (sr_sector_mode(dev, SD_MODE2_BLKSIZE) != 0) {
28325 28321 rval = EIO;
28326 28322 goto done;
28327 28323 }
28328 28324
28329 28325
28330 28326 bzero(cdb, sizeof (cdb));
28331 28327
28332 28328 /* set READ operation */
28333 28329 cdb[0] = SCMD_READ;
28334 28330
28335 28331 /* adjust lba for 2kbyte blocks from 512 byte blocks */
28336 28332 mode2->cdread_lba >>= 2;
28337 28333
28338 28334 /* set the start address */
28339 28335 cdb[1] = (uchar_t)((mode2->cdread_lba >> 16) & 0X1F);
28340 28336 cdb[2] = (uchar_t)((mode2->cdread_lba >> 8) & 0xFF);
28341 28337 cdb[3] = (uchar_t)(mode2->cdread_lba & 0xFF);
28342 28338
28343 28339 /* set the transfer length */
28344 28340 nblocks = mode2->cdread_buflen / 2336;
28345 28341 cdb[4] = (uchar_t)nblocks & 0xFF;
28346 28342
28347 28343 /* build command */
28348 28344 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28349 28345 com->uscsi_cdb = (caddr_t)cdb;
28350 28346 com->uscsi_cdblen = sizeof (cdb);
28351 28347 com->uscsi_bufaddr = mode2->cdread_bufaddr;
28352 28348 com->uscsi_buflen = mode2->cdread_buflen;
28353 28349 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28354 28350
28355 28351 /*
28356 28352 * Issue SCSI command with user space address for read buffer.
28357 28353 *
28358 28354 * This sends the command through main channel in the driver.
28359 28355 *
28360 28356 * Since this is accessed via an IOCTL call, we go through the
28361 28357 * standard path, so that if the device was powered down, then
28362 28358 * it would be 'awakened' to handle the command.
28363 28359 */
28364 28360 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
28365 28361 SD_PATH_STANDARD);
28366 28362
28367 28363 kmem_free(com, sizeof (*com));
28368 28364
28369 28365 /* Restore the device and soft state target block size */
28370 28366 if (sr_sector_mode(dev, restore_blksize) != 0) {
28371 28367 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28372 28368 "can't do switch back to mode 1\n");
28373 28369 /*
28374 28370 * If sd_send_scsi_READ succeeded we still need to report
28375 28371 * an error because we failed to reset the block size
28376 28372 */
28377 28373 if (rval == 0) {
28378 28374 rval = EIO;
28379 28375 }
28380 28376 }
28381 28377
28382 28378 done:
28383 28379 SD_TRACE(SD_LOG_ATTACH_DETACH, un,
28384 28380 "sd_read_mode2: exit: un:0x%p\n", un);
28385 28381
28386 28382 return (rval);
28387 28383 }
28388 28384
28389 28385
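The cdread_lba >>= 2 above is the unit conversion at the heart of this legacy path: the ioctl expresses the address in 512-byte blocks while the 6-byte READ command addresses 2048-byte CD blocks, and the Group 0 CDB only has a 21-bit LBA field. A sketch of the packing:

#include <stdint.h>

static void
pack_group0_lba(uint8_t *cdb, uint32_t lba512)
{
	uint32_t lba2k = lba512 >> 2;	/* 512-byte units -> 2048-byte units */

	cdb[1] = (uint8_t)((lba2k >> 16) & 0x1F);	/* only 5 bits fit in byte 1 */
	cdb[2] = (uint8_t)((lba2k >> 8) & 0xFF);
	cdb[3] = (uint8_t)(lba2k & 0xFF);
}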
28390 28386 /*
28391 28387 * Function: sr_sector_mode()
28392 28388 *
28393 28389 * Description: This utility function is used by sr_read_mode2 to set the target
28394 28390 * block size based on the user specified size. This is a legacy
28395 28391  *		implementation based upon a vendor specific mode page.
28396 28392 *
28397 28393 * Arguments: dev - the device 'dev_t'
28398 28394  *		blksize - the block size to set; either 2336 or
28399 28395  *			  512.
28400 28396 *
28401 28397 * Return Code: the code returned by sd_send_scsi_cmd()
28402 28398 * EFAULT if ddi_copyxxx() fails
28403 28399 * ENXIO if fail ddi_get_soft_state
28404 28400 * EINVAL if data pointer is NULL
28405 28401 */
28406 28402
28407 28403 static int
28408 28404 sr_sector_mode(dev_t dev, uint32_t blksize)
28409 28405 {
28410 28406 struct sd_lun *un;
28411 28407 uchar_t *sense;
28412 28408 uchar_t *select;
28413 28409 int rval;
28414 28410 sd_ssc_t *ssc;
28415 28411
28416 28412 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28417 28413 (un->un_state == SD_STATE_OFFLINE)) {
28418 28414 return (ENXIO);
28419 28415 }
28420 28416
28421 28417 sense = kmem_zalloc(20, KM_SLEEP);
28422 28418
28423 28419 /* Note: This is a vendor specific mode page (0x81) */
28424 28420 ssc = sd_ssc_init(un);
28425 28421 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, 20, 0x81,
28426 28422 SD_PATH_STANDARD);
28427 28423 sd_ssc_fini(ssc);
28428 28424 if (rval != 0) {
28429 28425 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
28430 28426 "sr_sector_mode: Mode Sense failed\n");
28431 28427 kmem_free(sense, 20);
28432 28428 return (rval);
28433 28429 }
28434 28430 select = kmem_zalloc(20, KM_SLEEP);
28435 28431 select[3] = 0x08;
28436 28432 select[10] = ((blksize >> 8) & 0xff);
28437 28433 select[11] = (blksize & 0xff);
28438 28434 select[12] = 0x01;
28439 28435 select[13] = 0x06;
28440 28436 select[14] = sense[14];
28441 28437 select[15] = sense[15];
28442 28438 if (blksize == SD_MODE2_BLKSIZE) {
28443 28439 select[14] |= 0x01;
28444 28440 }
28445 28441
28446 28442 ssc = sd_ssc_init(un);
28447 28443 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select, 20,
28448 28444 SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
28449 28445 sd_ssc_fini(ssc);
28450 28446 if (rval != 0) {
28451 28447 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
28452 28448 "sr_sector_mode: Mode Select failed\n");
28453 28449 } else {
28454 28450 /*
28455 28451 * Only update the softstate block size if we successfully
28456 28452 * changed the device block mode.
28457 28453 */
28458 28454 mutex_enter(SD_MUTEX(un));
28459 28455 sd_update_block_info(un, blksize, 0);
28460 28456 mutex_exit(SD_MUTEX(un));
28461 28457 }
28462 28458 kmem_free(sense, 20);
28463 28459 kmem_free(select, 20);
28464 28460 return (rval);
28465 28461 }
28466 28462
28467 28463
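Within the 20-byte parameter list built above, the block size travels big-endian in bytes 10-11, and a flag bit in byte 14 is ORed in when switching to the 2336-byte mode. This is a vendor specific page, so the following is only a sketch of the byte positions the routine touches, not a documented layout:

#include <stdint.h>

#define SD_MODE2_BLKSIZE	2336	/* assumed to match the driver's value */

static void
fill_sector_mode_blksize(uint8_t select[20], uint32_t blksize)
{
	select[10] = (uint8_t)((blksize >> 8) & 0xFF);	/* block size, MSB */
	select[11] = (uint8_t)(blksize & 0xFF);		/* block size, LSB */
	if (blksize == SD_MODE2_BLKSIZE)
		select[14] |= 0x01;			/* vendor density flag */
}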
28468 28464 /*
28469 28465 * Function: sr_read_cdda()
28470 28466 *
28471 28467 * Description: This routine is the driver entry point for handling CD-ROM
28472 28468  *		ioctl requests to return CD-DA or subcode data (CDROMCDDA). If
28473 28469  *		the target supports CDDA, these requests are handled via a
28474 28470  *		vendor specific command (0xD8). If the target does not support
28475 28471  *		CDDA, these requests are handled via the READ CD command (0xBE).
28476 28472 *
28477 28473 * Arguments: dev - the device 'dev_t'
28478 28474 * data - pointer to user provided CD-DA structure specifying
28479 28475 * the track starting address, transfer length, and
28480 28476 * subcode options.
28481 28477 * flag - this argument is a pass through to ddi_copyxxx()
28482 28478 * directly from the mode argument of ioctl().
28483 28479 *
28484 28480 * Return Code: the code returned by sd_send_scsi_cmd()
28485 28481 * EFAULT if ddi_copyxxx() fails
28486 28482 * ENXIO if fail ddi_get_soft_state
28487 28483 * EINVAL if invalid arguments are provided
28488 28484 * ENOTTY
28489 28485 */
28490 28486
28491 28487 static int
28492 28488 sr_read_cdda(dev_t dev, caddr_t data, int flag)
28493 28489 {
28494 28490 struct sd_lun *un;
28495 28491 struct uscsi_cmd *com;
28496 28492 struct cdrom_cdda *cdda;
28497 28493 int rval;
28498 28494 size_t buflen;
28499 28495 char cdb[CDB_GROUP5];
28500 28496
28501 28497 #ifdef _MULTI_DATAMODEL
28502 28498 /* To support ILP32 applications in an LP64 world */
28503 28499 struct cdrom_cdda32 cdrom_cdda32;
28504 28500 struct cdrom_cdda32 *cdda32 = &cdrom_cdda32;
28505 28501 #endif /* _MULTI_DATAMODEL */
28506 28502
28507 28503 if (data == NULL) {
28508 28504 return (EINVAL);
28509 28505 }
28510 28506
28511 28507 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
28512 28508 return (ENXIO);
28513 28509 }
28514 28510
28515 28511 cdda = kmem_zalloc(sizeof (struct cdrom_cdda), KM_SLEEP);
28516 28512
28517 28513 #ifdef _MULTI_DATAMODEL
28518 28514 switch (ddi_model_convert_from(flag & FMODELS)) {
28519 28515 case DDI_MODEL_ILP32:
28520 28516 if (ddi_copyin(data, cdda32, sizeof (*cdda32), flag)) {
28521 28517 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28522 28518 "sr_read_cdda: ddi_copyin Failed\n");
28523 28519 kmem_free(cdda, sizeof (struct cdrom_cdda));
28524 28520 return (EFAULT);
28525 28521 }
28526 28522 /* Convert the ILP32 uscsi data from the application to LP64 */
28527 28523 cdrom_cdda32tocdrom_cdda(cdda32, cdda);
28528 28524 break;
28529 28525 case DDI_MODEL_NONE:
28530 28526 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
28531 28527 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28532 28528 "sr_read_cdda: ddi_copyin Failed\n");
28533 28529 kmem_free(cdda, sizeof (struct cdrom_cdda));
28534 28530 return (EFAULT);
28535 28531 }
28536 28532 break;
28537 28533 }
28538 28534 #else /* ! _MULTI_DATAMODEL */
28539 28535 if (ddi_copyin(data, cdda, sizeof (struct cdrom_cdda), flag)) {
28540 28536 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28541 28537 "sr_read_cdda: ddi_copyin Failed\n");
28542 28538 kmem_free(cdda, sizeof (struct cdrom_cdda));
28543 28539 return (EFAULT);
28544 28540 }
28545 28541 #endif /* _MULTI_DATAMODEL */
28546 28542
28547 28543 /*
28548 28544 	 * Since MMC-2 allows at most 3 bytes for the transfer length,
28549 28545 	 * reject any length that does not fit in 3 bytes.
28550 28546 */
28551 28547 if ((cdda->cdda_length & 0xFF000000) != 0) {
28552 28548 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdda: "
28553 28549 "cdrom transfer length too large: %d (limit %d)\n",
28554 28550 cdda->cdda_length, 0xFFFFFF);
28555 28551 kmem_free(cdda, sizeof (struct cdrom_cdda));
28556 28552 return (EINVAL);
28557 28553 }
28558 28554
28559 28555 switch (cdda->cdda_subcode) {
28560 28556 case CDROM_DA_NO_SUBCODE:
28561 28557 buflen = CDROM_BLK_2352 * cdda->cdda_length;
28562 28558 break;
28563 28559 case CDROM_DA_SUBQ:
28564 28560 buflen = CDROM_BLK_2368 * cdda->cdda_length;
28565 28561 break;
28566 28562 case CDROM_DA_ALL_SUBCODE:
28567 28563 buflen = CDROM_BLK_2448 * cdda->cdda_length;
28568 28564 break;
28569 28565 case CDROM_DA_SUBCODE_ONLY:
28570 28566 buflen = CDROM_BLK_SUBCODE * cdda->cdda_length;
28571 28567 break;
28572 28568 default:
28573 28569 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28574 28570 "sr_read_cdda: Subcode '0x%x' Not Supported\n",
28575 28571 cdda->cdda_subcode);
28576 28572 kmem_free(cdda, sizeof (struct cdrom_cdda));
28577 28573 return (EINVAL);
28578 28574 }
28579 28575
28580 28576 /* Build and send the command */
28581 28577 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28582 28578 bzero(cdb, CDB_GROUP5);
28583 28579
28584 28580 if (un->un_f_cfg_cdda == TRUE) {
28585 28581 cdb[0] = (char)SCMD_READ_CD;
28586 28582 cdb[1] = 0x04;
28587 28583 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
28588 28584 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
28589 28585 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
28590 28586 cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
28591 28587 cdb[6] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
28592 28588 cdb[7] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
28593 28589 cdb[8] = ((cdda->cdda_length) & 0x000000ff);
28594 28590 cdb[9] = 0x10;
28595 28591 switch (cdda->cdda_subcode) {
28596 28592 case CDROM_DA_NO_SUBCODE :
28597 28593 cdb[10] = 0x0;
28598 28594 break;
28599 28595 case CDROM_DA_SUBQ :
28600 28596 cdb[10] = 0x2;
28601 28597 break;
28602 28598 case CDROM_DA_ALL_SUBCODE :
28603 28599 cdb[10] = 0x1;
28604 28600 break;
28605 28601 case CDROM_DA_SUBCODE_ONLY :
28606 28602 /* FALLTHROUGH */
28607 28603 default :
28608 28604 kmem_free(cdda, sizeof (struct cdrom_cdda));
28609 28605 kmem_free(com, sizeof (*com));
28610 28606 return (ENOTTY);
28611 28607 }
28612 28608 } else {
28613 28609 cdb[0] = (char)SCMD_READ_CDDA;
28614 28610 cdb[2] = (((cdda->cdda_addr) & 0xff000000) >> 24);
28615 28611 cdb[3] = (((cdda->cdda_addr) & 0x00ff0000) >> 16);
28616 28612 cdb[4] = (((cdda->cdda_addr) & 0x0000ff00) >> 8);
28617 28613 cdb[5] = ((cdda->cdda_addr) & 0x000000ff);
28618 28614 cdb[6] = (((cdda->cdda_length) & 0xff000000) >> 24);
28619 28615 cdb[7] = (((cdda->cdda_length) & 0x00ff0000) >> 16);
28620 28616 cdb[8] = (((cdda->cdda_length) & 0x0000ff00) >> 8);
28621 28617 cdb[9] = ((cdda->cdda_length) & 0x000000ff);
28622 28618 cdb[10] = cdda->cdda_subcode;
28623 28619 }
28624 28620
28625 28621 com->uscsi_cdb = cdb;
28626 28622 com->uscsi_cdblen = CDB_GROUP5;
28627 28623 com->uscsi_bufaddr = (caddr_t)cdda->cdda_data;
28628 28624 com->uscsi_buflen = buflen;
28629 28625 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28630 28626
28631 28627 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
28632 28628 SD_PATH_STANDARD);
28633 28629
28634 28630 kmem_free(cdda, sizeof (struct cdrom_cdda));
28635 28631 kmem_free(com, sizeof (*com));
28636 28632 return (rval);
28637 28633 }
28638 28634
28639 28635
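The buffer sizing above follows from the raw CD-DA frame layout: 2352 audio bytes per block, plus 16 bytes when only Q subcode is requested, plus all 96 subcode bytes for the full selection, or the 96 subcode bytes alone. A sketch, assuming the CDROM_DA_* values 0-3 are the conventional cdio.h encodings:

#include <stddef.h>
#include <stdint.h>

static size_t
cdda_frame_size(uint8_t subcode)
{
	switch (subcode) {
	case 0:  return (2352);	/* CDROM_DA_NO_SUBCODE: audio only */
	case 1:  return (2368);	/* CDROM_DA_SUBQ: audio + 16-byte Q subcode */
	case 2:  return (2448);	/* CDROM_DA_ALL_SUBCODE: audio + 96-byte subcode */
	case 3:  return (96);	/* CDROM_DA_SUBCODE_ONLY: subcode alone */
	default: return (0);	/* unsupported selection */
	}
}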
28640 28636 /*
28641 28637 * Function: sr_read_cdxa()
28642 28638 *
28643 28639 * Description: This routine is the driver entry point for handling CD-ROM
28644 28640 * ioctl requests to return CD-XA (Extended Architecture) data.
28645 28641 * (CDROMCDXA).
28646 28642 *
28647 28643 * Arguments: dev - the device 'dev_t'
28648 28644 * data - pointer to user provided CD-XA structure specifying
28649 28645 * the data starting address, transfer length, and format
28650 28646 * flag - this argument is a pass through to ddi_copyxxx()
28651 28647 * directly from the mode argument of ioctl().
28652 28648 *
28653 28649 * Return Code: the code returned by sd_send_scsi_cmd()
28654 28650 * EFAULT if ddi_copyxxx() fails
28655 28651 * ENXIO if fail ddi_get_soft_state
28656 28652 * EINVAL if data pointer is NULL
28657 28653 */
28658 28654
28659 28655 static int
28660 28656 sr_read_cdxa(dev_t dev, caddr_t data, int flag)
28661 28657 {
28662 28658 struct sd_lun *un;
28663 28659 struct uscsi_cmd *com;
28664 28660 struct cdrom_cdxa *cdxa;
28665 28661 int rval;
28666 28662 size_t buflen;
28667 28663 char cdb[CDB_GROUP5];
28668 28664 uchar_t read_flags;
28669 28665
28670 28666 #ifdef _MULTI_DATAMODEL
28671 28667 /* To support ILP32 applications in an LP64 world */
28672 28668 struct cdrom_cdxa32 cdrom_cdxa32;
28673 28669 struct cdrom_cdxa32 *cdxa32 = &cdrom_cdxa32;
28674 28670 #endif /* _MULTI_DATAMODEL */
28675 28671
28676 28672 if (data == NULL) {
28677 28673 return (EINVAL);
28678 28674 }
28679 28675
28680 28676 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
28681 28677 return (ENXIO);
28682 28678 }
28683 28679
28684 28680 cdxa = kmem_zalloc(sizeof (struct cdrom_cdxa), KM_SLEEP);
28685 28681
28686 28682 #ifdef _MULTI_DATAMODEL
28687 28683 switch (ddi_model_convert_from(flag & FMODELS)) {
28688 28684 case DDI_MODEL_ILP32:
28689 28685 if (ddi_copyin(data, cdxa32, sizeof (*cdxa32), flag)) {
28690 28686 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28691 28687 return (EFAULT);
28692 28688 }
28693 28689 /*
28694 28690 * Convert the ILP32 uscsi data from the
28695 28691 * application to LP64 for internal use.
28696 28692 */
28697 28693 cdrom_cdxa32tocdrom_cdxa(cdxa32, cdxa);
28698 28694 break;
28699 28695 case DDI_MODEL_NONE:
28700 28696 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
28701 28697 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28702 28698 return (EFAULT);
28703 28699 }
28704 28700 break;
28705 28701 }
28706 28702 #else /* ! _MULTI_DATAMODEL */
28707 28703 if (ddi_copyin(data, cdxa, sizeof (struct cdrom_cdxa), flag)) {
28708 28704 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28709 28705 return (EFAULT);
28710 28706 }
28711 28707 #endif /* _MULTI_DATAMODEL */
28712 28708
28713 28709 /*
28714 28710 	 * Since MMC-2 allows at most 3 bytes for the transfer length,
28715 28711 	 * reject any length that does not fit in 3 bytes.
28716 28712 */
28717 28713 if ((cdxa->cdxa_length & 0xFF000000) != 0) {
28718 28714 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN, "sr_read_cdxa: "
28719 28715 "cdrom transfer length too large: %d (limit %d)\n",
28720 28716 cdxa->cdxa_length, 0xFFFFFF);
28721 28717 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28722 28718 return (EINVAL);
28723 28719 }
28724 28720
28725 28721 switch (cdxa->cdxa_format) {
28726 28722 case CDROM_XA_DATA:
28727 28723 buflen = CDROM_BLK_2048 * cdxa->cdxa_length;
28728 28724 read_flags = 0x10;
28729 28725 break;
28730 28726 case CDROM_XA_SECTOR_DATA:
28731 28727 buflen = CDROM_BLK_2352 * cdxa->cdxa_length;
28732 28728 read_flags = 0xf8;
28733 28729 break;
28734 28730 case CDROM_XA_DATA_W_ERROR:
28735 28731 buflen = CDROM_BLK_2646 * cdxa->cdxa_length;
28736 28732 read_flags = 0xfc;
28737 28733 break;
28738 28734 default:
28739 28735 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
28740 28736 "sr_read_cdxa: Format '0x%x' Not Supported\n",
28741 28737 cdxa->cdxa_format);
28742 28738 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28743 28739 return (EINVAL);
28744 28740 }
28745 28741
28746 28742 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
28747 28743 bzero(cdb, CDB_GROUP5);
28748 28744 if (un->un_f_mmc_cap == TRUE) {
28749 28745 cdb[0] = (char)SCMD_READ_CD;
28750 28746 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
28751 28747 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
28752 28748 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
28753 28749 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
28754 28750 cdb[6] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
28755 28751 cdb[7] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
28756 28752 cdb[8] = ((cdxa->cdxa_length) & 0x000000ff);
28757 28753 cdb[9] = (char)read_flags;
28758 28754 } else {
28759 28755 /*
28760 28756 		 * Note: A vendor specific command (0xDB) is being used here to
28761 28757 * request a read of all subcodes.
28762 28758 */
28763 28759 cdb[0] = (char)SCMD_READ_CDXA;
28764 28760 cdb[2] = (((cdxa->cdxa_addr) & 0xff000000) >> 24);
28765 28761 cdb[3] = (((cdxa->cdxa_addr) & 0x00ff0000) >> 16);
28766 28762 cdb[4] = (((cdxa->cdxa_addr) & 0x0000ff00) >> 8);
28767 28763 cdb[5] = ((cdxa->cdxa_addr) & 0x000000ff);
28768 28764 cdb[6] = (((cdxa->cdxa_length) & 0xff000000) >> 24);
28769 28765 cdb[7] = (((cdxa->cdxa_length) & 0x00ff0000) >> 16);
28770 28766 cdb[8] = (((cdxa->cdxa_length) & 0x0000ff00) >> 8);
28771 28767 cdb[9] = ((cdxa->cdxa_length) & 0x000000ff);
28772 28768 cdb[10] = cdxa->cdxa_format;
28773 28769 }
28774 28770 com->uscsi_cdb = cdb;
28775 28771 com->uscsi_cdblen = CDB_GROUP5;
28776 28772 com->uscsi_bufaddr = (caddr_t)cdxa->cdxa_data;
28777 28773 com->uscsi_buflen = buflen;
28778 28774 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
28779 28775 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_USERSPACE,
28780 28776 SD_PATH_STANDARD);
28781 28777 kmem_free(cdxa, sizeof (struct cdrom_cdxa));
28782 28778 kmem_free(com, sizeof (*com));
28783 28779 return (rval);
28784 28780 }
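/*
 * Editor's sketch (illustrative only, not part of this change): a minimal
 * user-level caller of the CDROMCDXA ioctl handled above. It assumes the
 * struct cdrom_cdxa and CDROM_XA_DATA definitions from <sys/cdio.h>; the
 * caller's buffer must match the chosen format (2048 bytes per block for
 * CDROM_XA_DATA, per the switch above).
 *
 *	#include <sys/types.h>
 *	#include <sys/cdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	read_xa_blocks(int fd, uint_t lba, uint_t nblks, char *buf)
 *	{
 *		struct cdrom_cdxa xa;
 *
 *		xa.cdxa_addr = lba;
 *		xa.cdxa_length = nblks;		(must fit in 3 bytes)
 *		xa.cdxa_data = buf;		(nblks * 2048 bytes)
 *		xa.cdxa_format = CDROM_XA_DATA;
 *		return (ioctl(fd, CDROMCDXA, &xa));
 *	}
 */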
28785 28781
28786 28782
28787 28783 /*
28788 28784 * Function: sr_eject()
28789 28785 *
28790 28786 * Description: This routine is the driver entry point for handling CD-ROM
28791 28787 * eject ioctl requests (FDEJECT, DKIOCEJECT, CDROMEJECT)
28792 28788 *
28793 28789 * Arguments: dev - the device 'dev_t'
28794 28790 *
28795 28791 * Return Code: the code returned by sd_send_scsi_cmd()
28796 28792 */
28797 28793
28798 28794 static int
28799 28795 sr_eject(dev_t dev)
28800 28796 {
28801 28797 struct sd_lun *un;
28802 28798 int rval;
28803 28799 sd_ssc_t *ssc;
28804 28800
28805 28801 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
28806 28802 (un->un_state == SD_STATE_OFFLINE)) {
28807 28803 return (ENXIO);
28808 28804 }
28809 28805
28810 28806 /*
28811 28807 * To prevent race conditions with the eject
28812 28808 * command, keep track of an eject command as
28813 28809 * it progresses. If we are already handling
28814 28810 * an eject command in the driver for the given
28815 28811 	 * unit and another request to eject is received,
28816 28812 * immediately return EAGAIN so we don't lose
28817 28813 * the command if the current eject command fails.
28818 28814 */
28819 28815 mutex_enter(SD_MUTEX(un));
28820 28816 if (un->un_f_ejecting == TRUE) {
28821 28817 mutex_exit(SD_MUTEX(un));
28822 28818 return (EAGAIN);
28823 28819 }
28824 28820 un->un_f_ejecting = TRUE;
28825 28821 mutex_exit(SD_MUTEX(un));
28826 28822
28827 28823 ssc = sd_ssc_init(un);
28828 28824 rval = sd_send_scsi_DOORLOCK(ssc, SD_REMOVAL_ALLOW,
28829 28825 SD_PATH_STANDARD);
28830 28826 sd_ssc_fini(ssc);
28831 28827
28832 28828 if (rval != 0) {
28833 28829 mutex_enter(SD_MUTEX(un));
28834 28830 un->un_f_ejecting = FALSE;
28835 28831 mutex_exit(SD_MUTEX(un));
28836 28832 return (rval);
28837 28833 }
28838 28834
28839 28835 ssc = sd_ssc_init(un);
28840 28836 rval = sd_send_scsi_START_STOP_UNIT(ssc, SD_START_STOP,
28841 28837 SD_TARGET_EJECT, SD_PATH_STANDARD);
28842 28838 sd_ssc_fini(ssc);
28843 28839
28844 28840 if (rval == 0) {
28845 28841 mutex_enter(SD_MUTEX(un));
28846 28842 sr_ejected(un);
28847 28843 un->un_mediastate = DKIO_EJECTED;
28848 28844 un->un_f_ejecting = FALSE;
28849 28845 cv_broadcast(&un->un_state_cv);
28850 28846 mutex_exit(SD_MUTEX(un));
28851 28847 } else {
28852 28848 mutex_enter(SD_MUTEX(un));
28853 28849 un->un_f_ejecting = FALSE;
28854 28850 mutex_exit(SD_MUTEX(un));
28855 28851 }
28856 28852 return (rval);
28857 28853 }
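/*
 * Editor's sketch (illustrative only): driving the eject path above from
 * user level and honoring the EAGAIN that is returned while another eject
 * for the same unit is still in progress. DKIOCEJECT comes from
 * <sys/dkio.h>.
 *
 *	#include <sys/dkio.h>
 *	#include <unistd.h>
 *	#include <errno.h>
 *
 *	int
 *	eject_media(int fd)
 *	{
 *		int rv;
 *
 *		while ((rv = ioctl(fd, DKIOCEJECT, 0)) == -1 &&
 *		    errno == EAGAIN)
 *			(void) sleep(1);
 *		return (rv);
 *	}
 */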
28858 28854
28859 28855
28860 28856 /*
28861 28857 * Function: sr_ejected()
28862 28858 *
28863 28859 * Description: This routine updates the soft state structure to invalidate the
28864 28860 * geometry information after the media has been ejected or a
28865 28861 * media eject has been detected.
28866 28862 *
28867 28863 * Arguments: un - driver soft state (unit) structure
28868 28864 */
28869 28865
28870 28866 static void
28871 28867 sr_ejected(struct sd_lun *un)
28872 28868 {
28873 28869 struct sd_errstats *stp;
28874 28870
28875 28871 ASSERT(un != NULL);
28876 28872 ASSERT(mutex_owned(SD_MUTEX(un)));
28877 28873
28878 28874 un->un_f_blockcount_is_valid = FALSE;
28879 28875 un->un_f_tgt_blocksize_is_valid = FALSE;
28880 28876 mutex_exit(SD_MUTEX(un));
28881 28877 cmlb_invalidate(un->un_cmlbhandle, (void *)SD_PATH_DIRECT_PRIORITY);
28882 28878 mutex_enter(SD_MUTEX(un));
28883 28879
28884 28880 if (un->un_errstats != NULL) {
28885 28881 stp = (struct sd_errstats *)un->un_errstats->ks_data;
28886 28882 stp->sd_capacity.value.ui64 = 0;
28887 28883 }
28888 28884 }
28889 28885
28890 28886
28891 28887 /*
28892 28888 * Function: sr_check_wp()
28893 28889 *
28894 28890 	 * Description: This routine checks the write protection of removable
28895 28891 	 *		media disks and hotpluggable devices via the write protect
28896 28892 	 *		bit of the Mode Page Header device specific field. Some
28897 28893 	 *		devices choke on an unsupported mode page. To work around
28898 28894 	 *		this issue, this routine uses the 0x3f mode page (request
28899 28895 	 *		for all pages) for all device types.
28900 28896 *
28901 28897 * Arguments: dev - the device 'dev_t'
28902 28898 *
28903 28899 * Return Code: int indicating if the device is write protected (1) or not (0)
28904 28900 *
28905 28901 * Context: Kernel thread.
28906 28902 *
28907 28903 */
28908 28904
28909 28905 static int
28910 28906 sr_check_wp(dev_t dev)
28911 28907 {
28912 28908 struct sd_lun *un;
28913 28909 uchar_t device_specific;
28914 28910 uchar_t *sense;
28915 28911 int hdrlen;
28916 28912 int rval = FALSE;
28917 28913 int status;
28918 28914 sd_ssc_t *ssc;
28919 28915
28920 28916 /*
28921 28917 * Note: The return codes for this routine should be reworked to
28922 28918 * properly handle the case of a NULL softstate.
28923 28919 */
28924 28920 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL) {
28925 28921 return (FALSE);
28926 28922 }
28927 28923
28928 28924 if (un->un_f_cfg_is_atapi == TRUE) {
28929 28925 /*
28930 28926 * The mode page contents are not required; set the allocation
28931 28927 * length for the mode page header only
28932 28928 */
28933 28929 hdrlen = MODE_HEADER_LENGTH_GRP2;
28934 28930 sense = kmem_zalloc(hdrlen, KM_SLEEP);
28935 28931 ssc = sd_ssc_init(un);
28936 28932 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense, hdrlen,
28937 28933 MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
28938 28934 sd_ssc_fini(ssc);
28939 28935 if (status != 0)
28940 28936 goto err_exit;
28941 28937 device_specific =
28942 28938 ((struct mode_header_grp2 *)sense)->device_specific;
28943 28939 } else {
28944 28940 hdrlen = MODE_HEADER_LENGTH;
28945 28941 sense = kmem_zalloc(hdrlen, KM_SLEEP);
28946 28942 ssc = sd_ssc_init(un);
28947 28943 status = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense, hdrlen,
28948 28944 MODEPAGE_ALLPAGES, SD_PATH_STANDARD);
28949 28945 sd_ssc_fini(ssc);
28950 28946 if (status != 0)
28951 28947 goto err_exit;
28952 28948 device_specific =
28953 28949 ((struct mode_header *)sense)->device_specific;
28954 28950 }
28955 28951
28956 28952
28957 28953 /*
28958 28954 	 * Report the write protect bit. If the mode sense above failed
28959 28955 	 * we branched to err_exit with rval still FALSE; not all disks
28960 28956 	 * understand this query, so assume such devices are not writable.
28961 28957 */
28962 28958 if (device_specific & WRITE_PROTECT) {
28963 28959 rval = TRUE;
28964 28960 }
28965 28961
28966 28962 err_exit:
28967 28963 kmem_free(sense, hdrlen);
28968 28964 return (rval);
28969 28965 }
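/*
 * Editor's note: the two branches above differ only in where the device
 * specific byte sits in the returned header. A minimal sketch of the
 * shared decode step, assuming WRITE_PROTECT masks the write protect bit
 * as used above:
 *
 *	static int
 *	wp_from_mode_header(uchar_t *sense, int is_atapi)
 *	{
 *		uchar_t ds;
 *
 *		if (is_atapi)
 *			ds = ((struct mode_header_grp2 *)sense)->
 *			    device_specific;
 *		else
 *			ds = ((struct mode_header *)sense)->device_specific;
 *		return ((ds & WRITE_PROTECT) ? TRUE : FALSE);
 *	}
 */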
28970 28966
28971 28967 /*
28972 28968 * Function: sr_volume_ctrl()
28973 28969 *
28974 28970 * Description: This routine is the driver entry point for handling CD-ROM
28975 28971 * audio output volume ioctl requests. (CDROMVOLCTRL)
28976 28972 *
28977 28973 * Arguments: dev - the device 'dev_t'
28978 28974 * data - pointer to user audio volume control structure
28979 28975 * flag - this argument is a pass through to ddi_copyxxx()
28980 28976 * directly from the mode argument of ioctl().
28981 28977 *
28982 28978 * Return Code: the code returned by sd_send_scsi_cmd()
28983 28979 * EFAULT if ddi_copyxxx() fails
28984 28980 	 *		ENXIO if ddi_get_soft_state fails
28985 28981 * EINVAL if data pointer is NULL
28986 28982 *
28987 28983 */
28988 28984
28989 28985 static int
28990 28986 sr_volume_ctrl(dev_t dev, caddr_t data, int flag)
28991 28987 {
28992 28988 struct sd_lun *un;
28993 28989 struct cdrom_volctrl volume;
28994 28990 struct cdrom_volctrl *vol = &volume;
28995 28991 uchar_t *sense_page;
28996 28992 uchar_t *select_page;
28997 28993 uchar_t *sense;
28998 28994 uchar_t *select;
28999 28995 int sense_buflen;
29000 28996 int select_buflen;
29001 28997 int rval;
29002 28998 sd_ssc_t *ssc;
29003 28999
29004 29000 if (data == NULL) {
29005 29001 return (EINVAL);
29006 29002 }
29007 29003
29008 29004 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
29009 29005 (un->un_state == SD_STATE_OFFLINE)) {
29010 29006 return (ENXIO);
29011 29007 }
29012 29008
29013 29009 if (ddi_copyin(data, vol, sizeof (struct cdrom_volctrl), flag)) {
29014 29010 return (EFAULT);
29015 29011 }
29016 29012
29017 29013 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
29018 29014 struct mode_header_grp2 *sense_mhp;
29019 29015 struct mode_header_grp2 *select_mhp;
29020 29016 int bd_len;
29021 29017
29022 29018 sense_buflen = MODE_PARAM_LENGTH_GRP2 + MODEPAGE_AUDIO_CTRL_LEN;
29023 29019 select_buflen = MODE_HEADER_LENGTH_GRP2 +
29024 29020 MODEPAGE_AUDIO_CTRL_LEN;
29025 29021 sense = kmem_zalloc(sense_buflen, KM_SLEEP);
29026 29022 select = kmem_zalloc(select_buflen, KM_SLEEP);
29027 29023 ssc = sd_ssc_init(un);
29028 29024 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP1, sense,
29029 29025 sense_buflen, MODEPAGE_AUDIO_CTRL,
29030 29026 SD_PATH_STANDARD);
29031 29027 sd_ssc_fini(ssc);
29032 29028
29033 29029 if (rval != 0) {
29034 29030 SD_ERROR(SD_LOG_IOCTL_RMMEDIA, un,
29035 29031 "sr_volume_ctrl: Mode Sense Failed\n");
29036 29032 kmem_free(sense, sense_buflen);
29037 29033 kmem_free(select, select_buflen);
29038 29034 return (rval);
29039 29035 }
29040 29036 sense_mhp = (struct mode_header_grp2 *)sense;
29041 29037 select_mhp = (struct mode_header_grp2 *)select;
29042 29038 bd_len = (sense_mhp->bdesc_length_hi << 8) |
29043 29039 sense_mhp->bdesc_length_lo;
29044 29040 if (bd_len > MODE_BLK_DESC_LENGTH) {
29045 29041 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29046 29042 "sr_volume_ctrl: Mode Sense returned invalid "
29047 29043 "block descriptor length\n");
29048 29044 kmem_free(sense, sense_buflen);
29049 29045 kmem_free(select, select_buflen);
29050 29046 return (EIO);
29051 29047 }
29052 29048 sense_page = (uchar_t *)
29053 29049 (sense + MODE_HEADER_LENGTH_GRP2 + bd_len);
29054 29050 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH_GRP2);
29055 29051 select_mhp->length_msb = 0;
29056 29052 select_mhp->length_lsb = 0;
29057 29053 select_mhp->bdesc_length_hi = 0;
29058 29054 select_mhp->bdesc_length_lo = 0;
29059 29055 } else {
29060 29056 struct mode_header *sense_mhp, *select_mhp;
29061 29057
29062 29058 sense_buflen = MODE_PARAM_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
29063 29059 select_buflen = MODE_HEADER_LENGTH + MODEPAGE_AUDIO_CTRL_LEN;
29064 29060 sense = kmem_zalloc(sense_buflen, KM_SLEEP);
29065 29061 select = kmem_zalloc(select_buflen, KM_SLEEP);
29066 29062 ssc = sd_ssc_init(un);
29067 29063 rval = sd_send_scsi_MODE_SENSE(ssc, CDB_GROUP0, sense,
29068 29064 sense_buflen, MODEPAGE_AUDIO_CTRL,
29069 29065 SD_PATH_STANDARD);
29070 29066 sd_ssc_fini(ssc);
29071 29067
29072 29068 if (rval != 0) {
29073 29069 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29074 29070 "sr_volume_ctrl: Mode Sense Failed\n");
29075 29071 kmem_free(sense, sense_buflen);
29076 29072 kmem_free(select, select_buflen);
29077 29073 return (rval);
29078 29074 }
29079 29075 sense_mhp = (struct mode_header *)sense;
29080 29076 select_mhp = (struct mode_header *)select;
29081 29077 if (sense_mhp->bdesc_length > MODE_BLK_DESC_LENGTH) {
29082 29078 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29083 29079 "sr_volume_ctrl: Mode Sense returned invalid "
29084 29080 "block descriptor length\n");
29085 29081 kmem_free(sense, sense_buflen);
29086 29082 kmem_free(select, select_buflen);
29087 29083 return (EIO);
29088 29084 }
29089 29085 sense_page = (uchar_t *)
29090 29086 (sense + MODE_HEADER_LENGTH + sense_mhp->bdesc_length);
29091 29087 select_page = (uchar_t *)(select + MODE_HEADER_LENGTH);
29092 29088 select_mhp->length = 0;
29093 29089 select_mhp->bdesc_length = 0;
29094 29090 }
29095 29091 /*
29096 29092 	 * Note: An audio control data structure could be created and overlaid
29097 29093 * on the following in place of the array indexing method implemented.
29098 29094 */
29099 29095
29100 29096 /* Build the select data for the user volume data */
29101 29097 select_page[0] = MODEPAGE_AUDIO_CTRL;
29102 29098 select_page[1] = 0xE;
29103 29099 /* Set the immediate bit */
29104 29100 select_page[2] = 0x04;
29105 29101 /* Zero out reserved fields */
29106 29102 select_page[3] = 0x00;
29107 29103 select_page[4] = 0x00;
29108 29104 /* Return sense data for fields not to be modified */
29109 29105 select_page[5] = sense_page[5];
29110 29106 select_page[6] = sense_page[6];
29111 29107 select_page[7] = sense_page[7];
29112 29108 /* Set the user specified volume levels for channel 0 and 1 */
29113 29109 select_page[8] = 0x01;
29114 29110 select_page[9] = vol->channel0;
29115 29111 select_page[10] = 0x02;
29116 29112 select_page[11] = vol->channel1;
29117 29113 	/* Channels 2 and 3 are currently unsupported, so return sense data */
29118 29114 select_page[12] = sense_page[12];
29119 29115 select_page[13] = sense_page[13];
29120 29116 select_page[14] = sense_page[14];
29121 29117 select_page[15] = sense_page[15];
29122 29118
29123 29119 ssc = sd_ssc_init(un);
29124 29120 if ((un->un_f_cfg_is_atapi == TRUE) || (un->un_f_mmc_cap == TRUE)) {
29125 29121 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP1, select,
29126 29122 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
29127 29123 } else {
29128 29124 rval = sd_send_scsi_MODE_SELECT(ssc, CDB_GROUP0, select,
29129 29125 select_buflen, SD_DONTSAVE_PAGE, SD_PATH_STANDARD);
29130 29126 }
29131 29127 sd_ssc_fini(ssc);
29132 29128
29133 29129 kmem_free(sense, sense_buflen);
29134 29130 kmem_free(select, select_buflen);
29135 29131 return (rval);
29136 29132 }
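/*
 * Editor's sketch (illustrative only): setting both supported channels to
 * one level via the CDROMVOLCTRL ioctl handled above, assuming struct
 * cdrom_volctrl from <sys/cdio.h>. Channels 2 and 3 are ignored by this
 * driver, which echoes the current sense values for them.
 *
 *	#include <sys/cdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	set_cd_volume(int fd, unsigned char level)
 *	{
 *		struct cdrom_volctrl vc;
 *
 *		vc.channel0 = level;
 *		vc.channel1 = level;
 *		vc.channel2 = vc.channel3 = 0;
 *		return (ioctl(fd, CDROMVOLCTRL, &vc));
 *	}
 */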
29137 29133
29138 29134
29139 29135 /*
29140 29136 * Function: sr_read_sony_session_offset()
29141 29137 *
29142 29138 * Description: This routine is the driver entry point for handling CD-ROM
29143 29139 * ioctl requests for session offset information. (CDROMREADOFFSET)
29144 29140 * The address of the first track in the last session of a
29145 29141 * multi-session CD-ROM is returned
29146 29142 *
29147 29143 * Note: This routine uses a vendor specific key value in the
29148 29144 * command control field without implementing any vendor check here
29149 29145 * or in the ioctl routine.
29150 29146 *
29151 29147 * Arguments: dev - the device 'dev_t'
29152 29148 * data - pointer to an int to hold the requested address
29153 29149 * flag - this argument is a pass through to ddi_copyxxx()
29154 29150 * directly from the mode argument of ioctl().
29155 29151 *
29156 29152 * Return Code: the code returned by sd_send_scsi_cmd()
29157 29153 * EFAULT if ddi_copyxxx() fails
29158 29154 	 *		ENXIO if ddi_get_soft_state fails
29159 29155 * EINVAL if data pointer is NULL
29160 29156 */
29161 29157
29162 29158 static int
29163 29159 sr_read_sony_session_offset(dev_t dev, caddr_t data, int flag)
29164 29160 {
29165 29161 struct sd_lun *un;
29166 29162 struct uscsi_cmd *com;
29167 29163 caddr_t buffer;
29168 29164 char cdb[CDB_GROUP1];
29169 29165 int session_offset = 0;
29170 29166 int rval;
29171 29167
29172 29168 if (data == NULL) {
29173 29169 return (EINVAL);
29174 29170 }
29175 29171
29176 29172 if ((un = ddi_get_soft_state(sd_state, SDUNIT(dev))) == NULL ||
29177 29173 (un->un_state == SD_STATE_OFFLINE)) {
29178 29174 return (ENXIO);
29179 29175 }
29180 29176
29181 29177 buffer = kmem_zalloc((size_t)SONY_SESSION_OFFSET_LEN, KM_SLEEP);
29182 29178 bzero(cdb, CDB_GROUP1);
29183 29179 cdb[0] = SCMD_READ_TOC;
29184 29180 /*
29185 29181 * Bytes 7 & 8 are the 12 byte allocation length for a single entry.
29186 29182 * (4 byte TOC response header + 8 byte response data)
29187 29183 */
29188 29184 cdb[8] = SONY_SESSION_OFFSET_LEN;
29189 29185 /* Byte 9 is the control byte. A vendor specific value is used */
29190 29186 cdb[9] = SONY_SESSION_OFFSET_KEY;
29191 29187 com = kmem_zalloc(sizeof (*com), KM_SLEEP);
29192 29188 com->uscsi_cdb = cdb;
29193 29189 com->uscsi_cdblen = CDB_GROUP1;
29194 29190 com->uscsi_bufaddr = buffer;
29195 29191 com->uscsi_buflen = SONY_SESSION_OFFSET_LEN;
29196 29192 com->uscsi_flags = USCSI_DIAGNOSE|USCSI_SILENT|USCSI_READ;
29197 29193
29198 29194 rval = sd_send_scsi_cmd(dev, com, FKIOCTL, UIO_SYSSPACE,
29199 29195 SD_PATH_STANDARD);
29200 29196 if (rval != 0) {
29201 29197 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
29202 29198 kmem_free(com, sizeof (*com));
29203 29199 return (rval);
29204 29200 }
29205 29201 if (buffer[1] == SONY_SESSION_OFFSET_VALID) {
29206 29202 session_offset =
29207 29203 ((uchar_t)buffer[8] << 24) + ((uchar_t)buffer[9] << 16) +
29208 29204 ((uchar_t)buffer[10] << 8) + ((uchar_t)buffer[11]);
29209 29205 /*
29210 29206 		 * The drive returns the offset in current lbasize blocks.
29211 29207 		 * Convert to 2k blocks to return to the user.
29212 29208 */
29213 29209 if (un->un_tgt_blocksize == CDROM_BLK_512) {
29214 29210 session_offset >>= 2;
29215 29211 } else if (un->un_tgt_blocksize == CDROM_BLK_1024) {
29216 29212 session_offset >>= 1;
29217 29213 }
29218 29214 }
29219 29215
29220 29216 if (ddi_copyout(&session_offset, data, sizeof (int), flag) != 0) {
29221 29217 rval = EFAULT;
29222 29218 }
29223 29219
29224 29220 kmem_free(buffer, SONY_SESSION_OFFSET_LEN);
29225 29221 kmem_free(com, sizeof (*com));
29226 29222 return (rval);
29227 29223 }
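/*
 * Editor's note: a worked example of the block size conversion above. With
 * a current lbasize of 512 bytes, a reported session offset of 100 blocks
 * is 100 * 512 = 51200 bytes, i.e. 51200 / 2048 = 25 of the 2k blocks the
 * user expects, hence "session_offset >>= 2". A 1024-byte lbasize needs
 * only a one-bit shift, and a native 2048-byte lbasize needs no conversion.
 */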
29228 29224
29229 29225
29230 29226 /*
29231 29227 * Function: sd_wm_cache_constructor()
29232 29228 *
29233 29229 * Description: Cache Constructor for the wmap cache for the read/modify/write
29234 29230 * devices.
29235 29231 *
29236 29232 * Arguments: wm - A pointer to the sd_w_map to be initialized.
29237 29233 * un - sd_lun structure for the device.
29238 29234 * flag - the km flags passed to constructor
29239 29235 *
29240 29236 * Return Code: 0 on success.
29241 29237 * -1 on failure.
29242 29238 */
29243 29239
29244 29240 /*ARGSUSED*/
29245 29241 static int
29246 29242 sd_wm_cache_constructor(void *wm, void *un, int flags)
29247 29243 {
29248 29244 bzero(wm, sizeof (struct sd_w_map));
29249 29245 cv_init(&((struct sd_w_map *)wm)->wm_avail, NULL, CV_DRIVER, NULL);
29250 29246 return (0);
29251 29247 }
29252 29248
29253 29249
29254 29250 /*
29255 29251 * Function: sd_wm_cache_destructor()
29256 29252 *
29257 29253 * Description: Cache destructor for the wmap cache for the read/modify/write
29258 29254 * devices.
29259 29255 *
29260 29256 	 * Arguments:	wm	- A pointer to the sd_w_map to be destroyed.
29261 29257 * un - sd_lun structure for the device.
29262 29258 */
29263 29259 /*ARGSUSED*/
29264 29260 static void
29265 29261 sd_wm_cache_destructor(void *wm, void *un)
29266 29262 {
29267 29263 cv_destroy(&((struct sd_w_map *)wm)->wm_avail);
29268 29264 }
29269 29265
29270 29266
29271 29267 /*
29272 29268 * Function: sd_range_lock()
29273 29269 *
29274 29270 * Description: Lock the range of blocks specified as parameter to ensure
29275 29271 	 *		that a read-modify-write is atomic and no other I/O writes
29276 29272 	 *		to the same location. The range is specified in terms
29277 29273 	 *		of start and end blocks. Block numbers are the actual
29278 29274 	 *		media block numbers and not system block numbers.
29279 29275 *
29280 29276 * Arguments: un - sd_lun structure for the device.
29281 29277 * startb - The starting block number
29282 29278 * endb - The end block number
29283 29279 * typ - type of i/o - simple/read_modify_write
29284 29280 *
29285 29281 * Return Code: wm - pointer to the wmap structure.
29286 29282 *
29287 29283 * Context: This routine can sleep.
29288 29284 */
29289 29285
29290 29286 static struct sd_w_map *
29291 29287 sd_range_lock(struct sd_lun *un, daddr_t startb, daddr_t endb, ushort_t typ)
29292 29288 {
29293 29289 struct sd_w_map *wmp = NULL;
29294 29290 struct sd_w_map *sl_wmp = NULL;
29295 29291 struct sd_w_map *tmp_wmp;
29296 29292 wm_state state = SD_WM_CHK_LIST;
29297 29293
29298 29294
29299 29295 ASSERT(un != NULL);
29300 29296 ASSERT(!mutex_owned(SD_MUTEX(un)));
29301 29297
29302 29298 mutex_enter(SD_MUTEX(un));
29303 29299
29304 29300 while (state != SD_WM_DONE) {
29305 29301
29306 29302 switch (state) {
29307 29303 case SD_WM_CHK_LIST:
29308 29304 /*
29309 29305 * This is the starting state. Check the wmap list
29310 29306 * to see if the range is currently available.
29311 29307 */
29312 29308 if (!(typ & SD_WTYPE_RMW) && !(un->un_rmw_count)) {
29313 29309 /*
29314 29310 * If this is a simple write and no rmw
29315 29311 * i/o is pending then try to lock the
29316 29312 * range as the range should be available.
29317 29313 */
29318 29314 state = SD_WM_LOCK_RANGE;
29319 29315 } else {
29320 29316 tmp_wmp = sd_get_range(un, startb, endb);
29321 29317 if (tmp_wmp != NULL) {
29322 29318 if ((wmp != NULL) && ONLIST(un, wmp)) {
29323 29319 /*
29324 29320 * Should not keep onlist wmps
29325 29321 					 * while waiting; this macro
29326 29322 * will also do wmp = NULL;
29327 29323 */
29328 29324 FREE_ONLIST_WMAP(un, wmp);
29329 29325 }
29330 29326 /*
29331 29327 * sl_wmp is the wmap on which wait
29332 29328 * is done, since the tmp_wmp points
29333 29329 * to the inuse wmap, set sl_wmp to
29334 29330 * tmp_wmp and change the state to sleep
29335 29331 */
29336 29332 sl_wmp = tmp_wmp;
29337 29333 state = SD_WM_WAIT_MAP;
29338 29334 } else {
29339 29335 state = SD_WM_LOCK_RANGE;
29340 29336 }
29341 29337
29342 29338 }
29343 29339 break;
29344 29340
29345 29341 case SD_WM_LOCK_RANGE:
29346 29342 ASSERT(un->un_wm_cache);
29347 29343 /*
29348 29344 			 * The range needs to be locked; try to get a wmap.
29349 29345 			 * First attempt it with KM_NOSLEEP, as we want to avoid
29350 29346 			 * a sleep if possible, since we would have to release
29351 29347 			 * the sd mutex if we had to sleep.
29352 29348 */
29353 29349 if (wmp == NULL)
29354 29350 wmp = kmem_cache_alloc(un->un_wm_cache,
29355 29351 KM_NOSLEEP);
29356 29352 if (wmp == NULL) {
29357 29353 mutex_exit(SD_MUTEX(un));
29358 29354 _NOTE(DATA_READABLE_WITHOUT_LOCK
29359 29355 (sd_lun::un_wm_cache))
29360 29356 wmp = kmem_cache_alloc(un->un_wm_cache,
29361 29357 KM_SLEEP);
29362 29358 mutex_enter(SD_MUTEX(un));
29363 29359 /*
29364 29360 * we released the mutex so recheck and go to
29365 29361 * check list state.
29366 29362 */
29367 29363 state = SD_WM_CHK_LIST;
29368 29364 } else {
29369 29365 /*
29370 29366 * We exit out of state machine since we
29371 29367 * have the wmap. Do the housekeeping first.
29372 29368 * place the wmap on the wmap list if it is not
29373 29369 * on it already and then set the state to done.
29374 29370 */
29375 29371 wmp->wm_start = startb;
29376 29372 wmp->wm_end = endb;
29377 29373 wmp->wm_flags = typ | SD_WM_BUSY;
29378 29374 if (typ & SD_WTYPE_RMW) {
29379 29375 un->un_rmw_count++;
29380 29376 }
29381 29377 /*
29382 29378 * If not already on the list then link
29383 29379 */
29384 29380 if (!ONLIST(un, wmp)) {
29385 29381 wmp->wm_next = un->un_wm;
29386 29382 wmp->wm_prev = NULL;
29387 29383 if (wmp->wm_next)
29388 29384 wmp->wm_next->wm_prev = wmp;
29389 29385 un->un_wm = wmp;
29390 29386 }
29391 29387 state = SD_WM_DONE;
29392 29388 }
29393 29389 break;
29394 29390
29395 29391 case SD_WM_WAIT_MAP:
29396 29392 ASSERT(sl_wmp->wm_flags & SD_WM_BUSY);
29397 29393 /*
29398 29394 * Wait is done on sl_wmp, which is set in the
29399 29395 * check_list state.
29400 29396 */
29401 29397 sl_wmp->wm_wanted_count++;
29402 29398 cv_wait(&sl_wmp->wm_avail, SD_MUTEX(un));
29403 29399 sl_wmp->wm_wanted_count--;
29404 29400 /*
29405 29401 * We can reuse the memory from the completed sl_wmp
29406 29402 			 * lock range for our new lock, but only if no one is
29407 29403 * waiting for it.
29408 29404 */
29409 29405 ASSERT(!(sl_wmp->wm_flags & SD_WM_BUSY));
29410 29406 if (sl_wmp->wm_wanted_count == 0) {
29411 29407 if (wmp != NULL)
29412 29408 CHK_N_FREEWMP(un, wmp);
29413 29409 wmp = sl_wmp;
29414 29410 }
29415 29411 sl_wmp = NULL;
29416 29412 /*
29417 29413 * After waking up, need to recheck for availability of
29418 29414 * range.
29419 29415 */
29420 29416 state = SD_WM_CHK_LIST;
29421 29417 break;
29422 29418
29423 29419 default:
29424 29420 panic("sd_range_lock: "
29425 29421 "Unknown state %d in sd_range_lock", state);
29426 29422 /*NOTREACHED*/
29427 29423 } /* switch(state) */
29428 29424
29429 29425 } /* while(state != SD_WM_DONE) */
29430 29426
29431 29427 mutex_exit(SD_MUTEX(un));
29432 29428
29433 29429 ASSERT(wmp != NULL);
29434 29430
29435 29431 return (wmp);
29436 29432 }
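/*
 * Editor's note: the intended pairing with sd_range_unlock() below, shown
 * as a minimal sketch of the read-modify-write flow:
 *
 *	wm = sd_range_lock(un, start_blk, end_blk, SD_WTYPE_RMW);
 *	(read the target blocks, overlay the new data, write them back)
 *	sd_range_unlock(un, wm);
 *
 * Simple writes pass SD_WTYPE_SIMPLE instead and, as the first state above
 * shows, skip the overlap scan entirely when no RMW I/O is pending.
 */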
29437 29433
29438 29434
29439 29435 /*
29440 29436 * Function: sd_get_range()
29441 29437 *
29442 29438 	 * Description: Find if there is any overlapping I/O to this one.
29443 29439 	 *		Returns the write-map of the 1st such I/O, NULL otherwise.
29444 29440 *
29445 29441 * Arguments: un - sd_lun structure for the device.
29446 29442 * startb - The starting block number
29447 29443 * endb - The end block number
29448 29444 *
29449 29445 * Return Code: wm - pointer to the wmap structure.
29450 29446 */
29451 29447
29452 29448 static struct sd_w_map *
29453 29449 sd_get_range(struct sd_lun *un, daddr_t startb, daddr_t endb)
29454 29450 {
29455 29451 struct sd_w_map *wmp;
29456 29452
29457 29453 ASSERT(un != NULL);
29458 29454
29459 29455 for (wmp = un->un_wm; wmp != NULL; wmp = wmp->wm_next) {
29460 29456 if (!(wmp->wm_flags & SD_WM_BUSY)) {
29461 29457 continue;
29462 29458 }
29463 29459 if ((startb >= wmp->wm_start) && (startb <= wmp->wm_end)) {
29464 29460 break;
29465 29461 }
29466 29462 if ((endb >= wmp->wm_start) && (endb <= wmp->wm_end)) {
29467 29463 break;
29468 29464 }
29469 29465 }
29470 29466
29471 29467 return (wmp);
29472 29468 }
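/*
 * Editor's note: the loop above tests whether either endpoint of the
 * incoming range falls inside a busy map. The general predicate for two
 * inclusive ranges [s1, e1] and [s2, e2] overlapping is
 *
 *	(s1 <= e2) && (s2 <= e1)
 *
 * which also covers an incoming range that strictly contains a busy one;
 * the endpoint tests here suffice only under the assumption that callers
 * lock aligned, uniformly sized chunks.
 */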
29473 29469
29474 29470
29475 29471 /*
29476 29472 * Function: sd_free_inlist_wmap()
29477 29473 *
29478 29474 * Description: Unlink and free a write map struct.
29479 29475 *
29480 29476 * Arguments: un - sd_lun structure for the device.
29481 29477 * wmp - sd_w_map which needs to be unlinked.
29482 29478 */
29483 29479
29484 29480 static void
29485 29481 sd_free_inlist_wmap(struct sd_lun *un, struct sd_w_map *wmp)
29486 29482 {
29487 29483 ASSERT(un != NULL);
29488 29484
29489 29485 if (un->un_wm == wmp) {
29490 29486 un->un_wm = wmp->wm_next;
29491 29487 } else {
29492 29488 wmp->wm_prev->wm_next = wmp->wm_next;
29493 29489 }
29494 29490
29495 29491 if (wmp->wm_next) {
29496 29492 wmp->wm_next->wm_prev = wmp->wm_prev;
29497 29493 }
29498 29494
29499 29495 wmp->wm_next = wmp->wm_prev = NULL;
29500 29496
29501 29497 kmem_cache_free(un->un_wm_cache, wmp);
29502 29498 }
29503 29499
29504 29500
29505 29501 /*
29506 29502 * Function: sd_range_unlock()
29507 29503 *
29508 29504 * Description: Unlock the range locked by wm.
29509 29505 * Free write map if nobody else is waiting on it.
29510 29506 *
29511 29507 * Arguments: un - sd_lun structure for the device.
29512 29508 * wmp - sd_w_map which needs to be unlinked.
29513 29509 */
29514 29510
29515 29511 static void
29516 29512 sd_range_unlock(struct sd_lun *un, struct sd_w_map *wm)
29517 29513 {
29518 29514 ASSERT(un != NULL);
29519 29515 ASSERT(wm != NULL);
29520 29516 ASSERT(!mutex_owned(SD_MUTEX(un)));
29521 29517
29522 29518 mutex_enter(SD_MUTEX(un));
29523 29519
29524 29520 if (wm->wm_flags & SD_WTYPE_RMW) {
29525 29521 un->un_rmw_count--;
29526 29522 }
29527 29523
29528 29524 if (wm->wm_wanted_count) {
29529 29525 wm->wm_flags = 0;
29530 29526 /*
29531 29527 * Broadcast that the wmap is available now.
29532 29528 */
29533 29529 cv_broadcast(&wm->wm_avail);
29534 29530 } else {
29535 29531 /*
29536 29532 		 * If no one is waiting on the map, it should be freed.
29537 29533 */
29538 29534 sd_free_inlist_wmap(un, wm);
29539 29535 }
29540 29536
29541 29537 mutex_exit(SD_MUTEX(un));
29542 29538 }
29543 29539
29544 29540
29545 29541 /*
29546 29542 * Function: sd_read_modify_write_task
29547 29543 *
29548 29544 * Description: Called from a taskq thread to initiate the write phase of
29549 29545 * a read-modify-write request. This is used for targets where
29550 29546 * un->un_sys_blocksize != un->un_tgt_blocksize.
29551 29547 *
29552 29548 * Arguments: arg - a pointer to the buf(9S) struct for the write command.
29553 29549 *
29554 29550 * Context: Called under taskq thread context.
29555 29551 */
29556 29552
29557 29553 static void
29558 29554 sd_read_modify_write_task(void *arg)
29559 29555 {
29560 29556 struct sd_mapblocksize_info *bsp;
29561 29557 struct buf *bp;
29562 29558 struct sd_xbuf *xp;
29563 29559 struct sd_lun *un;
29564 29560
29565 29561 bp = arg; /* The bp is given in arg */
29566 29562 ASSERT(bp != NULL);
29567 29563
29568 29564 /* Get the pointer to the layer-private data struct */
29569 29565 xp = SD_GET_XBUF(bp);
29570 29566 ASSERT(xp != NULL);
29571 29567 bsp = xp->xb_private;
29572 29568 ASSERT(bsp != NULL);
29573 29569
29574 29570 un = SD_GET_UN(bp);
29575 29571 ASSERT(un != NULL);
29576 29572 ASSERT(!mutex_owned(SD_MUTEX(un)));
29577 29573
29578 29574 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29579 29575 "sd_read_modify_write_task: entry: buf:0x%p\n", bp);
29580 29576
29581 29577 /*
29582 29578 * This is the write phase of a read-modify-write request, called
29583 29579 	 * in the context of a taskq thread after the read portion of the
29584 29580 	 * rmw request has completed under interrupt
29585 29581 * context. The write request must be sent from here down the iostart
29586 29582 * chain as if it were being sent from sd_mapblocksize_iostart(), so
29587 29583 * we use the layer index saved in the layer-private data area.
29588 29584 */
29589 29585 SD_NEXT_IOSTART(bsp->mbs_layer_index, un, bp);
29590 29586
29591 29587 SD_TRACE(SD_LOG_IO_RMMEDIA, un,
29592 29588 "sd_read_modify_write_task: exit: buf:0x%p\n", bp);
29593 29589 }
29594 29590
29595 29591
29596 29592 /*
29597 29593 * Function: sddump_do_read_of_rmw()
29598 29594 *
29599 29595 	 * Description: This routine will be called from sddump. If sddump is
29600 29596 	 *		called with an I/O which is not aligned on a device
29601 29597 	 *		blocksize boundary, then the write has to be converted
29602 29598 	 *		to a read-modify-write. Do the read part here in order
29603 29599 	 *		to keep sddump simple. Note that the sd_mutex is held
29604 29600 	 *		across the call to this routine.
29605 29601 *
29606 29602 * Arguments: un - sd_lun
29607 29603 * blkno - block number in terms of media block size.
29608 29604 * nblk - number of blocks.
29609 29605 * bpp - pointer to pointer to the buf structure. On return
29610 29606 * from this function, *bpp points to the valid buffer
29611 29607 * to which the write has to be done.
29612 29608 *
29613 29609 * Return Code: 0 for success or errno-type return code
29614 29610 */
29615 29611
29616 29612 static int
29617 29613 sddump_do_read_of_rmw(struct sd_lun *un, uint64_t blkno, uint64_t nblk,
29618 29614 struct buf **bpp)
29619 29615 {
29620 29616 int err;
29621 29617 int i;
29622 29618 int rval;
29623 29619 struct buf *bp;
29624 29620 struct scsi_pkt *pkt = NULL;
29625 29621 uint32_t target_blocksize;
29626 29622
29627 29623 ASSERT(un != NULL);
29628 29624 ASSERT(mutex_owned(SD_MUTEX(un)));
29629 29625
29630 29626 target_blocksize = un->un_tgt_blocksize;
29631 29627
29632 29628 mutex_exit(SD_MUTEX(un));
29633 29629
29634 29630 bp = scsi_alloc_consistent_buf(SD_ADDRESS(un), (struct buf *)NULL,
29635 29631 (size_t)(nblk * target_blocksize), B_READ, NULL_FUNC, NULL);
29636 29632 if (bp == NULL) {
29637 29633 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29638 29634 "no resources for dumping; giving up");
29639 29635 err = ENOMEM;
29640 29636 goto done;
29641 29637 }
29642 29638
29643 29639 rval = sd_setup_rw_pkt(un, &pkt, bp, 0, NULL_FUNC, NULL,
29644 29640 blkno, nblk);
29645 29641 if (rval != 0) {
29646 29642 scsi_free_consistent_buf(bp);
29647 29643 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29648 29644 "no resources for dumping; giving up");
29649 29645 err = ENOMEM;
29650 29646 goto done;
29651 29647 }
29652 29648
29653 29649 pkt->pkt_flags |= FLAG_NOINTR;
29654 29650
29655 29651 err = EIO;
29656 29652 for (i = 0; i < SD_NDUMP_RETRIES; i++) {
29657 29653
29658 29654 /*
29659 29655 * Scsi_poll returns 0 (success) if the command completes and
29660 29656 * the status block is STATUS_GOOD. We should only check
29661 29657 * errors if this condition is not true. Even then we should
29662 29658 * send our own request sense packet only if we have a check
29663 29659 * condition and auto request sense has not been performed by
29664 29660 * the hba.
29665 29661 */
29666 29662 SD_TRACE(SD_LOG_DUMP, un, "sddump: sending read\n");
29667 29663
29668 29664 if ((sd_scsi_poll(un, pkt) == 0) && (pkt->pkt_resid == 0)) {
29669 29665 err = 0;
29670 29666 break;
29671 29667 }
29672 29668
29673 29669 /*
29674 29670 * Check CMD_DEV_GONE 1st, give up if device is gone,
29675 29671 * no need to read RQS data.
29676 29672 */
29677 29673 if (pkt->pkt_reason == CMD_DEV_GONE) {
29678 29674 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29679 29675 "Error while dumping state with rmw..."
29680 29676 "Device is gone\n");
29681 29677 break;
29682 29678 }
29683 29679
29684 29680 if (SD_GET_PKT_STATUS(pkt) == STATUS_CHECK) {
29685 29681 SD_INFO(SD_LOG_DUMP, un,
29686 29682 "sddump: read failed with CHECK, try # %d\n", i);
29687 29683 if (((pkt->pkt_state & STATE_ARQ_DONE) == 0)) {
29688 29684 (void) sd_send_polled_RQS(un);
29689 29685 }
29690 29686
29691 29687 continue;
29692 29688 }
29693 29689
29694 29690 if (SD_GET_PKT_STATUS(pkt) == STATUS_BUSY) {
29695 29691 int reset_retval = 0;
29696 29692
29697 29693 SD_INFO(SD_LOG_DUMP, un,
29698 29694 "sddump: read failed with BUSY, try # %d\n", i);
29699 29695
29700 29696 if (un->un_f_lun_reset_enabled == TRUE) {
29701 29697 reset_retval = scsi_reset(SD_ADDRESS(un),
29702 29698 RESET_LUN);
29703 29699 }
29704 29700 if (reset_retval == 0) {
29705 29701 (void) scsi_reset(SD_ADDRESS(un), RESET_TARGET);
29706 29702 }
29707 29703 (void) sd_send_polled_RQS(un);
29708 29704
29709 29705 } else {
29710 29706 SD_INFO(SD_LOG_DUMP, un,
29711 29707 "sddump: read failed with 0x%x, try # %d\n",
29712 29708 SD_GET_PKT_STATUS(pkt), i);
29713 29709 mutex_enter(SD_MUTEX(un));
29714 29710 sd_reset_target(un, pkt);
29715 29711 mutex_exit(SD_MUTEX(un));
29716 29712 }
29717 29713
29718 29714 /*
29719 29715 * If we are not getting anywhere with lun/target resets,
29720 29716 * let's reset the bus.
29721 29717 */
29722 29718 if (i > SD_NDUMP_RETRIES/2) {
29723 29719 (void) scsi_reset(SD_ADDRESS(un), RESET_ALL);
29724 29720 (void) sd_send_polled_RQS(un);
29725 29721 }
29726 29722
29727 29723 }
29728 29724 scsi_destroy_pkt(pkt);
29729 29725
29730 29726 if (err != 0) {
29731 29727 scsi_free_consistent_buf(bp);
29732 29728 *bpp = NULL;
29733 29729 } else {
29734 29730 *bpp = bp;
29735 29731 }
29736 29732
29737 29733 done:
29738 29734 mutex_enter(SD_MUTEX(un));
29739 29735 return (err);
29740 29736 }
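/*
 * Editor's sketch (not a quote of sddump): how the buffer returned via
 * bpp is intended to be used, with 'off' and 'wrlen' as hypothetical
 * names for the byte offset and length of the unaligned data within the
 * aligned span that was just read:
 *
 *	if (sddump_do_read_of_rmw(un, blkno, nblk, &bp) == 0) {
 *		bcopy(wrbuf, bp->b_un.b_addr + off, wrlen);
 *		(then write bp's full, now-modified span back to media)
 *	}
 */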
29741 29737
29742 29738
29743 29739 /*
29744 29740 * Function: sd_failfast_flushq
29745 29741 *
29746 29742 * Description: Take all bp's on the wait queue that have B_FAILFAST set
29747 29743 * in b_flags and move them onto the failfast queue, then kick
29748 29744 * off a thread to return all bp's on the failfast queue to
29749 29745 * their owners with an error set.
29750 29746 *
29751 29747 * Arguments: un - pointer to the soft state struct for the instance.
29752 29748 *
29753 29749 * Context: may execute in interrupt context.
29754 29750 */
29755 29751
29756 29752 static void
29757 29753 sd_failfast_flushq(struct sd_lun *un)
29758 29754 {
29759 29755 struct buf *bp;
29760 29756 struct buf *next_waitq_bp;
29761 29757 struct buf *prev_waitq_bp = NULL;
29762 29758
29763 29759 ASSERT(un != NULL);
29764 29760 ASSERT(mutex_owned(SD_MUTEX(un)));
29765 29761 ASSERT(un->un_failfast_state == SD_FAILFAST_ACTIVE);
29766 29762 ASSERT(un->un_failfast_bp == NULL);
29767 29763
29768 29764 SD_TRACE(SD_LOG_IO_FAILFAST, un,
29769 29765 "sd_failfast_flushq: entry: un:0x%p\n", un);
29770 29766
29771 29767 /*
29772 29768 * Check if we should flush all bufs when entering failfast state, or
29773 29769 * just those with B_FAILFAST set.
29774 29770 */
29775 29771 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) {
29776 29772 /*
29777 29773 * Move *all* bp's on the wait queue to the failfast flush
29778 29774 * queue, including those that do NOT have B_FAILFAST set.
29779 29775 */
29780 29776 if (un->un_failfast_headp == NULL) {
29781 29777 ASSERT(un->un_failfast_tailp == NULL);
29782 29778 un->un_failfast_headp = un->un_waitq_headp;
29783 29779 } else {
29784 29780 ASSERT(un->un_failfast_tailp != NULL);
29785 29781 un->un_failfast_tailp->av_forw = un->un_waitq_headp;
29786 29782 }
29787 29783
29788 29784 un->un_failfast_tailp = un->un_waitq_tailp;
29789 29785
29790 29786 /* update kstat for each bp moved out of the waitq */
29791 29787 for (bp = un->un_waitq_headp; bp != NULL; bp = bp->av_forw) {
29792 29788 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
29793 29789 }
29794 29790
29795 29791 /* empty the waitq */
29796 29792 un->un_waitq_headp = un->un_waitq_tailp = NULL;
29797 29793
29798 29794 } else {
29799 29795 /*
29800 29796 * Go thru the wait queue, pick off all entries with
29801 29797 * B_FAILFAST set, and move these onto the failfast queue.
29802 29798 */
29803 29799 for (bp = un->un_waitq_headp; bp != NULL; bp = next_waitq_bp) {
29804 29800 /*
29805 29801 * Save the pointer to the next bp on the wait queue,
29806 29802 * so we get to it on the next iteration of this loop.
29807 29803 */
29808 29804 next_waitq_bp = bp->av_forw;
29809 29805
29810 29806 /*
29811 29807 * If this bp from the wait queue does NOT have
29812 29808 * B_FAILFAST set, just move on to the next element
29813 29809 * in the wait queue. Note, this is the only place
29814 29810 * where it is correct to set prev_waitq_bp.
29815 29811 */
29816 29812 if ((bp->b_flags & B_FAILFAST) == 0) {
29817 29813 prev_waitq_bp = bp;
29818 29814 continue;
29819 29815 }
29820 29816
29821 29817 /*
29822 29818 * Remove the bp from the wait queue.
29823 29819 */
29824 29820 if (bp == un->un_waitq_headp) {
29825 29821 /* The bp is the first element of the waitq. */
29826 29822 un->un_waitq_headp = next_waitq_bp;
29827 29823 if (un->un_waitq_headp == NULL) {
29828 29824 /* The wait queue is now empty */
29829 29825 un->un_waitq_tailp = NULL;
29830 29826 }
29831 29827 } else {
29832 29828 /*
29833 29829 * The bp is either somewhere in the middle
29834 29830 * or at the end of the wait queue.
29835 29831 */
29836 29832 ASSERT(un->un_waitq_headp != NULL);
29837 29833 ASSERT(prev_waitq_bp != NULL);
29838 29834 ASSERT((prev_waitq_bp->b_flags & B_FAILFAST)
29839 29835 == 0);
29840 29836 if (bp == un->un_waitq_tailp) {
29841 29837 /* bp is the last entry on the waitq. */
29842 29838 ASSERT(next_waitq_bp == NULL);
29843 29839 un->un_waitq_tailp = prev_waitq_bp;
29844 29840 }
29845 29841 prev_waitq_bp->av_forw = next_waitq_bp;
29846 29842 }
29847 29843 bp->av_forw = NULL;
29848 29844
29849 29845 /*
29850 29846 * update kstat since the bp is moved out of
29851 29847 * the waitq
29852 29848 */
29853 29849 SD_UPDATE_KSTATS(un, kstat_waitq_exit, bp);
29854 29850
29855 29851 /*
29856 29852 * Now put the bp onto the failfast queue.
29857 29853 */
29858 29854 if (un->un_failfast_headp == NULL) {
29859 29855 /* failfast queue is currently empty */
29860 29856 ASSERT(un->un_failfast_tailp == NULL);
29861 29857 un->un_failfast_headp =
29862 29858 un->un_failfast_tailp = bp;
29863 29859 } else {
29864 29860 /* Add the bp to the end of the failfast q */
29865 29861 ASSERT(un->un_failfast_tailp != NULL);
29866 29862 ASSERT(un->un_failfast_tailp->b_flags &
29867 29863 B_FAILFAST);
29868 29864 un->un_failfast_tailp->av_forw = bp;
29869 29865 un->un_failfast_tailp = bp;
29870 29866 }
29871 29867 }
29872 29868 }
29873 29869
29874 29870 /*
29875 29871 * Now return all bp's on the failfast queue to their owners.
29876 29872 */
29877 29873 while ((bp = un->un_failfast_headp) != NULL) {
29878 29874
29879 29875 un->un_failfast_headp = bp->av_forw;
29880 29876 if (un->un_failfast_headp == NULL) {
29881 29877 un->un_failfast_tailp = NULL;
29882 29878 }
29883 29879
29884 29880 /*
29885 29881 * We want to return the bp with a failure error code, but
29886 29882 * we do not want a call to sd_start_cmds() to occur here,
29887 29883 * so use sd_return_failed_command_no_restart() instead of
29888 29884 * sd_return_failed_command().
29889 29885 */
29890 29886 sd_return_failed_command_no_restart(un, bp, EIO);
29891 29887 }
29892 29888
29893 29889 /* Flush the xbuf queues if required. */
29894 29890 if (sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_QUEUES) {
29895 29891 ddi_xbuf_flushq(un->un_xbuf_attr, sd_failfast_flushq_callback);
29896 29892 }
29897 29893
29898 29894 SD_TRACE(SD_LOG_IO_FAILFAST, un,
29899 29895 "sd_failfast_flushq: exit: un:0x%p\n", un);
29900 29896 }
29901 29897
29902 29898
29903 29899 /*
29904 29900 * Function: sd_failfast_flushq_callback
29905 29901 *
29906 29902 * Description: Return TRUE if the given bp meets the criteria for failfast
29907 29903 * flushing. Used with ddi_xbuf_flushq(9F).
29908 29904 *
29909 29905 * Arguments: bp - ptr to buf struct to be examined.
29910 29906 *
29911 29907 * Context: Any
29912 29908 */
29913 29909
29914 29910 static int
29915 29911 sd_failfast_flushq_callback(struct buf *bp)
29916 29912 {
29917 29913 /*
29918 29914 * Return TRUE if (1) we want to flush ALL bufs when the failfast
29919 29915 * state is entered; OR (2) the given bp has B_FAILFAST set.
29920 29916 */
29921 29917 return (((sd_failfast_flushctl & SD_FAILFAST_FLUSH_ALL_BUFS) ||
29922 29918 (bp->b_flags & B_FAILFAST)) ? TRUE : FALSE);
29923 29919 }
29924 29920
29925 29921
29926 29922
29927 29923 /*
29928 29924 * Function: sd_setup_next_xfer
29929 29925 *
29930 29926 * Description: Prepare next I/O operation using DMA_PARTIAL
29931 29927 *
29932 29928 */
29933 29929
29934 29930 static int
29935 29931 sd_setup_next_xfer(struct sd_lun *un, struct buf *bp,
29936 29932 struct scsi_pkt *pkt, struct sd_xbuf *xp)
29937 29933 {
29938 29934 ssize_t num_blks_not_xfered;
29939 29935 daddr_t strt_blk_num;
29940 29936 ssize_t bytes_not_xfered;
29941 29937 int rval;
29942 29938
29943 29939 ASSERT(pkt->pkt_resid == 0);
29944 29940
29945 29941 /*
29946 29942 * Calculate next block number and amount to be transferred.
29947 29943 *
29948 29944 	 * How much data NOT transferred to the HBA yet.
29949 29945 */
29950 29946 bytes_not_xfered = xp->xb_dma_resid;
29951 29947
29952 29948 /*
29953 29949 	 * figure how many blocks NOT transferred to the HBA yet.
29954 29950 */
29955 29951 num_blks_not_xfered = SD_BYTES2TGTBLOCKS(un, bytes_not_xfered);
29956 29952
29957 29953 /*
29958 29954 	 * set starting block number to the end of what WAS transferred.
29959 29955 */
29960 29956 strt_blk_num = xp->xb_blkno +
29961 29957 SD_BYTES2TGTBLOCKS(un, bp->b_bcount - bytes_not_xfered);
29962 29958
29963 29959 /*
29964 29960 * Move pkt to the next portion of the xfer. sd_setup_next_rw_pkt
29965 29961 * will call scsi_initpkt with NULL_FUNC so we do not have to release
29966 29962 * the disk mutex here.
29967 29963 */
29968 29964 rval = sd_setup_next_rw_pkt(un, pkt, bp,
29969 29965 strt_blk_num, num_blks_not_xfered);
29970 29966
29971 29967 if (rval == 0) {
29972 29968
29973 29969 /*
29974 29970 * Success.
29975 29971 *
29976 29972 * Adjust things if there are still more blocks to be
29977 29973 * transfered.
29978 29974 		 * transferred.
29979 29975 xp->xb_dma_resid = pkt->pkt_resid;
29980 29976 pkt->pkt_resid = 0;
29981 29977
29982 29978 return (1);
29983 29979 }
29984 29980
29985 29981 /*
29986 29982 	 * There's really only one possible error return from
29987 29983 	 * sd_setup_next_rw_pkt, which occurs when scsi_init_pkt
29988 29984 	 * returns NULL.
29989 29985 */
29990 29986 ASSERT(rval == SD_PKT_ALLOC_FAILURE);
29991 29987
29992 29988 bp->b_resid = bp->b_bcount;
29993 29989 bp->b_flags |= B_ERROR;
29994 29990
29995 29991 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
29996 29992 "Error setting up next portion of DMA transfer\n");
29997 29993
29998 29994 return (0);
29999 29995 }
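/*
 * Editor's note: a worked example of the arithmetic above, assuming a
 * 512-byte target blocksize. For a 1 MB request (b_bcount = 1048576) of
 * which the HBA accepted only 256 KB, xb_dma_resid is 786432, so
 * 1048576 - 786432 = 262144 bytes (512 blocks) were already transferred;
 * the next chunk therefore starts at xb_blkno + 512 and covers the
 * remaining 786432 / 512 = 1536 blocks.
 */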
30000 29996
30001 29997 /*
30002 29998 * Function: sd_panic_for_res_conflict
30003 29999 *
30004 30000 * Description: Call panic with a string formatted with "Reservation Conflict"
30005 30001 * and a human readable identifier indicating the SD instance
30006 30002 * that experienced the reservation conflict.
30007 30003 *
30008 30004 * Arguments: un - pointer to the soft state struct for the instance.
30009 30005 *
30010 30006 * Context: may execute in interrupt context.
30011 30007 */
30012 30008
30013 30009 #define SD_RESV_CONFLICT_FMT_LEN 40
30014 30010 void
30015 30011 sd_panic_for_res_conflict(struct sd_lun *un)
30016 30012 {
30017 30013 char panic_str[SD_RESV_CONFLICT_FMT_LEN+MAXPATHLEN];
30018 30014 char path_str[MAXPATHLEN];
30019 30015
30020 30016 (void) snprintf(panic_str, sizeof (panic_str),
30021 30017 "Reservation Conflict\nDisk: %s",
30022 30018 ddi_pathname(SD_DEVINFO(un), path_str));
30023 30019
30024 30020 panic(panic_str);
30025 30021 }
30026 30022
30027 30023 /*
30028 30024  * Note: The following sd_faultinjection_ioctl() routines implement
30029 30025  * driver support for handling fault injection for error analysis,
30030 30026  * causing faults in multiple layers of the driver.
30031 30027 *
30032 30028 */
30033 30029
30034 30030 #ifdef SD_FAULT_INJECTION
30035 30031 static uint_t sd_fault_injection_on = 0;
30036 30032
30037 30033 /*
30038 30034 * Function: sd_faultinjection_ioctl()
30039 30035 *
30040 30036 * Description: This routine is the driver entry point for handling
30041 30037 * faultinjection ioctls to inject errors into the
30042 30038 * layer model
30043 30039 *
30044 30040 * Arguments: cmd - the ioctl cmd received
30045 30041 * arg - the arguments from user and returns
30046 30042 */
30047 30043
30048 30044 static void
30049 30045 sd_faultinjection_ioctl(int cmd, intptr_t arg, struct sd_lun *un)
30050 30046 {
30051 30047 uint_t i = 0;
30052 30048 uint_t rval;
30053 30049
30054 30050 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl: entry\n");
30055 30051
30056 30052 mutex_enter(SD_MUTEX(un));
30057 30053
30058 30054 switch (cmd) {
30059 30055 case SDIOCRUN:
30060 30056 /* Allow pushed faults to be injected */
30061 30057 SD_INFO(SD_LOG_SDTEST, un,
30062 30058 "sd_faultinjection_ioctl: Injecting Fault Run\n");
30063 30059
30064 30060 sd_fault_injection_on = 1;
30065 30061
30066 30062 SD_INFO(SD_LOG_IOERR, un,
30067 30063 "sd_faultinjection_ioctl: run finished\n");
30068 30064 break;
30069 30065
30070 30066 case SDIOCSTART:
30071 30067 /* Start Injection Session */
30072 30068 SD_INFO(SD_LOG_SDTEST, un,
30073 30069 "sd_faultinjection_ioctl: Injecting Fault Start\n");
30074 30070
30075 30071 sd_fault_injection_on = 0;
30076 30072 un->sd_injection_mask = 0xFFFFFFFF;
30077 30073 for (i = 0; i < SD_FI_MAX_ERROR; i++) {
30078 30074 un->sd_fi_fifo_pkt[i] = NULL;
30079 30075 un->sd_fi_fifo_xb[i] = NULL;
30080 30076 un->sd_fi_fifo_un[i] = NULL;
30081 30077 un->sd_fi_fifo_arq[i] = NULL;
30082 30078 }
30083 30079 un->sd_fi_fifo_start = 0;
30084 30080 un->sd_fi_fifo_end = 0;
30085 30081
30086 30082 mutex_enter(&(un->un_fi_mutex));
30087 30083 un->sd_fi_log[0] = '\0';
30088 30084 un->sd_fi_buf_len = 0;
30089 30085 mutex_exit(&(un->un_fi_mutex));
30090 30086
30091 30087 SD_INFO(SD_LOG_IOERR, un,
30092 30088 "sd_faultinjection_ioctl: start finished\n");
30093 30089 break;
30094 30090
30095 30091 case SDIOCSTOP:
30096 30092 /* Stop Injection Session */
30097 30093 SD_INFO(SD_LOG_SDTEST, un,
30098 30094 "sd_faultinjection_ioctl: Injecting Fault Stop\n");
30099 30095 sd_fault_injection_on = 0;
30100 30096 un->sd_injection_mask = 0x0;
30101 30097
30102 30098 		/* Empty stray or unused structs from fifo */
30103 30099 for (i = 0; i < SD_FI_MAX_ERROR; i++) {
30104 30100 if (un->sd_fi_fifo_pkt[i] != NULL) {
30105 30101 kmem_free(un->sd_fi_fifo_pkt[i],
30106 30102 sizeof (struct sd_fi_pkt));
30107 30103 }
30108 30104 if (un->sd_fi_fifo_xb[i] != NULL) {
30109 30105 kmem_free(un->sd_fi_fifo_xb[i],
30110 30106 sizeof (struct sd_fi_xb));
30111 30107 }
30112 30108 if (un->sd_fi_fifo_un[i] != NULL) {
30113 30109 kmem_free(un->sd_fi_fifo_un[i],
30114 30110 sizeof (struct sd_fi_un));
30115 30111 }
30116 30112 if (un->sd_fi_fifo_arq[i] != NULL) {
30117 30113 kmem_free(un->sd_fi_fifo_arq[i],
30118 30114 sizeof (struct sd_fi_arq));
30119 30115 }
30120 30116 un->sd_fi_fifo_pkt[i] = NULL;
30121 30117 un->sd_fi_fifo_un[i] = NULL;
30122 30118 un->sd_fi_fifo_xb[i] = NULL;
30123 30119 un->sd_fi_fifo_arq[i] = NULL;
30124 30120 }
30125 30121 un->sd_fi_fifo_start = 0;
30126 30122 un->sd_fi_fifo_end = 0;
30127 30123
30128 30124 SD_INFO(SD_LOG_IOERR, un,
30129 30125 "sd_faultinjection_ioctl: stop finished\n");
30130 30126 break;
30131 30127
30132 30128 case SDIOCINSERTPKT:
30133 30129 /* Store a packet struct to be pushed onto fifo */
30134 30130 SD_INFO(SD_LOG_SDTEST, un,
30135 30131 "sd_faultinjection_ioctl: Injecting Fault Insert Pkt\n");
30136 30132
30137 30133 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30138 30134
30139 30135 sd_fault_injection_on = 0;
30140 30136
30141 30137 		/* No more than SD_FI_MAX_ERROR allowed in the queue */
30142 30138 if (un->sd_fi_fifo_pkt[i] != NULL) {
30143 30139 kmem_free(un->sd_fi_fifo_pkt[i],
30144 30140 sizeof (struct sd_fi_pkt));
30145 30141 }
30146 30142 if (arg != NULL) {
30147 30143 un->sd_fi_fifo_pkt[i] =
30148 30144 kmem_alloc(sizeof (struct sd_fi_pkt), KM_NOSLEEP);
30149 30145 if (un->sd_fi_fifo_pkt[i] == NULL) {
30150 30146 /* Alloc failed don't store anything */
30151 30147 break;
30152 30148 }
30153 30149 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_pkt[i],
30154 30150 sizeof (struct sd_fi_pkt), 0);
30155 30151 if (rval == -1) {
30156 30152 kmem_free(un->sd_fi_fifo_pkt[i],
30157 30153 sizeof (struct sd_fi_pkt));
30158 30154 un->sd_fi_fifo_pkt[i] = NULL;
30159 30155 }
30160 30156 } else {
30161 30157 SD_INFO(SD_LOG_IOERR, un,
30162 30158 "sd_faultinjection_ioctl: pkt null\n");
30163 30159 }
30164 30160 break;
30165 30161
30166 30162 case SDIOCINSERTXB:
30167 30163 /* Store a xb struct to be pushed onto fifo */
30168 30164 SD_INFO(SD_LOG_SDTEST, un,
30169 30165 "sd_faultinjection_ioctl: Injecting Fault Insert XB\n");
30170 30166
30171 30167 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30172 30168
30173 30169 sd_fault_injection_on = 0;
30174 30170
30175 30171 if (un->sd_fi_fifo_xb[i] != NULL) {
30176 30172 kmem_free(un->sd_fi_fifo_xb[i],
30177 30173 sizeof (struct sd_fi_xb));
30178 30174 un->sd_fi_fifo_xb[i] = NULL;
30179 30175 }
30180 30176 if (arg != NULL) {
30181 30177 un->sd_fi_fifo_xb[i] =
30182 30178 kmem_alloc(sizeof (struct sd_fi_xb), KM_NOSLEEP);
30183 30179 if (un->sd_fi_fifo_xb[i] == NULL) {
30184 30180 /* Alloc failed don't store anything */
30185 30181 break;
30186 30182 }
30187 30183 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_xb[i],
30188 30184 sizeof (struct sd_fi_xb), 0);
30189 30185
30190 30186 if (rval == -1) {
30191 30187 kmem_free(un->sd_fi_fifo_xb[i],
30192 30188 sizeof (struct sd_fi_xb));
30193 30189 un->sd_fi_fifo_xb[i] = NULL;
30194 30190 }
30195 30191 } else {
30196 30192 SD_INFO(SD_LOG_IOERR, un,
30197 30193 "sd_faultinjection_ioctl: xb null\n");
30198 30194 }
30199 30195 break;
30200 30196
30201 30197 case SDIOCINSERTUN:
30202 30198 /* Store a un struct to be pushed onto fifo */
30203 30199 SD_INFO(SD_LOG_SDTEST, un,
30204 30200 "sd_faultinjection_ioctl: Injecting Fault Insert UN\n");
30205 30201
30206 30202 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30207 30203
30208 30204 sd_fault_injection_on = 0;
30209 30205
30210 30206 if (un->sd_fi_fifo_un[i] != NULL) {
30211 30207 kmem_free(un->sd_fi_fifo_un[i],
30212 30208 sizeof (struct sd_fi_un));
30213 30209 un->sd_fi_fifo_un[i] = NULL;
30214 30210 }
30215 30211 if (arg != NULL) {
30216 30212 un->sd_fi_fifo_un[i] =
30217 30213 kmem_alloc(sizeof (struct sd_fi_un), KM_NOSLEEP);
30218 30214 if (un->sd_fi_fifo_un[i] == NULL) {
30219 30215 /* Alloc failed don't store anything */
30220 30216 break;
30221 30217 }
30222 30218 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_un[i],
30223 30219 sizeof (struct sd_fi_un), 0);
30224 30220 if (rval == -1) {
30225 30221 kmem_free(un->sd_fi_fifo_un[i],
30226 30222 sizeof (struct sd_fi_un));
30227 30223 un->sd_fi_fifo_un[i] = NULL;
30228 30224 }
30229 30225
30230 30226 } else {
30231 30227 SD_INFO(SD_LOG_IOERR, un,
30232 30228 "sd_faultinjection_ioctl: un null\n");
30233 30229 }
30234 30230
30235 30231 break;
30236 30232
30237 30233 case SDIOCINSERTARQ:
30238 30234 /* Store a arq struct to be pushed onto fifo */
30239 30235 SD_INFO(SD_LOG_SDTEST, un,
30240 30236 "sd_faultinjection_ioctl: Injecting Fault Insert ARQ\n");
30241 30237 i = un->sd_fi_fifo_end % SD_FI_MAX_ERROR;
30242 30238
30243 30239 sd_fault_injection_on = 0;
30244 30240
30245 30241 if (un->sd_fi_fifo_arq[i] != NULL) {
30246 30242 kmem_free(un->sd_fi_fifo_arq[i],
30247 30243 sizeof (struct sd_fi_arq));
30248 30244 un->sd_fi_fifo_arq[i] = NULL;
30249 30245 }
30250 30246 if (arg != NULL) {
30251 30247 un->sd_fi_fifo_arq[i] =
30252 30248 kmem_alloc(sizeof (struct sd_fi_arq), KM_NOSLEEP);
30253 30249 if (un->sd_fi_fifo_arq[i] == NULL) {
30254 30250 /* Alloc failed don't store anything */
30255 30251 break;
30256 30252 }
30257 30253 rval = ddi_copyin((void *)arg, un->sd_fi_fifo_arq[i],
30258 30254 sizeof (struct sd_fi_arq), 0);
30259 30255 if (rval == -1) {
30260 30256 kmem_free(un->sd_fi_fifo_arq[i],
30261 30257 sizeof (struct sd_fi_arq));
30262 30258 un->sd_fi_fifo_arq[i] = NULL;
30263 30259 }
30264 30260
30265 30261 } else {
30266 30262 SD_INFO(SD_LOG_IOERR, un,
30267 30263 "sd_faultinjection_ioctl: arq null\n");
30268 30264 }
30269 30265
30270 30266 break;
30271 30267
30272 30268 case SDIOCPUSH:
30273 30269 /* Push stored xb, pkt, un, and arq onto fifo */
30274 30270 sd_fault_injection_on = 0;
30275 30271
30276 30272 if (arg != NULL) {
30277 30273 rval = ddi_copyin((void *)arg, &i, sizeof (uint_t), 0);
30278 30274 if (rval != -1 &&
30279 30275 un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
30280 30276 un->sd_fi_fifo_end += i;
30281 30277 }
30282 30278 } else {
30283 30279 SD_INFO(SD_LOG_IOERR, un,
30284 30280 "sd_faultinjection_ioctl: push arg null\n");
30285 30281 if (un->sd_fi_fifo_end + i < SD_FI_MAX_ERROR) {
30286 30282 un->sd_fi_fifo_end++;
30287 30283 }
30288 30284 }
30289 30285 SD_INFO(SD_LOG_IOERR, un,
30290 30286 "sd_faultinjection_ioctl: push to end=%d\n",
30291 30287 un->sd_fi_fifo_end);
30292 30288 break;
30293 30289
30294 30290 case SDIOCRETRIEVE:
30295 30291 /* Return buffer of log from Injection session */
30296 30292 SD_INFO(SD_LOG_SDTEST, un,
30297 30293 		    "sd_faultinjection_ioctl: Injecting Fault Retrieve");
30298 30294
30299 30295 sd_fault_injection_on = 0;
30300 30296
30301 30297 mutex_enter(&(un->un_fi_mutex));
30302 30298 rval = ddi_copyout(un->sd_fi_log, (void *)arg,
30303 30299 un->sd_fi_buf_len+1, 0);
30304 30300 mutex_exit(&(un->un_fi_mutex));
30305 30301
30306 30302 if (rval == -1) {
30307 30303 /*
30308 30304 * arg is possibly invalid setting
30309 30305 			 * arg is possibly invalid; setting
30310 30306 			 * it to NULL for return
30311 30307 arg = NULL;
30312 30308 }
30313 30309 break;
30314 30310 }
30315 30311
30316 30312 mutex_exit(SD_MUTEX(un));
30317 30313 SD_TRACE(SD_LOG_IOERR, un, "sd_faultinjection_ioctl:"
30318 30314 " exit\n");
30319 30315 }
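/*
 * For reference, a minimal userland sketch of driving the fault-injection
 * fifo handled above. This is a hypothetical test harness, not part of
 * this change: it assumes a kernel built with SD_FAULT_INJECTION, that
 * the SDIOC* codes and struct sd_fi_un from <sys/scsi/targets/sddef.h>
 * are visible to the caller, and an illustrative device descriptor.
 */
#include <sys/types.h>
#include <sys/scsi/targets/sddef.h>
#include <fcntl.h>
#include <stropts.h>
#include <string.h>
#include <unistd.h>

static int
inject_un_fault(const char *devpath)
{
	struct sd_fi_un fi_un;
	uint_t count = 1;
	int fd = open(devpath, O_RDONLY);

	if (fd < 0)
		return (-1);

	/*
	 * 0xFF-filled fields are assumed here to mean "leave the field
	 * unchanged"; the actual sentinel handling lives with SD_CONDSET
	 * elsewhere in this file.
	 */
	(void) memset(&fi_un, 0xFF, sizeof (fi_un));

	/* Stage the un override, push one staged set, start injection */
	if (ioctl(fd, SDIOCINSERTUN, &fi_un) < 0 ||
	    ioctl(fd, SDIOCPUSH, &count) < 0 ||
	    ioctl(fd, SDIOCSTART, NULL) < 0) {
		(void) close(fd);
		return (-1);
	}
	(void) close(fd);
	return (0);
}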
30320 30316
30321 30317
30322 30318 /*
30323 30319 * Function: sd_injection_log()
30324 30320 *
30325 30321  * Description: This routine adds buf to the already existing injection log
30326 30322  *		for retrieval via faultinjection_ioctl for use in fault
30327 30323  *		detection and recovery.
30328 30324 *
30329 30325 * Arguments: buf - the string to add to the log
30330 30326 */
30331 30327
30332 30328 static void
30333 30329 sd_injection_log(char *buf, struct sd_lun *un)
30334 30330 {
30335 30331 uint_t len;
30336 30332
30337 30333 ASSERT(un != NULL);
30338 30334 ASSERT(buf != NULL);
30339 30335
30340 30336 mutex_enter(&(un->un_fi_mutex));
30341 30337
30342 30338 len = min(strlen(buf), 255);
30343 30339 /* Add logged value to Injection log to be returned later */
30344 30340 if (len + un->sd_fi_buf_len < SD_FI_MAX_BUF) {
30345 30341 uint_t offset = strlen((char *)un->sd_fi_log);
30346 30342 char *destp = (char *)un->sd_fi_log + offset;
30347 30343 int i;
30348 30344 for (i = 0; i < len; i++) {
30349 30345 *destp++ = *buf++;
30350 30346 }
30351 30347 un->sd_fi_buf_len += len;
30352 30348 un->sd_fi_log[un->sd_fi_buf_len] = '\0';
30353 30349 }
30354 30350
30355 30351 mutex_exit(&(un->un_fi_mutex));
30356 30352 }
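/*
 * Companion sketch for the SDIOCRETRIEVE case above: a hypothetical
 * userland reader pulling back the bounded log that sd_injection_log()
 * accumulates (each appended entry is capped at 255 bytes by the min()
 * above, and SD_FI_MAX_BUF caps the total). The buffer size used here
 * is an assumption for illustration.
 */
#include <sys/scsi/targets/sddef.h>
#include <stdio.h>
#include <stropts.h>

static int
dump_injection_log(int fd)
{
	static char logbuf[65536];	/* assumed >= SD_FI_MAX_BUF + 1 */

	if (ioctl(fd, SDIOCRETRIEVE, logbuf) < 0)
		return (-1);
	(void) printf("%s\n", logbuf);
	return (0);
}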
30357 30353
30358 30354
30359 30355 /*
30360 30356 * Function: sd_faultinjection()
30361 30357 *
30362 30358 * Description: This routine takes the pkt and changes its
30363 30359  *		content based on the error injection scenario.
30364 30360 *
30365 30361 * Arguments: pktp - packet to be changed
30366 30362 */
30367 30363
30368 30364 static void
30369 30365 sd_faultinjection(struct scsi_pkt *pktp)
30370 30366 {
30371 30367 uint_t i;
30372 30368 struct sd_fi_pkt *fi_pkt;
30373 30369 struct sd_fi_xb *fi_xb;
30374 30370 struct sd_fi_un *fi_un;
30375 30371 struct sd_fi_arq *fi_arq;
30376 30372 struct buf *bp;
30377 30373 struct sd_xbuf *xb;
30378 30374 struct sd_lun *un;
30379 30375
30380 30376 ASSERT(pktp != NULL);
30381 30377
30382 30378 	/* pull bp, xb, and un from pktp */
30383 30379 bp = (struct buf *)pktp->pkt_private;
30384 30380 xb = SD_GET_XBUF(bp);
30385 30381 un = SD_GET_UN(bp);
30386 30382
30387 30383 ASSERT(un != NULL);
30388 30384
30389 30385 mutex_enter(SD_MUTEX(un));
30390 30386
30391 30387 SD_TRACE(SD_LOG_SDTEST, un,
30392 30388 "sd_faultinjection: entry Injection from sdintr\n");
30393 30389
30394 30390 /* if injection is off return */
30395 30391 if (sd_fault_injection_on == 0 ||
30396 30392 un->sd_fi_fifo_start == un->sd_fi_fifo_end) {
30397 30393 mutex_exit(SD_MUTEX(un));
30398 30394 return;
30399 30395 }
30400 30396
30401 30397 SD_INFO(SD_LOG_SDTEST, un,
30402 30398 	    "sd_faultinjection: injection active, applying fault data\n");
30403 30399
30404 30400 /* take next set off fifo */
30405 30401 i = un->sd_fi_fifo_start % SD_FI_MAX_ERROR;
30406 30402
30407 30403 fi_pkt = un->sd_fi_fifo_pkt[i];
30408 30404 fi_xb = un->sd_fi_fifo_xb[i];
30409 30405 fi_un = un->sd_fi_fifo_un[i];
30410 30406 fi_arq = un->sd_fi_fifo_arq[i];
30411 30407
30412 30408
30413 30409 /* set variables accordingly */
30414 30410 /* set pkt if it was on fifo */
30415 30411 if (fi_pkt != NULL) {
30416 30412 SD_CONDSET(pktp, pkt, pkt_flags, "pkt_flags");
30417 30413 SD_CONDSET(*pktp, pkt, pkt_scbp, "pkt_scbp");
30418 30414 if (fi_pkt->pkt_cdbp != 0xff)
30419 30415 SD_CONDSET(*pktp, pkt, pkt_cdbp, "pkt_cdbp");
30420 30416 SD_CONDSET(pktp, pkt, pkt_state, "pkt_state");
30421 30417 SD_CONDSET(pktp, pkt, pkt_statistics, "pkt_statistics");
30422 30418 SD_CONDSET(pktp, pkt, pkt_reason, "pkt_reason");
30423 30419
30424 30420 }
30425 30421 /* set xb if it was on fifo */
30426 30422 if (fi_xb != NULL) {
30427 30423 SD_CONDSET(xb, xb, xb_blkno, "xb_blkno");
30428 30424 SD_CONDSET(xb, xb, xb_dma_resid, "xb_dma_resid");
30429 30425 if (fi_xb->xb_retry_count != 0)
30430 30426 SD_CONDSET(xb, xb, xb_retry_count, "xb_retry_count");
30431 30427 SD_CONDSET(xb, xb, xb_victim_retry_count,
30432 30428 "xb_victim_retry_count");
30433 30429 SD_CONDSET(xb, xb, xb_sense_status, "xb_sense_status");
30434 30430 SD_CONDSET(xb, xb, xb_sense_state, "xb_sense_state");
30435 30431 SD_CONDSET(xb, xb, xb_sense_resid, "xb_sense_resid");
30436 30432
30437 30433 /* copy in block data from sense */
30438 30434 /*
30439 30435 * if (fi_xb->xb_sense_data[0] != -1) {
30440 30436 * bcopy(fi_xb->xb_sense_data, xb->xb_sense_data,
30441 30437 * SENSE_LENGTH);
30442 30438 * }
30443 30439 */
30444 30440 bcopy(fi_xb->xb_sense_data, xb->xb_sense_data, SENSE_LENGTH);
30445 30441
30446 30442 /* copy in extended sense codes */
30447 30443 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
30448 30444 xb, es_code, "es_code");
30449 30445 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
30450 30446 xb, es_key, "es_key");
30451 30447 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
30452 30448 xb, es_add_code, "es_add_code");
30453 30449 SD_CONDSET(((struct scsi_extended_sense *)xb->xb_sense_data),
30454 30450 xb, es_qual_code, "es_qual_code");
30455 30451 struct scsi_extended_sense *esp;
30456 30452 esp = (struct scsi_extended_sense *)xb->xb_sense_data;
30457 30453 esp->es_class = CLASS_EXTENDED_SENSE;
30458 30454 }
30459 30455
30460 30456 /* set un if it was on fifo */
30461 30457 if (fi_un != NULL) {
30462 30458 SD_CONDSET(un->un_sd->sd_inq, un, inq_rmb, "inq_rmb");
30463 30459 SD_CONDSET(un, un, un_ctype, "un_ctype");
30464 30460 SD_CONDSET(un, un, un_reset_retry_count,
30465 30461 "un_reset_retry_count");
30466 30462 SD_CONDSET(un, un, un_reservation_type, "un_reservation_type");
30467 30463 SD_CONDSET(un, un, un_resvd_status, "un_resvd_status");
30468 30464 SD_CONDSET(un, un, un_f_arq_enabled, "un_f_arq_enabled");
30469 30465 SD_CONDSET(un, un, un_f_allow_bus_device_reset,
30470 30466 "un_f_allow_bus_device_reset");
30471 30467 SD_CONDSET(un, un, un_f_opt_queueing, "un_f_opt_queueing");
30472 30468
30473 30469 }
30474 30470
30475 30471 /* copy in auto request sense if it was on fifo */
30476 30472 if (fi_arq != NULL) {
30477 30473 bcopy(fi_arq, pktp->pkt_scbp, sizeof (struct sd_fi_arq));
30478 30474 }
30479 30475
30480 30476 /* free structs */
30481 30477 if (un->sd_fi_fifo_pkt[i] != NULL) {
30482 30478 kmem_free(un->sd_fi_fifo_pkt[i], sizeof (struct sd_fi_pkt));
30483 30479 }
30484 30480 if (un->sd_fi_fifo_xb[i] != NULL) {
30485 30481 kmem_free(un->sd_fi_fifo_xb[i], sizeof (struct sd_fi_xb));
30486 30482 }
30487 30483 if (un->sd_fi_fifo_un[i] != NULL) {
30488 30484 kmem_free(un->sd_fi_fifo_un[i], sizeof (struct sd_fi_un));
30489 30485 }
30490 30486 if (un->sd_fi_fifo_arq[i] != NULL) {
30491 30487 kmem_free(un->sd_fi_fifo_arq[i], sizeof (struct sd_fi_arq));
30492 30488 }
30493 30489
30494 30490 /*
30495 30491 	 * kmem_free does not guarantee to set the pointer to NULL.
30496 30492 	 * Since we use these pointers to determine whether we set
30497 30493 	 * values or not, let's confirm they are always
30498 30494 	 * NULL after the free.
30499 30495 */
30500 30496 un->sd_fi_fifo_pkt[i] = NULL;
30501 30497 un->sd_fi_fifo_un[i] = NULL;
30502 30498 un->sd_fi_fifo_xb[i] = NULL;
30503 30499 un->sd_fi_fifo_arq[i] = NULL;
30504 30500
30505 30501 un->sd_fi_fifo_start++;
30506 30502
30507 30503 mutex_exit(SD_MUTEX(un));
30508 30504
30509 30505 SD_INFO(SD_LOG_SDTEST, un, "sd_faultinjection: exit\n");
30510 30506 }
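/*
 * A stand-alone model of the fifo indexing used above: SDIOCPUSH
 * advances sd_fi_fifo_end (producer) and sd_faultinjection() advances
 * sd_fi_fifo_start (consumer); both indices grow monotonically and are
 * mapped into the fixed-size arrays with "% SD_FI_MAX_ERROR". A minimal
 * sketch of the consumer side (FI_MAX stands in for SD_FI_MAX_ERROR):
 */
#include <sys/types.h>

#define	FI_MAX	256

struct fi_ring {
	void	*slot[FI_MAX];
	uint_t	start;			/* next entry to consume */
	uint_t	end;			/* next free entry to fill */
};

static void *
fi_pop(struct fi_ring *r)
{
	void *ent;

	if (r->start == r->end)		/* empty: injection is a no-op */
		return (NULL);
	ent = r->slot[r->start % FI_MAX];
	r->slot[r->start % FI_MAX] = NULL;
	r->start++;
	return (ent);
}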
30511 30507
30512 30508 #endif /* SD_FAULT_INJECTION */
30513 30509
30514 30510 /*
30515 30511 * This routine is invoked in sd_unit_attach(). Before calling it, the
30516 30512  * properties in the conf file should already have been processed, including
30517 30513  * the "hotpluggable" property.
30518 30514 *
30519 30515  * The sd driver distinguishes 3 different types of devices: removable media,
30520 30516 * non-removable media, and hotpluggable. Below the differences are defined:
30521 30517 *
30522 30518 * 1. Device ID
30523 30519 *
30524 30520 * The device ID of a device is used to identify this device. Refer to
30525 30521 * ddi_devid_register(9F).
30526 30522 *
30527 30523 * For a non-removable media disk device which can provide 0x80 or 0x83
30528 30524 * VPD page (refer to INQUIRY command of SCSI SPC specification), a unique
30529 30525 * device ID is created to identify this device. For other non-removable
30530 30526 * media devices, a default device ID is created only if this device has
30531 30527  * at least 2 alternate cylinders. Otherwise, this device has no devid.
30532 30528 *
30533 30529 * -------------------------------------------------------
30534 30530 * removable media hotpluggable | Can Have Device ID
30535 30531 * -------------------------------------------------------
30536 30532 * false false | Yes
30537 30533 * false true | Yes
30538 30534 * true x | No
30539 30535 * ------------------------------------------------------
30540 30536 *
30541 30537 *
30542 30538 * 2. SCSI group 4 commands
30543 30539 *
30544 30540  * In the SCSI specs, only some commands in the group 4 command set can
30545 30541  * use 8-byte addresses to access >2TB storage spaces.
30546 30542  * Other commands have no such capability. Without supporting group 4,
30547 30543  * it is impossible to make full use of the storage space of a disk with
30548 30544  * a capacity larger than 2TB.
30549 30545 *
30550 30546 * -----------------------------------------------
30551 30547 * removable media hotpluggable LP64 | Group
30552 30548 * -----------------------------------------------
30553 30549 * false false false | 1
30554 30550 * false false true | 4
30555 30551 * false true false | 1
30556 30552 * false true true | 4
30557 30553 * true x x | 5
30558 30554 * -----------------------------------------------
30559 30555 *
30560 30556 *
30561 30557 * 3. Check for VTOC Label
30562 30558 *
30563 30559 * If a direct-access disk has no EFI label, sd will check if it has a
30564 30560 * valid VTOC label. Now, sd also does that check for removable media
30565 30561 * and hotpluggable devices.
30566 30562 *
30567 30563 * --------------------------------------------------------------
30568 30564 * Direct-Access removable media hotpluggable | Check Label
30569 30565 * -------------------------------------------------------------
30570 30566 * false false false | No
30571 30567 * false false true | No
30572 30568 * false true false | Yes
30573 30569 * false true true | Yes
30574 30570 * true x x | Yes
30575 30571 * --------------------------------------------------------------
30576 30572 *
30577 30573 *
30578 30574 * 4. Building default VTOC label
30579 30575 *
30580 30576  * As section 3 says, sd checks whether some kinds of devices have a VTOC
30581 30577  * label. If those devices have no valid VTOC label, sd(7d) will attempt to
30582 30578  * create a default VTOC label for them. Currently sd creates a default
30583 30579  * VTOC label for all devices on the x86 platform (VTOC_16), but only for
30584 30580  * removable media devices on SPARC (VTOC_8).
30585 30581 *
30586 30582 * -----------------------------------------------------------
30587 30583 * removable media hotpluggable platform | Default Label
30588 30584 * -----------------------------------------------------------
30589 30585 * false false sparc | No
30590 30586 * false true x86 | Yes
30591 30587 * false true sparc | Yes
30592 30588 * true x x | Yes
30593 30589 * ----------------------------------------------------------
30594 30590 *
30595 30591 *
30596 30592 * 5. Supported blocksizes of target devices
30597 30593 *
30598 30594 * Sd supports non-512-byte blocksize for removable media devices only.
30599 30595 * For other devices, only 512-byte blocksize is supported. This may be
30600 30596  * changed in the near future because some RAID devices require a
30601 30597  * non-512-byte blocksize.
30602 30598 *
30603 30599 * -----------------------------------------------------------
30604 30600 * removable media hotpluggable | non-512-byte blocksize
30605 30601 * -----------------------------------------------------------
30606 30602 * false false | No
30607 30603 * false true | No
30608 30604 * true x | Yes
30609 30605 * -----------------------------------------------------------
30610 30606 *
30611 30607 *
30612 30608 * 6. Automatic mount & unmount
30613 30609 *
30614 30610  * The sd(7d) driver provides the DKIOCREMOVABLE ioctl. This ioctl is used
30615 30611  * to query if a device is a removable media device. It returns 1 for
30616 30612  * removable media devices, and 0 for others.
30617 30613 *
30618 30614 * The automatic mounting subsystem should distinguish between the types
30619 30615 * of devices and apply automounting policies to each.
30620 30616 *
30621 30617 *
30622 30618 * 7. fdisk partition management
30623 30619 *
30624 30620  * Fdisk is the traditional partition method on the x86 platform. The
30625 30621  * sd(7d) driver supports fdisk partitions only on x86. On the SPARC
30626 30622  * platform, sd doesn't support fdisk partitions at all. Note: pcfs(7fs)
30627 30623  * can recognize fdisk partitions on both the x86 and SPARC platforms.
30628 30624 *
30629 30625 * -----------------------------------------------------------
30630 30626 * platform removable media USB/1394 | fdisk supported
30631 30627 * -----------------------------------------------------------
30632 30628 * x86 X X | true
30633 30629 * ------------------------------------------------------------
30634 30630 * sparc X X | false
30635 30631 * ------------------------------------------------------------
30636 30632 *
30637 30633 *
30638 30634 * 8. MBOOT/MBR
30639 30635 *
30640 30636  * Although sd(7d) doesn't support fdisk on the SPARC platform, it does
30641 30637  * support reading/writing the mboot for removable media devices on SPARC.
30642 30638 *
30643 30639 * -----------------------------------------------------------
30644 30640 * platform removable media USB/1394 | mboot supported
30645 30641 * -----------------------------------------------------------
30646 30642 * x86 X X | true
30647 30643 * ------------------------------------------------------------
30648 30644 * sparc false false | false
30649 30645 * sparc false true | true
30650 30646 * sparc true false | true
30651 30647 * sparc true true | true
30652 30648 * ------------------------------------------------------------
30653 30649 *
30654 30650 *
30655 30651 * 9. error handling during opening device
30656 30652 *
30657 30653  * If opening a disk device fails, an errno is returned. For some kinds
30658 30654  * of errors, a different errno is returned depending on whether this device is
30659 30655 * a removable media device. This brings USB/1394 hard disks in line with
30660 30656 * expected hard disk behavior. It is not expected that this breaks any
30661 30657 * application.
30662 30658 *
30663 30659 * ------------------------------------------------------
30664 30660 * removable media hotpluggable | errno
30665 30661 * ------------------------------------------------------
30666 30662 * false false | EIO
30667 30663 * false true | EIO
30668 30664 * true x | ENXIO
30669 30665 * ------------------------------------------------------
30670 30666 *
30671 30667 *
30672 30668 * 11. ioctls: DKIOCEJECT, CDROMEJECT
30673 30669 *
30674 30670 * These IOCTLs are applicable only to removable media devices.
30675 30671 *
30676 30672 * -----------------------------------------------------------
30677 30673 * removable media hotpluggable |DKIOCEJECT, CDROMEJECT
30678 30674 * -----------------------------------------------------------
30679 30675 * false false | No
30680 30676 * false true | No
30681 30677 * true x | Yes
30682 30678 * -----------------------------------------------------------
30683 30679 *
30684 30680 *
30685 30681 * 12. Kstats for partitions
30686 30682 *
30687 30683  * sd creates partition kstats for non-removable media devices. USB and
30688 30684  * Firewire hard disks now have partition kstats as well.
30689 30685 *
30690 30686 * ------------------------------------------------------
30691 30687 * removable media hotpluggable | kstat
30692 30688 * ------------------------------------------------------
30693 30689 * false false | Yes
30694 30690 * false true | Yes
30695 30691 * true x | No
30696 30692 * ------------------------------------------------------
30697 30693 *
30698 30694 *
30699 30695 * 13. Removable media & hotpluggable properties
30700 30696 *
30701 30697 * Sd driver creates a "removable-media" property for removable media
30702 30698  * devices. A parent nexus driver creates a "hotpluggable" property if
30703 30699  * it supports hotplugging.
30704 30700 *
30705 30701 * ---------------------------------------------------------------------
30706 30702  * removable media hotpluggable | "removable-media"  "hotpluggable"
30707 30703 * ---------------------------------------------------------------------
30708 30704 * false false | No No
30709 30705 * false true | No Yes
30710 30706 * true false | Yes No
30711 30707 * true true | Yes Yes
30712 30708 * ---------------------------------------------------------------------
30713 30709 *
30714 30710 *
30715 30711 * 14. Power Management
30716 30712 *
30717 30713 * sd only power manages removable media devices or devices that support
30718 30714 * LOG_SENSE or have a "pm-capable" property (PSARC/2002/250)
30719 30715 *
30720 30716 * A parent nexus that supports hotplugging can also set "pm-capable"
30721 30717 * if the disk can be power managed.
30722 30718 *
30723 30719 * ------------------------------------------------------------
30724 30720 * removable media hotpluggable pm-capable | power manage
30725 30721 * ------------------------------------------------------------
30726 30722 * false false false | No
30727 30723 * false false true | Yes
30728 30724 * false true false | No
30729 30725 * false true true | Yes
30730 30726 * true x x | Yes
30731 30727 * ------------------------------------------------------------
30732 30728 *
30733 30729 * USB and firewire hard disks can now be power managed independently
30734 30730 * of the framebuffer
30735 30731 *
30736 30732 *
30737 30733 * 15. Support for USB disks with capacity larger than 1TB
30738 30734 *
30739 30735 * Currently, sd doesn't permit a fixed disk device with capacity
30740 30736 * larger than 1TB to be used in a 32-bit operating system environment.
30741 30737  * However, sd doesn't enforce that for removable media devices. Instead, it
30742 30738  * assumes that removable media devices cannot have a capacity larger
30743 30739  * than 1TB. Therefore, using those devices on a 32-bit system is partially
30744 30740 * supported, which can cause some unexpected results.
30745 30741 *
30746 30742 * ---------------------------------------------------------------------
30747 30743 * removable media USB/1394 | Capacity > 1TB | Used in 32-bit env
30748 30744 * ---------------------------------------------------------------------
30749 30745 * false false | true | no
30750 30746 * false true | true | no
30751 30747 * true false | true | Yes
30752 30748 * true true | true | Yes
30753 30749 * ---------------------------------------------------------------------
30754 30750 *
30755 30751 *
30756 30752 * 16. Check write-protection at open time
30757 30753 *
30758 30754  * When a removable media device is being opened for writing without the
30759 30755  * NDELAY flag, sd will check if this device is writable. If attempting to
30760 30756  * open a write-protected device without the NDELAY flag, the open will fail.
30761 30757 *
30762 30758 * ------------------------------------------------------------
30763 30759 * removable media USB/1394 | WP Check
30764 30760 * ------------------------------------------------------------
30765 30761 * false false | No
30766 30762 * false true | No
30767 30763 * true false | Yes
30768 30764 * true true | Yes
30769 30765 * ------------------------------------------------------------
30770 30766 *
30771 30767 *
30772 30768 * 17. syslog when corrupted VTOC is encountered
30773 30769 *
30774 30770  * Currently, if an invalid VTOC is encountered, sd only prints a syslog
30775 30771  * message for fixed SCSI disks.
30776 30772 * ------------------------------------------------------------
30777 30773 * removable media USB/1394 | print syslog
30778 30774 * ------------------------------------------------------------
30779 30775 * false false | Yes
30780 30776 * false true | No
30781 30777 * true false | No
30782 30778 * true true | No
30783 30779 * ------------------------------------------------------------
30784 30780 */
30785 30781 static void
30786 30782 sd_set_unit_attributes(struct sd_lun *un, dev_info_t *devi)
30787 30783 {
30788 30784 int pm_cap;
30789 30785
30790 30786 ASSERT(un->un_sd);
30791 30787 ASSERT(un->un_sd->sd_inq);
30792 30788
30793 30789 /*
30794 30790 * Enable SYNC CACHE support for all devices.
30795 30791 */
30796 30792 un->un_f_sync_cache_supported = TRUE;
30797 30793
30798 30794 /*
30799 30795 * Set the sync cache required flag to false.
30800 30796 	 * This ensures that no SYNC CACHE command is
30801 30797 	 * sent when there have been no writes.
30802 30798 */
30803 30799 un->un_f_sync_cache_required = FALSE;
30804 30800
30805 30801 if (un->un_sd->sd_inq->inq_rmb) {
30806 30802 /*
30807 30803 		 * The media of this device is removable, and for this kind
30808 30804 		 * of device it is possible to change the medium after the
30809 30805 		 * device has been opened. Thus we should support this operation.
30810 30806 */
30811 30807 un->un_f_has_removable_media = TRUE;
30812 30808
30813 30809 /*
30814 30810 		 * Support non-512-byte blocksizes for removable media devices.
30815 30811 */
30816 30812 un->un_f_non_devbsize_supported = TRUE;
30817 30813
30818 30814 /*
30819 30815 * Assume that all removable media devices support DOOR_LOCK
30820 30816 */
30821 30817 un->un_f_doorlock_supported = TRUE;
30822 30818
30823 30819 /*
30824 30820 		 * A removable media device may be opened with the NDELAY
30825 30821 		 * flag when there is no media in the drive; in this case we
30826 30822 		 * don't care if the device is writable. But without the NDELAY
30827 30823 		 * flag, we need to check if the media is write-protected.
30828 30824 */
30829 30825 un->un_f_chk_wp_open = TRUE;
30830 30826
30831 30827 /*
30832 30828 		 * Need to start a SCSI watch thread to monitor the media state;
30833 30829 		 * when media is inserted or ejected, notify syseventd.
30834 30830 */
30835 30831 un->un_f_monitor_media_state = TRUE;
30836 30832
30837 30833 /*
30838 30834 		 * Some devices don't support the START_STOP_UNIT command.
30839 30835 		 * Therefore, we should check whether a device supports it
30840 30836 * before sending it.
30841 30837 */
30842 30838 un->un_f_check_start_stop = TRUE;
30843 30839
30844 30840 /*
30845 30841 * support eject media ioctl:
30846 30842 * FDEJECT, DKIOCEJECT, CDROMEJECT
30847 30843 */
30848 30844 un->un_f_eject_media_supported = TRUE;
30849 30845
30850 30846 /*
30851 30847 * Because many removable-media devices don't support
30852 30848 		 * LOG_SENSE, we can't use this command to check if
30853 30849 		 * a removable media device supports power management.
30854 30850 * We assume that they support power-management via
30855 30851 * START_STOP_UNIT command and can be spun up and down
30856 30852 * without limitations.
30857 30853 */
30858 30854 un->un_f_pm_supported = TRUE;
30859 30855
30860 30856 /*
30861 30857 * Need to create a zero length (Boolean) property
30862 30858 * removable-media for the removable media devices.
30863 30859 		 * Note that the return value of ddi_prop_create() is not
30864 30860 		 * checked: if we are unable to create the property, we do
30865 30861 		 * not want the attach to fail altogether. This is consistent
30866 30862 		 * with other property creation in attach.
30867 30863 */
30868 30864 (void) ddi_prop_create(DDI_DEV_T_NONE, devi,
30869 30865 DDI_PROP_CANSLEEP, "removable-media", NULL, 0);
30870 30866
30871 30867 } else {
30872 30868 /*
30873 30869 * create device ID for device
30874 30870 */
30875 30871 un->un_f_devid_supported = TRUE;
30876 30872
30877 30873 /*
30878 30874 * Spin up non-removable-media devices once it is attached
30879 30875 		 * Spin up non-removable-media devices once they are attached.
30880 30876 un->un_f_attach_spinup = TRUE;
30881 30877
30882 30878 /*
30883 30879 		 * According to the SCSI specification, sense data has two
30884 30880 		 * formats: fixed format and descriptor format. At present, we
30885 30881 * don't support descriptor format sense data for removable
30886 30882 * media.
30887 30883 */
30888 30884 if (SD_INQUIRY(un)->inq_dtype == DTYPE_DIRECT) {
30889 30885 un->un_f_descr_format_supported = TRUE;
30890 30886 }
30891 30887
30892 30888 /*
30893 30889 * kstats are created only for non-removable media devices.
30894 30890 *
30895 30891 * Set this in sd.conf to 0 in order to disable kstats. The
30896 30892 * default is 1, so they are enabled by default.
30897 30893 */
30898 30894 un->un_f_pkstats_enabled = (ddi_prop_get_int(DDI_DEV_T_ANY,
30899 30895 SD_DEVINFO(un), DDI_PROP_DONTPASS,
30900 30896 "enable-partition-kstats", 1));
30901 30897
30902 30898 /*
30903 30899 * Check if HBA has set the "pm-capable" property.
30904 30900 * If "pm-capable" exists and is non-zero then we can
30905 30901 * power manage the device without checking the start/stop
30906 30902 * cycle count log sense page.
30907 30903 *
30908 30904 * If "pm-capable" exists and is set to be false (0),
30909 30905 * then we should not power manage the device.
30910 30906 *
30911 30907 * If "pm-capable" doesn't exist then pm_cap will
30912 30908 * be set to SD_PM_CAPABLE_UNDEFINED (-1). In this case,
30913 30909 * sd will check the start/stop cycle count log sense page
30914 30910 * and power manage the device if the cycle count limit has
30915 30911 * not been exceeded.
30916 30912 */
30917 30913 pm_cap = ddi_prop_get_int(DDI_DEV_T_ANY, devi,
30918 30914 DDI_PROP_DONTPASS, "pm-capable", SD_PM_CAPABLE_UNDEFINED);
30919 30915 if (SD_PM_CAPABLE_IS_UNDEFINED(pm_cap)) {
30920 30916 un->un_f_log_sense_supported = TRUE;
30921 30917 if (!un->un_f_power_condition_disabled &&
30922 30918 SD_INQUIRY(un)->inq_ansi == 6) {
30923 30919 un->un_f_power_condition_supported = TRUE;
30924 30920 }
30925 30921 } else {
30926 30922 /*
30927 30923 * pm-capable property exists.
30928 30924 *
30929 30925 * Convert "TRUE" values for pm_cap to
30930 30926 * SD_PM_CAPABLE_IS_TRUE to make it easier to check
30931 30927 * later. "TRUE" values are any values defined in
30932 30928 * inquiry.h.
30933 30929 */
30934 30930 if (SD_PM_CAPABLE_IS_FALSE(pm_cap)) {
30935 30931 un->un_f_log_sense_supported = FALSE;
30936 30932 } else {
30937 30933 /* SD_PM_CAPABLE_IS_TRUE case */
30938 30934 un->un_f_pm_supported = TRUE;
30939 30935 if (!un->un_f_power_condition_disabled &&
30940 30936 SD_PM_CAPABLE_IS_SPC_4(pm_cap)) {
30941 30937 un->un_f_power_condition_supported =
30942 30938 TRUE;
30943 30939 }
30944 30940 if (SD_PM_CAP_LOG_SUPPORTED(pm_cap)) {
30945 30941 un->un_f_log_sense_supported = TRUE;
30946 30942 un->un_f_pm_log_sense_smart =
30947 30943 SD_PM_CAP_SMART_LOG(pm_cap);
30948 30944 }
30949 30945 }
30950 30946
30951 30947 SD_INFO(SD_LOG_ATTACH_DETACH, un,
30952 30948 "sd_unit_attach: un:0x%p pm-capable "
30953 30949 "property set to %d.\n", un, un->un_f_pm_supported);
30954 30950 }
30955 30951 }
30956 30952
30957 30953 if (un->un_f_is_hotpluggable) {
30958 30954
30959 30955 /*
30960 30956 * Have to watch hotpluggable devices as well, since
30961 30957 * that's the only way for userland applications to
30962 30958 * detect hot removal while device is busy/mounted.
30963 30959 		 * detect hot removal while the device is busy/mounted.
30964 30960 un->un_f_monitor_media_state = TRUE;
30965 30961
30966 30962 un->un_f_check_start_stop = TRUE;
30967 30963
30968 30964 }
30969 30965 }
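/*
 * For context, a hedged sketch of the producer side of the "pm-capable"
 * handshake consumed above: a parent nexus (HBA) driver would typically
 * publish the property on the child node it creates. The function name,
 * placement, and value shown are illustrative only; sd interprets
 * non-zero values via the SD_PM_CAPABLE_* macros.
 */
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cmn_err.h>

static void
example_set_pm_capable(dev_info_t *child, int pm_cap_val)
{
	if (ddi_prop_update_int(DDI_DEV_T_NONE, child,
	    "pm-capable", pm_cap_val) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "!failed to create pm-capable property");
	}
}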
30970 30966
30971 30967 /*
30972 30968 * sd_tg_rdwr:
30973 30969 * Provides rdwr access for cmlb via sd_tgops. The start_block is
30974 30970  *	in sys block size, reqlength in bytes.
30975 30971 *
30976 30972 */
30977 30973 static int
30978 30974 sd_tg_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
30979 30975 diskaddr_t start_block, size_t reqlength, void *tg_cookie)
30980 30976 {
30981 30977 struct sd_lun *un;
30982 30978 int path_flag = (int)(uintptr_t)tg_cookie;
30983 30979 char *dkl = NULL;
30984 30980 diskaddr_t real_addr = start_block;
30985 30981 diskaddr_t first_byte, end_block;
30986 30982
30987 30983 size_t buffer_size = reqlength;
30988 30984 int rval = 0;
30989 30985 diskaddr_t cap;
30990 30986 uint32_t lbasize;
30991 30987 sd_ssc_t *ssc;
30992 30988
30993 30989 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
30994 30990 if (un == NULL)
30995 30991 return (ENXIO);
30996 30992
30997 30993 if (cmd != TG_READ && cmd != TG_WRITE)
30998 30994 return (EINVAL);
30999 30995
31000 30996 ssc = sd_ssc_init(un);
31001 30997 mutex_enter(SD_MUTEX(un));
31002 30998 if (un->un_f_tgt_blocksize_is_valid == FALSE) {
31003 30999 mutex_exit(SD_MUTEX(un));
31004 31000 rval = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
31005 31001 &lbasize, path_flag);
31006 31002 if (rval != 0)
31007 31003 goto done1;
31008 31004 mutex_enter(SD_MUTEX(un));
31009 31005 sd_update_block_info(un, lbasize, cap);
31010 31006 		if (un->un_f_tgt_blocksize_is_valid == FALSE) {
31011 31007 mutex_exit(SD_MUTEX(un));
31012 31008 rval = EIO;
31013 31009 goto done;
31014 31010 }
31015 31011 }
31016 31012
31017 31013 if (NOT_DEVBSIZE(un)) {
31018 31014 /*
31019 31015 * sys_blocksize != tgt_blocksize, need to re-adjust
31020 31016 * blkno and save the index to beginning of dk_label
31021 31017 */
31022 31018 first_byte = SD_SYSBLOCKS2BYTES(start_block);
31023 31019 real_addr = first_byte / un->un_tgt_blocksize;
31024 31020
31025 31021 end_block = (first_byte + reqlength +
31026 31022 un->un_tgt_blocksize - 1) / un->un_tgt_blocksize;
31027 31023
31028 31024 /* round up buffer size to multiple of target block size */
31029 31025 buffer_size = (end_block - real_addr) * un->un_tgt_blocksize;
31030 31026
31031 31027 SD_TRACE(SD_LOG_IO_PARTITION, un, "sd_tg_rdwr",
31032 31028 "label_addr: 0x%x allocation size: 0x%x\n",
31033 31029 real_addr, buffer_size);
31034 31030
31035 31031 if (((first_byte % un->un_tgt_blocksize) != 0) ||
31036 31032 (reqlength % un->un_tgt_blocksize) != 0)
31037 31033 /* the request is not aligned */
31038 31034 dkl = kmem_zalloc(buffer_size, KM_SLEEP);
31039 31035 }
31040 31036
31041 31037 /*
31042 31038 * The MMC standard allows READ CAPACITY to be
31043 31039 * inaccurate by a bounded amount (in the interest of
31044 31040 * response latency). As a result, failed READs are
31045 31041 * commonplace (due to the reading of metadata and not
31046 31042 * data). Depending on the per-Vendor/drive Sense data,
31047 31043 * the failed READ can cause many (unnecessary) retries.
31048 31044 */
31049 31045
31050 31046 if (ISCD(un) && (cmd == TG_READ) &&
31051 31047 (un->un_f_blockcount_is_valid == TRUE) &&
31052 31048 	    ((start_block == (un->un_blockcount - 1)) ||
31053 31049 (start_block == (un->un_blockcount - 2)))) {
31054 31050 path_flag = SD_PATH_DIRECT_PRIORITY;
31055 31051 }
31056 31052
31057 31053 mutex_exit(SD_MUTEX(un));
31058 31054 if (cmd == TG_READ) {
31059 31055 		rval = sd_send_scsi_READ(ssc, (dkl != NULL) ? dkl : bufaddr,
31060 31056 buffer_size, real_addr, path_flag);
31061 31057 if (dkl != NULL)
31062 31058 bcopy(dkl + SD_TGTBYTEOFFSET(un, start_block,
31063 31059 real_addr), bufaddr, reqlength);
31064 31060 } else {
31065 31061 if (dkl) {
31066 31062 rval = sd_send_scsi_READ(ssc, dkl, buffer_size,
31067 31063 real_addr, path_flag);
31068 31064 if (rval) {
31069 31065 goto done1;
31070 31066 }
31071 31067 bcopy(bufaddr, dkl + SD_TGTBYTEOFFSET(un, start_block,
31072 31068 real_addr), reqlength);
31073 31069 }
31074 31070 		rval = sd_send_scsi_WRITE(ssc, (dkl != NULL) ? dkl : bufaddr,
31075 31071 buffer_size, real_addr, path_flag);
31076 31072 }
31077 31073
31078 31074 done1:
31079 31075 if (dkl != NULL)
31080 31076 kmem_free(dkl, buffer_size);
31081 31077
31082 31078 if (rval != 0) {
31083 31079 if (rval == EIO)
31084 31080 sd_ssc_assessment(ssc, SD_FMT_STATUS_CHECK);
31085 31081 else
31086 31082 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
31087 31083 }
31088 31084 done:
31089 31085 sd_ssc_fini(ssc);
31090 31086 return (rval);
31091 31087 }
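/*
 * A worked example of the unaligned-access arithmetic above, assuming a
 * 4096-byte un_tgt_blocksize and a 512-byte system block (DEV_BSIZE):
 * a read of reqlength = 512 bytes at start_block = 3 gives
 *
 *	first_byte  = 3 * 512                    = 1536
 *	real_addr   = 1536 / 4096                = 0	(target block)
 *	end_block   = (1536 + 512 + 4095) / 4096 = 1	(exclusive)
 *	buffer_size = (1 - 0) * 4096             = 4096
 *
 * Since first_byte % 4096 != 0, the request is unaligned: a bounce
 * buffer (dkl) is allocated, the whole target block is read, and only
 * the requested 512 bytes are copied out at byte offset 1536 within it
 * (SD_TGTBYTEOFFSET). Writes take the extra read-merge-write path seen
 * in the TG_WRITE branch.
 */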
31092 31088
31093 31089
31094 31090 static int
31095 31091 sd_tg_getinfo(dev_info_t *devi, int cmd, void *arg, void *tg_cookie)
31096 31092 {
31097 31093
31098 31094 struct sd_lun *un;
31099 31095 diskaddr_t cap;
31100 31096 uint32_t lbasize;
31101 31097 int path_flag = (int)(uintptr_t)tg_cookie;
31102 31098 int ret = 0;
31103 31099
31104 31100 un = ddi_get_soft_state(sd_state, ddi_get_instance(devi));
31105 31101 if (un == NULL)
31106 31102 return (ENXIO);
31107 31103
31108 31104 switch (cmd) {
31109 31105 case TG_GETPHYGEOM:
31110 31106 case TG_GETVIRTGEOM:
31111 31107 case TG_GETCAPACITY:
31112 31108 case TG_GETBLOCKSIZE:
31113 31109 mutex_enter(SD_MUTEX(un));
31114 31110
31115 31111 if ((un->un_f_blockcount_is_valid == TRUE) &&
31116 31112 (un->un_f_tgt_blocksize_is_valid == TRUE)) {
31117 31113 cap = un->un_blockcount;
31118 31114 lbasize = un->un_tgt_blocksize;
31119 31115 mutex_exit(SD_MUTEX(un));
31120 31116 } else {
31121 31117 sd_ssc_t *ssc;
31122 31118 mutex_exit(SD_MUTEX(un));
31123 31119 ssc = sd_ssc_init(un);
31124 31120 ret = sd_send_scsi_READ_CAPACITY(ssc, (uint64_t *)&cap,
31125 31121 &lbasize, path_flag);
31126 31122 if (ret != 0) {
31127 31123 if (ret == EIO)
31128 31124 sd_ssc_assessment(ssc,
31129 31125 SD_FMT_STATUS_CHECK);
31130 31126 else
31131 31127 sd_ssc_assessment(ssc,
31132 31128 SD_FMT_IGNORE);
31133 31129 sd_ssc_fini(ssc);
31134 31130 return (ret);
31135 31131 }
31136 31132 sd_ssc_fini(ssc);
31137 31133 mutex_enter(SD_MUTEX(un));
31138 31134 sd_update_block_info(un, lbasize, cap);
31139 31135 if ((un->un_f_blockcount_is_valid == FALSE) ||
31140 31136 (un->un_f_tgt_blocksize_is_valid == FALSE)) {
31141 31137 mutex_exit(SD_MUTEX(un));
31142 31138 return (EIO);
31143 31139 }
31144 31140 mutex_exit(SD_MUTEX(un));
31145 31141 }
31146 31142
31147 31143 if (cmd == TG_GETCAPACITY) {
31148 31144 *(diskaddr_t *)arg = cap;
31149 31145 return (0);
31150 31146 }
31151 31147
31152 31148 if (cmd == TG_GETBLOCKSIZE) {
31153 31149 *(uint32_t *)arg = lbasize;
31154 31150 return (0);
31155 31151 }
31156 31152
31157 31153 if (cmd == TG_GETPHYGEOM)
31158 31154 ret = sd_get_physical_geometry(un, (cmlb_geom_t *)arg,
31159 31155 cap, lbasize, path_flag);
31160 31156 else
31161 31157 /* TG_GETVIRTGEOM */
31162 31158 ret = sd_get_virtual_geometry(un,
31163 31159 (cmlb_geom_t *)arg, cap, lbasize);
31164 31160
31165 31161 return (ret);
31166 31162
31167 31163 case TG_GETATTR:
31168 31164 mutex_enter(SD_MUTEX(un));
31169 31165 ((tg_attribute_t *)arg)->media_is_writable =
31170 31166 un->un_f_mmc_writable_media;
31171 31167 ((tg_attribute_t *)arg)->media_is_solid_state =
31172 31168 un->un_f_is_solid_state;
31173 31169 mutex_exit(SD_MUTEX(un));
31174 31170 return (0);
31175 31171 default:
31176 31172 return (ENOTTY);
31177 31173
31178 31174 }
31179 31175 }
31180 31176
31181 31177 /*
31182 31178 * Function: sd_ssc_ereport_post
31183 31179 *
31184 31180  * Description: Will be called when the SD driver needs to post an ereport.
31185 31181 *
31186 31182 * Context: Kernel thread or interrupt context.
31187 31183 */
31188 31184
31189 31185 #define DEVID_IF_KNOWN(d) "devid", DATA_TYPE_STRING, (d) ? (d) : "unknown"
31190 31186
31191 31187 static void
31192 31188 sd_ssc_ereport_post(sd_ssc_t *ssc, enum sd_driver_assessment drv_assess)
31193 31189 {
31194 31190 int uscsi_path_instance = 0;
31195 31191 uchar_t uscsi_pkt_reason;
31196 31192 uint32_t uscsi_pkt_state;
31197 31193 uint32_t uscsi_pkt_statistics;
31198 31194 uint64_t uscsi_ena;
31199 31195 uchar_t op_code;
31200 31196 uint8_t *sensep;
31201 31197 union scsi_cdb *cdbp;
31202 31198 uint_t cdblen = 0;
31203 31199 uint_t senlen = 0;
31204 31200 struct sd_lun *un;
31205 31201 dev_info_t *dip;
31206 31202 char *devid;
31207 31203 int ssc_invalid_flags = SSC_FLAGS_INVALID_PKT_REASON |
31208 31204 SSC_FLAGS_INVALID_STATUS |
31209 31205 SSC_FLAGS_INVALID_SENSE |
31210 31206 SSC_FLAGS_INVALID_DATA;
31211 31207 char assessment[16];
31212 31208
31213 31209 ASSERT(ssc != NULL);
31214 31210 ASSERT(ssc->ssc_uscsi_cmd != NULL);
31215 31211 ASSERT(ssc->ssc_uscsi_info != NULL);
31216 31212
31217 31213 un = ssc->ssc_un;
31218 31214 ASSERT(un != NULL);
31219 31215
31220 31216 dip = un->un_sd->sd_dev;
31221 31217
31222 31218 /*
31223 31219 * Get the devid:
31224 31220 * devid will only be passed to non-transport error reports.
31225 31221 */
31226 31222 devid = DEVI(dip)->devi_devid_str;
31227 31223
31228 31224 /*
31229 31225 * If we are syncing or dumping, the command will not be executed
31230 31226 * so we bypass this situation.
31231 31227 */
31232 31228 if (ddi_in_panic() || (un->un_state == SD_STATE_SUSPENDED) ||
31233 31229 (un->un_state == SD_STATE_DUMPING))
31234 31230 return;
31235 31231
31236 31232 uscsi_pkt_reason = ssc->ssc_uscsi_info->ui_pkt_reason;
31237 31233 uscsi_path_instance = ssc->ssc_uscsi_cmd->uscsi_path_instance;
31238 31234 uscsi_pkt_state = ssc->ssc_uscsi_info->ui_pkt_state;
31239 31235 uscsi_pkt_statistics = ssc->ssc_uscsi_info->ui_pkt_statistics;
31240 31236 uscsi_ena = ssc->ssc_uscsi_info->ui_ena;
31241 31237
31242 31238 sensep = (uint8_t *)ssc->ssc_uscsi_cmd->uscsi_rqbuf;
31243 31239 cdbp = (union scsi_cdb *)ssc->ssc_uscsi_cmd->uscsi_cdb;
31244 31240
31245 31241 	/* In rare cases, e.g. DOORLOCK, the cdb could be NULL */
31246 31242 if (cdbp == NULL) {
31247 31243 scsi_log(SD_DEVINFO(un), sd_label, CE_WARN,
31248 31244 		    "sd_ssc_ereport_post encountered an empty cdb\n");
31249 31245 return;
31250 31246 }
31251 31247
31252 31248 op_code = cdbp->scc_cmd;
31253 31249
31254 31250 cdblen = (int)ssc->ssc_uscsi_cmd->uscsi_cdblen;
31255 31251 senlen = (int)(ssc->ssc_uscsi_cmd->uscsi_rqlen -
31256 31252 ssc->ssc_uscsi_cmd->uscsi_rqresid);
31257 31253
31258 31254 if (senlen > 0)
31259 31255 ASSERT(sensep != NULL);
31260 31256
31261 31257 /*
31262 31258 * Initialize drv_assess to corresponding values.
31263 31259 * SD_FM_DRV_FATAL will be mapped to "fail" or "fatal" depending
31264 31260 * on the sense-key returned back.
31265 31261 */
31266 31262 switch (drv_assess) {
31267 31263 case SD_FM_DRV_RECOVERY:
31268 31264 (void) sprintf(assessment, "%s", "recovered");
31269 31265 break;
31270 31266 case SD_FM_DRV_RETRY:
31271 31267 (void) sprintf(assessment, "%s", "retry");
31272 31268 break;
31273 31269 case SD_FM_DRV_NOTICE:
31274 31270 (void) sprintf(assessment, "%s", "info");
31275 31271 break;
31276 31272 case SD_FM_DRV_FATAL:
31277 31273 default:
31278 31274 (void) sprintf(assessment, "%s", "unknown");
31279 31275 }
31280 31276 /*
31281 31277 * If drv_assess == SD_FM_DRV_RECOVERY, this should be a recovered
31282 31278 * command, we will post ereport.io.scsi.cmd.disk.recovered.
31283 31279 * driver-assessment will always be "recovered" here.
31284 31280 */
31285 31281 if (drv_assess == SD_FM_DRV_RECOVERY) {
31286 31282 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
31287 31283 "cmd.disk.recovered", uscsi_ena, devid, NULL,
31288 31284 DDI_NOSLEEP, NULL,
31289 31285 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31290 31286 DEVID_IF_KNOWN(devid),
31291 31287 "driver-assessment", DATA_TYPE_STRING, assessment,
31292 31288 "op-code", DATA_TYPE_UINT8, op_code,
31293 31289 "cdb", DATA_TYPE_UINT8_ARRAY,
31294 31290 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31295 31291 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31296 31292 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
31297 31293 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
31298 31294 NULL);
31299 31295 return;
31300 31296 }
31301 31297
31302 31298 /*
31303 31299 	 * If there is unexpected/undecodable data, we should post
31304 31300 * ereport.io.scsi.cmd.disk.dev.uderr.
31305 31301 * driver-assessment will be set based on parameter drv_assess.
31306 31302 * SSC_FLAGS_INVALID_SENSE - invalid sense data sent back.
31307 31303 * SSC_FLAGS_INVALID_PKT_REASON - invalid pkt-reason encountered.
31308 31304 * SSC_FLAGS_INVALID_STATUS - invalid stat-code encountered.
31309 31305 * SSC_FLAGS_INVALID_DATA - invalid data sent back.
31310 31306 */
31311 31307 if (ssc->ssc_flags & ssc_invalid_flags) {
31312 31308 if (ssc->ssc_flags & SSC_FLAGS_INVALID_SENSE) {
31313 31309 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
31314 31310 NULL, "cmd.disk.dev.uderr", uscsi_ena, devid,
31315 31311 NULL, DDI_NOSLEEP, NULL,
31316 31312 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31317 31313 DEVID_IF_KNOWN(devid),
31318 31314 "driver-assessment", DATA_TYPE_STRING,
31319 31315 drv_assess == SD_FM_DRV_FATAL ?
31320 31316 "fail" : assessment,
31321 31317 "op-code", DATA_TYPE_UINT8, op_code,
31322 31318 "cdb", DATA_TYPE_UINT8_ARRAY,
31323 31319 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31324 31320 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31325 31321 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
31326 31322 "pkt-stats", DATA_TYPE_UINT32,
31327 31323 uscsi_pkt_statistics,
31328 31324 "stat-code", DATA_TYPE_UINT8,
31329 31325 ssc->ssc_uscsi_cmd->uscsi_status,
31330 31326 "un-decode-info", DATA_TYPE_STRING,
31331 31327 ssc->ssc_info,
31332 31328 "un-decode-value", DATA_TYPE_UINT8_ARRAY,
31333 31329 senlen, sensep,
31334 31330 NULL);
31335 31331 } else {
31336 31332 /*
31337 31333 * For other type of invalid data, the
31338 31334 * un-decode-value field would be empty because the
31339 31335 * un-decodable content could be seen from upper
31340 31336 * level payload or inside un-decode-info.
31341 31337 */
31342 31338 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
31343 31339 NULL,
31344 31340 "cmd.disk.dev.uderr", uscsi_ena, devid,
31345 31341 NULL, DDI_NOSLEEP, NULL,
31346 31342 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31347 31343 DEVID_IF_KNOWN(devid),
31348 31344 "driver-assessment", DATA_TYPE_STRING,
31349 31345 drv_assess == SD_FM_DRV_FATAL ?
31350 31346 "fail" : assessment,
31351 31347 "op-code", DATA_TYPE_UINT8, op_code,
31352 31348 "cdb", DATA_TYPE_UINT8_ARRAY,
31353 31349 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31354 31350 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31355 31351 "pkt-state", DATA_TYPE_UINT32, uscsi_pkt_state,
31356 31352 "pkt-stats", DATA_TYPE_UINT32,
31357 31353 uscsi_pkt_statistics,
31358 31354 "stat-code", DATA_TYPE_UINT8,
31359 31355 ssc->ssc_uscsi_cmd->uscsi_status,
31360 31356 "un-decode-info", DATA_TYPE_STRING,
31361 31357 ssc->ssc_info,
31362 31358 "un-decode-value", DATA_TYPE_UINT8_ARRAY,
31363 31359 0, NULL,
31364 31360 NULL);
31365 31361 }
31366 31362 ssc->ssc_flags &= ~ssc_invalid_flags;
31367 31363 return;
31368 31364 }
31369 31365
31370 31366 if (uscsi_pkt_reason != CMD_CMPLT ||
31371 31367 (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)) {
31372 31368 /*
31373 31369 * pkt-reason != CMD_CMPLT or SSC_FLAGS_TRAN_ABORT was
31374 31370 	 * set inside sd_start_cmds due to errors (bad packet or
31375 31371 	 * fatal transport error), we should treat it as a
31376 31372 * transport error, so we post ereport.io.scsi.cmd.disk.tran.
31377 31373 * driver-assessment will be set based on drv_assess.
31378 31374 * We will set devid to NULL because it is a transport
31379 31375 * error.
31380 31376 */
31381 31377 if (ssc->ssc_flags & SSC_FLAGS_TRAN_ABORT)
31382 31378 ssc->ssc_flags &= ~SSC_FLAGS_TRAN_ABORT;
31383 31379
31384 31380 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance, NULL,
31385 31381 "cmd.disk.tran", uscsi_ena, NULL, NULL, DDI_NOSLEEP, NULL,
31386 31382 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31387 31383 DEVID_IF_KNOWN(devid),
31388 31384 "driver-assessment", DATA_TYPE_STRING,
31389 31385 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
31390 31386 "op-code", DATA_TYPE_UINT8, op_code,
31391 31387 "cdb", DATA_TYPE_UINT8_ARRAY,
31392 31388 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31393 31389 "pkt-reason", DATA_TYPE_UINT8, uscsi_pkt_reason,
31394 31390 "pkt-state", DATA_TYPE_UINT8, uscsi_pkt_state,
31395 31391 "pkt-stats", DATA_TYPE_UINT32, uscsi_pkt_statistics,
31396 31392 NULL);
31397 31393 } else {
31398 31394 /*
31399 31395 * If we got here, we have a completed command, and we need
31400 31396 * to further investigate the sense data to see what kind
31401 31397 * of ereport we should post.
31402 31398 * Post ereport.io.scsi.cmd.disk.dev.rqs.merr
31403 31399 * if sense-key == 0x3.
31404 31400 * Post ereport.io.scsi.cmd.disk.dev.rqs.derr otherwise.
31405 31401 * driver-assessment will be set based on the parameter
31406 31402 * drv_assess.
31407 31403 */
31408 31404 if (senlen > 0) {
31409 31405 /*
31410 31406 * Here we have sense data available.
31411 31407 */
31412 31408 uint8_t sense_key;
31413 31409 sense_key = scsi_sense_key(sensep);
31414 31410 if (sense_key == 0x3) {
31415 31411 /*
31416 31412 * sense-key == 0x3(medium error),
31417 31413 * driver-assessment should be "fatal" if
31418 31414 * drv_assess is SD_FM_DRV_FATAL.
31419 31415 */
31420 31416 scsi_fm_ereport_post(un->un_sd,
31421 31417 uscsi_path_instance, NULL,
31422 31418 "cmd.disk.dev.rqs.merr",
31423 31419 uscsi_ena, devid, NULL, DDI_NOSLEEP, NULL,
31424 31420 FM_VERSION, DATA_TYPE_UINT8,
31425 31421 FM_EREPORT_VERS0,
31426 31422 DEVID_IF_KNOWN(devid),
31427 31423 "driver-assessment",
31428 31424 DATA_TYPE_STRING,
31429 31425 drv_assess == SD_FM_DRV_FATAL ?
31430 31426 "fatal" : assessment,
31431 31427 "op-code",
31432 31428 DATA_TYPE_UINT8, op_code,
31433 31429 "cdb",
31434 31430 DATA_TYPE_UINT8_ARRAY, cdblen,
31435 31431 ssc->ssc_uscsi_cmd->uscsi_cdb,
31436 31432 "pkt-reason",
31437 31433 DATA_TYPE_UINT8, uscsi_pkt_reason,
31438 31434 "pkt-state",
31439 31435 DATA_TYPE_UINT8, uscsi_pkt_state,
31440 31436 "pkt-stats",
31441 31437 DATA_TYPE_UINT32,
31442 31438 uscsi_pkt_statistics,
31443 31439 "stat-code",
31444 31440 DATA_TYPE_UINT8,
31445 31441 ssc->ssc_uscsi_cmd->uscsi_status,
31446 31442 "key",
31447 31443 DATA_TYPE_UINT8,
31448 31444 scsi_sense_key(sensep),
31449 31445 "asc",
31450 31446 DATA_TYPE_UINT8,
31451 31447 scsi_sense_asc(sensep),
31452 31448 "ascq",
31453 31449 DATA_TYPE_UINT8,
31454 31450 scsi_sense_ascq(sensep),
31455 31451 "sense-data",
31456 31452 DATA_TYPE_UINT8_ARRAY,
31457 31453 senlen, sensep,
31458 31454 "lba",
31459 31455 DATA_TYPE_UINT64,
31460 31456 ssc->ssc_uscsi_info->ui_lba,
31461 31457 NULL);
31462 31458 } else {
31463 31459 /*
31464 31460 				 * if sense-key == 0x4 (hardware
31465 31461 * error), driver-assessment should
31466 31462 * be "fatal" if drv_assess is
31467 31463 * SD_FM_DRV_FATAL.
31468 31464 */
31469 31465 scsi_fm_ereport_post(un->un_sd,
31470 31466 uscsi_path_instance, NULL,
31471 31467 "cmd.disk.dev.rqs.derr",
31472 31468 uscsi_ena, devid,
31473 31469 NULL, DDI_NOSLEEP, NULL,
31474 31470 FM_VERSION,
31475 31471 DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31476 31472 DEVID_IF_KNOWN(devid),
31477 31473 "driver-assessment",
31478 31474 DATA_TYPE_STRING,
31479 31475 drv_assess == SD_FM_DRV_FATAL ?
31480 31476 (sense_key == 0x4 ?
31481 31477 "fatal" : "fail") : assessment,
31482 31478 "op-code",
31483 31479 DATA_TYPE_UINT8, op_code,
31484 31480 "cdb",
31485 31481 DATA_TYPE_UINT8_ARRAY, cdblen,
31486 31482 ssc->ssc_uscsi_cmd->uscsi_cdb,
31487 31483 "pkt-reason",
31488 31484 DATA_TYPE_UINT8, uscsi_pkt_reason,
31489 31485 "pkt-state",
31490 31486 DATA_TYPE_UINT8, uscsi_pkt_state,
31491 31487 "pkt-stats",
31492 31488 DATA_TYPE_UINT32,
31493 31489 uscsi_pkt_statistics,
31494 31490 "stat-code",
31495 31491 DATA_TYPE_UINT8,
31496 31492 ssc->ssc_uscsi_cmd->uscsi_status,
31497 31493 "key",
31498 31494 DATA_TYPE_UINT8,
31499 31495 scsi_sense_key(sensep),
31500 31496 "asc",
31501 31497 DATA_TYPE_UINT8,
31502 31498 scsi_sense_asc(sensep),
31503 31499 "ascq",
31504 31500 DATA_TYPE_UINT8,
31505 31501 scsi_sense_ascq(sensep),
31506 31502 "sense-data",
31507 31503 DATA_TYPE_UINT8_ARRAY,
31508 31504 senlen, sensep,
31509 31505 NULL);
31510 31506 }
31511 31507 } else {
31512 31508 /*
31513 31509 * For stat_code == STATUS_GOOD, this is not a
31514 31510 * hardware error.
31515 31511 */
31516 31512 if (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD)
31517 31513 return;
31518 31514
31519 31515 /*
31520 31516 * Post ereport.io.scsi.cmd.disk.dev.serr if we got the
31521 31517 * stat-code but with sense data unavailable.
31522 31518 * driver-assessment will be set based on parameter
31523 31519 * drv_assess.
31524 31520 */
31525 31521 scsi_fm_ereport_post(un->un_sd, uscsi_path_instance,
31526 31522 NULL,
31527 31523 "cmd.disk.dev.serr", uscsi_ena,
31528 31524 devid, NULL, DDI_NOSLEEP, NULL,
31529 31525 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0,
31530 31526 DEVID_IF_KNOWN(devid),
31531 31527 "driver-assessment", DATA_TYPE_STRING,
31532 31528 drv_assess == SD_FM_DRV_FATAL ? "fail" : assessment,
31533 31529 "op-code", DATA_TYPE_UINT8, op_code,
31534 31530 "cdb",
31535 31531 DATA_TYPE_UINT8_ARRAY,
31536 31532 cdblen, ssc->ssc_uscsi_cmd->uscsi_cdb,
31537 31533 "pkt-reason",
31538 31534 DATA_TYPE_UINT8, uscsi_pkt_reason,
31539 31535 "pkt-state",
31540 31536 DATA_TYPE_UINT8, uscsi_pkt_state,
31541 31537 "pkt-stats",
31542 31538 DATA_TYPE_UINT32, uscsi_pkt_statistics,
31543 31539 "stat-code",
31544 31540 DATA_TYPE_UINT8,
31545 31541 ssc->ssc_uscsi_cmd->uscsi_status,
31546 31542 NULL);
31547 31543 }
31548 31544 }
31549 31545 }
31550 31546
31551 31547 /*
31552 31548 * Function: sd_ssc_extract_info
31553 31549 *
31554 31550 * Description: Extract information available to help generate ereport.
31555 31551 *
31556 31552 * Context: Kernel thread or interrupt context.
31557 31553 */
31558 31554 static void
31559 31555 sd_ssc_extract_info(sd_ssc_t *ssc, struct sd_lun *un, struct scsi_pkt *pktp,
31560 31556 struct buf *bp, struct sd_xbuf *xp)
31561 31557 {
31562 31558 size_t senlen = 0;
31563 31559 union scsi_cdb *cdbp;
31564 31560 int path_instance;
31565 31561 /*
31566 31562 * Need scsi_cdb_size array to determine the cdb length.
31567 31563 */
31568 31564 extern uchar_t scsi_cdb_size[];
31569 31565
31570 31566 ASSERT(un != NULL);
31571 31567 ASSERT(pktp != NULL);
31572 31568 ASSERT(bp != NULL);
31573 31569 ASSERT(xp != NULL);
31574 31570 ASSERT(ssc != NULL);
31575 31571 ASSERT(mutex_owned(SD_MUTEX(un)));
31576 31572
31577 31573 /*
31578 31574 * Transfer the cdb buffer pointer here.
31579 31575 */
31580 31576 cdbp = (union scsi_cdb *)pktp->pkt_cdbp;
31581 31577
31582 31578 ssc->ssc_uscsi_cmd->uscsi_cdblen = scsi_cdb_size[GETGROUP(cdbp)];
31583 31579 ssc->ssc_uscsi_cmd->uscsi_cdb = (caddr_t)cdbp;
31584 31580
31585 31581 /*
31586 31582 	 * Transfer the sense data buffer pointer if sense data is available;
31587 31583 * calculate the sense data length first.
31588 31584 */
31589 31585 if ((xp->xb_sense_state & STATE_XARQ_DONE) ||
31590 31586 (xp->xb_sense_state & STATE_ARQ_DONE)) {
31591 31587 /*
31592 31588 * For arq case, we will enter here.
31593 31589 */
31594 31590 if (xp->xb_sense_state & STATE_XARQ_DONE) {
31595 31591 senlen = MAX_SENSE_LENGTH - xp->xb_sense_resid;
31596 31592 } else {
31597 31593 senlen = SENSE_LENGTH;
31598 31594 }
31599 31595 } else {
31600 31596 /*
31601 31597 * For non-arq case, we will enter this branch.
31602 31598 */
31603 31599 if (SD_GET_PKT_STATUS(pktp) == STATUS_CHECK &&
31604 31600 (xp->xb_sense_state & STATE_XFERRED_DATA)) {
31605 31601 senlen = SENSE_LENGTH - xp->xb_sense_resid;
31606 31602 }
31607 31603
31608 31604 }
31609 31605
31610 31606 ssc->ssc_uscsi_cmd->uscsi_rqlen = (senlen & 0xff);
31611 31607 ssc->ssc_uscsi_cmd->uscsi_rqresid = 0;
31612 31608 ssc->ssc_uscsi_cmd->uscsi_rqbuf = (caddr_t)xp->xb_sense_data;
31613 31609
31614 31610 ssc->ssc_uscsi_cmd->uscsi_status = ((*(pktp)->pkt_scbp) & STATUS_MASK);
31615 31611
31616 31612 /*
31617 31613 * Only transfer path_instance when scsi_pkt was properly allocated.
31618 31614 */
31619 31615 path_instance = pktp->pkt_path_instance;
31620 31616 if (scsi_pkt_allocated_correctly(pktp) && path_instance)
31621 31617 ssc->ssc_uscsi_cmd->uscsi_path_instance = path_instance;
31622 31618 else
31623 31619 ssc->ssc_uscsi_cmd->uscsi_path_instance = 0;
31624 31620
31625 31621 /*
31626 31622 * Copy in the other fields we may need when posting ereport.
31627 31623 */
31628 31624 ssc->ssc_uscsi_info->ui_pkt_reason = pktp->pkt_reason;
31629 31625 ssc->ssc_uscsi_info->ui_pkt_state = pktp->pkt_state;
31630 31626 ssc->ssc_uscsi_info->ui_pkt_statistics = pktp->pkt_statistics;
31631 31627 ssc->ssc_uscsi_info->ui_lba = (uint64_t)SD_GET_BLKNO(bp);
31632 31628
31633 31629 /*
31634 31630 	 * For a partial read/write command, we will not create an ena,
31635 31631 	 * so that a successful command is not recognized as recovered.
31636 31632 */
31637 31633 if ((pktp->pkt_reason == CMD_CMPLT) &&
31638 31634 (ssc->ssc_uscsi_cmd->uscsi_status == STATUS_GOOD) &&
31639 31635 (senlen == 0)) {
31640 31636 return;
31641 31637 }
31642 31638
31643 31639 /*
31644 31640 * To associate ereports of a single command execution flow, we
31645 31641 * need a shared ena for a specific command.
31646 31642 */
31647 31643 if (xp->xb_ena == 0)
31648 31644 xp->xb_ena = fm_ena_generate(0, FM_ENA_FMT1);
31649 31645 ssc->ssc_uscsi_info->ui_ena = xp->xb_ena;
31650 31646 }
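/*
 * A small numeric illustration of the sense-length logic above: with
 * STATE_XARQ_DONE set and, say, a 252-byte MAX_SENSE_LENGTH (the value
 * here is only for illustration) and xb_sense_resid = 200, senlen
 * becomes 252 - 200 = 52 valid sense bytes. uscsi_rqlen is then 52 and
 * uscsi_rqresid 0, so sd_ssc_ereport_post() recovers the same 52 bytes
 * via uscsi_rqlen - uscsi_rqresid.
 */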
31651 31647
31652 31648
31653 31649 /*
31654 31650 * Function: sd_check_solid_state
31655 31651 *
31656 31652 * Description: Query the optional INQUIRY VPD page 0xb1. If the device
31657 31653 * supports VPD page 0xb1, sd examines the MEDIUM ROTATION
31658 31654 * RATE. If the MEDIUM ROTATION RATE is 1, sd assumes the
31659 31655 * device is a solid state drive.
31660 31656 *
31661 31657 * Context: Kernel thread or interrupt context.
31662 31658 */
31663 31659
31664 31660 static void
31665 31661 sd_check_solid_state(sd_ssc_t *ssc)
31666 31662 {
31667 31663 int rval = 0;
31668 31664 uchar_t *inqb1 = NULL;
31669 31665 size_t inqb1_len = MAX_INQUIRY_SIZE;
31670 31666 size_t inqb1_resid = 0;
31671 31667 struct sd_lun *un;
31672 31668
31673 31669 ASSERT(ssc != NULL);
31674 31670 un = ssc->ssc_un;
31675 31671 ASSERT(un != NULL);
31676 31672 ASSERT(!mutex_owned(SD_MUTEX(un)));
31677 31673
31678 31674 mutex_enter(SD_MUTEX(un));
31679 31675 un->un_f_is_solid_state = FALSE;
31680 31676
31681 31677 if (ISCD(un)) {
31682 31678 mutex_exit(SD_MUTEX(un));
31683 31679 return;
31684 31680 }
31685 31681
31686 31682 if (sd_check_vpd_page_support(ssc) == 0 &&
31687 31683 un->un_vpd_page_mask & SD_VPD_DEV_CHARACTER_PG) {
31688 31684 mutex_exit(SD_MUTEX(un));
31689 31685 /* collect page b1 data */
31690 31686 inqb1 = kmem_zalloc(inqb1_len, KM_SLEEP);
31691 31687
31692 31688 rval = sd_send_scsi_INQUIRY(ssc, inqb1, inqb1_len,
31693 31689 0x01, 0xB1, &inqb1_resid);
31694 31690
31695 31691 if (rval == 0 && (inqb1_len - inqb1_resid > 5)) {
31696 31692 SD_TRACE(SD_LOG_COMMON, un,
31697 31693 		    "sd_check_solid_state: "
31698 31694 		    "successfully got VPD page: %x "
31699 31695 		    "PAGE LENGTH: %x BYTE 4: %x "
31700 31696 		    "BYTE 5: %x", inqb1[1], inqb1[3], inqb1[4],
31701 31697 inqb1[5]);
31702 31698
31703 31699 mutex_enter(SD_MUTEX(un));
31704 31700 /*
31705 31701 * Check the MEDIUM ROTATION RATE. If it is set
31706 31702 * to 1, the device is a solid state drive.
31707 31703 */
31708 31704 if (inqb1[4] == 0 && inqb1[5] == 1) {
31709 31705 un->un_f_is_solid_state = TRUE;
31710 31706 /* solid state drives don't need disksort */
31711 31707 un->un_f_disksort_disabled = TRUE;
31712 31708 }
31713 31709 mutex_exit(SD_MUTEX(un));
31714 31710 } else if (rval != 0) {
31715 31711 sd_ssc_assessment(ssc, SD_FMT_IGNORE);
31716 31712 }
31717 31713
31718 31714 kmem_free(inqb1, inqb1_len);
31719 31715 } else {
31720 31716 mutex_exit(SD_MUTEX(un));
31721 31717 }
31722 31718 }
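/*
 * A minimal sketch of the VPD page 0xB1 decode performed above. Per
 * SBC-3, bytes 4-5 of the Block Device Characteristics page hold the
 * MEDIUM ROTATION RATE: 0x0000 = not reported, 0x0001 = non-rotating
 * medium (solid state), 0x0401-0xFFFE = nominal rotation rate in RPM.
 */
#include <sys/types.h>

static boolean_t
example_vpd_b1_is_ssd(const uchar_t *page)
{
	uint16_t rate = (page[4] << 8) | page[5];

	return (rate == 1 ? B_TRUE : B_FALSE);
}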
31723 31719
31724 31720 /*
31725 31721 * Function: sd_check_emulation_mode
31726 31722 *
31727 31723  * Description: Check whether the SSD is in emulation mode
31728 31724  *		by issuing READ_CAPACITY_16 to see whether
31729 31725  *		we can get the physical block size of the drive.
31730 31726 *
31731 31727 * Context: Kernel thread or interrupt context.
31732 31728 */
31733 31729
31734 31730 static void
31735 31731 sd_check_emulation_mode(sd_ssc_t *ssc)
31736 31732 {
31737 31733 int rval = 0;
31738 31734 uint64_t capacity;
31739 31735 uint_t lbasize;
31740 31736 uint_t pbsize;
31741 31737 int i;
31742 31738 int devid_len;
31743 31739 struct sd_lun *un;
31744 31740
31745 31741 ASSERT(ssc != NULL);
31746 31742 un = ssc->ssc_un;
31747 31743 ASSERT(un != NULL);
31748 31744 ASSERT(!mutex_owned(SD_MUTEX(un)));
31749 31745
31750 31746 mutex_enter(SD_MUTEX(un));
31751 31747 if (ISCD(un)) {
31752 31748 mutex_exit(SD_MUTEX(un));
31753 31749 return;
31754 31750 }
31755 31751
31756 31752 if (un->un_f_descr_format_supported) {
31757 31753 mutex_exit(SD_MUTEX(un));
31758 31754 rval = sd_send_scsi_READ_CAPACITY_16(ssc, &capacity, &lbasize,
31759 31755 &pbsize, SD_PATH_DIRECT);
31760 31756 mutex_enter(SD_MUTEX(un));
31761 31757
31762 31758 if (rval != 0) {
31763 31759 un->un_phy_blocksize = DEV_BSIZE;
31764 31760 } else {
31765 31761 if (!ISP2(pbsize % DEV_BSIZE) || pbsize == 0) {
31766 31762 un->un_phy_blocksize = DEV_BSIZE;
31767 31763 } else if (pbsize > un->un_phy_blocksize) {
31768 31764 /*
31769 31765 * Don't reset the physical blocksize
31770 31766 * unless we've detected a larger value.
31771 31767 */
31772 31768 un->un_phy_blocksize = pbsize;
31773 31769 }
31774 31770 }
31775 31771 }
31776 31772
31777 31773 for (i = 0; i < sd_flash_dev_table_size; i++) {
31778 31774 devid_len = (int)strlen(sd_flash_dev_table[i]);
31779 31775 if (sd_sdconf_id_match(un, sd_flash_dev_table[i], devid_len)
31780 31776 == SD_SUCCESS) {
31781 31777 un->un_phy_blocksize = SSD_SECSIZE;
31782 31778 if (un->un_f_is_solid_state &&
31783 31779 un->un_phy_blocksize != un->un_tgt_blocksize)
31784 31780 un->un_f_enable_rmw = TRUE;
31785 31781 }
31786 31782 }
31787 31783
31788 31784 mutex_exit(SD_MUTEX(un));
31789 31785 }
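/*
 * A brief note on the un_f_enable_rmw condition above: when a matched
 * flash device's physical block size (SSD_SECSIZE, assumed 4096 here
 * for illustration) exceeds its logical un_tgt_blocksize (e.g. 512),
 * writes smaller than a physical block force a read-modify-write
 * cycle: read 4096 bytes, merge the 512 written bytes, write 4096
 * back. Setting un_f_enable_rmw tells sd to perform that alignment
 * handling itself.
 */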
19098 lines elided