1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 * Copyright (c) 2011 Bayard G. Bell. All rights reserved. 25 */ 26 27 28 /* 29 * SCSA HBA nexus driver that emulates an HBA connected to SCSI target 30 * devices (large disks). 
31 */ 32 33 #ifdef DEBUG 34 #define EMUL64DEBUG 35 #endif 36 37 #include <sys/scsi/scsi.h> 38 #include <sys/ddi.h> 39 #include <sys/sunddi.h> 40 #include <sys/taskq.h> 41 #include <sys/disp.h> 42 #include <sys/types.h> 43 #include <sys/buf.h> 44 #include <sys/cpuvar.h> 45 #include <sys/dklabel.h> 46 47 #include <sys/emul64.h> 48 #include <sys/emul64cmd.h> 49 #include <sys/emul64var.h> 50 51 int emul64_usetaskq = 1; /* set to zero for debugging */ 52 int emul64debug = 0; 53 #ifdef EMUL64DEBUG 54 static int emul64_cdb_debug = 0; 55 #include <sys/debug.h> 56 #endif 57 58 /* 59 * cb_ops function prototypes 60 */ 61 static int emul64_ioctl(dev_t, int cmd, intptr_t arg, int mode, 62 cred_t *credp, int *rvalp); 63 64 /* 65 * dev_ops functions prototypes 66 */ 67 static int emul64_info(dev_info_t *dip, ddi_info_cmd_t infocmd, 68 void *arg, void **result); 69 static int emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 70 static int emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 71 72 /* 73 * Function prototypes 74 * 75 * SCSA functions exported by means of the transport table 76 */ 77 static int emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip, 78 scsi_hba_tran_t *tran, struct scsi_device *sd); 79 static int emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt); 80 static void emul64_pkt_comp(void *); 81 static int emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt); 82 static int emul64_scsi_reset(struct scsi_address *ap, int level); 83 static int emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom); 84 static int emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value, 85 int whom); 86 static struct scsi_pkt *emul64_scsi_init_pkt(struct scsi_address *ap, 87 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen, 88 int tgtlen, int flags, int (*callback)(), caddr_t arg); 89 static void emul64_scsi_destroy_pkt(struct scsi_address *ap, 90 struct scsi_pkt *pkt); 91 static void 
emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt); 92 static void emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt); 93 static int emul64_scsi_reset_notify(struct scsi_address *ap, int flag, 94 void (*callback)(caddr_t), caddr_t arg); 95 96 /* 97 * internal functions 98 */ 99 static void emul64_i_initcap(struct emul64 *emul64); 100 101 static void emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...); 102 static int emul64_get_tgtrange(struct emul64 *, 103 intptr_t, 104 emul64_tgt_t **, 105 emul64_tgt_range_t *); 106 static int emul64_write_off(struct emul64 *, 107 emul64_tgt_t *, 108 emul64_tgt_range_t *); 109 static int emul64_write_on(struct emul64 *, 110 emul64_tgt_t *, 111 emul64_tgt_range_t *); 112 static emul64_nowrite_t *emul64_nowrite_alloc(emul64_range_t *); 113 static void emul64_nowrite_free(emul64_nowrite_t *); 114 static emul64_nowrite_t *emul64_find_nowrite(emul64_tgt_t *, 115 diskaddr_t start_block, 116 size_t blkcnt, 117 emul64_rng_overlap_t *overlapp, 118 emul64_nowrite_t ***prevp); 119 120 extern emul64_tgt_t *find_tgt(struct emul64 *, ushort_t, ushort_t); 121 122 #ifdef EMUL64DEBUG 123 static void emul64_debug_dump_cdb(struct scsi_address *ap, 124 struct scsi_pkt *pkt); 125 #endif 126 127 128 #ifdef _DDICT 129 static int ddi_in_panic(void); 130 static int ddi_in_panic() { return (0); } 131 #ifndef SCSI_CAP_RESET_NOTIFICATION 132 #define SCSI_CAP_RESET_NOTIFICATION 14 133 #endif 134 #ifndef SCSI_RESET_NOTIFY 135 #define SCSI_RESET_NOTIFY 0x01 136 #endif 137 #ifndef SCSI_RESET_CANCEL 138 #define SCSI_RESET_CANCEL 0x02 139 #endif 140 #endif 141 142 /* 143 * Tunables: 144 * 145 * emul64_max_task 146 * The taskq facility is used to queue up SCSI start requests on a per 147 * controller basis. If the maximum number of queued tasks is hit, 148 * taskq_ent_alloc() delays for a second, which adversely impacts our 149 * performance. 
This value establishes the maximum number of task
 *	queue entries when taskq_create is called.
 *
 * emul64_task_nthreads
 *	Specifies the number of threads that should be used to process a
 *	controller's task queue.  Our init function sets this to the number
 *	of CPUs on the system, but this can be overridden in emul64.conf.
 */
int emul64_max_task = 16;
int emul64_task_nthreads = 1;

/*
 * Local static data
 */
/* Per-instance soft-state anchor managed by ddi_soft_state_*(9F). */
static void *emul64_state = NULL;

/*
 * Character/block operations.
 *
 * Only open/close/ioctl are meaningful for this pseudo HBA; every other
 * entry point is nodev.  The ioctl entry is how user-level utilities
 * configure the emulated targets (see emul64_ioctl).
 */
static struct cb_ops emul64_cbops = {
	scsi_hba_open,		/* cb_open */
	scsi_hba_close,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	emul64_ioctl,		/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_str */
	D_MP | D_64BIT | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

/*
 * autoconfiguration routines.
 */

/*
 * Device operations: standard SCSA HBA nexus dev_ops.  There is no real
 * hardware behind this driver, so no power management and no quiesce
 * requirement.
 */
static struct dev_ops emul64_ops = {
	DEVO_REV,			/* rev, */
	0,				/* refcnt */
	emul64_info,			/* getinfo */
	nulldev,			/* identify */
	nulldev,			/* probe */
	emul64_attach,			/* attach */
	emul64_detach,			/* detach */
	nodev,				/* reset */
	&emul64_cbops,			/* char/block ops */
	NULL,				/* bus ops */
	NULL,				/* power */
	ddi_quiesce_not_needed,		/* quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,			/* module type - driver */
	"emul64 SCSI Host Bus Adapter",	/* module name */
	&emul64_ops,			/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,			/* ml_rev - must be MODREV_1 */
	{ &modldrv, NULL }		/* ml_linkage */
};

/*
 * _init(9E): initialize soft-state management, register with the SCSA
 * framework, size the completion taskq thread pool, then install the
 * module.  Each setup step is unwound in reverse order on failure.
 */
int
_init(void)
{
	int ret;

	ret = ddi_soft_state_init(&emul64_state, sizeof (struct emul64),
	    EMUL64_INITIAL_SOFT_SPACE);
	if (ret != 0)
		return (ret);

	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
		ddi_soft_state_fini(&emul64_state);
		return (ret);
	}

	/* Set the number of task threads to the number of CPUs */
	if (boot_max_ncpus == -1) {
		emul64_task_nthreads = max_ncpus;
	} else {
		emul64_task_nthreads = boot_max_ncpus;
	}

	emul64_bsd_init();

	ret = mod_install(&modlinkage);
	if (ret != 0) {
		/* Undo everything done above, in reverse order. */
		emul64_bsd_fini();
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini(&emul64_state);
	}

	return (ret);
}

/*
 * _fini(9E): remove the module and tear down in the reverse order of
 * _init.  Fails (leaving the module loaded) if mod_remove fails.
 */
int
_fini(void)
{
	int ret;

	if ((ret = mod_remove(&modlinkage)) != 0)
		return (ret);

	emul64_bsd_fini();

	scsi_hba_fini(&modlinkage);

	ddi_soft_state_fini(&emul64_state);

	return (ret);
}

/*
 * _info(9E): report module information.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Given the device number return the devinfo pointer
 * from the scsi_device structure.
 */
/*ARGSUSED*/
static int
emul64_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	struct emul64 *foo;
	/* The minor number encodes the instance for this driver. */
	int instance = getminor((dev_t)arg);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		foo = ddi_get_soft_state(emul64_state, instance);
		if (foo != NULL)
			*result = (void *)foo->emul64_dip;
		else {
			*result = NULL;
			return (DDI_FAILURE);
		}
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * Attach an instance of an emul64 host adapter.  Allocate data structures,
 * initialize the emul64 and we're on the air.
 */
/*ARGSUSED*/
static int
emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		mutex_initted = 0;	/* for unwinding on failure */
	struct emul64	*emul64;
	int		instance;
	scsi_hba_tran_t	*tran = NULL;
	ddi_dma_attr_t	tmp_dma_attr;

	emul64_bsd_get_props(dip);

	bzero((void *) &tmp_dma_attr, sizeof (tmp_dma_attr));
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		/* Resume only revalidates the saved transport handle. */
		tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
		if (!tran) {
			return (DDI_FAILURE);
		}
		emul64 = TRAN2EMUL64(tran);

		return (DDI_SUCCESS);

	default:
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Cmd != DDI_ATTACH/DDI_RESUME", instance);
		return (DDI_FAILURE);
	}

	/*
	 * Allocate emul64 data structure.
	 */
	if (ddi_soft_state_zalloc(emul64_state, instance) != DDI_SUCCESS) {
		emul64_i_log(NULL, CE_WARN,
		    "emul64%d: Failed to alloc soft state",
		    instance);
		return (DDI_FAILURE);
	}

	emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
	if (emul64 == (struct emul64 *)NULL) {
		emul64_i_log(NULL, CE_WARN, "emul64%d: Bad soft state",
		    instance);
		ddi_soft_state_free(emul64_state, instance);
		return (DDI_FAILURE);
	}


	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL) {
		cmn_err(CE_WARN, "emul64: scsi_hba_tran_alloc failed\n");
		goto fail;
	}

	emul64->emul64_tran = tran;
	emul64->emul64_dip = dip;

	/* Wire up the SCSA entry points for this instance. */
	tran->tran_hba_private = emul64;
	tran->tran_tgt_private = NULL;
	tran->tran_tgt_init = emul64_tran_tgt_init;
	tran->tran_tgt_probe = scsi_hba_probe;
	tran->tran_tgt_free = NULL;

	tran->tran_start = emul64_scsi_start;
	tran->tran_abort = emul64_scsi_abort;
	tran->tran_reset = emul64_scsi_reset;
	tran->tran_getcap = emul64_scsi_getcap;
	tran->tran_setcap = emul64_scsi_setcap;
	tran->tran_init_pkt = emul64_scsi_init_pkt;
	tran->tran_destroy_pkt = emul64_scsi_destroy_pkt;
	tran->tran_dmafree = emul64_scsi_dmafree;
	tran->tran_sync_pkt = emul64_scsi_sync_pkt;
	tran->tran_reset_notify = emul64_scsi_reset_notify;

	tmp_dma_attr.dma_attr_minxfer = 0x1;
	tmp_dma_attr.dma_attr_burstsizes = 0x7f;

	/*
	 * Attach this instance of the hba
	 */
	if (scsi_hba_attach_setup(dip, &tmp_dma_attr, tran,
	    0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "emul64: scsi_hba_attach failed\n");
		goto fail;
	}

	emul64->emul64_initiator_id = 2;

	/*
	 * Look up the scsi-options property
	 */
	emul64->emul64_scsi_options =
	    ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-options",
	    EMUL64_DEFAULT_SCSI_OPTIONS);
	EMUL64_DEBUG(emul64,
	    SCSI_DEBUG, "emul64 scsi-options=%x",
	    emul64->emul64_scsi_options);


	/* mutexes to protect the emul64 request and response queue */
	mutex_init(EMUL64_REQ_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);
	mutex_init(EMUL64_RESP_MUTEX(emul64), NULL, MUTEX_DRIVER,
	    emul64->emul64_iblock);

	mutex_initted = 1;

	EMUL64_MUTEX_ENTER(emul64);

	/*
	 * Initialize the default Target Capabilities and Sync Rates
	 */
	emul64_i_initcap(emul64);

	EMUL64_MUTEX_EXIT(emul64);


	ddi_report_dev(dip);
	/* Completion taskq; depth bounded by the emul64_max_task tunable. */
	emul64->emul64_taskq = taskq_create("emul64_comp",
	    emul64_task_nthreads, MINCLSYSPRI, 1, emul64_max_task, 0);

	return (DDI_SUCCESS);

fail:
	emul64_i_log(NULL, CE_WARN, "emul64%d: Unable to attach", instance);

	/* Unwind whatever was set up before the failure. */
	if (mutex_initted) {
		mutex_destroy(EMUL64_REQ_MUTEX(emul64));
		mutex_destroy(EMUL64_RESP_MUTEX(emul64));
	}
	if (tran) {
		scsi_hba_tran_free(tran);
	}
	ddi_soft_state_free(emul64_state, instance);
	return (DDI_FAILURE);
}

/*
 * Detach an instance: destroy the completion taskq, detach from SCSA,
 * free the transport structure, mutexes, and per-instance soft state.
 */
/*ARGSUSED*/
static int
emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct emul64	*emul64;
	scsi_hba_tran_t	*tran;
	int		instance = ddi_get_instance(dip);


	/* get transport structure pointer from the dip */
	if (!(tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip))) {
		return (DDI_FAILURE);
	}

	/* get soft state from transport structure */
	emul64 = TRAN2EMUL64(tran);

	if (!emul64) {
		return (DDI_FAILURE);
	}

	EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: cmd = %d", cmd);

	switch (cmd) {
	case DDI_DETACH:
		EMUL64_MUTEX_ENTER(emul64);

		taskq_destroy(emul64->emul64_taskq);
		(void) scsi_hba_detach(dip);

		scsi_hba_tran_free(emul64->emul64_tran);


		EMUL64_MUTEX_EXIT(emul64);

		mutex_destroy(EMUL64_REQ_MUTEX(emul64));
		mutex_destroy(EMUL64_RESP_MUTEX(emul64));

		EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: done");
		ddi_soft_state_free(emul64_state, instance);

		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		/* Nothing to quiesce for emulated hardware. */
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

/*
 * Function name : emul64_tran_tgt_init
 *
 * Return Values : DDI_SUCCESS if target supported, DDI_FAILURE otherwise
 *
 */
/*ARGSUSED*/
static int
emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	struct emul64	*emul64;
	emul64_tgt_t	*tgt;
	char		**geo_vidpid = NULL;	/* geometry name + vid/pid */
	char		*geo, *vidpid;
	uint32_t	*geoip = NULL;		/* geometry integer array */
	uint_t		length;
	uint_t		length2;
	lldaddr_t	sector_count;
	char		prop_name[15];
	int		ret = DDI_FAILURE;

	emul64 = TRAN2EMUL64(tran);
	EMUL64_MUTEX_ENTER(emul64);

	/*
	 * We get called for each target driver.conf node, multiple
	 * nodes may map to the same tgt,lun (sd.conf, st.conf, etc).
	 * Check to see if transport to tgt,lun already established.
	 */
	tgt = find_tgt(emul64, sd->sd_address.a_target, sd->sd_address.a_lun);
	if (tgt) {
		ret = DDI_SUCCESS;
		goto out;
	}

	/* see if we have driver.conf specified device for this target,lun */
	(void) snprintf(prop_name, sizeof (prop_name), "targ_%d_%d",
	    sd->sd_address.a_target, sd->sd_address.a_lun);
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba_dip,
	    DDI_PROP_DONTPASS, prop_name,
	    &geo_vidpid, &length) != DDI_PROP_SUCCESS)
		goto out;
	if (length < 2) {
		cmn_err(CE_WARN, "emul64: %s property does not have 2 "
		    "elements", prop_name);
		goto out;
	}

	/* pick geometry name and vidpid string from string array */
	geo = *geo_vidpid;
	vidpid = *(geo_vidpid + 1);

	/* lookup geometry property integer array */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hba_dip, DDI_PROP_DONTPASS,
	    geo, (int **)&geoip, &length2) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "emul64: didn't get prop '%s'", geo);
		goto out;
	}
	if (length2 < 6) {
		cmn_err(CE_WARN, "emul64: property %s does not have 6 "
		    "elements", *geo_vidpid);
		goto out;
	}

	/* allocate and initialize tgt structure for tgt,lun */
	tgt = kmem_zalloc(sizeof (emul64_tgt_t), KM_SLEEP);
	rw_init(&tgt->emul64_tgt_nw_lock, NULL, RW_DRIVER, NULL);
	mutex_init(&tgt->emul64_tgt_blk_lock, NULL, MUTEX_DRIVER, NULL);

	/* create avl for data block storage */
	avl_create(&tgt->emul64_tgt_data, emul64_bsd_blkcompare,
	    sizeof (blklist_t), offsetof(blklist_t, bl_node));

	/* save scsi_address and vidpid */
	bcopy(sd, &tgt->emul64_tgt_saddr, sizeof (struct scsi_address));
	/*
	 * NOTE(review): the sizeof operand names the list head's inq field
	 * rather than tgt's; the bound is the same since both are the same
	 * field of emul64_tgt_t.  strncpy may also leave the buffer without
	 * a NUL -- presumably consumers treat it as a fixed-width INQUIRY
	 * string, not a C string; confirm before changing.
	 */
	(void) strncpy(tgt->emul64_tgt_inq, vidpid,
	    sizeof (emul64->emul64_tgt->emul64_tgt_inq));

	/*
	 * The high order 4 bytes of the sector count always come first in
	 * emul64.conf.  They are followed by the low order 4 bytes.
Not
	 * all CPU types want them in this order, but laddr_t takes care of
	 * this for us.  We then pick up geometry (ncyl X nheads X nsect).
	 */
	sector_count._p._u = *(geoip + 0);
	sector_count._p._l = *(geoip + 1);
	/*
	 * On 32-bit platforms, fix block size if it's greater than the
	 * allowable maximum.
	 */
#if !defined(_LP64)
	if (sector_count._f > DK_MAX_BLOCKS)
		sector_count._f = DK_MAX_BLOCKS;
#endif
	tgt->emul64_tgt_sectors = sector_count._f;
	tgt->emul64_tgt_dtype = *(geoip + 2);
	tgt->emul64_tgt_ncyls = *(geoip + 3);
	tgt->emul64_tgt_nheads = *(geoip + 4);
	tgt->emul64_tgt_nsect = *(geoip + 5);

	/* insert target structure into list */
	tgt->emul64_tgt_next = emul64->emul64_tgt;
	emul64->emul64_tgt = tgt;
	ret = DDI_SUCCESS;

out:	EMUL64_MUTEX_EXIT(emul64);
	/* DDI property buffers must be released in all paths. */
	if (geoip)
		ddi_prop_free(geoip);
	if (geo_vidpid)
		ddi_prop_free(geo_vidpid);
	return (ret);
}

/*
 * Function name : emul64_i_initcap
 *
 * Return Values : NONE
 * Description	 : Initializes the default target capabilities and
 *		   Sync Rates.
 *
 * Context	 : Called from the user thread through attach.
 *
 */
static void
emul64_i_initcap(struct emul64 *emul64)
{
	uint16_t	cap, synch;
	int		i;

	/* All per-target capabilities and sync rates start out cleared. */
	cap = 0;
	synch = 0;
	for (i = 0; i < NTARGETS_WIDE; i++) {
		emul64->emul64_cap[i] = cap;
		emul64->emul64_synch[i] = synch;
	}
	EMUL64_DEBUG(emul64, SCSI_DEBUG, "default cap = 0x%x", cap);
}

/*
 * Function name : emul64_scsi_getcap()
 *
 * Return Values : current value of capability, if defined
 *		   -1 if capability is not defined
 * Description	 : returns current capability value
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
657 */ 658 static int 659 emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom) 660 { 661 struct emul64 *emul64 = ADDR2EMUL64(ap); 662 int rval = 0; 663 664 /* 665 * We don't allow inquiring about capabilities for other targets 666 */ 667 if (cap == NULL || whom == 0) { 668 return (-1); 669 } 670 671 EMUL64_MUTEX_ENTER(emul64); 672 673 switch (scsi_hba_lookup_capstr(cap)) { 674 case SCSI_CAP_DMA_MAX: 675 rval = 1 << 24; /* Limit to 16MB max transfer */ 676 break; 677 case SCSI_CAP_MSG_OUT: 678 rval = 1; 679 break; 680 case SCSI_CAP_DISCONNECT: 681 rval = 1; 682 break; 683 case SCSI_CAP_SYNCHRONOUS: 684 rval = 1; 685 break; 686 case SCSI_CAP_WIDE_XFER: 687 rval = 1; 688 break; 689 case SCSI_CAP_TAGGED_QING: 690 rval = 1; 691 break; 692 case SCSI_CAP_UNTAGGED_QING: 693 rval = 1; 694 break; 695 case SCSI_CAP_PARITY: 696 rval = 1; 697 break; 698 case SCSI_CAP_INITIATOR_ID: 699 rval = emul64->emul64_initiator_id; 700 break; 701 case SCSI_CAP_ARQ: 702 rval = 1; 703 break; 704 case SCSI_CAP_LINKED_CMDS: 705 break; 706 case SCSI_CAP_RESET_NOTIFICATION: 707 rval = 1; 708 break; 709 710 default: 711 rval = -1; 712 break; 713 } 714 715 EMUL64_MUTEX_EXIT(emul64); 716 717 return (rval); 718 } 719 720 /* 721 * Function name : emul64_scsi_setcap() 722 * 723 * Return Values : 1 - capability exists and can be set to new value 724 * 0 - capability could not be set to new value 725 * -1 - no such capability 726 * 727 * Description : sets a capability for a target 728 * 729 * Context : Can be called from different kernel process threads. 730 * Can be called by interrupt thread. 
 */
static int
emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
{
	struct emul64	*emul64 = ADDR2EMUL64(ap);
	int		rval = 0;

	/*
	 * We don't allow setting capabilities for other targets
	 */
	if (cap == NULL || whom == 0) {
		return (-1);
	}

	EMUL64_MUTEX_ENTER(emul64);

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_DMA_MAX:
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_RESET_NOTIFICATION:
		/*
		 * None of these are settable via
		 * the capability interface.
		 */
		break;
	case SCSI_CAP_DISCONNECT:
		rval = 1;
		break;
	case SCSI_CAP_SYNCHRONOUS:
		rval = 1;
		break;
	case SCSI_CAP_TAGGED_QING:
		rval = 1;
		break;
	case SCSI_CAP_WIDE_XFER:
		rval = 1;
		break;
	case SCSI_CAP_INITIATOR_ID:
		rval = -1;
		break;
	case SCSI_CAP_ARQ:
		rval = 1;
		break;
	case SCSI_CAP_TOTAL_SECTORS:
		/* Per-target,lun sector-count override for the emulator. */
		emul64->nt_total_sectors[ap->a_target][ap->a_lun] = value;
		rval = TRUE;
		break;
	case SCSI_CAP_SECTOR_SIZE:
		rval = TRUE;
		break;
	default:
		rval = -1;
		break;
	}


	EMUL64_MUTEX_EXIT(emul64);

	return (rval);
}

/*
 * Function name : emul64_scsi_init_pkt
 *
 * Return Values : pointer to scsi_pkt, or NULL
 * Description	 : Called by kernel on behalf of a target driver
 *		   calling scsi_init_pkt(9F).
 *		   Refer to tran_init_pkt(9E) man page
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
/* ARGSUSED */
static struct scsi_pkt *
emul64_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
	struct emul64		*emul64 = ADDR2EMUL64(ap);
	struct emul64_cmd	*sp;

	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);

	/*
	 * First step of emul64_scsi_init_pkt:  pkt allocation
	 */
	if (pkt == NULL) {
		pkt = scsi_hba_pkt_alloc(emul64->emul64_dip, ap, cmdlen,
		    statuslen,
		    tgtlen, sizeof (struct emul64_cmd), callback, arg);
		if (pkt == NULL) {
			cmn_err(CE_WARN, "emul64_scsi_init_pkt: "
			    "scsi_hba_pkt_alloc failed");
			return (NULL);
		}

		sp = PKT2CMD(pkt);

		/*
		 * Initialize the new pkt - we redundantly initialize
		 * all the fields for illustrative purposes.
		 */
		sp->cmd_pkt = pkt;
		sp->cmd_flags = 0;
		sp->cmd_scblen = statuslen;
		sp->cmd_cdblen = cmdlen;
		sp->cmd_emul64 = emul64;
		pkt->pkt_address = *ap;
		pkt->pkt_comp = (void (*)())NULL;
		pkt->pkt_flags = 0;
		pkt->pkt_time = 0;
		pkt->pkt_resid = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;

	} else {
		sp = PKT2CMD(pkt);
	}

	/*
	 * Second step of emul64_scsi_init_pkt:  dma allocation/move
	 */
	if (bp && bp->b_bcount != 0) {
		/* Record the transfer direction for the emulated "DMA". */
		if (bp->b_flags & B_READ) {
			sp->cmd_flags &= ~CFLAG_DMASEND;
		} else {
			sp->cmd_flags |= CFLAG_DMASEND;
		}
		/*
		 * Map the buffer into kernel VA; the emulation copies
		 * data directly instead of programming real DMA.
		 */
		bp_mapin(bp);
		sp->cmd_addr = (unsigned char *) bp->b_un.b_addr;
		sp->cmd_count = bp->b_bcount;
		pkt->pkt_resid = 0;
	}

	return (pkt);
}


/*
 * Function name : emul64_scsi_destroy_pkt
 *
 * Return Values : none
 * Description	 : Called by kernel on behalf of a target driver
 *		   calling scsi_destroy_pkt(9F).
 *		   Refer to tran_destroy_pkt(9E) man page
 *
 * Context	 : Can be called from different kernel process threads.
881 * Can be called by interrupt thread. 882 */ 883 static void 884 emul64_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 885 { 886 struct emul64_cmd *sp = PKT2CMD(pkt); 887 888 /* 889 * emul64_scsi_dmafree inline to make things faster 890 */ 891 if (sp->cmd_flags & CFLAG_DMAVALID) { 892 /* 893 * Free the mapping. 894 */ 895 sp->cmd_flags &= ~CFLAG_DMAVALID; 896 } 897 898 /* 899 * Free the pkt 900 */ 901 scsi_hba_pkt_free(ap, pkt); 902 } 903 904 905 /* 906 * Function name : emul64_scsi_dmafree() 907 * 908 * Return Values : none 909 * Description : free dvma resources 910 * 911 * Context : Can be called from different kernel process threads. 912 * Can be called by interrupt thread. 913 */ 914 /*ARGSUSED*/ 915 static void 916 emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt) 917 { 918 } 919 920 /* 921 * Function name : emul64_scsi_sync_pkt() 922 * 923 * Return Values : none 924 * Description : sync dma 925 * 926 * Context : Can be called from different kernel process threads. 927 * Can be called by interrupt thread. 928 */ 929 /*ARGSUSED*/ 930 static void 931 emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt) 932 { 933 } 934 935 /* 936 * routine for reset notification setup, to register or cancel. 
 */
static int
emul64_scsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	struct emul64				*emul64 = ADDR2EMUL64(ap);
	struct emul64_reset_notify_entry	*p, *beforep;
	int					rval = DDI_FAILURE;

	mutex_enter(EMUL64_REQ_MUTEX(emul64));

	/* Search the singly-linked list for this target's registration. */
	p = emul64->emul64_reset_notify_listf;
	beforep = NULL;

	while (p) {
		if (p->ap == ap)
			break;	/* An entry exists for this target */
		beforep = p;
		p = p->next;
	}

	if ((flag & SCSI_RESET_CANCEL) && (p != NULL)) {
		/* Cancel: unlink and free the existing registration. */
		if (beforep == NULL) {
			emul64->emul64_reset_notify_listf = p->next;
		} else {
			beforep->next = p->next;
		}
		kmem_free((caddr_t)p,
		    sizeof (struct emul64_reset_notify_entry));
		rval = DDI_SUCCESS;

	} else if ((flag & SCSI_RESET_NOTIFY) && (p == NULL)) {
		/* Register: insert a new callback at the list head. */
		p = kmem_zalloc(sizeof (struct emul64_reset_notify_entry),
		    KM_SLEEP);
		p->ap = ap;
		p->callback = callback;
		p->arg = arg;
		p->next = emul64->emul64_reset_notify_listf;
		emul64->emul64_reset_notify_listf = p;
		rval = DDI_SUCCESS;
	}

	mutex_exit(EMUL64_REQ_MUTEX(emul64));

	return (rval);
}

/*
 * Function name : emul64_scsi_start()
 *
 * Return Values : TRAN_FATAL_ERROR	- emul64 has been shutdown
 *		   TRAN_BUSY		- request queue is full
 *		   TRAN_ACCEPT		- pkt has been submitted to emul64
 *
 * Description	 : init pkt, start the request
 *
 * Context	 : Can be called from different kernel process threads.
 *		   Can be called by interrupt thread.
 */
static int
emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	int			rval = TRAN_ACCEPT;
	struct emul64		*emul64 = ADDR2EMUL64(ap);
	clock_t			cur_lbolt;
	taskqid_t		dispatched;

	ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
	ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

	EMUL64_DEBUG2(emul64, SCSI_DEBUG, "emul64_scsi_start %x", sp);

	pkt->pkt_reason = CMD_CMPLT;

#ifdef	EMUL64DEBUG
	if (emul64_cdb_debug) {
		emul64_debug_dump_cdb(ap, pkt);
	}
#endif	/* EMUL64DEBUG */

	/*
	 * calculate deadline from pkt_time
	 * Instead of multiplying by 100 (ie. HZ), we multiply by 128 so
	 * we can shift and at the same time have a 28% grace period
	 * we ignore the rare case of pkt_time == 0 and deal with it
	 * in emul64_i_watch()
	 */
	cur_lbolt = ddi_get_lbolt();
	sp->cmd_deadline = cur_lbolt + (pkt->pkt_time * 128);

	if ((emul64_usetaskq == 0) || (pkt->pkt_flags & FLAG_NOINTR) != 0) {
		/* Complete synchronously in the caller's context. */
		emul64_pkt_comp((caddr_t)pkt);
	} else {
		dispatched = NULL;
		if (emul64_collect_stats) {
			/*
			 * If we are collecting statistics, call
			 * taskq_dispatch in no sleep mode, so that we can
			 * detect if we are exceeding the queue length that
			 * was established in the call to taskq_create in
			 * emul64_attach.  If the no sleep call fails
			 * (returns NULL), the task will be dispatched in
			 * sleep mode below.
			 */
			dispatched = taskq_dispatch(emul64->emul64_taskq,
			    emul64_pkt_comp, (void *)pkt, TQ_NOSLEEP);
			if (dispatched == NULL) {
				/* Queue was full.  dispatch failed.
				 */
				mutex_enter(&emul64_stats_mutex);
				emul64_taskq_max++;
				mutex_exit(&emul64_stats_mutex);
			}
		}
		if (dispatched == NULL) {
			/* Retry the dispatch, waiting for queue space. */
			(void) taskq_dispatch(emul64->emul64_taskq,
			    emul64_pkt_comp, (void *)pkt, TQ_SLEEP);
		}
	}

done:
	ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
	ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());

	return (rval);
}

/*
 * Fill in packet and auto-request-sense state for a CHECK CONDITION
 * with the given sense key / additional sense code / qualifier.
 */
void
emul64_check_cond(struct scsi_pkt *pkt, uchar_t key, uchar_t asc, uchar_t ascq)
{
	struct scsi_arq_status *arq =
	    (struct scsi_arq_status *)pkt->pkt_scbp;

	/* got check, no data transferred and ARQ done */
	arq->sts_status.sts_chk = 1;
	pkt->pkt_state |= STATE_ARQ_DONE;
	pkt->pkt_state &= ~STATE_XFERRED_DATA;

	/* for ARQ */
	arq->sts_rqpkt_reason = CMD_CMPLT;
	arq->sts_rqpkt_resid = 0;
	arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
	    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
	arq->sts_sensedata.es_valid = 1;
	arq->sts_sensedata.es_class = 0x7;
	arq->sts_sensedata.es_key = key;
	arq->sts_sensedata.es_add_code = asc;
	arq->sts_sensedata.es_qual_code = ascq;
}

/*
 * If error injection is armed for this packet's target, overwrite the
 * packet's status/state/reason and sense data with the configured
 * values.  Returns the target's current injection state.
 */
ushort_t
emul64_error_inject(struct scsi_pkt *pkt)
{
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	emul64_tgt_t		*tgt;
	struct scsi_arq_status	*arq =
	    (struct scsi_arq_status *)pkt->pkt_scbp;
	uint_t			max_sense_len;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);

	/*
	 * If there is no target, skip the error injection and
	 * let the packet be handled normally.  This would normally
	 * never happen since a_target and a_lun are setup in
	 * emul64_scsi_init_pkt.
	 */
	if (tgt == NULL) {
		return (ERR_INJ_DISABLE);
	}

	if (tgt->emul64_einj_state != ERR_INJ_DISABLE) {
		arq->sts_status = tgt->emul64_einj_scsi_status;
		pkt->pkt_state = tgt->emul64_einj_pkt_state;
		pkt->pkt_reason = tgt->emul64_einj_pkt_reason;

		/*
		 * Calculate available sense buffer length.  We could just
		 * assume sizeof(struct scsi_extended_sense) but hopefully
		 * that limitation will go away soon.
		 */
		max_sense_len = sp->cmd_scblen -
		    (sizeof (struct scsi_arq_status) -
		    sizeof (struct scsi_extended_sense));
		if (max_sense_len > tgt->emul64_einj_sense_length) {
			max_sense_len = tgt->emul64_einj_sense_length;
		}

		/* for ARQ */
		arq->sts_rqpkt_reason = CMD_CMPLT;
		arq->sts_rqpkt_resid = 0;
		arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;

		/* Copy sense data */
		if (tgt->emul64_einj_sense_data != 0) {
			bcopy(tgt->emul64_einj_sense_data,
			    (uint8_t *)&arq->sts_sensedata,
			    max_sense_len);
		}
	}

	/* Return current error injection state */
	return (tgt->emul64_einj_state);
}

/*
 * ioctl backend: configure error injection for a target,lun from a
 * user-supplied emul64_error_inj_data structure (optionally followed
 * by raw sense bytes).  Returns errno-style values.
 */
int
emul64_error_inject_req(struct emul64 *emul64, intptr_t arg)
{
	emul64_tgt_t		*tgt;
	struct emul64_error_inj_data error_inj_req;

	/* Check args */
	if (arg == NULL) {
		return (EINVAL);
	}

	if (ddi_copyin((void *)arg, &error_inj_req,
	    sizeof (error_inj_req), 0) != 0) {
		cmn_err(CE_WARN, "emul64: ioctl - inj copyin failed\n");
		return (EFAULT);
	}

	EMUL64_MUTEX_ENTER(emul64);
	tgt = find_tgt(emul64, error_inj_req.eccd_target,
	    error_inj_req.eccd_lun);
	EMUL64_MUTEX_EXIT(emul64);

	/* Make sure device exists */
	if (tgt == NULL) {
		return (ENODEV);
	}

	/* Free old sense buffer if we have one */
	if (tgt->emul64_einj_sense_data != NULL) {
		ASSERT(tgt->emul64_einj_sense_length != 0);
		kmem_free(tgt->emul64_einj_sense_data,
		    tgt->emul64_einj_sense_length);
		tgt->emul64_einj_sense_data = NULL;
		tgt->emul64_einj_sense_length = 0;
	}

	/*
	 * Now handle error injection request.  If error injection
	 * is requested we will return the sense data provided for
	 * any I/O to this target until told to stop.
	 */
	tgt->emul64_einj_state = error_inj_req.eccd_inj_state;
	tgt->emul64_einj_sense_length = error_inj_req.eccd_sns_dlen;
	tgt->emul64_einj_pkt_state = error_inj_req.eccd_pkt_state;
	tgt->emul64_einj_pkt_reason = error_inj_req.eccd_pkt_reason;
	tgt->emul64_einj_scsi_status = error_inj_req.eccd_scsi_status;
	switch (error_inj_req.eccd_inj_state) {
	case ERR_INJ_ENABLE:
	case ERR_INJ_ENABLE_NODATA:
		if (error_inj_req.eccd_sns_dlen) {
			tgt->emul64_einj_sense_data =
			    kmem_alloc(error_inj_req.eccd_sns_dlen, KM_SLEEP);
			/*
			 * Copy sense data; it follows the request struct
			 * in the user buffer.
			 * NOTE(review): on copyin failure we return EFAULT
			 * with injection already armed and the freshly
			 * allocated sense buffer uninitialized — the state
			 * is half-armed until the next request.  Confirm
			 * whether that is intended.
			 */
			if (ddi_copyin((void *)(arg + sizeof (error_inj_req)),
			    tgt->emul64_einj_sense_data,
			    error_inj_req.eccd_sns_dlen, 0) != 0) {
				cmn_err(CE_WARN,
				    "emul64: sense data copy in failed\n");
				return (EFAULT);
			}
		}
		break;
	case ERR_INJ_DISABLE:
	default:
		break;
	}

	return (0);
}

/*
 * Forward declarations for the SCSI command emulation routines —
 * presumably implemented in a companion "bsd" source file of this
 * driver (not visible in this chunk).
 */
int bsd_scsi_start_stop_unit(struct scsi_pkt *);
int bsd_scsi_test_unit_ready(struct scsi_pkt *);
int bsd_scsi_request_sense(struct scsi_pkt *);
int bsd_scsi_inquiry(struct scsi_pkt *);
int bsd_scsi_format(struct scsi_pkt *);
int bsd_scsi_io(struct scsi_pkt *);
int bsd_scsi_log_sense(struct scsi_pkt *);
int bsd_scsi_mode_sense(struct scsi_pkt *);
int bsd_scsi_mode_select(struct scsi_pkt *);
int bsd_scsi_read_capacity(struct scsi_pkt *);
int bsd_scsi_read_capacity_16(struct scsi_pkt *);
int bsd_scsi_reserve(struct scsi_pkt *);
int bsd_scsi_format(struct scsi_pkt *);	/* NOTE(review): duplicate decl */
int
bsd_scsi_release(struct scsi_pkt *);
int bsd_scsi_read_defect_list(struct scsi_pkt *);
int bsd_scsi_reassign_block(struct scsi_pkt *);
int bsd_freeblkrange(emul64_tgt_t *, emul64_range_t *);

/*
 * emul64_handle_cmd - dispatch a SCSI CDB to the matching emulation
 * routine (bsd_scsi_*), honoring any armed error injection first.
 */
static void
emul64_handle_cmd(struct scsi_pkt *pkt)
{
	if (emul64_error_inject(pkt) == ERR_INJ_ENABLE_NODATA) {
		/*
		 * If error injection is configured to return with
		 * no data return now without handling the command.
		 * This is how normal check conditions work.
		 *
		 * If the error injection state is ERR_INJ_ENABLE
		 * (or if error injection is disabled) continue and
		 * handle the command.  This would be used for
		 * KEY_RECOVERABLE_ERROR type conditions.
		 */
		return;
	}

	/* Dispatch on the opcode (byte 0 of the CDB). */
	switch (pkt->pkt_cdbp[0]) {
	case SCMD_START_STOP:
		(void) bsd_scsi_start_stop_unit(pkt);
		break;
	case SCMD_TEST_UNIT_READY:
		(void) bsd_scsi_test_unit_ready(pkt);
		break;
	case SCMD_REQUEST_SENSE:
		(void) bsd_scsi_request_sense(pkt);
		break;
	case SCMD_INQUIRY:
		(void) bsd_scsi_inquiry(pkt);
		break;
	case SCMD_FORMAT:
		(void) bsd_scsi_format(pkt);
		break;
	case SCMD_READ:
	case SCMD_WRITE:
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
	case SCMD_READ_G4:
	case SCMD_WRITE_G4:
		/* All READ/WRITE variants funnel into one I/O routine. */
		(void) bsd_scsi_io(pkt);
		break;
	case SCMD_LOG_SENSE_G1:
		(void) bsd_scsi_log_sense(pkt);
		break;
	case SCMD_MODE_SENSE:
	case SCMD_MODE_SENSE_G1:
		(void) bsd_scsi_mode_sense(pkt);
		break;
	case SCMD_MODE_SELECT:
	case SCMD_MODE_SELECT_G1:
		(void) bsd_scsi_mode_select(pkt);
		break;
	case SCMD_READ_CAPACITY:
		(void) bsd_scsi_read_capacity(pkt);
		break;
	case SCMD_SVC_ACTION_IN_G4:
		/* Only READ CAPACITY(16) service action is emulated. */
		if (pkt->pkt_cdbp[1] == SSVC_ACTION_READ_CAPACITY_G4) {
			(void) bsd_scsi_read_capacity_16(pkt);
		} else {
			cmn_err(CE_WARN, "emul64: unrecognized G4 service "
			    "action 0x%x", pkt->pkt_cdbp[1]);
		}
		break;
	case SCMD_RESERVE:
	case SCMD_RESERVE_G1:
		(void) bsd_scsi_reserve(pkt);
		break;
	case SCMD_RELEASE:
	case SCMD_RELEASE_G1:
		(void) bsd_scsi_release(pkt);
		break;
	case SCMD_REASSIGN_BLOCK:
		(void) bsd_scsi_reassign_block(pkt);
		break;
	case SCMD_READ_DEFECT_LIST:
		(void) bsd_scsi_read_defect_list(pkt);
		break;
	case SCMD_PRIN:
	case SCMD_PROUT:
	case SCMD_REPORT_LUNS:
		/* ASC 0x24 INVALID FIELD IN CDB */
		emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
		break;
	default:
		cmn_err(CE_WARN, "emul64: unrecognized "
		    "SCSI cmd 0x%x", pkt->pkt_cdbp[0]);
		emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
		break;
	/* Cases after "default" are deliberate: accepted silently. */
	case SCMD_GET_CONFIGURATION:
	case 0x35:	/* SCMD_SYNCHRONIZE_CACHE */
		/* Don't complain */
		break;
	}
}

/*
 * emul64_pkt_comp - taskq callback that completes one scsi_pkt.
 *
 * If the addressed target does not exist the packet is failed with
 * CMD_TIMEOUT; otherwise it is marked successful, handed to
 * emul64_handle_cmd() for emulation, and completed via
 * scsi_hba_pkt_comp().
 */
static void
emul64_pkt_comp(void * arg)
{
	struct scsi_pkt		*pkt = (struct scsi_pkt *)arg;
	struct emul64_cmd	*sp = PKT2CMD(pkt);
	emul64_tgt_t		*tgt;

	EMUL64_MUTEX_ENTER(sp->cmd_emul64);
	tgt = find_tgt(sp->cmd_emul64,
	    pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
	EMUL64_MUTEX_EXIT(sp->cmd_emul64);
	if (!tgt) {
		/* No such target: emulate a selection timeout. */
		pkt->pkt_reason = CMD_TIMEOUT;
		pkt->pkt_state = STATE_GOT_BUS | STATE_SENT_CMD;
		pkt->pkt_statistics = STAT_TIMEOUT;
	} else {
		pkt->pkt_reason = CMD_CMPLT;
		*pkt->pkt_scbp = STATUS_GOOD;
		pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
		    STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
		pkt->pkt_statistics = 0;
		emul64_handle_cmd(pkt);
	}
	scsi_hba_pkt_comp(pkt);
}

/*
 * emul64_scsi_abort - SCSA tran_abort entry point.  Commands complete
 * immediately in this emulated HBA, so there is nothing to abort;
 * always reports success.
 */
/* ARGSUSED */
static int
emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	return (1);
}

/*
 * emul64_scsi_reset - SCSA tran_reset entry point.  No real hardware
 * to reset; always reports success.
 */
/* ARGSUSED */
static int
emul64_scsi_reset(struct scsi_address *ap, int level)
{
	return (1);
}

static int
emul64_get_tgtrange(struct emul64 *emul64,
		    intptr_t arg,
		    emul64_tgt_t **tgtp,
		    emul64_tgt_range_t *tgtr)
{
	/*
	 * Copy an emul64_tgt_range_t from user space (arg) and look up
	 * the target it addresses.  On success *tgtp is the target and
	 * *tgtr holds the copied request; returns 0, else EFAULT/ENXIO.
	 */
	if (ddi_copyin((void *)arg, tgtr, sizeof (*tgtr), 0) != 0) {
		cmn_err(CE_WARN, "emul64: ioctl - copy in failed\n");
		return (EFAULT);
	}
	EMUL64_MUTEX_ENTER(emul64);
	*tgtp = find_tgt(emul64, tgtr->emul64_target, tgtr->emul64_lun);
	EMUL64_MUTEX_EXIT(emul64);
	if (*tgtp == NULL) {
		cmn_err(CE_WARN, "emul64: ioctl - no target for %d,%d on %d",
		    tgtr->emul64_target, tgtr->emul64_lun,
		    ddi_get_instance(emul64->emul64_dip));
		return (ENXIO);
	}
	return (0);
}

/*
 * emul64_ioctl - cb_ops ioctl entry point.
 *
 * Supports the driver-private EMUL64_WRITE_OFF / EMUL64_WRITE_ON /
 * EMUL64_ZERO_RANGE / EMUL64_ERROR_INJECT commands; everything else is
 * passed through to scsi_hba_ioctl().
 */
static int
emul64_ioctl(dev_t dev,
	int cmd,
	intptr_t arg,
	int mode,
	cred_t *credp,
	int *rvalp)
{
	struct emul64	*emul64;
	int		instance;
	int		rv = 0;
	emul64_tgt_range_t	tgtr;
	emul64_tgt_t	*tgt;

	instance = MINOR2INST(getminor(dev));
	emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
	if (emul64 == NULL) {
		cmn_err(CE_WARN, "emul64: ioctl - no softstate for %d\n",
		    getminor(dev));
		return (ENXIO);
	}

	switch (cmd) {
	case EMUL64_WRITE_OFF:
		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
		if (rv == 0) {
			rv = emul64_write_off(emul64, tgt, &tgtr);
		}
		break;
	case EMUL64_WRITE_ON:
		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
		if (rv == 0) {
			rv = emul64_write_on(emul64, tgt, &tgtr);
		}
		break;
	case EMUL64_ZERO_RANGE:
		rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
		if (rv == 0) {
			/* Free the block range under the target block lock. */
			mutex_enter(&tgt->emul64_tgt_blk_lock);
			rv = bsd_freeblkrange(tgt, &tgtr.emul64_blkrange);
			mutex_exit(&tgt->emul64_tgt_blk_lock);
		}
		break;
	case EMUL64_ERROR_INJECT:
		rv = emul64_error_inject_req(emul64, arg);
		break;
	default:
		rv = scsi_hba_ioctl(dev, cmd, arg, mode, credp, rvalp);
		break;
	}

return (rv); 1442 } 1443 1444 /* ARGSUSED */ 1445 static int 1446 emul64_write_off(struct emul64 *emul64, 1447 emul64_tgt_t *tgt, 1448 emul64_tgt_range_t *tgtr) 1449 { 1450 size_t blkcnt = tgtr->emul64_blkrange.emul64_blkcnt; 1451 emul64_nowrite_t *cur; 1452 emul64_nowrite_t *nowrite; 1453 emul64_rng_overlap_t overlap = O_NONE; 1454 emul64_nowrite_t **prev = NULL; 1455 diskaddr_t sb = tgtr->emul64_blkrange.emul64_sb; 1456 1457 nowrite = emul64_nowrite_alloc(&tgtr->emul64_blkrange); 1458 1459 /* Find spot in list */ 1460 rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER); 1461 cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev); 1462 if (overlap == O_NONE) { 1463 /* Insert into list */ 1464 *prev = nowrite; 1465 nowrite->emul64_nwnext = cur; 1466 } 1467 rw_exit(&tgt->emul64_tgt_nw_lock); 1468 if (overlap == O_NONE) { 1469 if (emul64_collect_stats) { 1470 mutex_enter(&emul64_stats_mutex); 1471 emul64_nowrite_count++; 1472 mutex_exit(&emul64_stats_mutex); 1473 } 1474 } else { 1475 cmn_err(CE_WARN, "emul64: EMUL64_WRITE_OFF 0x%llx,0x%" 1476 PRIx64 "overlaps 0x%llx,0x%" PRIx64 "\n", 1477 nowrite->emul64_blocked.emul64_sb, 1478 nowrite->emul64_blocked.emul64_blkcnt, 1479 cur->emul64_blocked.emul64_sb, 1480 cur->emul64_blocked.emul64_blkcnt); 1481 emul64_nowrite_free(nowrite); 1482 return (EINVAL); 1483 } 1484 return (0); 1485 } 1486 1487 /* ARGSUSED */ 1488 static int 1489 emul64_write_on(struct emul64 *emul64, 1490 emul64_tgt_t *tgt, 1491 emul64_tgt_range_t *tgtr) 1492 { 1493 size_t blkcnt = tgtr->emul64_blkrange.emul64_blkcnt; 1494 emul64_nowrite_t *cur; 1495 emul64_rng_overlap_t overlap = O_NONE; 1496 emul64_nowrite_t **prev = NULL; 1497 int rv = 0; 1498 diskaddr_t sb = tgtr->emul64_blkrange.emul64_sb; 1499 1500 /* Find spot in list */ 1501 rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER); 1502 cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev); 1503 if (overlap == O_SAME) { 1504 /* Remove from list */ 1505 *prev = cur->emul64_nwnext; 1506 } 1507 
	rw_exit(&tgt->emul64_tgt_nw_lock);

	/* Report the outcome of the removal attempt. */
	switch (overlap) {
	case O_NONE:
		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx "
		    "range not found\n", sb, blkcnt);
		rv = ENXIO;
		break;
	case O_SAME:
		if (emul64_collect_stats) {
			mutex_enter(&emul64_stats_mutex);
			emul64_nowrite_count--;
			mutex_exit(&emul64_stats_mutex);
		}
		/* Entry was unlinked above; safe to free outside the lock. */
		emul64_nowrite_free(cur);
		break;
	case O_OVERLAP:
	case O_SUBSET:
		/* Partial matches are rejected; only exact ranges unlock. */
		cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx "
		    "overlaps 0x%llx,0x%" PRIx64 "\n",
		    sb, blkcnt, cur->emul64_blocked.emul64_sb,
		    cur->emul64_blocked.emul64_blkcnt);
		rv = EINVAL;
		break;
	}
	return (rv);
}

/*
 * emul64_find_nowrite - walk the target's nowrite list looking for an
 * entry that overlaps [sb, sb + blkcnt).
 *
 * On return *overlap describes the relationship of the first
 * overlapping entry (O_NONE if none), *prevp points at the link that
 * refers to the returned entry (for unlink/insert), and the entry
 * itself (or NULL) is returned.
 *
 * Caller must hold emul64_tgt_nw_lock.
 */
static emul64_nowrite_t *
emul64_find_nowrite(emul64_tgt_t *tgt,
		    diskaddr_t sb,
		    size_t blkcnt,
		    emul64_rng_overlap_t *overlap,
		    emul64_nowrite_t ***prevp)
{
	emul64_nowrite_t	*cur;
	emul64_nowrite_t	**prev;

	/* Find spot in list */
	*overlap = O_NONE;
	prev = &tgt->emul64_tgt_nowrite;
	cur = tgt->emul64_tgt_nowrite;
	while (cur != NULL) {
		*overlap = emul64_overlap(&cur->emul64_blocked, sb, blkcnt);
		if (*overlap != O_NONE)
			break;
		prev = &cur->emul64_nwnext;
		cur = cur->emul64_nwnext;
	}

	*prevp = prev;
	return (cur);
}

/*
 * emul64_nowrite_alloc - allocate a zeroed nowrite list entry covering
 * *range.  KM_SLEEP: may block, never returns NULL.
 */
static emul64_nowrite_t *
emul64_nowrite_alloc(emul64_range_t *range)
{
	emul64_nowrite_t	*nw;

	nw = kmem_zalloc(sizeof (*nw), KM_SLEEP);
	bcopy((void *) range,
	    (void *) &nw->emul64_blocked,
	    sizeof (nw->emul64_blocked));
	return (nw);
}

/*
 * emul64_nowrite_free - release a nowrite list entry.
 */
static void
emul64_nowrite_free(emul64_nowrite_t *nw)
{
	kmem_free((void *) nw, sizeof (*nw));
}

/*
 * emul64_overlap - classify how [sb, sb + cnt) relates to *rng:
 * O_NONE (disjoint), O_SAME (identical), O_SUBSET (contained in rng)
 * or O_OVERLAP (partial).  (Continues past this chunk boundary.)
 */
emul64_rng_overlap_t
emul64_overlap(emul64_range_t *rng, diskaddr_t sb, size_t cnt)
{

	if (rng->emul64_sb >= sb + cnt)
		return (O_NONE);
	if (rng->emul64_sb + rng->emul64_blkcnt <= sb)
return (O_NONE); 1587 if ((rng->emul64_sb == sb) && (rng->emul64_blkcnt == cnt)) 1588 return (O_SAME); 1589 if ((sb >= rng->emul64_sb) && 1590 ((sb + cnt) <= (rng->emul64_sb + rng->emul64_blkcnt))) { 1591 return (O_SUBSET); 1592 } 1593 return (O_OVERLAP); 1594 } 1595 1596 #include <sys/varargs.h> 1597 1598 /* 1599 * Error logging, printing, and debug print routines 1600 */ 1601 1602 /*VARARGS3*/ 1603 static void 1604 emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...) 1605 { 1606 char buf[256]; 1607 va_list ap; 1608 1609 va_start(ap, fmt); 1610 (void) vsnprintf(buf, sizeof (buf), fmt, ap); 1611 va_end(ap); 1612 1613 scsi_log(emul64 ? emul64->emul64_dip : NULL, 1614 "emul64", level, "%s\n", buf); 1615 } 1616 1617 1618 #ifdef EMUL64DEBUG 1619 1620 static void 1621 emul64_debug_dump_cdb(struct scsi_address *ap, struct scsi_pkt *pkt) 1622 { 1623 static char hex[] = "0123456789abcdef"; 1624 struct emul64 *emul64 = ADDR2EMUL64(ap); 1625 struct emul64_cmd *sp = PKT2CMD(pkt); 1626 uint8_t *cdb = pkt->pkt_cdbp; 1627 char buf [256]; 1628 char *p; 1629 int i; 1630 1631 (void) snprintf(buf, sizeof (buf), "emul64%d: <%d,%d> ", 1632 ddi_get_instance(emul64->emul64_dip), 1633 ap->a_target, ap->a_lun); 1634 1635 p = buf + strlen(buf); 1636 1637 *p++ = '['; 1638 for (i = 0; i < sp->cmd_cdblen; i++, cdb++) { 1639 if (i != 0) 1640 *p++ = ' '; 1641 *p++ = hex[(*cdb >> 4) & 0x0f]; 1642 *p++ = hex[*cdb & 0x0f]; 1643 } 1644 *p++ = ']'; 1645 *p++ = '\n'; 1646 *p = 0; 1647 1648 cmn_err(CE_CONT, buf); 1649 } 1650 #endif /* EMUL64DEBUG */