7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/common/io/emul64.c
+++ new/usr/src/uts/common/io/emul64.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
25 25 */
26 26
27 27
28 28 /*
29 29 * SCSA HBA nexus driver that emulates an HBA connected to SCSI target
30 30 * devices (large disks).
31 31 */
32 32
33 33 #ifdef DEBUG
34 34 #define EMUL64DEBUG
35 35 #endif
36 36
37 37 #include <sys/scsi/scsi.h>
38 38 #include <sys/ddi.h>
39 39 #include <sys/sunddi.h>
40 40 #include <sys/taskq.h>
41 41 #include <sys/disp.h>
42 42 #include <sys/types.h>
43 43 #include <sys/buf.h>
44 44 #include <sys/cpuvar.h>
45 45 #include <sys/dklabel.h>
46 46
47 47 #include <sys/emul64.h>
48 48 #include <sys/emul64cmd.h>
49 49 #include <sys/emul64var.h>
50 50
51 51 int emul64_usetaskq = 1; /* set to zero for debugging */
52 52 int emul64debug = 0;
53 53 #ifdef EMUL64DEBUG
54 54 static int emul64_cdb_debug = 0;
55 55 #include <sys/debug.h>
56 56 #endif
57 57
58 58 /*
59 59 * cb_ops function prototypes
60 60 */
61 61 static int emul64_ioctl(dev_t, int cmd, intptr_t arg, int mode,
62 62 cred_t *credp, int *rvalp);
63 63
64 64 /*
65 65	 * dev_ops function prototypes
66 66 */
67 67 static int emul64_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
68 68 void *arg, void **result);
69 69 static int emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
70 70 static int emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
71 71
72 72 /*
73 73 * Function prototypes
74 74 *
75 75 * SCSA functions exported by means of the transport table
76 76 */
77 77 static int emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
78 78 scsi_hba_tran_t *tran, struct scsi_device *sd);
79 79 static int emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
80 80 static void emul64_pkt_comp(void *);
81 81 static int emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
82 82 static int emul64_scsi_reset(struct scsi_address *ap, int level);
83 83 static int emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
84 84 static int emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value,
85 85 int whom);
86 86 static struct scsi_pkt *emul64_scsi_init_pkt(struct scsi_address *ap,
87 87 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
88 88 int tgtlen, int flags, int (*callback)(), caddr_t arg);
89 89 static void emul64_scsi_destroy_pkt(struct scsi_address *ap,
90 90 struct scsi_pkt *pkt);
91 91 static void emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
92 92 static void emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
93 93 static int emul64_scsi_reset_notify(struct scsi_address *ap, int flag,
94 94 void (*callback)(caddr_t), caddr_t arg);
95 95
96 96 /*
97 97 * internal functions
98 98 */
99 99 static void emul64_i_initcap(struct emul64 *emul64);
100 100
101 101 static void emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...);
102 102 static int emul64_get_tgtrange(struct emul64 *,
103 103 intptr_t,
104 104 emul64_tgt_t **,
105 105 emul64_tgt_range_t *);
106 106 static int emul64_write_off(struct emul64 *,
107 107 emul64_tgt_t *,
108 108 emul64_tgt_range_t *);
109 109 static int emul64_write_on(struct emul64 *,
110 110 emul64_tgt_t *,
111 111 emul64_tgt_range_t *);
112 112 static emul64_nowrite_t *emul64_nowrite_alloc(emul64_range_t *);
113 113 static void emul64_nowrite_free(emul64_nowrite_t *);
114 114 static emul64_nowrite_t *emul64_find_nowrite(emul64_tgt_t *,
115 115 diskaddr_t start_block,
116 116 size_t blkcnt,
117 117 emul64_rng_overlap_t *overlapp,
118 118 emul64_nowrite_t ***prevp);
119 119
120 120 extern emul64_tgt_t *find_tgt(struct emul64 *, ushort_t, ushort_t);
121 121
122 122 #ifdef EMUL64DEBUG
123 123 static void emul64_debug_dump_cdb(struct scsi_address *ap,
124 124 struct scsi_pkt *pkt);
125 125 #endif
126 126
127 127
128 128 #ifdef _DDICT
129 129 static int ddi_in_panic(void);
130 130 static int ddi_in_panic() { return (0); }
131 131 #ifndef SCSI_CAP_RESET_NOTIFICATION
132 132 #define SCSI_CAP_RESET_NOTIFICATION 14
133 133 #endif
134 134 #ifndef SCSI_RESET_NOTIFY
135 135 #define SCSI_RESET_NOTIFY 0x01
136 136 #endif
137 137 #ifndef SCSI_RESET_CANCEL
138 138 #define SCSI_RESET_CANCEL 0x02
139 139 #endif
140 140 #endif
141 141
142 142 /*
143 143 * Tunables:
144 144 *
145 145 * emul64_max_task
146 146 * The taskq facility is used to queue up SCSI start requests on a per
147 147 * controller basis. If the maximum number of queued tasks is hit,
148 148 * taskq_ent_alloc() delays for a second, which adversely impacts our
149 149 * performance. This value establishes the maximum number of task
150 150 * queue entries when taskq_create is called.
151 151 *
152 152 * emul64_task_nthreads
153 153 * Specifies the number of threads that should be used to process a
154 154 * controller's task queue. Our init function sets this to the number
155 155 * of CPUs on the system, but this can be overridden in emul64.conf.
156 156 */
157 157 int emul64_max_task = 16;
158 158 int emul64_task_nthreads = 1;
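
Both tunables are plain kernel globals, so besides the emul64.conf route mentioned above they can be pinned at boot from /etc/system with the usual module:variable syntax; a hypothetical fragment (values purely illustrative):

	* emul64: four taskq worker threads, at most 32 queued tasks
	set emul64:emul64_task_nthreads = 4
	set emul64:emul64_max_task = 32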
159 159
160 160 /*
161 161 * Local static data
162 162 */
163 163 static void *emul64_state = NULL;
164 164
165 165 /*
166 166 * Character/block operations.
167 167 */
168 168 static struct cb_ops emul64_cbops = {
169 169 scsi_hba_open, /* cb_open */
170 170 scsi_hba_close, /* cb_close */
171 171 nodev, /* cb_strategy */
172 172 nodev, /* cb_print */
173 173 nodev, /* cb_dump */
174 174 nodev, /* cb_read */
175 175 nodev, /* cb_write */
176 176 emul64_ioctl, /* cb_ioctl */
177 177 nodev, /* cb_devmap */
178 178 nodev, /* cb_mmap */
179 179 nodev, /* cb_segmap */
180 180 nochpoll, /* cb_chpoll */
181 181 ddi_prop_op, /* cb_prop_op */
182 182 NULL, /* cb_str */
183 183 D_MP | D_64BIT | D_HOTPLUG, /* cb_flag */
184 184 CB_REV, /* cb_rev */
185 185 nodev, /* cb_aread */
186 186 nodev /* cb_awrite */
187 187 };
188 188
189 189 /*
190 190 * autoconfiguration routines.
191 191 */
192 192
193 193 static struct dev_ops emul64_ops = {
194 194	DEVO_REV,			/* rev */
195 195 0, /* refcnt */
196 196 emul64_info, /* getinfo */
197 197 nulldev, /* identify */
198 198 nulldev, /* probe */
199 199 emul64_attach, /* attach */
200 200 emul64_detach, /* detach */
201 201 nodev, /* reset */
202 202 &emul64_cbops, /* char/block ops */
203 203 NULL, /* bus ops */
204 204 NULL, /* power */
205 205 ddi_quiesce_not_needed, /* quiesce */
206 206 };
207 207
208 208 static struct modldrv modldrv = {
209 209 &mod_driverops, /* module type - driver */
210 210 "emul64 SCSI Host Bus Adapter", /* module name */
211 211 &emul64_ops, /* driver ops */
212 212 };
213 213
214 214 static struct modlinkage modlinkage = {
215 215 MODREV_1, /* ml_rev - must be MODREV_1 */
216 - &modldrv, /* ml_linkage */
217 - NULL /* end of driver linkage */
216 + { &modldrv, NULL } /* ml_linkage */
218 217 };
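
This hunk is the substance of the change: ml_linkage is an array member of struct modlinkage, so the old flat initializer relied on brace elision, which gcc's -Wmissing-braces reports once the blanket -Wno-missing-braces is removed from Makefile.uts. A minimal standalone sketch of the pattern (the struct and its array size are illustrative, not the real <sys/modctl.h> definition):

	static int dummy;

	struct modlinkage_like {
		int	rev;
		void	*linkage[7];	/* nested aggregate, like ml_linkage */
	};

	/* flat form relies on brace elision; gcc -Wmissing-braces warns */
	static struct modlinkage_like flat = { 1, &dummy, NULL };

	/* fully braced form, matching the fix above, compiles cleanly */
	static struct modlinkage_like braced = { 1, { &dummy, NULL } };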
219 218
220 219 int
221 220 _init(void)
222 221 {
223 222 int ret;
224 223
225 224 ret = ddi_soft_state_init(&emul64_state, sizeof (struct emul64),
226 225 EMUL64_INITIAL_SOFT_SPACE);
227 226 if (ret != 0)
228 227 return (ret);
229 228
230 229 if ((ret = scsi_hba_init(&modlinkage)) != 0) {
231 230 ddi_soft_state_fini(&emul64_state);
232 231 return (ret);
233 232 }
234 233
235 234 /* Set the number of task threads to the number of CPUs */
236 235 if (boot_max_ncpus == -1) {
237 236 emul64_task_nthreads = max_ncpus;
238 237 } else {
239 238 emul64_task_nthreads = boot_max_ncpus;
240 239 }
241 240
242 241 emul64_bsd_init();
243 242
244 243 ret = mod_install(&modlinkage);
245 244 if (ret != 0) {
246 245 emul64_bsd_fini();
247 246 scsi_hba_fini(&modlinkage);
248 247 ddi_soft_state_fini(&emul64_state);
249 248 }
250 249
251 250 return (ret);
252 251 }
253 252
254 253 int
255 254 _fini(void)
256 255 {
257 256 int ret;
258 257
259 258 if ((ret = mod_remove(&modlinkage)) != 0)
260 259 return (ret);
261 260
262 261 emul64_bsd_fini();
263 262
264 263 scsi_hba_fini(&modlinkage);
265 264
266 265 ddi_soft_state_fini(&emul64_state);
267 266
268 267 return (ret);
269 268 }
270 269
271 270 int
272 271 _info(struct modinfo *modinfop)
273 272 {
274 273 return (mod_info(&modlinkage, modinfop));
275 274 }
276 275
277 276 /*
278 277	 * Given the device number, return the devinfo pointer
279 278	 * from this instance's soft state.
280 279 */
281 280 /*ARGSUSED*/
282 281 static int
283 282 emul64_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
284 283 {
285 284 struct emul64 *foo;
286 285 int instance = getminor((dev_t)arg);
287 286
288 287 switch (cmd) {
289 288 case DDI_INFO_DEVT2DEVINFO:
290 289 foo = ddi_get_soft_state(emul64_state, instance);
291 290 if (foo != NULL)
292 291 *result = (void *)foo->emul64_dip;
293 292 else {
294 293 *result = NULL;
295 294 return (DDI_FAILURE);
296 295 }
297 296 break;
298 297
299 298 case DDI_INFO_DEVT2INSTANCE:
300 299 *result = (void *)(uintptr_t)instance;
301 300 break;
302 301
303 302 default:
304 303 return (DDI_FAILURE);
305 304 }
306 305
307 306 return (DDI_SUCCESS);
308 307 }
309 308
310 309 /*
311 310 * Attach an instance of an emul64 host adapter. Allocate data structures,
312 311 * initialize the emul64 and we're on the air.
313 312 */
314 313 /*ARGSUSED*/
315 314 static int
316 315 emul64_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
317 316 {
318 317 int mutex_initted = 0;
319 318 struct emul64 *emul64;
320 319 int instance;
321 320 scsi_hba_tran_t *tran = NULL;
322 321 ddi_dma_attr_t tmp_dma_attr;
323 322
324 323 emul64_bsd_get_props(dip);
325 324
326 325 bzero((void *) &tmp_dma_attr, sizeof (tmp_dma_attr));
327 326 instance = ddi_get_instance(dip);
328 327
329 328 switch (cmd) {
330 329 case DDI_ATTACH:
331 330 break;
332 331
333 332 case DDI_RESUME:
334 333 tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip);
335 334 if (!tran) {
336 335 return (DDI_FAILURE);
337 336 }
338 337 emul64 = TRAN2EMUL64(tran);
339 338
340 339 return (DDI_SUCCESS);
341 340
342 341 default:
343 342 emul64_i_log(NULL, CE_WARN,
344 343 "emul64%d: Cmd != DDI_ATTACH/DDI_RESUME", instance);
345 344 return (DDI_FAILURE);
346 345 }
347 346
348 347 /*
349 348 * Allocate emul64 data structure.
350 349 */
351 350 if (ddi_soft_state_zalloc(emul64_state, instance) != DDI_SUCCESS) {
352 351 emul64_i_log(NULL, CE_WARN,
353 352 "emul64%d: Failed to alloc soft state",
354 353 instance);
355 354 return (DDI_FAILURE);
356 355 }
357 356
358 357 emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
359 358 if (emul64 == (struct emul64 *)NULL) {
360 359 emul64_i_log(NULL, CE_WARN, "emul64%d: Bad soft state",
361 360 instance);
362 361 ddi_soft_state_free(emul64_state, instance);
363 362 return (DDI_FAILURE);
364 363 }
365 364
366 365
367 366 /*
368 367 * Allocate a transport structure
369 368 */
370 369 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
371 370 if (tran == NULL) {
372 371 cmn_err(CE_WARN, "emul64: scsi_hba_tran_alloc failed\n");
373 372 goto fail;
374 373 }
375 374
376 375 emul64->emul64_tran = tran;
377 376 emul64->emul64_dip = dip;
378 377
379 378 tran->tran_hba_private = emul64;
380 379 tran->tran_tgt_private = NULL;
381 380 tran->tran_tgt_init = emul64_tran_tgt_init;
382 381 tran->tran_tgt_probe = scsi_hba_probe;
383 382 tran->tran_tgt_free = NULL;
384 383
385 384 tran->tran_start = emul64_scsi_start;
386 385 tran->tran_abort = emul64_scsi_abort;
387 386 tran->tran_reset = emul64_scsi_reset;
388 387 tran->tran_getcap = emul64_scsi_getcap;
389 388 tran->tran_setcap = emul64_scsi_setcap;
390 389 tran->tran_init_pkt = emul64_scsi_init_pkt;
391 390 tran->tran_destroy_pkt = emul64_scsi_destroy_pkt;
392 391 tran->tran_dmafree = emul64_scsi_dmafree;
393 392 tran->tran_sync_pkt = emul64_scsi_sync_pkt;
394 393 tran->tran_reset_notify = emul64_scsi_reset_notify;
395 394
396 395 tmp_dma_attr.dma_attr_minxfer = 0x1;
397 396 tmp_dma_attr.dma_attr_burstsizes = 0x7f;
398 397
399 398 /*
400 399 * Attach this instance of the hba
401 400 */
402 401 if (scsi_hba_attach_setup(dip, &tmp_dma_attr, tran,
403 402 0) != DDI_SUCCESS) {
404 403 cmn_err(CE_WARN, "emul64: scsi_hba_attach failed\n");
405 404 goto fail;
406 405 }
407 406
408 407 emul64->emul64_initiator_id = 2;
409 408
410 409 /*
411 410 * Look up the scsi-options property
412 411 */
413 412 emul64->emul64_scsi_options =
414 413 ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "scsi-options",
415 414 EMUL64_DEFAULT_SCSI_OPTIONS);
416 415 EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64 scsi-options=%x",
417 416 emul64->emul64_scsi_options);
418 417
419 418
420 419	/* mutexes to protect the emul64 request and response queues */
421 420 mutex_init(EMUL64_REQ_MUTEX(emul64), NULL, MUTEX_DRIVER,
422 421 emul64->emul64_iblock);
423 422 mutex_init(EMUL64_RESP_MUTEX(emul64), NULL, MUTEX_DRIVER,
424 423 emul64->emul64_iblock);
425 424
426 425 mutex_initted = 1;
427 426
428 427 EMUL64_MUTEX_ENTER(emul64);
429 428
430 429 /*
431 430 * Initialize the default Target Capabilities and Sync Rates
432 431 */
433 432 emul64_i_initcap(emul64);
434 433
435 434 EMUL64_MUTEX_EXIT(emul64);
436 435
437 436
438 437 ddi_report_dev(dip);
439 438 emul64->emul64_taskq = taskq_create("emul64_comp",
440 439 emul64_task_nthreads, MINCLSYSPRI, 1, emul64_max_task, 0);
441 440
442 441 return (DDI_SUCCESS);
443 442
444 443 fail:
445 444 emul64_i_log(NULL, CE_WARN, "emul64%d: Unable to attach", instance);
446 445
447 446 if (mutex_initted) {
448 447 mutex_destroy(EMUL64_REQ_MUTEX(emul64));
449 448 mutex_destroy(EMUL64_RESP_MUTEX(emul64));
450 449 }
451 450 if (tran) {
452 451 scsi_hba_tran_free(tran);
453 452 }
454 453 ddi_soft_state_free(emul64_state, instance);
455 454 return (DDI_FAILURE);
456 455 }
457 456
458 457 /*ARGSUSED*/
459 458 static int
460 459 emul64_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
461 460 {
462 461 struct emul64 *emul64;
463 462 scsi_hba_tran_t *tran;
464 463 int instance = ddi_get_instance(dip);
465 464
466 465
467 466 /* get transport structure pointer from the dip */
468 467 if (!(tran = (scsi_hba_tran_t *)ddi_get_driver_private(dip))) {
469 468 return (DDI_FAILURE);
470 469 }
471 470
472 471 /* get soft state from transport structure */
473 472 emul64 = TRAN2EMUL64(tran);
474 473
475 474 if (!emul64) {
476 475 return (DDI_FAILURE);
477 476 }
478 477
479 478 EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: cmd = %d", cmd);
480 479
481 480 switch (cmd) {
482 481 case DDI_DETACH:
483 482 EMUL64_MUTEX_ENTER(emul64);
484 483
485 484 taskq_destroy(emul64->emul64_taskq);
486 485 (void) scsi_hba_detach(dip);
487 486
488 487 scsi_hba_tran_free(emul64->emul64_tran);
489 488
490 489
491 490 EMUL64_MUTEX_EXIT(emul64);
492 491
493 492 mutex_destroy(EMUL64_REQ_MUTEX(emul64));
494 493 mutex_destroy(EMUL64_RESP_MUTEX(emul64));
495 494
496 495
497 496 EMUL64_DEBUG(emul64, SCSI_DEBUG, "emul64_detach: done");
498 497 ddi_soft_state_free(emul64_state, instance);
499 498
500 499 return (DDI_SUCCESS);
501 500
502 501 case DDI_SUSPEND:
503 502 return (DDI_SUCCESS);
504 503
505 504 default:
506 505 return (DDI_FAILURE);
507 506 }
508 507 }
509 508
510 509 /*
511 510 * Function name : emul64_tran_tgt_init
512 511 *
513 512 * Return Values : DDI_SUCCESS if target supported, DDI_FAILURE otherwise
514 513 *
515 514 */
516 515 /*ARGSUSED*/
517 516 static int
518 517 emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
519 518 scsi_hba_tran_t *tran, struct scsi_device *sd)
520 519 {
521 520 struct emul64 *emul64;
522 521 emul64_tgt_t *tgt;
523 522 char **geo_vidpid = NULL;
524 523 char *geo, *vidpid;
525 524 uint32_t *geoip = NULL;
526 525 uint_t length;
527 526 uint_t length2;
528 527 lldaddr_t sector_count;
529 528 char prop_name[15];
530 529 int ret = DDI_FAILURE;
531 530
532 531 emul64 = TRAN2EMUL64(tran);
533 532 EMUL64_MUTEX_ENTER(emul64);
534 533
535 534 /*
536 535	 * We get called for each target driver.conf node; multiple
537 536	 * nodes may map to the same tgt,lun (sd.conf, st.conf, etc.).
538 537	 * Check to see if transport to this tgt,lun is already established.
539 538 */
540 539 tgt = find_tgt(emul64, sd->sd_address.a_target, sd->sd_address.a_lun);
541 540 if (tgt) {
542 541 ret = DDI_SUCCESS;
543 542 goto out;
544 543 }
545 544
546 545 /* see if we have driver.conf specified device for this target,lun */
547 546 (void) snprintf(prop_name, sizeof (prop_name), "targ_%d_%d",
548 547 sd->sd_address.a_target, sd->sd_address.a_lun);
549 548 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba_dip,
550 549 DDI_PROP_DONTPASS, prop_name,
551 550 &geo_vidpid, &length) != DDI_PROP_SUCCESS)
552 551 goto out;
553 552 if (length < 2) {
554 553 cmn_err(CE_WARN, "emul64: %s property does not have 2 "
555 554 "elements", prop_name);
556 555 goto out;
557 556 }
558 557
559 558 /* pick geometry name and vidpid string from string array */
560 559 geo = *geo_vidpid;
561 560 vidpid = *(geo_vidpid + 1);
562 561
563 562 /* lookup geometry property integer array */
564 563 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hba_dip, DDI_PROP_DONTPASS,
565 564 geo, (int **)&geoip, &length2) != DDI_PROP_SUCCESS) {
566 565 cmn_err(CE_WARN, "emul64: didn't get prop '%s'", geo);
567 566 goto out;
568 567 }
569 568 if (length2 < 6) {
570 569 cmn_err(CE_WARN, "emul64: property %s does not have 6 "
571 570 "elements", *geo_vidpid);
572 571 goto out;
573 572 }
574 573
575 574 /* allocate and initialize tgt structure for tgt,lun */
576 575 tgt = kmem_zalloc(sizeof (emul64_tgt_t), KM_SLEEP);
577 576 rw_init(&tgt->emul64_tgt_nw_lock, NULL, RW_DRIVER, NULL);
578 577 mutex_init(&tgt->emul64_tgt_blk_lock, NULL, MUTEX_DRIVER, NULL);
579 578
580 579 /* create avl for data block storage */
581 580 avl_create(&tgt->emul64_tgt_data, emul64_bsd_blkcompare,
582 581 sizeof (blklist_t), offsetof(blklist_t, bl_node));
583 582
584 583 /* save scsi_address and vidpid */
585 584 bcopy(sd, &tgt->emul64_tgt_saddr, sizeof (struct scsi_address));
586 585 (void) strncpy(tgt->emul64_tgt_inq, vidpid,
587 586	    sizeof (tgt->emul64_tgt_inq));
588 587
589 588 /*
590 589 * The high order 4 bytes of the sector count always come first in
591 590 * emul64.conf. They are followed by the low order 4 bytes. Not
592 591	 * all CPU types want them in this order, but lldaddr_t takes care of
593 592 * this for us. We then pick up geometry (ncyl X nheads X nsect).
594 593 */
595 594 sector_count._p._u = *(geoip + 0);
596 595 sector_count._p._l = *(geoip + 1);
597 596 /*
598 597 * On 32-bit platforms, fix block size if it's greater than the
599 598 * allowable maximum.
600 599 */
601 600 #if !defined(_LP64)
602 601 if (sector_count._f > DK_MAX_BLOCKS)
603 602 sector_count._f = DK_MAX_BLOCKS;
604 603 #endif
605 604 tgt->emul64_tgt_sectors = sector_count._f;
606 605 tgt->emul64_tgt_dtype = *(geoip + 2);
607 606 tgt->emul64_tgt_ncyls = *(geoip + 3);
608 607 tgt->emul64_tgt_nheads = *(geoip + 4);
609 608 tgt->emul64_tgt_nsect = *(geoip + 5);
610 609
611 610 /* insert target structure into list */
612 611 tgt->emul64_tgt_next = emul64->emul64_tgt;
613 612 emul64->emul64_tgt = tgt;
614 613 ret = DDI_SUCCESS;
615 614
616 615 out: EMUL64_MUTEX_EXIT(emul64);
617 616 if (geoip)
618 617 ddi_prop_free(geoip);
619 618 if (geo_vidpid)
620 619 ddi_prop_free(geo_vidpid);
621 620 return (ret);
622 621 }
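
Tying the two property lookups above together: each targ_<target>_<lun> property names a geometry property plus an inquiry vid/pid string, and the geometry property must supply at least six integers: the sector count (high then low 32 bits), device type, ncyl, nheads, and nsect. A hypothetical emul64.conf fragment consistent with this parsing (names and values invented for illustration; 2048 * 64 * 16 = 2097152 sectors):

	# target 0, lun 0: geometry property name, then inquiry vid/pid
	targ_0_0="geo_1gb","ACME    EMULDISK";
	# sector count high, low, dtype, ncyl, nheads, nsect
	geo_1gb=0,2097152,0,2048,64,16;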
623 622
624 623 /*
625 624 * Function name : emul64_i_initcap
626 625 *
627 626 * Return Values : NONE
628 627 * Description : Initializes the default target capabilities and
629 628 * Sync Rates.
630 629 *
631 630 * Context : Called from the user thread through attach.
632 631 *
633 632 */
634 633 static void
635 634 emul64_i_initcap(struct emul64 *emul64)
636 635 {
637 636 uint16_t cap, synch;
638 637 int i;
639 638
640 639 cap = 0;
641 640 synch = 0;
642 641 for (i = 0; i < NTARGETS_WIDE; i++) {
643 642 emul64->emul64_cap[i] = cap;
644 643 emul64->emul64_synch[i] = synch;
645 644 }
646 645 EMUL64_DEBUG(emul64, SCSI_DEBUG, "default cap = 0x%x", cap);
647 646 }
648 647
649 648 /*
650 649 * Function name : emul64_scsi_getcap()
651 650 *
652 651 * Return Values : current value of capability, if defined
653 652 * -1 if capability is not defined
654 653 * Description : returns current capability value
655 654 *
656 655 * Context : Can be called from different kernel process threads.
657 656 * Can be called by interrupt thread.
658 657 */
659 658 static int
660 659 emul64_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
661 660 {
662 661 struct emul64 *emul64 = ADDR2EMUL64(ap);
663 662 int rval = 0;
664 663
665 664 /*
666 665 * We don't allow inquiring about capabilities for other targets
667 666 */
668 667 if (cap == NULL || whom == 0) {
669 668 return (-1);
670 669 }
671 670
672 671 EMUL64_MUTEX_ENTER(emul64);
673 672
674 673 switch (scsi_hba_lookup_capstr(cap)) {
675 674 case SCSI_CAP_DMA_MAX:
676 675 rval = 1 << 24; /* Limit to 16MB max transfer */
677 676 break;
678 677 case SCSI_CAP_MSG_OUT:
679 678 rval = 1;
680 679 break;
681 680 case SCSI_CAP_DISCONNECT:
682 681 rval = 1;
683 682 break;
684 683 case SCSI_CAP_SYNCHRONOUS:
685 684 rval = 1;
686 685 break;
687 686 case SCSI_CAP_WIDE_XFER:
688 687 rval = 1;
689 688 break;
690 689 case SCSI_CAP_TAGGED_QING:
691 690 rval = 1;
692 691 break;
693 692 case SCSI_CAP_UNTAGGED_QING:
694 693 rval = 1;
695 694 break;
696 695 case SCSI_CAP_PARITY:
697 696 rval = 1;
698 697 break;
699 698 case SCSI_CAP_INITIATOR_ID:
700 699 rval = emul64->emul64_initiator_id;
701 700 break;
702 701 case SCSI_CAP_ARQ:
703 702 rval = 1;
704 703 break;
705 704 case SCSI_CAP_LINKED_CMDS:
706 705 break;
707 706 case SCSI_CAP_RESET_NOTIFICATION:
708 707 rval = 1;
709 708 break;
710 709
711 710 default:
712 711 rval = -1;
713 712 break;
714 713 }
715 714
716 715 EMUL64_MUTEX_EXIT(emul64);
717 716
718 717 return (rval);
719 718 }
720 719
721 720 /*
722 721 * Function name : emul64_scsi_setcap()
723 722 *
724 723 * Return Values : 1 - capability exists and can be set to new value
725 724 * 0 - capability could not be set to new value
726 725 * -1 - no such capability
727 726 *
728 727 * Description : sets a capability for a target
729 728 *
730 729 * Context : Can be called from different kernel process threads.
731 730 * Can be called by interrupt thread.
732 731 */
733 732 static int
734 733 emul64_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
735 734 {
736 735 struct emul64 *emul64 = ADDR2EMUL64(ap);
737 736 int rval = 0;
738 737
739 738 /*
740 739 * We don't allow setting capabilities for other targets
741 740 */
742 741 if (cap == NULL || whom == 0) {
743 742 return (-1);
744 743 }
745 744
746 745 EMUL64_MUTEX_ENTER(emul64);
747 746
748 747 switch (scsi_hba_lookup_capstr(cap)) {
749 748 case SCSI_CAP_DMA_MAX:
750 749 case SCSI_CAP_MSG_OUT:
751 750 case SCSI_CAP_PARITY:
752 751 case SCSI_CAP_UNTAGGED_QING:
753 752 case SCSI_CAP_LINKED_CMDS:
754 753 case SCSI_CAP_RESET_NOTIFICATION:
755 754 /*
756 755 * None of these are settable via
757 756 * the capability interface.
758 757 */
759 758 break;
760 759 case SCSI_CAP_DISCONNECT:
761 760 rval = 1;
762 761 break;
763 762 case SCSI_CAP_SYNCHRONOUS:
764 763 rval = 1;
765 764 break;
766 765 case SCSI_CAP_TAGGED_QING:
767 766 rval = 1;
768 767 break;
769 768 case SCSI_CAP_WIDE_XFER:
770 769 rval = 1;
771 770 break;
772 771 case SCSI_CAP_INITIATOR_ID:
773 772 rval = -1;
774 773 break;
775 774 case SCSI_CAP_ARQ:
776 775 rval = 1;
777 776 break;
778 777 case SCSI_CAP_TOTAL_SECTORS:
779 778 emul64->nt_total_sectors[ap->a_target][ap->a_lun] = value;
780 779 rval = TRUE;
781 780 break;
782 781 case SCSI_CAP_SECTOR_SIZE:
783 782 rval = TRUE;
784 783 break;
785 784 default:
786 785 rval = -1;
787 786 break;
788 787 }
789 788
790 789
791 790 EMUL64_MUTEX_EXIT(emul64);
792 791
793 792 return (rval);
794 793 }
795 794
796 795 /*
797 796 * Function name : emul64_scsi_init_pkt
798 797 *
799 798 * Return Values : pointer to scsi_pkt, or NULL
800 799 * Description : Called by kernel on behalf of a target driver
801 800 * calling scsi_init_pkt(9F).
802 801 * Refer to tran_init_pkt(9E) man page
803 802 *
804 803 * Context : Can be called from different kernel process threads.
805 804 * Can be called by interrupt thread.
806 805 */
807 806 /* ARGSUSED */
808 807 static struct scsi_pkt *
809 808 emul64_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
810 809 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
811 810 int flags, int (*callback)(), caddr_t arg)
812 811 {
813 812 struct emul64 *emul64 = ADDR2EMUL64(ap);
814 813 struct emul64_cmd *sp;
815 814
816 815 ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
817 816
818 817 /*
819 818 * First step of emul64_scsi_init_pkt: pkt allocation
820 819 */
821 820 if (pkt == NULL) {
822 821 pkt = scsi_hba_pkt_alloc(emul64->emul64_dip, ap, cmdlen,
823 822 statuslen,
824 823 tgtlen, sizeof (struct emul64_cmd), callback, arg);
825 824 if (pkt == NULL) {
826 825 cmn_err(CE_WARN, "emul64_scsi_init_pkt: "
827 826 "scsi_hba_pkt_alloc failed");
828 827 return (NULL);
829 828 }
830 829
831 830 sp = PKT2CMD(pkt);
832 831
833 832 /*
834 833 * Initialize the new pkt - we redundantly initialize
835 834 * all the fields for illustrative purposes.
836 835 */
837 836 sp->cmd_pkt = pkt;
838 837 sp->cmd_flags = 0;
839 838 sp->cmd_scblen = statuslen;
840 839 sp->cmd_cdblen = cmdlen;
841 840 sp->cmd_emul64 = emul64;
842 841 pkt->pkt_address = *ap;
843 842 pkt->pkt_comp = (void (*)())NULL;
844 843 pkt->pkt_flags = 0;
845 844 pkt->pkt_time = 0;
846 845 pkt->pkt_resid = 0;
847 846 pkt->pkt_statistics = 0;
848 847 pkt->pkt_reason = 0;
849 848
850 849 } else {
851 850 sp = PKT2CMD(pkt);
852 851 }
853 852
854 853 /*
855 854 * Second step of emul64_scsi_init_pkt: dma allocation/move
856 855 */
857 856 if (bp && bp->b_bcount != 0) {
858 857 if (bp->b_flags & B_READ) {
859 858 sp->cmd_flags &= ~CFLAG_DMASEND;
860 859 } else {
861 860 sp->cmd_flags |= CFLAG_DMASEND;
862 861 }
863 862 bp_mapin(bp);
864 863 sp->cmd_addr = (unsigned char *) bp->b_un.b_addr;
865 864 sp->cmd_count = bp->b_bcount;
866 865 pkt->pkt_resid = 0;
867 866 }
868 867
869 868 return (pkt);
870 869 }
871 870
872 871
873 872 /*
874 873 * Function name : emul64_scsi_destroy_pkt
875 874 *
876 875 * Return Values : none
877 876 * Description : Called by kernel on behalf of a target driver
878 877 * calling scsi_destroy_pkt(9F).
879 878 * Refer to tran_destroy_pkt(9E) man page
880 879 *
881 880 * Context : Can be called from different kernel process threads.
882 881 * Can be called by interrupt thread.
883 882 */
884 883 static void
885 884 emul64_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
886 885 {
887 886 struct emul64_cmd *sp = PKT2CMD(pkt);
888 887
889 888 /*
890 889 * emul64_scsi_dmafree inline to make things faster
891 890 */
892 891 if (sp->cmd_flags & CFLAG_DMAVALID) {
893 892 /*
894 893 * Free the mapping.
895 894 */
896 895 sp->cmd_flags &= ~CFLAG_DMAVALID;
897 896 }
898 897
899 898 /*
900 899 * Free the pkt
901 900 */
902 901 scsi_hba_pkt_free(ap, pkt);
903 902 }
904 903
905 904
906 905 /*
907 906 * Function name : emul64_scsi_dmafree()
908 907 *
909 908 * Return Values : none
910 909 * Description : free dvma resources
911 910 *
912 911 * Context : Can be called from different kernel process threads.
913 912 * Can be called by interrupt thread.
914 913 */
915 914 /*ARGSUSED*/
916 915 static void
917 916 emul64_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
918 917 {
919 918 }
920 919
921 920 /*
922 921 * Function name : emul64_scsi_sync_pkt()
923 922 *
924 923 * Return Values : none
925 924 * Description : sync dma
926 925 *
927 926 * Context : Can be called from different kernel process threads.
928 927 * Can be called by interrupt thread.
929 928 */
930 929 /*ARGSUSED*/
931 930 static void
932 931 emul64_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
933 932 {
934 933 }
935 934
936 935 /*
937 936 * routine for reset notification setup, to register or cancel.
938 937 */
939 938 static int
940 939 emul64_scsi_reset_notify(struct scsi_address *ap, int flag,
941 940 void (*callback)(caddr_t), caddr_t arg)
942 941 {
943 942 struct emul64 *emul64 = ADDR2EMUL64(ap);
944 943 struct emul64_reset_notify_entry *p, *beforep;
945 944 int rval = DDI_FAILURE;
946 945
947 946 mutex_enter(EMUL64_REQ_MUTEX(emul64));
948 947
949 948 p = emul64->emul64_reset_notify_listf;
950 949 beforep = NULL;
951 950
952 951 while (p) {
953 952 if (p->ap == ap)
954 953 break; /* An entry exists for this target */
955 954 beforep = p;
956 955 p = p->next;
957 956 }
958 957
959 958 if ((flag & SCSI_RESET_CANCEL) && (p != NULL)) {
960 959 if (beforep == NULL) {
961 960 emul64->emul64_reset_notify_listf = p->next;
962 961 } else {
963 962 beforep->next = p->next;
964 963 }
965 964 kmem_free((caddr_t)p,
966 965 sizeof (struct emul64_reset_notify_entry));
967 966 rval = DDI_SUCCESS;
968 967
969 968 } else if ((flag & SCSI_RESET_NOTIFY) && (p == NULL)) {
970 969 p = kmem_zalloc(sizeof (struct emul64_reset_notify_entry),
971 970 KM_SLEEP);
972 971 p->ap = ap;
973 972 p->callback = callback;
974 973 p->arg = arg;
975 974 p->next = emul64->emul64_reset_notify_listf;
976 975 emul64->emul64_reset_notify_listf = p;
977 976 rval = DDI_SUCCESS;
978 977 }
979 978
980 979 mutex_exit(EMUL64_REQ_MUTEX(emul64));
981 980
982 981 return (rval);
983 982 }
984 983
985 984 /*
986 985 * Function name : emul64_scsi_start()
987 986 *
988 987 * Return Values : TRAN_FATAL_ERROR - emul64 has been shutdown
989 988 * TRAN_BUSY - request queue is full
990 989 * TRAN_ACCEPT - pkt has been submitted to emul64
991 990 *
992 991 * Description : init pkt, start the request
993 992 *
994 993 * Context : Can be called from different kernel process threads.
995 994 * Can be called by interrupt thread.
996 995 */
997 996 static int
998 997 emul64_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
999 998 {
1000 999 struct emul64_cmd *sp = PKT2CMD(pkt);
1001 1000 int rval = TRAN_ACCEPT;
1002 1001 struct emul64 *emul64 = ADDR2EMUL64(ap);
1003 1002 clock_t cur_lbolt;
1004 1003 taskqid_t dispatched;
1005 1004
1006 1005 ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
1007 1006 ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());
1008 1007
1009 1008 EMUL64_DEBUG2(emul64, SCSI_DEBUG, "emul64_scsi_start %x", sp);
1010 1009
1011 1010 pkt->pkt_reason = CMD_CMPLT;
1012 1011
1013 1012 #ifdef EMUL64DEBUG
1014 1013 if (emul64_cdb_debug) {
1015 1014 emul64_debug_dump_cdb(ap, pkt);
1016 1015 }
1017 1016 #endif /* EMUL64DEBUG */
1018 1017
1019 1018 /*
1020 1019	 * Calculate the deadline from pkt_time.
1021 1020	 * Instead of multiplying by 100 (i.e. HZ), we multiply by 128 so
1022 1021	 * we can shift and at the same time get a 28% grace period
1023 1022	 * (128 / 100 = 1.28).  We ignore the rare case of pkt_time == 0
1024 1023	 * and deal with it in emul64_i_watch().
1025 1024 */
1026 1025 cur_lbolt = ddi_get_lbolt();
1027 1026 sp->cmd_deadline = cur_lbolt + (pkt->pkt_time * 128);
1028 1027
1029 1028 if ((emul64_usetaskq == 0) || (pkt->pkt_flags & FLAG_NOINTR) != 0) {
1030 1029 emul64_pkt_comp((caddr_t)pkt);
1031 1030 } else {
1032 1031 dispatched = NULL;
1033 1032 if (emul64_collect_stats) {
1034 1033 /*
1035 1034 * If we are collecting statistics, call
1036 1035 * taskq_dispatch in no sleep mode, so that we can
1037 1036 * detect if we are exceeding the queue length that
1038 1037 * was established in the call to taskq_create in
1039 1038 * emul64_attach. If the no sleep call fails
1040 1039 * (returns NULL), the task will be dispatched in
1041 1040 * sleep mode below.
1042 1041 */
1043 1042 dispatched = taskq_dispatch(emul64->emul64_taskq,
1044 1043 emul64_pkt_comp, (void *)pkt, TQ_NOSLEEP);
1045 1044 if (dispatched == NULL) {
1046 1045 /* Queue was full. dispatch failed. */
1047 1046 mutex_enter(&emul64_stats_mutex);
1048 1047 emul64_taskq_max++;
1049 1048 mutex_exit(&emul64_stats_mutex);
1050 1049 }
1051 1050 }
1052 1051 if (dispatched == NULL) {
1053 1052 (void) taskq_dispatch(emul64->emul64_taskq,
1054 1053 emul64_pkt_comp, (void *)pkt, TQ_SLEEP);
1055 1054 }
1056 1055 }
1057 1056
1058 1057 done:
1059 1058 ASSERT(mutex_owned(EMUL64_REQ_MUTEX(emul64)) == 0 || ddi_in_panic());
1060 1059 ASSERT(mutex_owned(EMUL64_RESP_MUTEX(emul64)) == 0 || ddi_in_panic());
1061 1060
1062 1061 return (rval);
1063 1062 }
1064 1063
1065 1064 void
1066 1065 emul64_check_cond(struct scsi_pkt *pkt, uchar_t key, uchar_t asc, uchar_t ascq)
1067 1066 {
1068 1067 struct scsi_arq_status *arq =
1069 1068 (struct scsi_arq_status *)pkt->pkt_scbp;
1070 1069
1071 1070 /* got check, no data transferred and ARQ done */
1072 1071 arq->sts_status.sts_chk = 1;
1073 1072 pkt->pkt_state |= STATE_ARQ_DONE;
1074 1073 pkt->pkt_state &= ~STATE_XFERRED_DATA;
1075 1074
1076 1075 /* for ARQ */
1077 1076 arq->sts_rqpkt_reason = CMD_CMPLT;
1078 1077 arq->sts_rqpkt_resid = 0;
1079 1078 arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
1080 1079 STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
1081 1080 arq->sts_sensedata.es_valid = 1;
1082 1081 arq->sts_sensedata.es_class = 0x7;
1083 1082 arq->sts_sensedata.es_key = key;
1084 1083 arq->sts_sensedata.es_add_code = asc;
1085 1084 arq->sts_sensedata.es_qual_code = ascq;
1086 1085 }
1087 1086
1088 1087 ushort_t
1089 1088 emul64_error_inject(struct scsi_pkt *pkt)
1090 1089 {
1091 1090 struct emul64_cmd *sp = PKT2CMD(pkt);
1092 1091 emul64_tgt_t *tgt;
1093 1092 struct scsi_arq_status *arq =
1094 1093 (struct scsi_arq_status *)pkt->pkt_scbp;
1095 1094 uint_t max_sense_len;
1096 1095
1097 1096 EMUL64_MUTEX_ENTER(sp->cmd_emul64);
1098 1097 tgt = find_tgt(sp->cmd_emul64,
1099 1098 pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
1100 1099 EMUL64_MUTEX_EXIT(sp->cmd_emul64);
1101 1100
1102 1101 /*
1103 1102 * If there is no target, skip the error injection and
1104 1103 * let the packet be handled normally. This would normally
1105 1104	 * never happen since a_target and a_lun are set up in
1106 1105 * emul64_scsi_init_pkt.
1107 1106 */
1108 1107 if (tgt == NULL) {
1109 1108 return (ERR_INJ_DISABLE);
1110 1109 }
1111 1110
1112 1111 if (tgt->emul64_einj_state != ERR_INJ_DISABLE) {
1113 1112 arq->sts_status = tgt->emul64_einj_scsi_status;
1114 1113 pkt->pkt_state = tgt->emul64_einj_pkt_state;
1115 1114 pkt->pkt_reason = tgt->emul64_einj_pkt_reason;
1116 1115
1117 1116 /*
1118 1117 * Calculate available sense buffer length. We could just
1119 1118 * assume sizeof(struct scsi_extended_sense) but hopefully
1120 1119 * that limitation will go away soon.
1121 1120 */
1122 1121 max_sense_len = sp->cmd_scblen -
1123 1122 (sizeof (struct scsi_arq_status) -
1124 1123 sizeof (struct scsi_extended_sense));
1125 1124 if (max_sense_len > tgt->emul64_einj_sense_length) {
1126 1125 max_sense_len = tgt->emul64_einj_sense_length;
1127 1126 }
1128 1127
1129 1128 /* for ARQ */
1130 1129 arq->sts_rqpkt_reason = CMD_CMPLT;
1131 1130 arq->sts_rqpkt_resid = 0;
1132 1131 arq->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
1133 1132 STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
1134 1133
1135 1134 /* Copy sense data */
1136 1135 if (tgt->emul64_einj_sense_data != 0) {
1137 1136 bcopy(tgt->emul64_einj_sense_data,
1138 1137 (uint8_t *)&arq->sts_sensedata,
1139 1138 max_sense_len);
1140 1139 }
1141 1140 }
1142 1141
1143 1142 /* Return current error injection state */
1144 1143 return (tgt->emul64_einj_state);
1145 1144 }
1146 1145
1147 1146 int
1148 1147 emul64_error_inject_req(struct emul64 *emul64, intptr_t arg)
1149 1148 {
1150 1149 emul64_tgt_t *tgt;
1151 1150 struct emul64_error_inj_data error_inj_req;
1152 1151
1153 1152 /* Check args */
1154 1153 if (arg == NULL) {
1155 1154 return (EINVAL);
1156 1155 }
1157 1156
1158 1157 if (ddi_copyin((void *)arg, &error_inj_req,
1159 1158 sizeof (error_inj_req), 0) != 0) {
1160 1159 cmn_err(CE_WARN, "emul64: ioctl - inj copyin failed\n");
1161 1160 return (EFAULT);
1162 1161 }
1163 1162
1164 1163 EMUL64_MUTEX_ENTER(emul64);
1165 1164 tgt = find_tgt(emul64, error_inj_req.eccd_target,
1166 1165 error_inj_req.eccd_lun);
1167 1166 EMUL64_MUTEX_EXIT(emul64);
1168 1167
1169 1168 /* Make sure device exists */
1170 1169 if (tgt == NULL) {
1171 1170 return (ENODEV);
1172 1171 }
1173 1172
1174 1173 /* Free old sense buffer if we have one */
1175 1174 if (tgt->emul64_einj_sense_data != NULL) {
1176 1175 ASSERT(tgt->emul64_einj_sense_length != 0);
1177 1176 kmem_free(tgt->emul64_einj_sense_data,
1178 1177 tgt->emul64_einj_sense_length);
1179 1178 tgt->emul64_einj_sense_data = NULL;
1180 1179 tgt->emul64_einj_sense_length = 0;
1181 1180 }
1182 1181
1183 1182 /*
1184 1183 * Now handle error injection request. If error injection
1185 1184 * is requested we will return the sense data provided for
1186 1185 * any I/O to this target until told to stop.
1187 1186 */
1188 1187 tgt->emul64_einj_state = error_inj_req.eccd_inj_state;
1189 1188 tgt->emul64_einj_sense_length = error_inj_req.eccd_sns_dlen;
1190 1189 tgt->emul64_einj_pkt_state = error_inj_req.eccd_pkt_state;
1191 1190 tgt->emul64_einj_pkt_reason = error_inj_req.eccd_pkt_reason;
1192 1191 tgt->emul64_einj_scsi_status = error_inj_req.eccd_scsi_status;
1193 1192 switch (error_inj_req.eccd_inj_state) {
1194 1193 case ERR_INJ_ENABLE:
1195 1194 case ERR_INJ_ENABLE_NODATA:
1196 1195 if (error_inj_req.eccd_sns_dlen) {
1197 1196 tgt->emul64_einj_sense_data =
1198 1197 kmem_alloc(error_inj_req.eccd_sns_dlen, KM_SLEEP);
1199 1198 /* Copy sense data */
1200 1199 if (ddi_copyin((void *)(arg + sizeof (error_inj_req)),
1201 1200 tgt->emul64_einj_sense_data,
1202 1201 error_inj_req.eccd_sns_dlen, 0) != 0) {
1203 1202 cmn_err(CE_WARN,
1204 1203 "emul64: sense data copy in failed\n");
1205 1204 return (EFAULT);
1206 1205 }
1207 1206 }
1208 1207 break;
1209 1208 case ERR_INJ_DISABLE:
1210 1209 default:
1211 1210 break;
1212 1211 }
1213 1212
1214 1213 return (0);
1215 1214 }
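
For reference, the user buffer this ioctl consumes is the emul64_error_inj_data structure immediately followed by eccd_sns_dlen bytes of sense data; that is what the second ddi_copyin at arg + sizeof (error_inj_req) reads. A hypothetical userland sketch, assuming <sys/emul64.h> (included by the driver above) exports the structure, the ERR_INJ_* states, and the EMUL64_ERROR_INJECT request code:

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/emul64.h>

	/* Make target 0, lun 0 fail every command with a medium error. */
	static int
	inject_medium_error(int fd)
	{
		char buf[sizeof (struct emul64_error_inj_data) + 18];
		struct emul64_error_inj_data *inj = (void *)buf;
		uint8_t *sense = (uint8_t *)(inj + 1);

		(void) memset(buf, 0, sizeof (buf));
		inj->eccd_target = 0;
		inj->eccd_lun = 0;
		inj->eccd_inj_state = ERR_INJ_ENABLE_NODATA;
		inj->eccd_sns_dlen = 18;	/* fixed-format sense below */
		sense[0] = 0x70;		/* current error */
		sense[2] = 0x03;		/* sense key: MEDIUM ERROR */

		return (ioctl(fd, EMUL64_ERROR_INJECT, buf));
	}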
1216 1215
1217 1216 int bsd_scsi_start_stop_unit(struct scsi_pkt *);
1218 1217 int bsd_scsi_test_unit_ready(struct scsi_pkt *);
1219 1218 int bsd_scsi_request_sense(struct scsi_pkt *);
1220 1219 int bsd_scsi_inquiry(struct scsi_pkt *);
1221 1220 int bsd_scsi_format(struct scsi_pkt *);
1222 1221 int bsd_scsi_io(struct scsi_pkt *);
1223 1222 int bsd_scsi_log_sense(struct scsi_pkt *);
1224 1223 int bsd_scsi_mode_sense(struct scsi_pkt *);
1225 1224 int bsd_scsi_mode_select(struct scsi_pkt *);
1226 1225 int bsd_scsi_read_capacity(struct scsi_pkt *);
1227 1226 int bsd_scsi_read_capacity_16(struct scsi_pkt *);
1228 1227 int bsd_scsi_reserve(struct scsi_pkt *);
1229 1228 int bsd_scsi_format(struct scsi_pkt *);
1230 1229 int bsd_scsi_release(struct scsi_pkt *);
1231 1230 int bsd_scsi_read_defect_list(struct scsi_pkt *);
1232 1231 int bsd_scsi_reassign_block(struct scsi_pkt *);
1233 1232 int bsd_freeblkrange(emul64_tgt_t *, emul64_range_t *);
1234 1233
1235 1234 static void
1236 1235 emul64_handle_cmd(struct scsi_pkt *pkt)
1237 1236 {
1238 1237 if (emul64_error_inject(pkt) == ERR_INJ_ENABLE_NODATA) {
1239 1238 /*
1240 1239 * If error injection is configured to return with
1241 1240 * no data return now without handling the command.
1242 1241 * This is how normal check conditions work.
1243 1242 *
1244 1243 * If the error injection state is ERR_INJ_ENABLE
1245 1244 * (or if error injection is disabled) continue and
1246 1245 * handle the command. This would be used for
1247 1246 * KEY_RECOVERABLE_ERROR type conditions.
1248 1247 */
1249 1248 return;
1250 1249 }
1251 1250
1252 1251 switch (pkt->pkt_cdbp[0]) {
1253 1252 case SCMD_START_STOP:
1254 1253 (void) bsd_scsi_start_stop_unit(pkt);
1255 1254 break;
1256 1255 case SCMD_TEST_UNIT_READY:
1257 1256 (void) bsd_scsi_test_unit_ready(pkt);
1258 1257 break;
1259 1258 case SCMD_REQUEST_SENSE:
1260 1259 (void) bsd_scsi_request_sense(pkt);
1261 1260 break;
1262 1261 case SCMD_INQUIRY:
1263 1262 (void) bsd_scsi_inquiry(pkt);
1264 1263 break;
1265 1264 case SCMD_FORMAT:
1266 1265 (void) bsd_scsi_format(pkt);
1267 1266 break;
1268 1267 case SCMD_READ:
1269 1268 case SCMD_WRITE:
1270 1269 case SCMD_READ_G1:
1271 1270 case SCMD_WRITE_G1:
1272 1271 case SCMD_READ_G4:
1273 1272 case SCMD_WRITE_G4:
1274 1273 (void) bsd_scsi_io(pkt);
1275 1274 break;
1276 1275 case SCMD_LOG_SENSE_G1:
1277 1276 (void) bsd_scsi_log_sense(pkt);
1278 1277 break;
1279 1278 case SCMD_MODE_SENSE:
1280 1279 case SCMD_MODE_SENSE_G1:
1281 1280 (void) bsd_scsi_mode_sense(pkt);
1282 1281 break;
1283 1282 case SCMD_MODE_SELECT:
1284 1283 case SCMD_MODE_SELECT_G1:
1285 1284 (void) bsd_scsi_mode_select(pkt);
1286 1285 break;
1287 1286 case SCMD_READ_CAPACITY:
1288 1287 (void) bsd_scsi_read_capacity(pkt);
1289 1288 break;
1290 1289 case SCMD_SVC_ACTION_IN_G4:
1291 1290 if (pkt->pkt_cdbp[1] == SSVC_ACTION_READ_CAPACITY_G4) {
1292 1291 (void) bsd_scsi_read_capacity_16(pkt);
1293 1292 } else {
1294 1293 cmn_err(CE_WARN, "emul64: unrecognized G4 service "
1295 1294 "action 0x%x", pkt->pkt_cdbp[1]);
1296 1295 }
1297 1296 break;
1298 1297 case SCMD_RESERVE:
1299 1298 case SCMD_RESERVE_G1:
1300 1299 (void) bsd_scsi_reserve(pkt);
1301 1300 break;
1302 1301 case SCMD_RELEASE:
1303 1302 case SCMD_RELEASE_G1:
1304 1303 (void) bsd_scsi_release(pkt);
1305 1304 break;
1306 1305 case SCMD_REASSIGN_BLOCK:
1307 1306 (void) bsd_scsi_reassign_block(pkt);
1308 1307 break;
1309 1308 case SCMD_READ_DEFECT_LIST:
1310 1309 (void) bsd_scsi_read_defect_list(pkt);
1311 1310 break;
1312 1311 case SCMD_PRIN:
1313 1312 case SCMD_PROUT:
1314 1313 case SCMD_REPORT_LUNS:
1315 1314 /* ASC 0x24 INVALID FIELD IN CDB */
1316 1315 emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
1317 1316 break;
1318 1317 default:
1319 1318 cmn_err(CE_WARN, "emul64: unrecognized "
1320 1319 "SCSI cmd 0x%x", pkt->pkt_cdbp[0]);
1321 1320 emul64_check_cond(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x0);
1322 1321 break;
1323 1322 case SCMD_GET_CONFIGURATION:
1324 1323 case 0x35: /* SCMD_SYNCHRONIZE_CACHE */
1325 1324 /* Don't complain */
1326 1325 break;
1327 1326 }
1328 1327 }
1329 1328
1330 1329 static void
1331 1330 emul64_pkt_comp(void * arg)
1332 1331 {
1333 1332 struct scsi_pkt *pkt = (struct scsi_pkt *)arg;
1334 1333 struct emul64_cmd *sp = PKT2CMD(pkt);
1335 1334 emul64_tgt_t *tgt;
1336 1335
1337 1336 EMUL64_MUTEX_ENTER(sp->cmd_emul64);
1338 1337 tgt = find_tgt(sp->cmd_emul64,
1339 1338 pkt->pkt_address.a_target, pkt->pkt_address.a_lun);
1340 1339 EMUL64_MUTEX_EXIT(sp->cmd_emul64);
1341 1340 if (!tgt) {
1342 1341 pkt->pkt_reason = CMD_TIMEOUT;
1343 1342 pkt->pkt_state = STATE_GOT_BUS | STATE_SENT_CMD;
1344 1343 pkt->pkt_statistics = STAT_TIMEOUT;
1345 1344 } else {
1346 1345 pkt->pkt_reason = CMD_CMPLT;
1347 1346 *pkt->pkt_scbp = STATUS_GOOD;
1348 1347 pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
1349 1348 STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
1350 1349 pkt->pkt_statistics = 0;
1351 1350 emul64_handle_cmd(pkt);
1352 1351 }
1353 1352 scsi_hba_pkt_comp(pkt);
1354 1353 }
1355 1354
1356 1355 /* ARGSUSED */
1357 1356 static int
1358 1357 emul64_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1359 1358 {
1360 1359 return (1);
1361 1360 }
1362 1361
1363 1362 /* ARGSUSED */
1364 1363 static int
1365 1364 emul64_scsi_reset(struct scsi_address *ap, int level)
1366 1365 {
1367 1366 return (1);
1368 1367 }
1369 1368
1370 1369 static int
1371 1370 emul64_get_tgtrange(struct emul64 *emul64,
1372 1371 intptr_t arg,
1373 1372 emul64_tgt_t **tgtp,
1374 1373 emul64_tgt_range_t *tgtr)
1375 1374 {
1376 1375 if (ddi_copyin((void *)arg, tgtr, sizeof (*tgtr), 0) != 0) {
1377 1376 cmn_err(CE_WARN, "emul64: ioctl - copy in failed\n");
1378 1377 return (EFAULT);
1379 1378 }
1380 1379 EMUL64_MUTEX_ENTER(emul64);
1381 1380 *tgtp = find_tgt(emul64, tgtr->emul64_target, tgtr->emul64_lun);
1382 1381 EMUL64_MUTEX_EXIT(emul64);
1383 1382 if (*tgtp == NULL) {
1384 1383 cmn_err(CE_WARN, "emul64: ioctl - no target for %d,%d on %d",
1385 1384 tgtr->emul64_target, tgtr->emul64_lun,
1386 1385 ddi_get_instance(emul64->emul64_dip));
1387 1386 return (ENXIO);
1388 1387 }
1389 1388 return (0);
1390 1389 }
1391 1390
1392 1391 static int
1393 1392 emul64_ioctl(dev_t dev,
1394 1393 int cmd,
1395 1394 intptr_t arg,
1396 1395 int mode,
1397 1396 cred_t *credp,
1398 1397 int *rvalp)
1399 1398 {
1400 1399 struct emul64 *emul64;
1401 1400 int instance;
1402 1401 int rv = 0;
1403 1402 emul64_tgt_range_t tgtr;
1404 1403 emul64_tgt_t *tgt;
1405 1404
1406 1405 instance = MINOR2INST(getminor(dev));
1407 1406 emul64 = (struct emul64 *)ddi_get_soft_state(emul64_state, instance);
1408 1407 if (emul64 == NULL) {
1409 1408 cmn_err(CE_WARN, "emul64: ioctl - no softstate for %d\n",
1410 1409 getminor(dev));
1411 1410 return (ENXIO);
1412 1411 }
1413 1412
1414 1413 switch (cmd) {
1415 1414 case EMUL64_WRITE_OFF:
1416 1415 rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
1417 1416 if (rv == 0) {
1418 1417 rv = emul64_write_off(emul64, tgt, &tgtr);
1419 1418 }
1420 1419 break;
1421 1420 case EMUL64_WRITE_ON:
1422 1421 rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
1423 1422 if (rv == 0) {
1424 1423 rv = emul64_write_on(emul64, tgt, &tgtr);
1425 1424 }
1426 1425 break;
1427 1426 case EMUL64_ZERO_RANGE:
1428 1427 rv = emul64_get_tgtrange(emul64, arg, &tgt, &tgtr);
1429 1428 if (rv == 0) {
1430 1429 mutex_enter(&tgt->emul64_tgt_blk_lock);
1431 1430 rv = bsd_freeblkrange(tgt, &tgtr.emul64_blkrange);
1432 1431 mutex_exit(&tgt->emul64_tgt_blk_lock);
1433 1432 }
1434 1433 break;
1435 1434 case EMUL64_ERROR_INJECT:
1436 1435 rv = emul64_error_inject_req(emul64, arg);
1437 1436 break;
1438 1437 default:
1439 1438 rv = scsi_hba_ioctl(dev, cmd, arg, mode, credp, rvalp);
1440 1439 break;
1441 1440 }
1442 1441 return (rv);
1443 1442 }
1444 1443
1445 1444 /* ARGSUSED */
1446 1445 static int
1447 1446 emul64_write_off(struct emul64 *emul64,
1448 1447 emul64_tgt_t *tgt,
1449 1448 emul64_tgt_range_t *tgtr)
1450 1449 {
1451 1450 size_t blkcnt = tgtr->emul64_blkrange.emul64_blkcnt;
1452 1451 emul64_nowrite_t *cur;
1453 1452 emul64_nowrite_t *nowrite;
1454 1453 emul64_rng_overlap_t overlap = O_NONE;
1455 1454 emul64_nowrite_t **prev = NULL;
1456 1455 diskaddr_t sb = tgtr->emul64_blkrange.emul64_sb;
1457 1456
1458 1457 nowrite = emul64_nowrite_alloc(&tgtr->emul64_blkrange);
1459 1458
1460 1459 /* Find spot in list */
1461 1460 rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER);
1462 1461 cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev);
1463 1462 if (overlap == O_NONE) {
1464 1463 /* Insert into list */
1465 1464 *prev = nowrite;
1466 1465 nowrite->emul64_nwnext = cur;
1467 1466 }
1468 1467 rw_exit(&tgt->emul64_tgt_nw_lock);
1469 1468 if (overlap == O_NONE) {
1470 1469 if (emul64_collect_stats) {
1471 1470 mutex_enter(&emul64_stats_mutex);
1472 1471 emul64_nowrite_count++;
1473 1472 mutex_exit(&emul64_stats_mutex);
1474 1473 }
1475 1474 } else {
1476 1475 cmn_err(CE_WARN, "emul64: EMUL64_WRITE_OFF 0x%llx,0x%"
1477 1476	    PRIx64 " overlaps 0x%llx,0x%" PRIx64 "\n",
1478 1477 nowrite->emul64_blocked.emul64_sb,
1479 1478 nowrite->emul64_blocked.emul64_blkcnt,
1480 1479 cur->emul64_blocked.emul64_sb,
1481 1480 cur->emul64_blocked.emul64_blkcnt);
1482 1481 emul64_nowrite_free(nowrite);
1483 1482 return (EINVAL);
1484 1483 }
1485 1484 return (0);
1486 1485 }
1487 1486
1488 1487 /* ARGSUSED */
1489 1488 static int
1490 1489 emul64_write_on(struct emul64 *emul64,
1491 1490 emul64_tgt_t *tgt,
1492 1491 emul64_tgt_range_t *tgtr)
1493 1492 {
1494 1493 size_t blkcnt = tgtr->emul64_blkrange.emul64_blkcnt;
1495 1494 emul64_nowrite_t *cur;
1496 1495 emul64_rng_overlap_t overlap = O_NONE;
1497 1496 emul64_nowrite_t **prev = NULL;
1498 1497 int rv = 0;
1499 1498 diskaddr_t sb = tgtr->emul64_blkrange.emul64_sb;
1500 1499
1501 1500 /* Find spot in list */
1502 1501 rw_enter(&tgt->emul64_tgt_nw_lock, RW_WRITER);
1503 1502 cur = emul64_find_nowrite(tgt, sb, blkcnt, &overlap, &prev);
1504 1503 if (overlap == O_SAME) {
1505 1504 /* Remove from list */
1506 1505 *prev = cur->emul64_nwnext;
1507 1506 }
1508 1507 rw_exit(&tgt->emul64_tgt_nw_lock);
1509 1508
1510 1509 switch (overlap) {
1511 1510 case O_NONE:
1512 1511 cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx "
1513 1512 "range not found\n", sb, blkcnt);
1514 1513 rv = ENXIO;
1515 1514 break;
1516 1515 case O_SAME:
1517 1516 if (emul64_collect_stats) {
1518 1517 mutex_enter(&emul64_stats_mutex);
1519 1518 emul64_nowrite_count--;
1520 1519 mutex_exit(&emul64_stats_mutex);
1521 1520 }
1522 1521 emul64_nowrite_free(cur);
1523 1522 break;
1524 1523 case O_OVERLAP:
1525 1524 case O_SUBSET:
1526 1525 cmn_err(CE_WARN, "emul64: EMUL64_WRITE_ON 0x%llx,0x%lx "
1527 1526 "overlaps 0x%llx,0x%" PRIx64 "\n",
1528 1527 sb, blkcnt, cur->emul64_blocked.emul64_sb,
1529 1528 cur->emul64_blocked.emul64_blkcnt);
1530 1529 rv = EINVAL;
1531 1530 break;
1532 1531 }
1533 1532 return (rv);
1534 1533 }
1535 1534
1536 1535 static emul64_nowrite_t *
1537 1536 emul64_find_nowrite(emul64_tgt_t *tgt,
1538 1537 diskaddr_t sb,
1539 1538 size_t blkcnt,
1540 1539 emul64_rng_overlap_t *overlap,
1541 1540 emul64_nowrite_t ***prevp)
1542 1541 {
1543 1542 emul64_nowrite_t *cur;
1544 1543 emul64_nowrite_t **prev;
1545 1544
1546 1545 /* Find spot in list */
1547 1546 *overlap = O_NONE;
1548 1547 prev = &tgt->emul64_tgt_nowrite;
1549 1548 cur = tgt->emul64_tgt_nowrite;
1550 1549 while (cur != NULL) {
1551 1550 *overlap = emul64_overlap(&cur->emul64_blocked, sb, blkcnt);
1552 1551 if (*overlap != O_NONE)
1553 1552 break;
1554 1553 prev = &cur->emul64_nwnext;
1555 1554 cur = cur->emul64_nwnext;
1556 1555 }
1557 1556
1558 1557 *prevp = prev;
1559 1558 return (cur);
1560 1559 }
1561 1560
1562 1561 static emul64_nowrite_t *
1563 1562 emul64_nowrite_alloc(emul64_range_t *range)
1564 1563 {
1565 1564 emul64_nowrite_t *nw;
1566 1565
1567 1566 nw = kmem_zalloc(sizeof (*nw), KM_SLEEP);
1568 1567 bcopy((void *) range,
1569 1568 (void *) &nw->emul64_blocked,
1570 1569 sizeof (nw->emul64_blocked));
1571 1570 return (nw);
1572 1571 }
1573 1572
1574 1573 static void
1575 1574 emul64_nowrite_free(emul64_nowrite_t *nw)
1576 1575 {
1577 1576 kmem_free((void *) nw, sizeof (*nw));
1578 1577 }
1579 1578
1580 1579 emul64_rng_overlap_t
1581 1580 emul64_overlap(emul64_range_t *rng, diskaddr_t sb, size_t cnt)
1582 1581 {
1583 1582
1584 1583 if (rng->emul64_sb >= sb + cnt)
1585 1584 return (O_NONE);
1586 1585 if (rng->emul64_sb + rng->emul64_blkcnt <= sb)
1587 1586 return (O_NONE);
1588 1587 if ((rng->emul64_sb == sb) && (rng->emul64_blkcnt == cnt))
1589 1588 return (O_SAME);
1590 1589 if ((sb >= rng->emul64_sb) &&
1591 1590 ((sb + cnt) <= (rng->emul64_sb + rng->emul64_blkcnt))) {
1592 1591 return (O_SUBSET);
1593 1592 }
1594 1593 return (O_OVERLAP);
1595 1594 }
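
Because the four emul64_rng_overlap_t values encode subtly different geometric cases, a worked example may help; for rng covering blocks [100, 110) (numbers invented):

	/*
	 *	sb = 200, cnt =  5	->  O_NONE	(disjoint)
	 *	sb = 100, cnt = 10	->  O_SAME	(identical range)
	 *	sb = 102, cnt =  4	->  O_SUBSET	(wholly inside rng)
	 *	sb =  95, cnt = 10	->  O_OVERLAP	(partial overlap)
	 */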
1596 1595
1597 1596 #include <sys/varargs.h>
1598 1597
1599 1598 /*
1600 1599 * Error logging, printing, and debug print routines
1601 1600 */
1602 1601
1603 1602 /*VARARGS3*/
1604 1603 static void
1605 1604 emul64_i_log(struct emul64 *emul64, int level, char *fmt, ...)
1606 1605 {
1607 1606 char buf[256];
1608 1607 va_list ap;
1609 1608
1610 1609 va_start(ap, fmt);
1611 1610 (void) vsnprintf(buf, sizeof (buf), fmt, ap);
1612 1611 va_end(ap);
1613 1612
1614 1613 scsi_log(emul64 ? emul64->emul64_dip : NULL,
1615 1614 "emul64", level, "%s\n", buf);
1616 1615 }
1617 1616
1618 1617
1619 1618 #ifdef EMUL64DEBUG
1620 1619
1621 1620 static void
1622 1621 emul64_debug_dump_cdb(struct scsi_address *ap, struct scsi_pkt *pkt)
1623 1622 {
1624 1623 static char hex[] = "0123456789abcdef";
1625 1624 struct emul64 *emul64 = ADDR2EMUL64(ap);
1626 1625 struct emul64_cmd *sp = PKT2CMD(pkt);
1627 1626 uint8_t *cdb = pkt->pkt_cdbp;
1628 1627 char buf [256];
1629 1628 char *p;
1630 1629 int i;
1631 1630
1632 1631 (void) snprintf(buf, sizeof (buf), "emul64%d: <%d,%d> ",
1633 1632 ddi_get_instance(emul64->emul64_dip),
1634 1633 ap->a_target, ap->a_lun);
1635 1634
1636 1635 p = buf + strlen(buf);
1637 1636
1638 1637 *p++ = '[';
1639 1638 for (i = 0; i < sp->cmd_cdblen; i++, cdb++) {
1640 1639 if (i != 0)
1641 1640 *p++ = ' ';
1642 1641 *p++ = hex[(*cdb >> 4) & 0x0f];
1643 1642 *p++ = hex[*cdb & 0x0f];
1644 1643 }
1645 1644 *p++ = ']';
1646 1645 *p++ = '\n';
1647 1646 *p = 0;
1648 1647
1649 1648	cmn_err(CE_CONT, "%s", buf);
1650 1649 }
1651 1650 #endif /* EMUL64DEBUG */