7127 remove -Wno-missing-braces from Makefile.uts
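With -Wno-missing-braces dropped from Makefile.uts, GCC's -Wmissing-braces warning takes effect, so initializers that relied on brace elision must brace nested aggregates explicitly. In this file that means the trailing aggregate members of the dadk_com_ops and dadk_ops operation tables, and the ml_linkage pointer array in struct modlinkage, now get their own braces (for example { NULL, NULL } instead of 0, 0). A minimal sketch of the pattern, using a hypothetical struct rather than anything from the source tree:

struct ops {
	int	(*op_open)(void);
	void	*op_resv[2];	/* trailing aggregate member */
};

/* brace elision is legal C, but warns under -Wmissing-braces */
static struct ops bad_ops = { NULL, 0, 0 };

/* fully braced initializer: same object, no warning */
static struct ops good_ops = { NULL, { NULL, NULL } };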
--- old/usr/src/uts/intel/io/dktp/dcdev/dadk.c
+++ new/usr/src/uts/intel/io/dktp/dcdev/dadk.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * Direct Attached Disk
28 28 */
29 29
30 30 #include <sys/file.h>
31 31 #include <sys/scsi/scsi.h>
32 32 #include <sys/var.h>
33 33 #include <sys/proc.h>
34 34 #include <sys/dktp/cm.h>
35 35 #include <sys/vtoc.h>
36 36 #include <sys/dkio.h>
37 37 #include <sys/policy.h>
38 38 #include <sys/priv.h>
39 39
40 40 #include <sys/dktp/dadev.h>
41 41 #include <sys/dktp/fctypes.h>
42 42 #include <sys/dktp/flowctrl.h>
43 43 #include <sys/dktp/tgcom.h>
44 44 #include <sys/dktp/tgdk.h>
45 45 #include <sys/dktp/bbh.h>
46 46 #include <sys/dktp/dadkio.h>
47 47 #include <sys/dktp/dadk.h>
48 48 #include <sys/cdio.h>
49 49
50 50 /*
51 51 * Local Function Prototypes
52 52 */
53 53 static void dadk_restart(void *pktp);
54 54 static void dadk_pktcb(struct cmpkt *pktp);
55 55 static void dadk_iodone(struct buf *bp);
56 56 static void dadk_polldone(struct buf *bp);
57 57 static void dadk_setcap(struct dadk *dadkp);
58 58 static void dadk_create_errstats(struct dadk *dadkp, int instance);
59 59 static void dadk_destroy_errstats(struct dadk *dadkp);
60 60
61 61 static int dadk_chkerr(struct cmpkt *pktp);
62 62 static int dadk_ioprep(struct dadk *dadkp, struct cmpkt *pktp);
63 63 static int dadk_iosetup(struct dadk *dadkp, struct cmpkt *pktp);
64 64 static int dadk_ioretry(struct cmpkt *pktp, int action);
65 65
66 66 static struct cmpkt *dadk_pktprep(struct dadk *dadkp, struct cmpkt *in_pktp,
67 67 struct buf *bp, void (*cb_func)(struct buf *), int (*func)(caddr_t),
68 68 caddr_t arg);
69 69
70 70 static int dadk_pkt(opaque_t com_data, struct buf *bp, int (*func)(caddr_t),
71 71 caddr_t arg);
72 72 static void dadk_transport(opaque_t com_data, struct buf *bp);
73 73 static int dadk_ctl_ioctl(struct dadk *, uint32_t, uintptr_t, int);
74 74
75 75 struct tgcom_objops dadk_com_ops = {
76 76 nodev,
77 77 nodev,
78 78 dadk_pkt,
79 79 dadk_transport,
80 - 0, 0
80 + { NULL, NULL }
81 81 };
82 82
83 83 /*
84 84 * architecture dependent allocation restrictions for dadk_iob_alloc(). For
85 85 * x86, we'll set dma_attr_addr_hi to dadk_max_phys_addr and dma_attr_sgllen
86 86 * to dadk_sgl_size during _init().
87 87 */
88 88 #if defined(__sparc)
89 89 static ddi_dma_attr_t dadk_alloc_attr = {
90 90 DMA_ATTR_V0, /* version number */
91 91 0x0, /* lowest usable address */
92 92 0xFFFFFFFFull, /* high DMA address range */
93 93 0xFFFFFFFFull, /* DMA counter register */
94 94 1, /* DMA address alignment */
95 95 1, /* DMA burstsizes */
96 96 1, /* min effective DMA size */
97 97 0xFFFFFFFFull, /* max DMA xfer size */
98 98 0xFFFFFFFFull, /* segment boundary */
99 99 1, /* s/g list length */
100 100 512, /* granularity of device */
101 101 0, /* DMA transfer flags */
102 102 };
103 103 #elif defined(__x86)
104 104 static ddi_dma_attr_t dadk_alloc_attr = {
105 105 DMA_ATTR_V0, /* version number */
106 106 0x0, /* lowest usable address */
107 107 0x0, /* high DMA address range [set in _init()] */
108 108 0xFFFFull, /* DMA counter register */
109 109 512, /* DMA address alignment */
110 110 1, /* DMA burstsizes */
111 111 1, /* min effective DMA size */
112 112 0xFFFFFFFFull, /* max DMA xfer size */
113 113 0xFFFFFFFFull, /* segment boundary */
114 114 0, /* s/g list length [set in _init()] */
115 115 512, /* granularity of device */
116 116 0, /* DMA transfer flags */
117 117 };
118 118
119 119 uint64_t dadk_max_phys_addr = 0xFFFFFFFFull;
120 120 int dadk_sgl_size = 0xFF;
121 121 #endif
122 122
123 123 static int dadk_rmb_ioctl(struct dadk *dadkp, int cmd, intptr_t arg, int flags,
124 124 int silent);
125 125 static void dadk_rmb_iodone(struct buf *bp);
126 126
127 127 static int dadk_dk_buf_setup(struct dadk *dadkp, opaque_t *cmdp,
128 128 dev_t dev, enum uio_seg dataspace, int rw);
129 129 static void dadk_dk(struct dadk *dadkp, struct dadkio_rwcmd *scmdp,
130 130 struct buf *bp);
131 131 static void dadkmin(struct buf *bp);
132 132 static int dadk_dk_strategy(struct buf *bp);
133 133 static void dadk_recorderr(struct cmpkt *pktp, struct dadkio_rwcmd *rwcmdp);
134 134
135 135 struct tgdk_objops dadk_ops = {
136 136 dadk_init,
137 137 dadk_free,
138 138 dadk_probe,
139 139 dadk_attach,
140 140 dadk_open,
141 141 dadk_close,
142 142 dadk_ioctl,
143 143 dadk_strategy,
144 144 dadk_setgeom,
145 145 dadk_getgeom,
146 146 dadk_iob_alloc,
147 147 dadk_iob_free,
148 148 dadk_iob_htoc,
149 149 dadk_iob_xfer,
150 150 dadk_dump,
151 151 dadk_getphygeom,
152 152 dadk_set_bbhobj,
153 153 dadk_check_media,
154 154 dadk_inquiry,
155 155 dadk_cleanup,
156 - 0
156 + { NULL }
157 157 };
158 158
159 159 /*
160 160 * Local static data
161 161 */
162 162
163 163 #ifdef DADK_DEBUG
164 164 #define DENT 0x0001
165 165 #define DERR 0x0002
166 166 #define DIO 0x0004
167 167 #define DGEOM 0x0010
168 168 #define DSTATE 0x0020
169 169 static int dadk_debug = DGEOM;
170 170
171 171 #endif /* DADK_DEBUG */
172 172
173 173 static int dadk_check_media_time = 3000000; /* 3 Second State Check */
174 174 static int dadk_dk_maxphys = 0x80000;
175 175
176 176 static char *dadk_cmds[] = {
177 177 "\000Unknown", /* unknown */
178 178 "\001read sector", /* DCMD_READ 1 */
179 179 "\002write sector", /* DCMD_WRITE 2 */
180 180 "\003format track", /* DCMD_FMTTRK 3 */
181 181 "\004format whole drive", /* DCMD_FMTDRV 4 */
182 182 "\005recalibrate", /* DCMD_RECAL 5 */
183 183 "\006seek sector", /* DCMD_SEEK 6 */
184 184 "\007read verify", /* DCMD_RDVER 7 */
185 185 "\010read defect list", /* DCMD_GETDEF 8 */
186 186 "\011lock door", /* DCMD_LOCK 9 */
187 187 "\012unlock door", /* DCMD_UNLOCK 10 */
188 188 "\013start motor", /* DCMD_START_MOTOR 11 */
189 189 "\014stop motor", /* DCMD_STOP_MOTOR 12 */
190 190 "\015eject", /* DCMD_EJECT 13 */
191 191 "\016update geometry", /* DCMD_UPDATE_GEOM 14 */
192 192 "\017get state", /* DCMD_GET_STATE 15 */
193 193 "\020cdrom pause", /* DCMD_PAUSE 16 */
194 194 "\021cdrom resume", /* DCMD_RESUME 17 */
195 195 "\022cdrom play track index", /* DCMD_PLAYTRKIND 18 */
196 196 "\023cdrom play msf", /* DCMD_PLAYMSF 19 */
197 197 "\024cdrom sub channel", /* DCMD_SUBCHNL 20 */
198 198 "\025cdrom read mode 1", /* DCMD_READMODE1 21 */
199 199 "\026cdrom read toc header", /* DCMD_READTOCHDR 22 */
200 200 "\027cdrom read toc entry", /* DCMD_READTOCENT 23 */
201 201 "\030cdrom read offset", /* DCMD_READOFFSET 24 */
202 202 "\031cdrom read mode 2", /* DCMD_READMODE2 25 */
203 203 "\032cdrom volume control", /* DCMD_VOLCTRL 26 */
204 204 "\033flush cache", /* DCMD_FLUSH_CACHE 27 */
205 205 NULL
206 206 };
207 207
208 208 static char *dadk_sense[] = {
209 209 "\000Success", /* DERR_SUCCESS */
210 210 "\001address mark not found", /* DERR_AMNF */
211 211 "\002track 0 not found", /* DERR_TKONF */
212 212 "\003aborted command", /* DERR_ABORT */
213 213 "\004write fault", /* DERR_DWF */
214 214 "\005ID not found", /* DERR_IDNF */
215 215 "\006drive busy", /* DERR_BUSY */
216 216 "\007uncorrectable data error", /* DERR_UNC */
217 217 "\010bad block detected", /* DERR_BBK */
218 218 "\011invalid command", /* DERR_INVCDB */
219 219 "\012device hard error", /* DERR_HARD */
220 220 "\013illegal length indicated", /* DERR_ILI */
221 221 "\014end of media", /* DERR_EOM */
222 222 "\015media change requested", /* DERR_MCR */
223 223 "\016recovered from error", /* DERR_RECOVER */
224 224 "\017device not ready", /* DERR_NOTREADY */
225 225 "\020medium error", /* DERR_MEDIUM */
226 226 "\021hardware error", /* DERR_HW */
227 227 "\022illegal request", /* DERR_ILL */
228 228 "\023unit attention", /* DERR_UNIT_ATTN */
229 229 "\024data protection", /* DERR_DATA_PROT */
230 230 "\025miscompare", /* DERR_MISCOMPARE */
231 231 "\026ICRC error during UDMA", /* DERR_ICRC */
232 232 "\027reserved", /* DERR_RESV */
233 233 NULL
234 234 };
235 235
236 236 static char *dadk_name = "Disk";
237 237
238 238 /*
239 239 * This is the loadable module wrapper
240 240 */
241 241 #include <sys/modctl.h>
242 242
243 243 extern struct mod_ops mod_miscops;
244 244
245 245 static struct modlmisc modlmisc = {
246 246 &mod_miscops, /* Type of module */
247 247 "Direct Attached Disk"
248 248 };
249 249
250 250 static struct modlinkage modlinkage = {
251 - MODREV_1, (void *)&modlmisc, NULL
251 + MODREV_1, { (void *)&modlmisc, NULL }
252 252 };
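(For reference: the inner braces added above initialize struct modlinkage's ml_linkage member, which <sys/modctl.h> declares as an array of void * pointers; this is the same pattern as the sketch near the top of this page.)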
253 253
254 254 int
255 255 _init(void)
256 256 {
257 257 #ifdef DADK_DEBUG
258 258 if (dadk_debug & DENT)
259 259 PRF("dadk_init: call\n");
260 260 #endif
261 261
262 262 #if defined(__x86)
263 263 /* set the max physical address for iob allocs on x86 */
264 264 dadk_alloc_attr.dma_attr_addr_hi = dadk_max_phys_addr;
265 265
266 266 /*
267 267 * set the sgllen for iob allocs on x86. If this is set less than
268 268 * the number of pages the buffer will take (taking into account
269 269 * alignment), it would force the allocator to try and allocate
270 270 * contiguous pages.
271 271 */
272 272 dadk_alloc_attr.dma_attr_sgllen = dadk_sgl_size;
273 273 #endif
274 274
275 275 return (mod_install(&modlinkage));
276 276 }
277 277
278 278 int
279 279 _fini(void)
280 280 {
281 281 #ifdef DADK_DEBUG
282 282 if (dadk_debug & DENT)
283 283 PRF("dadk_fini: call\n");
284 284 #endif
285 285
286 286 return (mod_remove(&modlinkage));
287 287 }
288 288
289 289 int
290 290 _info(struct modinfo *modinfop)
291 291 {
292 292 return (mod_info(&modlinkage, modinfop));
293 293 }
294 294
295 295 struct tgdk_obj *
296 296 dadk_create()
297 297 {
298 298 struct tgdk_obj *dkobjp;
299 299 struct dadk *dadkp;
300 300
301 301 dkobjp = kmem_zalloc((sizeof (*dkobjp) + sizeof (*dadkp)), KM_NOSLEEP);
302 302 if (!dkobjp)
303 303 return (NULL);
304 304 dadkp = (struct dadk *)(dkobjp+1);
305 305
306 306 dkobjp->tg_ops = (struct tgdk_objops *)&dadk_ops;
307 307 dkobjp->tg_data = (opaque_t)dadkp;
308 308 dkobjp->tg_ext = &(dkobjp->tg_extblk);
309 309 dadkp->dad_extp = &(dkobjp->tg_extblk);
310 310
311 311 #ifdef DADK_DEBUG
312 312 if (dadk_debug & DENT)
313 313 PRF("dadk_create: tgdkobjp= 0x%x dadkp= 0x%x\n", dkobjp, dadkp);
314 314 #endif
315 315 return (dkobjp);
316 316 }
317 317
318 318 int
319 319 dadk_init(opaque_t objp, opaque_t devp, opaque_t flcobjp, opaque_t queobjp,
320 320 opaque_t bbhobjp, void *lkarg)
321 321 {
322 322 struct dadk *dadkp = (struct dadk *)objp;
323 323 struct scsi_device *sdevp = (struct scsi_device *)devp;
324 324
325 325 dadkp->dad_sd = devp;
326 326 dadkp->dad_ctlobjp = (opaque_t)sdevp->sd_address.a_hba_tran;
327 327 sdevp->sd_private = (caddr_t)dadkp;
328 328
329 329 /* initialize the communication object */
330 330 dadkp->dad_com.com_data = (opaque_t)dadkp;
331 331 dadkp->dad_com.com_ops = &dadk_com_ops;
332 332
333 333 dadkp->dad_bbhobjp = bbhobjp;
334 334 BBH_INIT(bbhobjp);
335 335
336 336 dadkp->dad_flcobjp = flcobjp;
337 337 mutex_init(&dadkp->dad_cmd_mutex, NULL, MUTEX_DRIVER, NULL);
338 338 dadkp->dad_cmd_count = 0;
339 339 return (FLC_INIT(flcobjp, &(dadkp->dad_com), queobjp, lkarg));
340 340 }
341 341
342 342 int
343 343 dadk_free(struct tgdk_obj *dkobjp)
344 344 {
345 345 TGDK_CLEANUP(dkobjp);
346 346 kmem_free(dkobjp, (sizeof (*dkobjp) + sizeof (struct dadk)));
347 347
348 348 return (DDI_SUCCESS);
349 349 }
350 350
351 351 void
352 352 dadk_cleanup(struct tgdk_obj *dkobjp)
353 353 {
354 354 struct dadk *dadkp;
355 355
356 356 dadkp = (struct dadk *)(dkobjp->tg_data);
357 357 if (dadkp->dad_sd)
358 358 dadkp->dad_sd->sd_private = NULL;
359 359 if (dadkp->dad_bbhobjp) {
360 360 BBH_FREE(dadkp->dad_bbhobjp);
361 361 dadkp->dad_bbhobjp = NULL;
362 362 }
363 363 if (dadkp->dad_flcobjp) {
364 364 FLC_FREE(dadkp->dad_flcobjp);
365 365 dadkp->dad_flcobjp = NULL;
366 366 }
367 367 mutex_destroy(&dadkp->dad_cmd_mutex);
368 368 }
369 369
370 370 /* ARGSUSED */
371 371 int
372 372 dadk_probe(opaque_t objp, int kmsflg)
373 373 {
374 374 struct dadk *dadkp = (struct dadk *)objp;
375 375 struct scsi_device *devp;
376 376 char name[80];
377 377
378 378 devp = dadkp->dad_sd;
379 379 if (!devp->sd_inq || (devp->sd_inq->inq_dtype == DTYPE_NOTPRESENT) ||
380 380 (devp->sd_inq->inq_dtype == DTYPE_UNKNOWN)) {
381 381 return (DDI_PROBE_FAILURE);
382 382 }
383 383
384 384 switch (devp->sd_inq->inq_dtype) {
385 385 case DTYPE_DIRECT:
386 386 dadkp->dad_ctype = DKC_DIRECT;
387 387 dadkp->dad_extp->tg_nodetype = DDI_NT_BLOCK;
388 388 dadkp->dad_extp->tg_ctype = DKC_DIRECT;
389 389 break;
390 390 case DTYPE_RODIRECT: /* eg cdrom */
391 391 dadkp->dad_ctype = DKC_CDROM;
392 392 dadkp->dad_extp->tg_rdonly = 1;
393 393 dadkp->dad_rdonly = 1;
394 394 dadkp->dad_cdrom = 1;
395 395 dadkp->dad_extp->tg_nodetype = DDI_NT_CD;
396 396 dadkp->dad_extp->tg_ctype = DKC_CDROM;
397 397 break;
398 398 case DTYPE_WORM:
399 399 case DTYPE_OPTICAL:
400 400 default:
401 401 return (DDI_PROBE_FAILURE);
402 402 }
403 403
404 404 dadkp->dad_extp->tg_rmb = dadkp->dad_rmb = devp->sd_inq->inq_rmb;
405 405
406 406 dadkp->dad_secshf = SCTRSHFT;
407 407 dadkp->dad_blkshf = 0;
408 408
409 409 /* display the device name */
410 410 (void) strcpy(name, "Vendor '");
411 411 gda_inqfill((caddr_t)devp->sd_inq->inq_vid, 8, &name[strlen(name)]);
412 412 (void) strcat(name, "' Product '");
413 413 gda_inqfill((caddr_t)devp->sd_inq->inq_pid, 16, &name[strlen(name)]);
414 414 (void) strcat(name, "'");
415 415 gda_log(devp->sd_dev, dadk_name, CE_NOTE, "!<%s>\n", name);
416 416
417 417 return (DDI_PROBE_SUCCESS);
418 418 }
419 419
420 420
421 421 /* ARGSUSED */
422 422 int
423 423 dadk_attach(opaque_t objp)
424 424 {
425 425 return (DDI_SUCCESS);
426 426 }
427 427
428 428 int
429 429 dadk_set_bbhobj(opaque_t objp, opaque_t bbhobjp)
430 430 {
431 431 struct dadk *dadkp = (struct dadk *)objp;
432 432 /* free the old bbh object */
433 433 if (dadkp->dad_bbhobjp)
434 434 BBH_FREE(dadkp->dad_bbhobjp);
435 435
436 436 /* initialize the new bbh object */
437 437 dadkp->dad_bbhobjp = bbhobjp;
438 438 BBH_INIT(bbhobjp);
439 439
440 440 return (DDI_SUCCESS);
441 441 }
442 442
443 443 /* ARGSUSED */
444 444 int
445 445 dadk_open(opaque_t objp, int flag)
446 446 {
447 447 struct dadk *dadkp = (struct dadk *)objp;
448 448 int error;
449 449 int wce;
450 450
451 451 if (!dadkp->dad_rmb) {
452 452 if (dadkp->dad_phyg.g_cap) {
453 453 FLC_START_KSTAT(dadkp->dad_flcobjp, "disk",
454 454 ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));
455 455 return (DDI_SUCCESS);
456 456 }
457 457 } else {
458 458 mutex_enter(&dadkp->dad_mutex);
459 459 dadkp->dad_iostate = DKIO_NONE;
460 460 cv_broadcast(&dadkp->dad_state_cv);
461 461 mutex_exit(&dadkp->dad_mutex);
462 462
463 463 if (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0, 0,
464 464 DADK_SILENT) ||
465 465 dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT) ||
466 466 dadk_rmb_ioctl(dadkp, DCMD_UPDATE_GEOM, 0, 0,
467 467 DADK_SILENT)) {
468 468 return (DDI_FAILURE);
469 469 }
470 470
471 471 mutex_enter(&dadkp->dad_mutex);
472 472 dadkp->dad_iostate = DKIO_INSERTED;
473 473 cv_broadcast(&dadkp->dad_state_cv);
474 474 mutex_exit(&dadkp->dad_mutex);
475 475 }
476 476
477 477 /*
478 478 * get write cache enable state
479 479 * If there is an error, must assume that write cache
480 480 * is enabled.
481 481 * NOTE: Since there is currently no Solaris mechanism to
482 482 * change the state of the Write Cache Enable feature,
483 483 * this code just checks the value of the WCE bit
484 484 * obtained at device init time. If a mechanism
485 485 * is added to the driver to change WCE, dad_wce
486 486 * must be updated appropriately.
487 487 */
488 488 error = dadk_ctl_ioctl(dadkp, DIOCTL_GETWCE,
489 489 (uintptr_t)&wce, FKIOCTL | FNATIVE);
490 490 mutex_enter(&dadkp->dad_mutex);
491 491 dadkp->dad_wce = (error != 0) || (wce != 0);
492 492 mutex_exit(&dadkp->dad_mutex);
493 493
494 494 /* logical disk geometry */
495 495 (void) dadk_ctl_ioctl(dadkp, DIOCTL_GETGEOM,
496 496 (uintptr_t)&dadkp->dad_logg, FKIOCTL | FNATIVE);
497 497 if (dadkp->dad_logg.g_cap == 0)
498 498 return (DDI_FAILURE);
499 499
500 500 /* get physical disk geometry */
501 501 (void) dadk_ctl_ioctl(dadkp, DIOCTL_GETPHYGEOM,
502 502 (uintptr_t)&dadkp->dad_phyg, FKIOCTL | FNATIVE);
503 503 if (dadkp->dad_phyg.g_cap == 0)
504 504 return (DDI_FAILURE);
505 505
506 506 dadk_setcap(dadkp);
507 507
508 508 dadk_create_errstats(dadkp,
509 509 ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));
510 510
511 511 /* start profiling */
512 512 FLC_START_KSTAT(dadkp->dad_flcobjp, "disk",
513 513 ddi_get_instance(CTL_DIP_DEV(dadkp->dad_ctlobjp)));
514 514
515 515 return (DDI_SUCCESS);
516 516 }
517 517
518 518 static void
519 519 dadk_setcap(struct dadk *dadkp)
520 520 {
521 521 int totsize;
522 522 int i;
523 523
524 524 totsize = dadkp->dad_phyg.g_secsiz;
525 525
526 526 if (totsize == 0) {
527 527 if (dadkp->dad_cdrom) {
528 528 totsize = 2048;
529 529 } else {
530 530 totsize = NBPSCTR;
531 531 }
532 532 } else {
533 533 /* Round down sector size to multiple of 512B */
534 534 totsize &= ~(NBPSCTR-1);
535 535 }
536 536 dadkp->dad_phyg.g_secsiz = totsize;
537 537
538 538 /* set sec,block shift factor - (512->0, 1024->1, 2048->2, etc.) */
539 539 totsize >>= SCTRSHFT;
540 540 for (i = 0; totsize != 1; i++, totsize >>= 1)
541 541 ;
542 542 dadkp->dad_blkshf = i;
543 543 dadkp->dad_secshf = i + SCTRSHFT;
544 544 }
545 545
546 546
547 547 static void
548 548 dadk_create_errstats(struct dadk *dadkp, int instance)
549 549 {
550 550 dadk_errstats_t *dep;
551 551 char kstatname[KSTAT_STRLEN];
552 552 dadk_ioc_string_t dadk_ioc_string;
553 553
554 554 if (dadkp->dad_errstats)
555 555 return;
556 556
557 557 (void) sprintf(kstatname, "cmdk%d,error", instance);
558 558 dadkp->dad_errstats = kstat_create("cmdkerror", instance,
559 559 kstatname, "device_error", KSTAT_TYPE_NAMED,
560 560 sizeof (dadk_errstats_t) / sizeof (kstat_named_t),
561 561 KSTAT_FLAG_PERSISTENT);
562 562
563 563 if (!dadkp->dad_errstats)
564 564 return;
565 565
566 566 dep = (dadk_errstats_t *)dadkp->dad_errstats->ks_data;
567 567
568 568 kstat_named_init(&dep->dadk_softerrs,
569 569 "Soft Errors", KSTAT_DATA_UINT32);
570 570 kstat_named_init(&dep->dadk_harderrs,
571 571 "Hard Errors", KSTAT_DATA_UINT32);
572 572 kstat_named_init(&dep->dadk_transerrs,
573 573 "Transport Errors", KSTAT_DATA_UINT32);
574 574 kstat_named_init(&dep->dadk_model,
575 575 "Model", KSTAT_DATA_CHAR);
576 576 kstat_named_init(&dep->dadk_revision,
577 577 "Revision", KSTAT_DATA_CHAR);
578 578 kstat_named_init(&dep->dadk_serial,
579 579 "Serial No", KSTAT_DATA_CHAR);
580 580 kstat_named_init(&dep->dadk_capacity,
581 581 "Size", KSTAT_DATA_ULONGLONG);
582 582 kstat_named_init(&dep->dadk_rq_media_err,
583 583 "Media Error", KSTAT_DATA_UINT32);
584 584 kstat_named_init(&dep->dadk_rq_ntrdy_err,
585 585 "Device Not Ready", KSTAT_DATA_UINT32);
586 586 kstat_named_init(&dep->dadk_rq_nodev_err,
587 587 "No Device", KSTAT_DATA_UINT32);
588 588 kstat_named_init(&dep->dadk_rq_recov_err,
589 589 "Recoverable", KSTAT_DATA_UINT32);
590 590 kstat_named_init(&dep->dadk_rq_illrq_err,
591 591 "Illegal Request", KSTAT_DATA_UINT32);
592 592
593 593 dadkp->dad_errstats->ks_private = dep;
594 594 dadkp->dad_errstats->ks_update = nulldev;
595 595 kstat_install(dadkp->dad_errstats);
596 596
597 597 /* get model */
598 598 dep->dadk_model.value.c[0] = 0;
599 599 dadk_ioc_string.is_buf = &dep->dadk_model.value.c[0];
600 600 dadk_ioc_string.is_size = sizeof (dep->dadk_model.value.c);
601 601 (void) dadk_ctl_ioctl(dadkp, DIOCTL_GETMODEL,
602 602 (uintptr_t)&dadk_ioc_string, FKIOCTL | FNATIVE);
603 603
604 604 /* get serial */
605 605 dep->dadk_serial.value.c[0] = 0;
606 606 dadk_ioc_string.is_buf = &dep->dadk_serial.value.c[0];
607 607 dadk_ioc_string.is_size = sizeof (dep->dadk_serial.value.c);
608 608 (void) dadk_ctl_ioctl(dadkp, DIOCTL_GETSERIAL,
609 609 (uintptr_t)&dadk_ioc_string, FKIOCTL | FNATIVE);
610 610
611 611 /* Get revision */
612 612 dep->dadk_revision.value.c[0] = 0;
613 613
614 614 /* Get capacity */
615 615
616 616 dep->dadk_capacity.value.ui64 =
617 617 (uint64_t)dadkp->dad_logg.g_cap *
618 618 (uint64_t)dadkp->dad_logg.g_secsiz;
619 619 }
620 620
621 621
622 622 int
623 623 dadk_close(opaque_t objp)
624 624 {
625 625 struct dadk *dadkp = (struct dadk *)objp;
626 626
627 627 if (dadkp->dad_rmb) {
628 628 (void) dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0, 0,
629 629 DADK_SILENT);
630 630 (void) dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT);
631 631 }
632 632 FLC_STOP_KSTAT(dadkp->dad_flcobjp);
633 633
634 634 dadk_destroy_errstats(dadkp);
635 635
636 636 return (DDI_SUCCESS);
637 637 }
638 638
639 639 static void
640 640 dadk_destroy_errstats(struct dadk *dadkp)
641 641 {
642 642 if (!dadkp->dad_errstats)
643 643 return;
644 644
645 645 kstat_delete(dadkp->dad_errstats);
646 646 dadkp->dad_errstats = NULL;
647 647 }
648 648
649 649
650 650 int
651 651 dadk_strategy(opaque_t objp, struct buf *bp)
652 652 {
653 653 struct dadk *dadkp = (struct dadk *)objp;
654 654
655 655 if (dadkp->dad_rdonly && !(bp->b_flags & B_READ)) {
656 656 bioerror(bp, EROFS);
657 657 return (DDI_FAILURE);
658 658 }
659 659
660 660 if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
661 661 bioerror(bp, ENXIO);
662 662 return (DDI_FAILURE);
663 663 }
664 664
665 665 SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));
666 666 mutex_enter(&dadkp->dad_cmd_mutex);
667 667 dadkp->dad_cmd_count++;
668 668 mutex_exit(&dadkp->dad_cmd_mutex);
669 669 FLC_ENQUE(dadkp->dad_flcobjp, bp);
670 670
671 671 return (DDI_SUCCESS);
672 672 }
673 673
674 674 int
675 675 dadk_dump(opaque_t objp, struct buf *bp)
676 676 {
677 677 struct dadk *dadkp = (struct dadk *)objp;
678 678 struct cmpkt *pktp;
679 679
680 680 if (dadkp->dad_rdonly) {
681 681 bioerror(bp, EROFS);
682 682 return (DDI_FAILURE);
683 683 }
684 684
685 685 if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
686 686 bioerror(bp, ENXIO);
687 687 return (DDI_FAILURE);
688 688 }
689 689
690 690 SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));
691 691
692 692 pktp = dadk_pktprep(dadkp, NULL, bp, dadk_polldone, NULL, NULL);
693 693 if (!pktp) {
694 694 cmn_err(CE_WARN, "no resources for dumping");
695 695 bioerror(bp, EIO);
696 696 return (DDI_FAILURE);
697 697 }
698 698 pktp->cp_flags |= CPF_NOINTR;
699 699
700 700 (void) dadk_ioprep(dadkp, pktp);
701 701 dadk_transport(dadkp, bp);
702 702 pktp->cp_byteleft -= pktp->cp_bytexfer;
703 703
704 704 while (geterror(bp) == 0 && pktp->cp_byteleft != 0) {
705 705 (void) dadk_iosetup(dadkp, pktp);
706 706 dadk_transport(dadkp, bp);
707 707 pktp->cp_byteleft -= pktp->cp_bytexfer;
708 708 }
709 709
710 710 if (pktp->cp_private)
711 711 BBH_FREEHANDLE(dadkp->dad_bbhobjp, pktp->cp_private);
712 712 gda_free(dadkp->dad_ctlobjp, pktp, NULL);
713 713 return (DDI_SUCCESS);
714 714 }
715 715
716 716 /* ARGSUSED */
717 717 int
718 718 dadk_ioctl(opaque_t objp, dev_t dev, int cmd, intptr_t arg, int flag,
719 719 cred_t *cred_p, int *rval_p)
720 720 {
721 721 struct dadk *dadkp = (struct dadk *)objp;
722 722
723 723 switch (cmd) {
724 724 case DKIOCGETDEF:
725 725 {
726 726 struct buf *bp;
727 727 int err, head;
728 728 unsigned char *secbuf;
729 729 STRUCT_DECL(defect_header, adh);
730 730
731 731 STRUCT_INIT(adh, flag & FMODELS);
732 732
733 733 /*
734 734 * copyin header ....
735 735 * yields head number and buffer address
736 736 */
737 737 if (ddi_copyin((caddr_t)arg, STRUCT_BUF(adh), STRUCT_SIZE(adh),
738 738 flag))
739 739 return (EFAULT);
740 740 head = STRUCT_FGET(adh, head);
741 741 if (head < 0 || head >= dadkp->dad_phyg.g_head)
742 742 return (ENXIO);
743 743 secbuf = kmem_zalloc(NBPSCTR, KM_SLEEP);
744 744 if (!secbuf)
745 745 return (ENOMEM);
746 746 bp = getrbuf(KM_SLEEP);
747 747 if (!bp) {
748 748 kmem_free(secbuf, NBPSCTR);
749 749 return (ENOMEM);
750 750 }
751 751
752 752 bp->b_edev = dev;
753 753 bp->b_dev = cmpdev(dev);
754 754 bp->b_flags = B_BUSY;
755 755 bp->b_resid = 0;
756 756 bp->b_bcount = NBPSCTR;
757 757 bp->b_un.b_addr = (caddr_t)secbuf;
 758  758 		bp->b_blkno = head;	/* I had to put it somewhere! */
759 759 bp->b_forw = (struct buf *)dadkp;
760 760 bp->b_back = (struct buf *)DCMD_GETDEF;
761 761
762 762 mutex_enter(&dadkp->dad_cmd_mutex);
763 763 dadkp->dad_cmd_count++;
764 764 mutex_exit(&dadkp->dad_cmd_mutex);
765 765 FLC_ENQUE(dadkp->dad_flcobjp, bp);
766 766 err = biowait(bp);
767 767 if (!err) {
768 768 if (ddi_copyout((caddr_t)secbuf,
769 769 STRUCT_FGETP(adh, buffer), NBPSCTR, flag))
770 770 err = ENXIO;
771 771 }
772 772 kmem_free(secbuf, NBPSCTR);
773 773 freerbuf(bp);
774 774 return (err);
775 775 }
776 776 case DIOCTL_RWCMD:
777 777 {
778 778 struct dadkio_rwcmd *rwcmdp;
779 779 int status, rw;
780 780
781 781 /*
782 782 * copied in by cmdk and, if necessary, converted to the
783 783 * correct datamodel
784 784 */
785 785 rwcmdp = (struct dadkio_rwcmd *)(intptr_t)arg;
786 786
787 787 /*
788 788 * handle the complex cases here; we pass these
789 789 * through to the driver, which will queue them and
790 790 * handle the requests asynchronously. The simpler
 791  791 		 * cases, which can return immediately, fail here, and
 792  792 		 * the request reverts to the dadk_ioctl routine, which
 793  793 		 * will reroute them directly to the ata driver.
794 794 */
795 795 switch (rwcmdp->cmd) {
796 796 case DADKIO_RWCMD_READ :
797 797 /*FALLTHROUGH*/
798 798 case DADKIO_RWCMD_WRITE:
799 799 rw = ((rwcmdp->cmd == DADKIO_RWCMD_WRITE) ?
800 800 B_WRITE : B_READ);
801 801 status = dadk_dk_buf_setup(dadkp,
802 802 (opaque_t)rwcmdp, dev, ((flag &FKIOCTL) ?
803 803 UIO_SYSSPACE : UIO_USERSPACE), rw);
804 804 return (status);
805 805 default:
806 806 return (EINVAL);
807 807 }
808 808 }
809 809 case DKIOC_UPDATEFW:
810 810
811 811 /*
812 812 * Require PRIV_ALL privilege to invoke DKIOC_UPDATEFW
813 813 * to protect the firmware update from malicious use
814 814 */
815 815 if (PRIV_POLICY(cred_p, PRIV_ALL, B_FALSE, EPERM, NULL) != 0)
816 816 return (EPERM);
817 817 else
818 818 return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));
819 819
820 820 case DKIOCFLUSHWRITECACHE:
821 821 {
822 822 struct buf *bp;
823 823 int err = 0;
824 824 struct dk_callback *dkc = (struct dk_callback *)arg;
825 825 struct cmpkt *pktp;
826 826 int is_sync = 1;
827 827
828 828 mutex_enter(&dadkp->dad_mutex);
829 829 if (dadkp->dad_noflush || ! dadkp->dad_wce) {
830 830 err = dadkp->dad_noflush ? ENOTSUP : 0;
831 831 mutex_exit(&dadkp->dad_mutex);
832 832 /*
833 833 * If a callback was requested: a
834 834 * callback will always be done if the
835 835 * caller saw the DKIOCFLUSHWRITECACHE
836 836 * ioctl return 0, and never done if the
837 837 * caller saw the ioctl return an error.
838 838 */
839 839 if ((flag & FKIOCTL) && dkc != NULL &&
840 840 dkc->dkc_callback != NULL) {
841 841 (*dkc->dkc_callback)(dkc->dkc_cookie,
842 842 err);
843 843 /*
844 844 * Did callback and reported error.
845 845 * Since we did a callback, ioctl
846 846 * should return 0.
847 847 */
848 848 err = 0;
849 849 }
850 850 return (err);
851 851 }
852 852 mutex_exit(&dadkp->dad_mutex);
853 853
854 854 bp = getrbuf(KM_SLEEP);
855 855
856 856 bp->b_edev = dev;
857 857 bp->b_dev = cmpdev(dev);
858 858 bp->b_flags = B_BUSY;
859 859 bp->b_resid = 0;
860 860 bp->b_bcount = 0;
861 861 SET_BP_SEC(bp, 0);
862 862
863 863 if ((flag & FKIOCTL) && dkc != NULL &&
864 864 dkc->dkc_callback != NULL) {
865 865 struct dk_callback *dkc2 =
866 866 (struct dk_callback *)kmem_zalloc(
867 867 sizeof (struct dk_callback), KM_SLEEP);
868 868
869 869 bcopy(dkc, dkc2, sizeof (*dkc2));
870 870 bp->b_private = dkc2;
871 871 bp->b_iodone = dadk_flushdone;
872 872 is_sync = 0;
873 873 }
874 874
875 875 /*
876 876 * Setup command pkt
877 877 * dadk_pktprep() can't fail since DDI_DMA_SLEEP set
878 878 */
879 879 pktp = dadk_pktprep(dadkp, NULL, bp,
880 880 dadk_iodone, DDI_DMA_SLEEP, NULL);
881 881
882 882 pktp->cp_time = DADK_FLUSH_CACHE_TIME;
883 883
884 884 *((char *)(pktp->cp_cdbp)) = DCMD_FLUSH_CACHE;
885 885 pktp->cp_byteleft = 0;
886 886 pktp->cp_private = NULL;
887 887 pktp->cp_secleft = 0;
888 888 pktp->cp_srtsec = -1;
889 889 pktp->cp_bytexfer = 0;
890 890
891 891 CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);
892 892
893 893 mutex_enter(&dadkp->dad_cmd_mutex);
894 894 dadkp->dad_cmd_count++;
895 895 mutex_exit(&dadkp->dad_cmd_mutex);
896 896 FLC_ENQUE(dadkp->dad_flcobjp, bp);
897 897
898 898 if (is_sync) {
899 899 err = biowait(bp);
900 900 freerbuf(bp);
901 901 }
902 902 return (err);
903 903 }
904 904 default:
905 905 if (!dadkp->dad_rmb)
906 906 return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));
907 907 }
908 908
909 909 switch (cmd) {
910 910 case CDROMSTOP:
911 911 return (dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0,
912 912 0, DADK_SILENT));
913 913 case CDROMSTART:
914 914 return (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0,
915 915 0, DADK_SILENT));
916 916 case DKIOCLOCK:
917 917 return (dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT));
918 918 case DKIOCUNLOCK:
919 919 return (dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT));
920 920 case DKIOCEJECT:
921 921 case CDROMEJECT:
922 922 {
923 923 int ret;
924 924
925 925 if (ret = dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0,
926 926 DADK_SILENT)) {
927 927 return (ret);
928 928 }
929 929 if (ret = dadk_rmb_ioctl(dadkp, DCMD_EJECT, 0, 0,
930 930 DADK_SILENT)) {
931 931 return (ret);
932 932 }
933 933 mutex_enter(&dadkp->dad_mutex);
934 934 dadkp->dad_iostate = DKIO_EJECTED;
935 935 cv_broadcast(&dadkp->dad_state_cv);
936 936 mutex_exit(&dadkp->dad_mutex);
937 937
938 938 return (0);
939 939
940 940 }
941 941 default:
942 942 return (ENOTTY);
943 943 /*
944 944 * cdrom audio commands
945 945 */
946 946 case CDROMPAUSE:
947 947 cmd = DCMD_PAUSE;
948 948 break;
949 949 case CDROMRESUME:
950 950 cmd = DCMD_RESUME;
951 951 break;
952 952 case CDROMPLAYMSF:
953 953 cmd = DCMD_PLAYMSF;
954 954 break;
955 955 case CDROMPLAYTRKIND:
956 956 cmd = DCMD_PLAYTRKIND;
957 957 break;
958 958 case CDROMREADTOCHDR:
959 959 cmd = DCMD_READTOCHDR;
960 960 break;
961 961 case CDROMREADTOCENTRY:
962 962 cmd = DCMD_READTOCENT;
963 963 break;
964 964 case CDROMVOLCTRL:
965 965 cmd = DCMD_VOLCTRL;
966 966 break;
967 967 case CDROMSUBCHNL:
968 968 cmd = DCMD_SUBCHNL;
969 969 break;
970 970 case CDROMREADMODE2:
971 971 cmd = DCMD_READMODE2;
972 972 break;
973 973 case CDROMREADMODE1:
974 974 cmd = DCMD_READMODE1;
975 975 break;
976 976 case CDROMREADOFFSET:
977 977 cmd = DCMD_READOFFSET;
978 978 break;
979 979 }
980 980 return (dadk_rmb_ioctl(dadkp, cmd, arg, flag, 0));
981 981 }
982 982
983 983 int
984 984 dadk_flushdone(struct buf *bp)
985 985 {
986 986 struct dk_callback *dkc = bp->b_private;
987 987
988 988 ASSERT(dkc != NULL && dkc->dkc_callback != NULL);
989 989
990 990 (*dkc->dkc_callback)(dkc->dkc_cookie, geterror(bp));
991 991
992 992 kmem_free(dkc, sizeof (*dkc));
993 993 freerbuf(bp);
994 994 return (0);
995 995 }
996 996
997 997 int
998 998 dadk_getphygeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
999 999 {
1000 1000 struct dadk *dadkp = (struct dadk *)objp;
1001 1001
1002 1002 bcopy((caddr_t)&dadkp->dad_phyg, (caddr_t)dkgeom_p,
1003 1003 sizeof (struct tgdk_geom));
1004 1004 return (DDI_SUCCESS);
1005 1005 }
1006 1006
1007 1007 int
1008 1008 dadk_getgeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
1009 1009 {
1010 1010 struct dadk *dadkp = (struct dadk *)objp;
1011 1011 bcopy((caddr_t)&dadkp->dad_logg, (caddr_t)dkgeom_p,
1012 1012 sizeof (struct tgdk_geom));
1013 1013 return (DDI_SUCCESS);
1014 1014 }
1015 1015
1016 1016 int
1017 1017 dadk_setgeom(opaque_t objp, struct tgdk_geom *dkgeom_p)
1018 1018 {
1019 1019 struct dadk *dadkp = (struct dadk *)objp;
1020 1020
1021 1021 dadkp->dad_logg.g_cyl = dkgeom_p->g_cyl;
1022 1022 dadkp->dad_logg.g_head = dkgeom_p->g_head;
1023 1023 dadkp->dad_logg.g_sec = dkgeom_p->g_sec;
1024 1024 dadkp->dad_logg.g_cap = dkgeom_p->g_cap;
1025 1025 return (DDI_SUCCESS);
1026 1026 }
1027 1027
1028 1028
1029 1029 tgdk_iob_handle
1030 1030 dadk_iob_alloc(opaque_t objp, daddr_t blkno, ssize_t xfer, int kmsflg)
1031 1031 {
1032 1032 struct dadk *dadkp = (struct dadk *)objp;
1033 1033 struct buf *bp;
1034 1034 struct tgdk_iob *iobp;
1035 1035 size_t rlen;
1036 1036
1037 1037 iobp = kmem_zalloc(sizeof (*iobp), kmsflg);
1038 1038 if (iobp == NULL)
1039 1039 return (NULL);
1040 1040 if ((bp = getrbuf(kmsflg)) == NULL) {
1041 1041 kmem_free(iobp, sizeof (*iobp));
1042 1042 return (NULL);
1043 1043 }
1044 1044
1045 1045 iobp->b_psec = LBLK2SEC(blkno, dadkp->dad_blkshf);
1046 1046 iobp->b_pbyteoff = (blkno & ((1<<dadkp->dad_blkshf) - 1)) << SCTRSHFT;
1047 1047 iobp->b_pbytecnt = ((iobp->b_pbyteoff + xfer + dadkp->DAD_SECSIZ - 1)
1048 1048 >> dadkp->dad_secshf) << dadkp->dad_secshf;
1049 1049
1050 1050 bp->b_un.b_addr = 0;
1051 1051 /*
1052 1052 * use i_ddi_mem_alloc() for now until we have an interface to allocate
1053 1053 * memory for DMA which doesn't require a DMA handle.
1054 1054 */
1055 1055 if (i_ddi_mem_alloc((dadkp->dad_sd)->sd_dev, &dadk_alloc_attr,
1056 1056 (size_t)iobp->b_pbytecnt, ((kmsflg == KM_SLEEP) ? 1 : 0), 0, NULL,
1057 1057 &bp->b_un.b_addr, &rlen, NULL) != DDI_SUCCESS) {
1058 1058 freerbuf(bp);
1059 1059 kmem_free(iobp, sizeof (*iobp));
1060 1060 return (NULL);
1061 1061 }
1062 1062 iobp->b_flag |= IOB_BPALLOC | IOB_BPBUFALLOC;
1063 1063 iobp->b_bp = bp;
1064 1064 iobp->b_lblk = blkno;
1065 1065 iobp->b_xfer = xfer;
1066 1066 iobp->b_lblk = blkno;
1067 1067 iobp->b_xfer = xfer;
1068 1068 return (iobp);
1069 1069 }
1070 1070
1071 1071 /* ARGSUSED */
1072 1072 int
1073 1073 dadk_iob_free(opaque_t objp, struct tgdk_iob *iobp)
1074 1074 {
1075 1075 struct buf *bp;
1076 1076
1077 1077 if (iobp) {
1078 1078 if (iobp->b_bp && (iobp->b_flag & IOB_BPALLOC)) {
1079 1079 bp = iobp->b_bp;
1080 1080 if (bp->b_un.b_addr && (iobp->b_flag & IOB_BPBUFALLOC))
1081 1081 i_ddi_mem_free((caddr_t)bp->b_un.b_addr, NULL);
1082 1082 freerbuf(bp);
1083 1083 }
1084 1084 kmem_free(iobp, sizeof (*iobp));
1085 1085 }
1086 1086 return (DDI_SUCCESS);
1087 1087 }
1088 1088
1089 1089 /* ARGSUSED */
1090 1090 caddr_t
1091 1091 dadk_iob_htoc(opaque_t objp, struct tgdk_iob *iobp)
1092 1092 {
1093 1093 return (iobp->b_bp->b_un.b_addr+iobp->b_pbyteoff);
1094 1094 }
1095 1095
1096 1096
1097 1097 caddr_t
1098 1098 dadk_iob_xfer(opaque_t objp, struct tgdk_iob *iobp, int rw)
1099 1099 {
1100 1100 struct dadk *dadkp = (struct dadk *)objp;
1101 1101 struct buf *bp;
1102 1102 int err;
1103 1103
1104 1104 bp = iobp->b_bp;
1105 1105 if (dadkp->dad_rdonly && !(rw & B_READ)) {
1106 1106 bioerror(bp, EROFS);
1107 1107 return (NULL);
1108 1108 }
1109 1109
1110 1110 bp->b_flags |= (B_BUSY | rw);
1111 1111 bp->b_bcount = iobp->b_pbytecnt;
1112 1112 SET_BP_SEC(bp, iobp->b_psec);
1113 1113 bp->av_back = (struct buf *)0;
1114 1114 bp->b_resid = 0;
1115 1115
1116 1116 /* call flow control */
1117 1117 mutex_enter(&dadkp->dad_cmd_mutex);
1118 1118 dadkp->dad_cmd_count++;
1119 1119 mutex_exit(&dadkp->dad_cmd_mutex);
1120 1120 FLC_ENQUE(dadkp->dad_flcobjp, bp);
1121 1121 err = biowait(bp);
1122 1122
1123 1123 bp->b_bcount = iobp->b_xfer;
1124 1124 bp->b_flags &= ~(B_DONE|B_BUSY);
1125 1125
1126 1126 if (err)
1127 1127 return (NULL);
1128 1128
1129 1129 return (bp->b_un.b_addr+iobp->b_pbyteoff);
1130 1130 }
1131 1131
1132 1132 static void
1133 1133 dadk_transport(opaque_t com_data, struct buf *bp)
1134 1134 {
1135 1135 struct dadk *dadkp = (struct dadk *)com_data;
1136 1136
1137 1137 if (CTL_TRANSPORT(dadkp->dad_ctlobjp, GDA_BP_PKT(bp)) ==
1138 1138 CTL_SEND_SUCCESS)
1139 1139 return;
1140 1140 dadk_restart((void*)GDA_BP_PKT(bp));
1141 1141 }
1142 1142
1143 1143 static int
1144 1144 dadk_pkt(opaque_t com_data, struct buf *bp, int (*func)(caddr_t), caddr_t arg)
1145 1145 {
1146 1146 struct cmpkt *pktp;
1147 1147 struct dadk *dadkp = (struct dadk *)com_data;
1148 1148
1149 1149 if (GDA_BP_PKT(bp))
1150 1150 return (DDI_SUCCESS);
1151 1151
1152 1152 pktp = dadk_pktprep(dadkp, NULL, bp, dadk_iodone, func, arg);
1153 1153 if (!pktp)
1154 1154 return (DDI_FAILURE);
1155 1155
1156 1156 return (dadk_ioprep(dadkp, pktp));
1157 1157 }
1158 1158
1159 1159 /*
1160 1160 * Read, Write preparation
1161 1161 */
1162 1162 static int
1163 1163 dadk_ioprep(struct dadk *dadkp, struct cmpkt *pktp)
1164 1164 {
1165 1165 struct buf *bp;
1166 1166
1167 1167 bp = pktp->cp_bp;
1168 1168 if (bp->b_forw == (struct buf *)dadkp)
1169 1169 *((char *)(pktp->cp_cdbp)) = (char)(intptr_t)bp->b_back;
1170 1170
1171 1171 else if (bp->b_flags & B_READ)
1172 1172 *((char *)(pktp->cp_cdbp)) = DCMD_READ;
1173 1173 else
1174 1174 *((char *)(pktp->cp_cdbp)) = DCMD_WRITE;
1175 1175 pktp->cp_byteleft = bp->b_bcount;
1176 1176
1177 1177 /* setup the bad block list handle */
1178 1178 pktp->cp_private = BBH_GETHANDLE(dadkp->dad_bbhobjp, bp);
1179 1179 return (dadk_iosetup(dadkp, pktp));
1180 1180 }
1181 1181
1182 1182 static int
1183 1183 dadk_iosetup(struct dadk *dadkp, struct cmpkt *pktp)
1184 1184 {
1185 1185 struct buf *bp;
1186 1186 bbh_cookie_t bbhckp;
1187 1187 int seccnt;
1188 1188
1189 1189 seccnt = pktp->cp_bytexfer >> dadkp->dad_secshf;
1190 1190 pktp->cp_secleft -= seccnt;
1191 1191
1192 1192 if (pktp->cp_secleft) {
1193 1193 pktp->cp_srtsec += seccnt;
1194 1194 } else {
1195 1195 /* get the first cookie from the bad block list */
1196 1196 if (!pktp->cp_private) {
1197 1197 bp = pktp->cp_bp;
1198 1198 pktp->cp_srtsec = GET_BP_SEC(bp);
1199 1199 pktp->cp_secleft = (bp->b_bcount >> dadkp->dad_secshf);
1200 1200 } else {
1201 1201 bbhckp = BBH_HTOC(dadkp->dad_bbhobjp,
1202 1202 pktp->cp_private);
1203 1203 pktp->cp_srtsec = BBH_GETCK_SECTOR(dadkp->dad_bbhobjp,
1204 1204 bbhckp);
1205 1205 pktp->cp_secleft = BBH_GETCK_SECLEN(dadkp->dad_bbhobjp,
1206 1206 bbhckp);
1207 1207 }
1208 1208 }
1209 1209
1210 1210 pktp->cp_bytexfer = pktp->cp_secleft << dadkp->dad_secshf;
1211 1211
1212 1212 if (CTL_IOSETUP(dadkp->dad_ctlobjp, pktp)) {
1213 1213 return (DDI_SUCCESS);
1214 1214 } else {
1215 1215 return (DDI_FAILURE);
1216 1216 }
1217 1217
1218 1218
1219 1219
1220 1220
1221 1221 }
1222 1222
1223 1223 static struct cmpkt *
1224 1224 dadk_pktprep(struct dadk *dadkp, struct cmpkt *in_pktp, struct buf *bp,
1225 1225 void (*cb_func)(struct buf *), int (*func)(caddr_t), caddr_t arg)
1226 1226 {
1227 1227 struct cmpkt *pktp;
1228 1228
1229 1229 pktp = gda_pktprep(dadkp->dad_ctlobjp, in_pktp, (opaque_t)bp, func,
1230 1230 arg);
1231 1231
1232 1232 if (pktp) {
1233 1233 pktp->cp_callback = dadk_pktcb;
1234 1234 pktp->cp_time = DADK_IO_TIME;
1235 1235 pktp->cp_flags = 0;
1236 1236 pktp->cp_iodone = cb_func;
1237 1237 pktp->cp_dev_private = (opaque_t)dadkp;
1238 1238
1239 1239 }
1240 1240
1241 1241 return (pktp);
1242 1242 }
1243 1243
1244 1244
1245 1245 static void
1246 1246 dadk_restart(void *vpktp)
1247 1247 {
1248 1248 struct cmpkt *pktp = (struct cmpkt *)vpktp;
1249 1249
1250 1250 if (dadk_ioretry(pktp, QUE_COMMAND) == JUST_RETURN)
1251 1251 return;
1252 1252 pktp->cp_iodone(pktp->cp_bp);
1253 1253 }
1254 1254
1255 1255 static int
1256 1256 dadk_ioretry(struct cmpkt *pktp, int action)
1257 1257 {
1258 1258 struct buf *bp;
1259 1259 struct dadk *dadkp = PKT2DADK(pktp);
1260 1260
1261 1261 switch (action) {
1262 1262 case QUE_COMMAND:
1263 1263 if (pktp->cp_retry++ < DADK_RETRY_COUNT) {
1264 1264 CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);
1265 1265 if (CTL_TRANSPORT(dadkp->dad_ctlobjp, pktp) ==
1266 1266 CTL_SEND_SUCCESS) {
1267 1267 return (JUST_RETURN);
1268 1268 }
1269 1269 gda_log(dadkp->dad_sd->sd_dev, dadk_name,
1270 1270 CE_WARN, "transport of command fails\n");
1271 1271 } else
1272 1272 gda_log(dadkp->dad_sd->sd_dev,
1273 1273 dadk_name, CE_WARN,
1274 1274 "exceeds maximum number of retries\n");
1275 1275 bioerror(pktp->cp_bp, ENXIO);
1276 1276 /*FALLTHROUGH*/
1277 1277 case COMMAND_DONE_ERROR:
1278 1278 bp = pktp->cp_bp;
1279 1279 bp->b_resid += pktp->cp_byteleft - pktp->cp_bytexfer +
1280 1280 pktp->cp_resid;
1281 1281 if (geterror(bp) == 0) {
1282 1282 if ((*((char *)(pktp->cp_cdbp)) == DCMD_FLUSH_CACHE) &&
1283 1283 (pktp->cp_dev_private == (opaque_t)dadkp) &&
1284 1284 ((int)(*(char *)pktp->cp_scbp) == DERR_ABORT)) {
1285 1285 /*
1286 1286 * Flag "unimplemented" responses for
1287 1287 * DCMD_FLUSH_CACHE as ENOTSUP
1288 1288 */
1289 1289 bioerror(bp, ENOTSUP);
1290 1290 mutex_enter(&dadkp->dad_mutex);
1291 1291 dadkp->dad_noflush = 1;
1292 1292 mutex_exit(&dadkp->dad_mutex);
1293 1293 } else {
1294 1294 bioerror(bp, EIO);
1295 1295 }
1296 1296 }
1297 1297 /*FALLTHROUGH*/
1298 1298 case COMMAND_DONE:
1299 1299 default:
1300 1300 return (COMMAND_DONE);
1301 1301 }
1302 1302 }
1303 1303
1304 1304
1305 1305 static void
1306 1306 dadk_pktcb(struct cmpkt *pktp)
1307 1307 {
1308 1308 int action;
1309 1309 struct dadkio_rwcmd *rwcmdp;
1310 1310
1311 1311 rwcmdp = (struct dadkio_rwcmd *)pktp->cp_passthru; /* ioctl packet */
1312 1312
1313 1313 if (pktp->cp_reason == CPS_SUCCESS) {
1314 1314 if (rwcmdp && (rwcmdp != (opaque_t)DADK_SILENT))
1315 1315 rwcmdp->status.status = DADKIO_STAT_NO_ERROR;
1316 1316 pktp->cp_iodone(pktp->cp_bp);
1317 1317 return;
1318 1318 }
1319 1319
1320 1320 if (rwcmdp && (rwcmdp != (opaque_t)DADK_SILENT)) {
1321 1321 if (pktp->cp_reason == CPS_CHKERR)
1322 1322 dadk_recorderr(pktp, rwcmdp);
1323 1323 dadk_iodone(pktp->cp_bp);
1324 1324 return;
1325 1325 }
1326 1326
1327 1327 if (pktp->cp_reason == CPS_CHKERR)
1328 1328 action = dadk_chkerr(pktp);
1329 1329 else
1330 1330 action = COMMAND_DONE_ERROR;
1331 1331
1332 1332 if (action == JUST_RETURN)
1333 1333 return;
1334 1334
1335 1335 /*
1336 1336 	 * If we are panicking, don't retry the command;
1337 1337 	 * just fail it so we can go down completing all
1338 1338 	 * of the buffers.
1339 1339 */
1340 1340 if (ddi_in_panic() && action == QUE_COMMAND)
1341 1341 action = COMMAND_DONE_ERROR;
1342 1342
1343 1343 if (action != COMMAND_DONE) {
1344 1344 if ((dadk_ioretry(pktp, action)) == JUST_RETURN)
1345 1345 return;
1346 1346 }
1347 1347 pktp->cp_iodone(pktp->cp_bp);
1348 1348 }
1349 1349
1350 1350
1351 1351
1352 1352 static struct dadkio_derr dadk_errtab[] = {
1353 1353 {COMMAND_DONE, GDA_INFORMATIONAL}, /* 0 DERR_SUCCESS */
1354 1354 {QUE_COMMAND, GDA_FATAL}, /* 1 DERR_AMNF */
1355 1355 {QUE_COMMAND, GDA_FATAL}, /* 2 DERR_TKONF */
1356 1356 {COMMAND_DONE_ERROR, GDA_INFORMATIONAL}, /* 3 DERR_ABORT */
1357 1357 {QUE_COMMAND, GDA_RETRYABLE}, /* 4 DERR_DWF */
1358 1358 {QUE_COMMAND, GDA_FATAL}, /* 5 DERR_IDNF */
1359 1359 {JUST_RETURN, GDA_INFORMATIONAL}, /* 6 DERR_BUSY */
1360 1360 {QUE_COMMAND, GDA_FATAL}, /* 7 DERR_UNC */
1361 1361 {QUE_COMMAND, GDA_RETRYABLE}, /* 8 DERR_BBK */
1362 1362 {COMMAND_DONE_ERROR, GDA_FATAL}, /* 9 DERR_INVCDB */
1363 1363 {COMMAND_DONE_ERROR, GDA_FATAL}, /* 10 DERR_HARD */
1364 1364 {COMMAND_DONE_ERROR, GDA_FATAL}, /* 11 DERR_ILI */
1365 1365 {COMMAND_DONE_ERROR, GDA_FATAL}, /* 12 DERR_EOM */
1366 1366 {COMMAND_DONE, GDA_INFORMATIONAL}, /* 13 DERR_MCR */
1367 1367 {COMMAND_DONE, GDA_INFORMATIONAL}, /* 14 DERR_RECOVER */
1368 1368 {COMMAND_DONE_ERROR, GDA_FATAL}, /* 15 DERR_NOTREADY */
1369 1369 {QUE_COMMAND, GDA_RETRYABLE}, /* 16 DERR_MEDIUM */
1370 1370 {COMMAND_DONE_ERROR, GDA_FATAL}, /* 17 DERR_HW */
1371 1371 {COMMAND_DONE, GDA_FATAL}, /* 18 DERR_ILL */
1372 1372 {COMMAND_DONE, GDA_FATAL}, /* 19 DERR_UNIT_ATTN */
1373 1373 {COMMAND_DONE_ERROR, GDA_FATAL}, /* 20 DERR_DATA_PROT */
1374 1374 {COMMAND_DONE_ERROR, GDA_FATAL}, /* 21 DERR_MISCOMPARE */
1375 1375 {QUE_COMMAND, GDA_RETRYABLE}, /* 22 DERR_ICRC */
1376 1376 {COMMAND_DONE_ERROR, GDA_FATAL}, /* 23 DERR_RESV */
1377 1377 };
1378 1378
1379 1379 static int
1380 1380 dadk_chkerr(struct cmpkt *pktp)
1381 1381 {
1382 1382 daddr_t err_blkno;
1383 1383 struct dadk *dadkp = PKT2DADK(pktp);
1384 1384 dadk_errstats_t *dep;
1385 1385 int scb = *(char *)pktp->cp_scbp;
1386 1386
1387 1387 if (scb == DERR_SUCCESS) {
1388 1388 if (pktp->cp_retry != 0 && dadkp->dad_errstats != NULL) {
1389 1389 dep = (dadk_errstats_t *)
1390 1390 dadkp->dad_errstats->ks_data;
1391 1391 dep->dadk_rq_recov_err.value.ui32++;
1392 1392 }
1393 1393 return (COMMAND_DONE);
1394 1394 }
1395 1395
1396 1396 if (pktp->cp_retry) {
1397 1397 err_blkno = pktp->cp_srtsec + ((pktp->cp_bytexfer -
1398 1398 pktp->cp_resid) >> dadkp->dad_secshf);
1399 1399 } else
1400 1400 err_blkno = -1;
1401 1401
1402 1402 if (dadkp->dad_errstats != NULL) {
1403 1403 dep = (dadk_errstats_t *)dadkp->dad_errstats->ks_data;
1404 1404
1405 1405 switch (dadk_errtab[scb].d_severity) {
1406 1406 case GDA_RETRYABLE:
1407 1407 dep->dadk_softerrs.value.ui32++;
1408 1408 break;
1409 1409
1410 1410 case GDA_FATAL:
1411 1411 dep->dadk_harderrs.value.ui32++;
1412 1412 break;
1413 1413
1414 1414 default:
1415 1415 break;
1416 1416 }
1417 1417
1418 1418 switch (scb) {
1419 1419 case DERR_INVCDB:
1420 1420 case DERR_ILI:
1421 1421 case DERR_EOM:
1422 1422 case DERR_HW:
1423 1423 case DERR_ICRC:
1424 1424 dep->dadk_transerrs.value.ui32++;
1425 1425 break;
1426 1426
1427 1427 case DERR_AMNF:
1428 1428 case DERR_TKONF:
1429 1429 case DERR_DWF:
1430 1430 case DERR_BBK:
1431 1431 case DERR_UNC:
1432 1432 case DERR_HARD:
1433 1433 case DERR_MEDIUM:
1434 1434 case DERR_DATA_PROT:
1435 1435 case DERR_MISCOMP:
1436 1436 dep->dadk_rq_media_err.value.ui32++;
1437 1437 break;
1438 1438
1439 1439 case DERR_NOTREADY:
1440 1440 dep->dadk_rq_ntrdy_err.value.ui32++;
1441 1441 break;
1442 1442
1443 1443 case DERR_IDNF:
1444 1444 case DERR_UNIT_ATTN:
1445 1445 dep->dadk_rq_nodev_err.value.ui32++;
1446 1446 break;
1447 1447
1448 1448 case DERR_ILL:
1449 1449 case DERR_RESV:
1450 1450 dep->dadk_rq_illrq_err.value.ui32++;
1451 1451 break;
1452 1452
1453 1453 default:
1454 1454 break;
1455 1455 }
1456 1456 }
1457 1457
1458 1458 /* if attempting to read a sector from a cdrom audio disk */
1459 1459 if ((dadkp->dad_cdrom) &&
1460 1460 (*((char *)(pktp->cp_cdbp)) == DCMD_READ) &&
1461 1461 (scb == DERR_ILL)) {
1462 1462 return (COMMAND_DONE);
1463 1463 }
1464 1464 if (pktp->cp_passthru == NULL) {
1465 1465 gda_errmsg(dadkp->dad_sd, pktp, dadk_name,
1466 1466 dadk_errtab[scb].d_severity, pktp->cp_srtsec,
1467 1467 err_blkno, dadk_cmds, dadk_sense);
1468 1468 }
1469 1469
1470 1470 if (scb == DERR_BUSY) {
1471 1471 (void) timeout(dadk_restart, (void *)pktp, DADK_BSY_TIMEOUT);
1472 1472 }
1473 1473
1474 1474 return (dadk_errtab[scb].d_action);
1475 1475 }
1476 1476
1477 1477 static void
1478 1478 dadk_recorderr(struct cmpkt *pktp, struct dadkio_rwcmd *rwcmdp)
1479 1479 {
1480 1480 struct dadk *dadkp;
1481 1481 int scb;
1482 1482
1483 1483 dadkp = PKT2DADK(pktp);
1484 1484 scb = (int)(*(char *)pktp->cp_scbp);
1485 1485
1486 1486
1487 1487 rwcmdp->status.failed_blk = rwcmdp->blkaddr +
1488 1488 ((pktp->cp_bytexfer - pktp->cp_resid) >> dadkp->dad_secshf);
1489 1489
1490 1490 rwcmdp->status.resid = pktp->cp_bp->b_resid +
1491 1491 pktp->cp_byteleft - pktp->cp_bytexfer + pktp->cp_resid;
1492 1492 switch ((int)(* (char *)pktp->cp_scbp)) {
1493 1493 case DERR_AMNF:
1494 1494 case DERR_ABORT:
1495 1495 rwcmdp->status.status = DADKIO_STAT_ILLEGAL_REQUEST;
1496 1496 break;
1497 1497 case DERR_DWF:
1498 1498 case DERR_IDNF:
1499 1499 rwcmdp->status.status = DADKIO_STAT_ILLEGAL_ADDRESS;
1500 1500 break;
1501 1501 case DERR_TKONF:
1502 1502 case DERR_UNC:
1503 1503 case DERR_BBK:
1504 1504 rwcmdp->status.status = DADKIO_STAT_MEDIUM_ERROR;
1505 1505 rwcmdp->status.failed_blk_is_valid = 1;
1506 1506 rwcmdp->status.resid = 0;
1507 1507 break;
1508 1508 case DERR_BUSY:
1509 1509 rwcmdp->status.status = DADKIO_STAT_NOT_READY;
1510 1510 break;
1511 1511 case DERR_INVCDB:
1512 1512 case DERR_HARD:
1513 1513 rwcmdp->status.status = DADKIO_STAT_HARDWARE_ERROR;
1514 1514 break;
1515 1515 case DERR_ICRC:
1516 1516 default:
1517 1517 rwcmdp->status.status = DADKIO_STAT_NOT_SUPPORTED;
1518 1518 }
1519 1519
1520 1520 if (rwcmdp->flags & DADKIO_FLAG_SILENT)
1521 1521 return;
1522 1522 gda_errmsg(dadkp->dad_sd, pktp, dadk_name, dadk_errtab[scb].d_severity,
1523 1523 rwcmdp->blkaddr, rwcmdp->status.failed_blk,
1524 1524 dadk_cmds, dadk_sense);
1525 1525 }
1526 1526
1527 1527 /*ARGSUSED*/
1528 1528 static void
1529 1529 dadk_polldone(struct buf *bp)
1530 1530 {
1531 1531 struct cmpkt *pktp;
1532 1532 struct dadk *dadkp;
1533 1533
1534 1534 pktp = GDA_BP_PKT(bp);
1535 1535 dadkp = PKT2DADK(pktp);
1536 1536 mutex_enter(&dadkp->dad_cmd_mutex);
1537 1537 dadkp->dad_cmd_count--;
1538 1538 mutex_exit(&dadkp->dad_cmd_mutex);
1539 1539 }
1540 1540
1541 1541 static void
1542 1542 dadk_iodone(struct buf *bp)
1543 1543 {
1544 1544 struct cmpkt *pktp;
1545 1545 struct dadk *dadkp;
1546 1546
1547 1547 pktp = GDA_BP_PKT(bp);
1548 1548 dadkp = PKT2DADK(pktp);
1549 1549
1550 1550 /* check for all iodone */
1551 1551 pktp->cp_byteleft -= pktp->cp_bytexfer;
1552 1552 if (geterror(bp) == 0 && pktp->cp_byteleft != 0) {
1553 1553 pktp->cp_retry = 0;
1554 1554 (void) dadk_iosetup(dadkp, pktp);
1555 1555
1556 1556
1557 1557 /* transport the next one */
1558 1558 if (CTL_TRANSPORT(dadkp->dad_ctlobjp, pktp) == CTL_SEND_SUCCESS)
1559 1559 return;
1560 1560 if ((dadk_ioretry(pktp, QUE_COMMAND)) == JUST_RETURN)
1561 1561 return;
1562 1562 }
1563 1563
1564 1564 /* start next one */
1565 1565 FLC_DEQUE(dadkp->dad_flcobjp, bp);
1566 1566
1567 1567 /* free pkt */
1568 1568 if (pktp->cp_private)
1569 1569 BBH_FREEHANDLE(dadkp->dad_bbhobjp, pktp->cp_private);
1570 1570 gda_free(dadkp->dad_ctlobjp, pktp, NULL);
1571 1571 mutex_enter(&dadkp->dad_cmd_mutex);
1572 1572 dadkp->dad_cmd_count--;
1573 1573 mutex_exit(&dadkp->dad_cmd_mutex);
1574 1574 biodone(bp);
1575 1575 }
1576 1576
1577 1577 int
1578 1578 dadk_check_media(opaque_t objp, int *state)
1579 1579 {
1580 1580 struct dadk *dadkp = (struct dadk *)objp;
1581 1581
1582 1582 if (!dadkp->dad_rmb) {
1583 1583 return (ENXIO);
1584 1584 }
1585 1585 #ifdef DADK_DEBUG
1586 1586 if (dadk_debug & DSTATE)
1587 1587 PRF("dadk_check_media: user state %x disk state %x\n",
1588 1588 *state, dadkp->dad_iostate);
1589 1589 #endif
1590 1590 /*
1591 1591 * If state already changed just return
1592 1592 */
1593 1593 if (*state != dadkp->dad_iostate) {
1594 1594 *state = dadkp->dad_iostate;
1595 1595 return (0);
1596 1596 }
1597 1597
1598 1598 /*
1599 1599 * Startup polling on thread state
1600 1600 */
1601 1601 mutex_enter(&dadkp->dad_mutex);
1602 1602 if (dadkp->dad_thread_cnt == 0) {
1603 1603 /*
1604 1604 * One thread per removable dadk device
1605 1605 */
1606 1606 (void) thread_create(NULL, 0, dadk_watch_thread, dadkp, 0, &p0,
1607 1607 TS_RUN, v.v_maxsyspri - 2);
1608 1608 }
1609 1609 dadkp->dad_thread_cnt++;
1610 1610
1611 1611 /*
1612 1612 * Wait for state to change
1613 1613 */
1614 1614 do {
1615 1615 if (cv_wait_sig(&dadkp->dad_state_cv, &dadkp->dad_mutex) == 0) {
1616 1616 dadkp->dad_thread_cnt--;
1617 1617 mutex_exit(&dadkp->dad_mutex);
1618 1618 return (EINTR);
1619 1619 }
1620 1620 } while (*state == dadkp->dad_iostate);
1621 1621 *state = dadkp->dad_iostate;
1622 1622 dadkp->dad_thread_cnt--;
1623 1623 mutex_exit(&dadkp->dad_mutex);
1624 1624 return (0);
1625 1625 }
1626 1626
1627 1627
1628 1628 #define MEDIA_ACCESS_DELAY 2000000
1629 1629
1630 1630 static void
1631 1631 dadk_watch_thread(struct dadk *dadkp)
1632 1632 {
1633 1633 enum dkio_state state;
1634 1634 int interval;
1635 1635
1636 1636 interval = drv_usectohz(dadk_check_media_time);
1637 1637
1638 1638 do {
1639 1639 if (dadk_rmb_ioctl(dadkp, DCMD_GET_STATE, (intptr_t)&state, 0,
1640 1640 DADK_SILENT)) {
1641 1641 /*
1642 1642 * Assume state remained the same
1643 1643 */
1644 1644 state = dadkp->dad_iostate;
1645 1645 }
1646 1646
1647 1647 /*
1648 1648 * now signal the waiting thread if this is *not* the
1649 1649 * specified state;
1650 1650 * delay the signal if the state is DKIO_INSERTED
1651 1651 * to allow the target to recover
1652 1652 */
1653 1653 if (state != dadkp->dad_iostate) {
1654 1654
1655 1655 dadkp->dad_iostate = state;
1656 1656 if (state == DKIO_INSERTED) {
1657 1657 /*
1658 1658 * delay the signal to give the drive a chance
1659 1659 * to do what it apparently needs to do
1660 1660 */
1661 1661 (void) timeout((void(*)(void *))cv_broadcast,
1662 1662 (void *)&dadkp->dad_state_cv,
1663 1663 drv_usectohz((clock_t)MEDIA_ACCESS_DELAY));
1664 1664 } else {
1665 1665 cv_broadcast(&dadkp->dad_state_cv);
1666 1666 }
1667 1667 }
1668 1668 delay(interval);
1669 1669 } while (dadkp->dad_thread_cnt);
1670 1670 }
1671 1671
1672 1672 int
1673 1673 dadk_inquiry(opaque_t objp, opaque_t *inqpp)
1674 1674 {
1675 1675 struct dadk *dadkp = (struct dadk *)objp;
1676 1676 struct scsi_inquiry **sinqpp = (struct scsi_inquiry **)inqpp;
1677 1677
1678 1678 if (dadkp && dadkp->dad_sd && dadkp->dad_sd->sd_inq) {
1679 1679 *sinqpp = dadkp->dad_sd->sd_inq;
1680 1680 return (DDI_SUCCESS);
1681 1681 }
1682 1682
1683 1683 return (DDI_FAILURE);
1684 1684 }
1685 1685
1686 1686 static int
1687 1687 dadk_rmb_ioctl(struct dadk *dadkp, int cmd, intptr_t arg, int flags, int silent)
1688 1688
1689 1689 {
1690 1690 struct buf *bp;
1691 1691 int err;
1692 1692 struct cmpkt *pktp;
1693 1693
1694 1694 if ((bp = getrbuf(KM_SLEEP)) == NULL) {
1695 1695 return (ENOMEM);
1696 1696 }
1697 1697 pktp = dadk_pktprep(dadkp, NULL, bp, dadk_rmb_iodone, NULL, NULL);
1698 1698 if (!pktp) {
1699 1699 freerbuf(bp);
1700 1700 return (ENOMEM);
1701 1701 }
1702 1702 bp->b_back = (struct buf *)arg;
1703 1703 bp->b_forw = (struct buf *)dadkp->dad_flcobjp;
1704 1704 pktp->cp_passthru = (opaque_t)(intptr_t)silent;
1705 1705
1706 1706 err = dadk_ctl_ioctl(dadkp, cmd, (uintptr_t)pktp, flags);
1707 1707 freerbuf(bp);
1708 1708 gda_free(dadkp->dad_ctlobjp, pktp, NULL);
1709 1709 return (err);
1710 1710
1711 1711
1712 1712 }
1713 1713
1714 1714 static void
1715 1715 dadk_rmb_iodone(struct buf *bp)
1716 1716 {
1717 1717 struct cmpkt *pktp;
1718 1718 struct dadk *dadkp;
1719 1719
1720 1720 pktp = GDA_BP_PKT(bp);
1721 1721 dadkp = PKT2DADK(pktp);
1722 1722
1723 1723 bp->b_flags &= ~(B_DONE|B_BUSY);
1724 1724
1725 1725 /* Start next one */
1726 1726 FLC_DEQUE(dadkp->dad_flcobjp, bp);
1727 1727
1728 1728 mutex_enter(&dadkp->dad_cmd_mutex);
1729 1729 dadkp->dad_cmd_count--;
1730 1730 mutex_exit(&dadkp->dad_cmd_mutex);
1731 1731 biodone(bp);
1732 1732 }
1733 1733
1734 1734 static int
1735 1735 dadk_dk_buf_setup(struct dadk *dadkp, opaque_t *cmdp, dev_t dev,
1736 1736 enum uio_seg dataspace, int rw)
1737 1737 {
1738 1738 struct dadkio_rwcmd *rwcmdp = (struct dadkio_rwcmd *)cmdp;
1739 1739 struct buf *bp;
1740 1740 struct iovec aiov;
1741 1741 struct uio auio;
1742 1742 struct uio *uio = &auio;
1743 1743 int status;
1744 1744
1745 1745 bp = getrbuf(KM_SLEEP);
1746 1746
1747 1747 bp->av_forw = bp->b_forw = (struct buf *)dadkp;
1748 1748 bp->b_back = (struct buf *)rwcmdp; /* ioctl packet */
1749 1749
1750 1750 bzero((caddr_t)&auio, sizeof (struct uio));
1751 1751 bzero((caddr_t)&aiov, sizeof (struct iovec));
1752 1752 aiov.iov_base = rwcmdp->bufaddr;
1753 1753 aiov.iov_len = rwcmdp->buflen;
1754 1754 uio->uio_iov = &aiov;
1755 1755
1756 1756 uio->uio_iovcnt = 1;
1757 1757 uio->uio_resid = rwcmdp->buflen;
1758 1758 uio->uio_segflg = dataspace;
1759 1759
1760 1760 /* Let physio do the rest... */
1761 1761 status = physio(dadk_dk_strategy, bp, dev, rw, dadkmin, uio);
1762 1762
1763 1763 freerbuf(bp);
1764 1764 return (status);
1765 1765
1766 1766 }
1767 1767
1768 1768 /* Do not let a user gendisk request get too big or */
1769 1769 /* else we could use too many resources. */
1770 1770
1771 1771 static void
1772 1772 dadkmin(struct buf *bp)
1773 1773 {
1774 1774 if (bp->b_bcount > dadk_dk_maxphys)
1775 1775 bp->b_bcount = dadk_dk_maxphys;
1776 1776 }
1777 1777
1778 1778 static int
1779 1779 dadk_dk_strategy(struct buf *bp)
1780 1780 {
1781 1781 dadk_dk((struct dadk *)bp->av_forw, (struct dadkio_rwcmd *)bp->b_back,
1782 1782 bp);
1783 1783 return (0);
1784 1784 }
1785 1785
1786 1786 static void
1787 1787 dadk_dk(struct dadk *dadkp, struct dadkio_rwcmd *rwcmdp, struct buf *bp)
1788 1788 {
1789 1789 struct cmpkt *pktp;
1790 1790
1791 1791 pktp = dadk_pktprep(dadkp, NULL, bp, dadk_iodone, NULL, NULL);
1792 1792 if (!pktp) {
1793 1793 bioerror(bp, ENOMEM);
1794 1794 biodone(bp);
1795 1795 return;
1796 1796 }
1797 1797
1798 1798 pktp->cp_passthru = rwcmdp;
1799 1799
1800 1800 (void) dadk_ioprep(dadkp, pktp);
1801 1801
1802 1802 mutex_enter(&dadkp->dad_cmd_mutex);
1803 1803 dadkp->dad_cmd_count++;
1804 1804 mutex_exit(&dadkp->dad_cmd_mutex);
1805 1805 FLC_ENQUE(dadkp->dad_flcobjp, bp);
1806 1806 }
1807 1807
1808 1808 /*
1809 1809  * There is no existing way to notify the cmdk module
1810 1810  * when a command has completed, so add this function
1811 1811  * to report how many commands are still on-going.
1812 1812 */
1813 1813 int
1814 1814 dadk_getcmds(opaque_t objp)
1815 1815 {
1816 1816 struct dadk *dadkp = (struct dadk *)objp;
1817 1817 int count;
1818 1818
1819 1819 mutex_enter(&dadkp->dad_cmd_mutex);
1820 1820 count = dadkp->dad_cmd_count;
1821 1821 mutex_exit(&dadkp->dad_cmd_mutex);
1822 1822 return (count);
1823 1823 }
1824 1824
1825 1825 /*
1826 1826  * wrapper around CTL_IOCTL that tracks the number of on-going commands
1827 1827 */
1828 1828 static int
1829 1829 dadk_ctl_ioctl(struct dadk *dadkp, uint32_t cmd, uintptr_t arg, int flag)
1830 1830 {
1831 1831 int error;
1832 1832 mutex_enter(&dadkp->dad_cmd_mutex);
1833 1833 dadkp->dad_cmd_count++;
1834 1834 mutex_exit(&dadkp->dad_cmd_mutex);
1835 1835 error = CTL_IOCTL(dadkp->dad_ctlobjp, cmd, arg, flag);
1836 1836 mutex_enter(&dadkp->dad_cmd_mutex);
1837 1837 dadkp->dad_cmd_count--;
1838 1838 mutex_exit(&dadkp->dad_cmd_mutex);
1839 1839 return (error);
1840 1840 }