Print this page
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/skd/skd.c
+++ new/usr/src/uts/common/io/skd/skd.c
1 1 /*
2 2 *
3 3 * skd.c: Solaris 11/10 Driver for sTec, Inc. S112x PCIe SSD card
4 4 *
5 5 * Solaris driver is based on the Linux driver authored by:
6 6 *
7 7 * Authors/Alphabetical: Dragan Stancevic <dstancevic@stec-inc.com>
8 8 * Gordon Waidhofer <gwaidhofer@stec-inc.com>
9 9 * John Hamilton <jhamilton@stec-inc.com>
10 10 */
11 11
12 12 /*
13 13 * This file and its contents are supplied under the terms of the
14 14 * Common Development and Distribution License ("CDDL"), version 1.0.
15 15 * You may only use this file in accordance with the terms of version
16 16 * 1.0 of the CDDL.
17 17 *
18 18 * A full copy of the text of the CDDL should have accompanied this
19 19 * source. A copy of the CDDL is also available via the Internet at
20 20 * http://www.illumos.org/license/CDDL.
21 21 */
22 22
23 23 /*
24 24 * Copyright 2013 STEC, Inc. All rights reserved.
25 25 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
26 26 */
27 27
28 28 #include <sys/types.h>
29 29 #include <sys/stream.h>
30 30 #include <sys/cmn_err.h>
31 31 #include <sys/kmem.h>
32 32 #include <sys/file.h>
33 33 #include <sys/buf.h>
34 34 #include <sys/uio.h>
35 35 #include <sys/cred.h>
36 36 #include <sys/modctl.h>
37 37 #include <sys/debug.h>
38 38 #include <sys/modctl.h>
39 39 #include <sys/list.h>
40 40 #include <sys/sysmacros.h>
41 41 #include <sys/errno.h>
42 42 #include <sys/pcie.h>
43 43 #include <sys/pci.h>
44 44 #include <sys/ddi.h>
45 45 #include <sys/dditypes.h>
46 46 #include <sys/sunddi.h>
47 47 #include <sys/atomic.h>
48 48 #include <sys/mutex.h>
49 49 #include <sys/param.h>
50 50 #include <sys/devops.h>
51 51 #include <sys/blkdev.h>
52 52 #include <sys/queue.h>
53 53 #include <sys/scsi/impl/inquiry.h>
54 54
55 55 #include "skd_s1120.h"
56 56 #include "skd.h"
57 57
/* Debug trace level; 0 (the default) suppresses all Dcmn_err() output. */
int skd_dbg_level = 0;

/* Soft-state anchor for per-instance skd_device_t; created in _init(). */
void *skd_state = NULL;
/* Tunables: non-zero disables the corresponding interrupt type. */
int skd_disable_msi = 0;
int skd_disable_msix = 0;

/* Initialized in _init() and tunable, see _init(). */
clock_t skd_timer_ticks;

/* I/O DMA attributes structures. */
static ddi_dma_attr_t skd_64bit_io_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	SKD_DMA_LOW_ADDRESS,		/* low DMA address range */
	SKD_DMA_HIGH_64BIT_ADDRESS,	/* high DMA address range */
	SKD_DMA_XFER_COUNTER,		/* DMA counter register */
	SKD_DMA_ADDRESS_ALIGNMENT,	/* DMA address alignment */
	SKD_DMA_BURSTSIZES,		/* DMA burstsizes */
	SKD_DMA_MIN_XFER_SIZE,		/* min effective DMA size */
	SKD_DMA_MAX_XFER_SIZE,		/* max DMA xfer size */
	SKD_DMA_SEGMENT_BOUNDARY,	/* segment boundary */
	SKD_DMA_SG_LIST_LENGTH,		/* s/g list length */
	SKD_DMA_GRANULARITY,		/* granularity of device */
	SKD_DMA_XFER_FLAGS		/* DMA transfer flags */
};

/* Interrupt type in use; -1 = not yet selected — presumably set at attach. */
int skd_isr_type = -1;

/* Queue depth: hardware maximum and the default working limit (tunable). */
#define	SKD_MAX_QUEUE_DEPTH		255
#define	SKD_MAX_QUEUE_DEPTH_DEFAULT	64
int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;

/* Max SoFIT requests coalesced into a single FIT message (tunable). */
#define	SKD_MAX_REQ_PER_MSG		14
#define	SKD_MAX_REQ_PER_MSG_DEFAULT	1
int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;

/* Upper bound on scatter/gather entries per request (tunable). */
#define	SKD_MAX_N_SG_PER_REQ		4096
int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
/* Forward declarations for static functions defined later in this file. */
static int skd_sys_quiesce_dev(dev_info_t *);
static int skd_quiesce_dev(skd_device_t *);
static int skd_list_skmsg(skd_device_t *, int);
static int skd_list_skreq(skd_device_t *, int);
static int skd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int skd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int skd_format_internal_skspcl(struct skd_device *skdev);
static void skd_start(skd_device_t *);
static void skd_destroy_mutex(skd_device_t *skdev);
static void skd_enable_interrupts(struct skd_device *);
static void skd_request_fn_not_online(skd_device_t *skdev);
static void skd_send_internal_skspcl(struct skd_device *,
    struct skd_special_context *, uint8_t);
static void skd_queue(skd_device_t *, skd_buf_private_t *);
static void *skd_alloc_dma_mem(skd_device_t *, dma_mem_t *, uint8_t);
static void skd_release_intr(skd_device_t *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
static void skd_isr_msg_from_dev(struct skd_device *skdev);
static void skd_soft_reset(struct skd_device *skdev);
static void skd_refresh_device_data(struct skd_device *skdev);
static void skd_update_props(skd_device_t *, dev_info_t *);
static void skd_end_request_abnormal(struct skd_device *, skd_buf_private_t *,
    int, int);
static char *skd_pci_info(struct skd_device *skdev, char *str, size_t len);

static skd_buf_private_t *skd_get_queued_pbuf(skd_device_t *);

/* blkdev framework callbacks (registered via skd_bd_ops below). */
static void skd_bd_driveinfo(void *arg, bd_drive_t *drive);
static int skd_bd_mediainfo(void *arg, bd_media_t *media);
static int skd_bd_read(void *arg, bd_xfer_t *xfer);
static int skd_bd_write(void *arg, bd_xfer_t *xfer);
static int skd_devid_init(void *arg, dev_info_t *, ddi_devid_t *);
129 129
/* blkdev entry-point vector for this driver. */
static bd_ops_t skd_bd_ops = {
	BD_OPS_VERSION_0,
	skd_bd_driveinfo,
	skd_bd_mediainfo,
	skd_devid_init,
	NULL,			/* sync_cache: not supported */
	skd_bd_read,
	skd_bd_write,
};

/* Device register access: little-endian, strictly ordered accesses. */
static ddi_device_acc_attr_t dev_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};
145 145
/*
 * Solaris module loading/unloading structures
 */
struct dev_ops skd_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	ddi_no_info,		/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	skd_attach,		/* attach */
	skd_detach,		/* detach */
	nodev,			/* reset */
	NULL,			/* char/block ops */
	NULL,			/* bus operations */
	NULL,			/* power management */
	skd_sys_quiesce_dev	/* quiesce */
};
163 163
/* Driver linkage element for mod_install()/mod_remove(). */
static struct modldrv modldrv = {
	&mod_driverops,			/* type of module: driver */
	"sTec skd v" DRV_VER_COMPL,	/* name of module */
	&skd_dev_ops			/* driver dev_ops */
};
169 169
170 170 static struct modlinkage modlinkage = {
171 171 MODREV_1,
172 - &modldrv,
173 - NULL
172 + { &modldrv, NULL }
174 173 };
175 174
176 175 /*
177 176 * sTec-required wrapper for debug printing.
178 177 */
179 178 /*PRINTFLIKE2*/
180 179 static inline void
181 180 Dcmn_err(int lvl, const char *fmt, ...)
182 181 {
183 182 va_list ap;
184 183
185 184 if (skd_dbg_level == 0)
186 185 return;
187 186
188 187 va_start(ap, fmt);
189 188 vcmn_err(lvl, fmt, ap);
190 189 va_end(ap);
191 190 }
192 191
/*
 * Solaris module loading/unloading routines
 */

/*
 *
 * Name: _init, performs initial installation
 *
 * Inputs: None.
 *
 * Returns: the ddi_soft_state_init() error on a failure to create the
 *	device soft-state anchor, otherwise the result of mod_install().
 *
 */
int
_init(void)
{
	int rval = 0;
	int tgts = 0;

	/* Legacy target flags retained from the original sTec code drop. */
	tgts |= 0x02;
	tgts |= 0x08;	/* In #ifdef NEXENTA block from original sTec drop. */

	/*
	 * drv_usectohz() is a function, so can't initialize it at
	 * instantiation.
	 */
	skd_timer_ticks = drv_usectohz(1000000);	/* one second */

	Dcmn_err(CE_NOTE,
	    "<# Installing skd Driver dbg-lvl=%d %s %x>",
	    skd_dbg_level, DRV_BUILD_ID, tgts);

	rval = ddi_soft_state_init(&skd_state, sizeof (skd_device_t), 0);
	if (rval != DDI_SUCCESS)
		return (rval);

	bd_mod_init(&skd_dev_ops);

	rval = mod_install(&modlinkage);
	if (rval != DDI_SUCCESS) {
		/* Unwind both prior initializations on failure. */
		ddi_soft_state_fini(&skd_state);
		bd_mod_fini(&skd_dev_ops);
	}

	return (rval);
}
241 240
/*
 *
 * Name: _info, returns information about loadable module.
 *
 * Inputs: modinfop, pointer to module information structure.
 *
 * Returns: Value returned by mod_info().
 *
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
256 255
/*
 * _fini	Prepares a module for unloading. It is called when the system
 *	wants to unload a module. If the module determines that it can
 *	be unloaded, then _fini() returns the value returned by
 *	mod_remove(). Upon successful return from _fini() no other
 *	routine in the module will be called before _init() is called.
 *
 * Inputs: None.
 *
 * Returns: DDI_SUCCESS or DDI_FAILURE.
 *
 */
int
_fini(void)
{
	int rval;

	/* Only tear down global state once the module is actually removed. */
	rval = mod_remove(&modlinkage);
	if (rval == DDI_SUCCESS) {
		ddi_soft_state_fini(&skd_state);
		bd_mod_fini(&skd_dev_ops);
	}

	return (rval);
}
282 281
/*
 * Solaris Register read/write routines
 */

/*
 *
 * Name: skd_reg_write64, writes a 64-bit value to specified address
 *
 * Inputs: skdev	- device state structure.
 *	val	- 64-bit value to be written.
 *	offset	- offset from PCI base address.
 *
 * Returns: Nothing.
 *
 */
/*
 * Local vars are to keep lint silent. Any compiler worth its weight will
 * optimize it all right out...
 */
static inline void
skd_reg_write64(struct skd_device *skdev, uint64_t val, uint32_t offset)
{
	uint64_t *addr;

	/* 64-bit register access must be 8-byte aligned. */
	ASSERT((offset & 0x7) == 0);
	/* LINTED */
	addr = (uint64_t *)(skdev->dev_iobase + offset);
	ddi_put64(skdev->dev_handle, addr, val);
}
312 311
/*
 *
 * Name: skd_reg_read32, reads a 32-bit value from specified address
 *
 * Inputs: skdev	- device state structure.
 *	offset	- offset from PCI base address.
 *
 * Returns: val, 32-bit value read from specified PCI address.
 *
 */
static inline uint32_t
skd_reg_read32(struct skd_device *skdev, uint32_t offset)
{
	uint32_t *addr;

	/* 32-bit register access must be 4-byte aligned. */
	ASSERT((offset & 0x3) == 0);
	/* LINTED */
	addr = (uint32_t *)(skdev->dev_iobase + offset);
	return (ddi_get32(skdev->dev_handle, addr));
}
333 332
/*
 *
 * Name: skd_reg_write32, writes a 32-bit value to specified address
 *
 * Inputs: skdev	- device state structure.
 *	val	- value to be written.
 *	offset	- offset from PCI base address.
 *
 * Returns: Nothing.
 *
 */
static inline void
skd_reg_write32(struct skd_device *skdev, uint32_t val, uint32_t offset)
{
	uint32_t *addr;

	/* 32-bit register access must be 4-byte aligned. */
	ASSERT((offset & 0x3) == 0);
	/* LINTED */
	addr = (uint32_t *)(skdev->dev_iobase + offset);
	ddi_put32(skdev->dev_handle, addr, val);
}
355 354
356 355
/*
 * Solaris skd routines
 */

/*
 *
 * Name: skd_name, generates the name of the driver.
 *
 * Inputs: skdev	- device state structure
 *
 * Returns: char pointer to generated driver name (stored in, and valid
 *	for the lifetime of, skdev->id_str).
 *
 */
static const char *
skd_name(struct skd_device *skdev)
{
	(void) snprintf(skdev->id_str, sizeof (skdev->id_str), "%s:", DRV_NAME);

	return (skdev->id_str);
}
377 376
378 377 /*
379 378 *
380 379 * Name: skd_pci_find_capability, searches the PCI capability
381 380 * list for the specified capability.
382 381 *
383 382 * Inputs: skdev - device state structure.
384 383 * cap - capability sought.
385 384 *
386 385 * Returns: Returns position where capability was found.
387 386 * If not found, returns zero.
388 387 *
389 388 */
390 389 static int
391 390 skd_pci_find_capability(struct skd_device *skdev, int cap)
392 391 {
393 392 uint16_t status;
394 393 uint8_t pos, id, hdr;
395 394 int ttl = 48;
396 395
397 396 status = pci_config_get16(skdev->pci_handle, PCI_CONF_STAT);
398 397
399 398 if (!(status & PCI_STAT_CAP))
400 399 return (0);
401 400
402 401 hdr = pci_config_get8(skdev->pci_handle, PCI_CONF_HEADER);
403 402
404 403 if ((hdr & PCI_HEADER_TYPE_M) != 0)
405 404 return (0);
406 405
407 406 pos = pci_config_get8(skdev->pci_handle, PCI_CONF_CAP_PTR);
408 407
409 408 while (ttl-- && pos >= 0x40) {
410 409 pos &= ~3;
411 410 id = pci_config_get8(skdev->pci_handle, pos+PCI_CAP_ID);
412 411 if (id == 0xff)
413 412 break;
414 413 if (id == cap)
415 414 return (pos);
416 415 pos = pci_config_get8(skdev->pci_handle, pos+PCI_CAP_NEXT_PTR);
417 416 }
418 417
419 418 return (0);
420 419 }
421 420
/*
 *
 * Name: skd_io_done, called to conclude an I/O operation.
 *
 * Inputs: skdev	- device state structure.
 *	pbuf	- I/O request; freed here, must not be used afterwards.
 *	error	- contains error value.
 *	mode	- completion-path tag, used only for statistics.
 *
 * Returns: Nothing.
 *
 */
static void
skd_io_done(skd_device_t *skdev, skd_buf_private_t *pbuf,
    int error, int mode)
{
	bd_xfer_t *xfer;

	ASSERT(pbuf != NULL);

	xfer = pbuf->x_xfer;

	/* Account which completion path delivered this I/O. */
	switch (mode) {
	case SKD_IODONE_WIOC:
		skdev->iodone_wioc++;
		break;
	case SKD_IODONE_WNIOC:
		skdev->iodone_wnioc++;
		break;
	case SKD_IODONE_WDEBUG:
		skdev->iodone_wdebug++;
		break;
	default:
		skdev->iodone_unknown++;
	}

	if (error) {
		skdev->ios_errors++;
		cmn_err(CE_WARN,
		    "!%s:skd_io_done:ERR=%d %lld-%ld %s", skdev->name,
		    error, xfer->x_blkno, xfer->x_nblks,
		    (pbuf->dir & B_READ) ? "Read" : "Write");
	}

	/* pbuf is dead from here on; only xfer survives for blkdev. */
	kmem_free(pbuf, sizeof (skd_buf_private_t));

	bd_xfer_done(xfer, error);
}
470 469
471 470 /*
472 471 * QUIESCE DEVICE
473 472 */
474 473
475 474 /*
476 475 *
477 476 * Name: skd_sys_quiesce_dev, quiets the device
478 477 *
479 478 * Inputs: dip - dev info strucuture
480 479 *
481 480 * Returns: Zero.
482 481 *
483 482 */
484 483 static int
485 484 skd_sys_quiesce_dev(dev_info_t *dip)
486 485 {
487 486 skd_device_t *skdev;
488 487
489 488 skdev = ddi_get_soft_state(skd_state, ddi_get_instance(dip));
490 489
491 490 /* make sure Dcmn_err() doesn't actually print anything */
492 491 skd_dbg_level = 0;
493 492
494 493 skd_disable_interrupts(skdev);
495 494 skd_soft_reset(skdev);
496 495
497 496 return (0);
498 497 }
499 498
500 499 /*
501 500 *
502 501 * Name: skd_quiesce_dev, quiets the device, but doesn't really do much.
503 502 *
504 503 * Inputs: skdev - Device state.
505 504 *
506 505 * Returns: -EINVAL if device is not in proper state otherwise
507 506 * returns zero.
508 507 *
509 508 */
510 509 static int
511 510 skd_quiesce_dev(skd_device_t *skdev)
512 511 {
513 512 int rc = 0;
514 513
515 514 if (skd_dbg_level)
516 515 Dcmn_err(CE_NOTE, "skd_quiece_dev:");
517 516
518 517 switch (skdev->state) {
519 518 case SKD_DRVR_STATE_BUSY:
520 519 case SKD_DRVR_STATE_BUSY_IMMINENT:
521 520 Dcmn_err(CE_NOTE, "%s: stopping queue", skdev->name);
522 521 break;
523 522 case SKD_DRVR_STATE_ONLINE:
524 523 case SKD_DRVR_STATE_STOPPING:
525 524 case SKD_DRVR_STATE_SYNCING:
526 525 case SKD_DRVR_STATE_PAUSING:
527 526 case SKD_DRVR_STATE_PAUSED:
528 527 case SKD_DRVR_STATE_STARTING:
529 528 case SKD_DRVR_STATE_RESTARTING:
530 529 case SKD_DRVR_STATE_RESUMING:
531 530 default:
532 531 rc = -EINVAL;
533 532 cmn_err(CE_NOTE, "state [%d] not implemented", skdev->state);
534 533 }
535 534
536 535 return (rc);
537 536 }
538 537
/*
 * UNQUIESCE DEVICE:
 * Note: Assumes lock is held to protect device state.
 */
/*
 *
 * Name: skd_unquiesce_dev, awakens the device
 *
 * Inputs: skdev	- Device state.
 *
 * Returns: -EBUSY if device is not in proper state otherwise
 *	returns zero.
 *
 */
static int
skd_unquiesce_dev(struct skd_device *skdev)
{
	/* NOTE(review): "skd_unquiece_dev" in the message is a typo. */
	Dcmn_err(CE_NOTE, "skd_unquiece_dev:");

	skd_log_skdev(skdev, "unquiesce");
	if (skdev->state == SKD_DRVR_STATE_ONLINE) {
		Dcmn_err(CE_NOTE, "**** device already ONLINE");

		return (0);
	}
	if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
		/*
		 * If there has been an state change to other than
		 * ONLINE, we will rely on controller state change
		 * to come back online and restart the queue.
		 * The BUSY state means that driver is ready to
		 * continue normal processing but waiting for controller
		 * to become available.
		 */
		skdev->state = SKD_DRVR_STATE_BUSY;
		Dcmn_err(CE_NOTE, "drive BUSY state\n");

		return (0);
	}
	/*
	 * Drive just come online, driver is either in startup,
	 * paused performing a task, or bust waiting for hardware.
	 */
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		skdev->state = SKD_DRVR_STATE_ONLINE;
		Dcmn_err(CE_NOTE, "%s: sTec s1120 ONLINE", skdev->name);
		Dcmn_err(CE_NOTE, "%s: Starting request queue", skdev->name);
		Dcmn_err(CE_NOTE,
		    "%s: queue depth limit=%d hard=%d soft=%d lowat=%d",
		    skdev->name,
		    skdev->queue_depth_limit,
		    skdev->hard_queue_depth_limit,
		    skdev->soft_queue_depth_limit,
		    skdev->queue_depth_lowat);

		/* Wake anyone blocked waiting for the disk to appear. */
		skdev->gendisk_on = 1;
		cv_signal(&skdev->cv_waitq);
		break;
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		cmn_err(CE_NOTE, "**** driver state %d, not implemented \n",
		    skdev->state);
		return (-EBUSY);
	}

	return (0);
}
615 614
/*
 * READ/WRITE REQUESTS
 */

/*
 *
 * Name: skd_blkdev_preop_sg_list, builds the S/G list from info
 *	passed in by the blkdev driver.
 *
 * Inputs: skdev	- device state structure.
 *	skreq	- request structure.
 *	sg_byte_count	- out: total data transfer byte count.
 *
 * Returns: Nothing.
 *
 */
/*ARGSUSED*/
static void
skd_blkdev_preop_sg_list(struct skd_device *skdev,
    struct skd_request_context *skreq, uint32_t *sg_byte_count)
{
	bd_xfer_t *xfer;
	skd_buf_private_t *pbuf;
	int i, bcount = 0;
	uint_t n_sg;

	*sg_byte_count = 0;

	ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
	    skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST);

	pbuf = skreq->pbuf;
	ASSERT(pbuf != NULL);

	xfer = pbuf->x_xfer;
	n_sg = xfer->x_ndmac;

	ASSERT(n_sg <= skdev->sgs_per_request);

	skreq->n_sg = n_sg;

	skreq->io_dma_handle = xfer->x_dmah;

	skreq->total_sg_bcount = 0;

	/*
	 * Walk the DMA cookies in order; x_dmac holds the current cookie
	 * and ddi_dma_nextcookie() advances it, so iteration order matters.
	 */
	for (i = 0; i < n_sg; i++) {
		ddi_dma_cookie_t *cookiep = &xfer->x_dmac;
		struct fit_sg_descriptor *sgd;
		uint32_t cnt = (uint32_t)cookiep->dmac_size;

		bcount += cnt;

		/* One FIT S/G descriptor per DMA cookie. */
		sgd = &skreq->sksg_list[i];
		sgd->control = FIT_SGD_CONTROL_NOT_LAST;
		sgd->byte_count = cnt;
		sgd->host_side_addr = cookiep->dmac_laddress;
		sgd->dev_side_addr = 0; /* not used */
		*sg_byte_count += cnt;

		skreq->total_sg_bcount += cnt;

		if ((i + 1) != n_sg)
			ddi_dma_nextcookie(skreq->io_dma_handle, &xfer->x_dmac);
	}

	/* Terminate the descriptor chain. */
	skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
	skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

	/* Make the list visible to the device before it is handed off. */
	(void) ddi_dma_sync(skreq->sksg_dma_address.dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
}
687 686
/*
 *
 * Name: skd_blkdev_postop_sg_list, restores the S/G list after I/O.
 *
 * Inputs: skdev	- device state structure.
 *	skreq	- skreq data structure.
 *
 * Returns: Nothing.
 *
 */
/* ARGSUSED */	/* Upstream common source with other platforms. */
static void
skd_blkdev_postop_sg_list(struct skd_device *skdev,
    struct skd_request_context *skreq)
{
	/*
	 * restore the next ptr for next IO request so we
	 * don't have to set it every time.
	 */
	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
	    skreq->sksg_dma_address.cookies->dmac_laddress +
	    ((skreq->n_sg) * sizeof (struct fit_sg_descriptor));
}
711 710
/*
 *
 * Name: skd_start, initiates I/O: drains the wait queue into FIT
 *	messages and sends them to the device.
 *
 * Inputs: skdev	- device state structure.
 *
 * Returns: Nothing (requests that cannot be started remain queued).
 *
 */
/* Upstream common source with other platforms. */
static void
skd_start(skd_device_t *skdev)
{
	struct skd_fitmsg_context *skmsg = NULL;
	struct fit_msg_hdr *fmh = NULL;
	struct skd_request_context *skreq = NULL;
	struct waitqueue *waitq = &skdev->waitqueue;
	struct skd_scsi_request *scsi_req;
	skd_buf_private_t *pbuf = NULL;
	int bcount;

	uint32_t lba;
	uint32_t count;
	uint32_t timo_slot;
	void *cmd_ptr;
	uint32_t sg_byte_count = 0;

	/*
	 * Stop conditions:
	 *  - There are no more native requests
	 *  - There are already the maximum number of requests in progress
	 *  - There are no more skd_request_context entries
	 *  - There are no more FIT msg buffers
	 */
	for (;;) {
		/* Are too many requests already in progress? */
		if (skdev->queue_depth_busy >= skdev->queue_depth_limit) {
			Dcmn_err(CE_NOTE, "qdepth %d, limit %d\n",
			    skdev->queue_depth_busy,
			    skdev->queue_depth_limit);
			break;
		}

		WAITQ_LOCK(skdev);
		if (SIMPLEQ_EMPTY(waitq)) {
			WAITQ_UNLOCK(skdev);
			break;
		}

		/* Is a skd_request_context available? */
		skreq = skdev->skreq_free_list;
		if (skreq == NULL) {
			WAITQ_UNLOCK(skdev);
			break;
		}

		ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
		ASSERT((skreq->id & SKD_ID_INCR) == 0);

		skdev->skreq_free_list = skreq->next;

		skreq->state = SKD_REQ_STATE_BUSY;
		skreq->id += SKD_ID_INCR;

		/* Start a new FIT msg if there is none in progress. */
		if (skmsg == NULL) {
			/* Are there any FIT msg buffers available? */
			skmsg = skdev->skmsg_free_list;
			if (skmsg == NULL) {
				/*
				 * NOTE(review): skreq was already taken off
				 * the free list and marked BUSY above; it is
				 * not returned here — confirm this is not a
				 * request-context leak.
				 */
				WAITQ_UNLOCK(skdev);
				break;
			}

			ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
			ASSERT((skmsg->id & SKD_ID_INCR) == 0);

			skdev->skmsg_free_list = skmsg->next;

			skmsg->state = SKD_MSG_STATE_BUSY;
			skmsg->id += SKD_ID_INCR;

			/* Initialize the FIT msg header */
			fmh = (struct fit_msg_hdr *)skmsg->msg_buf64;
			bzero(fmh, sizeof (*fmh)); /* Too expensive */
			fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
			skmsg->length = sizeof (struct fit_msg_hdr);
		}

		/*
		 * At this point we are committed to either start or reject
		 * the native request. Note that a FIT msg may have just been
		 * started but contains no SoFIT requests yet.
		 * Now - dequeue pbuf.
		 */
		pbuf = skd_get_queued_pbuf(skdev);
		WAITQ_UNLOCK(skdev);

		skreq->pbuf = pbuf;
		lba = pbuf->x_xfer->x_blkno;
		count = pbuf->x_xfer->x_nblks;
		skreq->did_complete = 0;

		skreq->fitmsg_id = skmsg->id;

		Dcmn_err(CE_NOTE,
		    "pbuf=%p lba=%u(0x%x) count=%u(0x%x) dir=%x\n",
		    (void *)pbuf, lba, lba, count, count, pbuf->dir);

		/*
		 * Transcode the request into a 10-byte SCSI CDB appended
		 * to the current FIT message.
		 */
		cmd_ptr = &skmsg->msg_buf[skmsg->length];
		bzero(cmd_ptr, 32); /* This is too expensive */

		scsi_req = cmd_ptr;
		scsi_req->hdr.tag = skreq->id;
		scsi_req->hdr.sg_list_dma_address =
		    cpu_to_be64(skreq->sksg_dma_address.cookies->dmac_laddress);
		scsi_req->cdb[1] = 0;
		scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
		scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
		scsi_req->cdb[4] = (lba & 0xff00) >> 8;
		scsi_req->cdb[5] = (lba & 0xff);
		scsi_req->cdb[6] = 0;
		scsi_req->cdb[7] = (count & 0xff00) >> 8;
		scsi_req->cdb[8] = count & 0xff;
		scsi_req->cdb[9] = 0;

		if (pbuf->dir & B_READ) {
			scsi_req->cdb[0] = 0x28;	/* READ(10) */
			skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
		} else {
			scsi_req->cdb[0] = 0x2a;	/* WRITE(10) */
			skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
		}

		skd_blkdev_preop_sg_list(skdev, skreq, &sg_byte_count);

		scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(sg_byte_count);

		/* Re-derive the block count from the actual S/G byte total. */
		bcount = (sg_byte_count + 511) / 512;
		scsi_req->cdb[7] = (bcount & 0xff00) >> 8;
		scsi_req->cdb[8] = bcount & 0xff;

		Dcmn_err(CE_NOTE,
		    "skd_start: pbuf=%p skreq->id=%x opc=%x ====>>>>>",
		    (void *)pbuf, skreq->id, *scsi_req->cdb);

		skmsg->length += sizeof (struct skd_scsi_request);
		fmh->num_protocol_cmds_coalesced++;

		/*
		 * Update the active request counts.
		 * Capture the timeout timestamp.
		 */
		skreq->timeout_stamp = skdev->timeout_stamp;
		timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;

		atomic_inc_32(&skdev->timeout_slot[timo_slot]);
		atomic_inc_32(&skdev->queue_depth_busy);

		Dcmn_err(CE_NOTE, "req=0x%x busy=%d timo_slot=%d",
		    skreq->id, skdev->queue_depth_busy, timo_slot);
		/*
		 * If the FIT msg buffer is full send it.
		 */
		if (skmsg->length >= SKD_N_FITMSG_BYTES ||
		    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {

			atomic_inc_64(&skdev->active_cmds);
			pbuf->skreq = skreq;

			skdev->fitmsg_sent1++;
			skd_send_fitmsg(skdev, skmsg);

			skmsg = NULL;
			fmh = NULL;
		}
	}

	/*
	 * Is a FIT msg in progress? If it is empty put the buffer back
	 * on the free list. If it is non-empty send what we got.
	 * This minimizes latency when there are fewer requests than
	 * what fits in a FIT msg.
	 */
	if (skmsg != NULL) {
		ASSERT(skmsg->length > sizeof (struct fit_msg_hdr));
		Dcmn_err(CE_NOTE, "sending msg=%p, len %d",
		    (void *)skmsg, skmsg->length);

		skdev->active_cmds++;

		skdev->fitmsg_sent2++;
		skd_send_fitmsg(skdev, skmsg);
	}
}
911 910
/*
 *
 * Name: skd_end_request, completes the I/O attached to a request
 *	context and marks the context done.
 *
 * Inputs: skdev	- device state structure.
 *	skreq	- request structure.
 *	error	- I/O error value.
 *
 * Returns: Nothing.
 *
 */
static void
skd_end_request(struct skd_device *skdev,
    struct skd_request_context *skreq, int error)
{
	skdev->ios_completed++;
	/* skd_io_done() frees the pbuf; drop our reference to it. */
	skd_io_done(skdev, skreq->pbuf, error, SKD_IODONE_WIOC);
	skreq->pbuf = NULL;
	skreq->did_complete = 1;
}
932 931
/*
 *
 * Name: skd_end_request_abnormal, completes an I/O that never ran
 *	(no request context involved).
 *
 * Inputs: skdev	- device state structure.
 *	pbuf	- I/O request.
 *	error	- I/O error value.
 *	mode	- completion-path tag for statistics.
 *
 * Returns: Nothing.
 *
 */
static void
skd_end_request_abnormal(skd_device_t *skdev, skd_buf_private_t *pbuf,
    int error, int mode)
{
	skd_io_done(skdev, pbuf, error, mode);
}
951 950
/*
 *
 * Name: skd_request_fn_not_online, handles the condition
 *	of the device not being online.
 *
 * Inputs: skdev	- device state structure.
 *
 * Returns: nothing (void).
 *
 */
static void
skd_request_fn_not_online(skd_device_t *skdev)
{
	int error;
	skd_buf_private_t *pbuf;

	ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

	skd_log_skdev(skdev, "req_not_online");

	/*
	 * Transient states: leave requests queued, they will be started
	 * once the device comes (back) online.  Terminal/unknown states:
	 * fail everything with EIO below.
	 */
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_WAIT_BOOT:
		/*
		 * In case of starting, we haven't started the queue,
		 * so we can't get here... but requests are
		 * possibly hanging out waiting for us because we
		 * reported the dev/skd/0 already. They'll wait
		 * forever if connect doesn't complete.
		 * What to do??? delay dev/skd/0 ??
		 */
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return;

	case SKD_DRVR_STATE_BUSY_SANITIZE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		error = -EIO;
		break;
	}

	/*
	 * If we get here, terminate all pending block requeusts
	 * with EIO and any scsi pass thru with appropriate sense
	 */
	ASSERT(WAITQ_LOCK_HELD(skdev));
	if (SIMPLEQ_EMPTY(&skdev->waitqueue))
		return;

	/* Drain and fail every queued request. */
	while ((pbuf = skd_get_queued_pbuf(skdev)))
		skd_end_request_abnormal(skdev, pbuf, error, SKD_IODONE_WNIOC);

	cv_signal(&skdev->cv_waitq);
}
1015 1014
/*
 * TIMER
 */

static void skd_timer_tick_not_online(struct skd_device *skdev);

/*
 *
 * Name: skd_timer_tick, monitors requests for timeouts.
 *
 * Inputs: skdev	- device state structure.
 *
 * Returns: Nothing.
 *
 */
static void
skd_timer_tick(skd_device_t *skdev)
{
	uint32_t timo_slot;

	skdev->timer_active = 1;

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		skd_timer_tick_not_online(skdev);
		goto timer_func_out;
	}

	/* Advance to the next timeout slot. */
	skdev->timeout_stamp++;
	timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;

	/*
	 * All requests that happened during the previous use of
	 * this slot should be done by now. The previous use was
	 * over 7 seconds ago.
	 */
	if (skdev->timeout_slot[timo_slot] == 0) {
		goto timer_func_out;
	}

	/* Something is overdue */
	Dcmn_err(CE_NOTE, "found %d timeouts, draining busy=%d",
	    skdev->timeout_slot[timo_slot],
	    skdev->queue_depth_busy);
	skdev->timer_countdown = SKD_TIMER_SECONDS(3);
	skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
	skdev->timo_slot = timo_slot;

timer_func_out:
	skdev->timer_active = 0;
}
1066 1065
/*
 *
 * Name: skd_timer_tick_not_online, handles various device
 *	 state transitions while the device is not online.
 *
 * Inputs: skdev		- device state structure.
 *
 * Returns: Nothing.
 *
 */
static void
skd_timer_tick_not_online(struct skd_device *skdev)
{
	Dcmn_err(CE_NOTE, "skd_skd_timer_tick_not_online: state=%d tmo=%d",
	    skdev->state, skdev->timer_countdown);

	/* The online case is handled directly by skd_timer_tick(). */
	ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

	switch (skdev->state) {
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		break;
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		cmn_err(CE_WARN, "!drive busy sanitize[%x], driver[%x]\n",
		    skdev->drive_state, skdev->state);
		break;

	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
		Dcmn_err(CE_NOTE, "busy[%x], countdown=%d\n",
		    skdev->state, skdev->timer_countdown);
		/* Keep waiting while the countdown runs. */
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* Countdown expired without the drive going ready. */
		cmn_err(CE_WARN, "!busy[%x], timedout=%d, restarting device.",
		    skdev->state, skdev->timer_countdown);
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_WAIT_BOOT:
	case SKD_DRVR_STATE_STARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/*
		 * For now, we fault the drive. Could attempt resets to
		 * recover at some point.
		 */
		skdev->state = SKD_DRVR_STATE_FAULT;

		cmn_err(CE_WARN, "!(%s): DriveFault Connect Timeout (%x)",
		    skd_name(skdev), skdev->drive_state);

		/* start the queue so we can respond with error to requests */
		skd_start(skdev);

		/* wakeup anyone waiting for startup complete */
		skdev->gendisk_on = -1;

		cv_signal(&skdev->cv_waitq);
		break;


	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
		break;

	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		cmn_err(CE_WARN,
		    "!%s: draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
		    skdev->name,
		    skdev->timo_slot,
		    skdev->timer_countdown,
		    skdev->queue_depth_busy,
		    skdev->timeout_slot[skdev->timo_slot]);
		/* if the slot has cleared we can let the I/O continue */
		if (skdev->timeout_slot[skdev->timo_slot] == 0) {
			Dcmn_err(CE_NOTE, "Slot drained, starting queue.");
			skdev->state = SKD_DRVR_STATE_ONLINE;
			skd_start(skdev);
			return;
		}
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* Drain countdown expired; force a restart. */
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_RESTARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;

			return;
		}
		/*
		 * For now, we fault the drive. Could attempt resets to
		 * recover at some point.
		 */
		skdev->state = SKD_DRVR_STATE_FAULT;
		cmn_err(CE_WARN, "!(%s): DriveFault Reconnect Timeout (%x)\n",
		    skd_name(skdev), skdev->drive_state);

		/*
		 * Recovering does two things:
		 * 1. completes IO with error
		 * 2. reclaims dma resources
		 * When is it safe to recover requests?
		 * - if the drive state is faulted
		 * - if the state is still soft reset after out timeout
		 * - if the drive registers are dead (state = FF)
		 */

		if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
		    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
		    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK)) {
			/*
			 * It never came out of soft reset. Try to
			 * recover the requests and then let them
			 * fail. This is to mitigate hung processes.
			 *
			 * Acquire the interrupt lock since these lists are
			 * manipulated by interrupt handlers.
			 */
			ASSERT(!WAITQ_LOCK_HELD(skdev));
			INTR_LOCK(skdev);
			skd_recover_requests(skdev);
			INTR_UNLOCK(skdev);
		}
		/* start the queue so we can respond with error to requests */
		skd_start(skdev);
		/* wakeup anyone waiting for startup complete */
		skdev->gendisk_on = -1;
		cv_signal(&skdev->cv_waitq);
		break;

	case SKD_DRVR_STATE_RESUMING:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		break;
	}
}
1215 1214
1216 1215 /*
1217 1216 *
1218 1217 * Name: skd_timer, kicks off the timer processing.
1219 1218 *
1220 1219 * Inputs: skdev - device state structure.
1221 1220 *
1222 1221 * Returns: Nothing.
1223 1222 *
1224 1223 */
1225 1224 static void
1226 1225 skd_timer(void *arg)
1227 1226 {
1228 1227 skd_device_t *skdev = (skd_device_t *)arg;
1229 1228
1230 1229 /* Someone set us to 0, don't bother rescheduling. */
1231 1230 ADAPTER_STATE_LOCK(skdev);
1232 1231 if (skdev->skd_timer_timeout_id != 0) {
1233 1232 ADAPTER_STATE_UNLOCK(skdev);
1234 1233 /* Pardon the drop-and-then-acquire logic here. */
1235 1234 skd_timer_tick(skdev);
1236 1235 ADAPTER_STATE_LOCK(skdev);
1237 1236 /* Restart timer, if not being stopped. */
1238 1237 if (skdev->skd_timer_timeout_id != 0) {
1239 1238 skdev->skd_timer_timeout_id =
1240 1239 timeout(skd_timer, arg, skd_timer_ticks);
1241 1240 }
1242 1241 }
1243 1242 ADAPTER_STATE_UNLOCK(skdev);
1244 1243 }
1245 1244
/*
 *
 * Name: skd_start_timer, kicks off the 1-second timer.
 *
 * Inputs: skdev		- device state structure.
 *
 * Returns: Nothing.
 *
 */
static void
skd_start_timer(struct skd_device *skdev)
{
	/* Start one second driver timer. */
	ADAPTER_STATE_LOCK(skdev);
	/* A zero id is the "timer stopped" sentinel tested by skd_timer(). */
	ASSERT(skdev->skd_timer_timeout_id == 0);

	/*
	 * Do first "timeout tick" right away, but not in this
	 * thread.
	 */
	skdev->skd_timer_timeout_id = timeout(skd_timer, skdev, 1);
	ADAPTER_STATE_UNLOCK(skdev);
}
1269 1268
1270 1269 /*
1271 1270 * INTERNAL REQUESTS -- generated by driver itself
1272 1271 */
1273 1272
1274 1273 /*
1275 1274 *
1276 1275 * Name: skd_format_internal_skspcl, setups the internal
1277 1276 * FIT request message.
1278 1277 *
1279 1278 * Inputs: skdev - device state structure.
1280 1279 *
1281 1280 * Returns: One.
1282 1281 *
1283 1282 */
1284 1283 static int
1285 1284 skd_format_internal_skspcl(struct skd_device *skdev)
1286 1285 {
1287 1286 struct skd_special_context *skspcl = &skdev->internal_skspcl;
1288 1287 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1289 1288 struct fit_msg_hdr *fmh;
1290 1289 uint64_t dma_address;
1291 1290 struct skd_scsi_request *scsi;
1292 1291
1293 1292 fmh = (struct fit_msg_hdr *)&skspcl->msg_buf64[0];
1294 1293 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1295 1294 fmh->num_protocol_cmds_coalesced = 1;
1296 1295
1297 1296 /* Instead of 64-bytes in, use 8-(64-bit-words) for linted alignment. */
1298 1297 scsi = (struct skd_scsi_request *)&skspcl->msg_buf64[8];
1299 1298 bzero(scsi, sizeof (*scsi));
1300 1299 dma_address = skspcl->req.sksg_dma_address.cookies->_dmu._dmac_ll;
1301 1300 scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
1302 1301 sgd->control = FIT_SGD_CONTROL_LAST;
1303 1302 sgd->byte_count = 0;
1304 1303 sgd->host_side_addr = skspcl->db_dma_address.cookies->_dmu._dmac_ll;
1305 1304 sgd->dev_side_addr = 0; /* not used */
1306 1305 sgd->next_desc_ptr = 0LL;
1307 1306
1308 1307 return (1);
1309 1308 }
1310 1309
1311 1310 /*
1312 1311 *
1313 1312 * Name: skd_send_internal_skspcl, send internal requests to
1314 1313 * the hardware.
1315 1314 *
1316 1315 * Inputs: skdev - device state structure.
1317 1316 * skspcl - request structure
1318 1317 * opcode - just what it says
1319 1318 *
1320 1319 * Returns: Nothing.
1321 1320 *
1322 1321 */
1323 1322 void
1324 1323 skd_send_internal_skspcl(struct skd_device *skdev,
1325 1324 struct skd_special_context *skspcl, uint8_t opcode)
1326 1325 {
1327 1326 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1328 1327 struct skd_scsi_request *scsi;
1329 1328
1330 1329 if (SKD_REQ_STATE_IDLE != skspcl->req.state) {
1331 1330 /*
1332 1331 * A refresh is already in progress.
1333 1332 * Just wait for it to finish.
1334 1333 */
1335 1334 return;
1336 1335 }
1337 1336
1338 1337 ASSERT(0 == (skspcl->req.id & SKD_ID_INCR));
1339 1338 skspcl->req.state = SKD_REQ_STATE_BUSY;
1340 1339 skspcl->req.id += SKD_ID_INCR;
1341 1340
1342 1341 /* Instead of 64-bytes in, use 8-(64-bit-words) for linted alignment. */
1343 1342 scsi = (struct skd_scsi_request *)&skspcl->msg_buf64[8];
1344 1343 scsi->hdr.tag = skspcl->req.id;
1345 1344
1346 1345 Dcmn_err(CE_NOTE, "internal skspcl: opcode=%x req.id=%x ==========>",
1347 1346 opcode, skspcl->req.id);
1348 1347
1349 1348 switch (opcode) {
1350 1349 case TEST_UNIT_READY:
1351 1350 scsi->cdb[0] = TEST_UNIT_READY;
1352 1351 scsi->cdb[1] = 0x00;
1353 1352 scsi->cdb[2] = 0x00;
1354 1353 scsi->cdb[3] = 0x00;
1355 1354 scsi->cdb[4] = 0x00;
1356 1355 scsi->cdb[5] = 0x00;
1357 1356 sgd->byte_count = 0;
1358 1357 scsi->hdr.sg_list_len_bytes = 0;
1359 1358 break;
1360 1359 case READ_CAPACITY_EXT:
1361 1360 scsi->cdb[0] = READ_CAPACITY_EXT;
1362 1361 scsi->cdb[1] = 0x10;
1363 1362 scsi->cdb[2] = 0x00;
1364 1363 scsi->cdb[3] = 0x00;
1365 1364 scsi->cdb[4] = 0x00;
1366 1365 scsi->cdb[5] = 0x00;
1367 1366 scsi->cdb[6] = 0x00;
1368 1367 scsi->cdb[7] = 0x00;
1369 1368 scsi->cdb[8] = 0x00;
1370 1369 scsi->cdb[9] = 0x00;
1371 1370 scsi->cdb[10] = 0x00;
1372 1371 scsi->cdb[11] = 0x00;
1373 1372 scsi->cdb[12] = 0x00;
1374 1373 scsi->cdb[13] = 0x20;
1375 1374 scsi->cdb[14] = 0x00;
1376 1375 scsi->cdb[15] = 0x00;
1377 1376 sgd->byte_count = SKD_N_READ_CAP_EXT_BYTES;
1378 1377 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1379 1378 break;
1380 1379 case 0x28:
1381 1380 (void) memset(skspcl->data_buf, 0x65, SKD_N_INTERNAL_BYTES);
1382 1381
1383 1382 scsi->cdb[0] = 0x28;
1384 1383 scsi->cdb[1] = 0x00;
1385 1384 scsi->cdb[2] = 0x00;
1386 1385 scsi->cdb[3] = 0x00;
1387 1386 scsi->cdb[4] = 0x00;
1388 1387 scsi->cdb[5] = 0x00;
1389 1388 scsi->cdb[6] = 0x00;
1390 1389 scsi->cdb[7] = 0x00;
1391 1390 scsi->cdb[8] = 0x01;
1392 1391 scsi->cdb[9] = 0x00;
1393 1392 sgd->byte_count = SKD_N_INTERNAL_BYTES;
1394 1393 scsi->hdr.sg_list_len_bytes = cpu_to_be32(SKD_N_INTERNAL_BYTES);
1395 1394 break;
1396 1395 case INQUIRY:
1397 1396 scsi->cdb[0] = INQUIRY;
1398 1397 scsi->cdb[1] = 0x01; /* evpd */
1399 1398 scsi->cdb[2] = 0x80; /* serial number page */
1400 1399 scsi->cdb[3] = 0x00;
1401 1400 scsi->cdb[4] = 0x10;
1402 1401 scsi->cdb[5] = 0x00;
1403 1402 sgd->byte_count = 16; /* SKD_N_INQ_BYTES */;
1404 1403 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1405 1404 break;
1406 1405 case INQUIRY2:
1407 1406 scsi->cdb[0] = INQUIRY;
1408 1407 scsi->cdb[1] = 0x00;
1409 1408 scsi->cdb[2] = 0x00; /* serial number page */
1410 1409 scsi->cdb[3] = 0x00;
1411 1410 scsi->cdb[4] = 0x24;
1412 1411 scsi->cdb[5] = 0x00;
1413 1412 sgd->byte_count = 36; /* SKD_N_INQ_BYTES */;
1414 1413 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1415 1414 break;
1416 1415 case SYNCHRONIZE_CACHE:
1417 1416 scsi->cdb[0] = SYNCHRONIZE_CACHE;
1418 1417 scsi->cdb[1] = 0x00;
1419 1418 scsi->cdb[2] = 0x00;
1420 1419 scsi->cdb[3] = 0x00;
1421 1420 scsi->cdb[4] = 0x00;
1422 1421 scsi->cdb[5] = 0x00;
1423 1422 scsi->cdb[6] = 0x00;
1424 1423 scsi->cdb[7] = 0x00;
1425 1424 scsi->cdb[8] = 0x00;
1426 1425 scsi->cdb[9] = 0x00;
1427 1426 sgd->byte_count = 0;
1428 1427 scsi->hdr.sg_list_len_bytes = 0;
1429 1428 break;
1430 1429 default:
1431 1430 ASSERT("Don't know what to send");
1432 1431 return;
1433 1432
1434 1433 }
1435 1434
1436 1435 skd_send_special_fitmsg(skdev, skspcl);
1437 1436 }
1438 1437
1439 1438 /*
1440 1439 *
1441 1440 * Name: skd_refresh_device_data, sends a TUR command.
1442 1441 *
1443 1442 * Inputs: skdev - device state structure.
1444 1443 *
1445 1444 * Returns: Nothing.
1446 1445 *
1447 1446 */
1448 1447 static void
1449 1448 skd_refresh_device_data(struct skd_device *skdev)
1450 1449 {
1451 1450 struct skd_special_context *skspcl = &skdev->internal_skspcl;
1452 1451
1453 1452 Dcmn_err(CE_NOTE, "refresh_device_data: state=%d", skdev->state);
1454 1453
1455 1454 skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
1456 1455 }
1457 1456
1458 1457 /*
1459 1458 *
1460 1459 * Name: skd_complete_internal, handles the completion of
1461 1460 * driver-initiated I/O requests.
1462 1461 *
1463 1462 * Inputs: skdev - device state structure.
1464 1463 * skcomp - completion structure.
1465 1464 * skerr - error structure.
1466 1465 * skspcl - request structure.
1467 1466 *
1468 1467 * Returns: Nothing.
1469 1468 *
1470 1469 */
1471 1470 /* ARGSUSED */ /* Upstream common source with other platforms. */
1472 1471 static void
1473 1472 skd_complete_internal(struct skd_device *skdev,
1474 1473 volatile struct fit_completion_entry_v1 *skcomp,
1475 1474 volatile struct fit_comp_error_info *skerr,
1476 1475 struct skd_special_context *skspcl)
1477 1476 {
1478 1477 uint8_t *buf = skspcl->data_buf;
1479 1478 uint8_t status = 2;
1480 1479 /* Instead of 64-bytes in, use 8-(64-bit-words) for linted alignment. */
1481 1480 struct skd_scsi_request *scsi =
1482 1481 (struct skd_scsi_request *)&skspcl->msg_buf64[8];
1483 1482
1484 1483 ASSERT(skspcl == &skdev->internal_skspcl);
1485 1484
1486 1485 (void) ddi_dma_sync(skspcl->db_dma_address.dma_handle, 0, 0,
1487 1486 DDI_DMA_SYNC_FORKERNEL);
1488 1487 (void) ddi_dma_sync(skspcl->mb_dma_address.dma_handle, 0, 0,
1489 1488 DDI_DMA_SYNC_FORKERNEL);
1490 1489
1491 1490 Dcmn_err(CE_NOTE, "complete internal %x", scsi->cdb[0]);
1492 1491
1493 1492 skspcl->req.completion = *skcomp;
1494 1493 skspcl->req.state = SKD_REQ_STATE_IDLE;
1495 1494 skspcl->req.id += SKD_ID_INCR;
1496 1495
1497 1496 status = skspcl->req.completion.status;
1498 1497
1499 1498 Dcmn_err(CE_NOTE, "<<<<====== complete_internal: opc=%x", *scsi->cdb);
1500 1499
1501 1500 switch (scsi->cdb[0]) {
1502 1501 case TEST_UNIT_READY:
1503 1502 if (SAM_STAT_GOOD == status) {
1504 1503 skd_send_internal_skspcl(skdev, skspcl,
1505 1504 READ_CAPACITY_EXT);
1506 1505 } else {
1507 1506 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1508 1507 cmn_err(CE_WARN,
1509 1508 "!%s: TUR failed, don't send anymore"
1510 1509 "state 0x%x", skdev->name, skdev->state);
1511 1510
1512 1511 return;
1513 1512 }
1514 1513
1515 1514 Dcmn_err(CE_NOTE, "%s: TUR failed, retry skerr",
1516 1515 skdev->name);
1517 1516 skd_send_internal_skspcl(skdev, skspcl, 0x00);
1518 1517 }
1519 1518 break;
1520 1519 case READ_CAPACITY_EXT: {
1521 1520 uint64_t cap, Nblocks;
1522 1521 uint64_t xbuf[1];
1523 1522
1524 1523 skdev->read_cap_is_valid = 0;
1525 1524 if (SAM_STAT_GOOD == status) {
1526 1525 bcopy(buf, xbuf, 8);
1527 1526 cap = be64_to_cpu(*xbuf);
1528 1527 skdev->read_cap_last_lba = cap;
1529 1528 skdev->read_cap_blocksize =
1530 1529 (buf[8] << 24) | (buf[9] << 16) |
1531 1530 (buf[10] << 8) | buf[11];
1532 1531
1533 1532 cap *= skdev->read_cap_blocksize;
1534 1533 Dcmn_err(CE_NOTE, " Last LBA: %" PRIu64 " (0x%" PRIx64
1535 1534 "), blk sz: %d, Capacity: %" PRIu64 "GB\n",
1536 1535 skdev->read_cap_last_lba,
1537 1536 skdev->read_cap_last_lba,
1538 1537 skdev->read_cap_blocksize,
1539 1538 cap >> 30ULL);
1540 1539
1541 1540 Nblocks = skdev->read_cap_last_lba + 1;
1542 1541
1543 1542 skdev->Nblocks = Nblocks;
1544 1543 skdev->read_cap_is_valid = 1;
1545 1544
1546 1545 skd_send_internal_skspcl(skdev, skspcl, INQUIRY2);
1547 1546
1548 1547 } else {
1549 1548 Dcmn_err(CE_NOTE, "**** READCAP failed, retry TUR");
1550 1549 skd_send_internal_skspcl(skdev, skspcl,
1551 1550 TEST_UNIT_READY);
1552 1551 }
1553 1552 break;
1554 1553 }
1555 1554 case INQUIRY:
1556 1555 skdev->inquiry_is_valid = 0;
1557 1556 if (SAM_STAT_GOOD == status) {
1558 1557 skdev->inquiry_is_valid = 1;
1559 1558
1560 1559 if (scsi->cdb[1] == 0x1) {
1561 1560 bcopy(&buf[4], skdev->inq_serial_num, 12);
1562 1561 skdev->inq_serial_num[12] = '\0';
1563 1562 } else {
1564 1563 char *tmp = skdev->inq_vendor_id;
1565 1564
1566 1565 bcopy(&buf[8], tmp, 8);
1567 1566 tmp[8] = '\0';
1568 1567
1569 1568 tmp = skdev->inq_product_id;
1570 1569 bcopy(&buf[16], tmp, 16);
1571 1570 tmp[16] = '\0';
1572 1571
1573 1572 tmp = skdev->inq_product_rev;
1574 1573 bcopy(&buf[32], tmp, 4);
1575 1574 tmp[4] = '\0';
1576 1575 }
1577 1576 }
1578 1577
1579 1578 if (skdev->state != SKD_DRVR_STATE_ONLINE)
1580 1579 if (skd_unquiesce_dev(skdev) < 0)
1581 1580 cmn_err(CE_NOTE, "** failed, to ONLINE device");
1582 1581 break;
1583 1582 case SYNCHRONIZE_CACHE:
1584 1583 skdev->sync_done = (SAM_STAT_GOOD == status) ? 1 : -1;
1585 1584
1586 1585 cv_signal(&skdev->cv_waitq);
1587 1586 break;
1588 1587
1589 1588 default:
1590 1589 ASSERT("we didn't send this");
1591 1590 }
1592 1591 }
1593 1592
1594 1593 /*
1595 1594 * FIT MESSAGES
1596 1595 */
1597 1596
/*
 *
 * Name: skd_send_fitmsg, send a FIT message to the hardware.
 *
 * Inputs: skdev		- device state structure.
 *	   skmsg		- FIT message structure.
 *
 * Returns: Nothing.
 *
 */
/* ARGSUSED */	/* Upstream common source with other platforms. */
static void
skd_send_fitmsg(struct skd_device *skdev,
    struct skd_fitmsg_context *skmsg)
{
	uint64_t qcmd;
	struct fit_msg_hdr *fmh;

	Dcmn_err(CE_NOTE, "msgbuf's DMA addr: 0x%" PRIx64 ", qdepth_busy=%d",
	    skmsg->mb_dma_address.cookies->dmac_laddress,
	    skdev->queue_depth_busy);

	Dcmn_err(CE_NOTE, "msg_buf 0x%p, offset %x", (void *)skmsg->msg_buf,
	    skmsg->offset);

	/* The doorbell value is the message buffer DMA address plus flags. */
	qcmd = skmsg->mb_dma_address.cookies->dmac_laddress;
	qcmd |= FIT_QCMD_QID_NORMAL;

	fmh = (struct fit_msg_hdr *)skmsg->msg_buf64;
	/* One completion is expected per coalesced protocol command. */
	skmsg->outstanding = fmh->num_protocol_cmds_coalesced;

	if (skdev->dbg_level > 1) {
		/* Debug-only hex dump of the outgoing message. */
		uint8_t *bp = skmsg->msg_buf;
		int i;

		for (i = 0; i < skmsg->length; i += 8) {
			Dcmn_err(CE_NOTE, "  msg[%2d] %02x %02x %02x %02x "
			    "%02x %02x %02x %02x",
			    i, bp[i + 0], bp[i + 1], bp[i + 2],
			    bp[i + 3], bp[i + 4], bp[i + 5],
			    bp[i + 6], bp[i + 7]);
			/* After the first 8 bytes, skip ahead to offset 64. */
			if (i == 0) i = 64 - 8;
		}
	}

	/* Flush the message to the device before ringing the doorbell. */
	(void) ddi_dma_sync(skmsg->mb_dma_address.dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/* Encode the message size bracket into the doorbell value. */
	ASSERT(skmsg->length > sizeof (struct fit_msg_hdr));
	if (skmsg->length > 256) {
		qcmd |= FIT_QCMD_MSGSIZE_512;
	} else if (skmsg->length > 128) {
		qcmd |= FIT_QCMD_MSGSIZE_256;
	} else if (skmsg->length > 64) {
		qcmd |= FIT_QCMD_MSGSIZE_128;
	}

	skdev->ios_started++;

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
1659 1658
/*
 *
 * Name: skd_send_special_fitmsg, send a special FIT message
 *	 to the hardware used driver-originated I/O requests.
 *
 * Inputs: skdev		- device state structure.
 *	   skspcl		- skspcl structure.
 *
 * Returns: Nothing.
 *
 */
static void
skd_send_special_fitmsg(struct skd_device *skdev,
    struct skd_special_context *skspcl)
{
	uint64_t qcmd;

	Dcmn_err(CE_NOTE, "send_special_fitmsg: pt 1");

	if (skdev->dbg_level > 1) {
		/* Debug-only dump of the message and its SG list. */
		uint8_t *bp = skspcl->msg_buf;
		int i;

		for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
			cmn_err(CE_NOTE,
			    "  spcl[%2d] %02x %02x %02x %02x  "
			    "%02x %02x %02x %02x\n", i,
			    bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
			    bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
			/* After the first 8 bytes, skip ahead to offset 64. */
			if (i == 0) i = 64 - 8;
		}

		for (i = 0; i < skspcl->req.n_sg; i++) {
			struct fit_sg_descriptor *sgd =
			    &skspcl->req.sksg_list[i];

			cmn_err(CE_NOTE, "  sg[%d] count=%u ctrl=0x%x "
			    "addr=0x%" PRIx64 " next=0x%" PRIx64,
			    i, sgd->byte_count, sgd->control,
			    sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	/*
	 * Flush both the message buffer and the data buffer to the
	 * device before ringing the doorbell.
	 */
	(void) ddi_dma_sync(skspcl->mb_dma_address.dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);
	(void) ddi_dma_sync(skspcl->db_dma_address.dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
	 * and one 64-byte SSDI command.
	 */
	qcmd = skspcl->mb_dma_address.cookies->dmac_laddress;

	qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
1718 1717
1719 1718 /*
1720 1719 * COMPLETION QUEUE
1721 1720 */
1722 1721
1723 1722 static void skd_complete_other(struct skd_device *skdev,
1724 1723 volatile struct fit_completion_entry_v1 *skcomp,
1725 1724 volatile struct fit_comp_error_info *skerr);
1726 1725
/*
 * One row of the sense-data match table consulted by skd_check_status().
 * The mask selects which fields must match; bit assignments (taken from
 * the tests in skd_check_status): 0x10=type, 0x08=stat, 0x04=key,
 * 0x02=asc, 0x01=ascq.
 */
struct sns_info {
	uint8_t type;	/* sense response code (skerr->type) */
	uint8_t stat;	/* SCSI completion status */
	uint8_t key;	/* sense key */
	uint8_t asc;	/* additional sense code (skerr->code) */
	uint8_t ascq;	/* additional sense code qualifier (skerr->qual) */
	uint8_t mask;	/* which of the above to compare */
	enum skd_check_status_action action;	/* disposition when matched */
};

/*
 * Entries are scanned in order by skd_check_status(); the first match
 * determines the action taken for the completed request.
 */
static struct sns_info skd_chkstat_table[] = {
	/* Good */
	{0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c, SKD_CHECK_STATUS_REPORT_GOOD},

	/* Smart alerts */
	{0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
	    SKD_CHECK_STATUS_REPORT_SMART_ALERT},
	{0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
	    SKD_CHECK_STATUS_REPORT_SMART_ALERT},
	{0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temp over trigger */
	    SKD_CHECK_STATUS_REPORT_SMART_ALERT},

	/* Retry (with limits) */
	{0x70, 0x02, ABORTED_COMMAND, 0, 0, 0x1C, /* DMA errors */
	    SKD_CHECK_STATUS_REQUEUE_REQUEST},
	{0x70, 0x02, UNIT_ATTENTION, 0x0B, 0x00, 0x1E, /* warnings */
	    SKD_CHECK_STATUS_REQUEUE_REQUEST},
	{0x70, 0x02, UNIT_ATTENTION, 0x5D, 0x00, 0x1E, /* thresholds */
	    SKD_CHECK_STATUS_REQUEUE_REQUEST},
	{0x70, 0x02, UNIT_ATTENTION, 0x80, 0x30, 0x1F, /* backup power */
	    SKD_CHECK_STATUS_REQUEUE_REQUEST},

	/* Busy (or about to be) */
	{0x70, 0x02, UNIT_ATTENTION, 0x3f, 0x01, 0x1F, /* fw changed */
	    SKD_CHECK_STATUS_BUSY_IMMINENT},
};
1763 1762
1764 1763 /*
1765 1764 *
1766 1765 * Name: skd_check_status, checks the return status from a
1767 1766 * completed I/O request.
1768 1767 *
1769 1768 * Inputs: skdev - device state structure.
1770 1769 * cmp_status - SCSI status byte.
1771 1770 * skerr - the error data structure.
1772 1771 *
1773 1772 * Returns: Depending on the error condition, return the action
1774 1773 * to be taken as specified in the skd_chkstat_table.
1775 1774 * If no corresponding value is found in the table
1776 1775 * return SKD_CHECK_STATUS_REPORT_GOOD is no error otherwise
1777 1776 * return SKD_CHECK_STATUS_REPORT_ERROR.
1778 1777 *
1779 1778 */
1780 1779 static enum skd_check_status_action
1781 1780 skd_check_status(struct skd_device *skdev, uint8_t cmp_status,
1782 1781 volatile struct fit_comp_error_info *skerr)
1783 1782 {
1784 1783 /*
1785 1784 * Look up status and sense data to decide how to handle the error
1786 1785 * from the device.
1787 1786 * mask says which fields must match e.g., mask=0x18 means check
1788 1787 * type and stat, ignore key, asc, ascq.
1789 1788 */
1790 1789 int i, n;
1791 1790
1792 1791 Dcmn_err(CE_NOTE, "(%s): key/asc/ascq %02x/%02x/%02x",
1793 1792 skd_name(skdev), skerr->key, skerr->code, skerr->qual);
1794 1793
1795 1794 Dcmn_err(CE_NOTE, "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x",
1796 1795 skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual);
1797 1796
1798 1797 /* Does the info match an entry in the good category? */
1799 1798 n = sizeof (skd_chkstat_table) / sizeof (skd_chkstat_table[0]);
1800 1799 for (i = 0; i < n; i++) {
1801 1800 struct sns_info *sns = &skd_chkstat_table[i];
1802 1801
1803 1802 if (sns->mask & 0x10)
1804 1803 if (skerr->type != sns->type) continue;
1805 1804
1806 1805 if (sns->mask & 0x08)
1807 1806 if (cmp_status != sns->stat) continue;
1808 1807
1809 1808 if (sns->mask & 0x04)
1810 1809 if (skerr->key != sns->key) continue;
1811 1810
1812 1811 if (sns->mask & 0x02)
1813 1812 if (skerr->code != sns->asc) continue;
1814 1813
1815 1814 if (sns->mask & 0x01)
1816 1815 if (skerr->qual != sns->ascq) continue;
1817 1816
1818 1817 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
1819 1818 cmn_err(CE_WARN, "!(%s):SMART Alert: sense key/asc/ascq"
1820 1819 " %02x/%02x/%02x",
1821 1820 skd_name(skdev), skerr->key,
1822 1821 skerr->code, skerr->qual);
1823 1822 }
1824 1823
1825 1824 Dcmn_err(CE_NOTE, "skd_check_status: returning %x",
1826 1825 sns->action);
1827 1826
1828 1827 return (sns->action);
1829 1828 }
1830 1829
1831 1830 /*
1832 1831 * No other match, so nonzero status means error,
1833 1832 * zero status means good
1834 1833 */
1835 1834 if (cmp_status) {
1836 1835 cmn_err(CE_WARN,
1837 1836 "!%s: status check: qdepth=%d skmfl=%p (%d) skrfl=%p (%d)",
1838 1837 skdev->name,
1839 1838 skdev->queue_depth_busy,
1840 1839 (void *)skdev->skmsg_free_list, skd_list_skmsg(skdev, 0),
1841 1840 (void *)skdev->skreq_free_list, skd_list_skreq(skdev, 0));
1842 1841
1843 1842 cmn_err(CE_WARN, "!%s: t=%02x stat=%02x k=%02x c=%02x q=%02x",
1844 1843 skdev->name, skerr->type, cmp_status, skerr->key,
1845 1844 skerr->code, skerr->qual);
1846 1845
1847 1846 return (SKD_CHECK_STATUS_REPORT_ERROR);
1848 1847 }
1849 1848
1850 1849 Dcmn_err(CE_NOTE, "status check good default");
1851 1850
1852 1851 return (SKD_CHECK_STATUS_REPORT_GOOD);
1853 1852 }
1854 1853
1855 1854 /*
1856 1855 *
1857 1856 * Name: skd_isr_completion_posted, handles I/O completions.
1858 1857 *
1859 1858 * Inputs: skdev - device state structure.
1860 1859 *
1861 1860 * Returns: Nothing.
1862 1861 *
1863 1862 */
1864 1863 static void
1865 1864 skd_isr_completion_posted(struct skd_device *skdev)
1866 1865 {
1867 1866 volatile struct fit_completion_entry_v1 *skcmp = NULL;
1868 1867 volatile struct fit_comp_error_info *skerr;
1869 1868 struct skd_fitmsg_context *skmsg;
1870 1869 struct skd_request_context *skreq;
1871 1870 skd_buf_private_t *pbuf;
1872 1871 uint16_t req_id;
1873 1872 uint32_t req_slot;
1874 1873 uint32_t timo_slot;
1875 1874 uint32_t msg_slot;
1876 1875 uint16_t cmp_cntxt = 0;
1877 1876 uint8_t cmp_status = 0;
1878 1877 uint8_t cmp_cycle = 0;
1879 1878 uint32_t cmp_bytes = 0;
1880 1879
1881 1880 (void) ddi_dma_sync(skdev->cq_dma_address.dma_handle, 0, 0,
1882 1881 DDI_DMA_SYNC_FORKERNEL);
1883 1882
1884 1883 for (;;) {
1885 1884 ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
1886 1885
1887 1886 WAITQ_LOCK(skdev);
1888 1887
1889 1888 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
1890 1889 cmp_cycle = skcmp->cycle;
1891 1890 cmp_cntxt = skcmp->tag;
1892 1891 cmp_status = skcmp->status;
1893 1892 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
1894 1893
1895 1894 skerr = &skdev->skerr_table[skdev->skcomp_ix];
1896 1895
1897 1896 Dcmn_err(CE_NOTE,
1898 1897 "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
1899 1898 "qdepth_busy=%d rbytes=0x%x proto=%d",
1900 1899 skdev->skcomp_cycle, skdev->skcomp_ix,
1901 1900 cmp_cycle, cmp_cntxt, cmp_status,
1902 1901 skdev->queue_depth_busy, cmp_bytes, skdev->proto_ver);
1903 1902
1904 1903 if (cmp_cycle != skdev->skcomp_cycle) {
1905 1904 Dcmn_err(CE_NOTE, "%s:end of completions", skdev->name);
1906 1905
1907 1906 WAITQ_UNLOCK(skdev);
1908 1907 break;
1909 1908 }
1910 1909
1911 1910
1912 1911 skdev->n_req++;
1913 1912
1914 1913 /*
1915 1914 * Update the completion queue head index and possibly
1916 1915 * the completion cycle count.
1917 1916 */
1918 1917 skdev->skcomp_ix++;
1919 1918 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
1920 1919 skdev->skcomp_ix = 0;
1921 1920 skdev->skcomp_cycle++; /* 8-bit wrap-around */
1922 1921 }
1923 1922
1924 1923
1925 1924 /*
1926 1925 * The command context is a unique 32-bit ID. The low order
1927 1926 * bits help locate the request. The request is usually a
1928 1927 * r/w request (see skd_start() above) or a special request.
1929 1928 */
1930 1929 req_id = cmp_cntxt;
1931 1930 req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
1932 1931
1933 1932 Dcmn_err(CE_NOTE,
1934 1933 "<<<< completion_posted 1: req_id=%x req_slot=%x",
1935 1934 req_id, req_slot);
1936 1935
1937 1936 /* Is this other than a r/w request? */
1938 1937 if (req_slot >= skdev->num_req_context) {
1939 1938 /*
1940 1939 * This is not a completion for a r/w request.
1941 1940 */
1942 1941 skd_complete_other(skdev, skcmp, skerr);
1943 1942 WAITQ_UNLOCK(skdev);
1944 1943 continue;
1945 1944 }
1946 1945
1947 1946 skreq = &skdev->skreq_table[req_slot];
1948 1947
1949 1948 /*
1950 1949 * Make sure the request ID for the slot matches.
1951 1950 */
1952 1951 ASSERT(skreq->id == req_id);
1953 1952
1954 1953 if (SKD_REQ_STATE_ABORTED == skreq->state) {
1955 1954 Dcmn_err(CE_NOTE, "reclaim req %p id=%04x\n",
1956 1955 (void *)skreq, skreq->id);
1957 1956 /*
1958 1957 * a previously timed out command can
1959 1958 * now be cleaned up
1960 1959 */
1961 1960 msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
1962 1961 ASSERT(msg_slot < skdev->num_fitmsg_context);
1963 1962 skmsg = &skdev->skmsg_table[msg_slot];
1964 1963 if (skmsg->id == skreq->fitmsg_id) {
1965 1964 ASSERT(skmsg->outstanding > 0);
1966 1965 skmsg->outstanding--;
1967 1966 if (skmsg->outstanding == 0) {
1968 1967 ASSERT(SKD_MSG_STATE_BUSY ==
1969 1968 skmsg->state);
1970 1969 skmsg->state = SKD_MSG_STATE_IDLE;
1971 1970 skmsg->id += SKD_ID_INCR;
1972 1971 skmsg->next = skdev->skmsg_free_list;
1973 1972 skdev->skmsg_free_list = skmsg;
1974 1973 }
1975 1974 }
1976 1975 /*
1977 1976 * Reclaim the skd_request_context
1978 1977 */
1979 1978 skreq->state = SKD_REQ_STATE_IDLE;
1980 1979 skreq->id += SKD_ID_INCR;
1981 1980 skreq->next = skdev->skreq_free_list;
1982 1981 skdev->skreq_free_list = skreq;
1983 1982 WAITQ_UNLOCK(skdev);
1984 1983 continue;
1985 1984 }
1986 1985
1987 1986 skreq->completion.status = cmp_status;
1988 1987
1989 1988 pbuf = skreq->pbuf;
1990 1989 ASSERT(pbuf != NULL);
1991 1990
1992 1991 Dcmn_err(CE_NOTE, "<<<< completion_posted 2: pbuf=%p "
1993 1992 "req_id=%x req_slot=%x", (void *)pbuf, req_id, req_slot);
1994 1993 if (cmp_status && skdev->disks_initialized) {
1995 1994 cmn_err(CE_WARN, "!%s: "
1996 1995 "I/O err: pbuf=%p blkno=%lld (%llx) nbklks=%ld ",
1997 1996 skdev->name, (void *)pbuf, pbuf->x_xfer->x_blkno,
1998 1997 pbuf->x_xfer->x_blkno, pbuf->x_xfer->x_nblks);
1999 1998 }
2000 1999
2001 2000 ASSERT(skdev->active_cmds);
2002 2001 atomic_dec_64(&skdev->active_cmds);
2003 2002
2004 2003 if (SAM_STAT_GOOD == cmp_status) {
2005 2004 /* Release DMA resources for the request. */
2006 2005 if (pbuf->x_xfer->x_nblks != 0)
2007 2006 skd_blkdev_postop_sg_list(skdev, skreq);
2008 2007 WAITQ_UNLOCK(skdev);
2009 2008 skd_end_request(skdev, skreq, 0);
2010 2009 WAITQ_LOCK(skdev);
2011 2010 } else {
2012 2011 switch (skd_check_status(skdev, cmp_status, skerr)) {
2013 2012 case SKD_CHECK_STATUS_REPORT_GOOD:
2014 2013 case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
2015 2014 WAITQ_UNLOCK(skdev);
2016 2015 skd_end_request(skdev, skreq, 0);
2017 2016 WAITQ_LOCK(skdev);
2018 2017 break;
2019 2018
2020 2019 case SKD_CHECK_STATUS_BUSY_IMMINENT:
2021 2020 skd_log_skreq(skdev, skreq, "retry(busy)");
2022 2021 skd_queue(skdev, pbuf);
2023 2022 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
2024 2023 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2025 2024
2026 2025 (void) skd_quiesce_dev(skdev);
2027 2026 break;
2028 2027
2029 2028 /* FALLTHRU */
2030 2029 case SKD_CHECK_STATUS_REPORT_ERROR:
2031 2030 /* fall thru to report error */
2032 2031 default:
2033 2032 /*
2034 2033 * Save the entire completion
2035 2034 * and error entries for
2036 2035 * later error interpretation.
2037 2036 */
2038 2037 skreq->completion = *skcmp;
2039 2038 skreq->err_info = *skerr;
2040 2039 WAITQ_UNLOCK(skdev);
2041 2040 skd_end_request(skdev, skreq, -EIO);
2042 2041 WAITQ_LOCK(skdev);
2043 2042 break;
2044 2043 }
2045 2044 }
2046 2045
2047 2046 /*
2048 2047 * Reclaim the FIT msg buffer if this is
2049 2048 * the first of the requests it carried to
2050 2049 * be completed. The FIT msg buffer used to
2051 2050 * send this request cannot be reused until
2052 2051 * we are sure the s1120 card has copied
2053 2052 * it to its memory. The FIT msg might have
2054 2053 * contained several requests. As soon as
2055 2054 * any of them are completed we know that
2056 2055 * the entire FIT msg was transferred.
2057 2056 * Only the first completed request will
2058 2057 * match the FIT msg buffer id. The FIT
2059 2058 * msg buffer id is immediately updated.
2060 2059 * When subsequent requests complete the FIT
2061 2060 * msg buffer id won't match, so we know
2062 2061 * quite cheaply that it is already done.
2063 2062 */
2064 2063 msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
2065 2064
2066 2065 ASSERT(msg_slot < skdev->num_fitmsg_context);
2067 2066 skmsg = &skdev->skmsg_table[msg_slot];
2068 2067 if (skmsg->id == skreq->fitmsg_id) {
2069 2068 ASSERT(SKD_MSG_STATE_BUSY == skmsg->state);
2070 2069 skmsg->state = SKD_MSG_STATE_IDLE;
2071 2070 skmsg->id += SKD_ID_INCR;
2072 2071 skmsg->next = skdev->skmsg_free_list;
2073 2072 skdev->skmsg_free_list = skmsg;
2074 2073 }
2075 2074
2076 2075 /*
2077 2076 * Decrease the number of active requests.
2078 2077 * This also decrements the count in the
2079 2078 * timeout slot.
2080 2079 */
2081 2080 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
2082 2081 ASSERT(skdev->timeout_slot[timo_slot] > 0);
2083 2082 ASSERT(skdev->queue_depth_busy > 0);
2084 2083
2085 2084 atomic_dec_32(&skdev->timeout_slot[timo_slot]);
2086 2085 atomic_dec_32(&skdev->queue_depth_busy);
2087 2086
2088 2087 /*
2089 2088 * Reclaim the skd_request_context
2090 2089 */
2091 2090 skreq->state = SKD_REQ_STATE_IDLE;
2092 2091 skreq->id += SKD_ID_INCR;
2093 2092 skreq->next = skdev->skreq_free_list;
2094 2093 skdev->skreq_free_list = skreq;
2095 2094
2096 2095 WAITQ_UNLOCK(skdev);
2097 2096
2098 2097 /*
2099 2098 * make sure the lock is held by caller.
2100 2099 */
2101 2100 if ((skdev->state == SKD_DRVR_STATE_PAUSING) &&
2102 2101 (0 == skdev->queue_depth_busy)) {
2103 2102 skdev->state = SKD_DRVR_STATE_PAUSED;
2104 2103 cv_signal(&skdev->cv_waitq);
2105 2104 }
2106 2105 } /* for(;;) */
2107 2106 }
2108 2107
2109 2108 /*
2110 2109 *
2111 2110 * Name: skd_complete_other, handle the completion of a
2112 2111 * non-r/w request.
2113 2112 *
2114 2113 * Inputs: skdev - device state structure.
2115 2114 * skcomp - FIT completion structure.
2116 2115 * skerr - error structure.
2117 2116 *
2118 2117 * Returns: Nothing.
2119 2118 *
2120 2119 */
2121 2120 static void
2122 2121 skd_complete_other(struct skd_device *skdev,
2123 2122 volatile struct fit_completion_entry_v1 *skcomp,
2124 2123 volatile struct fit_comp_error_info *skerr)
2125 2124 {
2126 2125 uint32_t req_id = 0;
2127 2126 uint32_t req_table;
2128 2127 uint32_t req_slot;
2129 2128 struct skd_special_context *skspcl;
2130 2129
2131 2130 req_id = skcomp->tag;
2132 2131 req_table = req_id & SKD_ID_TABLE_MASK;
2133 2132 req_slot = req_id & SKD_ID_SLOT_MASK;
2134 2133
2135 2134 Dcmn_err(CE_NOTE, "complete_other: table=0x%x id=0x%x slot=%d",
2136 2135 req_table, req_id, req_slot);
2137 2136
2138 2137 /*
2139 2138 * Based on the request id, determine how to dispatch this completion.
2140 2139 * This swich/case is finding the good cases and forwarding the
2141 2140 * completion entry. Errors are reported below the switch.
2142 2141 */
2143 2142 ASSERT(req_table == SKD_ID_INTERNAL);
2144 2143 ASSERT(req_slot == 0);
2145 2144
2146 2145 skspcl = &skdev->internal_skspcl;
2147 2146 ASSERT(skspcl->req.id == req_id);
2148 2147 ASSERT(skspcl->req.state == SKD_REQ_STATE_BUSY);
2149 2148
2150 2149 Dcmn_err(CE_NOTE, "<<<<== complete_other: ID_INTERNAL");
2151 2150 skd_complete_internal(skdev, skcomp, skerr, skspcl);
2152 2151 }
2153 2152
2154 2153 /*
2155 2154 *
2156 2155 * Name: skd_reset_skcomp, does what it says, resetting completion
2157 2156 * tables.
2158 2157 *
2159 2158 * Inputs: skdev - device state structure.
2160 2159 *
2161 2160 * Returns: Nothing.
2162 2161 *
2163 2162 */
2164 2163 static void
2165 2164 skd_reset_skcomp(struct skd_device *skdev)
2166 2165 {
2167 2166 uint32_t nbytes;
2168 2167
2169 2168 nbytes = sizeof (struct fit_completion_entry_v1) *
2170 2169 SKD_N_COMPLETION_ENTRY;
2171 2170 nbytes += sizeof (struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
2172 2171
2173 2172 if (skdev->skcomp_table)
2174 2173 bzero(skdev->skcomp_table, nbytes);
2175 2174
2176 2175 skdev->skcomp_ix = 0;
2177 2176 skdev->skcomp_cycle = 1;
2178 2177 }
2179 2178
2180 2179
2181 2180
2182 2181 /*
2183 2182 * INTERRUPTS
2184 2183 */
2185 2184
/*
 *
 * Name: skd_isr_aif, handles the device interrupts.
 *
 * Inputs: arg - skdev device state structure.
 *	   intvec - not referenced
 *
 * Returns: DDI_INTR_CLAIMED if interrupt is handled otherwise
 *	    return DDI_INTR_UNCLAIMED.
 *
 */
/* ARGSUSED */	/* Upstream common source with other platforms. */
static uint_t
skd_isr_aif(caddr_t arg, caddr_t intvec)
{
	uint32_t intstat;
	uint32_t ack;
	int rc = DDI_INTR_UNCLAIMED;
	struct skd_device *skdev;

	skdev = (skd_device_t *)(uintptr_t)arg;

	ASSERT(skdev != NULL);

	skdev->intr_cntr++;

	Dcmn_err(CE_NOTE, "skd_isr_aif: intr=%" PRId64 "\n", skdev->intr_cntr);

	for (;;) {

		/* Lock ordering: the waitq lock must not be held here. */
		ASSERT(!WAITQ_LOCK_HELD(skdev));
		INTR_LOCK(skdev);

		intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);

		/* Only acknowledge the interrupt sources we service. */
		ack = FIT_INT_DEF_MASK;
		ack &= intstat;

		Dcmn_err(CE_NOTE, "intstat=0x%x ack=0x%x", intstat, ack);

		/*
		 * As long as there is an int pending on device, keep
		 * running loop. When none, get out, but if we've never
		 * done any processing, call completion handler?
		 */
		if (ack == 0) {
			/*
			 * No interrupts on device, but run the completion
			 * processor anyway?
			 */
			if (rc == DDI_INTR_UNCLAIMED &&
			    skdev->state == SKD_DRVR_STATE_ONLINE) {
				Dcmn_err(CE_NOTE,
				    "1: Want isr_comp_posted call");
				skd_isr_completion_posted(skdev);
			}
			INTR_UNLOCK(skdev);

			break;
		}
		rc = DDI_INTR_CLAIMED;

		/* Ack the sources we are about to service. */
		SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);

		/* Ignore device interrupts while loading or tearing down. */
		if ((skdev->state != SKD_DRVR_STATE_LOAD) &&
		    (skdev->state != SKD_DRVR_STATE_STOPPING)) {
			if (intstat & FIT_ISH_COMPLETION_POSTED) {
				Dcmn_err(CE_NOTE,
				    "2: Want isr_comp_posted call");
				skd_isr_completion_posted(skdev);
			}

			if (intstat & FIT_ISH_FW_STATE_CHANGE) {
				Dcmn_err(CE_NOTE, "isr: fwstate change");

				skd_isr_fwstate(skdev);
				if (skdev->state == SKD_DRVR_STATE_FAULT ||
				    skdev->state ==
				    SKD_DRVR_STATE_DISAPPEARED) {
					/*
					 * Device faulted or vanished; skip
					 * the waitqueue restart below.
					 */
					INTR_UNLOCK(skdev);

					return (rc);
				}
			}

			if (intstat & FIT_ISH_MSG_FROM_DEV) {
				Dcmn_err(CE_NOTE, "isr: msg_from_dev change");
				skd_isr_msg_from_dev(skdev);
			}
		}

		INTR_UNLOCK(skdev);
	}

	/* Kick any I/O that queued up while we were in the handler. */
	if (!SIMPLEQ_EMPTY(&skdev->waitqueue))
		skd_start(skdev);

	return (rc);
}
2285 2284
2286 2285 /*
2287 2286 *
2288 2287 * Name: skd_drive_fault, set the drive state to DRV_STATE_FAULT.
2289 2288 *
2290 2289 * Inputs: skdev - device state structure.
2291 2290 *
2292 2291 * Returns: Nothing.
2293 2292 *
2294 2293 */
2295 2294 static void
2296 2295 skd_drive_fault(struct skd_device *skdev)
2297 2296 {
2298 2297 skdev->state = SKD_DRVR_STATE_FAULT;
2299 2298 cmn_err(CE_WARN, "!(%s): Drive FAULT\n",
2300 2299 skd_name(skdev));
2301 2300 }
2302 2301
2303 2302 /*
2304 2303 *
2305 2304 * Name: skd_drive_disappeared, set the drive state to DISAPPEARED..
2306 2305 *
2307 2306 * Inputs: skdev - device state structure.
2308 2307 *
2309 2308 * Returns: Nothing.
2310 2309 *
2311 2310 */
2312 2311 static void
2313 2312 skd_drive_disappeared(struct skd_device *skdev)
2314 2313 {
2315 2314 skdev->state = SKD_DRVR_STATE_DISAPPEARED;
2316 2315 cmn_err(CE_WARN, "!(%s): Drive DISAPPEARED\n",
2317 2316 skd_name(skdev));
2318 2317 }
2319 2318
2320 2319 /*
2321 2320 *
2322 2321 * Name: skd_isr_fwstate, handles the various device states.
2323 2322 *
2324 2323 * Inputs: skdev - device state structure.
2325 2324 *
2326 2325 * Returns: Nothing.
2327 2326 *
2328 2327 */
2329 2328 static void
2330 2329 skd_isr_fwstate(struct skd_device *skdev)
2331 2330 {
2332 2331 uint32_t sense;
2333 2332 uint32_t state;
2334 2333 int prev_driver_state;
2335 2334 uint32_t mtd;
2336 2335
2337 2336 prev_driver_state = skdev->state;
2338 2337
2339 2338 sense = SKD_READL(skdev, FIT_STATUS);
2340 2339 state = sense & FIT_SR_DRIVE_STATE_MASK;
2341 2340
2342 2341 Dcmn_err(CE_NOTE, "s1120 state %s(%d)=>%s(%d)",
2343 2342 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
2344 2343 skd_drive_state_to_str(state), state);
2345 2344
2346 2345 skdev->drive_state = state;
2347 2346
2348 2347 switch (skdev->drive_state) {
2349 2348 case FIT_SR_DRIVE_INIT:
2350 2349 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
2351 2350 skd_disable_interrupts(skdev);
2352 2351 break;
2353 2352 }
2354 2353 if (skdev->state == SKD_DRVR_STATE_RESTARTING) {
2355 2354 skd_recover_requests(skdev);
2356 2355 }
2357 2356 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
2358 2357 skdev->timer_countdown =
2359 2358 SKD_TIMER_SECONDS(SKD_STARTING_TO);
2360 2359 skdev->state = SKD_DRVR_STATE_STARTING;
2361 2360 skd_soft_reset(skdev);
2362 2361 break;
2363 2362 }
2364 2363 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
2365 2364 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2366 2365 skdev->last_mtd = mtd;
2367 2366 break;
2368 2367
2369 2368 case FIT_SR_DRIVE_ONLINE:
2370 2369 skdev->queue_depth_limit = skdev->soft_queue_depth_limit;
2371 2370 if (skdev->queue_depth_limit > skdev->hard_queue_depth_limit) {
2372 2371 skdev->queue_depth_limit =
2373 2372 skdev->hard_queue_depth_limit;
2374 2373 }
2375 2374
2376 2375 skdev->queue_depth_lowat = skdev->queue_depth_limit * 2 / 3 + 1;
2377 2376 if (skdev->queue_depth_lowat < 1)
2378 2377 skdev->queue_depth_lowat = 1;
2379 2378 Dcmn_err(CE_NOTE,
2380 2379 "%s queue depth limit=%d hard=%d soft=%d lowat=%d",
2381 2380 DRV_NAME,
2382 2381 skdev->queue_depth_limit,
2383 2382 skdev->hard_queue_depth_limit,
2384 2383 skdev->soft_queue_depth_limit,
2385 2384 skdev->queue_depth_lowat);
2386 2385
2387 2386 skd_refresh_device_data(skdev);
2388 2387 break;
2389 2388 case FIT_SR_DRIVE_BUSY:
2390 2389 skdev->state = SKD_DRVR_STATE_BUSY;
2391 2390 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2392 2391 (void) skd_quiesce_dev(skdev);
2393 2392 break;
2394 2393 case FIT_SR_DRIVE_BUSY_SANITIZE:
2395 2394 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
2396 2395 skd_start(skdev);
2397 2396 break;
2398 2397 case FIT_SR_DRIVE_BUSY_ERASE:
2399 2398 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
2400 2399 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2401 2400 break;
2402 2401 case FIT_SR_DRIVE_OFFLINE:
2403 2402 skdev->state = SKD_DRVR_STATE_IDLE;
2404 2403 break;
2405 2404 case FIT_SR_DRIVE_SOFT_RESET:
2406 2405 skdev->state = SKD_DRVR_STATE_RESTARTING;
2407 2406
2408 2407 switch (skdev->state) {
2409 2408 case SKD_DRVR_STATE_STARTING:
2410 2409 case SKD_DRVR_STATE_RESTARTING:
2411 2410 break;
2412 2411 default:
2413 2412 skdev->state = SKD_DRVR_STATE_RESTARTING;
2414 2413 break;
2415 2414 }
2416 2415 break;
2417 2416 case FIT_SR_DRIVE_FW_BOOTING:
2418 2417 Dcmn_err(CE_NOTE,
2419 2418 "ISR FIT_SR_DRIVE_FW_BOOTING %s", skdev->name);
2420 2419 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
2421 2420 skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_WAIT_BOOT_TO);
2422 2421 break;
2423 2422
2424 2423 case FIT_SR_DRIVE_DEGRADED:
2425 2424 case FIT_SR_PCIE_LINK_DOWN:
2426 2425 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
2427 2426 break;
2428 2427
2429 2428 case FIT_SR_DRIVE_FAULT:
2430 2429 skd_drive_fault(skdev);
2431 2430 skd_recover_requests(skdev);
2432 2431 skd_start(skdev);
2433 2432 break;
2434 2433
2435 2434 case 0xFF:
2436 2435 skd_drive_disappeared(skdev);
2437 2436 skd_recover_requests(skdev);
2438 2437 skd_start(skdev);
2439 2438 break;
2440 2439 default:
2441 2440 /*
2442 2441 * Uknown FW State. Wait for a state we recognize.
2443 2442 */
2444 2443 break;
2445 2444 }
2446 2445
2447 2446 Dcmn_err(CE_NOTE, "Driver state %s(%d)=>%s(%d)",
2448 2447 skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
2449 2448 skd_skdev_state_to_str(skdev->state), skdev->state);
2450 2449 }
2451 2450
/*
 *
 * Name: skd_recover_requests, attempts to recover requests.
 *
 * Inputs: skdev - device state structure.
 *
 * Returns: Nothing.
 *
 */
static void
skd_recover_requests(struct skd_device *skdev)
{
	int i;

	ASSERT(INTR_LOCK_HELD(skdev));

	/*
	 * Fail back every outstanding r/w request and rebuild the
	 * skreq free list in ascending table order.
	 */
	for (i = 0; i < skdev->num_req_context; i++) {
		struct skd_request_context *skreq = &skdev->skreq_table[i];

		if (skreq->state == SKD_REQ_STATE_BUSY) {
			skd_log_skreq(skdev, skreq, "requeue");

			ASSERT(0 != (skreq->id & SKD_ID_INCR));
			ASSERT(skreq->pbuf != NULL);
			/* Release DMA resources for the request. */
			skd_blkdev_postop_sg_list(skdev, skreq);

			/*
			 * NOTE(review): positive EAGAIN here vs. the
			 * negative -EIO used by the completion path --
			 * confirm which sign skd_end_request() expects.
			 */
			skd_end_request(skdev, skreq, EAGAIN);
			skreq->pbuf = NULL;
			skreq->state = SKD_REQ_STATE_IDLE;
			skreq->id += SKD_ID_INCR;
		}
		/* Chain the previous entry to this one. */
		if (i > 0) {
			skreq[-1].next = skreq;
		}
		skreq->next = NULL;
	}

	WAITQ_LOCK(skdev);
	skdev->skreq_free_list = skdev->skreq_table;
	WAITQ_UNLOCK(skdev);

	/* Reclaim any FIT messages that were in flight, same scheme. */
	for (i = 0; i < skdev->num_fitmsg_context; i++) {
		struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];

		if (skmsg->state == SKD_MSG_STATE_BUSY) {
			skd_log_skmsg(skdev, skmsg, "salvaged");
			ASSERT((skmsg->id & SKD_ID_INCR) != 0);
			skmsg->state = SKD_MSG_STATE_IDLE;
			skmsg->id &= ~SKD_ID_INCR;
		}
		if (i > 0) {
			skmsg[-1].next = skmsg;
		}
		skmsg->next = NULL;
	}
	WAITQ_LOCK(skdev);
	skdev->skmsg_free_list = skdev->skmsg_table;
	WAITQ_UNLOCK(skdev);

	/* Nothing is outstanding any more; clear timeout accounting. */
	for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++) {
		skdev->timeout_slot[i] = 0;
	}
	skdev->queue_depth_busy = 0;
}
2517 2516
/*
 *
 * Name: skd_isr_msg_from_dev, handles a message from the device.
 *
 * Inputs: skdev - device state structure.
 *
 * Returns: Nothing.
 *
 */
static void
skd_isr_msg_from_dev(struct skd_device *skdev)
{
	uint32_t mfd;
	uint32_t mtd;

	Dcmn_err(CE_NOTE, "skd_isr_msg_from_dev:");

	mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);

	Dcmn_err(CE_NOTE, "mfd=0x%x last_mtd=0x%x\n", mfd, skdev->last_mtd);

	/*
	 * ignore any mtd that is an ack for something we didn't send
	 */
	if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd)) {
		return;
	}

	/*
	 * Each case below is one step of the FIT bring-up handshake:
	 * the device has acked our previous message-to-device, so we
	 * issue the next one and remember it in last_mtd.
	 */
	switch (FIT_MXD_TYPE(mfd)) {
	case FIT_MTD_FITFW_INIT:
		skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);

		/* Refuse to run against a protocol we don't speak. */
		if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
			cmn_err(CE_WARN, "!(%s): protocol mismatch\n",
			    skdev->name);
			cmn_err(CE_WARN, "!(%s): got=%d support=%d\n",
			    skdev->name, skdev->proto_ver,
			    FIT_PROTOCOL_VERSION_1);
			cmn_err(CE_WARN, "!(%s): please upgrade driver\n",
			    skdev->name);
			skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
			skd_soft_reset(skdev);
			break;
		}
		mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_GET_CMDQ_DEPTH:
		/* The ack's data field carries the device queue depth. */
		skdev->hard_queue_depth_limit = FIT_MXD_DATA(mfd);
		mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
		    SKD_N_COMPLETION_ENTRY);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_SET_COMPQ_DEPTH:
		/* Hand the device the completion queue's DMA address. */
		SKD_WRITEQ(skdev, skdev->cq_dma_address.cookies->dmac_laddress,
		    FIT_MSG_TO_DEVICE_ARG);
		mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_SET_COMPQ_ADDR:
		/* Start consuming the completion queue from scratch. */
		skd_reset_skcomp(skdev);
		mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_ARM_QUEUE:
		/* Handshake complete; nothing further to send. */
		skdev->last_mtd = 0;
		/*
		 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
		 */
		break;

	default:
		break;
	}
}
2601 2600
2602 2601
2603 2602 /*
2604 2603 *
2605 2604 * Name: skd_disable_interrupts, issues command to disable
2606 2605 * device interrupts.
2607 2606 *
2608 2607 * Inputs: skdev - device state structure.
2609 2608 *
2610 2609 * Returns: Nothing.
2611 2610 *
2612 2611 */
2613 2612 static void
2614 2613 skd_disable_interrupts(struct skd_device *skdev)
2615 2614 {
2616 2615 uint32_t sense;
2617 2616
2618 2617 Dcmn_err(CE_NOTE, "skd_disable_interrupts:");
2619 2618
2620 2619 sense = SKD_READL(skdev, FIT_CONTROL);
2621 2620 sense &= ~FIT_CR_ENABLE_INTERRUPTS;
2622 2621 SKD_WRITEL(skdev, sense, FIT_CONTROL);
2623 2622
2624 2623 Dcmn_err(CE_NOTE, "sense 0x%x", sense);
2625 2624
2626 2625 /*
2627 2626 * Note that the 1s is written. A 1-bit means
2628 2627 * disable, a 0 means enable.
2629 2628 */
2630 2629 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
2631 2630 }
2632 2631
2633 2632 /*
2634 2633 *
2635 2634 * Name: skd_enable_interrupts, issues command to enable
2636 2635 * device interrupts.
2637 2636 *
2638 2637 * Inputs: skdev - device state structure.
2639 2638 *
2640 2639 * Returns: Nothing.
2641 2640 *
2642 2641 */
2643 2642 static void
2644 2643 skd_enable_interrupts(struct skd_device *skdev)
2645 2644 {
2646 2645 uint32_t val;
2647 2646
2648 2647 Dcmn_err(CE_NOTE, "skd_enable_interrupts:");
2649 2648
2650 2649 /* unmask interrupts first */
2651 2650 val = FIT_ISH_FW_STATE_CHANGE +
2652 2651 FIT_ISH_COMPLETION_POSTED +
2653 2652 FIT_ISH_MSG_FROM_DEV;
2654 2653
2655 2654 /*
2656 2655 * Note that the compliment of mask is written. A 1-bit means
2657 2656 * disable, a 0 means enable.
2658 2657 */
2659 2658 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
2660 2659
2661 2660 Dcmn_err(CE_NOTE, "interrupt mask=0x%x", ~val);
2662 2661
2663 2662 val = SKD_READL(skdev, FIT_CONTROL);
2664 2663 val |= FIT_CR_ENABLE_INTERRUPTS;
2665 2664
2666 2665 Dcmn_err(CE_NOTE, "control=0x%x", val);
2667 2666
2668 2667 SKD_WRITEL(skdev, val, FIT_CONTROL);
2669 2668 }
2670 2669
2671 2670 /*
2672 2671 *
2673 2672 * Name: skd_soft_reset, issues a soft reset to the hardware.
2674 2673 *
2675 2674 * Inputs: skdev - device state structure.
2676 2675 *
2677 2676 * Returns: Nothing.
2678 2677 *
2679 2678 */
2680 2679 static void
2681 2680 skd_soft_reset(struct skd_device *skdev)
2682 2681 {
2683 2682 uint32_t val;
2684 2683
2685 2684 Dcmn_err(CE_NOTE, "skd_soft_reset:");
2686 2685
2687 2686 val = SKD_READL(skdev, FIT_CONTROL);
2688 2687 val |= (FIT_CR_SOFT_RESET);
2689 2688
2690 2689 Dcmn_err(CE_NOTE, "soft_reset: control=0x%x", val);
2691 2690
2692 2691 SKD_WRITEL(skdev, val, FIT_CONTROL);
2693 2692 }
2694 2693
/*
 *
 * Name: skd_start_device, gets the device going.
 *
 * Inputs: skdev - device state structure.
 *
 * Returns: Nothing.
 *
 */
static void
skd_start_device(struct skd_device *skdev)
{
	uint32_t state;
	int delay_action = 0;	/* set when the drive cannot be started */

	Dcmn_err(CE_NOTE, "skd_start_device:");

	/* ack all ghost interrupts */
	SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);

	state = SKD_READL(skdev, FIT_STATUS);

	Dcmn_err(CE_NOTE, "initial status=0x%x", state);

	state &= FIT_SR_DRIVE_STATE_MASK;
	skdev->drive_state = state;
	skdev->last_mtd = 0;

	skdev->state = SKD_DRVR_STATE_STARTING;
	skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_STARTING_TO);

	skd_enable_interrupts(skdev);

	/* Pick the start-up action based on the drive's current state. */
	switch (skdev->drive_state) {
	case FIT_SR_DRIVE_OFFLINE:
		Dcmn_err(CE_NOTE, "(%s): Drive offline...",
		    skd_name(skdev));
		break;

	case FIT_SR_DRIVE_FW_BOOTING:
		Dcmn_err(CE_NOTE, "FIT_SR_DRIVE_FW_BOOTING %s\n", skdev->name);
		skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
		skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_WAIT_BOOT_TO);
		break;

	case FIT_SR_DRIVE_BUSY_SANITIZE:
		Dcmn_err(CE_NOTE, "(%s): Start: BUSY_SANITIZE\n",
		    skd_name(skdev));
		skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
		skdev->timer_countdown = SKD_TIMER_SECONDS(60);
		break;

	case FIT_SR_DRIVE_BUSY_ERASE:
		Dcmn_err(CE_NOTE, "(%s): Start: BUSY_ERASE\n",
		    skd_name(skdev));
		skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
		skdev->timer_countdown = SKD_TIMER_SECONDS(60);
		break;

	case FIT_SR_DRIVE_INIT:
	case FIT_SR_DRIVE_ONLINE:
		/* Reset so the drive re-runs its init handshake. */
		skd_soft_reset(skdev);

		break;

	case FIT_SR_DRIVE_BUSY:
		Dcmn_err(CE_NOTE, "(%s): Drive Busy...\n",
		    skd_name(skdev));
		skdev->state = SKD_DRVR_STATE_BUSY;
		skdev->timer_countdown = SKD_TIMER_SECONDS(60);
		break;

	case FIT_SR_DRIVE_SOFT_RESET:
		Dcmn_err(CE_NOTE, "(%s) drive soft reset in prog\n",
		    skd_name(skdev));
		break;

	case FIT_SR_DRIVE_FAULT:
		/*
		 * Fault state is bad...soft reset won't do it...
		 * Hard reset, maybe, but does it work on device?
		 * For now, just fault so the system doesn't hang.
		 */
		skd_drive_fault(skdev);

		delay_action = 1;
		break;

	case 0xFF:
		/* All 1s from the status register: the device is gone. */
		skd_drive_disappeared(skdev);

		delay_action = 1;
		break;

	default:
		Dcmn_err(CE_NOTE, "(%s) Start: unknown state %x\n",
		    skd_name(skdev), skdev->drive_state);
		break;
	}

	/* Dump the FIT registers for debugging. */
	state = SKD_READL(skdev, FIT_CONTROL);
	Dcmn_err(CE_NOTE, "FIT Control Status=0x%x\n", state);

	state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
	Dcmn_err(CE_NOTE, "Intr Status=0x%x\n", state);

	state = SKD_READL(skdev, FIT_INT_MASK_HOST);
	Dcmn_err(CE_NOTE, "Intr Mask=0x%x\n", state);

	state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
	Dcmn_err(CE_NOTE, "Msg from Dev=0x%x\n", state);

	state = SKD_READL(skdev, FIT_HW_VERSION);
	Dcmn_err(CE_NOTE, "HW version=0x%x\n", state);

	if (delay_action) {
		/* start the queue so we can respond with error to requests */
		Dcmn_err(CE_NOTE, "Starting %s queue\n", skdev->name);
		skd_start(skdev);
		skdev->gendisk_on = -1;
		cv_signal(&skdev->cv_waitq);
	}
}
2818 2817
2819 2818 /*
2820 2819 *
2821 2820 * Name: skd_restart_device, restart the hardware.
2822 2821 *
2823 2822 * Inputs: skdev - device state structure.
2824 2823 *
2825 2824 * Returns: Nothing.
2826 2825 *
2827 2826 */
2828 2827 static void
2829 2828 skd_restart_device(struct skd_device *skdev)
2830 2829 {
2831 2830 uint32_t state;
2832 2831
2833 2832 Dcmn_err(CE_NOTE, "skd_restart_device:");
2834 2833
2835 2834 /* ack all ghost interrupts */
2836 2835 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2837 2836
2838 2837 state = SKD_READL(skdev, FIT_STATUS);
2839 2838
2840 2839 Dcmn_err(CE_NOTE, "skd_restart_device: drive status=0x%x\n", state);
2841 2840
2842 2841 state &= FIT_SR_DRIVE_STATE_MASK;
2843 2842 skdev->drive_state = state;
2844 2843 skdev->last_mtd = 0;
2845 2844
2846 2845 skdev->state = SKD_DRVR_STATE_RESTARTING;
2847 2846 skdev->timer_countdown = SKD_TIMER_MINUTES(4);
2848 2847
2849 2848 skd_soft_reset(skdev);
2850 2849 }
2851 2850
2852 2851 /*
2853 2852 *
2854 2853 * Name: skd_stop_device, stops the device.
2855 2854 *
2856 2855 * Inputs: skdev - device state structure.
2857 2856 *
2858 2857 * Returns: Nothing.
2859 2858 *
2860 2859 */
2861 2860 static void
2862 2861 skd_stop_device(struct skd_device *skdev)
2863 2862 {
2864 2863 clock_t cur_ticks, tmo;
2865 2864 int secs;
2866 2865 struct skd_special_context *skspcl = &skdev->internal_skspcl;
2867 2866
2868 2867 if (SKD_DRVR_STATE_ONLINE != skdev->state) {
2869 2868 Dcmn_err(CE_NOTE, "(%s): skd_stop_device not online no sync\n",
2870 2869 skdev->name);
2871 2870 goto stop_out;
2872 2871 }
2873 2872
2874 2873 if (SKD_REQ_STATE_IDLE != skspcl->req.state) {
2875 2874 Dcmn_err(CE_NOTE, "(%s): skd_stop_device no special\n",
2876 2875 skdev->name);
2877 2876 goto stop_out;
2878 2877 }
2879 2878
2880 2879 skdev->state = SKD_DRVR_STATE_SYNCING;
2881 2880 skdev->sync_done = 0;
2882 2881
2883 2882 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
2884 2883
2885 2884 secs = 10;
2886 2885 mutex_enter(&skdev->skd_internalio_mutex);
2887 2886 while (skdev->sync_done == 0) {
2888 2887 cur_ticks = ddi_get_lbolt();
2889 2888 tmo = cur_ticks + drv_usectohz(1000000 * secs);
2890 2889 if (cv_timedwait(&skdev->cv_waitq,
2891 2890 &skdev->skd_internalio_mutex, tmo) == -1) {
2892 2891 /* Oops - timed out */
2893 2892
2894 2893 Dcmn_err(CE_NOTE, "stop_device - %d secs TMO", secs);
2895 2894 }
2896 2895 }
2897 2896
2898 2897 mutex_exit(&skdev->skd_internalio_mutex);
2899 2898
2900 2899 switch (skdev->sync_done) {
2901 2900 case 0:
2902 2901 Dcmn_err(CE_NOTE, "(%s): skd_stop_device no sync\n",
2903 2902 skdev->name);
2904 2903 break;
2905 2904 case 1:
2906 2905 Dcmn_err(CE_NOTE, "(%s): skd_stop_device sync done\n",
2907 2906 skdev->name);
2908 2907 break;
2909 2908 default:
2910 2909 Dcmn_err(CE_NOTE, "(%s): skd_stop_device sync error\n",
2911 2910 skdev->name);
2912 2911 }
2913 2912
2914 2913
2915 2914 stop_out:
2916 2915 skdev->state = SKD_DRVR_STATE_STOPPING;
2917 2916
2918 2917 skd_disable_interrupts(skdev);
2919 2918
2920 2919 /* ensure all ints on device are cleared */
2921 2920 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2922 2921 /* soft reset the device to unload with a clean slate */
2923 2922 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
2924 2923 }
2925 2924
2926 2925 /*
2927 2926 * CONSTRUCT
2928 2927 */
2929 2928
2930 2929 static int skd_cons_skcomp(struct skd_device *);
2931 2930 static int skd_cons_skmsg(struct skd_device *);
2932 2931 static int skd_cons_skreq(struct skd_device *);
2933 2932 static int skd_cons_sksb(struct skd_device *);
2934 2933 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *, uint32_t,
2935 2934 dma_mem_t *);
2936 2935
2937 2936 /*
2938 2937 *
2939 2938 * Name: skd_construct, calls other routines to build device
2940 2939 * interface structures.
2941 2940 *
2942 2941 * Inputs: skdev - device state structure.
2943 2942 * instance - DDI instance number.
2944 2943 *
2945 2944 * Returns: Returns DDI_FAILURE on any failure otherwise returns
2946 2945 * DDI_SUCCESS.
2947 2946 *
2948 2947 */
2949 2948 /* ARGSUSED */ /* Upstream common source with other platforms. */
2950 2949 static int
2951 2950 skd_construct(skd_device_t *skdev, int instance)
2952 2951 {
2953 2952 int rc = 0;
2954 2953
2955 2954 skdev->state = SKD_DRVR_STATE_LOAD;
2956 2955 skdev->irq_type = skd_isr_type;
2957 2956 skdev->soft_queue_depth_limit = skd_max_queue_depth;
2958 2957 skdev->hard_queue_depth_limit = 10; /* until GET_CMDQ_DEPTH */
2959 2958
2960 2959 skdev->num_req_context = skd_max_queue_depth;
2961 2960 skdev->num_fitmsg_context = skd_max_queue_depth;
2962 2961
2963 2962 skdev->queue_depth_limit = skdev->hard_queue_depth_limit;
2964 2963 skdev->queue_depth_lowat = 1;
2965 2964 skdev->proto_ver = 99; /* initialize to invalid value */
2966 2965 skdev->sgs_per_request = skd_sgs_per_request;
2967 2966 skdev->dbg_level = skd_dbg_level;
2968 2967
2969 2968 rc = skd_cons_skcomp(skdev);
2970 2969 if (rc < 0) {
2971 2970 goto err_out;
2972 2971 }
2973 2972
2974 2973 rc = skd_cons_skmsg(skdev);
2975 2974 if (rc < 0) {
2976 2975 goto err_out;
2977 2976 }
2978 2977
2979 2978 rc = skd_cons_skreq(skdev);
2980 2979 if (rc < 0) {
2981 2980 goto err_out;
2982 2981 }
2983 2982
2984 2983 rc = skd_cons_sksb(skdev);
2985 2984 if (rc < 0) {
2986 2985 goto err_out;
2987 2986 }
2988 2987
2989 2988 Dcmn_err(CE_NOTE, "CONSTRUCT VICTORY");
2990 2989
2991 2990 return (DDI_SUCCESS);
2992 2991
2993 2992 err_out:
2994 2993 Dcmn_err(CE_NOTE, "construct failed\n");
2995 2994 skd_destruct(skdev);
2996 2995
2997 2996 return (DDI_FAILURE);
2998 2997 }
2999 2998
3000 2999 /*
3001 3000 *
3002 3001 * Name: skd_free_phys, frees DMA memory.
3003 3002 *
3004 3003 * Inputs: skdev - device state structure.
3005 3004 * mem - DMA info.
3006 3005 *
3007 3006 * Returns: Nothing.
3008 3007 *
3009 3008 */
3010 3009 static void
3011 3010 skd_free_phys(skd_device_t *skdev, dma_mem_t *mem)
3012 3011 {
3013 3012 _NOTE(ARGUNUSED(skdev));
3014 3013
3015 3014 if (mem == NULL || mem->dma_handle == NULL)
3016 3015 return;
3017 3016
3018 3017 (void) ddi_dma_unbind_handle(mem->dma_handle);
3019 3018
3020 3019 if (mem->acc_handle != NULL) {
3021 3020 ddi_dma_mem_free(&mem->acc_handle);
3022 3021 mem->acc_handle = NULL;
3023 3022 }
3024 3023
3025 3024 mem->bp = NULL;
3026 3025 ddi_dma_free_handle(&mem->dma_handle);
3027 3026 mem->dma_handle = NULL;
3028 3027 }
3029 3028
3030 3029 /*
3031 3030 *
3032 3031 * Name: skd_alloc_dma_mem, allocates DMA memory.
3033 3032 *
3034 3033 * Inputs: skdev - device state structure.
3035 3034 * mem - DMA data structure.
3036 3035 * sleep - indicates whether called routine can sleep.
3037 3036 * atype - specified 32 or 64 bit allocation.
3038 3037 *
3039 3038 * Returns: Void pointer to mem->bp on success else NULL.
3040 3039 * NOTE: There are some failure modes even if sleep is set
3041 3040 * to KM_SLEEP, so callers MUST check the return code even
3042 3041 * if KM_SLEEP is passed in.
3043 3042 *
3044 3043 */
3045 3044 static void *
3046 3045 skd_alloc_dma_mem(skd_device_t *skdev, dma_mem_t *mem, uint8_t atype)
3047 3046 {
3048 3047 size_t rlen;
3049 3048 uint_t cnt;
3050 3049 ddi_dma_attr_t dma_attr = skd_64bit_io_dma_attr;
3051 3050 ddi_device_acc_attr_t acc_attr = {
3052 3051 DDI_DEVICE_ATTR_V0,
3053 3052 DDI_STRUCTURE_LE_ACC,
3054 3053 DDI_STRICTORDER_ACC
3055 3054 };
3056 3055
3057 3056 if (atype == ATYPE_32BIT)
3058 3057 dma_attr.dma_attr_addr_hi = SKD_DMA_HIGH_32BIT_ADDRESS;
3059 3058
3060 3059 dma_attr.dma_attr_sgllen = 1;
3061 3060
3062 3061 /*
3063 3062 * Allocate DMA memory.
3064 3063 */
3065 3064 if (ddi_dma_alloc_handle(skdev->dip, &dma_attr, DDI_DMA_SLEEP, NULL,
3066 3065 &mem->dma_handle) != DDI_SUCCESS) {
3067 3066 cmn_err(CE_WARN, "!alloc_dma_mem-1, failed");
3068 3067
3069 3068 mem->dma_handle = NULL;
3070 3069
3071 3070 return (NULL);
3072 3071 }
3073 3072
3074 3073 if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
3075 3074 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, (caddr_t *)&mem->bp, &rlen,
3076 3075 &mem->acc_handle) != DDI_SUCCESS) {
3077 3076 cmn_err(CE_WARN, "!skd_alloc_dma_mem-2, failed");
3078 3077 ddi_dma_free_handle(&mem->dma_handle);
3079 3078 mem->dma_handle = NULL;
3080 3079 mem->acc_handle = NULL;
3081 3080 mem->bp = NULL;
3082 3081
3083 3082 return (NULL);
3084 3083 }
3085 3084 bzero(mem->bp, mem->size);
3086 3085
3087 3086 if (ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
3088 3087 mem->size, (DDI_DMA_CONSISTENT | DDI_DMA_RDWR), DDI_DMA_SLEEP, NULL,
3089 3088 &mem->cookie, &cnt) != DDI_DMA_MAPPED) {
3090 3089 cmn_err(CE_WARN, "!skd_alloc_dma_mem-3, failed");
3091 3090 ddi_dma_mem_free(&mem->acc_handle);
3092 3091 ddi_dma_free_handle(&mem->dma_handle);
3093 3092
3094 3093 return (NULL);
3095 3094 }
3096 3095
3097 3096 if (cnt > 1) {
3098 3097 (void) ddi_dma_unbind_handle(mem->dma_handle);
3099 3098 cmn_err(CE_WARN, "!skd_alloc_dma_mem-4, failed, "
3100 3099 "cookie_count %d > 1", cnt);
3101 3100 skd_free_phys(skdev, mem);
3102 3101
3103 3102 return (NULL);
3104 3103 }
3105 3104 mem->cookies = &mem->cookie;
3106 3105 mem->cookies->dmac_size = mem->size;
3107 3106
3108 3107 return (mem->bp);
3109 3108 }
3110 3109
3111 3110 /*
3112 3111 *
3113 3112 * Name: skd_cons_skcomp, allocates space for the skcomp table.
3114 3113 *
3115 3114 * Inputs: skdev - device state structure.
3116 3115 *
3117 3116 * Returns: -ENOMEM if no memory otherwise NULL.
3118 3117 *
3119 3118 */
3120 3119 static int
3121 3120 skd_cons_skcomp(struct skd_device *skdev)
3122 3121 {
3123 3122 uint64_t *dma_alloc;
3124 3123 struct fit_completion_entry_v1 *skcomp;
3125 3124 int rc = 0;
3126 3125 uint32_t nbytes;
3127 3126 dma_mem_t *mem;
3128 3127
3129 3128 nbytes = sizeof (*skcomp) * SKD_N_COMPLETION_ENTRY;
3130 3129 nbytes += sizeof (struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
3131 3130
3132 3131 Dcmn_err(CE_NOTE, "cons_skcomp: nbytes=%d,entries=%d", nbytes,
3133 3132 SKD_N_COMPLETION_ENTRY);
3134 3133
3135 3134 mem = &skdev->cq_dma_address;
3136 3135 mem->size = nbytes;
3137 3136
3138 3137 dma_alloc = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT);
3139 3138 skcomp = (struct fit_completion_entry_v1 *)dma_alloc;
3140 3139 if (skcomp == NULL) {
3141 3140 rc = -ENOMEM;
3142 3141 goto err_out;
3143 3142 }
3144 3143
3145 3144 bzero(skcomp, nbytes);
3146 3145
3147 3146 Dcmn_err(CE_NOTE, "cons_skcomp: skcomp=%p nbytes=%d",
3148 3147 (void *)skcomp, nbytes);
3149 3148
3150 3149 skdev->skcomp_table = skcomp;
3151 3150 skdev->skerr_table = (struct fit_comp_error_info *)(dma_alloc +
3152 3151 (SKD_N_COMPLETION_ENTRY * sizeof (*skcomp) / sizeof (uint64_t)));
3153 3152
3154 3153 err_out:
3155 3154 return (rc);
3156 3155 }
3157 3156
/*
 *
 * Name: skd_cons_skmsg, allocates space for the skmsg table.
 *
 * Inputs: skdev - device state structure.
 *
 * Returns: -ENOMEM if no memory, otherwise 0.
 *
 */
static int
skd_cons_skmsg(struct skd_device *skdev)
{
	dma_mem_t *mem;
	int rc = 0;
	uint32_t i;

	Dcmn_err(CE_NOTE, "skmsg_table kzalloc, struct %lu, count %u total %lu",
	    (ulong_t)sizeof (struct skd_fitmsg_context),
	    skdev->num_fitmsg_context,
	    (ulong_t)(sizeof (struct skd_fitmsg_context) *
	    skdev->num_fitmsg_context));

	/* KM_SLEEP: this allocation blocks rather than failing. */
	skdev->skmsg_table = (struct skd_fitmsg_context *)kmem_zalloc(
	    sizeof (struct skd_fitmsg_context) * skdev->num_fitmsg_context,
	    KM_SLEEP);

	for (i = 0; i < skdev->num_fitmsg_context; i++) {
		struct skd_fitmsg_context *skmsg;

		skmsg = &skdev->skmsg_table[i];

		skmsg->id = i + SKD_ID_FIT_MSG;

		skmsg->state = SKD_MSG_STATE_IDLE;

		/* Each context carries its own DMA-able message buffer. */
		mem = &skmsg->mb_dma_address;
		mem->size = SKD_N_FITMSG_BYTES + 64;

		skmsg->msg_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT);

		if (NULL == skmsg->msg_buf) {
			rc = -ENOMEM;
			/*
			 * Bump i so the [i - 1] termination below NULLs
			 * this entry's (never-linked) next pointer.
			 */
			i++;
			break;
		}

		skmsg->offset = 0;

		bzero(skmsg->msg_buf, SKD_N_FITMSG_BYTES);

		/*
		 * Tentatively chain to the following table slot; the
		 * final valid entry is re-terminated after the loop.
		 */
		skmsg->next = &skmsg[1];
	}

	/* Free list is in order starting with the 0th entry. */
	skdev->skmsg_table[i - 1].next = NULL;
	skdev->skmsg_free_list = skdev->skmsg_table;

	return (rc);
}
3217 3216
/*
 *
 * Name: skd_cons_skreq, allocates space for the skreq table.
 *
 * Inputs: skdev - device state structure.
 *
 * Returns: -ENOMEM if no memory, otherwise 0.
 *
 */
static int
skd_cons_skreq(struct skd_device *skdev)
{
	int rc = 0;
	uint32_t i;

	Dcmn_err(CE_NOTE,
	    "skreq_table kmem_zalloc, struct %lu, count %u total %lu",
	    (ulong_t)sizeof (struct skd_request_context),
	    skdev->num_req_context,
	    (ulong_t) (sizeof (struct skd_request_context) *
	    skdev->num_req_context));

	/* KM_SLEEP: this allocation blocks rather than failing. */
	skdev->skreq_table = (struct skd_request_context *)kmem_zalloc(
	    sizeof (struct skd_request_context) * skdev->num_req_context,
	    KM_SLEEP);

	for (i = 0; i < skdev->num_req_context; i++) {
		struct skd_request_context *skreq;

		skreq = &skdev->skreq_table[i];

		skreq->id = (uint16_t)(i + SKD_ID_RW_REQUEST);
		skreq->state = SKD_REQ_STATE_IDLE;

		/* Per-request S/G descriptor chain in DMA-able memory. */
		skreq->sksg_list = skd_cons_sg_list(skdev,
		    skdev->sgs_per_request,
		    &skreq->sksg_dma_address);

		if (NULL == skreq->sksg_list) {
			/*
			 * Bail without terminating the free list;
			 * cleanup walks the table by index, not by the
			 * next pointers, so partial wiring is safe.
			 */
			rc = -ENOMEM;
			goto err_out;
		}

		/*
		 * Tentatively chain to the following table slot; the
		 * final entry is re-terminated after the loop.
		 */
		skreq->next = &skreq[1];
	}

	/* Free list is in order starting with the 0th entry. */
	skdev->skreq_table[i - 1].next = NULL;
	skdev->skreq_free_list = skdev->skreq_table;

err_out:
	return (rc);
}
3271 3270
3272 3271 /*
3273 3272 *
3274 3273 * Name: skd_cons_sksb, allocates space for the skspcl msg buf
3275 3274 * and data buf.
3276 3275 *
3277 3276 * Inputs: skdev - device state structure.
3278 3277 *
3279 3278 * Returns: -ENOMEM if no memory otherwise NULL.
3280 3279 *
3281 3280 */
3282 3281 static int
3283 3282 skd_cons_sksb(struct skd_device *skdev)
3284 3283 {
3285 3284 int rc = 0;
3286 3285 struct skd_special_context *skspcl;
3287 3286 dma_mem_t *mem;
3288 3287 uint32_t nbytes;
3289 3288
3290 3289 skspcl = &skdev->internal_skspcl;
3291 3290
3292 3291 skspcl->req.id = 0 + SKD_ID_INTERNAL;
3293 3292 skspcl->req.state = SKD_REQ_STATE_IDLE;
3294 3293
3295 3294 nbytes = SKD_N_INTERNAL_BYTES;
3296 3295
3297 3296 mem = &skspcl->db_dma_address;
3298 3297 mem->size = nbytes;
3299 3298
3300 3299 /* data_buf's DMA pointer is skspcl->db_dma_address */
3301 3300 skspcl->data_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT);
3302 3301 if (skspcl->data_buf == NULL) {
3303 3302 rc = -ENOMEM;
3304 3303 goto err_out;
3305 3304 }
3306 3305
3307 3306 bzero(skspcl->data_buf, nbytes);
3308 3307
3309 3308 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
3310 3309
3311 3310 mem = &skspcl->mb_dma_address;
3312 3311 mem->size = nbytes;
3313 3312
3314 3313 /* msg_buf DMA pointer is skspcl->mb_dma_address */
3315 3314 skspcl->msg_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT);
3316 3315 if (skspcl->msg_buf == NULL) {
3317 3316 rc = -ENOMEM;
3318 3317 goto err_out;
3319 3318 }
3320 3319
3321 3320
3322 3321 bzero(skspcl->msg_buf, nbytes);
3323 3322
3324 3323 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
3325 3324 &skspcl->req.sksg_dma_address);
3326 3325
3327 3326
3328 3327 if (skspcl->req.sksg_list == NULL) {
3329 3328 rc = -ENOMEM;
3330 3329 goto err_out;
3331 3330 }
3332 3331
3333 3332 if (skd_format_internal_skspcl(skdev) == 0) {
3334 3333 rc = -EINVAL;
3335 3334 goto err_out;
3336 3335 }
3337 3336
3338 3337 err_out:
3339 3338 return (rc);
3340 3339 }
3341 3340
3342 3341 /*
3343 3342 *
3344 3343 * Name: skd_cons_sg_list, allocates the S/G list.
3345 3344 *
3346 3345 * Inputs: skdev - device state structure.
3347 3346 * n_sg - Number of scatter-gather entries.
3348 3347 * ret_dma_addr - S/G list DMA pointer.
3349 3348 *
3350 3349 * Returns: A list of FIT message descriptors.
3351 3350 *
3352 3351 */
3353 3352 static struct fit_sg_descriptor
3354 3353 *skd_cons_sg_list(struct skd_device *skdev,
3355 3354 uint32_t n_sg, dma_mem_t *ret_dma_addr)
3356 3355 {
3357 3356 struct fit_sg_descriptor *sg_list;
3358 3357 uint32_t nbytes;
3359 3358 dma_mem_t *mem;
3360 3359
3361 3360 nbytes = sizeof (*sg_list) * n_sg;
3362 3361
3363 3362 mem = ret_dma_addr;
3364 3363 mem->size = nbytes;
3365 3364
3366 3365 /* sg_list's DMA pointer is *ret_dma_addr */
3367 3366 sg_list = skd_alloc_dma_mem(skdev, mem, ATYPE_32BIT);
3368 3367
3369 3368 if (sg_list != NULL) {
3370 3369 uint64_t dma_address = ret_dma_addr->cookie.dmac_laddress;
3371 3370 uint32_t i;
3372 3371
3373 3372 bzero(sg_list, nbytes);
3374 3373
3375 3374 for (i = 0; i < n_sg - 1; i++) {
3376 3375 uint64_t ndp_off;
3377 3376 ndp_off = (i + 1) * sizeof (struct fit_sg_descriptor);
3378 3377
3379 3378 sg_list[i].next_desc_ptr = dma_address + ndp_off;
3380 3379 }
3381 3380 sg_list[i].next_desc_ptr = 0LL;
3382 3381 }
3383 3382
3384 3383 return (sg_list);
3385 3384 }
3386 3385
3387 3386 /*
3388 3387 * DESTRUCT (FREE)
3389 3388 */
3390 3389
3391 3390 static void skd_free_skcomp(struct skd_device *skdev);
3392 3391 static void skd_free_skmsg(struct skd_device *skdev);
3393 3392 static void skd_free_skreq(struct skd_device *skdev);
3394 3393 static void skd_free_sksb(struct skd_device *skdev);
3395 3394
3396 3395 static void skd_free_sg_list(struct skd_device *skdev,
3397 3396 struct fit_sg_descriptor *sg_list,
3398 3397 uint32_t n_sg, dma_mem_t dma_addr);
3399 3398
3400 3399 /*
3401 3400 *
3402 3401 * Name: skd_destruct, call various rouines to deallocate
3403 3402 * space acquired during initialization.
3404 3403 *
3405 3404 * Inputs: skdev - device state structure.
3406 3405 *
3407 3406 * Returns: Nothing.
3408 3407 *
3409 3408 */
3410 3409 static void
3411 3410 skd_destruct(struct skd_device *skdev)
3412 3411 {
3413 3412 if (skdev == NULL) {
3414 3413 return;
3415 3414 }
3416 3415
3417 3416 Dcmn_err(CE_NOTE, "destruct sksb");
3418 3417 skd_free_sksb(skdev);
3419 3418
3420 3419 Dcmn_err(CE_NOTE, "destruct skreq");
3421 3420 skd_free_skreq(skdev);
3422 3421
3423 3422 Dcmn_err(CE_NOTE, "destruct skmsg");
3424 3423 skd_free_skmsg(skdev);
3425 3424
3426 3425 Dcmn_err(CE_NOTE, "destruct skcomp");
3427 3426 skd_free_skcomp(skdev);
3428 3427
3429 3428 Dcmn_err(CE_NOTE, "DESTRUCT VICTORY");
3430 3429 }
3431 3430
3432 3431 /*
3433 3432 *
3434 3433 * Name: skd_free_skcomp, deallocates skcomp table DMA resources.
3435 3434 *
3436 3435 * Inputs: skdev - device state structure.
3437 3436 *
3438 3437 * Returns: Nothing.
3439 3438 *
3440 3439 */
3441 3440 static void
3442 3441 skd_free_skcomp(struct skd_device *skdev)
3443 3442 {
3444 3443 if (skdev->skcomp_table != NULL) {
3445 3444 skd_free_phys(skdev, &skdev->cq_dma_address);
3446 3445 }
3447 3446
3448 3447 skdev->skcomp_table = NULL;
3449 3448 }
3450 3449
3451 3450 /*
3452 3451 *
3453 3452 * Name: skd_free_skmsg, deallocates skmsg table DMA resources.
3454 3453 *
3455 3454 * Inputs: skdev - device state structure.
3456 3455 *
3457 3456 * Returns: Nothing.
3458 3457 *
3459 3458 */
3460 3459 static void
3461 3460 skd_free_skmsg(struct skd_device *skdev)
3462 3461 {
3463 3462 uint32_t i;
3464 3463
3465 3464 if (NULL == skdev->skmsg_table)
3466 3465 return;
3467 3466
3468 3467 for (i = 0; i < skdev->num_fitmsg_context; i++) {
3469 3468 struct skd_fitmsg_context *skmsg;
3470 3469
3471 3470 skmsg = &skdev->skmsg_table[i];
3472 3471
3473 3472 if (skmsg->msg_buf != NULL) {
3474 3473 skd_free_phys(skdev, &skmsg->mb_dma_address);
3475 3474 }
3476 3475
3477 3476
3478 3477 skmsg->msg_buf = NULL;
3479 3478 }
3480 3479
3481 3480 kmem_free(skdev->skmsg_table, sizeof (struct skd_fitmsg_context) *
3482 3481 skdev->num_fitmsg_context);
3483 3482
3484 3483 skdev->skmsg_table = NULL;
3485 3484
3486 3485 }
3487 3486
3488 3487 /*
3489 3488 *
3490 3489 * Name: skd_free_skreq, deallocates skspcl table DMA resources.
3491 3490 *
3492 3491 * Inputs: skdev - device state structure.
3493 3492 *
3494 3493 * Returns: Nothing.
3495 3494 *
3496 3495 */
3497 3496 static void
3498 3497 skd_free_skreq(struct skd_device *skdev)
3499 3498 {
3500 3499 uint32_t i;
3501 3500
3502 3501 if (NULL == skdev->skreq_table)
3503 3502 return;
3504 3503
3505 3504 for (i = 0; i < skdev->num_req_context; i++) {
3506 3505 struct skd_request_context *skreq;
3507 3506
3508 3507 skreq = &skdev->skreq_table[i];
3509 3508
3510 3509 skd_free_sg_list(skdev, skreq->sksg_list,
3511 3510 skdev->sgs_per_request, skreq->sksg_dma_address);
3512 3511
3513 3512 skreq->sksg_list = NULL;
3514 3513 }
3515 3514
3516 3515 kmem_free(skdev->skreq_table, sizeof (struct skd_request_context) *
3517 3516 skdev->num_req_context);
3518 3517
3519 3518 skdev->skreq_table = NULL;
3520 3519
3521 3520 }
3522 3521
3523 3522 /*
3524 3523 *
3525 3524 * Name: skd_free_sksb, deallocates skspcl data buf and
3526 3525 * msg buf DMA resources.
3527 3526 *
3528 3527 * Inputs: skdev - device state structure.
3529 3528 *
3530 3529 * Returns: Nothing.
3531 3530 *
3532 3531 */
3533 3532 static void
3534 3533 skd_free_sksb(struct skd_device *skdev)
3535 3534 {
3536 3535 struct skd_special_context *skspcl;
3537 3536
3538 3537 skspcl = &skdev->internal_skspcl;
3539 3538
3540 3539 if (skspcl->data_buf != NULL) {
3541 3540 skd_free_phys(skdev, &skspcl->db_dma_address);
3542 3541 }
3543 3542
3544 3543 skspcl->data_buf = NULL;
3545 3544
3546 3545 if (skspcl->msg_buf != NULL) {
3547 3546 skd_free_phys(skdev, &skspcl->mb_dma_address);
3548 3547 }
3549 3548
3550 3549 skspcl->msg_buf = NULL;
3551 3550
3552 3551 skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
3553 3552 skspcl->req.sksg_dma_address);
3554 3553
3555 3554 skspcl->req.sksg_list = NULL;
3556 3555 }
3557 3556
3558 3557 /*
3559 3558 *
3560 3559 * Name: skd_free_sg_list, deallocates S/G DMA resources.
3561 3560 *
3562 3561 * Inputs: skdev - device state structure.
3563 3562 * sg_list - S/G list itself.
3564 3563 * n_sg - nukmber of segments
3565 3564 * dma_addr - S/G list DMA address.
3566 3565 *
3567 3566 * Returns: Nothing.
3568 3567 *
3569 3568 */
3570 3569 /* ARGSUSED */ /* Upstream common source with other platforms. */
3571 3570 static void
3572 3571 skd_free_sg_list(struct skd_device *skdev,
3573 3572 struct fit_sg_descriptor *sg_list,
3574 3573 uint32_t n_sg, dma_mem_t dma_addr)
3575 3574 {
3576 3575 if (sg_list != NULL) {
3577 3576 skd_free_phys(skdev, &dma_addr);
3578 3577 }
3579 3578 }
3580 3579
3581 3580 /*
3582 3581 *
3583 3582 * Name: skd_queue, queues the I/O request.
3584 3583 *
3585 3584 * Inputs: skdev - device state structure.
3586 3585 * pbuf - I/O request
3587 3586 *
3588 3587 * Returns: Nothing.
3589 3588 *
3590 3589 */
3591 3590 static void
3592 3591 skd_queue(skd_device_t *skdev, skd_buf_private_t *pbuf)
3593 3592 {
3594 3593 struct waitqueue *waitq;
3595 3594
3596 3595 ASSERT(skdev != NULL);
3597 3596 ASSERT(pbuf != NULL);
3598 3597
3599 3598 ASSERT(WAITQ_LOCK_HELD(skdev));
3600 3599
3601 3600 waitq = &skdev->waitqueue;
3602 3601
3603 3602 if (SIMPLEQ_EMPTY(waitq))
3604 3603 SIMPLEQ_INSERT_HEAD(waitq, pbuf, sq);
3605 3604 else
3606 3605 SIMPLEQ_INSERT_TAIL(waitq, pbuf, sq);
3607 3606 }
3608 3607
3609 3608 /*
3610 3609 *
3611 3610 * Name: skd_list_skreq, displays the skreq table entries.
3612 3611 *
3613 3612 * Inputs: skdev - device state structure.
3614 3613 * list - flag, if true displays the entry address.
3615 3614 *
3616 3615 * Returns: Returns number of skmsg entries found.
3617 3616 *
3618 3617 */
3619 3618 /* ARGSUSED */ /* Upstream common source with other platforms. */
3620 3619 static int
3621 3620 skd_list_skreq(skd_device_t *skdev, int list)
3622 3621 {
3623 3622 int inx = 0;
3624 3623 struct skd_request_context *skreq;
3625 3624
3626 3625 if (list) {
3627 3626 Dcmn_err(CE_NOTE, "skreq_table[0]\n");
3628 3627
3629 3628 skreq = &skdev->skreq_table[0];
3630 3629 while (skreq) {
3631 3630 if (list)
3632 3631 Dcmn_err(CE_NOTE,
3633 3632 "%d: skreq=%p state=%d id=%x fid=%x "
3634 3633 "pbuf=%p dir=%d comp=%d\n",
3635 3634 inx, (void *)skreq, skreq->state,
3636 3635 skreq->id, skreq->fitmsg_id,
3637 3636 (void *)skreq->pbuf,
3638 3637 skreq->sg_data_dir, skreq->did_complete);
3639 3638 inx++;
3640 3639 skreq = skreq->next;
3641 3640 }
3642 3641 }
3643 3642
3644 3643 inx = 0;
3645 3644 skreq = skdev->skreq_free_list;
3646 3645
3647 3646 if (list)
3648 3647 Dcmn_err(CE_NOTE, "skreq_free_list\n");
3649 3648 while (skreq) {
3650 3649 if (list)
3651 3650 Dcmn_err(CE_NOTE, "%d: skreq=%p state=%d id=%x fid=%x "
3652 3651 "pbuf=%p dir=%d\n", inx, (void *)skreq,
3653 3652 skreq->state, skreq->id, skreq->fitmsg_id,
3654 3653 (void *)skreq->pbuf, skreq->sg_data_dir);
3655 3654 inx++;
3656 3655 skreq = skreq->next;
3657 3656 }
3658 3657
3659 3658 return (inx);
3660 3659 }
3661 3660
3662 3661 /*
3663 3662 *
3664 3663 * Name: skd_list_skmsg, displays the skmsg table entries.
3665 3664 *
3666 3665 * Inputs: skdev - device state structure.
3667 3666 * list - flag, if true displays the entry address.
3668 3667 *
3669 3668 * Returns: Returns number of skmsg entries found.
3670 3669 *
3671 3670 */
3672 3671 static int
3673 3672 skd_list_skmsg(skd_device_t *skdev, int list)
3674 3673 {
3675 3674 int inx = 0;
3676 3675 struct skd_fitmsg_context *skmsgp;
3677 3676
3678 3677 skmsgp = &skdev->skmsg_table[0];
3679 3678
3680 3679 if (list) {
3681 3680 Dcmn_err(CE_NOTE, "skmsg_table[0]\n");
3682 3681
3683 3682 while (skmsgp) {
3684 3683 if (list)
3685 3684 Dcmn_err(CE_NOTE, "%d: skmsgp=%p id=%x outs=%d "
3686 3685 "l=%d o=%d nxt=%p\n", inx, (void *)skmsgp,
3687 3686 skmsgp->id, skmsgp->outstanding,
3688 3687 skmsgp->length, skmsgp->offset,
3689 3688 (void *)skmsgp->next);
3690 3689 inx++;
3691 3690 skmsgp = skmsgp->next;
3692 3691 }
3693 3692 }
3694 3693
3695 3694 inx = 0;
3696 3695 if (list)
3697 3696 Dcmn_err(CE_NOTE, "skmsg_free_list\n");
3698 3697 skmsgp = skdev->skmsg_free_list;
3699 3698 while (skmsgp) {
3700 3699 if (list)
3701 3700 Dcmn_err(CE_NOTE, "%d: skmsgp=%p id=%x outs=%d l=%d "
3702 3701 "o=%d nxt=%p\n",
3703 3702 inx, (void *)skmsgp, skmsgp->id,
3704 3703 skmsgp->outstanding, skmsgp->length,
3705 3704 skmsgp->offset, (void *)skmsgp->next);
3706 3705 inx++;
3707 3706 skmsgp = skmsgp->next;
3708 3707 }
3709 3708
3710 3709 return (inx);
3711 3710 }
3712 3711
3713 3712 /*
3714 3713 *
3715 3714 * Name: skd_get_queue_pbuf, retrieves top of queue entry and
3716 3715 * delinks entry from the queue.
3717 3716 *
3718 3717 * Inputs: skdev - device state structure.
3719 3718 * drive - device number
3720 3719 *
3721 3720 * Returns: Returns the top of the job queue entry.
3722 3721 *
3723 3722 */
3724 3723 static skd_buf_private_t
3725 3724 *skd_get_queued_pbuf(skd_device_t *skdev)
3726 3725 {
3727 3726 skd_buf_private_t *pbuf;
3728 3727
3729 3728 ASSERT(WAITQ_LOCK_HELD(skdev));
3730 3729 pbuf = SIMPLEQ_FIRST(&skdev->waitqueue);
3731 3730 if (pbuf != NULL)
3732 3731 SIMPLEQ_REMOVE_HEAD(&skdev->waitqueue, sq);
3733 3732 return (pbuf);
3734 3733 }
3735 3734
3736 3735 /*
3737 3736 * PCI DRIVER GLUE
3738 3737 */
3739 3738
3740 3739 /*
3741 3740 *
3742 3741 * Name: skd_pci_info, logs certain device PCI info.
3743 3742 *
3744 3743 * Inputs: skdev - device state structure.
3745 3744 *
3746 3745 * Returns: str which contains the device speed info..
3747 3746 *
3748 3747 */
3749 3748 static char *
3750 3749 skd_pci_info(struct skd_device *skdev, char *str, size_t len)
3751 3750 {
3752 3751 int pcie_reg;
3753 3752
3754 3753 str[0] = '\0';
3755 3754
3756 3755 pcie_reg = skd_pci_find_capability(skdev, PCI_CAP_ID_EXP);
3757 3756
3758 3757 if (pcie_reg) {
3759 3758 uint16_t lstat, lspeed, lwidth;
3760 3759
3761 3760 pcie_reg += 0x12;
3762 3761 lstat = pci_config_get16(skdev->pci_handle, pcie_reg);
3763 3762 lspeed = lstat & (0xF);
3764 3763 lwidth = (lstat & 0x3F0) >> 4;
3765 3764
3766 3765 (void) snprintf(str, len, "PCIe (%s rev %d)",
3767 3766 lspeed == 1 ? "2.5GT/s" :
3768 3767 lspeed == 2 ? "5.0GT/s" : "<unknown>",
3769 3768 lwidth);
3770 3769 }
3771 3770
3772 3771 return (str);
3773 3772 }
3774 3773
3775 3774 /*
3776 3775 * MODULE GLUE
3777 3776 */
3778 3777
3779 3778 /*
3780 3779 *
3781 3780 * Name: skd_init, initializes certain values.
3782 3781 *
3783 3782 * Inputs: skdev - device state structure.
3784 3783 *
3785 3784 * Returns: Zero.
3786 3785 *
3787 3786 */
3788 3787 /* ARGSUSED */ /* Upstream common source with other platforms. */
3789 3788 static int
3790 3789 skd_init(skd_device_t *skdev)
3791 3790 {
3792 3791 Dcmn_err(CE_NOTE, "skd_init: v%s-b%s\n", DRV_VERSION, DRV_BUILD_ID);
3793 3792
3794 3793 if (skd_max_queue_depth < 1 ||
3795 3794 skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
3796 3795 cmn_err(CE_NOTE, "skd_max_q_depth %d invalid, re-set to %d\n",
3797 3796 skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
3798 3797 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
3799 3798 }
3800 3799
3801 3800 if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
3802 3801 cmn_err(CE_NOTE, "skd_max_req_per_msg %d invalid, set to %d\n",
3803 3802 skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
3804 3803 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
3805 3804 }
3806 3805
3807 3806
3808 3807 if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
3809 3808 cmn_err(CE_NOTE, "skd_sg_per_request %d invalid, set to %d\n",
3810 3809 skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
3811 3810 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
3812 3811 }
3813 3812
3814 3813 if (skd_dbg_level < 0 || skd_dbg_level > 2) {
3815 3814 cmn_err(CE_NOTE, "skd_dbg_level %d invalid, re-set to %d\n",
3816 3815 skd_dbg_level, 0);
3817 3816 skd_dbg_level = 0;
3818 3817 }
3819 3818
3820 3819 return (0);
3821 3820 }
3822 3821
3823 3822 /*
3824 3823 *
3825 3824 * Name: skd_exit, exits the driver & logs the fact.
3826 3825 *
3827 3826 * Inputs: none.
3828 3827 *
3829 3828 * Returns: Nothing.
3830 3829 *
3831 3830 */
3832 3831 static void
3833 3832 skd_exit(void)
3834 3833 {
3835 3834 cmn_err(CE_NOTE, "skd v%s unloading", DRV_VERSION);
3836 3835 }
3837 3836
3838 3837 /*
3839 3838 *
3840 3839 * Name: skd_drive_state_to_str, converts binary drive state
3841 3840 * to its corresponding string value.
3842 3841 *
3843 3842 * Inputs: Drive state.
3844 3843 *
3845 3844 * Returns: String representing drive state.
3846 3845 *
3847 3846 */
3848 3847 const char *
3849 3848 skd_drive_state_to_str(int state)
3850 3849 {
3851 3850 switch (state) {
3852 3851 case FIT_SR_DRIVE_OFFLINE: return ("OFFLINE");
3853 3852 case FIT_SR_DRIVE_INIT: return ("INIT");
3854 3853 case FIT_SR_DRIVE_ONLINE: return ("ONLINE");
3855 3854 case FIT_SR_DRIVE_BUSY: return ("BUSY");
3856 3855 case FIT_SR_DRIVE_FAULT: return ("FAULT");
3857 3856 case FIT_SR_DRIVE_DEGRADED: return ("DEGRADED");
3858 3857 case FIT_SR_PCIE_LINK_DOWN: return ("LINK_DOWN");
3859 3858 case FIT_SR_DRIVE_SOFT_RESET: return ("SOFT_RESET");
3860 3859 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: return ("NEED_FW");
3861 3860 case FIT_SR_DRIVE_INIT_FAULT: return ("INIT_FAULT");
3862 3861 case FIT_SR_DRIVE_BUSY_SANITIZE:return ("BUSY_SANITIZE");
3863 3862 case FIT_SR_DRIVE_BUSY_ERASE: return ("BUSY_ERASE");
3864 3863 case FIT_SR_DRIVE_FW_BOOTING: return ("FW_BOOTING");
3865 3864 default: return ("???");
3866 3865 }
3867 3866 }
3868 3867
3869 3868 /*
3870 3869 *
3871 3870 * Name: skd_skdev_state_to_str, converts binary driver state
3872 3871 * to its corresponding string value.
3873 3872 *
3874 3873 * Inputs: Driver state.
3875 3874 *
3876 3875 * Returns: String representing driver state.
3877 3876 *
3878 3877 */
3879 3878 static const char *
3880 3879 skd_skdev_state_to_str(enum skd_drvr_state state)
3881 3880 {
3882 3881 switch (state) {
3883 3882 case SKD_DRVR_STATE_LOAD: return ("LOAD");
3884 3883 case SKD_DRVR_STATE_IDLE: return ("IDLE");
3885 3884 case SKD_DRVR_STATE_BUSY: return ("BUSY");
3886 3885 case SKD_DRVR_STATE_STARTING: return ("STARTING");
3887 3886 case SKD_DRVR_STATE_ONLINE: return ("ONLINE");
3888 3887 case SKD_DRVR_STATE_PAUSING: return ("PAUSING");
3889 3888 case SKD_DRVR_STATE_PAUSED: return ("PAUSED");
3890 3889 case SKD_DRVR_STATE_DRAINING_TIMEOUT: return ("DRAINING_TIMEOUT");
3891 3890 case SKD_DRVR_STATE_RESTARTING: return ("RESTARTING");
3892 3891 case SKD_DRVR_STATE_RESUMING: return ("RESUMING");
3893 3892 case SKD_DRVR_STATE_STOPPING: return ("STOPPING");
3894 3893 case SKD_DRVR_STATE_SYNCING: return ("SYNCING");
3895 3894 case SKD_DRVR_STATE_FAULT: return ("FAULT");
3896 3895 case SKD_DRVR_STATE_DISAPPEARED: return ("DISAPPEARED");
3897 3896 case SKD_DRVR_STATE_BUSY_ERASE: return ("BUSY_ERASE");
3898 3897 case SKD_DRVR_STATE_BUSY_SANITIZE:return ("BUSY_SANITIZE");
3899 3898 case SKD_DRVR_STATE_BUSY_IMMINENT: return ("BUSY_IMMINENT");
3900 3899 case SKD_DRVR_STATE_WAIT_BOOT: return ("WAIT_BOOT");
3901 3900
3902 3901 default: return ("???");
3903 3902 }
3904 3903 }
3905 3904
3906 3905 /*
3907 3906 *
3908 3907 * Name: skd_skmsg_state_to_str, converts binary driver state
3909 3908 * to its corresponding string value.
3910 3909 *
3911 3910 * Inputs: Msg state.
3912 3911 *
3913 3912 * Returns: String representing msg state.
3914 3913 *
3915 3914 */
3916 3915 static const char *
3917 3916 skd_skmsg_state_to_str(enum skd_fit_msg_state state)
3918 3917 {
3919 3918 switch (state) {
3920 3919 case SKD_MSG_STATE_IDLE: return ("IDLE");
3921 3920 case SKD_MSG_STATE_BUSY: return ("BUSY");
3922 3921 default: return ("???");
3923 3922 }
3924 3923 }
3925 3924
3926 3925 /*
3927 3926 *
3928 3927 * Name: skd_skreq_state_to_str, converts binary req state
3929 3928 * to its corresponding string value.
3930 3929 *
3931 3930 * Inputs: Req state.
3932 3931 *
3933 3932 * Returns: String representing req state.
3934 3933 *
3935 3934 */
3936 3935 static const char *
3937 3936 skd_skreq_state_to_str(enum skd_req_state state)
3938 3937 {
3939 3938 switch (state) {
3940 3939 case SKD_REQ_STATE_IDLE: return ("IDLE");
3941 3940 case SKD_REQ_STATE_SETUP: return ("SETUP");
3942 3941 case SKD_REQ_STATE_BUSY: return ("BUSY");
3943 3942 case SKD_REQ_STATE_COMPLETED: return ("COMPLETED");
3944 3943 case SKD_REQ_STATE_TIMEOUT: return ("TIMEOUT");
3945 3944 case SKD_REQ_STATE_ABORTED: return ("ABORTED");
3946 3945 default: return ("???");
3947 3946 }
3948 3947 }
3949 3948
/*
 *
 * Name: skd_log_skdev, logs device state & parameters.
 *
 * Inputs: skdev - device state structure.
 *	   event - event (string) to log.
 *
 * Returns: Nothing.
 *
 */
static void
skd_log_skdev(struct skd_device *skdev, const char *event)
{
	Dcmn_err(CE_NOTE, "log_skdev(%s) skdev=%p event='%s'",
	    skdev->name, (void *)skdev, event);
	/* Drive (firmware) state vs. driver state-machine state. */
	Dcmn_err(CE_NOTE, "  drive_state=%s(%d) driver_state=%s(%d)",
	    skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
	    skd_skdev_state_to_str(skdev->state), skdev->state);
	/* Queue-depth accounting against the soft/hard limits. */
	Dcmn_err(CE_NOTE, "  busy=%d limit=%d soft=%d hard=%d lowat=%d",
	    skdev->queue_depth_busy, skdev->queue_depth_limit,
	    skdev->soft_queue_depth_limit, skdev->hard_queue_depth_limit,
	    skdev->queue_depth_lowat);
	/* Timeout stamp and completion-queue cursor. */
	Dcmn_err(CE_NOTE, "  timestamp=0x%x cycle=%d cycle_ix=%d",
	    skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
}
3975 3974
/*
 *
 * Name: skd_log_skmsg, logs the skmsg event.
 *
 * Inputs: skdev - device state structure.
 *	   skmsg - FIT message structure.
 *	   event - event string to log.
 *
 * Returns: Nothing.
 *
 */
static void
skd_log_skmsg(struct skd_device *skdev,
    struct skd_fitmsg_context *skmsg, const char *event)
{
	Dcmn_err(CE_NOTE, "log_skmsg:(%s) skmsg=%p event='%s'",
	    skdev->name, (void *)skmsg, event);
	/* Decode the FIT message state for readability. */
	Dcmn_err(CE_NOTE, "  state=%s(%d) id=0x%04x length=%d",
	    skd_skmsg_state_to_str(skmsg->state), skmsg->state,
	    skmsg->id, skmsg->length);
}
3997 3996
3998 3997 /*
3999 3998 *
4000 3999 * Name: skd_log_skreq, logs the skreq event.
4001 4000 *
4002 4001 * Inputs: skdev - device state structure.
4003 4002 * skreq -skreq structure.
4004 4003 * event - event string to log.
4005 4004 *
4006 4005 * Returns: Nothing.
4007 4006 *
4008 4007 */
4009 4008 static void
4010 4009 skd_log_skreq(struct skd_device *skdev,
4011 4010 struct skd_request_context *skreq, const char *event)
4012 4011 {
4013 4012 skd_buf_private_t *pbuf;
4014 4013
4015 4014 Dcmn_err(CE_NOTE, "log_skreq: (%s) skreq=%p pbuf=%p event='%s'",
4016 4015 skdev->name, (void *)skreq, (void *)skreq->pbuf, event);
4017 4016
4018 4017 Dcmn_err(CE_NOTE, " state=%s(%d) id=0x%04x fitmsg=0x%04x",
4019 4018 skd_skreq_state_to_str(skreq->state), skreq->state,
4020 4019 skreq->id, skreq->fitmsg_id);
4021 4020 Dcmn_err(CE_NOTE, " timo=0x%x sg_dir=%d n_sg=%d",
4022 4021 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
4023 4022
4024 4023 if ((pbuf = skreq->pbuf) != NULL) {
4025 4024 uint32_t lba, count;
4026 4025 lba = pbuf->x_xfer->x_blkno;
4027 4026 count = pbuf->x_xfer->x_nblks;
4028 4027 Dcmn_err(CE_NOTE, " pbuf=%p lba=%u(0x%x) count=%u(0x%x) ",
4029 4028 (void *)pbuf, lba, lba, count, count);
4030 4029 Dcmn_err(CE_NOTE, " dir=%s "
4031 4030 " intrs=%" PRId64 " qdepth=%d",
4032 4031 (pbuf->dir & B_READ) ? "Read" : "Write",
4033 4032 skdev->intr_cntr, skdev->queue_depth_busy);
4034 4033 } else {
4035 4034 Dcmn_err(CE_NOTE, " req=NULL\n");
4036 4035 }
4037 4036 }
4038 4037
4039 4038 /*
4040 4039 *
4041 4040 * Name: skd_init_mutex, initializes all mutexes.
4042 4041 *
4043 4042 * Inputs: skdev - device state structure.
4044 4043 *
4045 4044 * Returns: DDI_FAILURE on failure otherwise DDI_SUCCESS.
4046 4045 *
4047 4046 */
4048 4047 static int
4049 4048 skd_init_mutex(skd_device_t *skdev)
4050 4049 {
4051 4050 void *intr;
4052 4051
4053 4052 Dcmn_err(CE_CONT, "(%s%d): init_mutex flags=%x", DRV_NAME,
4054 4053 skdev->instance, skdev->flags);
4055 4054
4056 4055 intr = (void *)(uintptr_t)skdev->intr_pri;
4057 4056
4058 4057 if (skdev->flags & SKD_MUTEX_INITED)
4059 4058 cmn_err(CE_NOTE, "init_mutex: Oh-Oh - already INITED");
4060 4059
4061 4060 /* mutexes to protect the adapter state structure. */
4062 4061 mutex_init(&skdev->skd_lock_mutex, NULL, MUTEX_DRIVER,
4063 4062 DDI_INTR_PRI(intr));
4064 4063 mutex_init(&skdev->skd_intr_mutex, NULL, MUTEX_DRIVER,
4065 4064 DDI_INTR_PRI(intr));
4066 4065 mutex_init(&skdev->waitqueue_mutex, NULL, MUTEX_DRIVER,
4067 4066 DDI_INTR_PRI(intr));
4068 4067 mutex_init(&skdev->skd_internalio_mutex, NULL, MUTEX_DRIVER,
4069 4068 DDI_INTR_PRI(intr));
4070 4069
4071 4070 cv_init(&skdev->cv_waitq, NULL, CV_DRIVER, NULL);
4072 4071
4073 4072 skdev->flags |= SKD_MUTEX_INITED;
4074 4073 if (skdev->flags & SKD_MUTEX_DESTROYED)
4075 4074 skdev->flags &= ~SKD_MUTEX_DESTROYED;
4076 4075
4077 4076 Dcmn_err(CE_CONT, "init_mutex (%s%d): done, flags=%x", DRV_NAME,
4078 4077 skdev->instance, skdev->flags);
4079 4078
4080 4079 return (DDI_SUCCESS);
4081 4080 }
4082 4081
4083 4082 /*
4084 4083 *
4085 4084 * Name: skd_destroy_mutex, destroys all mutexes.
4086 4085 *
4087 4086 * Inputs: skdev - device state structure.
4088 4087 *
4089 4088 * Returns: Nothing.
4090 4089 *
4091 4090 */
4092 4091 static void
4093 4092 skd_destroy_mutex(skd_device_t *skdev)
4094 4093 {
4095 4094 if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) {
4096 4095 if (skdev->flags & SKD_MUTEX_INITED) {
4097 4096 mutex_destroy(&skdev->waitqueue_mutex);
4098 4097 mutex_destroy(&skdev->skd_intr_mutex);
4099 4098 mutex_destroy(&skdev->skd_lock_mutex);
4100 4099 mutex_destroy(&skdev->skd_internalio_mutex);
4101 4100
4102 4101 cv_destroy(&skdev->cv_waitq);
4103 4102
4104 4103 skdev->flags |= SKD_MUTEX_DESTROYED;
4105 4104
4106 4105 if (skdev->flags & SKD_MUTEX_INITED)
4107 4106 skdev->flags &= ~SKD_MUTEX_INITED;
4108 4107 }
4109 4108 }
4110 4109 }
4111 4110
4112 4111 /*
4113 4112 *
4114 4113 * Name: skd_setup_intr, setup the interrupt handling
4115 4114 *
4116 4115 * Inputs: skdev - device state structure.
4117 4116 * intr_type - requested DDI interrupt type.
4118 4117 *
4119 4118 * Returns: DDI_FAILURE on failure otherwise DDI_SUCCESS.
4120 4119 *
4121 4120 */
4122 4121 static int
4123 4122 skd_setup_intr(skd_device_t *skdev, int intr_type)
4124 4123 {
4125 4124 int32_t count = 0;
4126 4125 int32_t avail = 0;
4127 4126 int32_t actual = 0;
4128 4127 int32_t ret;
4129 4128 uint32_t i;
4130 4129
4131 4130 Dcmn_err(CE_CONT, "(%s%d): setup_intr", DRV_NAME, skdev->instance);
4132 4131
4133 4132 /* Get number of interrupts the platform h/w supports */
4134 4133 if (((ret = ddi_intr_get_nintrs(skdev->dip, intr_type, &count)) !=
4135 4134 DDI_SUCCESS) || count == 0) {
4136 4135 cmn_err(CE_WARN, "!intr_setup failed, nintrs ret=%xh, cnt=%xh",
4137 4136 ret, count);
4138 4137
4139 4138 return (DDI_FAILURE);
4140 4139 }
4141 4140
4142 4141 /* Get number of available system interrupts */
4143 4142 if (((ret = ddi_intr_get_navail(skdev->dip, intr_type, &avail)) !=
4144 4143 DDI_SUCCESS) || avail == 0) {
4145 4144 cmn_err(CE_WARN, "!intr_setup failed, navail ret=%xh, "
4146 4145 "avail=%xh", ret, avail);
4147 4146
4148 4147 return (DDI_FAILURE);
4149 4148 }
4150 4149
4151 4150 if (intr_type == DDI_INTR_TYPE_MSIX && avail < SKD_MSIX_MAXAIF) {
4152 4151 cmn_err(CE_WARN, "!intr_setup failed, min MSI-X h/w vectors "
4153 4152 "req'd: %d, avail: %d",
4154 4153 SKD_MSIX_MAXAIF, count);
4155 4154
4156 4155 return (DDI_FAILURE);
4157 4156 }
4158 4157
4159 4158 /* Allocate space for interrupt handles */
4160 4159 skdev->hsize = sizeof (ddi_intr_handle_t) * avail;
4161 4160 skdev->htable = kmem_zalloc(skdev->hsize, KM_SLEEP);
4162 4161
4163 4162 /* Allocate the interrupts */
4164 4163 if ((ret = ddi_intr_alloc(skdev->dip, skdev->htable, intr_type,
4165 4164 0, count, &actual, 0)) != DDI_SUCCESS) {
4166 4165 cmn_err(CE_WARN, "!intr_setup failed, intr_alloc ret=%xh, "
4167 4166 "count = %xh, " "actual=%xh", ret, count, actual);
4168 4167
4169 4168 skd_release_intr(skdev);
4170 4169
4171 4170 return (DDI_FAILURE);
4172 4171 }
4173 4172
4174 4173 skdev->intr_cnt = actual;
4175 4174
4176 4175 if (intr_type == DDI_INTR_TYPE_FIXED)
4177 4176 (void) ddi_intr_set_pri(skdev->htable[0], 10);
4178 4177
4179 4178 /* Get interrupt priority */
4180 4179 if ((ret = ddi_intr_get_pri(skdev->htable[0], &skdev->intr_pri)) !=
4181 4180 DDI_SUCCESS) {
4182 4181 cmn_err(CE_WARN, "!intr_setup failed, get_pri ret=%xh", ret);
4183 4182 skd_release_intr(skdev);
4184 4183
4185 4184 return (ret);
4186 4185 }
4187 4186
4188 4187 /* Add the interrupt handlers */
4189 4188 for (i = 0; i < actual; i++) {
4190 4189 if ((ret = ddi_intr_add_handler(skdev->htable[i],
4191 4190 skd_isr_aif, (void *)skdev, (void *)((ulong_t)i))) !=
4192 4191 DDI_SUCCESS) {
4193 4192 cmn_err(CE_WARN, "!intr_setup failed, addh#=%xh, "
4194 4193 "act=%xh, ret=%xh", i, actual, ret);
4195 4194 skd_release_intr(skdev);
4196 4195
4197 4196 return (ret);
4198 4197 }
4199 4198 }
4200 4199
4201 4200 /* Setup mutexes */
4202 4201 if ((ret = skd_init_mutex(skdev)) != DDI_SUCCESS) {
4203 4202 cmn_err(CE_WARN, "!intr_setup failed, mutex init ret=%xh", ret);
4204 4203 skd_release_intr(skdev);
4205 4204
4206 4205 return (ret);
4207 4206 }
4208 4207
4209 4208 /* Get the capabilities */
4210 4209 (void) ddi_intr_get_cap(skdev->htable[0], &skdev->intr_cap);
4211 4210
4212 4211 /* Enable interrupts */
4213 4212 if (skdev->intr_cap & DDI_INTR_FLAG_BLOCK) {
4214 4213 if ((ret = ddi_intr_block_enable(skdev->htable,
4215 4214 skdev->intr_cnt)) != DDI_SUCCESS) {
4216 4215 cmn_err(CE_WARN, "!failed, intr_setup block enable, "
4217 4216 "ret=%xh", ret);
4218 4217 skd_destroy_mutex(skdev);
4219 4218 skd_release_intr(skdev);
4220 4219
4221 4220 return (ret);
4222 4221 }
4223 4222 } else {
4224 4223 for (i = 0; i < skdev->intr_cnt; i++) {
4225 4224 if ((ret = ddi_intr_enable(skdev->htable[i])) !=
4226 4225 DDI_SUCCESS) {
4227 4226 cmn_err(CE_WARN, "!intr_setup failed, "
4228 4227 "intr enable, ret=%xh", ret);
4229 4228 skd_destroy_mutex(skdev);
4230 4229 skd_release_intr(skdev);
4231 4230
4232 4231 return (ret);
4233 4232 }
4234 4233 }
4235 4234 }
4236 4235
4237 4236 if (intr_type == DDI_INTR_TYPE_FIXED)
4238 4237 (void) ddi_intr_clr_mask(skdev->htable[0]);
4239 4238
4240 4239 skdev->irq_type = intr_type;
4241 4240
4242 4241 return (DDI_SUCCESS);
4243 4242 }
4244 4243
4245 4244 /*
4246 4245 *
4247 4246 * Name: skd_disable_intr, disable interrupt handling.
4248 4247 *
4249 4248 * Inputs: skdev - device state structure.
4250 4249 *
4251 4250 * Returns: Nothing.
4252 4251 *
4253 4252 */
4254 4253 static void
4255 4254 skd_disable_intr(skd_device_t *skdev)
4256 4255 {
4257 4256 uint32_t i, rval;
4258 4257
4259 4258 if (skdev->intr_cap & DDI_INTR_FLAG_BLOCK) {
4260 4259 /* Remove AIF block interrupts (MSI/MSI-X) */
4261 4260 if ((rval = ddi_intr_block_disable(skdev->htable,
4262 4261 skdev->intr_cnt)) != DDI_SUCCESS) {
4263 4262 cmn_err(CE_WARN, "!failed intr block disable, rval=%x",
4264 4263 rval);
4265 4264 }
4266 4265 } else {
4267 4266 /* Remove AIF non-block interrupts (fixed). */
4268 4267 for (i = 0; i < skdev->intr_cnt; i++) {
4269 4268 if ((rval = ddi_intr_disable(skdev->htable[i])) !=
4270 4269 DDI_SUCCESS) {
4271 4270 cmn_err(CE_WARN, "!failed intr disable, "
4272 4271 "intr#=%xh, " "rval=%xh", i, rval);
4273 4272 }
4274 4273 }
4275 4274 }
4276 4275 }
4277 4276
4278 4277 /*
4279 4278 *
4280 4279 * Name: skd_release_intr, disables interrupt handling.
4281 4280 *
4282 4281 * Inputs: skdev - device state structure.
4283 4282 *
4284 4283 * Returns: Nothing.
4285 4284 *
4286 4285 */
4287 4286 static void
4288 4287 skd_release_intr(skd_device_t *skdev)
4289 4288 {
4290 4289 int32_t i;
4291 4290 int rval;
4292 4291
4293 4292
4294 4293 Dcmn_err(CE_CONT, "REL_INTR intr_cnt=%d", skdev->intr_cnt);
4295 4294
4296 4295 if (skdev->irq_type == 0) {
4297 4296 Dcmn_err(CE_CONT, "release_intr: (%s%d): done",
4298 4297 DRV_NAME, skdev->instance);
4299 4298 return;
4300 4299 }
4301 4300
4302 4301 if (skdev->htable != NULL && skdev->hsize > 0) {
4303 4302 i = (int32_t)skdev->hsize / (int32_t)sizeof (ddi_intr_handle_t);
4304 4303
4305 4304 while (i-- > 0) {
4306 4305 if (skdev->htable[i] == 0) {
4307 4306 Dcmn_err(CE_NOTE, "htable[%x]=0h", i);
4308 4307 continue;
4309 4308 }
4310 4309
4311 4310 if ((rval = ddi_intr_disable(skdev->htable[i])) !=
4312 4311 DDI_SUCCESS)
4313 4312 Dcmn_err(CE_NOTE, "release_intr: intr_disable "
4314 4313 "htable[%d], rval=%d", i, rval);
4315 4314
4316 4315 if (i < skdev->intr_cnt) {
4317 4316 if ((rval = ddi_intr_remove_handler(
4318 4317 skdev->htable[i])) != DDI_SUCCESS)
4319 4318 cmn_err(CE_WARN, "!release_intr: "
4320 4319 "intr_remove_handler FAILED, "
4321 4320 "rval=%d", rval);
4322 4321
4323 4322 Dcmn_err(CE_NOTE, "release_intr: "
4324 4323 "remove_handler htable[%d]", i);
4325 4324 }
4326 4325
4327 4326 if ((rval = ddi_intr_free(skdev->htable[i])) !=
4328 4327 DDI_SUCCESS)
4329 4328 cmn_err(CE_WARN, "!release_intr: intr_free "
4330 4329 "FAILED, rval=%d", rval);
4331 4330 Dcmn_err(CE_NOTE, "release_intr: intr_free htable[%d]",
4332 4331 i);
4333 4332 }
4334 4333
4335 4334 kmem_free(skdev->htable, skdev->hsize);
4336 4335 skdev->htable = NULL;
4337 4336 }
4338 4337
4339 4338 skdev->hsize = 0;
4340 4339 skdev->intr_cnt = 0;
4341 4340 skdev->intr_pri = 0;
4342 4341 skdev->intr_cap = 0;
4343 4342 skdev->irq_type = 0;
4344 4343 }
4345 4344
4346 4345 /*
4347 4346 *
4348 4347 * Name: skd_dealloc_resources, deallocate resources allocated
4349 4348 * during attach.
4350 4349 *
4351 4350 * Inputs: dip - DDI device info pointer.
4352 4351 * skdev - device state structure.
4353 4352 * seq - bit flag representing allocated item.
4354 4353 * instance - device instance.
4355 4354 *
4356 4355 * Returns: Nothing.
4357 4356 *
4358 4357 */
4359 4358 /* ARGSUSED */ /* Upstream common source with other platforms. */
4360 4359 static void
4361 4360 skd_dealloc_resources(dev_info_t *dip, skd_device_t *skdev,
4362 4361 uint32_t seq, int instance)
4363 4362 {
4364 4363
4365 4364 if (skdev == NULL)
4366 4365 return;
4367 4366
4368 4367 if (seq & SKD_CONSTRUCTED)
4369 4368 skd_destruct(skdev);
4370 4369
4371 4370 if (seq & SKD_INTR_ADDED) {
4372 4371 skd_disable_intr(skdev);
4373 4372 skd_release_intr(skdev);
4374 4373 }
4375 4374
4376 4375 if (seq & SKD_DEV_IOBASE_MAPPED)
4377 4376 ddi_regs_map_free(&skdev->dev_handle);
4378 4377
4379 4378 if (seq & SKD_IOMAP_IOBASE_MAPPED)
4380 4379 ddi_regs_map_free(&skdev->iomap_handle);
4381 4380
4382 4381 if (seq & SKD_REGS_MAPPED)
4383 4382 ddi_regs_map_free(&skdev->iobase_handle);
4384 4383
4385 4384 if (seq & SKD_CONFIG_SPACE_SETUP)
4386 4385 pci_config_teardown(&skdev->pci_handle);
4387 4386
4388 4387 if (seq & SKD_SOFT_STATE_ALLOCED) {
4389 4388 if (skdev->pathname &&
4390 4389 (skdev->flags & SKD_PATHNAME_ALLOCED)) {
4391 4390 kmem_free(skdev->pathname,
4392 4391 strlen(skdev->pathname)+1);
4393 4392 }
4394 4393 }
4395 4394
4396 4395 if (skdev->s1120_devid)
4397 4396 ddi_devid_free(skdev->s1120_devid);
4398 4397 }
4399 4398
4400 4399 /*
4401 4400 *
4402 4401 * Name: skd_setup_interrupt, sets up the appropriate interrupt type
4403 4402 * msi, msix, or fixed.
4404 4403 *
4405 4404 * Inputs: skdev - device state structure.
4406 4405 *
4407 4406 * Returns: DDI_FAILURE on failure otherwise DDI_SUCCESS.
4408 4407 *
4409 4408 */
4410 4409 static int
4411 4410 skd_setup_interrupts(skd_device_t *skdev)
4412 4411 {
4413 4412 int32_t rval = DDI_FAILURE;
4414 4413 int32_t i;
4415 4414 int32_t itypes = 0;
4416 4415
4417 4416 /*
4418 4417 * See what types of interrupts this adapter and platform support
4419 4418 */
4420 4419 if ((i = ddi_intr_get_supported_types(skdev->dip, &itypes)) !=
4421 4420 DDI_SUCCESS) {
4422 4421 cmn_err(CE_NOTE, "intr supported types failed, rval=%xh, ", i);
4423 4422 return (DDI_FAILURE);
4424 4423 }
4425 4424
4426 4425 Dcmn_err(CE_NOTE, "%s:supported interrupts types: %x",
4427 4426 skdev->name, itypes);
4428 4427
4429 4428 itypes &= skdev->irq_type;
4430 4429
4431 4430 if (!skd_disable_msix && (itypes & DDI_INTR_TYPE_MSIX) &&
4432 4431 (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_MSIX)) == DDI_SUCCESS) {
4433 4432 cmn_err(CE_NOTE, "!%s: successful MSI-X setup",
4434 4433 skdev->name);
4435 4434 } else if (!skd_disable_msi && (itypes & DDI_INTR_TYPE_MSI) &&
4436 4435 (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_MSI)) == DDI_SUCCESS) {
4437 4436 cmn_err(CE_NOTE, "!%s: successful MSI setup",
4438 4437 skdev->name);
4439 4438 } else if ((itypes & DDI_INTR_TYPE_FIXED) &&
4440 4439 (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_FIXED))
4441 4440 == DDI_SUCCESS) {
4442 4441 cmn_err(CE_NOTE, "!%s: successful fixed intr setup",
4443 4442 skdev->name);
4444 4443 } else {
4445 4444 cmn_err(CE_WARN, "!%s: no supported interrupt types",
4446 4445 skdev->name);
4447 4446 return (DDI_FAILURE);
4448 4447 }
4449 4448
4450 4449 Dcmn_err(CE_CONT, "%s: setup interrupts done", skdev->name);
4451 4450
4452 4451 return (rval);
4453 4452 }
4454 4453
4455 4454 /*
4456 4455 *
4457 4456 * Name: skd_get_properties, retrieves properties from skd.conf.
4458 4457 *
4459 4458 * Inputs: skdev - device state structure.
4460 4459 * dip - dev_info data structure.
4461 4460 *
4462 4461 * Returns: Nothing.
4463 4462 *
4464 4463 */
4465 4464 /* ARGSUSED */ /* Upstream common source with other platforms. */
4466 4465 static void
4467 4466 skd_get_properties(dev_info_t *dip, skd_device_t *skdev)
4468 4467 {
4469 4468 int prop_value;
4470 4469
4471 4470 skd_isr_type = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4472 4471 "intr-type-cap", -1);
4473 4472
4474 4473 prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4475 4474 "max-scsi-reqs", -1);
4476 4475 if (prop_value >= 1 && prop_value <= SKD_MAX_QUEUE_DEPTH)
4477 4476 skd_max_queue_depth = prop_value;
4478 4477
4479 4478 prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4480 4479 "max-scsi-reqs-per-msg", -1);
4481 4480 if (prop_value >= 1 && prop_value <= SKD_MAX_REQ_PER_MSG)
4482 4481 skd_max_req_per_msg = prop_value;
4483 4482
4484 4483 prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4485 4484 "max-sgs-per-req", -1);
4486 4485 if (prop_value >= 1 && prop_value <= SKD_MAX_N_SG_PER_REQ)
4487 4486 skd_sgs_per_request = prop_value;
4488 4487
4489 4488 prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4490 4489 "dbg-level", -1);
4491 4490 if (prop_value >= 1 && prop_value <= 2)
4492 4491 skd_dbg_level = prop_value;
4493 4492 }
4494 4493
4495 4494 /*
4496 4495 *
4497 4496 * Name: skd_wait_for_s1120, wait for device to finish
4498 4497 * its initialization.
4499 4498 *
4500 4499 * Inputs: skdev - device state structure.
4501 4500 *
4502 4501 * Returns: DDI_SUCCESS or DDI_FAILURE.
4503 4502 *
4504 4503 */
4505 4504 static int
4506 4505 skd_wait_for_s1120(skd_device_t *skdev)
4507 4506 {
4508 4507 clock_t cur_ticks, tmo;
4509 4508 int loop_cntr = 0;
4510 4509 int rc = DDI_FAILURE;
4511 4510
4512 4511 mutex_enter(&skdev->skd_internalio_mutex);
4513 4512
4514 4513 while (skdev->gendisk_on == 0) {
4515 4514 cur_ticks = ddi_get_lbolt();
4516 4515 tmo = cur_ticks + drv_usectohz(MICROSEC);
4517 4516 if (cv_timedwait(&skdev->cv_waitq,
4518 4517 &skdev->skd_internalio_mutex, tmo) == -1) {
4519 4518 /* Oops - timed out */
4520 4519 if (loop_cntr++ > 10)
4521 4520 break;
4522 4521 }
4523 4522 }
4524 4523
4525 4524 mutex_exit(&skdev->skd_internalio_mutex);
4526 4525
4527 4526 if (skdev->gendisk_on == 1)
4528 4527 rc = DDI_SUCCESS;
4529 4528
4530 4529 return (rc);
4531 4530 }
4532 4531
4533 4532 /*
4534 4533 *
4535 4534 * Name: skd_update_props, updates certain device properties.
4536 4535 *
4537 4536 * Inputs: skdev - device state structure.
4538 4537 * dip - dev info structure
4539 4538 *
4540 4539 * Returns: Nothing.
4541 4540 *
4542 4541 */
4543 4542 static void
4544 4543 skd_update_props(skd_device_t *skdev, dev_info_t *dip)
4545 4544 {
4546 4545 int blksize = 512;
4547 4546
4548 4547 if ((ddi_prop_update_int64(DDI_DEV_T_NONE, dip, "device-nblocks",
4549 4548 skdev->Nblocks) != DDI_SUCCESS) ||
4550 4549 (ddi_prop_update_int(DDI_DEV_T_NONE, dip, "device-blksize",
4551 4550 blksize) != DDI_SUCCESS)) {
4552 4551 cmn_err(CE_NOTE, "%s: FAILED to create driver properties",
4553 4552 skdev->name);
4554 4553 }
4555 4554 }
4556 4555
4557 4556 /*
4558 4557 *
4559 4558 * Name: skd_setup_devid, sets up device ID info.
4560 4559 *
4561 4560 * Inputs: skdev - device state structure.
4562 4561 * devid - Device ID for the DDI.
4563 4562 *
4564 4563 * Returns: DDI_SUCCESS or DDI_FAILURE.
4565 4564 *
4566 4565 */
4567 4566 static int
4568 4567 skd_setup_devid(skd_device_t *skdev, ddi_devid_t *devid)
4569 4568 {
4570 4569 int rc, sz_model, sz_sn, sz;
4571 4570
4572 4571 sz_model = scsi_ascii_inquiry_len(skdev->inq_product_id,
4573 4572 strlen(skdev->inq_product_id));
4574 4573 sz_sn = scsi_ascii_inquiry_len(skdev->inq_serial_num,
4575 4574 strlen(skdev->inq_serial_num));
4576 4575 sz = sz_model + sz_sn + 1;
4577 4576
4578 4577 (void) snprintf(skdev->devid_str, sizeof (skdev->devid_str),
4579 4578 "%.*s=%.*s", sz_model, skdev->inq_product_id, sz_sn,
4580 4579 skdev->inq_serial_num);
4581 4580 rc = ddi_devid_init(skdev->dip, DEVID_SCSI_SERIAL, sz,
4582 4581 skdev->devid_str, devid);
4583 4582
4584 4583 if (rc != DDI_SUCCESS)
4585 4584 cmn_err(CE_WARN, "!%s: devid_init FAILED", skdev->name);
4586 4585
4587 4586 return (rc);
4588 4587
4589 4588 }
4590 4589
4591 4590 /*
4592 4591 *
4593 4592 * Name: skd_bd_attach, attach to blkdev driver
4594 4593 *
4595 4594 * Inputs: skdev - device state structure.
4596 4595 * dip - device info structure.
4597 4596 *
4598 4597 * Returns: DDI_SUCCESS or DDI_FAILURE.
4599 4598 *
4600 4599 */
4601 4600 static int
4602 4601 skd_bd_attach(dev_info_t *dip, skd_device_t *skdev)
4603 4602 {
4604 4603 int rv;
4605 4604
4606 4605 skdev->s_bdh = bd_alloc_handle(skdev, &skd_bd_ops,
4607 4606 &skd_64bit_io_dma_attr, KM_SLEEP);
4608 4607
4609 4608 if (skdev->s_bdh == NULL) {
4610 4609 cmn_err(CE_WARN, "!skd_bd_attach: FAILED");
4611 4610
4612 4611 return (DDI_FAILURE);
4613 4612 }
4614 4613
4615 4614 rv = bd_attach_handle(dip, skdev->s_bdh);
4616 4615
4617 4616 if (rv != DDI_SUCCESS) {
4618 4617 cmn_err(CE_WARN, "!bd_attach_handle FAILED\n");
4619 4618 } else {
4620 4619 Dcmn_err(CE_NOTE, "bd_attach_handle OK\n");
4621 4620 skdev->bd_attached++;
4622 4621 }
4623 4622
4624 4623 return (rv);
4625 4624 }
4626 4625
4627 4626 /*
4628 4627 *
4629 4628 * Name: skd_bd_detach, detach from the blkdev driver.
4630 4629 *
4631 4630 * Inputs: skdev - device state structure.
4632 4631 *
4633 4632 * Returns: Nothing.
4634 4633 *
4635 4634 */
4636 4635 static void
4637 4636 skd_bd_detach(skd_device_t *skdev)
4638 4637 {
4639 4638 if (skdev->bd_attached)
4640 4639 (void) bd_detach_handle(skdev->s_bdh);
4641 4640
4642 4641 bd_free_handle(skdev->s_bdh);
4643 4642 }
4644 4643
4645 4644 /*
4646 4645 *
4647 4646 * Name: skd_attach, attach sdk device driver
4648 4647 *
4649 4648 * Inputs: dip - device info structure.
4650 4649 * cmd - DDI attach argument (ATTACH, RESUME, etc.)
4651 4650 *
4652 4651 * Returns: DDI_SUCCESS or DDI_FAILURE.
4653 4652 *
4654 4653 */
4655 4654 static int
4656 4655 skd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
4657 4656 {
4658 4657 int instance;
4659 4658 int nregs;
4660 4659 skd_device_t *skdev = NULL;
4661 4660 int inx;
4662 4661 uint16_t cmd_reg;
4663 4662 int progress = 0;
4664 4663 char name[MAXPATHLEN];
4665 4664 off_t regsize;
4666 4665 char pci_str[32];
4667 4666 char fw_version[8];
4668 4667
4669 4668 instance = ddi_get_instance(dip);
4670 4669
4671 4670 (void) ddi_get_parent_data(dip);
4672 4671
4673 4672 switch (cmd) {
4674 4673 case DDI_ATTACH:
4675 4674 break;
4676 4675
4677 4676 case DDI_RESUME:
4678 4677 /* Re-enable timer */
4679 4678 skd_start_timer(skdev);
4680 4679
4681 4680 return (DDI_SUCCESS);
4682 4681
4683 4682 default:
4684 4683 return (DDI_FAILURE);
4685 4684 }
4686 4685
4687 4686 Dcmn_err(CE_NOTE, "sTec S1120 Driver v%s Instance: %d",
4688 4687 VERSIONSTR, instance);
4689 4688
4690 4689 /*
4691 4690 * Check that hardware is installed in a DMA-capable slot
4692 4691 */
4693 4692 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
4694 4693 cmn_err(CE_WARN, "!%s%d: installed in a "
4695 4694 "slot that isn't DMA-capable slot", DRV_NAME, instance);
4696 4695 return (DDI_FAILURE);
4697 4696 }
4698 4697
4699 4698 /*
4700 4699 * No support for high-level interrupts
4701 4700 */
4702 4701 if (ddi_intr_hilevel(dip, 0) != 0) {
4703 4702 cmn_err(CE_WARN, "!%s%d: High level interrupt not supported",
4704 4703 DRV_NAME, instance);
4705 4704 return (DDI_FAILURE);
4706 4705 }
4707 4706
4708 4707 /*
4709 4708 * Allocate our per-device-instance structure
4710 4709 */
4711 4710 if (ddi_soft_state_zalloc(skd_state, instance) !=
4712 4711 DDI_SUCCESS) {
4713 4712 cmn_err(CE_WARN, "!%s%d: soft state zalloc failed ",
4714 4713 DRV_NAME, instance);
4715 4714 return (DDI_FAILURE);
4716 4715 }
4717 4716
4718 4717 progress |= SKD_SOFT_STATE_ALLOCED;
4719 4718
4720 4719 skdev = ddi_get_soft_state(skd_state, instance);
4721 4720 if (skdev == NULL) {
4722 4721 cmn_err(CE_WARN, "!%s%d: Unable to get soft state structure",
4723 4722 DRV_NAME, instance);
4724 4723 goto skd_attach_failed;
4725 4724 }
4726 4725
4727 4726 (void) snprintf(skdev->name, sizeof (skdev->name),
4728 4727 DRV_NAME "%d", instance);
4729 4728
4730 4729 skdev->dip = dip;
4731 4730 skdev->instance = instance;
4732 4731
4733 4732 ddi_set_driver_private(dip, skdev);
4734 4733
4735 4734 (void) ddi_pathname(dip, name);
4736 4735 for (inx = strlen(name); inx; inx--) {
4737 4736 if (name[inx] == ',') {
4738 4737 name[inx] = '\0';
4739 4738 break;
4740 4739 }
4741 4740 if (name[inx] == '@') {
4742 4741 break;
4743 4742 }
4744 4743 }
4745 4744
4746 4745 skdev->pathname = kmem_zalloc(strlen(name) + 1, KM_SLEEP);
4747 4746 (void) strlcpy(skdev->pathname, name, strlen(name) + 1);
4748 4747
4749 4748 progress |= SKD_PATHNAME_ALLOCED;
4750 4749 skdev->flags |= SKD_PATHNAME_ALLOCED;
4751 4750
4752 4751 if (pci_config_setup(dip, &skdev->pci_handle) != DDI_SUCCESS) {
4753 4752 cmn_err(CE_WARN, "!%s%d: pci_config_setup FAILED",
4754 4753 DRV_NAME, instance);
4755 4754 goto skd_attach_failed;
4756 4755 }
4757 4756
4758 4757 progress |= SKD_CONFIG_SPACE_SETUP;
4759 4758
4760 4759 /* Save adapter path. */
4761 4760
4762 4761 (void) ddi_dev_nregs(dip, &nregs);
4763 4762
4764 4763 /*
4765 4764 * 0x0 Configuration Space
4766 4765 * 0x1 I/O Space
4767 4766 * 0x2 s1120 register space
4768 4767 */
4769 4768 if (ddi_dev_regsize(dip, 1, ®size) != DDI_SUCCESS ||
4770 4769 ddi_regs_map_setup(dip, 1, &skdev->iobase, 0, regsize,
4771 4770 &dev_acc_attr, &skdev->iobase_handle) != DDI_SUCCESS) {
4772 4771 cmn_err(CE_WARN, "!%s%d: regs_map_setup(mem) failed",
4773 4772 DRV_NAME, instance);
4774 4773 goto skd_attach_failed;
4775 4774 }
4776 4775 progress |= SKD_REGS_MAPPED;
4777 4776
4778 4777 skdev->iomap_iobase = skdev->iobase;
4779 4778 skdev->iomap_handle = skdev->iobase_handle;
4780 4779
4781 4780 Dcmn_err(CE_NOTE, "%s: PCI iobase=%ph, iomap=%ph, regnum=%d, "
4782 4781 "regsize=%ld", skdev->name, (void *)skdev->iobase,
4783 4782 (void *)skdev->iomap_iobase, 1, regsize);
4784 4783
4785 4784 if (ddi_dev_regsize(dip, 2, ®size) != DDI_SUCCESS ||
4786 4785 ddi_regs_map_setup(dip, 2, &skdev->dev_iobase, 0, regsize,
4787 4786 &dev_acc_attr, &skdev->dev_handle) != DDI_SUCCESS) {
4788 4787 cmn_err(CE_WARN, "!%s%d: regs_map_setup(mem) failed",
4789 4788 DRV_NAME, instance);
4790 4789
4791 4790 goto skd_attach_failed;
4792 4791 }
4793 4792
4794 4793 skdev->dev_memsize = (int)regsize;
4795 4794
4796 4795 Dcmn_err(CE_NOTE, "%s: DEV iobase=%ph regsize=%d",
4797 4796 skdev->name, (void *)skdev->dev_iobase,
4798 4797 skdev->dev_memsize);
4799 4798
4800 4799 progress |= SKD_DEV_IOBASE_MAPPED;
4801 4800
4802 4801 cmd_reg = pci_config_get16(skdev->pci_handle, PCI_CONF_COMM);
4803 4802 cmd_reg |= (PCI_COMM_ME | PCI_COMM_INTX_DISABLE);
4804 4803 cmd_reg &= ~PCI_COMM_PARITY_DETECT;
4805 4804 pci_config_put16(skdev->pci_handle, PCI_CONF_COMM, cmd_reg);
4806 4805
4807 4806 /* Get adapter PCI device information. */
4808 4807 skdev->vendor_id = pci_config_get16(skdev->pci_handle, PCI_CONF_VENID);
4809 4808 skdev->device_id = pci_config_get16(skdev->pci_handle, PCI_CONF_DEVID);
4810 4809
4811 4810 Dcmn_err(CE_NOTE, "%s: %x-%x card detected",
4812 4811 skdev->name, skdev->vendor_id, skdev->device_id);
4813 4812
4814 4813 skd_get_properties(dip, skdev);
4815 4814
4816 4815 (void) skd_init(skdev);
4817 4816
4818 4817 if (skd_construct(skdev, instance)) {
4819 4818 cmn_err(CE_WARN, "!%s: construct FAILED", skdev->name);
4820 4819 goto skd_attach_failed;
4821 4820 }
4822 4821
4823 4822 progress |= SKD_PROBED;
4824 4823 progress |= SKD_CONSTRUCTED;
4825 4824
4826 4825 SIMPLEQ_INIT(&skdev->waitqueue);
4827 4826
4828 4827 /*
4829 4828 * Setup interrupt handler
4830 4829 */
4831 4830 if (skd_setup_interrupts(skdev) != DDI_SUCCESS) {
4832 4831 cmn_err(CE_WARN, "!%s: Unable to add interrupt",
4833 4832 skdev->name);
4834 4833 goto skd_attach_failed;
4835 4834 }
4836 4835
4837 4836 progress |= SKD_INTR_ADDED;
4838 4837
4839 4838 ADAPTER_STATE_LOCK(skdev);
4840 4839 skdev->flags |= SKD_ATTACHED;
4841 4840 ADAPTER_STATE_UNLOCK(skdev);
4842 4841
4843 4842 skdev->d_blkshift = 9;
4844 4843 progress |= SKD_ATTACHED;
4845 4844
4846 4845
4847 4846 skd_start_device(skdev);
4848 4847
4849 4848 ADAPTER_STATE_LOCK(skdev);
4850 4849 skdev->progress = progress;
4851 4850 ADAPTER_STATE_UNLOCK(skdev);
4852 4851
4853 4852 /*
4854 4853 * Give the board a chance to
4855 4854 * complete its initialization.
4856 4855 */
4857 4856 if (skdev->gendisk_on != 1)
4858 4857 (void) skd_wait_for_s1120(skdev);
4859 4858
4860 4859 if (skdev->gendisk_on != 1) {
4861 4860 cmn_err(CE_WARN, "!%s: s1120 failed to come ONLINE",
4862 4861 skdev->name);
4863 4862 goto skd_attach_failed;
4864 4863 }
4865 4864
4866 4865 ddi_report_dev(dip);
4867 4866
4868 4867 skd_send_internal_skspcl(skdev, &skdev->internal_skspcl, INQUIRY);
4869 4868
4870 4869 skdev->disks_initialized++;
4871 4870
4872 4871 (void) strcpy(fw_version, "???");
4873 4872 (void) skd_pci_info(skdev, pci_str, sizeof (pci_str));
4874 4873 Dcmn_err(CE_NOTE, " sTec S1120 Driver(%s) version %s-b%s",
4875 4874 DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
4876 4875
4877 4876 Dcmn_err(CE_NOTE, " sTec S1120 %04x:%04x %s 64 bit",
4878 4877 skdev->vendor_id, skdev->device_id, pci_str);
4879 4878
4880 4879 Dcmn_err(CE_NOTE, " sTec S1120 %s\n", skdev->pathname);
4881 4880
4882 4881 if (*skdev->inq_serial_num)
4883 4882 Dcmn_err(CE_NOTE, " sTec S1120 serial#=%s",
4884 4883 skdev->inq_serial_num);
4885 4884
4886 4885 if (*skdev->inq_product_id &&
4887 4886 *skdev->inq_product_rev)
4888 4887 Dcmn_err(CE_NOTE, " sTec S1120 prod ID=%s prod rev=%s",
4889 4888 skdev->inq_product_id, skdev->inq_product_rev);
4890 4889
4891 4890 Dcmn_err(CE_NOTE, "%s: intr-type-cap: %d",
4892 4891 skdev->name, skdev->irq_type);
4893 4892 Dcmn_err(CE_NOTE, "%s: max-scsi-reqs: %d",
4894 4893 skdev->name, skd_max_queue_depth);
4895 4894 Dcmn_err(CE_NOTE, "%s: max-sgs-per-req: %d",
4896 4895 skdev->name, skd_sgs_per_request);
4897 4896 Dcmn_err(CE_NOTE, "%s: max-scsi-req-per-msg: %d",
4898 4897 skdev->name, skd_max_req_per_msg);
4899 4898
4900 4899 if (skd_bd_attach(dip, skdev) == DDI_FAILURE)
4901 4900 goto skd_attach_failed;
4902 4901
4903 4902 skd_update_props(skdev, dip);
4904 4903
4905 4904 /* Enable timer */
4906 4905 skd_start_timer(skdev);
4907 4906
4908 4907 ADAPTER_STATE_LOCK(skdev);
4909 4908 skdev->progress = progress;
4910 4909 ADAPTER_STATE_UNLOCK(skdev);
4911 4910
4912 4911 skdev->attached = 1;
4913 4912 return (DDI_SUCCESS);
4914 4913
4915 4914 skd_attach_failed:
4916 4915 skd_dealloc_resources(dip, skdev, progress, instance);
4917 4916
4918 4917 if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) {
4919 4918 skd_destroy_mutex(skdev);
4920 4919 }
4921 4920
4922 4921 ddi_soft_state_free(skd_state, instance);
4923 4922
4924 4923 cmn_err(CE_WARN, "!skd_attach FAILED: progress=%x", progress);
4925 4924 return (DDI_FAILURE);
4926 4925 }
4927 4926
4928 4927 /*
4929 4928 *
4930 4929 * Name: skd_halt
4931 4930 *
4932 4931 * Inputs: skdev - device state structure.
4933 4932 *
4934 4933 * Returns: Nothing.
4935 4934 *
4936 4935 */
4937 4936 static void
4938 4937 skd_halt(skd_device_t *skdev)
4939 4938 {
4940 4939 Dcmn_err(CE_NOTE, "%s: halt/suspend ......", skdev->name);
4941 4940 }
4942 4941
4943 4942 /*
4944 4943 *
4945 4944 * Name: skd_detach, detaches driver from the system.
4946 4945 *
4947 4946 * Inputs: dip - device info structure.
4948 4947 *
4949 4948 * Returns: DDI_SUCCESS on successful detach otherwise DDI_FAILURE.
4950 4949 *
4951 4950 */
4952 4951 static int
4953 4952 skd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
4954 4953 {
4955 4954 skd_buf_private_t *pbuf;
4956 4955 skd_device_t *skdev;
4957 4956 int instance;
4958 4957 timeout_id_t timer_id = NULL;
4959 4958 int rv1 = DDI_SUCCESS;
4960 4959 struct skd_special_context *skspcl;
4961 4960
4962 4961 instance = ddi_get_instance(dip);
4963 4962
4964 4963 skdev = ddi_get_soft_state(skd_state, instance);
4965 4964 if (skdev == NULL) {
4966 4965 cmn_err(CE_WARN, "!detach failed: NULL skd state");
4967 4966
4968 4967 return (DDI_FAILURE);
4969 4968 }
4970 4969
4971 4970 Dcmn_err(CE_CONT, "skd_detach(%d): entered", instance);
4972 4971
4973 4972 switch (cmd) {
4974 4973 case DDI_DETACH:
4975 4974 /* Test for packet cache inuse. */
4976 4975 ADAPTER_STATE_LOCK(skdev);
4977 4976
4978 4977 /* Stop command/event processing. */
4979 4978 skdev->flags |= (SKD_SUSPENDED | SKD_CMD_ABORT_TMO);
4980 4979
4981 4980 /* Disable driver timer if no adapters. */
4982 4981 if (skdev->skd_timer_timeout_id != 0) {
4983 4982 timer_id = skdev->skd_timer_timeout_id;
4984 4983 skdev->skd_timer_timeout_id = 0;
4985 4984 }
4986 4985 ADAPTER_STATE_UNLOCK(skdev);
4987 4986
4988 4987 if (timer_id != 0) {
4989 4988 (void) untimeout(timer_id);
4990 4989 }
4991 4990
4992 4991 #ifdef SKD_PM
4993 4992 if (skdev->power_level != LOW_POWER_LEVEL) {
4994 4993 skd_halt(skdev);
4995 4994 skdev->power_level = LOW_POWER_LEVEL;
4996 4995 }
4997 4996 #endif
4998 4997 skspcl = &skdev->internal_skspcl;
4999 4998 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
5000 4999
5001 5000 skd_stop_device(skdev);
5002 5001
5003 5002 /*
5004 5003 * Clear request queue.
5005 5004 */
5006 5005 while (!SIMPLEQ_EMPTY(&skdev->waitqueue)) {
5007 5006 pbuf = skd_get_queued_pbuf(skdev);
5008 5007 skd_end_request_abnormal(skdev, pbuf, ECANCELED,
5009 5008 SKD_IODONE_WNIOC);
5010 5009 Dcmn_err(CE_NOTE,
5011 5010 "detach: cancelled pbuf %p %ld <%s> %lld\n",
5012 5011 (void *)pbuf, pbuf->x_xfer->x_nblks,
5013 5012 (pbuf->dir & B_READ) ? "Read" : "Write",
5014 5013 pbuf->x_xfer->x_blkno);
5015 5014 }
5016 5015
5017 5016 skd_bd_detach(skdev);
5018 5017
5019 5018 skd_dealloc_resources(dip, skdev, skdev->progress, instance);
5020 5019
5021 5020 if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) {
5022 5021 skd_destroy_mutex(skdev);
5023 5022 }
5024 5023
5025 5024 ddi_soft_state_free(skd_state, instance);
5026 5025
5027 5026 skd_exit();
5028 5027
5029 5028 break;
5030 5029
5031 5030 case DDI_SUSPEND:
5032 5031 /* Block timer. */
5033 5032
5034 5033 ADAPTER_STATE_LOCK(skdev);
5035 5034 skdev->flags |= SKD_SUSPENDED;
5036 5035
5037 5036 /* Disable driver timer if last adapter. */
5038 5037 if (skdev->skd_timer_timeout_id != 0) {
5039 5038 timer_id = skdev->skd_timer_timeout_id;
5040 5039 skdev->skd_timer_timeout_id = 0;
5041 5040 }
5042 5041 ADAPTER_STATE_UNLOCK(skdev);
5043 5042
5044 5043 if (timer_id != 0) {
5045 5044 (void) untimeout(timer_id);
5046 5045 }
5047 5046
5048 5047 ddi_prop_remove_all(dip);
5049 5048
5050 5049 skd_halt(skdev);
5051 5050
5052 5051 break;
5053 5052 default:
5054 5053 rv1 = DDI_FAILURE;
5055 5054 break;
5056 5055 }
5057 5056
5058 5057 if (rv1 != DDI_SUCCESS) {
5059 5058 cmn_err(CE_WARN, "!skd_detach, failed, rv1=%x", rv1);
5060 5059 } else {
5061 5060 Dcmn_err(CE_CONT, "skd_detach: exiting");
5062 5061 }
5063 5062
5064 5063 if (rv1 != DDI_SUCCESS)
5065 5064 return (DDI_FAILURE);
5066 5065
5067 5066 return (rv1);
5068 5067 }
5069 5068
5070 5069 /*
5071 5070 *
5072 5071 * Name: skd_devid_init, calls skd_setup_devid to setup
5073 5072 * the device's devid structure.
5074 5073 *
5075 5074 * Inputs: arg - device state structure.
5076 5075 * dip - dev_info structure.
5077 5076 * devid - devid structure.
5078 5077 *
5079 5078 * Returns: Nothing.
5080 5079 *
5081 5080 */
5082 5081 /* ARGSUSED */ /* Upstream common source with other platforms. */
5083 5082 static int
5084 5083 skd_devid_init(void *arg, dev_info_t *dip, ddi_devid_t *devid)
5085 5084 {
5086 5085 skd_device_t *skdev = arg;
5087 5086
5088 5087 (void) skd_setup_devid(skdev, devid);
5089 5088
5090 5089 return (0);
5091 5090 }
5092 5091
5093 5092 /*
5094 5093 *
5095 5094 * Name: skd_bd_driveinfo, retrieves device's info.
5096 5095 *
5097 5096 * Inputs: drive - drive data structure.
5098 5097 * arg - device state structure.
5099 5098 *
5100 5099 * Returns: Nothing.
5101 5100 *
5102 5101 */
5103 5102 static void
5104 5103 skd_bd_driveinfo(void *arg, bd_drive_t *drive)
5105 5104 {
5106 5105 skd_device_t *skdev = arg;
5107 5106
5108 5107 drive->d_qsize = (skdev->queue_depth_limit * 4) / 5;
5109 5108 drive->d_maxxfer = SKD_DMA_MAXXFER;
5110 5109 drive->d_removable = B_FALSE;
5111 5110 drive->d_hotpluggable = B_FALSE;
5112 5111 drive->d_target = 0;
5113 5112 drive->d_lun = 0;
5114 5113
5115 5114 if (skdev->inquiry_is_valid != 0) {
5116 5115 drive->d_vendor = skdev->inq_vendor_id;
5117 5116 drive->d_vendor_len = strlen(drive->d_vendor);
5118 5117
5119 5118 drive->d_product = skdev->inq_product_id;
5120 5119 drive->d_product_len = strlen(drive->d_product);
5121 5120
5122 5121 drive->d_serial = skdev->inq_serial_num;
5123 5122 drive->d_serial_len = strlen(drive->d_serial);
5124 5123
5125 5124 drive->d_revision = skdev->inq_product_rev;
5126 5125 drive->d_revision_len = strlen(drive->d_revision);
5127 5126 }
5128 5127 }
5129 5128
5130 5129 /*
5131 5130 *
5132 5131 * Name: skd_bd_mediainfo, retrieves device media info.
5133 5132 *
5134 5133 * Inputs: arg - device state structure.
5135 5134 * media - container for media info.
5136 5135 *
5137 5136 * Returns: Zero.
5138 5137 *
5139 5138 */
5140 5139 static int
5141 5140 skd_bd_mediainfo(void *arg, bd_media_t *media)
5142 5141 {
5143 5142 skd_device_t *skdev = arg;
5144 5143
5145 5144 media->m_nblks = skdev->Nblocks;
5146 5145 media->m_blksize = 512;
5147 5146 media->m_pblksize = 4096;
5148 5147 media->m_readonly = B_FALSE;
5149 5148 media->m_solidstate = B_TRUE;
5150 5149
5151 5150 return (0);
5152 5151 }
5153 5152
5154 5153 /*
5155 5154 *
5156 5155 * Name: skd_rw, performs R/W requests for blkdev driver.
5157 5156 *
5158 5157 * Inputs: skdev - device state structure.
5159 5158 * xfer - tranfer structure.
5160 5159 * dir - I/O direction.
5161 5160 *
5162 5161 * Returns: EAGAIN if device is not online. EIO if blkdev wants us to
5163 5162 * be a dump device (for now).
5164 5163 * Value returned by skd_start().
5165 5164 *
5166 5165 */
5167 5166 static int
5168 5167 skd_rw(skd_device_t *skdev, bd_xfer_t *xfer, int dir)
5169 5168 {
5170 5169 skd_buf_private_t *pbuf;
5171 5170
5172 5171 /*
5173 5172 * The x_flags structure element is not defined in Oracle Solaris
5174 5173 */
5175 5174 /* We'll need to fix this in order to support dump on this device. */
5176 5175 if (xfer->x_flags & BD_XFER_POLL)
5177 5176 return (EIO);
5178 5177
5179 5178 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
5180 5179 Dcmn_err(CE_NOTE, "Device - not ONLINE");
5181 5180
5182 5181 skd_request_fn_not_online(skdev);
5183 5182
5184 5183 return (EAGAIN);
5185 5184 }
5186 5185
5187 5186 pbuf = kmem_zalloc(sizeof (skd_buf_private_t), KM_NOSLEEP);
5188 5187 if (pbuf == NULL)
5189 5188 return (ENOMEM);
5190 5189
5191 5190 WAITQ_LOCK(skdev);
5192 5191 pbuf->dir = dir;
5193 5192 pbuf->x_xfer = xfer;
5194 5193
5195 5194 skd_queue(skdev, pbuf);
5196 5195 skdev->ios_queued++;
5197 5196 WAITQ_UNLOCK(skdev);
5198 5197
5199 5198 skd_start(skdev);
5200 5199
5201 5200 return (0);
5202 5201 }
5203 5202
5204 5203 /*
5205 5204 *
5206 5205 * Name: skd_bd_read, performs blkdev read requests.
5207 5206 *
5208 5207 * Inputs: arg - device state structure.
5209 5208 * xfer - tranfer request structure.
5210 5209 *
5211 5210 * Returns: Value return by skd_rw().
5212 5211 *
5213 5212 */
5214 5213 static int
5215 5214 skd_bd_read(void *arg, bd_xfer_t *xfer)
5216 5215 {
5217 5216 return (skd_rw(arg, xfer, B_READ));
5218 5217 }
5219 5218
5220 5219 /*
5221 5220 *
5222 5221 * Name: skd_bd_write, performs blkdev write requests.
5223 5222 *
5224 5223 * Inputs: arg - device state structure.
5225 5224 * xfer - tranfer request structure.
5226 5225 *
5227 5226 * Returns: Value return by skd_rw().
5228 5227 *
5229 5228 */
5230 5229 static int
5231 5230 skd_bd_write(void *arg, bd_xfer_t *xfer)
5232 5231 {
5233 5232 return (skd_rw(arg, xfer, B_WRITE));
5234 5233 }
↓ open down ↓ |
5051 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX