1 /*
2 *
3 * skd.c: Solaris 11/10 Driver for sTec, Inc. S112x PCIe SSD card
4 *
5 * Solaris driver is based on the Linux driver authored by:
6 *
7 * Authors/Alphabetical: Dragan Stancevic <dstancevic@stec-inc.com>
8 * Gordon Waidhofer <gwaidhofer@stec-inc.com>
9 * John Hamilton <jhamilton@stec-inc.com>
10 */
11
12 /*
13 * This file and its contents are supplied under the terms of the
14 * Common Development and Distribution License ("CDDL"), version 1.0.
15 * You may only use this file in accordance with the terms of version
16 * 1.0 of the CDDL.
17 *
18 * A full copy of the text of the CDDL should have accompanied this
19 * source. A copy of the CDDL is also available via the Internet at
20 * http://www.illumos.org/license/CDDL.
21 */
22
23 /*
24 * Copyright 2013 STEC, Inc. All rights reserved.
25 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
26 */
27
28 #include <sys/types.h>
29 #include <sys/stream.h>
30 #include <sys/cmn_err.h>
31 #include <sys/kmem.h>
32 #include <sys/file.h>
33 #include <sys/buf.h>
34 #include <sys/uio.h>
35 #include <sys/cred.h>
36 #include <sys/modctl.h>
37 #include <sys/debug.h>
38 #include <sys/modctl.h>
39 #include <sys/list.h>
40 #include <sys/sysmacros.h>
41 #include <sys/errno.h>
42 #include <sys/pcie.h>
43 #include <sys/pci.h>
44 #include <sys/ddi.h>
45 #include <sys/dditypes.h>
46 #include <sys/sunddi.h>
47 #include <sys/atomic.h>
48 #include <sys/mutex.h>
49 #include <sys/param.h>
50 #include <sys/devops.h>
51 #include <sys/blkdev.h>
52 #include <sys/queue.h>
53 #include <sys/scsi/impl/inquiry.h>
54
55 #include "skd_s1120.h"
56 #include "skd.h"
57
58 int skd_dbg_level = 0;
59
60 void *skd_state = NULL;
61 int skd_disable_msi = 0;
62 int skd_disable_msix = 0;
63
64 /* Initialized in _init() and tunable, see _init(). */
65 clock_t skd_timer_ticks;
66
67 /* I/O DMA attributes structures. */
68 static ddi_dma_attr_t skd_64bit_io_dma_attr = {
69 DMA_ATTR_V0, /* dma_attr_version */
70 SKD_DMA_LOW_ADDRESS, /* low DMA address range */
71 SKD_DMA_HIGH_64BIT_ADDRESS, /* high DMA address range */
72 SKD_DMA_XFER_COUNTER, /* DMA counter register */
73 SKD_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment */
74 SKD_DMA_BURSTSIZES, /* DMA burstsizes */
75 SKD_DMA_MIN_XFER_SIZE, /* min effective DMA size */
76 SKD_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
77 SKD_DMA_SEGMENT_BOUNDARY, /* segment boundary */
78 SKD_DMA_SG_LIST_LENGTH, /* s/g list length */
79 SKD_DMA_GRANULARITY, /* granularity of device */
80 SKD_DMA_XFER_FLAGS /* DMA transfer flags */
81 };
82
83 int skd_isr_type = -1;
84
85 #define SKD_MAX_QUEUE_DEPTH 255
86 #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
87 int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
88
89 #define SKD_MAX_REQ_PER_MSG 14
90 #define SKD_MAX_REQ_PER_MSG_DEFAULT 1
91 int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
92
93 #define SKD_MAX_N_SG_PER_REQ 4096
94 int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
95
96 static int skd_sys_quiesce_dev(dev_info_t *);
97 static int skd_quiesce_dev(skd_device_t *);
98 static int skd_list_skmsg(skd_device_t *, int);
99 static int skd_list_skreq(skd_device_t *, int);
100 static int skd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
101 static int skd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
102 static int skd_format_internal_skspcl(struct skd_device *skdev);
103 static void skd_start(skd_device_t *);
104 static void skd_destroy_mutex(skd_device_t *skdev);
105 static void skd_enable_interrupts(struct skd_device *);
106 static void skd_request_fn_not_online(skd_device_t *skdev);
107 static void skd_send_internal_skspcl(struct skd_device *,
108 struct skd_special_context *, uint8_t);
109 static void skd_queue(skd_device_t *, skd_buf_private_t *);
110 static void *skd_alloc_dma_mem(skd_device_t *, dma_mem_t *, uint8_t);
111 static void skd_release_intr(skd_device_t *skdev);
112 static void skd_isr_fwstate(struct skd_device *skdev);
113 static void skd_isr_msg_from_dev(struct skd_device *skdev);
114 static void skd_soft_reset(struct skd_device *skdev);
115 static void skd_refresh_device_data(struct skd_device *skdev);
116 static void skd_update_props(skd_device_t *, dev_info_t *);
117 static void skd_end_request_abnormal(struct skd_device *, skd_buf_private_t *,
118 int, int);
119 static char *skd_pci_info(struct skd_device *skdev, char *str, size_t len);
120
121 static skd_buf_private_t *skd_get_queued_pbuf(skd_device_t *);
122
123 static void skd_bd_driveinfo(void *arg, bd_drive_t *drive);
124 static int skd_bd_mediainfo(void *arg, bd_media_t *media);
125 static int skd_bd_read(void *arg, bd_xfer_t *xfer);
126 static int skd_bd_write(void *arg, bd_xfer_t *xfer);
127 static int skd_devid_init(void *arg, dev_info_t *, ddi_devid_t *);
128
129
130 static bd_ops_t skd_bd_ops = {
131 BD_OPS_VERSION_0,
132 skd_bd_driveinfo,
133 skd_bd_mediainfo,
134 skd_devid_init,
135 NULL, /* sync_cache */
136 skd_bd_read,
137 skd_bd_write,
138 };
139
140 static ddi_device_acc_attr_t dev_acc_attr = {
141 DDI_DEVICE_ATTR_V0,
142 DDI_STRUCTURE_LE_ACC,
143 DDI_STRICTORDER_ACC
144 };
145
146 /*
147 * Solaris module loading/unloading structures
148 */
149 struct dev_ops skd_dev_ops = {
150 DEVO_REV, /* devo_rev */
151 0, /* refcnt */
152 ddi_no_info, /* getinfo */
153 nulldev, /* identify */
154 nulldev, /* probe */
155 skd_attach, /* attach */
156 skd_detach, /* detach */
157 nodev, /* reset */
158 NULL, /* char/block ops */
159 NULL, /* bus operations */
160 NULL, /* power management */
161 skd_sys_quiesce_dev /* quiesce */
162 };
163
164 static struct modldrv modldrv = {
165 &mod_driverops, /* type of module: driver */
166 "sTec skd v" DRV_VER_COMPL, /* name of module */
167 &skd_dev_ops /* driver dev_ops */
168 };
169
170 static struct modlinkage modlinkage = {
171 MODREV_1,
172 { &modldrv, NULL }
173 };
174
175 /*
176 * sTec-required wrapper for debug printing.
177 */
178 /*PRINTFLIKE2*/
179 static inline void
180 Dcmn_err(int lvl, const char *fmt, ...)
181 {
182 va_list ap;
183
184 if (skd_dbg_level == 0)
185 return;
186
187 va_start(ap, fmt);
188 vcmn_err(lvl, fmt, ap);
189 va_end(ap);
190 }
191
192 /*
193 * Solaris module loading/unloading routines
194 */
195
196 /*
197 *
198 * Name: _init, performs initial installation
199 *
200 * Inputs: None.
201 *
202 * Returns: Returns the value returned by the ddi_softstate_init function
203 * on a failure to create the device state structure or the result
204 * of the module install routines.
205 *
206 */
207 int
208 _init(void)
209 {
210 int rval = 0;
211 int tgts = 0;
212
213 tgts |= 0x02;
214 tgts |= 0x08; /* In #ifdef NEXENTA block from original sTec drop. */
215
216 /*
217 * drv_usectohz() is a function, so can't initialize it at
218 * instantiation.
219 */
220 skd_timer_ticks = drv_usectohz(1000000);
221
222 Dcmn_err(CE_NOTE,
223 "<# Installing skd Driver dbg-lvl=%d %s %x>",
224 skd_dbg_level, DRV_BUILD_ID, tgts);
225
226 rval = ddi_soft_state_init(&skd_state, sizeof (skd_device_t), 0);
227 if (rval != DDI_SUCCESS)
228 return (rval);
229
230 bd_mod_init(&skd_dev_ops);
231
232 rval = mod_install(&modlinkage);
233 if (rval != DDI_SUCCESS) {
234 ddi_soft_state_fini(&skd_state);
235 bd_mod_fini(&skd_dev_ops);
236 }
237
238 return (rval);
239 }
240
241 /*
242 *
243 * Name: _info, returns information about loadable module.
244 *
245 * Inputs: modinfo, pointer to module information structure.
246 *
247 * Returns: Value returned by mod_info().
248 *
249 */
250 int
251 _info(struct modinfo *modinfop)
252 {
253 return (mod_info(&modlinkage, modinfop));
254 }
255
256 /*
257 * _fini Prepares a module for unloading. It is called when the system
258 * wants to unload a module. If the module determines that it can
259 * be unloaded, then _fini() returns the value returned by
260 * mod_remove(). Upon successful return from _fini() no other
261 * routine in the module will be called before _init() is called.
262 *
263 * Inputs: None.
264 *
265 * Returns: DDI_SUCCESS or DDI_FAILURE.
266 *
267 */
268 int
269 _fini(void)
270 {
271 int rval;
272
273 rval = mod_remove(&modlinkage);
274 if (rval == DDI_SUCCESS) {
275 ddi_soft_state_fini(&skd_state);
276 bd_mod_fini(&skd_dev_ops);
277 }
278
279 return (rval);
280 }
281
282 /*
283 * Solaris Register read/write routines
284 */
285
286 /*
287 *
288 * Name: skd_reg_write64, writes a 64-bit value to specified address
289 *
290 * Inputs: skdev - device state structure.
291 * val - 64-bit value to be written.
292 * offset - offset from PCI base address.
293 *
294 * Returns: Nothing.
295 *
296 */
297 /*
298 * Local vars are to keep lint silent. Any compiler worth its weight will
299 * optimize it all right out...
300 */
301 static inline void
302 skd_reg_write64(struct skd_device *skdev, uint64_t val, uint32_t offset)
303 {
304 uint64_t *addr;
305
306 ASSERT((offset & 0x7) == 0);
307 /* LINTED */
308 addr = (uint64_t *)(skdev->dev_iobase + offset);
309 ddi_put64(skdev->dev_handle, addr, val);
310 }
311
312 /*
313 *
314 * Name: skd_reg_read32, reads a 32-bit value to specified address
315 *
316 * Inputs: skdev - device state structure.
317 * offset - offset from PCI base address.
318 *
319 * Returns: val, 32-bit value read from specified PCI address.
320 *
321 */
322 static inline uint32_t
323 skd_reg_read32(struct skd_device *skdev, uint32_t offset)
324 {
325 uint32_t *addr;
326
327 ASSERT((offset & 0x3) == 0);
328 /* LINTED */
329 addr = (uint32_t *)(skdev->dev_iobase + offset);
330 return (ddi_get32(skdev->dev_handle, addr));
331 }
332
333 /*
334 *
335 * Name: skd_reg_write32, writes a 32-bit value to specified address
336 *
337 * Inputs: skdev - device state structure.
338 * val - value to be written.
339 * offset - offset from PCI base address.
340 *
341 * Returns: Nothing.
342 *
343 */
344 static inline void
345 skd_reg_write32(struct skd_device *skdev, uint32_t val, uint32_t offset)
346 {
347 uint32_t *addr;
348
349 ASSERT((offset & 0x3) == 0);
350 /* LINTED */
351 addr = (uint32_t *)(skdev->dev_iobase + offset);
352 ddi_put32(skdev->dev_handle, addr, val);
353 }
354
355
356 /*
357 * Solaris skd routines
358 */
359
360 /*
361 *
362 * Name: skd_name, generates the name of the driver.
363 *
364 * Inputs: skdev - device state structure
365 *
366 * Returns: char pointer to generated driver name.
367 *
368 */
369 static const char *
370 skd_name(struct skd_device *skdev)
371 {
372 (void) snprintf(skdev->id_str, sizeof (skdev->id_str), "%s:", DRV_NAME);
373
374 return (skdev->id_str);
375 }
376
377 /*
378 *
379 * Name: skd_pci_find_capability, searches the PCI capability
380 * list for the specified capability.
381 *
382 * Inputs: skdev - device state structure.
383 * cap - capability sought.
384 *
385 * Returns: Returns position where capability was found.
386 * If not found, returns zero.
387 *
388 */
389 static int
390 skd_pci_find_capability(struct skd_device *skdev, int cap)
391 {
392 uint16_t status;
393 uint8_t pos, id, hdr;
394 int ttl = 48;
395
396 status = pci_config_get16(skdev->pci_handle, PCI_CONF_STAT);
397
398 if (!(status & PCI_STAT_CAP))
399 return (0);
400
401 hdr = pci_config_get8(skdev->pci_handle, PCI_CONF_HEADER);
402
403 if ((hdr & PCI_HEADER_TYPE_M) != 0)
404 return (0);
405
406 pos = pci_config_get8(skdev->pci_handle, PCI_CONF_CAP_PTR);
407
408 while (ttl-- && pos >= 0x40) {
409 pos &= ~3;
410 id = pci_config_get8(skdev->pci_handle, pos+PCI_CAP_ID);
411 if (id == 0xff)
412 break;
413 if (id == cap)
414 return (pos);
415 pos = pci_config_get8(skdev->pci_handle, pos+PCI_CAP_NEXT_PTR);
416 }
417
418 return (0);
419 }
420
421 /*
422 *
423 * Name: skd_io_done, called to conclude an I/O operation.
424 *
425 * Inputs: skdev - device state structure.
426 * pbuf - I/O request
427 * error - contain error value.
428 * mode - debug only.
429 *
430 * Returns: Nothing.
431 *
432 */
433 static void
434 skd_io_done(skd_device_t *skdev, skd_buf_private_t *pbuf,
435 int error, int mode)
436 {
437 bd_xfer_t *xfer;
438
439 ASSERT(pbuf != NULL);
440
441 xfer = pbuf->x_xfer;
442
443 switch (mode) {
444 case SKD_IODONE_WIOC:
445 skdev->iodone_wioc++;
446 break;
447 case SKD_IODONE_WNIOC:
448 skdev->iodone_wnioc++;
449 break;
450 case SKD_IODONE_WDEBUG:
451 skdev->iodone_wdebug++;
452 break;
453 default:
454 skdev->iodone_unknown++;
455 }
456
457 if (error) {
458 skdev->ios_errors++;
459 cmn_err(CE_WARN,
460 "!%s:skd_io_done:ERR=%d %lld-%ld %s", skdev->name,
461 error, xfer->x_blkno, xfer->x_nblks,
462 (pbuf->dir & B_READ) ? "Read" : "Write");
463 }
464
465 kmem_free(pbuf, sizeof (skd_buf_private_t));
466
467 bd_xfer_done(xfer, error);
468 }
469
470 /*
471 * QUIESCE DEVICE
472 */
473
474 /*
475 *
476 * Name: skd_sys_quiesce_dev, quiets the device
477 *
478 * Inputs: dip - dev info strucuture
479 *
480 * Returns: Zero.
481 *
482 */
483 static int
484 skd_sys_quiesce_dev(dev_info_t *dip)
485 {
486 skd_device_t *skdev;
487
488 skdev = ddi_get_soft_state(skd_state, ddi_get_instance(dip));
489
490 /* make sure Dcmn_err() doesn't actually print anything */
491 skd_dbg_level = 0;
492
493 skd_disable_interrupts(skdev);
494 skd_soft_reset(skdev);
495
496 return (0);
497 }
498
499 /*
500 *
501 * Name: skd_quiesce_dev, quiets the device, but doesn't really do much.
502 *
503 * Inputs: skdev - Device state.
504 *
505 * Returns: -EINVAL if device is not in proper state otherwise
506 * returns zero.
507 *
508 */
509 static int
510 skd_quiesce_dev(skd_device_t *skdev)
511 {
512 int rc = 0;
513
514 if (skd_dbg_level)
515 Dcmn_err(CE_NOTE, "skd_quiece_dev:");
516
517 switch (skdev->state) {
518 case SKD_DRVR_STATE_BUSY:
519 case SKD_DRVR_STATE_BUSY_IMMINENT:
520 Dcmn_err(CE_NOTE, "%s: stopping queue", skdev->name);
521 break;
522 case SKD_DRVR_STATE_ONLINE:
523 case SKD_DRVR_STATE_STOPPING:
524 case SKD_DRVR_STATE_SYNCING:
525 case SKD_DRVR_STATE_PAUSING:
526 case SKD_DRVR_STATE_PAUSED:
527 case SKD_DRVR_STATE_STARTING:
528 case SKD_DRVR_STATE_RESTARTING:
529 case SKD_DRVR_STATE_RESUMING:
530 default:
531 rc = -EINVAL;
532 cmn_err(CE_NOTE, "state [%d] not implemented", skdev->state);
533 }
534
535 return (rc);
536 }
537
538 /*
539 * UNQUIESCE DEVICE:
540 * Note: Assumes lock is held to protect device state.
541 */
542 /*
543 *
544 * Name: skd_unquiesce_dev, awkens the device
545 *
546 * Inputs: skdev - Device state.
547 *
548 * Returns: -EINVAL if device is not in proper state otherwise
549 * returns zero.
550 *
551 */
552 static int
553 skd_unquiesce_dev(struct skd_device *skdev)
554 {
555 Dcmn_err(CE_NOTE, "skd_unquiece_dev:");
556
557 skd_log_skdev(skdev, "unquiesce");
558 if (skdev->state == SKD_DRVR_STATE_ONLINE) {
559 Dcmn_err(CE_NOTE, "**** device already ONLINE");
560
561 return (0);
562 }
563 if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
564 /*
565 * If there has been an state change to other than
566 * ONLINE, we will rely on controller state change
567 * to come back online and restart the queue.
568 * The BUSY state means that driver is ready to
569 * continue normal processing but waiting for controller
570 * to become available.
571 */
572 skdev->state = SKD_DRVR_STATE_BUSY;
573 Dcmn_err(CE_NOTE, "drive BUSY state\n");
574
575 return (0);
576 }
577 /*
578 * Drive just come online, driver is either in startup,
579 * paused performing a task, or bust waiting for hardware.
580 */
581 switch (skdev->state) {
582 case SKD_DRVR_STATE_PAUSED:
583 case SKD_DRVR_STATE_BUSY:
584 case SKD_DRVR_STATE_BUSY_IMMINENT:
585 case SKD_DRVR_STATE_BUSY_ERASE:
586 case SKD_DRVR_STATE_STARTING:
587 case SKD_DRVR_STATE_RESTARTING:
588 case SKD_DRVR_STATE_FAULT:
589 case SKD_DRVR_STATE_IDLE:
590 case SKD_DRVR_STATE_LOAD:
591 skdev->state = SKD_DRVR_STATE_ONLINE;
592 Dcmn_err(CE_NOTE, "%s: sTec s1120 ONLINE", skdev->name);
593 Dcmn_err(CE_NOTE, "%s: Starting request queue", skdev->name);
594 Dcmn_err(CE_NOTE,
595 "%s: queue depth limit=%d hard=%d soft=%d lowat=%d",
596 skdev->name,
597 skdev->queue_depth_limit,
598 skdev->hard_queue_depth_limit,
599 skdev->soft_queue_depth_limit,
600 skdev->queue_depth_lowat);
601
602 skdev->gendisk_on = 1;
603 cv_signal(&skdev->cv_waitq);
604 break;
605 case SKD_DRVR_STATE_DISAPPEARED:
606 default:
607 cmn_err(CE_NOTE, "**** driver state %d, not implemented \n",
608 skdev->state);
609 return (-EBUSY);
610 }
611
612 return (0);
613 }
614
615 /*
616 * READ/WRITE REQUESTS
617 */
618
619 /*
620 *
621 * Name: skd_blkdev_preop_sg_list, builds the S/G list from info
622 * passed in by the blkdev driver.
623 *
624 * Inputs: skdev - device state structure.
625 * skreq - request structure.
626 * sg_byte_count - data transfer byte count.
627 *
628 * Returns: Nothing.
629 *
630 */
631 /*ARGSUSED*/
632 static void
633 skd_blkdev_preop_sg_list(struct skd_device *skdev,
634 struct skd_request_context *skreq, uint32_t *sg_byte_count)
635 {
636 bd_xfer_t *xfer;
637 skd_buf_private_t *pbuf;
638 int i, bcount = 0;
639 uint_t n_sg;
640
641 *sg_byte_count = 0;
642
643 ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
644 skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST);
645
646 pbuf = skreq->pbuf;
647 ASSERT(pbuf != NULL);
648
649 xfer = pbuf->x_xfer;
650 n_sg = xfer->x_ndmac;
651
652 ASSERT(n_sg <= skdev->sgs_per_request);
653
654 skreq->n_sg = n_sg;
655
656 skreq->io_dma_handle = xfer->x_dmah;
657
658 skreq->total_sg_bcount = 0;
659
660 for (i = 0; i < n_sg; i++) {
661 ddi_dma_cookie_t *cookiep = &xfer->x_dmac;
662 struct fit_sg_descriptor *sgd;
663 uint32_t cnt = (uint32_t)cookiep->dmac_size;
664
665 bcount += cnt;
666
667 sgd = &skreq->sksg_list[i];
668 sgd->control = FIT_SGD_CONTROL_NOT_LAST;
669 sgd->byte_count = cnt;
670 sgd->host_side_addr = cookiep->dmac_laddress;
671 sgd->dev_side_addr = 0; /* not used */
672 *sg_byte_count += cnt;
673
674 skreq->total_sg_bcount += cnt;
675
676 if ((i + 1) != n_sg)
677 ddi_dma_nextcookie(skreq->io_dma_handle, &xfer->x_dmac);
678 }
679
680 skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
681 skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
682
683 (void) ddi_dma_sync(skreq->sksg_dma_address.dma_handle, 0, 0,
684 DDI_DMA_SYNC_FORDEV);
685 }
686
687 /*
688 *
689 * Name: skd_blkdev_postop_sg_list, deallocates DMA
690 *
691 * Inputs: skdev - device state structure.
692 * skreq - skreq data structure.
693 *
694 * Returns: Nothing.
695 *
696 */
697 /* ARGSUSED */ /* Upstream common source with other platforms. */
698 static void
699 skd_blkdev_postop_sg_list(struct skd_device *skdev,
700 struct skd_request_context *skreq)
701 {
702 /*
703 * restore the next ptr for next IO request so we
704 * don't have to set it every time.
705 */
706 skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
707 skreq->sksg_dma_address.cookies->dmac_laddress +
708 ((skreq->n_sg) * sizeof (struct fit_sg_descriptor));
709 }
710
711 /*
712 *
713 * Name: skd_start, initiates an I/O.
714 *
715 * Inputs: skdev - device state structure.
716 *
717 * Returns: EAGAIN if devicfe is not ONLINE.
718 * On error, if the caller is the blkdev driver, return
719 * the error value. Otherwise, return zero.
720 *
721 */
722 /* Upstream common source with other platforms. */
723 static void
724 skd_start(skd_device_t *skdev)
725 {
726 struct skd_fitmsg_context *skmsg = NULL;
727 struct fit_msg_hdr *fmh = NULL;
728 struct skd_request_context *skreq = NULL;
729 struct waitqueue *waitq = &skdev->waitqueue;
730 struct skd_scsi_request *scsi_req;
731 skd_buf_private_t *pbuf = NULL;
732 int bcount;
733
734 uint32_t lba;
735 uint32_t count;
736 uint32_t timo_slot;
737 void *cmd_ptr;
738 uint32_t sg_byte_count = 0;
739
740 /*
741 * Stop conditions:
742 * - There are no more native requests
743 * - There are already the maximum number of requests is progress
744 * - There are no more skd_request_context entries
745 * - There are no more FIT msg buffers
746 */
747 for (;;) {
748 /* Are too many requests already in progress? */
749 if (skdev->queue_depth_busy >= skdev->queue_depth_limit) {
750 Dcmn_err(CE_NOTE, "qdepth %d, limit %d\n",
751 skdev->queue_depth_busy,
752 skdev->queue_depth_limit);
753 break;
754 }
755
756 WAITQ_LOCK(skdev);
757 if (SIMPLEQ_EMPTY(waitq)) {
758 WAITQ_UNLOCK(skdev);
759 break;
760 }
761
762 /* Is a skd_request_context available? */
763 skreq = skdev->skreq_free_list;
764 if (skreq == NULL) {
765 WAITQ_UNLOCK(skdev);
766 break;
767 }
768
769 ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
770 ASSERT((skreq->id & SKD_ID_INCR) == 0);
771
772 skdev->skreq_free_list = skreq->next;
773
774 skreq->state = SKD_REQ_STATE_BUSY;
775 skreq->id += SKD_ID_INCR;
776
777 /* Start a new FIT msg if there is none in progress. */
778 if (skmsg == NULL) {
779 /* Are there any FIT msg buffers available? */
780 skmsg = skdev->skmsg_free_list;
781 if (skmsg == NULL) {
782 WAITQ_UNLOCK(skdev);
783 break;
784 }
785
786 ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
787 ASSERT((skmsg->id & SKD_ID_INCR) == 0);
788
789 skdev->skmsg_free_list = skmsg->next;
790
791 skmsg->state = SKD_MSG_STATE_BUSY;
792 skmsg->id += SKD_ID_INCR;
793
794 /* Initialize the FIT msg header */
795 fmh = (struct fit_msg_hdr *)skmsg->msg_buf64;
796 bzero(fmh, sizeof (*fmh)); /* Too expensive */
797 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
798 skmsg->length = sizeof (struct fit_msg_hdr);
799 }
800
801 /*
802 * At this point we are committed to either start or reject
803 * the native request. Note that a FIT msg may have just been
804 * started but contains no SoFIT requests yet.
805 * Now - dequeue pbuf.
806 */
807 pbuf = skd_get_queued_pbuf(skdev);
808 WAITQ_UNLOCK(skdev);
809
810 skreq->pbuf = pbuf;
811 lba = pbuf->x_xfer->x_blkno;
812 count = pbuf->x_xfer->x_nblks;
813 skreq->did_complete = 0;
814
815 skreq->fitmsg_id = skmsg->id;
816
817 Dcmn_err(CE_NOTE,
818 "pbuf=%p lba=%u(0x%x) count=%u(0x%x) dir=%x\n",
819 (void *)pbuf, lba, lba, count, count, pbuf->dir);
820
821 /*
822 * Transcode the request.
823 */
824 cmd_ptr = &skmsg->msg_buf[skmsg->length];
825 bzero(cmd_ptr, 32); /* This is too expensive */
826
827 scsi_req = cmd_ptr;
828 scsi_req->hdr.tag = skreq->id;
829 scsi_req->hdr.sg_list_dma_address =
830 cpu_to_be64(skreq->sksg_dma_address.cookies->dmac_laddress);
831 scsi_req->cdb[1] = 0;
832 scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
833 scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
834 scsi_req->cdb[4] = (lba & 0xff00) >> 8;
835 scsi_req->cdb[5] = (lba & 0xff);
836 scsi_req->cdb[6] = 0;
837 scsi_req->cdb[7] = (count & 0xff00) >> 8;
838 scsi_req->cdb[8] = count & 0xff;
839 scsi_req->cdb[9] = 0;
840
841 if (pbuf->dir & B_READ) {
842 scsi_req->cdb[0] = 0x28;
843 skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
844 } else {
845 scsi_req->cdb[0] = 0x2a;
846 skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
847 }
848
849 skd_blkdev_preop_sg_list(skdev, skreq, &sg_byte_count);
850
851 scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(sg_byte_count);
852
853 bcount = (sg_byte_count + 511) / 512;
854 scsi_req->cdb[7] = (bcount & 0xff00) >> 8;
855 scsi_req->cdb[8] = bcount & 0xff;
856
857 Dcmn_err(CE_NOTE,
858 "skd_start: pbuf=%p skreq->id=%x opc=%x ====>>>>>",
859 (void *)pbuf, skreq->id, *scsi_req->cdb);
860
861 skmsg->length += sizeof (struct skd_scsi_request);
862 fmh->num_protocol_cmds_coalesced++;
863
864 /*
865 * Update the active request counts.
866 * Capture the timeout timestamp.
867 */
868 skreq->timeout_stamp = skdev->timeout_stamp;
869 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
870
871 atomic_inc_32(&skdev->timeout_slot[timo_slot]);
872 atomic_inc_32(&skdev->queue_depth_busy);
873
874 Dcmn_err(CE_NOTE, "req=0x%x busy=%d timo_slot=%d",
875 skreq->id, skdev->queue_depth_busy, timo_slot);
876 /*
877 * If the FIT msg buffer is full send it.
878 */
879 if (skmsg->length >= SKD_N_FITMSG_BYTES ||
880 fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
881
882 atomic_inc_64(&skdev->active_cmds);
883 pbuf->skreq = skreq;
884
885 skdev->fitmsg_sent1++;
886 skd_send_fitmsg(skdev, skmsg);
887
888 skmsg = NULL;
889 fmh = NULL;
890 }
891 }
892
893 /*
894 * Is a FIT msg in progress? If it is empty put the buffer back
895 * on the free list. If it is non-empty send what we got.
896 * This minimizes latency when there are fewer requests than
897 * what fits in a FIT msg.
898 */
899 if (skmsg != NULL) {
900 ASSERT(skmsg->length > sizeof (struct fit_msg_hdr));
901 Dcmn_err(CE_NOTE, "sending msg=%p, len %d",
902 (void *)skmsg, skmsg->length);
903
904 skdev->active_cmds++;
905
906 skdev->fitmsg_sent2++;
907 skd_send_fitmsg(skdev, skmsg);
908 }
909 }
910
911 /*
912 *
913 * Name: skd_end_request
914 *
915 * Inputs: skdev - device state structure.
916 * skreq - request structure.
917 * error - I/O error value.
918 *
919 * Returns: Nothing.
920 *
921 */
922 static void
923 skd_end_request(struct skd_device *skdev,
924 struct skd_request_context *skreq, int error)
925 {
926 skdev->ios_completed++;
927 skd_io_done(skdev, skreq->pbuf, error, SKD_IODONE_WIOC);
928 skreq->pbuf = NULL;
929 skreq->did_complete = 1;
930 }
931
932 /*
933 *
934 * Name: skd_end_request_abnormal
935 *
936 * Inputs: skdev - device state structure.
937 * pbuf - I/O request.
938 * error - I/O error value.
939 * mode - debug
940 *
941 * Returns: Nothing.
942 *
943 */
944 static void
945 skd_end_request_abnormal(skd_device_t *skdev, skd_buf_private_t *pbuf,
946 int error, int mode)
947 {
948 skd_io_done(skdev, pbuf, error, mode);
949 }
950
951 /*
952 *
953 * Name: skd_request_fn_not_online, handles the condition
954 * of the device not being online.
955 *
956 * Inputs: skdev - device state structure.
957 *
958 * Returns: nothing (void).
959 *
960 */
961 static void
962 skd_request_fn_not_online(skd_device_t *skdev)
963 {
964 int error;
965 skd_buf_private_t *pbuf;
966
967 ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
968
969 skd_log_skdev(skdev, "req_not_online");
970
971 switch (skdev->state) {
972 case SKD_DRVR_STATE_PAUSING:
973 case SKD_DRVR_STATE_PAUSED:
974 case SKD_DRVR_STATE_STARTING:
975 case SKD_DRVR_STATE_RESTARTING:
976 case SKD_DRVR_STATE_WAIT_BOOT:
977 /*
978 * In case of starting, we haven't started the queue,
979 * so we can't get here... but requests are
980 * possibly hanging out waiting for us because we
981 * reported the dev/skd/0 already. They'll wait
982 * forever if connect doesn't complete.
983 * What to do??? delay dev/skd/0 ??
984 */
985 case SKD_DRVR_STATE_BUSY:
986 case SKD_DRVR_STATE_BUSY_IMMINENT:
987 case SKD_DRVR_STATE_BUSY_ERASE:
988 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
989 return;
990
991 case SKD_DRVR_STATE_BUSY_SANITIZE:
992 case SKD_DRVR_STATE_STOPPING:
993 case SKD_DRVR_STATE_SYNCING:
994 case SKD_DRVR_STATE_FAULT:
995 case SKD_DRVR_STATE_DISAPPEARED:
996 default:
997 error = -EIO;
998 break;
999 }
1000
1001 /*
1002 * If we get here, terminate all pending block requeusts
1003 * with EIO and any scsi pass thru with appropriate sense
1004 */
1005 ASSERT(WAITQ_LOCK_HELD(skdev));
1006 if (SIMPLEQ_EMPTY(&skdev->waitqueue))
1007 return;
1008
1009 while ((pbuf = skd_get_queued_pbuf(skdev)))
1010 skd_end_request_abnormal(skdev, pbuf, error, SKD_IODONE_WNIOC);
1011
1012 cv_signal(&skdev->cv_waitq);
1013 }
1014
1015 /*
1016 * TIMER
1017 */
1018
1019 static void skd_timer_tick_not_online(struct skd_device *skdev);
1020
1021 /*
1022 *
1023 * Name: skd_timer_tick, monitors requests for timeouts.
1024 *
1025 * Inputs: skdev - device state structure.
1026 *
1027 * Returns: Nothing.
1028 *
1029 */
1030 static void
1031 skd_timer_tick(skd_device_t *skdev)
1032 {
1033 uint32_t timo_slot;
1034
1035 skdev->timer_active = 1;
1036
1037 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
1038 skd_timer_tick_not_online(skdev);
1039 goto timer_func_out;
1040 }
1041
1042 skdev->timeout_stamp++;
1043 timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
1044
1045 /*
1046 * All requests that happened during the previous use of
1047 * this slot should be done by now. The previous use was
1048 * over 7 seconds ago.
1049 */
1050 if (skdev->timeout_slot[timo_slot] == 0) {
1051 goto timer_func_out;
1052 }
1053
1054 /* Something is overdue */
1055 Dcmn_err(CE_NOTE, "found %d timeouts, draining busy=%d",
1056 skdev->timeout_slot[timo_slot],
1057 skdev->queue_depth_busy);
1058 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
1059 skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
1060 skdev->timo_slot = timo_slot;
1061
1062 timer_func_out:
1063 skdev->timer_active = 0;
1064 }
1065
1066 /*
1067 *
1068 * Name: skd_timer_tick_not_online, handles various device
1069 * state transitions.
1070 *
1071 * Inputs: skdev - device state structure.
1072 *
1073 * Returns: Nothing.
1074 *
1075 */
1076 static void
1077 skd_timer_tick_not_online(struct skd_device *skdev)
1078 {
1079 Dcmn_err(CE_NOTE, "skd_skd_timer_tick_not_online: state=%d tmo=%d",
1080 skdev->state, skdev->timer_countdown);
1081
1082 ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
1083
1084 switch (skdev->state) {
1085 case SKD_DRVR_STATE_IDLE:
1086 case SKD_DRVR_STATE_LOAD:
1087 break;
1088 case SKD_DRVR_STATE_BUSY_SANITIZE:
1089 cmn_err(CE_WARN, "!drive busy sanitize[%x], driver[%x]\n",
1090 skdev->drive_state, skdev->state);
1091 break;
1092
1093 case SKD_DRVR_STATE_BUSY:
1094 case SKD_DRVR_STATE_BUSY_IMMINENT:
1095 case SKD_DRVR_STATE_BUSY_ERASE:
1096 Dcmn_err(CE_NOTE, "busy[%x], countdown=%d\n",
1097 skdev->state, skdev->timer_countdown);
1098 if (skdev->timer_countdown > 0) {
1099 skdev->timer_countdown--;
1100 return;
1101 }
1102 cmn_err(CE_WARN, "!busy[%x], timedout=%d, restarting device.",
1103 skdev->state, skdev->timer_countdown);
1104 skd_restart_device(skdev);
1105 break;
1106
1107 case SKD_DRVR_STATE_WAIT_BOOT:
1108 case SKD_DRVR_STATE_STARTING:
1109 if (skdev->timer_countdown > 0) {
1110 skdev->timer_countdown--;
1111 return;
1112 }
1113 /*
1114 * For now, we fault the drive. Could attempt resets to
1115 * revcover at some point.
1116 */
1117 skdev->state = SKD_DRVR_STATE_FAULT;
1118
1119 cmn_err(CE_WARN, "!(%s): DriveFault Connect Timeout (%x)",
1120 skd_name(skdev), skdev->drive_state);
1121
1122 /* start the queue so we can respond with error to requests */
1123 skd_start(skdev);
1124
1125 /* wakeup anyone waiting for startup complete */
1126 skdev->gendisk_on = -1;
1127
1128 cv_signal(&skdev->cv_waitq);
1129 break;
1130
1131
1132 case SKD_DRVR_STATE_PAUSING:
1133 case SKD_DRVR_STATE_PAUSED:
1134 break;
1135
1136 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
1137 cmn_err(CE_WARN,
1138 "!%s: draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
1139 skdev->name,
1140 skdev->timo_slot,
1141 skdev->timer_countdown,
1142 skdev->queue_depth_busy,
1143 skdev->timeout_slot[skdev->timo_slot]);
1144 /* if the slot has cleared we can let the I/O continue */
1145 if (skdev->timeout_slot[skdev->timo_slot] == 0) {
1146 Dcmn_err(CE_NOTE, "Slot drained, starting queue.");
1147 skdev->state = SKD_DRVR_STATE_ONLINE;
1148 skd_start(skdev);
1149 return;
1150 }
1151 if (skdev->timer_countdown > 0) {
1152 skdev->timer_countdown--;
1153 return;
1154 }
1155 skd_restart_device(skdev);
1156 break;
1157
1158 case SKD_DRVR_STATE_RESTARTING:
1159 if (skdev->timer_countdown > 0) {
1160 skdev->timer_countdown--;
1161
1162 return;
1163 }
1164 /*
1165 * For now, we fault the drive. Could attempt resets to
1166 * revcover at some point.
1167 */
1168 skdev->state = SKD_DRVR_STATE_FAULT;
1169 cmn_err(CE_WARN, "!(%s): DriveFault Reconnect Timeout (%x)\n",
1170 skd_name(skdev), skdev->drive_state);
1171
1172 /*
1173 * Recovering does two things:
1174 * 1. completes IO with error
1175 * 2. reclaims dma resources
1176 * When is it safe to recover requests?
1177 * - if the drive state is faulted
1178 * - if the state is still soft reset after out timeout
1179 * - if the drive registers are dead (state = FF)
1180 */
1181
1182 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
1183 (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
1184 (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK)) {
1185 /*
1186 * It never came out of soft reset. Try to
1187 * recover the requests and then let them
1188 * fail. This is to mitigate hung processes.
1189 *
1190 * Acquire the interrupt lock since these lists are
1191 * manipulated by interrupt handlers.
1192 */
1193 ASSERT(!WAITQ_LOCK_HELD(skdev));
1194 INTR_LOCK(skdev);
1195 skd_recover_requests(skdev);
1196 INTR_UNLOCK(skdev);
1197 }
1198 /* start the queue so we can respond with error to requests */
1199 skd_start(skdev);
1200 /* wakeup anyone waiting for startup complete */
1201 skdev->gendisk_on = -1;
1202 cv_signal(&skdev->cv_waitq);
1203 break;
1204
1205 case SKD_DRVR_STATE_RESUMING:
1206 case SKD_DRVR_STATE_STOPPING:
1207 case SKD_DRVR_STATE_SYNCING:
1208 case SKD_DRVR_STATE_FAULT:
1209 case SKD_DRVR_STATE_DISAPPEARED:
1210 default:
1211 break;
1212 }
1213 }
1214
1215 /*
1216 *
1217 * Name: skd_timer, kicks off the timer processing.
1218 *
1219 * Inputs: skdev - device state structure.
1220 *
1221 * Returns: Nothing.
1222 *
1223 */
1224 static void
1225 skd_timer(void *arg)
1226 {
1227 skd_device_t *skdev = (skd_device_t *)arg;
1228
1229 /* Someone set us to 0, don't bother rescheduling. */
1230 ADAPTER_STATE_LOCK(skdev);
1231 if (skdev->skd_timer_timeout_id != 0) {
1232 ADAPTER_STATE_UNLOCK(skdev);
1233 /* Pardon the drop-and-then-acquire logic here. */
1234 skd_timer_tick(skdev);
1235 ADAPTER_STATE_LOCK(skdev);
1236 /* Restart timer, if not being stopped. */
1237 if (skdev->skd_timer_timeout_id != 0) {
1238 skdev->skd_timer_timeout_id =
1239 timeout(skd_timer, arg, skd_timer_ticks);
1240 }
1241 }
1242 ADAPTER_STATE_UNLOCK(skdev);
1243 }
1244
1245 /*
1246 *
1247 * Name: skd_start_timer, kicks off the 1-second timer.
1248 *
1249 * Inputs: skdev - device state structure.
1250 *
1251 * Returns: Zero.
1252 *
1253 */
1254 static void
1255 skd_start_timer(struct skd_device *skdev)
1256 {
1257 /* Start one second driver timer. */
1258 ADAPTER_STATE_LOCK(skdev);
1259 ASSERT(skdev->skd_timer_timeout_id == 0);
1260
1261 /*
1262 * Do first "timeout tick" right away, but not in this
1263 * thread.
1264 */
1265 skdev->skd_timer_timeout_id = timeout(skd_timer, skdev, 1);
1266 ADAPTER_STATE_UNLOCK(skdev);
1267 }
1268
1269 /*
1270 * INTERNAL REQUESTS -- generated by driver itself
1271 */
1272
1273 /*
1274 *
1275 * Name: skd_format_internal_skspcl, setups the internal
1276 * FIT request message.
1277 *
1278 * Inputs: skdev - device state structure.
1279 *
1280 * Returns: One.
1281 *
1282 */
1283 static int
1284 skd_format_internal_skspcl(struct skd_device *skdev)
1285 {
1286 struct skd_special_context *skspcl = &skdev->internal_skspcl;
1287 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1288 struct fit_msg_hdr *fmh;
1289 uint64_t dma_address;
1290 struct skd_scsi_request *scsi;
1291
1292 fmh = (struct fit_msg_hdr *)&skspcl->msg_buf64[0];
1293 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1294 fmh->num_protocol_cmds_coalesced = 1;
1295
1296 /* Instead of 64-bytes in, use 8-(64-bit-words) for linted alignment. */
1297 scsi = (struct skd_scsi_request *)&skspcl->msg_buf64[8];
1298 bzero(scsi, sizeof (*scsi));
1299 dma_address = skspcl->req.sksg_dma_address.cookies->_dmu._dmac_ll;
1300 scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
1301 sgd->control = FIT_SGD_CONTROL_LAST;
1302 sgd->byte_count = 0;
1303 sgd->host_side_addr = skspcl->db_dma_address.cookies->_dmu._dmac_ll;
1304 sgd->dev_side_addr = 0; /* not used */
1305 sgd->next_desc_ptr = 0LL;
1306
1307 return (1);
1308 }
1309
1310 /*
1311 *
1312 * Name: skd_send_internal_skspcl, send internal requests to
1313 * the hardware.
1314 *
1315 * Inputs: skdev - device state structure.
1316 * skspcl - request structure
1317 * opcode - just what it says
1318 *
1319 * Returns: Nothing.
1320 *
1321 */
1322 void
1323 skd_send_internal_skspcl(struct skd_device *skdev,
1324 struct skd_special_context *skspcl, uint8_t opcode)
1325 {
1326 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1327 struct skd_scsi_request *scsi;
1328
1329 if (SKD_REQ_STATE_IDLE != skspcl->req.state) {
1330 /*
1331 * A refresh is already in progress.
1332 * Just wait for it to finish.
1333 */
1334 return;
1335 }
1336
1337 ASSERT(0 == (skspcl->req.id & SKD_ID_INCR));
1338 skspcl->req.state = SKD_REQ_STATE_BUSY;
1339 skspcl->req.id += SKD_ID_INCR;
1340
1341 /* Instead of 64-bytes in, use 8-(64-bit-words) for linted alignment. */
1342 scsi = (struct skd_scsi_request *)&skspcl->msg_buf64[8];
1343 scsi->hdr.tag = skspcl->req.id;
1344
1345 Dcmn_err(CE_NOTE, "internal skspcl: opcode=%x req.id=%x ==========>",
1346 opcode, skspcl->req.id);
1347
1348 switch (opcode) {
1349 case TEST_UNIT_READY:
1350 scsi->cdb[0] = TEST_UNIT_READY;
1351 scsi->cdb[1] = 0x00;
1352 scsi->cdb[2] = 0x00;
1353 scsi->cdb[3] = 0x00;
1354 scsi->cdb[4] = 0x00;
1355 scsi->cdb[5] = 0x00;
1356 sgd->byte_count = 0;
1357 scsi->hdr.sg_list_len_bytes = 0;
1358 break;
1359 case READ_CAPACITY_EXT:
1360 scsi->cdb[0] = READ_CAPACITY_EXT;
1361 scsi->cdb[1] = 0x10;
1362 scsi->cdb[2] = 0x00;
1363 scsi->cdb[3] = 0x00;
1364 scsi->cdb[4] = 0x00;
1365 scsi->cdb[5] = 0x00;
1366 scsi->cdb[6] = 0x00;
1367 scsi->cdb[7] = 0x00;
1368 scsi->cdb[8] = 0x00;
1369 scsi->cdb[9] = 0x00;
1370 scsi->cdb[10] = 0x00;
1371 scsi->cdb[11] = 0x00;
1372 scsi->cdb[12] = 0x00;
1373 scsi->cdb[13] = 0x20;
1374 scsi->cdb[14] = 0x00;
1375 scsi->cdb[15] = 0x00;
1376 sgd->byte_count = SKD_N_READ_CAP_EXT_BYTES;
1377 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1378 break;
1379 case 0x28:
1380 (void) memset(skspcl->data_buf, 0x65, SKD_N_INTERNAL_BYTES);
1381
1382 scsi->cdb[0] = 0x28;
1383 scsi->cdb[1] = 0x00;
1384 scsi->cdb[2] = 0x00;
1385 scsi->cdb[3] = 0x00;
1386 scsi->cdb[4] = 0x00;
1387 scsi->cdb[5] = 0x00;
1388 scsi->cdb[6] = 0x00;
1389 scsi->cdb[7] = 0x00;
1390 scsi->cdb[8] = 0x01;
1391 scsi->cdb[9] = 0x00;
1392 sgd->byte_count = SKD_N_INTERNAL_BYTES;
1393 scsi->hdr.sg_list_len_bytes = cpu_to_be32(SKD_N_INTERNAL_BYTES);
1394 break;
1395 case INQUIRY:
1396 scsi->cdb[0] = INQUIRY;
1397 scsi->cdb[1] = 0x01; /* evpd */
1398 scsi->cdb[2] = 0x80; /* serial number page */
1399 scsi->cdb[3] = 0x00;
1400 scsi->cdb[4] = 0x10;
1401 scsi->cdb[5] = 0x00;
1402 sgd->byte_count = 16; /* SKD_N_INQ_BYTES */;
1403 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1404 break;
1405 case INQUIRY2:
1406 scsi->cdb[0] = INQUIRY;
1407 scsi->cdb[1] = 0x00;
1408 scsi->cdb[2] = 0x00; /* serial number page */
1409 scsi->cdb[3] = 0x00;
1410 scsi->cdb[4] = 0x24;
1411 scsi->cdb[5] = 0x00;
1412 sgd->byte_count = 36; /* SKD_N_INQ_BYTES */;
1413 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1414 break;
1415 case SYNCHRONIZE_CACHE:
1416 scsi->cdb[0] = SYNCHRONIZE_CACHE;
1417 scsi->cdb[1] = 0x00;
1418 scsi->cdb[2] = 0x00;
1419 scsi->cdb[3] = 0x00;
1420 scsi->cdb[4] = 0x00;
1421 scsi->cdb[5] = 0x00;
1422 scsi->cdb[6] = 0x00;
1423 scsi->cdb[7] = 0x00;
1424 scsi->cdb[8] = 0x00;
1425 scsi->cdb[9] = 0x00;
1426 sgd->byte_count = 0;
1427 scsi->hdr.sg_list_len_bytes = 0;
1428 break;
1429 default:
1430 ASSERT("Don't know what to send");
1431 return;
1432
1433 }
1434
1435 skd_send_special_fitmsg(skdev, skspcl);
1436 }
1437
1438 /*
1439 *
1440 * Name: skd_refresh_device_data, sends a TUR command.
1441 *
1442 * Inputs: skdev - device state structure.
1443 *
1444 * Returns: Nothing.
1445 *
1446 */
1447 static void
1448 skd_refresh_device_data(struct skd_device *skdev)
1449 {
1450 struct skd_special_context *skspcl = &skdev->internal_skspcl;
1451
1452 Dcmn_err(CE_NOTE, "refresh_device_data: state=%d", skdev->state);
1453
1454 skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
1455 }
1456
1457 /*
1458 *
1459 * Name: skd_complete_internal, handles the completion of
1460 * driver-initiated I/O requests.
1461 *
1462 * Inputs: skdev - device state structure.
1463 * skcomp - completion structure.
1464 * skerr - error structure.
1465 * skspcl - request structure.
1466 *
1467 * Returns: Nothing.
1468 *
1469 */
1470 /* ARGSUSED */ /* Upstream common source with other platforms. */
1471 static void
1472 skd_complete_internal(struct skd_device *skdev,
1473 volatile struct fit_completion_entry_v1 *skcomp,
1474 volatile struct fit_comp_error_info *skerr,
1475 struct skd_special_context *skspcl)
1476 {
1477 uint8_t *buf = skspcl->data_buf;
1478 uint8_t status = 2;
1479 /* Instead of 64-bytes in, use 8-(64-bit-words) for linted alignment. */
1480 struct skd_scsi_request *scsi =
1481 (struct skd_scsi_request *)&skspcl->msg_buf64[8];
1482
1483 ASSERT(skspcl == &skdev->internal_skspcl);
1484
1485 (void) ddi_dma_sync(skspcl->db_dma_address.dma_handle, 0, 0,
1486 DDI_DMA_SYNC_FORKERNEL);
1487 (void) ddi_dma_sync(skspcl->mb_dma_address.dma_handle, 0, 0,
1488 DDI_DMA_SYNC_FORKERNEL);
1489
1490 Dcmn_err(CE_NOTE, "complete internal %x", scsi->cdb[0]);
1491
1492 skspcl->req.completion = *skcomp;
1493 skspcl->req.state = SKD_REQ_STATE_IDLE;
1494 skspcl->req.id += SKD_ID_INCR;
1495
1496 status = skspcl->req.completion.status;
1497
1498 Dcmn_err(CE_NOTE, "<<<<====== complete_internal: opc=%x", *scsi->cdb);
1499
1500 switch (scsi->cdb[0]) {
1501 case TEST_UNIT_READY:
1502 if (SAM_STAT_GOOD == status) {
1503 skd_send_internal_skspcl(skdev, skspcl,
1504 READ_CAPACITY_EXT);
1505 } else {
1506 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
1507 cmn_err(CE_WARN,
1508 "!%s: TUR failed, don't send anymore"
1509 "state 0x%x", skdev->name, skdev->state);
1510
1511 return;
1512 }
1513
1514 Dcmn_err(CE_NOTE, "%s: TUR failed, retry skerr",
1515 skdev->name);
1516 skd_send_internal_skspcl(skdev, skspcl, 0x00);
1517 }
1518 break;
1519 case READ_CAPACITY_EXT: {
1520 uint64_t cap, Nblocks;
1521 uint64_t xbuf[1];
1522
1523 skdev->read_cap_is_valid = 0;
1524 if (SAM_STAT_GOOD == status) {
1525 bcopy(buf, xbuf, 8);
1526 cap = be64_to_cpu(*xbuf);
1527 skdev->read_cap_last_lba = cap;
1528 skdev->read_cap_blocksize =
1529 (buf[8] << 24) | (buf[9] << 16) |
1530 (buf[10] << 8) | buf[11];
1531
1532 cap *= skdev->read_cap_blocksize;
1533 Dcmn_err(CE_NOTE, " Last LBA: %" PRIu64 " (0x%" PRIx64
1534 "), blk sz: %d, Capacity: %" PRIu64 "GB\n",
1535 skdev->read_cap_last_lba,
1536 skdev->read_cap_last_lba,
1537 skdev->read_cap_blocksize,
1538 cap >> 30ULL);
1539
1540 Nblocks = skdev->read_cap_last_lba + 1;
1541
1542 skdev->Nblocks = Nblocks;
1543 skdev->read_cap_is_valid = 1;
1544
1545 skd_send_internal_skspcl(skdev, skspcl, INQUIRY2);
1546
1547 } else {
1548 Dcmn_err(CE_NOTE, "**** READCAP failed, retry TUR");
1549 skd_send_internal_skspcl(skdev, skspcl,
1550 TEST_UNIT_READY);
1551 }
1552 break;
1553 }
1554 case INQUIRY:
1555 skdev->inquiry_is_valid = 0;
1556 if (SAM_STAT_GOOD == status) {
1557 skdev->inquiry_is_valid = 1;
1558
1559 if (scsi->cdb[1] == 0x1) {
1560 bcopy(&buf[4], skdev->inq_serial_num, 12);
1561 skdev->inq_serial_num[12] = '\0';
1562 } else {
1563 char *tmp = skdev->inq_vendor_id;
1564
1565 bcopy(&buf[8], tmp, 8);
1566 tmp[8] = '\0';
1567
1568 tmp = skdev->inq_product_id;
1569 bcopy(&buf[16], tmp, 16);
1570 tmp[16] = '\0';
1571
1572 tmp = skdev->inq_product_rev;
1573 bcopy(&buf[32], tmp, 4);
1574 tmp[4] = '\0';
1575 }
1576 }
1577
1578 if (skdev->state != SKD_DRVR_STATE_ONLINE)
1579 if (skd_unquiesce_dev(skdev) < 0)
1580 cmn_err(CE_NOTE, "** failed, to ONLINE device");
1581 break;
1582 case SYNCHRONIZE_CACHE:
1583 skdev->sync_done = (SAM_STAT_GOOD == status) ? 1 : -1;
1584
1585 cv_signal(&skdev->cv_waitq);
1586 break;
1587
1588 default:
1589 ASSERT("we didn't send this");
1590 }
1591 }
1592
1593 /*
1594 * FIT MESSAGES
1595 */
1596
1597 /*
1598 *
1599 * Name: skd_send_fitmsg, send a FIT message to the hardware.
1600 *
1601 * Inputs: skdev - device state structure.
1602 * skmsg - FIT message structure.
1603 *
1604 * Returns: Nothing.
1605 *
1606 */
1607 /* ARGSUSED */ /* Upstream common source with other platforms. */
1608 static void
1609 skd_send_fitmsg(struct skd_device *skdev,
1610 struct skd_fitmsg_context *skmsg)
1611 {
1612 uint64_t qcmd;
1613 struct fit_msg_hdr *fmh;
1614
1615 Dcmn_err(CE_NOTE, "msgbuf's DMA addr: 0x%" PRIx64 ", qdepth_busy=%d",
1616 skmsg->mb_dma_address.cookies->dmac_laddress,
1617 skdev->queue_depth_busy);
1618
1619 Dcmn_err(CE_NOTE, "msg_buf 0x%p, offset %x", (void *)skmsg->msg_buf,
1620 skmsg->offset);
1621
1622 qcmd = skmsg->mb_dma_address.cookies->dmac_laddress;
1623 qcmd |= FIT_QCMD_QID_NORMAL;
1624
1625 fmh = (struct fit_msg_hdr *)skmsg->msg_buf64;
1626 skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
1627
1628 if (skdev->dbg_level > 1) {
1629 uint8_t *bp = skmsg->msg_buf;
1630 int i;
1631
1632 for (i = 0; i < skmsg->length; i += 8) {
1633 Dcmn_err(CE_NOTE, " msg[%2d] %02x %02x %02x %02x "
1634 "%02x %02x %02x %02x",
1635 i, bp[i + 0], bp[i + 1], bp[i + 2],
1636 bp[i + 3], bp[i + 4], bp[i + 5],
1637 bp[i + 6], bp[i + 7]);
1638 if (i == 0) i = 64 - 8;
1639 }
1640 }
1641
1642 (void) ddi_dma_sync(skmsg->mb_dma_address.dma_handle, 0, 0,
1643 DDI_DMA_SYNC_FORDEV);
1644
1645 ASSERT(skmsg->length > sizeof (struct fit_msg_hdr));
1646 if (skmsg->length > 256) {
1647 qcmd |= FIT_QCMD_MSGSIZE_512;
1648 } else if (skmsg->length > 128) {
1649 qcmd |= FIT_QCMD_MSGSIZE_256;
1650 } else if (skmsg->length > 64) {
1651 qcmd |= FIT_QCMD_MSGSIZE_128;
1652 }
1653
1654 skdev->ios_started++;
1655
1656 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
1657 }
1658
1659 /*
1660 *
1661 * Name: skd_send_special_fitmsg, send a special FIT message
1662 * to the hardware used driver-originated I/O requests.
1663 *
1664 * Inputs: skdev - device state structure.
1665 * skspcl - skspcl structure.
1666 *
1667 * Returns: Nothing.
1668 *
1669 */
1670 static void
1671 skd_send_special_fitmsg(struct skd_device *skdev,
1672 struct skd_special_context *skspcl)
1673 {
1674 uint64_t qcmd;
1675
1676 Dcmn_err(CE_NOTE, "send_special_fitmsg: pt 1");
1677
1678 if (skdev->dbg_level > 1) {
1679 uint8_t *bp = skspcl->msg_buf;
1680 int i;
1681
1682 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
1683 cmn_err(CE_NOTE,
1684 " spcl[%2d] %02x %02x %02x %02x "
1685 "%02x %02x %02x %02x\n", i,
1686 bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3],
1687 bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]);
1688 if (i == 0) i = 64 - 8;
1689 }
1690
1691 for (i = 0; i < skspcl->req.n_sg; i++) {
1692 struct fit_sg_descriptor *sgd =
1693 &skspcl->req.sksg_list[i];
1694
1695 cmn_err(CE_NOTE, " sg[%d] count=%u ctrl=0x%x "
1696 "addr=0x%" PRIx64 " next=0x%" PRIx64,
1697 i, sgd->byte_count, sgd->control,
1698 sgd->host_side_addr, sgd->next_desc_ptr);
1699 }
1700 }
1701
1702 (void) ddi_dma_sync(skspcl->mb_dma_address.dma_handle, 0, 0,
1703 DDI_DMA_SYNC_FORDEV);
1704 (void) ddi_dma_sync(skspcl->db_dma_address.dma_handle, 0, 0,
1705 DDI_DMA_SYNC_FORDEV);
1706
1707 /*
1708 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
1709 * and one 64-byte SSDI command.
1710 */
1711 qcmd = skspcl->mb_dma_address.cookies->dmac_laddress;
1712
1713 qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
1714
1715 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
1716 }
1717
1718 /*
1719 * COMPLETION QUEUE
1720 */
1721
1722 static void skd_complete_other(struct skd_device *skdev,
1723 volatile struct fit_completion_entry_v1 *skcomp,
1724 volatile struct fit_comp_error_info *skerr);
1725
1726 struct sns_info {
1727 uint8_t type;
1728 uint8_t stat;
1729 uint8_t key;
1730 uint8_t asc;
1731 uint8_t ascq;
1732 uint8_t mask;
1733 enum skd_check_status_action action;
1734 };
1735
1736 static struct sns_info skd_chkstat_table[] = {
1737 /* Good */
1738 {0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c, SKD_CHECK_STATUS_REPORT_GOOD},
1739
1740 /* Smart alerts */
1741 {0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
1742 SKD_CHECK_STATUS_REPORT_SMART_ALERT},
1743 {0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
1744 SKD_CHECK_STATUS_REPORT_SMART_ALERT},
1745 {0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temp over trigger */
1746 SKD_CHECK_STATUS_REPORT_SMART_ALERT},
1747
1748 /* Retry (with limits) */
1749 {0x70, 0x02, ABORTED_COMMAND, 0, 0, 0x1C, /* DMA errors */
1750 SKD_CHECK_STATUS_REQUEUE_REQUEST},
1751 {0x70, 0x02, UNIT_ATTENTION, 0x0B, 0x00, 0x1E, /* warnings */
1752 SKD_CHECK_STATUS_REQUEUE_REQUEST},
1753 {0x70, 0x02, UNIT_ATTENTION, 0x5D, 0x00, 0x1E, /* thresholds */
1754 SKD_CHECK_STATUS_REQUEUE_REQUEST},
1755 {0x70, 0x02, UNIT_ATTENTION, 0x80, 0x30, 0x1F, /* backup power */
1756 SKD_CHECK_STATUS_REQUEUE_REQUEST},
1757
1758 /* Busy (or about to be) */
1759 {0x70, 0x02, UNIT_ATTENTION, 0x3f, 0x01, 0x1F, /* fw changed */
1760 SKD_CHECK_STATUS_BUSY_IMMINENT},
1761 };
1762
1763 /*
1764 *
1765 * Name: skd_check_status, checks the return status from a
1766 * completed I/O request.
1767 *
1768 * Inputs: skdev - device state structure.
1769 * cmp_status - SCSI status byte.
1770 * skerr - the error data structure.
1771 *
1772 * Returns: Depending on the error condition, return the action
1773 * to be taken as specified in the skd_chkstat_table.
1774 * If no corresponding value is found in the table
1775 * return SKD_CHECK_STATUS_REPORT_GOOD is no error otherwise
1776 * return SKD_CHECK_STATUS_REPORT_ERROR.
1777 *
1778 */
1779 static enum skd_check_status_action
1780 skd_check_status(struct skd_device *skdev, uint8_t cmp_status,
1781 volatile struct fit_comp_error_info *skerr)
1782 {
1783 /*
1784 * Look up status and sense data to decide how to handle the error
1785 * from the device.
1786 * mask says which fields must match e.g., mask=0x18 means check
1787 * type and stat, ignore key, asc, ascq.
1788 */
1789 int i, n;
1790
1791 Dcmn_err(CE_NOTE, "(%s): key/asc/ascq %02x/%02x/%02x",
1792 skd_name(skdev), skerr->key, skerr->code, skerr->qual);
1793
1794 Dcmn_err(CE_NOTE, "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x",
1795 skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual);
1796
1797 /* Does the info match an entry in the good category? */
1798 n = sizeof (skd_chkstat_table) / sizeof (skd_chkstat_table[0]);
1799 for (i = 0; i < n; i++) {
1800 struct sns_info *sns = &skd_chkstat_table[i];
1801
1802 if (sns->mask & 0x10)
1803 if (skerr->type != sns->type) continue;
1804
1805 if (sns->mask & 0x08)
1806 if (cmp_status != sns->stat) continue;
1807
1808 if (sns->mask & 0x04)
1809 if (skerr->key != sns->key) continue;
1810
1811 if (sns->mask & 0x02)
1812 if (skerr->code != sns->asc) continue;
1813
1814 if (sns->mask & 0x01)
1815 if (skerr->qual != sns->ascq) continue;
1816
1817 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
1818 cmn_err(CE_WARN, "!(%s):SMART Alert: sense key/asc/ascq"
1819 " %02x/%02x/%02x",
1820 skd_name(skdev), skerr->key,
1821 skerr->code, skerr->qual);
1822 }
1823
1824 Dcmn_err(CE_NOTE, "skd_check_status: returning %x",
1825 sns->action);
1826
1827 return (sns->action);
1828 }
1829
1830 /*
1831 * No other match, so nonzero status means error,
1832 * zero status means good
1833 */
1834 if (cmp_status) {
1835 cmn_err(CE_WARN,
1836 "!%s: status check: qdepth=%d skmfl=%p (%d) skrfl=%p (%d)",
1837 skdev->name,
1838 skdev->queue_depth_busy,
1839 (void *)skdev->skmsg_free_list, skd_list_skmsg(skdev, 0),
1840 (void *)skdev->skreq_free_list, skd_list_skreq(skdev, 0));
1841
1842 cmn_err(CE_WARN, "!%s: t=%02x stat=%02x k=%02x c=%02x q=%02x",
1843 skdev->name, skerr->type, cmp_status, skerr->key,
1844 skerr->code, skerr->qual);
1845
1846 return (SKD_CHECK_STATUS_REPORT_ERROR);
1847 }
1848
1849 Dcmn_err(CE_NOTE, "status check good default");
1850
1851 return (SKD_CHECK_STATUS_REPORT_GOOD);
1852 }
1853
1854 /*
1855 *
1856 * Name: skd_isr_completion_posted, handles I/O completions.
1857 *
1858 * Inputs: skdev - device state structure.
1859 *
1860 * Returns: Nothing.
1861 *
1862 */
1863 static void
1864 skd_isr_completion_posted(struct skd_device *skdev)
1865 {
1866 volatile struct fit_completion_entry_v1 *skcmp = NULL;
1867 volatile struct fit_comp_error_info *skerr;
1868 struct skd_fitmsg_context *skmsg;
1869 struct skd_request_context *skreq;
1870 skd_buf_private_t *pbuf;
1871 uint16_t req_id;
1872 uint32_t req_slot;
1873 uint32_t timo_slot;
1874 uint32_t msg_slot;
1875 uint16_t cmp_cntxt = 0;
1876 uint8_t cmp_status = 0;
1877 uint8_t cmp_cycle = 0;
1878 uint32_t cmp_bytes = 0;
1879
1880 (void) ddi_dma_sync(skdev->cq_dma_address.dma_handle, 0, 0,
1881 DDI_DMA_SYNC_FORKERNEL);
1882
1883 for (;;) {
1884 ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
1885
1886 WAITQ_LOCK(skdev);
1887
1888 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
1889 cmp_cycle = skcmp->cycle;
1890 cmp_cntxt = skcmp->tag;
1891 cmp_status = skcmp->status;
1892 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
1893
1894 skerr = &skdev->skerr_table[skdev->skcomp_ix];
1895
1896 Dcmn_err(CE_NOTE,
1897 "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
1898 "qdepth_busy=%d rbytes=0x%x proto=%d",
1899 skdev->skcomp_cycle, skdev->skcomp_ix,
1900 cmp_cycle, cmp_cntxt, cmp_status,
1901 skdev->queue_depth_busy, cmp_bytes, skdev->proto_ver);
1902
1903 if (cmp_cycle != skdev->skcomp_cycle) {
1904 Dcmn_err(CE_NOTE, "%s:end of completions", skdev->name);
1905
1906 WAITQ_UNLOCK(skdev);
1907 break;
1908 }
1909
1910
1911 skdev->n_req++;
1912
1913 /*
1914 * Update the completion queue head index and possibly
1915 * the completion cycle count.
1916 */
1917 skdev->skcomp_ix++;
1918 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
1919 skdev->skcomp_ix = 0;
1920 skdev->skcomp_cycle++; /* 8-bit wrap-around */
1921 }
1922
1923
1924 /*
1925 * The command context is a unique 32-bit ID. The low order
1926 * bits help locate the request. The request is usually a
1927 * r/w request (see skd_start() above) or a special request.
1928 */
1929 req_id = cmp_cntxt;
1930 req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
1931
1932 Dcmn_err(CE_NOTE,
1933 "<<<< completion_posted 1: req_id=%x req_slot=%x",
1934 req_id, req_slot);
1935
1936 /* Is this other than a r/w request? */
1937 if (req_slot >= skdev->num_req_context) {
1938 /*
1939 * This is not a completion for a r/w request.
1940 */
1941 skd_complete_other(skdev, skcmp, skerr);
1942 WAITQ_UNLOCK(skdev);
1943 continue;
1944 }
1945
1946 skreq = &skdev->skreq_table[req_slot];
1947
1948 /*
1949 * Make sure the request ID for the slot matches.
1950 */
1951 ASSERT(skreq->id == req_id);
1952
1953 if (SKD_REQ_STATE_ABORTED == skreq->state) {
1954 Dcmn_err(CE_NOTE, "reclaim req %p id=%04x\n",
1955 (void *)skreq, skreq->id);
1956 /*
1957 * a previously timed out command can
1958 * now be cleaned up
1959 */
1960 msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
1961 ASSERT(msg_slot < skdev->num_fitmsg_context);
1962 skmsg = &skdev->skmsg_table[msg_slot];
1963 if (skmsg->id == skreq->fitmsg_id) {
1964 ASSERT(skmsg->outstanding > 0);
1965 skmsg->outstanding--;
1966 if (skmsg->outstanding == 0) {
1967 ASSERT(SKD_MSG_STATE_BUSY ==
1968 skmsg->state);
1969 skmsg->state = SKD_MSG_STATE_IDLE;
1970 skmsg->id += SKD_ID_INCR;
1971 skmsg->next = skdev->skmsg_free_list;
1972 skdev->skmsg_free_list = skmsg;
1973 }
1974 }
1975 /*
1976 * Reclaim the skd_request_context
1977 */
1978 skreq->state = SKD_REQ_STATE_IDLE;
1979 skreq->id += SKD_ID_INCR;
1980 skreq->next = skdev->skreq_free_list;
1981 skdev->skreq_free_list = skreq;
1982 WAITQ_UNLOCK(skdev);
1983 continue;
1984 }
1985
1986 skreq->completion.status = cmp_status;
1987
1988 pbuf = skreq->pbuf;
1989 ASSERT(pbuf != NULL);
1990
1991 Dcmn_err(CE_NOTE, "<<<< completion_posted 2: pbuf=%p "
1992 "req_id=%x req_slot=%x", (void *)pbuf, req_id, req_slot);
1993 if (cmp_status && skdev->disks_initialized) {
1994 cmn_err(CE_WARN, "!%s: "
1995 "I/O err: pbuf=%p blkno=%lld (%llx) nbklks=%ld ",
1996 skdev->name, (void *)pbuf, pbuf->x_xfer->x_blkno,
1997 pbuf->x_xfer->x_blkno, pbuf->x_xfer->x_nblks);
1998 }
1999
2000 ASSERT(skdev->active_cmds);
2001 atomic_dec_64(&skdev->active_cmds);
2002
2003 if (SAM_STAT_GOOD == cmp_status) {
2004 /* Release DMA resources for the request. */
2005 if (pbuf->x_xfer->x_nblks != 0)
2006 skd_blkdev_postop_sg_list(skdev, skreq);
2007 WAITQ_UNLOCK(skdev);
2008 skd_end_request(skdev, skreq, 0);
2009 WAITQ_LOCK(skdev);
2010 } else {
2011 switch (skd_check_status(skdev, cmp_status, skerr)) {
2012 case SKD_CHECK_STATUS_REPORT_GOOD:
2013 case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
2014 WAITQ_UNLOCK(skdev);
2015 skd_end_request(skdev, skreq, 0);
2016 WAITQ_LOCK(skdev);
2017 break;
2018
2019 case SKD_CHECK_STATUS_BUSY_IMMINENT:
2020 skd_log_skreq(skdev, skreq, "retry(busy)");
2021 skd_queue(skdev, pbuf);
2022 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
2023 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2024
2025 (void) skd_quiesce_dev(skdev);
2026 break;
2027
2028 /* FALLTHRU */
2029 case SKD_CHECK_STATUS_REPORT_ERROR:
2030 /* fall thru to report error */
2031 default:
2032 /*
2033 * Save the entire completion
2034 * and error entries for
2035 * later error interpretation.
2036 */
2037 skreq->completion = *skcmp;
2038 skreq->err_info = *skerr;
2039 WAITQ_UNLOCK(skdev);
2040 skd_end_request(skdev, skreq, -EIO);
2041 WAITQ_LOCK(skdev);
2042 break;
2043 }
2044 }
2045
2046 /*
2047 * Reclaim the FIT msg buffer if this is
2048 * the first of the requests it carried to
2049 * be completed. The FIT msg buffer used to
2050 * send this request cannot be reused until
2051 * we are sure the s1120 card has copied
2052 * it to its memory. The FIT msg might have
2053 * contained several requests. As soon as
2054 * any of them are completed we know that
2055 * the entire FIT msg was transferred.
2056 * Only the first completed request will
2057 * match the FIT msg buffer id. The FIT
2058 * msg buffer id is immediately updated.
2059 * When subsequent requests complete the FIT
2060 * msg buffer id won't match, so we know
2061 * quite cheaply that it is already done.
2062 */
2063 msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
2064
2065 ASSERT(msg_slot < skdev->num_fitmsg_context);
2066 skmsg = &skdev->skmsg_table[msg_slot];
2067 if (skmsg->id == skreq->fitmsg_id) {
2068 ASSERT(SKD_MSG_STATE_BUSY == skmsg->state);
2069 skmsg->state = SKD_MSG_STATE_IDLE;
2070 skmsg->id += SKD_ID_INCR;
2071 skmsg->next = skdev->skmsg_free_list;
2072 skdev->skmsg_free_list = skmsg;
2073 }
2074
2075 /*
2076 * Decrease the number of active requests.
2077 * This also decrements the count in the
2078 * timeout slot.
2079 */
2080 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
2081 ASSERT(skdev->timeout_slot[timo_slot] > 0);
2082 ASSERT(skdev->queue_depth_busy > 0);
2083
2084 atomic_dec_32(&skdev->timeout_slot[timo_slot]);
2085 atomic_dec_32(&skdev->queue_depth_busy);
2086
2087 /*
2088 * Reclaim the skd_request_context
2089 */
2090 skreq->state = SKD_REQ_STATE_IDLE;
2091 skreq->id += SKD_ID_INCR;
2092 skreq->next = skdev->skreq_free_list;
2093 skdev->skreq_free_list = skreq;
2094
2095 WAITQ_UNLOCK(skdev);
2096
2097 /*
2098 * make sure the lock is held by caller.
2099 */
2100 if ((skdev->state == SKD_DRVR_STATE_PAUSING) &&
2101 (0 == skdev->queue_depth_busy)) {
2102 skdev->state = SKD_DRVR_STATE_PAUSED;
2103 cv_signal(&skdev->cv_waitq);
2104 }
2105 } /* for(;;) */
2106 }
2107
2108 /*
2109 *
2110 * Name: skd_complete_other, handle the completion of a
2111 * non-r/w request.
2112 *
2113 * Inputs: skdev - device state structure.
2114 * skcomp - FIT completion structure.
2115 * skerr - error structure.
2116 *
2117 * Returns: Nothing.
2118 *
2119 */
2120 static void
2121 skd_complete_other(struct skd_device *skdev,
2122 volatile struct fit_completion_entry_v1 *skcomp,
2123 volatile struct fit_comp_error_info *skerr)
2124 {
2125 uint32_t req_id = 0;
2126 uint32_t req_table;
2127 uint32_t req_slot;
2128 struct skd_special_context *skspcl;
2129
2130 req_id = skcomp->tag;
2131 req_table = req_id & SKD_ID_TABLE_MASK;
2132 req_slot = req_id & SKD_ID_SLOT_MASK;
2133
2134 Dcmn_err(CE_NOTE, "complete_other: table=0x%x id=0x%x slot=%d",
2135 req_table, req_id, req_slot);
2136
2137 /*
2138 * Based on the request id, determine how to dispatch this completion.
2139 * This swich/case is finding the good cases and forwarding the
2140 * completion entry. Errors are reported below the switch.
2141 */
2142 ASSERT(req_table == SKD_ID_INTERNAL);
2143 ASSERT(req_slot == 0);
2144
2145 skspcl = &skdev->internal_skspcl;
2146 ASSERT(skspcl->req.id == req_id);
2147 ASSERT(skspcl->req.state == SKD_REQ_STATE_BUSY);
2148
2149 Dcmn_err(CE_NOTE, "<<<<== complete_other: ID_INTERNAL");
2150 skd_complete_internal(skdev, skcomp, skerr, skspcl);
2151 }
2152
2153 /*
2154 *
2155 * Name: skd_reset_skcomp, does what it says, resetting completion
2156 * tables.
2157 *
2158 * Inputs: skdev - device state structure.
2159 *
2160 * Returns: Nothing.
2161 *
2162 */
2163 static void
2164 skd_reset_skcomp(struct skd_device *skdev)
2165 {
2166 uint32_t nbytes;
2167
2168 nbytes = sizeof (struct fit_completion_entry_v1) *
2169 SKD_N_COMPLETION_ENTRY;
2170 nbytes += sizeof (struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
2171
2172 if (skdev->skcomp_table)
2173 bzero(skdev->skcomp_table, nbytes);
2174
2175 skdev->skcomp_ix = 0;
2176 skdev->skcomp_cycle = 1;
2177 }
2178
2179
2180
2181 /*
2182 * INTERRUPTS
2183 */
2184
2185 /*
2186 *
2187 * Name: skd_isr_aif, handles the device interrupts.
2188 *
2189 * Inputs: arg - skdev device state structure.
2190 * intvec - not referenced
2191 *
2192 * Returns: DDI_INTR_CLAIMED if interrupt is handled otherwise
2193 * return DDI_INTR_UNCLAIMED.
2194 *
2195 */
2196 /* ARGSUSED */ /* Upstream common source with other platforms. */
2197 static uint_t
2198 skd_isr_aif(caddr_t arg, caddr_t intvec)
2199 {
2200 uint32_t intstat;
2201 uint32_t ack;
2202 int rc = DDI_INTR_UNCLAIMED;
2203 struct skd_device *skdev;
2204
2205 skdev = (skd_device_t *)(uintptr_t)arg;
2206
2207 ASSERT(skdev != NULL);
2208
2209 skdev->intr_cntr++;
2210
2211 Dcmn_err(CE_NOTE, "skd_isr_aif: intr=%" PRId64 "\n", skdev->intr_cntr);
2212
2213 for (;;) {
2214
2215 ASSERT(!WAITQ_LOCK_HELD(skdev));
2216 INTR_LOCK(skdev);
2217
2218 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
2219
2220 ack = FIT_INT_DEF_MASK;
2221 ack &= intstat;
2222
2223 Dcmn_err(CE_NOTE, "intstat=0x%x ack=0x%x", intstat, ack);
2224
2225 /*
2226 * As long as there is an int pending on device, keep
2227 * running loop. When none, get out, but if we've never
2228 * done any processing, call completion handler?
2229 */
2230 if (ack == 0) {
2231 /*
2232 * No interrupts on device, but run the completion
2233 * processor anyway?
2234 */
2235 if (rc == DDI_INTR_UNCLAIMED &&
2236 skdev->state == SKD_DRVR_STATE_ONLINE) {
2237 Dcmn_err(CE_NOTE,
2238 "1: Want isr_comp_posted call");
2239 skd_isr_completion_posted(skdev);
2240 }
2241 INTR_UNLOCK(skdev);
2242
2243 break;
2244 }
2245 rc = DDI_INTR_CLAIMED;
2246
2247 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
2248
2249 if ((skdev->state != SKD_DRVR_STATE_LOAD) &&
2250 (skdev->state != SKD_DRVR_STATE_STOPPING)) {
2251 if (intstat & FIT_ISH_COMPLETION_POSTED) {
2252 Dcmn_err(CE_NOTE,
2253 "2: Want isr_comp_posted call");
2254 skd_isr_completion_posted(skdev);
2255 }
2256
2257 if (intstat & FIT_ISH_FW_STATE_CHANGE) {
2258 Dcmn_err(CE_NOTE, "isr: fwstate change");
2259
2260 skd_isr_fwstate(skdev);
2261 if (skdev->state == SKD_DRVR_STATE_FAULT ||
2262 skdev->state ==
2263 SKD_DRVR_STATE_DISAPPEARED) {
2264 INTR_UNLOCK(skdev);
2265
2266 return (rc);
2267 }
2268 }
2269
2270 if (intstat & FIT_ISH_MSG_FROM_DEV) {
2271 Dcmn_err(CE_NOTE, "isr: msg_from_dev change");
2272 skd_isr_msg_from_dev(skdev);
2273 }
2274 }
2275
2276 INTR_UNLOCK(skdev);
2277 }
2278
2279 if (!SIMPLEQ_EMPTY(&skdev->waitqueue))
2280 skd_start(skdev);
2281
2282 return (rc);
2283 }
2284
2285 /*
2286 *
2287 * Name: skd_drive_fault, set the drive state to DRV_STATE_FAULT.
2288 *
2289 * Inputs: skdev - device state structure.
2290 *
2291 * Returns: Nothing.
2292 *
2293 */
2294 static void
2295 skd_drive_fault(struct skd_device *skdev)
2296 {
2297 skdev->state = SKD_DRVR_STATE_FAULT;
2298 cmn_err(CE_WARN, "!(%s): Drive FAULT\n",
2299 skd_name(skdev));
2300 }
2301
2302 /*
2303 *
2304 * Name: skd_drive_disappeared, set the drive state to DISAPPEARED..
2305 *
2306 * Inputs: skdev - device state structure.
2307 *
2308 * Returns: Nothing.
2309 *
2310 */
2311 static void
2312 skd_drive_disappeared(struct skd_device *skdev)
2313 {
2314 skdev->state = SKD_DRVR_STATE_DISAPPEARED;
2315 cmn_err(CE_WARN, "!(%s): Drive DISAPPEARED\n",
2316 skd_name(skdev));
2317 }
2318
2319 /*
2320 *
2321 * Name: skd_isr_fwstate, handles the various device states.
2322 *
2323 * Inputs: skdev - device state structure.
2324 *
2325 * Returns: Nothing.
2326 *
2327 */
2328 static void
2329 skd_isr_fwstate(struct skd_device *skdev)
2330 {
2331 uint32_t sense;
2332 uint32_t state;
2333 int prev_driver_state;
2334 uint32_t mtd;
2335
2336 prev_driver_state = skdev->state;
2337
2338 sense = SKD_READL(skdev, FIT_STATUS);
2339 state = sense & FIT_SR_DRIVE_STATE_MASK;
2340
2341 Dcmn_err(CE_NOTE, "s1120 state %s(%d)=>%s(%d)",
2342 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
2343 skd_drive_state_to_str(state), state);
2344
2345 skdev->drive_state = state;
2346
2347 switch (skdev->drive_state) {
2348 case FIT_SR_DRIVE_INIT:
2349 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
2350 skd_disable_interrupts(skdev);
2351 break;
2352 }
2353 if (skdev->state == SKD_DRVR_STATE_RESTARTING) {
2354 skd_recover_requests(skdev);
2355 }
2356 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
2357 skdev->timer_countdown =
2358 SKD_TIMER_SECONDS(SKD_STARTING_TO);
2359 skdev->state = SKD_DRVR_STATE_STARTING;
2360 skd_soft_reset(skdev);
2361 break;
2362 }
2363 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
2364 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2365 skdev->last_mtd = mtd;
2366 break;
2367
2368 case FIT_SR_DRIVE_ONLINE:
2369 skdev->queue_depth_limit = skdev->soft_queue_depth_limit;
2370 if (skdev->queue_depth_limit > skdev->hard_queue_depth_limit) {
2371 skdev->queue_depth_limit =
2372 skdev->hard_queue_depth_limit;
2373 }
2374
2375 skdev->queue_depth_lowat = skdev->queue_depth_limit * 2 / 3 + 1;
2376 if (skdev->queue_depth_lowat < 1)
2377 skdev->queue_depth_lowat = 1;
2378 Dcmn_err(CE_NOTE,
2379 "%s queue depth limit=%d hard=%d soft=%d lowat=%d",
2380 DRV_NAME,
2381 skdev->queue_depth_limit,
2382 skdev->hard_queue_depth_limit,
2383 skdev->soft_queue_depth_limit,
2384 skdev->queue_depth_lowat);
2385
2386 skd_refresh_device_data(skdev);
2387 break;
2388 case FIT_SR_DRIVE_BUSY:
2389 skdev->state = SKD_DRVR_STATE_BUSY;
2390 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2391 (void) skd_quiesce_dev(skdev);
2392 break;
2393 case FIT_SR_DRIVE_BUSY_SANITIZE:
2394 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
2395 skd_start(skdev);
2396 break;
2397 case FIT_SR_DRIVE_BUSY_ERASE:
2398 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
2399 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2400 break;
2401 case FIT_SR_DRIVE_OFFLINE:
2402 skdev->state = SKD_DRVR_STATE_IDLE;
2403 break;
2404 case FIT_SR_DRIVE_SOFT_RESET:
2405 skdev->state = SKD_DRVR_STATE_RESTARTING;
2406
2407 switch (skdev->state) {
2408 case SKD_DRVR_STATE_STARTING:
2409 case SKD_DRVR_STATE_RESTARTING:
2410 break;
2411 default:
2412 skdev->state = SKD_DRVR_STATE_RESTARTING;
2413 break;
2414 }
2415 break;
2416 case FIT_SR_DRIVE_FW_BOOTING:
2417 Dcmn_err(CE_NOTE,
2418 "ISR FIT_SR_DRIVE_FW_BOOTING %s", skdev->name);
2419 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
2420 skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_WAIT_BOOT_TO);
2421 break;
2422
2423 case FIT_SR_DRIVE_DEGRADED:
2424 case FIT_SR_PCIE_LINK_DOWN:
2425 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
2426 break;
2427
2428 case FIT_SR_DRIVE_FAULT:
2429 skd_drive_fault(skdev);
2430 skd_recover_requests(skdev);
2431 skd_start(skdev);
2432 break;
2433
2434 case 0xFF:
2435 skd_drive_disappeared(skdev);
2436 skd_recover_requests(skdev);
2437 skd_start(skdev);
2438 break;
2439 default:
2440 /*
2441 * Uknown FW State. Wait for a state we recognize.
2442 */
2443 break;
2444 }
2445
2446 Dcmn_err(CE_NOTE, "Driver state %s(%d)=>%s(%d)",
2447 skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
2448 skd_skdev_state_to_str(skdev->state), skdev->state);
2449 }
2450
2451 /*
2452 *
2453 * Name: skd_recover_requests, attempts to recover requests.
2454 *
2455 * Inputs: skdev - device state structure.
2456 *
2457 * Returns: Nothing.
2458 *
2459 */
2460 static void
2461 skd_recover_requests(struct skd_device *skdev)
2462 {
2463 int i;
2464
2465 ASSERT(INTR_LOCK_HELD(skdev));
2466
2467 for (i = 0; i < skdev->num_req_context; i++) {
2468 struct skd_request_context *skreq = &skdev->skreq_table[i];
2469
2470 if (skreq->state == SKD_REQ_STATE_BUSY) {
2471 skd_log_skreq(skdev, skreq, "requeue");
2472
2473 ASSERT(0 != (skreq->id & SKD_ID_INCR));
2474 ASSERT(skreq->pbuf != NULL);
2475 /* Release DMA resources for the request. */
2476 skd_blkdev_postop_sg_list(skdev, skreq);
2477
2478 skd_end_request(skdev, skreq, EAGAIN);
2479 skreq->pbuf = NULL;
2480 skreq->state = SKD_REQ_STATE_IDLE;
2481 skreq->id += SKD_ID_INCR;
2482 }
2483 if (i > 0) {
2484 skreq[-1].next = skreq;
2485 }
2486 skreq->next = NULL;
2487 }
2488
2489 WAITQ_LOCK(skdev);
2490 skdev->skreq_free_list = skdev->skreq_table;
2491 WAITQ_UNLOCK(skdev);
2492
2493 for (i = 0; i < skdev->num_fitmsg_context; i++) {
2494 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
2495
2496 if (skmsg->state == SKD_MSG_STATE_BUSY) {
2497 skd_log_skmsg(skdev, skmsg, "salvaged");
2498 ASSERT((skmsg->id & SKD_ID_INCR) != 0);
2499 skmsg->state = SKD_MSG_STATE_IDLE;
2500 skmsg->id &= ~SKD_ID_INCR;
2501 }
2502 if (i > 0) {
2503 skmsg[-1].next = skmsg;
2504 }
2505 skmsg->next = NULL;
2506 }
2507 WAITQ_LOCK(skdev);
2508 skdev->skmsg_free_list = skdev->skmsg_table;
2509 WAITQ_UNLOCK(skdev);
2510
2511 for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++) {
2512 skdev->timeout_slot[i] = 0;
2513 }
2514 skdev->queue_depth_busy = 0;
2515 }
2516
2517 /*
2518 *
2519 * Name: skd_isr_msg_from_dev, handles a message from the device.
2520 *
2521 * Inputs: skdev - device state structure.
2522 *
2523 * Returns: Nothing.
2524 *
2525 */
2526 static void
2527 skd_isr_msg_from_dev(struct skd_device *skdev)
2528 {
2529 uint32_t mfd;
2530 uint32_t mtd;
2531
2532 Dcmn_err(CE_NOTE, "skd_isr_msg_from_dev:");
2533
2534 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
2535
2536 Dcmn_err(CE_NOTE, "mfd=0x%x last_mtd=0x%x\n", mfd, skdev->last_mtd);
2537
2538 /*
2539 * ignore any mtd that is an ack for something we didn't send
2540 */
2541 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd)) {
2542 return;
2543 }
2544
2545 switch (FIT_MXD_TYPE(mfd)) {
2546 case FIT_MTD_FITFW_INIT:
2547 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
2548
2549 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
2550 cmn_err(CE_WARN, "!(%s): protocol mismatch\n",
2551 skdev->name);
2552 cmn_err(CE_WARN, "!(%s): got=%d support=%d\n",
2553 skdev->name, skdev->proto_ver,
2554 FIT_PROTOCOL_VERSION_1);
2555 cmn_err(CE_WARN, "!(%s): please upgrade driver\n",
2556 skdev->name);
2557 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
2558 skd_soft_reset(skdev);
2559 break;
2560 }
2561 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
2562 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2563 skdev->last_mtd = mtd;
2564 break;
2565
2566 case FIT_MTD_GET_CMDQ_DEPTH:
2567 skdev->hard_queue_depth_limit = FIT_MXD_DATA(mfd);
2568 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
2569 SKD_N_COMPLETION_ENTRY);
2570 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2571 skdev->last_mtd = mtd;
2572 break;
2573
2574 case FIT_MTD_SET_COMPQ_DEPTH:
2575 SKD_WRITEQ(skdev, skdev->cq_dma_address.cookies->dmac_laddress,
2576 FIT_MSG_TO_DEVICE_ARG);
2577 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
2578 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2579 skdev->last_mtd = mtd;
2580 break;
2581
2582 case FIT_MTD_SET_COMPQ_ADDR:
2583 skd_reset_skcomp(skdev);
2584 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
2585 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2586 skdev->last_mtd = mtd;
2587 break;
2588
2589 case FIT_MTD_ARM_QUEUE:
2590 skdev->last_mtd = 0;
2591 /*
2592 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
2593 */
2594 break;
2595
2596 default:
2597 break;
2598 }
2599 }
2600
2601
2602 /*
2603 *
2604 * Name: skd_disable_interrupts, issues command to disable
2605 * device interrupts.
2606 *
2607 * Inputs: skdev - device state structure.
2608 *
2609 * Returns: Nothing.
2610 *
2611 */
2612 static void
2613 skd_disable_interrupts(struct skd_device *skdev)
2614 {
2615 uint32_t sense;
2616
2617 Dcmn_err(CE_NOTE, "skd_disable_interrupts:");
2618
2619 sense = SKD_READL(skdev, FIT_CONTROL);
2620 sense &= ~FIT_CR_ENABLE_INTERRUPTS;
2621 SKD_WRITEL(skdev, sense, FIT_CONTROL);
2622
2623 Dcmn_err(CE_NOTE, "sense 0x%x", sense);
2624
2625 /*
2626 * Note that the 1s is written. A 1-bit means
2627 * disable, a 0 means enable.
2628 */
2629 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
2630 }
2631
2632 /*
2633 *
2634 * Name: skd_enable_interrupts, issues command to enable
2635 * device interrupts.
2636 *
2637 * Inputs: skdev - device state structure.
2638 *
2639 * Returns: Nothing.
2640 *
2641 */
2642 static void
2643 skd_enable_interrupts(struct skd_device *skdev)
2644 {
2645 uint32_t val;
2646
2647 Dcmn_err(CE_NOTE, "skd_enable_interrupts:");
2648
2649 /* unmask interrupts first */
2650 val = FIT_ISH_FW_STATE_CHANGE +
2651 FIT_ISH_COMPLETION_POSTED +
2652 FIT_ISH_MSG_FROM_DEV;
2653
2654 /*
2655 * Note that the compliment of mask is written. A 1-bit means
2656 * disable, a 0 means enable.
2657 */
2658 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
2659
2660 Dcmn_err(CE_NOTE, "interrupt mask=0x%x", ~val);
2661
2662 val = SKD_READL(skdev, FIT_CONTROL);
2663 val |= FIT_CR_ENABLE_INTERRUPTS;
2664
2665 Dcmn_err(CE_NOTE, "control=0x%x", val);
2666
2667 SKD_WRITEL(skdev, val, FIT_CONTROL);
2668 }
2669
2670 /*
2671 *
2672 * Name: skd_soft_reset, issues a soft reset to the hardware.
2673 *
2674 * Inputs: skdev - device state structure.
2675 *
2676 * Returns: Nothing.
2677 *
2678 */
2679 static void
2680 skd_soft_reset(struct skd_device *skdev)
2681 {
2682 uint32_t val;
2683
2684 Dcmn_err(CE_NOTE, "skd_soft_reset:");
2685
2686 val = SKD_READL(skdev, FIT_CONTROL);
2687 val |= (FIT_CR_SOFT_RESET);
2688
2689 Dcmn_err(CE_NOTE, "soft_reset: control=0x%x", val);
2690
2691 SKD_WRITEL(skdev, val, FIT_CONTROL);
2692 }
2693
2694 /*
2695 *
2696 * Name: skd_start_device, gets the device going.
2697 *
2698 * Inputs: skdev - device state structure.
2699 *
2700 * Returns: Nothing.
2701 *
2702 */
2703 static void
2704 skd_start_device(struct skd_device *skdev)
2705 {
2706 uint32_t state;
2707 int delay_action = 0;
2708
2709 Dcmn_err(CE_NOTE, "skd_start_device:");
2710
2711 /* ack all ghost interrupts */
2712 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2713
2714 state = SKD_READL(skdev, FIT_STATUS);
2715
2716 Dcmn_err(CE_NOTE, "initial status=0x%x", state);
2717
2718 state &= FIT_SR_DRIVE_STATE_MASK;
2719 skdev->drive_state = state;
2720 skdev->last_mtd = 0;
2721
2722 skdev->state = SKD_DRVR_STATE_STARTING;
2723 skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_STARTING_TO);
2724
2725 skd_enable_interrupts(skdev);
2726
2727 switch (skdev->drive_state) {
2728 case FIT_SR_DRIVE_OFFLINE:
2729 Dcmn_err(CE_NOTE, "(%s): Drive offline...",
2730 skd_name(skdev));
2731 break;
2732
2733 case FIT_SR_DRIVE_FW_BOOTING:
2734 Dcmn_err(CE_NOTE, "FIT_SR_DRIVE_FW_BOOTING %s\n", skdev->name);
2735 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
2736 skdev->timer_countdown = SKD_TIMER_SECONDS(SKD_WAIT_BOOT_TO);
2737 break;
2738
2739 case FIT_SR_DRIVE_BUSY_SANITIZE:
2740 Dcmn_err(CE_NOTE, "(%s): Start: BUSY_SANITIZE\n",
2741 skd_name(skdev));
2742 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
2743 skdev->timer_countdown = SKD_TIMER_SECONDS(60);
2744 break;
2745
2746 case FIT_SR_DRIVE_BUSY_ERASE:
2747 Dcmn_err(CE_NOTE, "(%s): Start: BUSY_ERASE\n",
2748 skd_name(skdev));
2749 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
2750 skdev->timer_countdown = SKD_TIMER_SECONDS(60);
2751 break;
2752
2753 case FIT_SR_DRIVE_INIT:
2754 case FIT_SR_DRIVE_ONLINE:
2755 skd_soft_reset(skdev);
2756
2757 break;
2758
2759 case FIT_SR_DRIVE_BUSY:
2760 Dcmn_err(CE_NOTE, "(%s): Drive Busy...\n",
2761 skd_name(skdev));
2762 skdev->state = SKD_DRVR_STATE_BUSY;
2763 skdev->timer_countdown = SKD_TIMER_SECONDS(60);
2764 break;
2765
2766 case FIT_SR_DRIVE_SOFT_RESET:
2767 Dcmn_err(CE_NOTE, "(%s) drive soft reset in prog\n",
2768 skd_name(skdev));
2769 break;
2770
2771 case FIT_SR_DRIVE_FAULT:
2772 /*
2773 * Fault state is bad...soft reset won't do it...
2774 * Hard reset, maybe, but does it work on device?
2775 * For now, just fault so the system doesn't hang.
2776 */
2777 skd_drive_fault(skdev);
2778
2779 delay_action = 1;
2780 break;
2781
2782 case 0xFF:
2783 skd_drive_disappeared(skdev);
2784
2785 delay_action = 1;
2786 break;
2787
2788 default:
2789 Dcmn_err(CE_NOTE, "(%s) Start: unknown state %x\n",
2790 skd_name(skdev), skdev->drive_state);
2791 break;
2792 }
2793
2794 state = SKD_READL(skdev, FIT_CONTROL);
2795 Dcmn_err(CE_NOTE, "FIT Control Status=0x%x\n", state);
2796
2797 state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
2798 Dcmn_err(CE_NOTE, "Intr Status=0x%x\n", state);
2799
2800 state = SKD_READL(skdev, FIT_INT_MASK_HOST);
2801 Dcmn_err(CE_NOTE, "Intr Mask=0x%x\n", state);
2802
2803 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
2804 Dcmn_err(CE_NOTE, "Msg from Dev=0x%x\n", state);
2805
2806 state = SKD_READL(skdev, FIT_HW_VERSION);
2807 Dcmn_err(CE_NOTE, "HW version=0x%x\n", state);
2808
2809 if (delay_action) {
2810 /* start the queue so we can respond with error to requests */
2811 Dcmn_err(CE_NOTE, "Starting %s queue\n", skdev->name);
2812 skd_start(skdev);
2813 skdev->gendisk_on = -1;
2814 cv_signal(&skdev->cv_waitq);
2815 }
2816 }
2817
2818 /*
2819 *
2820 * Name: skd_restart_device, restart the hardware.
2821 *
2822 * Inputs: skdev - device state structure.
2823 *
2824 * Returns: Nothing.
2825 *
2826 */
2827 static void
2828 skd_restart_device(struct skd_device *skdev)
2829 {
2830 uint32_t state;
2831
2832 Dcmn_err(CE_NOTE, "skd_restart_device:");
2833
2834 /* ack all ghost interrupts */
2835 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2836
2837 state = SKD_READL(skdev, FIT_STATUS);
2838
2839 Dcmn_err(CE_NOTE, "skd_restart_device: drive status=0x%x\n", state);
2840
2841 state &= FIT_SR_DRIVE_STATE_MASK;
2842 skdev->drive_state = state;
2843 skdev->last_mtd = 0;
2844
2845 skdev->state = SKD_DRVR_STATE_RESTARTING;
2846 skdev->timer_countdown = SKD_TIMER_MINUTES(4);
2847
2848 skd_soft_reset(skdev);
2849 }
2850
2851 /*
2852 *
2853 * Name: skd_stop_device, stops the device.
2854 *
2855 * Inputs: skdev - device state structure.
2856 *
2857 * Returns: Nothing.
2858 *
2859 */
2860 static void
2861 skd_stop_device(struct skd_device *skdev)
2862 {
2863 clock_t cur_ticks, tmo;
2864 int secs;
2865 struct skd_special_context *skspcl = &skdev->internal_skspcl;
2866
2867 if (SKD_DRVR_STATE_ONLINE != skdev->state) {
2868 Dcmn_err(CE_NOTE, "(%s): skd_stop_device not online no sync\n",
2869 skdev->name);
2870 goto stop_out;
2871 }
2872
2873 if (SKD_REQ_STATE_IDLE != skspcl->req.state) {
2874 Dcmn_err(CE_NOTE, "(%s): skd_stop_device no special\n",
2875 skdev->name);
2876 goto stop_out;
2877 }
2878
2879 skdev->state = SKD_DRVR_STATE_SYNCING;
2880 skdev->sync_done = 0;
2881
2882 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
2883
2884 secs = 10;
2885 mutex_enter(&skdev->skd_internalio_mutex);
2886 while (skdev->sync_done == 0) {
2887 cur_ticks = ddi_get_lbolt();
2888 tmo = cur_ticks + drv_usectohz(1000000 * secs);
2889 if (cv_timedwait(&skdev->cv_waitq,
2890 &skdev->skd_internalio_mutex, tmo) == -1) {
2891 /* Oops - timed out */
2892
2893 Dcmn_err(CE_NOTE, "stop_device - %d secs TMO", secs);
2894 }
2895 }
2896
2897 mutex_exit(&skdev->skd_internalio_mutex);
2898
2899 switch (skdev->sync_done) {
2900 case 0:
2901 Dcmn_err(CE_NOTE, "(%s): skd_stop_device no sync\n",
2902 skdev->name);
2903 break;
2904 case 1:
2905 Dcmn_err(CE_NOTE, "(%s): skd_stop_device sync done\n",
2906 skdev->name);
2907 break;
2908 default:
2909 Dcmn_err(CE_NOTE, "(%s): skd_stop_device sync error\n",
2910 skdev->name);
2911 }
2912
2913
2914 stop_out:
2915 skdev->state = SKD_DRVR_STATE_STOPPING;
2916
2917 skd_disable_interrupts(skdev);
2918
2919 /* ensure all ints on device are cleared */
2920 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2921 /* soft reset the device to unload with a clean slate */
2922 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
2923 }
2924
2925 /*
2926 * CONSTRUCT
2927 */
2928
2929 static int skd_cons_skcomp(struct skd_device *);
2930 static int skd_cons_skmsg(struct skd_device *);
2931 static int skd_cons_skreq(struct skd_device *);
2932 static int skd_cons_sksb(struct skd_device *);
2933 static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *, uint32_t,
2934 dma_mem_t *);
2935
2936 /*
2937 *
2938 * Name: skd_construct, calls other routines to build device
2939 * interface structures.
2940 *
2941 * Inputs: skdev - device state structure.
2942 * instance - DDI instance number.
2943 *
2944 * Returns: Returns DDI_FAILURE on any failure otherwise returns
2945 * DDI_SUCCESS.
2946 *
2947 */
2948 /* ARGSUSED */ /* Upstream common source with other platforms. */
2949 static int
2950 skd_construct(skd_device_t *skdev, int instance)
2951 {
2952 int rc = 0;
2953
2954 skdev->state = SKD_DRVR_STATE_LOAD;
2955 skdev->irq_type = skd_isr_type;
2956 skdev->soft_queue_depth_limit = skd_max_queue_depth;
2957 skdev->hard_queue_depth_limit = 10; /* until GET_CMDQ_DEPTH */
2958
2959 skdev->num_req_context = skd_max_queue_depth;
2960 skdev->num_fitmsg_context = skd_max_queue_depth;
2961
2962 skdev->queue_depth_limit = skdev->hard_queue_depth_limit;
2963 skdev->queue_depth_lowat = 1;
2964 skdev->proto_ver = 99; /* initialize to invalid value */
2965 skdev->sgs_per_request = skd_sgs_per_request;
2966 skdev->dbg_level = skd_dbg_level;
2967
2968 rc = skd_cons_skcomp(skdev);
2969 if (rc < 0) {
2970 goto err_out;
2971 }
2972
2973 rc = skd_cons_skmsg(skdev);
2974 if (rc < 0) {
2975 goto err_out;
2976 }
2977
2978 rc = skd_cons_skreq(skdev);
2979 if (rc < 0) {
2980 goto err_out;
2981 }
2982
2983 rc = skd_cons_sksb(skdev);
2984 if (rc < 0) {
2985 goto err_out;
2986 }
2987
2988 Dcmn_err(CE_NOTE, "CONSTRUCT VICTORY");
2989
2990 return (DDI_SUCCESS);
2991
2992 err_out:
2993 Dcmn_err(CE_NOTE, "construct failed\n");
2994 skd_destruct(skdev);
2995
2996 return (DDI_FAILURE);
2997 }
2998
2999 /*
3000 *
3001 * Name: skd_free_phys, frees DMA memory.
3002 *
3003 * Inputs: skdev - device state structure.
3004 * mem - DMA info.
3005 *
3006 * Returns: Nothing.
3007 *
3008 */
3009 static void
3010 skd_free_phys(skd_device_t *skdev, dma_mem_t *mem)
3011 {
3012 _NOTE(ARGUNUSED(skdev));
3013
3014 if (mem == NULL || mem->dma_handle == NULL)
3015 return;
3016
3017 (void) ddi_dma_unbind_handle(mem->dma_handle);
3018
3019 if (mem->acc_handle != NULL) {
3020 ddi_dma_mem_free(&mem->acc_handle);
3021 mem->acc_handle = NULL;
3022 }
3023
3024 mem->bp = NULL;
3025 ddi_dma_free_handle(&mem->dma_handle);
3026 mem->dma_handle = NULL;
3027 }
3028
3029 /*
3030 *
3031 * Name: skd_alloc_dma_mem, allocates DMA memory.
3032 *
3033 * Inputs: skdev - device state structure.
3034 * mem - DMA data structure.
3035 * sleep - indicates whether called routine can sleep.
3036 * atype - specified 32 or 64 bit allocation.
3037 *
3038 * Returns: Void pointer to mem->bp on success else NULL.
3039 * NOTE: There are some failure modes even if sleep is set
3040 * to KM_SLEEP, so callers MUST check the return code even
3041 * if KM_SLEEP is passed in.
3042 *
3043 */
3044 static void *
3045 skd_alloc_dma_mem(skd_device_t *skdev, dma_mem_t *mem, uint8_t atype)
3046 {
3047 size_t rlen;
3048 uint_t cnt;
3049 ddi_dma_attr_t dma_attr = skd_64bit_io_dma_attr;
3050 ddi_device_acc_attr_t acc_attr = {
3051 DDI_DEVICE_ATTR_V0,
3052 DDI_STRUCTURE_LE_ACC,
3053 DDI_STRICTORDER_ACC
3054 };
3055
3056 if (atype == ATYPE_32BIT)
3057 dma_attr.dma_attr_addr_hi = SKD_DMA_HIGH_32BIT_ADDRESS;
3058
3059 dma_attr.dma_attr_sgllen = 1;
3060
3061 /*
3062 * Allocate DMA memory.
3063 */
3064 if (ddi_dma_alloc_handle(skdev->dip, &dma_attr, DDI_DMA_SLEEP, NULL,
3065 &mem->dma_handle) != DDI_SUCCESS) {
3066 cmn_err(CE_WARN, "!alloc_dma_mem-1, failed");
3067
3068 mem->dma_handle = NULL;
3069
3070 return (NULL);
3071 }
3072
3073 if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
3074 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, (caddr_t *)&mem->bp, &rlen,
3075 &mem->acc_handle) != DDI_SUCCESS) {
3076 cmn_err(CE_WARN, "!skd_alloc_dma_mem-2, failed");
3077 ddi_dma_free_handle(&mem->dma_handle);
3078 mem->dma_handle = NULL;
3079 mem->acc_handle = NULL;
3080 mem->bp = NULL;
3081
3082 return (NULL);
3083 }
3084 bzero(mem->bp, mem->size);
3085
3086 if (ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
3087 mem->size, (DDI_DMA_CONSISTENT | DDI_DMA_RDWR), DDI_DMA_SLEEP, NULL,
3088 &mem->cookie, &cnt) != DDI_DMA_MAPPED) {
3089 cmn_err(CE_WARN, "!skd_alloc_dma_mem-3, failed");
3090 ddi_dma_mem_free(&mem->acc_handle);
3091 ddi_dma_free_handle(&mem->dma_handle);
3092
3093 return (NULL);
3094 }
3095
3096 if (cnt > 1) {
3097 (void) ddi_dma_unbind_handle(mem->dma_handle);
3098 cmn_err(CE_WARN, "!skd_alloc_dma_mem-4, failed, "
3099 "cookie_count %d > 1", cnt);
3100 skd_free_phys(skdev, mem);
3101
3102 return (NULL);
3103 }
3104 mem->cookies = &mem->cookie;
3105 mem->cookies->dmac_size = mem->size;
3106
3107 return (mem->bp);
3108 }
3109
3110 /*
3111 *
3112 * Name: skd_cons_skcomp, allocates space for the skcomp table.
3113 *
3114 * Inputs: skdev - device state structure.
3115 *
3116 * Returns: -ENOMEM if no memory otherwise NULL.
3117 *
3118 */
3119 static int
3120 skd_cons_skcomp(struct skd_device *skdev)
3121 {
3122 uint64_t *dma_alloc;
3123 struct fit_completion_entry_v1 *skcomp;
3124 int rc = 0;
3125 uint32_t nbytes;
3126 dma_mem_t *mem;
3127
3128 nbytes = sizeof (*skcomp) * SKD_N_COMPLETION_ENTRY;
3129 nbytes += sizeof (struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
3130
3131 Dcmn_err(CE_NOTE, "cons_skcomp: nbytes=%d,entries=%d", nbytes,
3132 SKD_N_COMPLETION_ENTRY);
3133
3134 mem = &skdev->cq_dma_address;
3135 mem->size = nbytes;
3136
3137 dma_alloc = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT);
3138 skcomp = (struct fit_completion_entry_v1 *)dma_alloc;
3139 if (skcomp == NULL) {
3140 rc = -ENOMEM;
3141 goto err_out;
3142 }
3143
3144 bzero(skcomp, nbytes);
3145
3146 Dcmn_err(CE_NOTE, "cons_skcomp: skcomp=%p nbytes=%d",
3147 (void *)skcomp, nbytes);
3148
3149 skdev->skcomp_table = skcomp;
3150 skdev->skerr_table = (struct fit_comp_error_info *)(dma_alloc +
3151 (SKD_N_COMPLETION_ENTRY * sizeof (*skcomp) / sizeof (uint64_t)));
3152
3153 err_out:
3154 return (rc);
3155 }
3156
3157 /*
3158 *
3159 * Name: skd_cons_skmsg, allocates space for the skmsg table.
3160 *
3161 * Inputs: skdev - device state structure.
3162 *
3163 * Returns: -ENOMEM if no memory otherwise NULL.
3164 *
3165 */
3166 static int
3167 skd_cons_skmsg(struct skd_device *skdev)
3168 {
3169 dma_mem_t *mem;
3170 int rc = 0;
3171 uint32_t i;
3172
3173 Dcmn_err(CE_NOTE, "skmsg_table kzalloc, struct %lu, count %u total %lu",
3174 (ulong_t)sizeof (struct skd_fitmsg_context),
3175 skdev->num_fitmsg_context,
3176 (ulong_t)(sizeof (struct skd_fitmsg_context) *
3177 skdev->num_fitmsg_context));
3178
3179 skdev->skmsg_table = (struct skd_fitmsg_context *)kmem_zalloc(
3180 sizeof (struct skd_fitmsg_context) * skdev->num_fitmsg_context,
3181 KM_SLEEP);
3182
3183 for (i = 0; i < skdev->num_fitmsg_context; i++) {
3184 struct skd_fitmsg_context *skmsg;
3185
3186 skmsg = &skdev->skmsg_table[i];
3187
3188 skmsg->id = i + SKD_ID_FIT_MSG;
3189
3190 skmsg->state = SKD_MSG_STATE_IDLE;
3191
3192 mem = &skmsg->mb_dma_address;
3193 mem->size = SKD_N_FITMSG_BYTES + 64;
3194
3195 skmsg->msg_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT);
3196
3197 if (NULL == skmsg->msg_buf) {
3198 rc = -ENOMEM;
3199 i++;
3200 break;
3201 }
3202
3203 skmsg->offset = 0;
3204
3205 bzero(skmsg->msg_buf, SKD_N_FITMSG_BYTES);
3206
3207 skmsg->next = &skmsg[1];
3208 }
3209
3210 /* Free list is in order starting with the 0th entry. */
3211 skdev->skmsg_table[i - 1].next = NULL;
3212 skdev->skmsg_free_list = skdev->skmsg_table;
3213
3214 return (rc);
3215 }
3216
3217 /*
3218 *
3219 * Name: skd_cons_skreq, allocates space for the skreq table.
3220 *
3221 * Inputs: skdev - device state structure.
3222 *
3223 * Returns: -ENOMEM if no memory otherwise NULL.
3224 *
3225 */
3226 static int
3227 skd_cons_skreq(struct skd_device *skdev)
3228 {
3229 int rc = 0;
3230 uint32_t i;
3231
3232 Dcmn_err(CE_NOTE,
3233 "skreq_table kmem_zalloc, struct %lu, count %u total %lu",
3234 (ulong_t)sizeof (struct skd_request_context),
3235 skdev->num_req_context,
3236 (ulong_t) (sizeof (struct skd_request_context) *
3237 skdev->num_req_context));
3238
3239 skdev->skreq_table = (struct skd_request_context *)kmem_zalloc(
3240 sizeof (struct skd_request_context) * skdev->num_req_context,
3241 KM_SLEEP);
3242
3243 for (i = 0; i < skdev->num_req_context; i++) {
3244 struct skd_request_context *skreq;
3245
3246 skreq = &skdev->skreq_table[i];
3247
3248 skreq->id = (uint16_t)(i + SKD_ID_RW_REQUEST);
3249 skreq->state = SKD_REQ_STATE_IDLE;
3250
3251 skreq->sksg_list = skd_cons_sg_list(skdev,
3252 skdev->sgs_per_request,
3253 &skreq->sksg_dma_address);
3254
3255 if (NULL == skreq->sksg_list) {
3256 rc = -ENOMEM;
3257 goto err_out;
3258 }
3259
3260 skreq->next = &skreq[1];
3261 }
3262
3263 /* Free list is in order starting with the 0th entry. */
3264 skdev->skreq_table[i - 1].next = NULL;
3265 skdev->skreq_free_list = skdev->skreq_table;
3266
3267 err_out:
3268 return (rc);
3269 }
3270
3271 /*
3272 *
3273 * Name: skd_cons_sksb, allocates space for the skspcl msg buf
3274 * and data buf.
3275 *
3276 * Inputs: skdev - device state structure.
3277 *
3278 * Returns: -ENOMEM if no memory otherwise NULL.
3279 *
3280 */
3281 static int
3282 skd_cons_sksb(struct skd_device *skdev)
3283 {
3284 int rc = 0;
3285 struct skd_special_context *skspcl;
3286 dma_mem_t *mem;
3287 uint32_t nbytes;
3288
3289 skspcl = &skdev->internal_skspcl;
3290
3291 skspcl->req.id = 0 + SKD_ID_INTERNAL;
3292 skspcl->req.state = SKD_REQ_STATE_IDLE;
3293
3294 nbytes = SKD_N_INTERNAL_BYTES;
3295
3296 mem = &skspcl->db_dma_address;
3297 mem->size = nbytes;
3298
3299 /* data_buf's DMA pointer is skspcl->db_dma_address */
3300 skspcl->data_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT);
3301 if (skspcl->data_buf == NULL) {
3302 rc = -ENOMEM;
3303 goto err_out;
3304 }
3305
3306 bzero(skspcl->data_buf, nbytes);
3307
3308 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
3309
3310 mem = &skspcl->mb_dma_address;
3311 mem->size = nbytes;
3312
3313 /* msg_buf DMA pointer is skspcl->mb_dma_address */
3314 skspcl->msg_buf = skd_alloc_dma_mem(skdev, mem, ATYPE_64BIT);
3315 if (skspcl->msg_buf == NULL) {
3316 rc = -ENOMEM;
3317 goto err_out;
3318 }
3319
3320
3321 bzero(skspcl->msg_buf, nbytes);
3322
3323 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
3324 &skspcl->req.sksg_dma_address);
3325
3326
3327 if (skspcl->req.sksg_list == NULL) {
3328 rc = -ENOMEM;
3329 goto err_out;
3330 }
3331
3332 if (skd_format_internal_skspcl(skdev) == 0) {
3333 rc = -EINVAL;
3334 goto err_out;
3335 }
3336
3337 err_out:
3338 return (rc);
3339 }
3340
3341 /*
3342 *
3343 * Name: skd_cons_sg_list, allocates the S/G list.
3344 *
3345 * Inputs: skdev - device state structure.
3346 * n_sg - Number of scatter-gather entries.
3347 * ret_dma_addr - S/G list DMA pointer.
3348 *
3349 * Returns: A list of FIT message descriptors.
3350 *
3351 */
3352 static struct fit_sg_descriptor
3353 *skd_cons_sg_list(struct skd_device *skdev,
3354 uint32_t n_sg, dma_mem_t *ret_dma_addr)
3355 {
3356 struct fit_sg_descriptor *sg_list;
3357 uint32_t nbytes;
3358 dma_mem_t *mem;
3359
3360 nbytes = sizeof (*sg_list) * n_sg;
3361
3362 mem = ret_dma_addr;
3363 mem->size = nbytes;
3364
3365 /* sg_list's DMA pointer is *ret_dma_addr */
3366 sg_list = skd_alloc_dma_mem(skdev, mem, ATYPE_32BIT);
3367
3368 if (sg_list != NULL) {
3369 uint64_t dma_address = ret_dma_addr->cookie.dmac_laddress;
3370 uint32_t i;
3371
3372 bzero(sg_list, nbytes);
3373
3374 for (i = 0; i < n_sg - 1; i++) {
3375 uint64_t ndp_off;
3376 ndp_off = (i + 1) * sizeof (struct fit_sg_descriptor);
3377
3378 sg_list[i].next_desc_ptr = dma_address + ndp_off;
3379 }
3380 sg_list[i].next_desc_ptr = 0LL;
3381 }
3382
3383 return (sg_list);
3384 }
3385
3386 /*
3387 * DESTRUCT (FREE)
3388 */
3389
3390 static void skd_free_skcomp(struct skd_device *skdev);
3391 static void skd_free_skmsg(struct skd_device *skdev);
3392 static void skd_free_skreq(struct skd_device *skdev);
3393 static void skd_free_sksb(struct skd_device *skdev);
3394
3395 static void skd_free_sg_list(struct skd_device *skdev,
3396 struct fit_sg_descriptor *sg_list,
3397 uint32_t n_sg, dma_mem_t dma_addr);
3398
3399 /*
3400 *
3401 * Name: skd_destruct, call various rouines to deallocate
3402 * space acquired during initialization.
3403 *
3404 * Inputs: skdev - device state structure.
3405 *
3406 * Returns: Nothing.
3407 *
3408 */
3409 static void
3410 skd_destruct(struct skd_device *skdev)
3411 {
3412 if (skdev == NULL) {
3413 return;
3414 }
3415
3416 Dcmn_err(CE_NOTE, "destruct sksb");
3417 skd_free_sksb(skdev);
3418
3419 Dcmn_err(CE_NOTE, "destruct skreq");
3420 skd_free_skreq(skdev);
3421
3422 Dcmn_err(CE_NOTE, "destruct skmsg");
3423 skd_free_skmsg(skdev);
3424
3425 Dcmn_err(CE_NOTE, "destruct skcomp");
3426 skd_free_skcomp(skdev);
3427
3428 Dcmn_err(CE_NOTE, "DESTRUCT VICTORY");
3429 }
3430
3431 /*
3432 *
3433 * Name: skd_free_skcomp, deallocates skcomp table DMA resources.
3434 *
3435 * Inputs: skdev - device state structure.
3436 *
3437 * Returns: Nothing.
3438 *
3439 */
3440 static void
3441 skd_free_skcomp(struct skd_device *skdev)
3442 {
3443 if (skdev->skcomp_table != NULL) {
3444 skd_free_phys(skdev, &skdev->cq_dma_address);
3445 }
3446
3447 skdev->skcomp_table = NULL;
3448 }
3449
3450 /*
3451 *
3452 * Name: skd_free_skmsg, deallocates skmsg table DMA resources.
3453 *
3454 * Inputs: skdev - device state structure.
3455 *
3456 * Returns: Nothing.
3457 *
3458 */
3459 static void
3460 skd_free_skmsg(struct skd_device *skdev)
3461 {
3462 uint32_t i;
3463
3464 if (NULL == skdev->skmsg_table)
3465 return;
3466
3467 for (i = 0; i < skdev->num_fitmsg_context; i++) {
3468 struct skd_fitmsg_context *skmsg;
3469
3470 skmsg = &skdev->skmsg_table[i];
3471
3472 if (skmsg->msg_buf != NULL) {
3473 skd_free_phys(skdev, &skmsg->mb_dma_address);
3474 }
3475
3476
3477 skmsg->msg_buf = NULL;
3478 }
3479
3480 kmem_free(skdev->skmsg_table, sizeof (struct skd_fitmsg_context) *
3481 skdev->num_fitmsg_context);
3482
3483 skdev->skmsg_table = NULL;
3484
3485 }
3486
3487 /*
3488 *
3489 * Name: skd_free_skreq, deallocates skspcl table DMA resources.
3490 *
3491 * Inputs: skdev - device state structure.
3492 *
3493 * Returns: Nothing.
3494 *
3495 */
3496 static void
3497 skd_free_skreq(struct skd_device *skdev)
3498 {
3499 uint32_t i;
3500
3501 if (NULL == skdev->skreq_table)
3502 return;
3503
3504 for (i = 0; i < skdev->num_req_context; i++) {
3505 struct skd_request_context *skreq;
3506
3507 skreq = &skdev->skreq_table[i];
3508
3509 skd_free_sg_list(skdev, skreq->sksg_list,
3510 skdev->sgs_per_request, skreq->sksg_dma_address);
3511
3512 skreq->sksg_list = NULL;
3513 }
3514
3515 kmem_free(skdev->skreq_table, sizeof (struct skd_request_context) *
3516 skdev->num_req_context);
3517
3518 skdev->skreq_table = NULL;
3519
3520 }
3521
3522 /*
3523 *
3524 * Name: skd_free_sksb, deallocates skspcl data buf and
3525 * msg buf DMA resources.
3526 *
3527 * Inputs: skdev - device state structure.
3528 *
3529 * Returns: Nothing.
3530 *
3531 */
3532 static void
3533 skd_free_sksb(struct skd_device *skdev)
3534 {
3535 struct skd_special_context *skspcl;
3536
3537 skspcl = &skdev->internal_skspcl;
3538
3539 if (skspcl->data_buf != NULL) {
3540 skd_free_phys(skdev, &skspcl->db_dma_address);
3541 }
3542
3543 skspcl->data_buf = NULL;
3544
3545 if (skspcl->msg_buf != NULL) {
3546 skd_free_phys(skdev, &skspcl->mb_dma_address);
3547 }
3548
3549 skspcl->msg_buf = NULL;
3550
3551 skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
3552 skspcl->req.sksg_dma_address);
3553
3554 skspcl->req.sksg_list = NULL;
3555 }
3556
3557 /*
3558 *
3559 * Name: skd_free_sg_list, deallocates S/G DMA resources.
3560 *
3561 * Inputs: skdev - device state structure.
3562 * sg_list - S/G list itself.
3563 * n_sg - nukmber of segments
3564 * dma_addr - S/G list DMA address.
3565 *
3566 * Returns: Nothing.
3567 *
3568 */
3569 /* ARGSUSED */ /* Upstream common source with other platforms. */
3570 static void
3571 skd_free_sg_list(struct skd_device *skdev,
3572 struct fit_sg_descriptor *sg_list,
3573 uint32_t n_sg, dma_mem_t dma_addr)
3574 {
3575 if (sg_list != NULL) {
3576 skd_free_phys(skdev, &dma_addr);
3577 }
3578 }
3579
3580 /*
3581 *
3582 * Name: skd_queue, queues the I/O request.
3583 *
3584 * Inputs: skdev - device state structure.
3585 * pbuf - I/O request
3586 *
3587 * Returns: Nothing.
3588 *
3589 */
3590 static void
3591 skd_queue(skd_device_t *skdev, skd_buf_private_t *pbuf)
3592 {
3593 struct waitqueue *waitq;
3594
3595 ASSERT(skdev != NULL);
3596 ASSERT(pbuf != NULL);
3597
3598 ASSERT(WAITQ_LOCK_HELD(skdev));
3599
3600 waitq = &skdev->waitqueue;
3601
3602 if (SIMPLEQ_EMPTY(waitq))
3603 SIMPLEQ_INSERT_HEAD(waitq, pbuf, sq);
3604 else
3605 SIMPLEQ_INSERT_TAIL(waitq, pbuf, sq);
3606 }
3607
3608 /*
3609 *
3610 * Name: skd_list_skreq, displays the skreq table entries.
3611 *
3612 * Inputs: skdev - device state structure.
3613 * list - flag, if true displays the entry address.
3614 *
3615 * Returns: Returns number of skmsg entries found.
3616 *
3617 */
3618 /* ARGSUSED */ /* Upstream common source with other platforms. */
3619 static int
3620 skd_list_skreq(skd_device_t *skdev, int list)
3621 {
3622 int inx = 0;
3623 struct skd_request_context *skreq;
3624
3625 if (list) {
3626 Dcmn_err(CE_NOTE, "skreq_table[0]\n");
3627
3628 skreq = &skdev->skreq_table[0];
3629 while (skreq) {
3630 if (list)
3631 Dcmn_err(CE_NOTE,
3632 "%d: skreq=%p state=%d id=%x fid=%x "
3633 "pbuf=%p dir=%d comp=%d\n",
3634 inx, (void *)skreq, skreq->state,
3635 skreq->id, skreq->fitmsg_id,
3636 (void *)skreq->pbuf,
3637 skreq->sg_data_dir, skreq->did_complete);
3638 inx++;
3639 skreq = skreq->next;
3640 }
3641 }
3642
3643 inx = 0;
3644 skreq = skdev->skreq_free_list;
3645
3646 if (list)
3647 Dcmn_err(CE_NOTE, "skreq_free_list\n");
3648 while (skreq) {
3649 if (list)
3650 Dcmn_err(CE_NOTE, "%d: skreq=%p state=%d id=%x fid=%x "
3651 "pbuf=%p dir=%d\n", inx, (void *)skreq,
3652 skreq->state, skreq->id, skreq->fitmsg_id,
3653 (void *)skreq->pbuf, skreq->sg_data_dir);
3654 inx++;
3655 skreq = skreq->next;
3656 }
3657
3658 return (inx);
3659 }
3660
3661 /*
3662 *
3663 * Name: skd_list_skmsg, displays the skmsg table entries.
3664 *
3665 * Inputs: skdev - device state structure.
3666 * list - flag, if true displays the entry address.
3667 *
3668 * Returns: Returns number of skmsg entries found.
3669 *
3670 */
3671 static int
3672 skd_list_skmsg(skd_device_t *skdev, int list)
3673 {
3674 int inx = 0;
3675 struct skd_fitmsg_context *skmsgp;
3676
3677 skmsgp = &skdev->skmsg_table[0];
3678
3679 if (list) {
3680 Dcmn_err(CE_NOTE, "skmsg_table[0]\n");
3681
3682 while (skmsgp) {
3683 if (list)
3684 Dcmn_err(CE_NOTE, "%d: skmsgp=%p id=%x outs=%d "
3685 "l=%d o=%d nxt=%p\n", inx, (void *)skmsgp,
3686 skmsgp->id, skmsgp->outstanding,
3687 skmsgp->length, skmsgp->offset,
3688 (void *)skmsgp->next);
3689 inx++;
3690 skmsgp = skmsgp->next;
3691 }
3692 }
3693
3694 inx = 0;
3695 if (list)
3696 Dcmn_err(CE_NOTE, "skmsg_free_list\n");
3697 skmsgp = skdev->skmsg_free_list;
3698 while (skmsgp) {
3699 if (list)
3700 Dcmn_err(CE_NOTE, "%d: skmsgp=%p id=%x outs=%d l=%d "
3701 "o=%d nxt=%p\n",
3702 inx, (void *)skmsgp, skmsgp->id,
3703 skmsgp->outstanding, skmsgp->length,
3704 skmsgp->offset, (void *)skmsgp->next);
3705 inx++;
3706 skmsgp = skmsgp->next;
3707 }
3708
3709 return (inx);
3710 }
3711
3712 /*
3713 *
3714 * Name: skd_get_queue_pbuf, retrieves top of queue entry and
3715 * delinks entry from the queue.
3716 *
3717 * Inputs: skdev - device state structure.
3718 * drive - device number
3719 *
3720 * Returns: Returns the top of the job queue entry.
3721 *
3722 */
3723 static skd_buf_private_t
3724 *skd_get_queued_pbuf(skd_device_t *skdev)
3725 {
3726 skd_buf_private_t *pbuf;
3727
3728 ASSERT(WAITQ_LOCK_HELD(skdev));
3729 pbuf = SIMPLEQ_FIRST(&skdev->waitqueue);
3730 if (pbuf != NULL)
3731 SIMPLEQ_REMOVE_HEAD(&skdev->waitqueue, sq);
3732 return (pbuf);
3733 }
3734
3735 /*
3736 * PCI DRIVER GLUE
3737 */
3738
3739 /*
3740 *
3741 * Name: skd_pci_info, logs certain device PCI info.
3742 *
3743 * Inputs: skdev - device state structure.
3744 *
3745 * Returns: str which contains the device speed info..
3746 *
3747 */
3748 static char *
3749 skd_pci_info(struct skd_device *skdev, char *str, size_t len)
3750 {
3751 int pcie_reg;
3752
3753 str[0] = '\0';
3754
3755 pcie_reg = skd_pci_find_capability(skdev, PCI_CAP_ID_EXP);
3756
3757 if (pcie_reg) {
3758 uint16_t lstat, lspeed, lwidth;
3759
3760 pcie_reg += 0x12;
3761 lstat = pci_config_get16(skdev->pci_handle, pcie_reg);
3762 lspeed = lstat & (0xF);
3763 lwidth = (lstat & 0x3F0) >> 4;
3764
3765 (void) snprintf(str, len, "PCIe (%s rev %d)",
3766 lspeed == 1 ? "2.5GT/s" :
3767 lspeed == 2 ? "5.0GT/s" : "<unknown>",
3768 lwidth);
3769 }
3770
3771 return (str);
3772 }
3773
3774 /*
3775 * MODULE GLUE
3776 */
3777
3778 /*
3779 *
3780 * Name: skd_init, initializes certain values.
3781 *
3782 * Inputs: skdev - device state structure.
3783 *
3784 * Returns: Zero.
3785 *
3786 */
3787 /* ARGSUSED */ /* Upstream common source with other platforms. */
3788 static int
3789 skd_init(skd_device_t *skdev)
3790 {
3791 Dcmn_err(CE_NOTE, "skd_init: v%s-b%s\n", DRV_VERSION, DRV_BUILD_ID);
3792
3793 if (skd_max_queue_depth < 1 ||
3794 skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
3795 cmn_err(CE_NOTE, "skd_max_q_depth %d invalid, re-set to %d\n",
3796 skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
3797 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
3798 }
3799
3800 if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
3801 cmn_err(CE_NOTE, "skd_max_req_per_msg %d invalid, set to %d\n",
3802 skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
3803 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
3804 }
3805
3806
3807 if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
3808 cmn_err(CE_NOTE, "skd_sg_per_request %d invalid, set to %d\n",
3809 skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
3810 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
3811 }
3812
3813 if (skd_dbg_level < 0 || skd_dbg_level > 2) {
3814 cmn_err(CE_NOTE, "skd_dbg_level %d invalid, re-set to %d\n",
3815 skd_dbg_level, 0);
3816 skd_dbg_level = 0;
3817 }
3818
3819 return (0);
3820 }
3821
3822 /*
3823 *
3824 * Name: skd_exit, exits the driver & logs the fact.
3825 *
3826 * Inputs: none.
3827 *
3828 * Returns: Nothing.
3829 *
3830 */
3831 static void
3832 skd_exit(void)
3833 {
3834 cmn_err(CE_NOTE, "skd v%s unloading", DRV_VERSION);
3835 }
3836
3837 /*
3838 *
3839 * Name: skd_drive_state_to_str, converts binary drive state
3840 * to its corresponding string value.
3841 *
3842 * Inputs: Drive state.
3843 *
3844 * Returns: String representing drive state.
3845 *
3846 */
3847 const char *
3848 skd_drive_state_to_str(int state)
3849 {
3850 switch (state) {
3851 case FIT_SR_DRIVE_OFFLINE: return ("OFFLINE");
3852 case FIT_SR_DRIVE_INIT: return ("INIT");
3853 case FIT_SR_DRIVE_ONLINE: return ("ONLINE");
3854 case FIT_SR_DRIVE_BUSY: return ("BUSY");
3855 case FIT_SR_DRIVE_FAULT: return ("FAULT");
3856 case FIT_SR_DRIVE_DEGRADED: return ("DEGRADED");
3857 case FIT_SR_PCIE_LINK_DOWN: return ("LINK_DOWN");
3858 case FIT_SR_DRIVE_SOFT_RESET: return ("SOFT_RESET");
3859 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: return ("NEED_FW");
3860 case FIT_SR_DRIVE_INIT_FAULT: return ("INIT_FAULT");
3861 case FIT_SR_DRIVE_BUSY_SANITIZE:return ("BUSY_SANITIZE");
3862 case FIT_SR_DRIVE_BUSY_ERASE: return ("BUSY_ERASE");
3863 case FIT_SR_DRIVE_FW_BOOTING: return ("FW_BOOTING");
3864 default: return ("???");
3865 }
3866 }
3867
3868 /*
3869 *
3870 * Name: skd_skdev_state_to_str, converts binary driver state
3871 * to its corresponding string value.
3872 *
3873 * Inputs: Driver state.
3874 *
3875 * Returns: String representing driver state.
3876 *
3877 */
3878 static const char *
3879 skd_skdev_state_to_str(enum skd_drvr_state state)
3880 {
3881 switch (state) {
3882 case SKD_DRVR_STATE_LOAD: return ("LOAD");
3883 case SKD_DRVR_STATE_IDLE: return ("IDLE");
3884 case SKD_DRVR_STATE_BUSY: return ("BUSY");
3885 case SKD_DRVR_STATE_STARTING: return ("STARTING");
3886 case SKD_DRVR_STATE_ONLINE: return ("ONLINE");
3887 case SKD_DRVR_STATE_PAUSING: return ("PAUSING");
3888 case SKD_DRVR_STATE_PAUSED: return ("PAUSED");
3889 case SKD_DRVR_STATE_DRAINING_TIMEOUT: return ("DRAINING_TIMEOUT");
3890 case SKD_DRVR_STATE_RESTARTING: return ("RESTARTING");
3891 case SKD_DRVR_STATE_RESUMING: return ("RESUMING");
3892 case SKD_DRVR_STATE_STOPPING: return ("STOPPING");
3893 case SKD_DRVR_STATE_SYNCING: return ("SYNCING");
3894 case SKD_DRVR_STATE_FAULT: return ("FAULT");
3895 case SKD_DRVR_STATE_DISAPPEARED: return ("DISAPPEARED");
3896 case SKD_DRVR_STATE_BUSY_ERASE: return ("BUSY_ERASE");
3897 case SKD_DRVR_STATE_BUSY_SANITIZE:return ("BUSY_SANITIZE");
3898 case SKD_DRVR_STATE_BUSY_IMMINENT: return ("BUSY_IMMINENT");
3899 case SKD_DRVR_STATE_WAIT_BOOT: return ("WAIT_BOOT");
3900
3901 default: return ("???");
3902 }
3903 }
3904
3905 /*
3906 *
3907 * Name: skd_skmsg_state_to_str, converts binary driver state
3908 * to its corresponding string value.
3909 *
3910 * Inputs: Msg state.
3911 *
3912 * Returns: String representing msg state.
3913 *
3914 */
3915 static const char *
3916 skd_skmsg_state_to_str(enum skd_fit_msg_state state)
3917 {
3918 switch (state) {
3919 case SKD_MSG_STATE_IDLE: return ("IDLE");
3920 case SKD_MSG_STATE_BUSY: return ("BUSY");
3921 default: return ("???");
3922 }
3923 }
3924
3925 /*
3926 *
3927 * Name: skd_skreq_state_to_str, converts binary req state
3928 * to its corresponding string value.
3929 *
3930 * Inputs: Req state.
3931 *
3932 * Returns: String representing req state.
3933 *
3934 */
3935 static const char *
3936 skd_skreq_state_to_str(enum skd_req_state state)
3937 {
3938 switch (state) {
3939 case SKD_REQ_STATE_IDLE: return ("IDLE");
3940 case SKD_REQ_STATE_SETUP: return ("SETUP");
3941 case SKD_REQ_STATE_BUSY: return ("BUSY");
3942 case SKD_REQ_STATE_COMPLETED: return ("COMPLETED");
3943 case SKD_REQ_STATE_TIMEOUT: return ("TIMEOUT");
3944 case SKD_REQ_STATE_ABORTED: return ("ABORTED");
3945 default: return ("???");
3946 }
3947 }
3948
3949 /*
3950 *
3951 * Name: skd_log_skdev, logs device state & parameters.
3952 *
3953 * Inputs: skdev - device state structure.
3954 * event - event (string) to log.
3955 *
3956 * Returns: Nothing.
3957 *
3958 */
3959 static void
3960 skd_log_skdev(struct skd_device *skdev, const char *event)
3961 {
3962 Dcmn_err(CE_NOTE, "log_skdev(%s) skdev=%p event='%s'",
3963 skdev->name, (void *)skdev, event);
3964 Dcmn_err(CE_NOTE, " drive_state=%s(%d) driver_state=%s(%d)",
3965 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
3966 skd_skdev_state_to_str(skdev->state), skdev->state);
3967 Dcmn_err(CE_NOTE, " busy=%d limit=%d soft=%d hard=%d lowat=%d",
3968 skdev->queue_depth_busy, skdev->queue_depth_limit,
3969 skdev->soft_queue_depth_limit, skdev->hard_queue_depth_limit,
3970 skdev->queue_depth_lowat);
3971 Dcmn_err(CE_NOTE, " timestamp=0x%x cycle=%d cycle_ix=%d",
3972 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
3973 }
3974
3975 /*
3976 *
3977 * Name: skd_log_skmsg, logs the skmsg event.
3978 *
3979 * Inputs: skdev - device state structure.
3980 * skmsg - FIT message structure.
3981 * event - event string to log.
3982 *
3983 * Returns: Nothing.
3984 *
3985 */
3986 static void
3987 skd_log_skmsg(struct skd_device *skdev,
3988 struct skd_fitmsg_context *skmsg, const char *event)
3989 {
3990 Dcmn_err(CE_NOTE, "log_skmsg:(%s) skmsg=%p event='%s'",
3991 skdev->name, (void *)skmsg, event);
3992 Dcmn_err(CE_NOTE, " state=%s(%d) id=0x%04x length=%d",
3993 skd_skmsg_state_to_str(skmsg->state), skmsg->state,
3994 skmsg->id, skmsg->length);
3995 }
3996
3997 /*
3998 *
3999 * Name: skd_log_skreq, logs the skreq event.
4000 *
4001 * Inputs: skdev - device state structure.
4002 * skreq -skreq structure.
4003 * event - event string to log.
4004 *
4005 * Returns: Nothing.
4006 *
4007 */
4008 static void
4009 skd_log_skreq(struct skd_device *skdev,
4010 struct skd_request_context *skreq, const char *event)
4011 {
4012 skd_buf_private_t *pbuf;
4013
4014 Dcmn_err(CE_NOTE, "log_skreq: (%s) skreq=%p pbuf=%p event='%s'",
4015 skdev->name, (void *)skreq, (void *)skreq->pbuf, event);
4016
4017 Dcmn_err(CE_NOTE, " state=%s(%d) id=0x%04x fitmsg=0x%04x",
4018 skd_skreq_state_to_str(skreq->state), skreq->state,
4019 skreq->id, skreq->fitmsg_id);
4020 Dcmn_err(CE_NOTE, " timo=0x%x sg_dir=%d n_sg=%d",
4021 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
4022
4023 if ((pbuf = skreq->pbuf) != NULL) {
4024 uint32_t lba, count;
4025 lba = pbuf->x_xfer->x_blkno;
4026 count = pbuf->x_xfer->x_nblks;
4027 Dcmn_err(CE_NOTE, " pbuf=%p lba=%u(0x%x) count=%u(0x%x) ",
4028 (void *)pbuf, lba, lba, count, count);
4029 Dcmn_err(CE_NOTE, " dir=%s "
4030 " intrs=%" PRId64 " qdepth=%d",
4031 (pbuf->dir & B_READ) ? "Read" : "Write",
4032 skdev->intr_cntr, skdev->queue_depth_busy);
4033 } else {
4034 Dcmn_err(CE_NOTE, " req=NULL\n");
4035 }
4036 }
4037
4038 /*
4039 *
4040 * Name: skd_init_mutex, initializes all mutexes.
4041 *
4042 * Inputs: skdev - device state structure.
4043 *
4044 * Returns: DDI_FAILURE on failure otherwise DDI_SUCCESS.
4045 *
4046 */
4047 static int
4048 skd_init_mutex(skd_device_t *skdev)
4049 {
4050 void *intr;
4051
4052 Dcmn_err(CE_CONT, "(%s%d): init_mutex flags=%x", DRV_NAME,
4053 skdev->instance, skdev->flags);
4054
4055 intr = (void *)(uintptr_t)skdev->intr_pri;
4056
4057 if (skdev->flags & SKD_MUTEX_INITED)
4058 cmn_err(CE_NOTE, "init_mutex: Oh-Oh - already INITED");
4059
4060 /* mutexes to protect the adapter state structure. */
4061 mutex_init(&skdev->skd_lock_mutex, NULL, MUTEX_DRIVER,
4062 DDI_INTR_PRI(intr));
4063 mutex_init(&skdev->skd_intr_mutex, NULL, MUTEX_DRIVER,
4064 DDI_INTR_PRI(intr));
4065 mutex_init(&skdev->waitqueue_mutex, NULL, MUTEX_DRIVER,
4066 DDI_INTR_PRI(intr));
4067 mutex_init(&skdev->skd_internalio_mutex, NULL, MUTEX_DRIVER,
4068 DDI_INTR_PRI(intr));
4069
4070 cv_init(&skdev->cv_waitq, NULL, CV_DRIVER, NULL);
4071
4072 skdev->flags |= SKD_MUTEX_INITED;
4073 if (skdev->flags & SKD_MUTEX_DESTROYED)
4074 skdev->flags &= ~SKD_MUTEX_DESTROYED;
4075
4076 Dcmn_err(CE_CONT, "init_mutex (%s%d): done, flags=%x", DRV_NAME,
4077 skdev->instance, skdev->flags);
4078
4079 return (DDI_SUCCESS);
4080 }
4081
4082 /*
4083 *
4084 * Name: skd_destroy_mutex, destroys all mutexes.
4085 *
4086 * Inputs: skdev - device state structure.
4087 *
4088 * Returns: Nothing.
4089 *
4090 */
4091 static void
4092 skd_destroy_mutex(skd_device_t *skdev)
4093 {
4094 if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) {
4095 if (skdev->flags & SKD_MUTEX_INITED) {
4096 mutex_destroy(&skdev->waitqueue_mutex);
4097 mutex_destroy(&skdev->skd_intr_mutex);
4098 mutex_destroy(&skdev->skd_lock_mutex);
4099 mutex_destroy(&skdev->skd_internalio_mutex);
4100
4101 cv_destroy(&skdev->cv_waitq);
4102
4103 skdev->flags |= SKD_MUTEX_DESTROYED;
4104
4105 if (skdev->flags & SKD_MUTEX_INITED)
4106 skdev->flags &= ~SKD_MUTEX_INITED;
4107 }
4108 }
4109 }
4110
4111 /*
4112 *
4113 * Name: skd_setup_intr, setup the interrupt handling
4114 *
4115 * Inputs: skdev - device state structure.
4116 * intr_type - requested DDI interrupt type.
4117 *
4118 * Returns: DDI_FAILURE on failure otherwise DDI_SUCCESS.
4119 *
4120 */
4121 static int
4122 skd_setup_intr(skd_device_t *skdev, int intr_type)
4123 {
4124 int32_t count = 0;
4125 int32_t avail = 0;
4126 int32_t actual = 0;
4127 int32_t ret;
4128 uint32_t i;
4129
4130 Dcmn_err(CE_CONT, "(%s%d): setup_intr", DRV_NAME, skdev->instance);
4131
4132 /* Get number of interrupts the platform h/w supports */
4133 if (((ret = ddi_intr_get_nintrs(skdev->dip, intr_type, &count)) !=
4134 DDI_SUCCESS) || count == 0) {
4135 cmn_err(CE_WARN, "!intr_setup failed, nintrs ret=%xh, cnt=%xh",
4136 ret, count);
4137
4138 return (DDI_FAILURE);
4139 }
4140
4141 /* Get number of available system interrupts */
4142 if (((ret = ddi_intr_get_navail(skdev->dip, intr_type, &avail)) !=
4143 DDI_SUCCESS) || avail == 0) {
4144 cmn_err(CE_WARN, "!intr_setup failed, navail ret=%xh, "
4145 "avail=%xh", ret, avail);
4146
4147 return (DDI_FAILURE);
4148 }
4149
4150 if (intr_type == DDI_INTR_TYPE_MSIX && avail < SKD_MSIX_MAXAIF) {
4151 cmn_err(CE_WARN, "!intr_setup failed, min MSI-X h/w vectors "
4152 "req'd: %d, avail: %d",
4153 SKD_MSIX_MAXAIF, count);
4154
4155 return (DDI_FAILURE);
4156 }
4157
4158 /* Allocate space for interrupt handles */
4159 skdev->hsize = sizeof (ddi_intr_handle_t) * avail;
4160 skdev->htable = kmem_zalloc(skdev->hsize, KM_SLEEP);
4161
4162 /* Allocate the interrupts */
4163 if ((ret = ddi_intr_alloc(skdev->dip, skdev->htable, intr_type,
4164 0, count, &actual, 0)) != DDI_SUCCESS) {
4165 cmn_err(CE_WARN, "!intr_setup failed, intr_alloc ret=%xh, "
4166 "count = %xh, " "actual=%xh", ret, count, actual);
4167
4168 skd_release_intr(skdev);
4169
4170 return (DDI_FAILURE);
4171 }
4172
4173 skdev->intr_cnt = actual;
4174
4175 if (intr_type == DDI_INTR_TYPE_FIXED)
4176 (void) ddi_intr_set_pri(skdev->htable[0], 10);
4177
4178 /* Get interrupt priority */
4179 if ((ret = ddi_intr_get_pri(skdev->htable[0], &skdev->intr_pri)) !=
4180 DDI_SUCCESS) {
4181 cmn_err(CE_WARN, "!intr_setup failed, get_pri ret=%xh", ret);
4182 skd_release_intr(skdev);
4183
4184 return (ret);
4185 }
4186
4187 /* Add the interrupt handlers */
4188 for (i = 0; i < actual; i++) {
4189 if ((ret = ddi_intr_add_handler(skdev->htable[i],
4190 skd_isr_aif, (void *)skdev, (void *)((ulong_t)i))) !=
4191 DDI_SUCCESS) {
4192 cmn_err(CE_WARN, "!intr_setup failed, addh#=%xh, "
4193 "act=%xh, ret=%xh", i, actual, ret);
4194 skd_release_intr(skdev);
4195
4196 return (ret);
4197 }
4198 }
4199
4200 /* Setup mutexes */
4201 if ((ret = skd_init_mutex(skdev)) != DDI_SUCCESS) {
4202 cmn_err(CE_WARN, "!intr_setup failed, mutex init ret=%xh", ret);
4203 skd_release_intr(skdev);
4204
4205 return (ret);
4206 }
4207
4208 /* Get the capabilities */
4209 (void) ddi_intr_get_cap(skdev->htable[0], &skdev->intr_cap);
4210
4211 /* Enable interrupts */
4212 if (skdev->intr_cap & DDI_INTR_FLAG_BLOCK) {
4213 if ((ret = ddi_intr_block_enable(skdev->htable,
4214 skdev->intr_cnt)) != DDI_SUCCESS) {
4215 cmn_err(CE_WARN, "!failed, intr_setup block enable, "
4216 "ret=%xh", ret);
4217 skd_destroy_mutex(skdev);
4218 skd_release_intr(skdev);
4219
4220 return (ret);
4221 }
4222 } else {
4223 for (i = 0; i < skdev->intr_cnt; i++) {
4224 if ((ret = ddi_intr_enable(skdev->htable[i])) !=
4225 DDI_SUCCESS) {
4226 cmn_err(CE_WARN, "!intr_setup failed, "
4227 "intr enable, ret=%xh", ret);
4228 skd_destroy_mutex(skdev);
4229 skd_release_intr(skdev);
4230
4231 return (ret);
4232 }
4233 }
4234 }
4235
4236 if (intr_type == DDI_INTR_TYPE_FIXED)
4237 (void) ddi_intr_clr_mask(skdev->htable[0]);
4238
4239 skdev->irq_type = intr_type;
4240
4241 return (DDI_SUCCESS);
4242 }
4243
4244 /*
4245 *
4246 * Name: skd_disable_intr, disable interrupt handling.
4247 *
4248 * Inputs: skdev - device state structure.
4249 *
4250 * Returns: Nothing.
4251 *
4252 */
4253 static void
4254 skd_disable_intr(skd_device_t *skdev)
4255 {
4256 uint32_t i, rval;
4257
4258 if (skdev->intr_cap & DDI_INTR_FLAG_BLOCK) {
4259 /* Remove AIF block interrupts (MSI/MSI-X) */
4260 if ((rval = ddi_intr_block_disable(skdev->htable,
4261 skdev->intr_cnt)) != DDI_SUCCESS) {
4262 cmn_err(CE_WARN, "!failed intr block disable, rval=%x",
4263 rval);
4264 }
4265 } else {
4266 /* Remove AIF non-block interrupts (fixed). */
4267 for (i = 0; i < skdev->intr_cnt; i++) {
4268 if ((rval = ddi_intr_disable(skdev->htable[i])) !=
4269 DDI_SUCCESS) {
4270 cmn_err(CE_WARN, "!failed intr disable, "
4271 "intr#=%xh, " "rval=%xh", i, rval);
4272 }
4273 }
4274 }
4275 }
4276
4277 /*
4278 *
4279 * Name: skd_release_intr, disables interrupt handling.
4280 *
4281 * Inputs: skdev - device state structure.
4282 *
4283 * Returns: Nothing.
4284 *
4285 */
4286 static void
4287 skd_release_intr(skd_device_t *skdev)
4288 {
4289 int32_t i;
4290 int rval;
4291
4292
4293 Dcmn_err(CE_CONT, "REL_INTR intr_cnt=%d", skdev->intr_cnt);
4294
4295 if (skdev->irq_type == 0) {
4296 Dcmn_err(CE_CONT, "release_intr: (%s%d): done",
4297 DRV_NAME, skdev->instance);
4298 return;
4299 }
4300
4301 if (skdev->htable != NULL && skdev->hsize > 0) {
4302 i = (int32_t)skdev->hsize / (int32_t)sizeof (ddi_intr_handle_t);
4303
4304 while (i-- > 0) {
4305 if (skdev->htable[i] == 0) {
4306 Dcmn_err(CE_NOTE, "htable[%x]=0h", i);
4307 continue;
4308 }
4309
4310 if ((rval = ddi_intr_disable(skdev->htable[i])) !=
4311 DDI_SUCCESS)
4312 Dcmn_err(CE_NOTE, "release_intr: intr_disable "
4313 "htable[%d], rval=%d", i, rval);
4314
4315 if (i < skdev->intr_cnt) {
4316 if ((rval = ddi_intr_remove_handler(
4317 skdev->htable[i])) != DDI_SUCCESS)
4318 cmn_err(CE_WARN, "!release_intr: "
4319 "intr_remove_handler FAILED, "
4320 "rval=%d", rval);
4321
4322 Dcmn_err(CE_NOTE, "release_intr: "
4323 "remove_handler htable[%d]", i);
4324 }
4325
4326 if ((rval = ddi_intr_free(skdev->htable[i])) !=
4327 DDI_SUCCESS)
4328 cmn_err(CE_WARN, "!release_intr: intr_free "
4329 "FAILED, rval=%d", rval);
4330 Dcmn_err(CE_NOTE, "release_intr: intr_free htable[%d]",
4331 i);
4332 }
4333
4334 kmem_free(skdev->htable, skdev->hsize);
4335 skdev->htable = NULL;
4336 }
4337
4338 skdev->hsize = 0;
4339 skdev->intr_cnt = 0;
4340 skdev->intr_pri = 0;
4341 skdev->intr_cap = 0;
4342 skdev->irq_type = 0;
4343 }
4344
4345 /*
4346 *
4347 * Name: skd_dealloc_resources, deallocate resources allocated
4348 * during attach.
4349 *
4350 * Inputs: dip - DDI device info pointer.
4351 * skdev - device state structure.
4352 * seq - bit flag representing allocated item.
4353 * instance - device instance.
4354 *
4355 * Returns: Nothing.
4356 *
4357 */
4358 /* ARGSUSED */ /* Upstream common source with other platforms. */
4359 static void
4360 skd_dealloc_resources(dev_info_t *dip, skd_device_t *skdev,
4361 uint32_t seq, int instance)
4362 {
4363
4364 if (skdev == NULL)
4365 return;
4366
4367 if (seq & SKD_CONSTRUCTED)
4368 skd_destruct(skdev);
4369
4370 if (seq & SKD_INTR_ADDED) {
4371 skd_disable_intr(skdev);
4372 skd_release_intr(skdev);
4373 }
4374
4375 if (seq & SKD_DEV_IOBASE_MAPPED)
4376 ddi_regs_map_free(&skdev->dev_handle);
4377
4378 if (seq & SKD_IOMAP_IOBASE_MAPPED)
4379 ddi_regs_map_free(&skdev->iomap_handle);
4380
4381 if (seq & SKD_REGS_MAPPED)
4382 ddi_regs_map_free(&skdev->iobase_handle);
4383
4384 if (seq & SKD_CONFIG_SPACE_SETUP)
4385 pci_config_teardown(&skdev->pci_handle);
4386
4387 if (seq & SKD_SOFT_STATE_ALLOCED) {
4388 if (skdev->pathname &&
4389 (skdev->flags & SKD_PATHNAME_ALLOCED)) {
4390 kmem_free(skdev->pathname,
4391 strlen(skdev->pathname)+1);
4392 }
4393 }
4394
4395 if (skdev->s1120_devid)
4396 ddi_devid_free(skdev->s1120_devid);
4397 }
4398
4399 /*
4400 *
4401 * Name: skd_setup_interrupt, sets up the appropriate interrupt type
4402 * msi, msix, or fixed.
4403 *
4404 * Inputs: skdev - device state structure.
4405 *
4406 * Returns: DDI_FAILURE on failure otherwise DDI_SUCCESS.
4407 *
4408 */
4409 static int
4410 skd_setup_interrupts(skd_device_t *skdev)
4411 {
4412 int32_t rval = DDI_FAILURE;
4413 int32_t i;
4414 int32_t itypes = 0;
4415
4416 /*
4417 * See what types of interrupts this adapter and platform support
4418 */
4419 if ((i = ddi_intr_get_supported_types(skdev->dip, &itypes)) !=
4420 DDI_SUCCESS) {
4421 cmn_err(CE_NOTE, "intr supported types failed, rval=%xh, ", i);
4422 return (DDI_FAILURE);
4423 }
4424
4425 Dcmn_err(CE_NOTE, "%s:supported interrupts types: %x",
4426 skdev->name, itypes);
4427
4428 itypes &= skdev->irq_type;
4429
4430 if (!skd_disable_msix && (itypes & DDI_INTR_TYPE_MSIX) &&
4431 (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_MSIX)) == DDI_SUCCESS) {
4432 cmn_err(CE_NOTE, "!%s: successful MSI-X setup",
4433 skdev->name);
4434 } else if (!skd_disable_msi && (itypes & DDI_INTR_TYPE_MSI) &&
4435 (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_MSI)) == DDI_SUCCESS) {
4436 cmn_err(CE_NOTE, "!%s: successful MSI setup",
4437 skdev->name);
4438 } else if ((itypes & DDI_INTR_TYPE_FIXED) &&
4439 (rval = skd_setup_intr(skdev, DDI_INTR_TYPE_FIXED))
4440 == DDI_SUCCESS) {
4441 cmn_err(CE_NOTE, "!%s: successful fixed intr setup",
4442 skdev->name);
4443 } else {
4444 cmn_err(CE_WARN, "!%s: no supported interrupt types",
4445 skdev->name);
4446 return (DDI_FAILURE);
4447 }
4448
4449 Dcmn_err(CE_CONT, "%s: setup interrupts done", skdev->name);
4450
4451 return (rval);
4452 }
4453
4454 /*
4455 *
4456 * Name: skd_get_properties, retrieves properties from skd.conf.
4457 *
4458 * Inputs: skdev - device state structure.
4459 * dip - dev_info data structure.
4460 *
4461 * Returns: Nothing.
4462 *
4463 */
4464 /* ARGSUSED */ /* Upstream common source with other platforms. */
4465 static void
4466 skd_get_properties(dev_info_t *dip, skd_device_t *skdev)
4467 {
4468 int prop_value;
4469
4470 skd_isr_type = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4471 "intr-type-cap", -1);
4472
4473 prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4474 "max-scsi-reqs", -1);
4475 if (prop_value >= 1 && prop_value <= SKD_MAX_QUEUE_DEPTH)
4476 skd_max_queue_depth = prop_value;
4477
4478 prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4479 "max-scsi-reqs-per-msg", -1);
4480 if (prop_value >= 1 && prop_value <= SKD_MAX_REQ_PER_MSG)
4481 skd_max_req_per_msg = prop_value;
4482
4483 prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4484 "max-sgs-per-req", -1);
4485 if (prop_value >= 1 && prop_value <= SKD_MAX_N_SG_PER_REQ)
4486 skd_sgs_per_request = prop_value;
4487
4488 prop_value = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
4489 "dbg-level", -1);
4490 if (prop_value >= 1 && prop_value <= 2)
4491 skd_dbg_level = prop_value;
4492 }
4493
4494 /*
4495 *
4496 * Name: skd_wait_for_s1120, wait for device to finish
4497 * its initialization.
4498 *
4499 * Inputs: skdev - device state structure.
4500 *
4501 * Returns: DDI_SUCCESS or DDI_FAILURE.
4502 *
4503 */
4504 static int
4505 skd_wait_for_s1120(skd_device_t *skdev)
4506 {
4507 clock_t cur_ticks, tmo;
4508 int loop_cntr = 0;
4509 int rc = DDI_FAILURE;
4510
4511 mutex_enter(&skdev->skd_internalio_mutex);
4512
4513 while (skdev->gendisk_on == 0) {
4514 cur_ticks = ddi_get_lbolt();
4515 tmo = cur_ticks + drv_usectohz(MICROSEC);
4516 if (cv_timedwait(&skdev->cv_waitq,
4517 &skdev->skd_internalio_mutex, tmo) == -1) {
4518 /* Oops - timed out */
4519 if (loop_cntr++ > 10)
4520 break;
4521 }
4522 }
4523
4524 mutex_exit(&skdev->skd_internalio_mutex);
4525
4526 if (skdev->gendisk_on == 1)
4527 rc = DDI_SUCCESS;
4528
4529 return (rc);
4530 }
4531
4532 /*
4533 *
4534 * Name: skd_update_props, updates certain device properties.
4535 *
4536 * Inputs: skdev - device state structure.
4537 * dip - dev info structure
4538 *
4539 * Returns: Nothing.
4540 *
4541 */
4542 static void
4543 skd_update_props(skd_device_t *skdev, dev_info_t *dip)
4544 {
4545 int blksize = 512;
4546
4547 if ((ddi_prop_update_int64(DDI_DEV_T_NONE, dip, "device-nblocks",
4548 skdev->Nblocks) != DDI_SUCCESS) ||
4549 (ddi_prop_update_int(DDI_DEV_T_NONE, dip, "device-blksize",
4550 blksize) != DDI_SUCCESS)) {
4551 cmn_err(CE_NOTE, "%s: FAILED to create driver properties",
4552 skdev->name);
4553 }
4554 }
4555
4556 /*
4557 *
4558 * Name: skd_setup_devid, sets up device ID info.
4559 *
4560 * Inputs: skdev - device state structure.
4561 * devid - Device ID for the DDI.
4562 *
4563 * Returns: DDI_SUCCESS or DDI_FAILURE.
4564 *
4565 */
4566 static int
4567 skd_setup_devid(skd_device_t *skdev, ddi_devid_t *devid)
4568 {
4569 int rc, sz_model, sz_sn, sz;
4570
4571 sz_model = scsi_ascii_inquiry_len(skdev->inq_product_id,
4572 strlen(skdev->inq_product_id));
4573 sz_sn = scsi_ascii_inquiry_len(skdev->inq_serial_num,
4574 strlen(skdev->inq_serial_num));
4575 sz = sz_model + sz_sn + 1;
4576
4577 (void) snprintf(skdev->devid_str, sizeof (skdev->devid_str),
4578 "%.*s=%.*s", sz_model, skdev->inq_product_id, sz_sn,
4579 skdev->inq_serial_num);
4580 rc = ddi_devid_init(skdev->dip, DEVID_SCSI_SERIAL, sz,
4581 skdev->devid_str, devid);
4582
4583 if (rc != DDI_SUCCESS)
4584 cmn_err(CE_WARN, "!%s: devid_init FAILED", skdev->name);
4585
4586 return (rc);
4587
4588 }
4589
4590 /*
4591 *
4592 * Name: skd_bd_attach, attach to blkdev driver
4593 *
4594 * Inputs: skdev - device state structure.
4595 * dip - device info structure.
4596 *
4597 * Returns: DDI_SUCCESS or DDI_FAILURE.
4598 *
4599 */
4600 static int
4601 skd_bd_attach(dev_info_t *dip, skd_device_t *skdev)
4602 {
4603 int rv;
4604
4605 skdev->s_bdh = bd_alloc_handle(skdev, &skd_bd_ops,
4606 &skd_64bit_io_dma_attr, KM_SLEEP);
4607
4608 if (skdev->s_bdh == NULL) {
4609 cmn_err(CE_WARN, "!skd_bd_attach: FAILED");
4610
4611 return (DDI_FAILURE);
4612 }
4613
4614 rv = bd_attach_handle(dip, skdev->s_bdh);
4615
4616 if (rv != DDI_SUCCESS) {
4617 cmn_err(CE_WARN, "!bd_attach_handle FAILED\n");
4618 } else {
4619 Dcmn_err(CE_NOTE, "bd_attach_handle OK\n");
4620 skdev->bd_attached++;
4621 }
4622
4623 return (rv);
4624 }
4625
4626 /*
4627 *
4628 * Name: skd_bd_detach, detach from the blkdev driver.
4629 *
4630 * Inputs: skdev - device state structure.
4631 *
4632 * Returns: Nothing.
4633 *
4634 */
4635 static void
4636 skd_bd_detach(skd_device_t *skdev)
4637 {
4638 if (skdev->bd_attached)
4639 (void) bd_detach_handle(skdev->s_bdh);
4640
4641 bd_free_handle(skdev->s_bdh);
4642 }
4643
4644 /*
4645 *
4646 * Name: skd_attach, attach sdk device driver
4647 *
4648 * Inputs: dip - device info structure.
4649 * cmd - DDI attach argument (ATTACH, RESUME, etc.)
4650 *
4651 * Returns: DDI_SUCCESS or DDI_FAILURE.
4652 *
4653 */
4654 static int
4655 skd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
4656 {
4657 int instance;
4658 int nregs;
4659 skd_device_t *skdev = NULL;
4660 int inx;
4661 uint16_t cmd_reg;
4662 int progress = 0;
4663 char name[MAXPATHLEN];
4664 off_t regsize;
4665 char pci_str[32];
4666 char fw_version[8];
4667
4668 instance = ddi_get_instance(dip);
4669
4670 (void) ddi_get_parent_data(dip);
4671
4672 switch (cmd) {
4673 case DDI_ATTACH:
4674 break;
4675
4676 case DDI_RESUME:
4677 /* Re-enable timer */
4678 skd_start_timer(skdev);
4679
4680 return (DDI_SUCCESS);
4681
4682 default:
4683 return (DDI_FAILURE);
4684 }
4685
4686 Dcmn_err(CE_NOTE, "sTec S1120 Driver v%s Instance: %d",
4687 VERSIONSTR, instance);
4688
4689 /*
4690 * Check that hardware is installed in a DMA-capable slot
4691 */
4692 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
4693 cmn_err(CE_WARN, "!%s%d: installed in a "
4694 "slot that isn't DMA-capable slot", DRV_NAME, instance);
4695 return (DDI_FAILURE);
4696 }
4697
4698 /*
4699 * No support for high-level interrupts
4700 */
4701 if (ddi_intr_hilevel(dip, 0) != 0) {
4702 cmn_err(CE_WARN, "!%s%d: High level interrupt not supported",
4703 DRV_NAME, instance);
4704 return (DDI_FAILURE);
4705 }
4706
4707 /*
4708 * Allocate our per-device-instance structure
4709 */
4710 if (ddi_soft_state_zalloc(skd_state, instance) !=
4711 DDI_SUCCESS) {
4712 cmn_err(CE_WARN, "!%s%d: soft state zalloc failed ",
4713 DRV_NAME, instance);
4714 return (DDI_FAILURE);
4715 }
4716
4717 progress |= SKD_SOFT_STATE_ALLOCED;
4718
4719 skdev = ddi_get_soft_state(skd_state, instance);
4720 if (skdev == NULL) {
4721 cmn_err(CE_WARN, "!%s%d: Unable to get soft state structure",
4722 DRV_NAME, instance);
4723 goto skd_attach_failed;
4724 }
4725
4726 (void) snprintf(skdev->name, sizeof (skdev->name),
4727 DRV_NAME "%d", instance);
4728
4729 skdev->dip = dip;
4730 skdev->instance = instance;
4731
4732 ddi_set_driver_private(dip, skdev);
4733
4734 (void) ddi_pathname(dip, name);
4735 for (inx = strlen(name); inx; inx--) {
4736 if (name[inx] == ',') {
4737 name[inx] = '\0';
4738 break;
4739 }
4740 if (name[inx] == '@') {
4741 break;
4742 }
4743 }
4744
4745 skdev->pathname = kmem_zalloc(strlen(name) + 1, KM_SLEEP);
4746 (void) strlcpy(skdev->pathname, name, strlen(name) + 1);
4747
4748 progress |= SKD_PATHNAME_ALLOCED;
4749 skdev->flags |= SKD_PATHNAME_ALLOCED;
4750
4751 if (pci_config_setup(dip, &skdev->pci_handle) != DDI_SUCCESS) {
4752 cmn_err(CE_WARN, "!%s%d: pci_config_setup FAILED",
4753 DRV_NAME, instance);
4754 goto skd_attach_failed;
4755 }
4756
4757 progress |= SKD_CONFIG_SPACE_SETUP;
4758
4759 /* Save adapter path. */
4760
4761 (void) ddi_dev_nregs(dip, &nregs);
4762
4763 /*
4764 * 0x0 Configuration Space
4765 * 0x1 I/O Space
4766 * 0x2 s1120 register space
4767 */
4768 if (ddi_dev_regsize(dip, 1, ®size) != DDI_SUCCESS ||
4769 ddi_regs_map_setup(dip, 1, &skdev->iobase, 0, regsize,
4770 &dev_acc_attr, &skdev->iobase_handle) != DDI_SUCCESS) {
4771 cmn_err(CE_WARN, "!%s%d: regs_map_setup(mem) failed",
4772 DRV_NAME, instance);
4773 goto skd_attach_failed;
4774 }
4775 progress |= SKD_REGS_MAPPED;
4776
4777 skdev->iomap_iobase = skdev->iobase;
4778 skdev->iomap_handle = skdev->iobase_handle;
4779
4780 Dcmn_err(CE_NOTE, "%s: PCI iobase=%ph, iomap=%ph, regnum=%d, "
4781 "regsize=%ld", skdev->name, (void *)skdev->iobase,
4782 (void *)skdev->iomap_iobase, 1, regsize);
4783
4784 if (ddi_dev_regsize(dip, 2, ®size) != DDI_SUCCESS ||
4785 ddi_regs_map_setup(dip, 2, &skdev->dev_iobase, 0, regsize,
4786 &dev_acc_attr, &skdev->dev_handle) != DDI_SUCCESS) {
4787 cmn_err(CE_WARN, "!%s%d: regs_map_setup(mem) failed",
4788 DRV_NAME, instance);
4789
4790 goto skd_attach_failed;
4791 }
4792
4793 skdev->dev_memsize = (int)regsize;
4794
4795 Dcmn_err(CE_NOTE, "%s: DEV iobase=%ph regsize=%d",
4796 skdev->name, (void *)skdev->dev_iobase,
4797 skdev->dev_memsize);
4798
4799 progress |= SKD_DEV_IOBASE_MAPPED;
4800
4801 cmd_reg = pci_config_get16(skdev->pci_handle, PCI_CONF_COMM);
4802 cmd_reg |= (PCI_COMM_ME | PCI_COMM_INTX_DISABLE);
4803 cmd_reg &= ~PCI_COMM_PARITY_DETECT;
4804 pci_config_put16(skdev->pci_handle, PCI_CONF_COMM, cmd_reg);
4805
4806 /* Get adapter PCI device information. */
4807 skdev->vendor_id = pci_config_get16(skdev->pci_handle, PCI_CONF_VENID);
4808 skdev->device_id = pci_config_get16(skdev->pci_handle, PCI_CONF_DEVID);
4809
4810 Dcmn_err(CE_NOTE, "%s: %x-%x card detected",
4811 skdev->name, skdev->vendor_id, skdev->device_id);
4812
4813 skd_get_properties(dip, skdev);
4814
4815 (void) skd_init(skdev);
4816
4817 if (skd_construct(skdev, instance)) {
4818 cmn_err(CE_WARN, "!%s: construct FAILED", skdev->name);
4819 goto skd_attach_failed;
4820 }
4821
4822 progress |= SKD_PROBED;
4823 progress |= SKD_CONSTRUCTED;
4824
4825 SIMPLEQ_INIT(&skdev->waitqueue);
4826
4827 /*
4828 * Setup interrupt handler
4829 */
4830 if (skd_setup_interrupts(skdev) != DDI_SUCCESS) {
4831 cmn_err(CE_WARN, "!%s: Unable to add interrupt",
4832 skdev->name);
4833 goto skd_attach_failed;
4834 }
4835
4836 progress |= SKD_INTR_ADDED;
4837
4838 ADAPTER_STATE_LOCK(skdev);
4839 skdev->flags |= SKD_ATTACHED;
4840 ADAPTER_STATE_UNLOCK(skdev);
4841
4842 skdev->d_blkshift = 9;
4843 progress |= SKD_ATTACHED;
4844
4845
4846 skd_start_device(skdev);
4847
4848 ADAPTER_STATE_LOCK(skdev);
4849 skdev->progress = progress;
4850 ADAPTER_STATE_UNLOCK(skdev);
4851
4852 /*
4853 * Give the board a chance to
4854 * complete its initialization.
4855 */
4856 if (skdev->gendisk_on != 1)
4857 (void) skd_wait_for_s1120(skdev);
4858
4859 if (skdev->gendisk_on != 1) {
4860 cmn_err(CE_WARN, "!%s: s1120 failed to come ONLINE",
4861 skdev->name);
4862 goto skd_attach_failed;
4863 }
4864
4865 ddi_report_dev(dip);
4866
4867 skd_send_internal_skspcl(skdev, &skdev->internal_skspcl, INQUIRY);
4868
4869 skdev->disks_initialized++;
4870
4871 (void) strcpy(fw_version, "???");
4872 (void) skd_pci_info(skdev, pci_str, sizeof (pci_str));
4873 Dcmn_err(CE_NOTE, " sTec S1120 Driver(%s) version %s-b%s",
4874 DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
4875
4876 Dcmn_err(CE_NOTE, " sTec S1120 %04x:%04x %s 64 bit",
4877 skdev->vendor_id, skdev->device_id, pci_str);
4878
4879 Dcmn_err(CE_NOTE, " sTec S1120 %s\n", skdev->pathname);
4880
4881 if (*skdev->inq_serial_num)
4882 Dcmn_err(CE_NOTE, " sTec S1120 serial#=%s",
4883 skdev->inq_serial_num);
4884
4885 if (*skdev->inq_product_id &&
4886 *skdev->inq_product_rev)
4887 Dcmn_err(CE_NOTE, " sTec S1120 prod ID=%s prod rev=%s",
4888 skdev->inq_product_id, skdev->inq_product_rev);
4889
4890 Dcmn_err(CE_NOTE, "%s: intr-type-cap: %d",
4891 skdev->name, skdev->irq_type);
4892 Dcmn_err(CE_NOTE, "%s: max-scsi-reqs: %d",
4893 skdev->name, skd_max_queue_depth);
4894 Dcmn_err(CE_NOTE, "%s: max-sgs-per-req: %d",
4895 skdev->name, skd_sgs_per_request);
4896 Dcmn_err(CE_NOTE, "%s: max-scsi-req-per-msg: %d",
4897 skdev->name, skd_max_req_per_msg);
4898
4899 if (skd_bd_attach(dip, skdev) == DDI_FAILURE)
4900 goto skd_attach_failed;
4901
4902 skd_update_props(skdev, dip);
4903
4904 /* Enable timer */
4905 skd_start_timer(skdev);
4906
4907 ADAPTER_STATE_LOCK(skdev);
4908 skdev->progress = progress;
4909 ADAPTER_STATE_UNLOCK(skdev);
4910
4911 skdev->attached = 1;
4912 return (DDI_SUCCESS);
4913
4914 skd_attach_failed:
4915 skd_dealloc_resources(dip, skdev, progress, instance);
4916
4917 if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) {
4918 skd_destroy_mutex(skdev);
4919 }
4920
4921 ddi_soft_state_free(skd_state, instance);
4922
4923 cmn_err(CE_WARN, "!skd_attach FAILED: progress=%x", progress);
4924 return (DDI_FAILURE);
4925 }
4926
4927 /*
4928 *
4929 * Name: skd_halt
4930 *
4931 * Inputs: skdev - device state structure.
4932 *
4933 * Returns: Nothing.
4934 *
4935 */
4936 static void
4937 skd_halt(skd_device_t *skdev)
4938 {
4939 Dcmn_err(CE_NOTE, "%s: halt/suspend ......", skdev->name);
4940 }
4941
4942 /*
4943 *
4944 * Name: skd_detach, detaches driver from the system.
4945 *
4946 * Inputs: dip - device info structure.
4947 *
4948 * Returns: DDI_SUCCESS on successful detach otherwise DDI_FAILURE.
4949 *
4950 */
4951 static int
4952 skd_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
4953 {
4954 skd_buf_private_t *pbuf;
4955 skd_device_t *skdev;
4956 int instance;
4957 timeout_id_t timer_id = NULL;
4958 int rv1 = DDI_SUCCESS;
4959 struct skd_special_context *skspcl;
4960
4961 instance = ddi_get_instance(dip);
4962
4963 skdev = ddi_get_soft_state(skd_state, instance);
4964 if (skdev == NULL) {
4965 cmn_err(CE_WARN, "!detach failed: NULL skd state");
4966
4967 return (DDI_FAILURE);
4968 }
4969
4970 Dcmn_err(CE_CONT, "skd_detach(%d): entered", instance);
4971
4972 switch (cmd) {
4973 case DDI_DETACH:
4974 /* Test for packet cache inuse. */
4975 ADAPTER_STATE_LOCK(skdev);
4976
4977 /* Stop command/event processing. */
4978 skdev->flags |= (SKD_SUSPENDED | SKD_CMD_ABORT_TMO);
4979
4980 /* Disable driver timer if no adapters. */
4981 if (skdev->skd_timer_timeout_id != 0) {
4982 timer_id = skdev->skd_timer_timeout_id;
4983 skdev->skd_timer_timeout_id = 0;
4984 }
4985 ADAPTER_STATE_UNLOCK(skdev);
4986
4987 if (timer_id != 0) {
4988 (void) untimeout(timer_id);
4989 }
4990
4991 #ifdef SKD_PM
4992 if (skdev->power_level != LOW_POWER_LEVEL) {
4993 skd_halt(skdev);
4994 skdev->power_level = LOW_POWER_LEVEL;
4995 }
4996 #endif
4997 skspcl = &skdev->internal_skspcl;
4998 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
4999
5000 skd_stop_device(skdev);
5001
5002 /*
5003 * Clear request queue.
5004 */
5005 while (!SIMPLEQ_EMPTY(&skdev->waitqueue)) {
5006 pbuf = skd_get_queued_pbuf(skdev);
5007 skd_end_request_abnormal(skdev, pbuf, ECANCELED,
5008 SKD_IODONE_WNIOC);
5009 Dcmn_err(CE_NOTE,
5010 "detach: cancelled pbuf %p %ld <%s> %lld\n",
5011 (void *)pbuf, pbuf->x_xfer->x_nblks,
5012 (pbuf->dir & B_READ) ? "Read" : "Write",
5013 pbuf->x_xfer->x_blkno);
5014 }
5015
5016 skd_bd_detach(skdev);
5017
5018 skd_dealloc_resources(dip, skdev, skdev->progress, instance);
5019
5020 if ((skdev->flags & SKD_MUTEX_DESTROYED) == 0) {
5021 skd_destroy_mutex(skdev);
5022 }
5023
5024 ddi_soft_state_free(skd_state, instance);
5025
5026 skd_exit();
5027
5028 break;
5029
5030 case DDI_SUSPEND:
5031 /* Block timer. */
5032
5033 ADAPTER_STATE_LOCK(skdev);
5034 skdev->flags |= SKD_SUSPENDED;
5035
5036 /* Disable driver timer if last adapter. */
5037 if (skdev->skd_timer_timeout_id != 0) {
5038 timer_id = skdev->skd_timer_timeout_id;
5039 skdev->skd_timer_timeout_id = 0;
5040 }
5041 ADAPTER_STATE_UNLOCK(skdev);
5042
5043 if (timer_id != 0) {
5044 (void) untimeout(timer_id);
5045 }
5046
5047 ddi_prop_remove_all(dip);
5048
5049 skd_halt(skdev);
5050
5051 break;
5052 default:
5053 rv1 = DDI_FAILURE;
5054 break;
5055 }
5056
5057 if (rv1 != DDI_SUCCESS) {
5058 cmn_err(CE_WARN, "!skd_detach, failed, rv1=%x", rv1);
5059 } else {
5060 Dcmn_err(CE_CONT, "skd_detach: exiting");
5061 }
5062
5063 if (rv1 != DDI_SUCCESS)
5064 return (DDI_FAILURE);
5065
5066 return (rv1);
5067 }
5068
5069 /*
5070 *
5071 * Name: skd_devid_init, calls skd_setup_devid to setup
5072 * the device's devid structure.
5073 *
5074 * Inputs: arg - device state structure.
5075 * dip - dev_info structure.
5076 * devid - devid structure.
5077 *
5078 * Returns: Nothing.
5079 *
5080 */
5081 /* ARGSUSED */ /* Upstream common source with other platforms. */
5082 static int
5083 skd_devid_init(void *arg, dev_info_t *dip, ddi_devid_t *devid)
5084 {
5085 skd_device_t *skdev = arg;
5086
5087 (void) skd_setup_devid(skdev, devid);
5088
5089 return (0);
5090 }
5091
5092 /*
5093 *
5094 * Name: skd_bd_driveinfo, retrieves device's info.
5095 *
5096 * Inputs: drive - drive data structure.
5097 * arg - device state structure.
5098 *
5099 * Returns: Nothing.
5100 *
5101 */
5102 static void
5103 skd_bd_driveinfo(void *arg, bd_drive_t *drive)
5104 {
5105 skd_device_t *skdev = arg;
5106
5107 drive->d_qsize = (skdev->queue_depth_limit * 4) / 5;
5108 drive->d_maxxfer = SKD_DMA_MAXXFER;
5109 drive->d_removable = B_FALSE;
5110 drive->d_hotpluggable = B_FALSE;
5111 drive->d_target = 0;
5112 drive->d_lun = 0;
5113
5114 if (skdev->inquiry_is_valid != 0) {
5115 drive->d_vendor = skdev->inq_vendor_id;
5116 drive->d_vendor_len = strlen(drive->d_vendor);
5117
5118 drive->d_product = skdev->inq_product_id;
5119 drive->d_product_len = strlen(drive->d_product);
5120
5121 drive->d_serial = skdev->inq_serial_num;
5122 drive->d_serial_len = strlen(drive->d_serial);
5123
5124 drive->d_revision = skdev->inq_product_rev;
5125 drive->d_revision_len = strlen(drive->d_revision);
5126 }
5127 }
5128
5129 /*
5130 *
5131 * Name: skd_bd_mediainfo, retrieves device media info.
5132 *
5133 * Inputs: arg - device state structure.
5134 * media - container for media info.
5135 *
5136 * Returns: Zero.
5137 *
5138 */
5139 static int
5140 skd_bd_mediainfo(void *arg, bd_media_t *media)
5141 {
5142 skd_device_t *skdev = arg;
5143
5144 media->m_nblks = skdev->Nblocks;
5145 media->m_blksize = 512;
5146 media->m_pblksize = 4096;
5147 media->m_readonly = B_FALSE;
5148 media->m_solidstate = B_TRUE;
5149
5150 return (0);
5151 }
5152
5153 /*
5154 *
5155 * Name: skd_rw, performs R/W requests for blkdev driver.
5156 *
5157 * Inputs: skdev - device state structure.
5158 * xfer - tranfer structure.
5159 * dir - I/O direction.
5160 *
5161 * Returns: EAGAIN if device is not online. EIO if blkdev wants us to
5162 * be a dump device (for now).
5163 * Value returned by skd_start().
5164 *
5165 */
5166 static int
5167 skd_rw(skd_device_t *skdev, bd_xfer_t *xfer, int dir)
5168 {
5169 skd_buf_private_t *pbuf;
5170
5171 /*
5172 * The x_flags structure element is not defined in Oracle Solaris
5173 */
5174 /* We'll need to fix this in order to support dump on this device. */
5175 if (xfer->x_flags & BD_XFER_POLL)
5176 return (EIO);
5177
5178 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
5179 Dcmn_err(CE_NOTE, "Device - not ONLINE");
5180
5181 skd_request_fn_not_online(skdev);
5182
5183 return (EAGAIN);
5184 }
5185
5186 pbuf = kmem_zalloc(sizeof (skd_buf_private_t), KM_NOSLEEP);
5187 if (pbuf == NULL)
5188 return (ENOMEM);
5189
5190 WAITQ_LOCK(skdev);
5191 pbuf->dir = dir;
5192 pbuf->x_xfer = xfer;
5193
5194 skd_queue(skdev, pbuf);
5195 skdev->ios_queued++;
5196 WAITQ_UNLOCK(skdev);
5197
5198 skd_start(skdev);
5199
5200 return (0);
5201 }
5202
5203 /*
5204 *
5205 * Name: skd_bd_read, performs blkdev read requests.
5206 *
5207 * Inputs: arg - device state structure.
5208 * xfer - tranfer request structure.
5209 *
5210 * Returns: Value return by skd_rw().
5211 *
5212 */
5213 static int
5214 skd_bd_read(void *arg, bd_xfer_t *xfer)
5215 {
5216 return (skd_rw(arg, xfer, B_READ));
5217 }
5218
5219 /*
5220 *
5221 * Name: skd_bd_write, performs blkdev write requests.
5222 *
5223 * Inputs: arg - device state structure.
5224 * xfer - tranfer request structure.
5225 *
5226 * Returns: Value return by skd_rw().
5227 *
5228 */
5229 static int
5230 skd_bd_write(void *arg, bd_xfer_t *xfer)
5231 {
5232 return (skd_rw(arg, xfer, B_WRITE));
5233 }