/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
 */
/*
 * Copyright (c) 1999,2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 2002 Eric Moore
 * Copyright (c) 2002 LSI Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The party using or redistributing the source code and binary forms
 *    agrees to the disclaimer below and the terms and conditions set forth
 *    herein.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/int_types.h>
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/scsi/targets/sddef.h>
#include <sys/debug.h>
#include <sys/pci.h>
#include <sys/ksynch.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/byteorder.h>

#include "amrreg.h"
#include "amrvar.h"

/* dynamic debug symbol */
int amr_debug_var = 0;

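/*
 * The two wait macros below poll cond in nominal 100us steps and
 * clear done_flag if it has not come true after count iterations.
 * AMR_DELAY() sleeps via delay(9F) (subject to clock-tick rounding)
 * and so may only be used where blocking is legal; AMR_BUSYWAIT()
 * spins with drv_usecwait(9F), which is the variant that is safe at
 * high interrupt level and in panic context (see amr_tran_reset()).
 */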
#define	AMR_DELAY(cond, count, done_flag) { \
		int local_counter = 0; \
		done_flag = 1; \
		while (!(cond)) { \
			delay(drv_usectohz(100)); \
			if ((local_counter) > count) { \
				done_flag = 0; \
				break; \
			} \
			(local_counter)++; \
		} \
	}

#define	AMR_BUSYWAIT(cond, count, done_flag) { \
		int local_counter = 0; \
		done_flag = 1; \
		while (!(cond)) { \
			drv_usecwait(100); \
			if ((local_counter) > count) { \
				done_flag = 0; \
				break; \
			} \
			(local_counter)++; \
		} \
	}

/*
 * driver interfaces
 */

static uint_t amr_intr(caddr_t arg);
static void amr_done(struct amr_softs *softs);

static int amr_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
    void *arg, void **result);
static int amr_attach(dev_info_t *, ddi_attach_cmd_t);
static int amr_detach(dev_info_t *, ddi_detach_cmd_t);

static int amr_setup_mbox(struct amr_softs *softs);
static int amr_setup_sg(struct amr_softs *softs);

/*
 * Command wrappers
 */
static int amr_query_controller(struct amr_softs *softs);
static void *amr_enquiry(struct amr_softs *softs, size_t bufsize,
    uint8_t cmd, uint8_t cmdsub, uint8_t cmdqual);
static int amr_flush(struct amr_softs *softs);

/*
 * Command processing.
 */
static void amr_rw_command(struct amr_softs *softs,
    struct scsi_pkt *pkt, int lun);
static void amr_mode_sense(union scsi_cdb *cdbp, struct buf *bp,
    unsigned int capacity);
static void amr_set_arq_data(struct scsi_pkt *pkt, uchar_t key);
static int amr_enquiry_mapcmd(struct amr_command *ac, uint32_t data_size);
static void amr_enquiry_unmapcmd(struct amr_command *ac);
static int amr_mapcmd(struct amr_command *ac, int (*callback)(), caddr_t arg);
static void amr_unmapcmd(struct amr_command *ac);

/*
 * Status monitoring
 */
static void amr_periodic(void *data);

/*
 * Interface-specific shims
 */
static int amr_poll_command(struct amr_command *ac);
static void amr_start_waiting_queue(void *softp);
static void amr_call_pkt_comp(struct amr_command *head);

/*
 * SCSI interface
 */
static int amr_setup_tran(dev_info_t *dip, struct amr_softs *softp);

/*
 * Function prototypes
 *
 * SCSA functions exported by means of the transport table
 */
static int amr_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd);
static int amr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static int amr_tran_reset(struct scsi_address *ap, int level);
static int amr_tran_getcap(struct scsi_address *ap, char *cap, int whom);
static int amr_tran_setcap(struct scsi_address *ap, char *cap, int value,
    int whom);
static struct scsi_pkt *amr_tran_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg);
static void amr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
static void amr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
static void amr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);

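/*
 * DMA attributes for data buffers: at most AMR_NSEG scatter/gather
 * segments per transfer, a 24-bit total byte-count limit, and
 * AMR_BLKSIZE granularity.
 */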
static ddi_dma_attr_t buffer_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffull,	/* highest usable address */
	0x00ffffffull,	/* maximum DMAable byte count */
	4,		/* alignment */
	1,		/* burst sizes */
	1,		/* minimum transfer */
	0xffffffffull,	/* maximum transfer */
	0xffffffffull,	/* maximum segment length */
	AMR_NSEG,	/* maximum number of segments */
	AMR_BLKSIZE,	/* granularity */
	0,		/* flags (reserved) */
};

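/*
 * DMA attributes for driver-internal control structures (the mailbox
 * and the scatter/gather tables), each of which must fit in a single
 * DMA cookie.
 */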
static ddi_dma_attr_t addr_dma_attr = {
	DMA_ATTR_V0,	/* version of this structure */
	0,		/* lowest usable address */
	0xffffffffull,	/* highest usable address */
	0x7fffffff,	/* maximum DMAable byte count */
	4,		/* alignment */
	1,		/* burst sizes */
	1,		/* minimum transfer */
	0xffffffffull,	/* maximum transfer */
	0xffffffffull,	/* maximum segment length */
	1,		/* maximum number of segments */
	1,		/* granularity */
	0,		/* flags (reserved) */
};


static struct dev_ops amr_ops = {
	DEVO_REV,	/* devo_rev */
	0,		/* refcnt */
	amr_info,	/* info */
	nulldev,	/* identify */
	nulldev,	/* probe */
	amr_attach,	/* attach */
	amr_detach,	/* detach */
	nodev,		/* reset */
	NULL,		/* driver operations */
	(struct bus_ops *)0,	/* bus operations */
	0,		/* power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};


extern struct mod_ops mod_driverops;
static struct modldrv modldrv = {
	&mod_driverops,	/* Type of module. driver here */
	"AMR Driver",	/* Name of the module. */
	&amr_ops,	/* Driver ops vector */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	{ &modldrv, NULL }
};
/* DMA access attributes */
static ddi_device_acc_attr_t accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

static struct amr_softs *amr_softstatep;


int
_init(void)
{
	int error;

	error = ddi_soft_state_init((void *)&amr_softstatep,
	    sizeof (struct amr_softs), 0);

	if (error != 0)
		goto error_out;

	if ((error = scsi_hba_init(&modlinkage)) != 0) {
		ddi_soft_state_fini((void *)&amr_softstatep);
		goto error_out;
	}

	error = mod_install(&modlinkage);
	if (error != 0) {
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini((void *)&amr_softstatep);
		goto error_out;
	}

	return (error);

error_out:
	cmn_err(CE_NOTE, "_init failed");
	return (error);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	int error;

	if ((error = mod_remove(&modlinkage)) != 0) {
		return (error);
	}

	scsi_hba_fini(&modlinkage);

	ddi_soft_state_fini((void *)&amr_softstatep);
	return (error);
}


static int
amr_attach(dev_info_t *dev, ddi_attach_cmd_t cmd)
{
	struct amr_softs *softs;
	int error;
	uint32_t command, i;
	int instance;
	caddr_t cfgaddr;

	instance = ddi_get_instance(dev);

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		return (DDI_FAILURE);

	default:
		return (DDI_FAILURE);
	}

	/*
	 * Initialize softs.
	 */
	if (ddi_soft_state_zalloc(amr_softstatep, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	softs = ddi_get_soft_state(amr_softstatep, instance);
	softs->state |= AMR_STATE_SOFT_STATE_SETUP;

	softs->dev_info_p = dev;

	AMRDB_PRINT((CE_NOTE, "softs: %p; busy_slot addr: %p",
	    (void *)softs, (void *)&(softs->amr_busyslots)));

	if (pci_config_setup(dev, &(softs->pciconfig_handle))
	    != DDI_SUCCESS) {
		goto error_out;
	}
	softs->state |= AMR_STATE_PCI_CONFIG_SETUP;

	error = ddi_regs_map_setup(dev, 1, &cfgaddr, 0, 0,
	    &accattr, &(softs->regsmap_handle));
	if (error != DDI_SUCCESS) {
		goto error_out;
	}
	softs->state |= AMR_STATE_PCI_MEM_MAPPED;

	/*
	 * Read the PCI command register to check the memory-space and
	 * bus-master enables.
	 */
	command = pci_config_get16(softs->pciconfig_handle, PCI_CONF_COMM);

	/*
	 * Make sure we are going to be able to talk to this board.
	 */
	if ((command & PCI_COMM_MAE) == 0) {
		AMRDB_PRINT((CE_NOTE, "memory window not available"));
		goto error_out;
	}

	/* force the busmaster enable bit on */
	if (!(command & PCI_COMM_ME)) {
		command |= PCI_COMM_ME;
		pci_config_put16(softs->pciconfig_handle,
		    PCI_CONF_COMM, command);
		command = pci_config_get16(softs->pciconfig_handle,
		    PCI_CONF_COMM);
		if (!(command & PCI_COMM_ME))
			goto error_out;
	}

	/*
	 * Allocate and connect our interrupt.
	 */
	if (ddi_intr_hilevel(dev, 0) != 0) {
		AMRDB_PRINT((CE_NOTE,
		    "High level interrupt is not supported!"));
		goto error_out;
	}

	if (ddi_get_iblock_cookie(dev, 0, &softs->iblock_cookiep)
	    != DDI_SUCCESS) {
		goto error_out;
	}

	mutex_init(&softs->cmd_mutex, NULL, MUTEX_DRIVER,
	    softs->iblock_cookiep); /* used in interrupt context */
	mutex_init(&softs->queue_mutex, NULL, MUTEX_DRIVER,
	    softs->iblock_cookiep); /* used in interrupt context */
	mutex_init(&softs->periodic_mutex, NULL, MUTEX_DRIVER,
	    softs->iblock_cookiep); /* used in interrupt context */
	/* synchronize waits for the busy slots via this cv */
	cv_init(&softs->cmd_cv, NULL, CV_DRIVER, NULL);
	softs->state |= AMR_STATE_KMUTEX_INITED;

	/*
	 * Do bus-independent initialisation, bring controller online.
	 */
	if (amr_setup_mbox(softs) != DDI_SUCCESS)
		goto error_out;
	softs->state |= AMR_STATE_MAILBOX_SETUP;

	if (amr_setup_sg(softs) != DDI_SUCCESS)
		goto error_out;

	softs->state |= AMR_STATE_SG_TABLES_SETUP;

	if (amr_query_controller(softs) != DDI_SUCCESS)
		goto error_out;

	/*
	 * A taskq is created for dispatching the waiting queue processing
	 * threads. The number of threads equals the number of logical
	 * drives, or 1 if no logical drive is configured for this
	 * instance.
	 */
	if ((softs->amr_taskq = ddi_taskq_create(dev, "amr_taskq",
	    MAX(softs->amr_nlogdrives, 1), TASKQ_DEFAULTPRI, 0)) == NULL) {
		goto error_out;
	}
	softs->state |= AMR_STATE_TASKQ_SETUP;

	if (ddi_add_intr(dev, 0, &softs->iblock_cookiep, NULL,
	    amr_intr, (caddr_t)softs) != DDI_SUCCESS) {
		goto error_out;
	}
	softs->state |= AMR_STATE_INTR_SETUP;

	/* set up the tran interface */
	if (amr_setup_tran(softs->dev_info_p, softs) != DDI_SUCCESS) {
		AMRDB_PRINT((CE_NOTE, "setup tran failed"));
		goto error_out;
	}
	softs->state |= AMR_STATE_TRAN_SETUP;

	/* schedule a thread for periodic check */
	mutex_enter(&softs->periodic_mutex);
	softs->timeout_t = timeout(amr_periodic, (void *)softs,
	    drv_usectohz(500000 * AMR_PERIODIC_TIMEOUT));
	softs->state |= AMR_STATE_TIMEOUT_ENABLED;
	mutex_exit(&softs->periodic_mutex);

	/* print firmware information in verbose mode */
	cmn_err(CE_CONT, "?MegaRaid %s %s attached.",
	    softs->amr_product_info.pi_product_name,
	    softs->amr_product_info.pi_firmware_ver);

	/* clear any interrupts */
	AMR_QCLEAR_INTR(softs);
	return (DDI_SUCCESS);

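	/*
	 * Tear down in the reverse order of setup; the bits accumulated
	 * in softs->state record exactly which resources were acquired,
	 * so a partially completed attach releases only what exists.
	 */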
error_out:
	if (softs->state & AMR_STATE_INTR_SETUP) {
		ddi_remove_intr(dev, 0, softs->iblock_cookiep);
	}
	if (softs->state & AMR_STATE_TASKQ_SETUP) {
		ddi_taskq_destroy(softs->amr_taskq);
	}
	if (softs->state & AMR_STATE_SG_TABLES_SETUP) {
		for (i = 0; i < softs->sg_max_count; i++) {
			(void) ddi_dma_unbind_handle(
			    softs->sg_items[i].sg_handle);
			(void) ddi_dma_mem_free(
			    &((softs->sg_items[i]).sg_acc_handle));
			(void) ddi_dma_free_handle(
			    &(softs->sg_items[i].sg_handle));
		}
	}
	if (softs->state & AMR_STATE_MAILBOX_SETUP) {
		(void) ddi_dma_unbind_handle(softs->mbox_dma_handle);
		(void) ddi_dma_mem_free(&softs->mbox_acc_handle);
		(void) ddi_dma_free_handle(&softs->mbox_dma_handle);
	}
	if (softs->state & AMR_STATE_KMUTEX_INITED) {
		mutex_destroy(&softs->queue_mutex);
		mutex_destroy(&softs->cmd_mutex);
		mutex_destroy(&softs->periodic_mutex);
		cv_destroy(&softs->cmd_cv);
	}
	if (softs->state & AMR_STATE_PCI_MEM_MAPPED)
		ddi_regs_map_free(&softs->regsmap_handle);
	if (softs->state & AMR_STATE_PCI_CONFIG_SETUP)
		pci_config_teardown(&softs->pciconfig_handle);
	if (softs->state & AMR_STATE_SOFT_STATE_SETUP)
		ddi_soft_state_free(amr_softstatep, instance);
	return (DDI_FAILURE);
}

/*
 * Bring the controller down to a dormant state and detach all child devices.
 * This function is called during detach and at system shutdown.
 *
 * Note that we can assume that the bufq on the controller is empty, as we won't
 * allow shutdown if any device is open.
 */
/*ARGSUSED*/
static int
amr_detach(dev_info_t *dev, ddi_detach_cmd_t cmd)
{
	struct amr_softs *softs;
	int instance;
	uint32_t i, done_flag;

	instance = ddi_get_instance(dev);
	softs = ddi_get_soft_state(amr_softstatep, instance);

	/* flush the controller */
	if (amr_flush(softs) != 0) {
		AMRDB_PRINT((CE_NOTE, "device shutdown failed"));
		return (EIO);
	}

	/* release the amr timer */
	mutex_enter(&softs->periodic_mutex);
	softs->state &= ~AMR_STATE_TIMEOUT_ENABLED;
	if (softs->timeout_t) {
		(void) untimeout(softs->timeout_t);
		softs->timeout_t = 0;
	}
	mutex_exit(&softs->periodic_mutex);

	for (i = 0; i < softs->sg_max_count; i++) {
		(void) ddi_dma_unbind_handle(
		    softs->sg_items[i].sg_handle);
		(void) ddi_dma_mem_free(
		    &((softs->sg_items[i]).sg_acc_handle));
		(void) ddi_dma_free_handle(
		    &(softs->sg_items[i].sg_handle));
	}

	(void) ddi_dma_unbind_handle(softs->mbox_dma_handle);
	(void) ddi_dma_mem_free(&softs->mbox_acc_handle);
	(void) ddi_dma_free_handle(&softs->mbox_dma_handle);

	/* disconnect the interrupt handler */
	ddi_remove_intr(softs->dev_info_p, 0, softs->iblock_cookiep);

	/* wait for any in-progress interrupts to complete */
	AMR_DELAY((softs->amr_interrupts_counter == 0), 1000, done_flag);
	if (!done_flag) {
		cmn_err(CE_WARN, "Suspicious interrupts in-progress.");
	}

	ddi_taskq_destroy(softs->amr_taskq);

	(void) scsi_hba_detach(dev);
	scsi_hba_tran_free(softs->hba_tran);
	ddi_regs_map_free(&softs->regsmap_handle);
	pci_config_teardown(&softs->pciconfig_handle);

	mutex_destroy(&softs->queue_mutex);
	mutex_destroy(&softs->cmd_mutex);
	mutex_destroy(&softs->periodic_mutex);
	cv_destroy(&softs->cmd_cv);

	/* print firmware information in verbose mode */
	cmn_err(CE_NOTE, "?MegaRaid %s %s detached.",
	    softs->amr_product_info.pi_product_name,
	    softs->amr_product_info.pi_firmware_ver);

	ddi_soft_state_free(amr_softstatep, instance);

	return (DDI_SUCCESS);
}


/*ARGSUSED*/
static int
amr_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	struct amr_softs *softs;
	int instance;

	instance = ddi_get_instance(dip);

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		softs = ddi_get_soft_state(amr_softstatep, instance);
		if (softs != NULL) {
			*result = softs->dev_info_p;
			return (DDI_SUCCESS);
		} else {
			*result = NULL;
			return (DDI_FAILURE);
		}
	case DDI_INFO_DEVT2INSTANCE:
		*(int *)result = instance;
		break;
	default:
		break;
	}
	return (DDI_SUCCESS);
}

/*
 * Take an interrupt, or be poked by other code to look for interrupt-worthy
 * status.
 */
static uint_t
amr_intr(caddr_t arg)
{
	struct amr_softs *softs = (struct amr_softs *)arg;

	softs->amr_interrupts_counter++;

	if (AMR_QGET_ODB(softs) != AMR_QODB_READY) {
		softs->amr_interrupts_counter--;
		return (DDI_INTR_UNCLAIMED);
	}

	/* collect finished commands, queue anything waiting */
	amr_done(softs);

	softs->amr_interrupts_counter--;

	return (DDI_INTR_CLAIMED);
}

/*
 * Setup the amr mailbox
 */
static int
amr_setup_mbox(struct amr_softs *softs)
{
	uint32_t move;
	size_t mbox_len;

	if (ddi_dma_alloc_handle(
	    softs->dev_info_p,
	    &addr_dma_attr,
	    DDI_DMA_SLEEP,
	    NULL,
	    &softs->mbox_dma_handle) != DDI_SUCCESS) {
		AMRDB_PRINT((CE_NOTE, "Cannot alloc dma handle for mailbox"));
		goto error_out;
	}

	if (ddi_dma_mem_alloc(
	    softs->mbox_dma_handle,
	    sizeof (struct amr_mailbox) + 16,
	    &accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP,
	    NULL,
	    (caddr_t *)(&softs->mbox),
	    &mbox_len,
	    &softs->mbox_acc_handle) !=
	    DDI_SUCCESS) {

		AMRDB_PRINT((CE_WARN, "Cannot alloc dma memory for mailbox"));
		goto error_out;
	}

	if (ddi_dma_addr_bind_handle(
	    softs->mbox_dma_handle,
	    NULL,
	    (caddr_t)softs->mbox,
	    mbox_len,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP,
	    NULL,
	    &softs->mbox_dma_cookie,
	    &softs->mbox_dma_cookien) != DDI_DMA_MAPPED) {

		AMRDB_PRINT((CE_NOTE, "Cannot bind dma memory for mailbox"));
		goto error_out;
	}

	if (softs->mbox_dma_cookien != 1)
		goto error_out;

	/*
	 * The physical address of the mailbox must be aligned on a
	 * 16-byte boundary.
	 */
	move = 16 - (((uint32_t)softs->mbox_dma_cookie.dmac_address) & 0xf);
	softs->mbox_phyaddr =
	    (softs->mbox_dma_cookie.dmac_address + move);

	softs->mailbox =
	    (struct amr_mailbox *)(((uintptr_t)softs->mbox) + move);
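	/*
	 * For illustration: a cookie address ending in 0x9 yields
	 * move = 7, placing mbox_phyaddr on the next 16-byte boundary;
	 * the extra 16 bytes requested at allocation time guarantee
	 * that the shifted mailbox still fits.
	 */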

	AMRDB_PRINT((CE_NOTE, "phyaddr=%x, mailbox=%p, softs->mbox=%p, move=%x",
	    softs->mbox_phyaddr, (void *)softs->mailbox,
	    softs->mbox, move));

	return (DDI_SUCCESS);

error_out:
	if (softs->mbox_dma_cookien)
		(void) ddi_dma_unbind_handle(softs->mbox_dma_handle);
	if (softs->mbox_acc_handle) {
		(void) ddi_dma_mem_free(&(softs->mbox_acc_handle));
		softs->mbox_acc_handle = NULL;
	}
	if (softs->mbox_dma_handle) {
		(void) ddi_dma_free_handle(&softs->mbox_dma_handle);
		softs->mbox_dma_handle = NULL;
	}

	return (DDI_FAILURE);
}

/*
 * Perform a periodic check of the controller status
 */
static void
amr_periodic(void *data)
{
	uint32_t i;
	struct amr_softs *softs = (struct amr_softs *)data;
	struct scsi_pkt *pkt;
	register struct amr_command *ac;

	for (i = 0; i < softs->sg_max_count; i++) {
		if (softs->busycmd[i] == NULL)
			continue;

		mutex_enter(&softs->cmd_mutex);

		if (softs->busycmd[i] == NULL) {
			mutex_exit(&softs->cmd_mutex);
			continue;
		}

		pkt = softs->busycmd[i]->pkt;

		if ((pkt->pkt_time != 0) &&
		    (ddi_get_time() -
		    softs->busycmd[i]->ac_timestamp >
		    pkt->pkt_time)) {

			cmn_err(CE_WARN,
			    "!timed out packet detected, "
			    "sc = %p, pkt = %p, index = %d, ac = %p",
			    (void *)softs,
			    (void *)pkt,
			    i,
			    (void *)softs->busycmd[i]);

			ac = softs->busycmd[i];
			ac->ac_next = NULL;

			/* pull command from the busy index */
			softs->busycmd[i] = NULL;
			if (softs->amr_busyslots > 0)
				softs->amr_busyslots--;
			if (softs->amr_busyslots == 0)
				cv_broadcast(&softs->cmd_cv);

			mutex_exit(&softs->cmd_mutex);

			pkt = ac->pkt;
			*pkt->pkt_scbp = 0;
			pkt->pkt_statistics |= STAT_TIMEOUT;
			pkt->pkt_reason = CMD_TIMEOUT;
			if (!(pkt->pkt_flags & FLAG_NOINTR)) {
				/* call pkt callback */
				scsi_hba_pkt_comp(pkt);
			}

		} else {
			mutex_exit(&softs->cmd_mutex);
		}
	}

	/* restart the amr timer */
	mutex_enter(&softs->periodic_mutex);
	if (softs->state & AMR_STATE_TIMEOUT_ENABLED)
		softs->timeout_t = timeout(amr_periodic, (void *)softs,
		    drv_usectohz(500000 * AMR_PERIODIC_TIMEOUT));
	mutex_exit(&softs->periodic_mutex);
}

/*
 * Interrogate the controller for the operational parameters we require.
 */
static int
amr_query_controller(struct amr_softs *softs)
{
	struct amr_enquiry3 *aex;
	struct amr_prodinfo *ap;
	struct amr_enquiry *ae;
	uint32_t ldrv;
	int instance;

	/*
	 * If we haven't found the real limit yet, let us have a couple of
	 * commands in order to be able to probe.
	 */
	if (softs->maxio == 0)
		softs->maxio = 2;

	instance = ddi_get_instance(softs->dev_info_p);

	/*
	 * Try to issue an ENQUIRY3 command
	 */
	if ((aex = amr_enquiry(softs, AMR_ENQ_BUFFER_SIZE, AMR_CMD_CONFIG,
	    AMR_CONFIG_ENQ3, AMR_CONFIG_ENQ3_SOLICITED_FULL)) != NULL) {

		AMRDB_PRINT((CE_NOTE, "First enquiry"));

		for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
			softs->logic_drive[ldrv].al_size =
			    aex->ae_drivesize[ldrv];
			softs->logic_drive[ldrv].al_state =
			    aex->ae_drivestate[ldrv];
			softs->logic_drive[ldrv].al_properties =
			    aex->ae_driveprop[ldrv];
			AMRDB_PRINT((CE_NOTE,
			    " drive %d: size: %d state %x properties %x\n",
			    ldrv,
			    softs->logic_drive[ldrv].al_size,
			    softs->logic_drive[ldrv].al_state,
			    softs->logic_drive[ldrv].al_properties));

			if (softs->logic_drive[ldrv].al_state ==
			    AMR_LDRV_OFFLINE)
				cmn_err(CE_NOTE,
				    "!instance %d log-drive %d is offline",
				    instance, ldrv);
			else
				softs->amr_nlogdrives++;
		}
		kmem_free(aex, AMR_ENQ_BUFFER_SIZE);

		if ((ap = amr_enquiry(softs, AMR_ENQ_BUFFER_SIZE,
		    AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0)) == NULL) {
			AMRDB_PRINT((CE_NOTE,
			    "Cannot obtain product data from controller"));
			return (EIO);
		}

		softs->maxdrives = AMR_40LD_MAXDRIVES;
		softs->maxchan = ap->ap_nschan;
		softs->maxio = ap->ap_maxio;

		bcopy(ap->ap_firmware, softs->amr_product_info.pi_firmware_ver,
		    AMR_FIRMWARE_VER_SIZE);
		softs->amr_product_info.
		    pi_firmware_ver[AMR_FIRMWARE_VER_SIZE] = 0;

		bcopy(ap->ap_product, softs->amr_product_info.pi_product_name,
		    AMR_PRODUCT_INFO_SIZE);
		softs->amr_product_info.
		    pi_product_name[AMR_PRODUCT_INFO_SIZE] = 0;

		kmem_free(ap, AMR_ENQ_BUFFER_SIZE);
		AMRDB_PRINT((CE_NOTE, "maxio=%d", softs->maxio));
	} else {

		AMRDB_PRINT((CE_NOTE,
		    "First enquiry failed, so try another way"));

		/* failed, try the 8LD ENQUIRY commands */
		if ((ae = (struct amr_enquiry *)amr_enquiry(softs,
		    AMR_ENQ_BUFFER_SIZE, AMR_CMD_EXT_ENQUIRY2, 0, 0))
		    == NULL) {

			if ((ae = (struct amr_enquiry *)amr_enquiry(softs,
			    AMR_ENQ_BUFFER_SIZE, AMR_CMD_ENQUIRY, 0, 0))
			    == NULL) {
				AMRDB_PRINT((CE_NOTE,
				    "Cannot obtain configuration data"));
				return (EIO);
			}
			ae->ae_signature = 0;
		}

		/*
		 * Fetch current state of logical drives.
		 */
		for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
			softs->logic_drive[ldrv].al_size =
			    ae->ae_ldrv.al_size[ldrv];
			softs->logic_drive[ldrv].al_state =
			    ae->ae_ldrv.al_state[ldrv];
			softs->logic_drive[ldrv].al_properties =
			    ae->ae_ldrv.al_properties[ldrv];
			AMRDB_PRINT((CE_NOTE,
			    " ********* drive %d: %d state %x properties %x",
			    ldrv,
			    softs->logic_drive[ldrv].al_size,
			    softs->logic_drive[ldrv].al_state,
			    softs->logic_drive[ldrv].al_properties));

			if (softs->logic_drive[ldrv].al_state ==
			    AMR_LDRV_OFFLINE)
				cmn_err(CE_NOTE,
				    "!instance %d log-drive %d is offline",
				    instance, ldrv);
			else
				softs->amr_nlogdrives++;
		}

		softs->maxdrives = AMR_8LD_MAXDRIVES;
		softs->maxchan = ae->ae_adapter.aa_channels;
		softs->maxio = ae->ae_adapter.aa_maxio;
		kmem_free(ae, AMR_ENQ_BUFFER_SIZE);
	}

	/*
	 * Mark remaining drives as unused.
	 */
	for (; ldrv < AMR_MAXLD; ldrv++)
		softs->logic_drive[ldrv].al_state = AMR_LDRV_OFFLINE;

	/*
	 * Cap the maximum number of outstanding I/Os. AMI's driver
	 * doesn't trust the controller's reported value, and lockups have
	 * been seen when we do.
	 */
	softs->maxio = MIN(softs->maxio, AMR_LIMITCMD);

	return (DDI_SUCCESS);
}

/*
 * Run a generic enquiry-style command.
 */
static void *
amr_enquiry(struct amr_softs *softs, size_t bufsize, uint8_t cmd,
    uint8_t cmdsub, uint8_t cmdqual)
{
	struct amr_command ac;
	void *result;

	result = NULL;

	bzero(&ac, sizeof (struct amr_command));
	ac.ac_softs = softs;

	/* set command flags */
	ac.ac_flags |= AMR_CMD_DATAOUT;

	/* build the command proper */
	ac.mailbox.mb_command = cmd;
	ac.mailbox.mb_cmdsub = cmdsub;
	ac.mailbox.mb_cmdqual = cmdqual;

	if (amr_enquiry_mapcmd(&ac, bufsize) != DDI_SUCCESS)
		return (NULL);

	if (amr_poll_command(&ac) || ac.ac_status != 0) {
		AMRDB_PRINT((CE_NOTE, "cannot poll command, goto out"));
		amr_enquiry_unmapcmd(&ac);
		return (NULL);
	}

	/* allocate the response structure */
	result = kmem_zalloc(bufsize, KM_SLEEP);

	bcopy(ac.ac_data, result, bufsize);

	amr_enquiry_unmapcmd(&ac);
	return (result);
}

/*
 * Flush the controller's internal cache, return status.
 */
static int
amr_flush(struct amr_softs *softs)
{
	struct amr_command ac;
	int error = 0;

	bzero(&ac, sizeof (struct amr_command));
	ac.ac_softs = softs;

	ac.ac_flags |= AMR_CMD_DATAOUT;

	/* build the command proper */
	ac.mailbox.mb_command = AMR_CMD_FLUSH;

	/* have to poll, as the system may be going down or otherwise damaged */
	if ((error = amr_poll_command(&ac)) != 0) {
		AMRDB_PRINT((CE_NOTE, "cannot poll this cmd"));
		return (error);
	}

	return (error);
}

/*
 * Take a command, submit it to the controller and wait for it to return.
 * Returns nonzero on error. Can be safely called with interrupts enabled.
 */
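/*
 * The polled mailbox handshake, as implemented below: copy the command
 * into the shared mailbox, ring the doorbell with AMR_QIDB_SUBMIT,
 * wait for mb_nstatus to change from its default, latch mb_status,
 * wait for the controller to post AMR_POLL_ACK in mb_poll, then
 * acknowledge with AMR_QIDB_ACK and wait for that doorbell bit to
 * clear.
 */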
static int
amr_poll_command(struct amr_command *ac)
{
	struct amr_softs *softs = ac->ac_softs;
	volatile uint32_t done_flag;

	AMRDB_PRINT((CE_NOTE, "Amr_Poll bcopy(%p, %p, %d)",
	    (void *)&ac->mailbox,
	    (void *)softs->mailbox,
	    (uint32_t)AMR_MBOX_CMDSIZE));

	mutex_enter(&softs->cmd_mutex);

	while (softs->amr_busyslots != 0)
		cv_wait(&softs->cmd_cv, &softs->cmd_mutex);

	/*
	 * For read/write commands the scatter/gather table must already
	 * be filled in; polled commands borrow the last preallocated
	 * s/g table slot.
	 */
	if ((ac->mailbox.mb_command == AMR_CMD_LREAD) ||
	    (ac->mailbox.mb_command == AMR_CMD_LWRITE)) {
		bcopy(ac->sgtable,
		    softs->sg_items[softs->sg_max_count - 1].sg_table,
		    sizeof (struct amr_sgentry) * AMR_NSEG);

		(void) ddi_dma_sync(
		    softs->sg_items[softs->sg_max_count - 1].sg_handle,
		    0, 0, DDI_DMA_SYNC_FORDEV);

		ac->mailbox.mb_physaddr =
		    softs->sg_items[softs->sg_max_count - 1].sg_phyaddr;
	}

	bcopy(&ac->mailbox, (void *)softs->mailbox, AMR_MBOX_CMDSIZE);

	/* sync the dma memory */
	(void) ddi_dma_sync(softs->mbox_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);

	/* clear the poll/ack fields in the mailbox */
	softs->mailbox->mb_ident = AMR_POLL_COMMAND_ID;
	softs->mailbox->mb_nstatus = AMR_POLL_DEFAULT_NSTATUS;
	softs->mailbox->mb_status = AMR_POLL_DEFAULT_STATUS;
	softs->mailbox->mb_poll = 0;
	softs->mailbox->mb_ack = 0;
	softs->mailbox->mb_busy = 1;

	AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_SUBMIT);

	/* sync the dma memory */
	(void) ddi_dma_sync(softs->mbox_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);

	AMR_DELAY((softs->mailbox->mb_nstatus != AMR_POLL_DEFAULT_NSTATUS),
	    1000, done_flag);
	if (!done_flag) {
		mutex_exit(&softs->cmd_mutex);
		return (1);
	}

	ac->ac_status = softs->mailbox->mb_status;

	AMR_DELAY((softs->mailbox->mb_poll == AMR_POLL_ACK), 1000, done_flag);
	if (!done_flag) {
		mutex_exit(&softs->cmd_mutex);
		return (1);
	}

	softs->mailbox->mb_poll = 0;
	softs->mailbox->mb_ack = AMR_POLL_ACK;

	/* acknowledge that we have the commands */
	AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_ACK);

	AMR_DELAY(!(AMR_QGET_IDB(softs) & AMR_QIDB_ACK), 1000, done_flag);
	if (!done_flag) {
		mutex_exit(&softs->cmd_mutex);
		return (1);
	}

	mutex_exit(&softs->cmd_mutex);
	return (ac->ac_status != AMR_STATUS_SUCCESS);
}

/*
 * setup the scatter/gather table
 */
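/*
 * One s/g table of AMR_NSEG entries is preallocated for each of the
 * AMR_MAXCMD command slots; the last successfully allocated table is
 * also the one amr_poll_command() borrows for polled read/write
 * commands.
 */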
static int
amr_setup_sg(struct amr_softs *softs)
{
	uint32_t i;
	size_t len;
	ddi_dma_cookie_t cookie;
	uint_t cookien;

	softs->sg_max_count = 0;

	for (i = 0; i < AMR_MAXCMD; i++) {

		/* reset the cookien */
		cookien = 0;

		(softs->sg_items[i]).sg_handle = NULL;
		if (ddi_dma_alloc_handle(
		    softs->dev_info_p,
		    &addr_dma_attr,
		    DDI_DMA_SLEEP,
		    NULL,
		    &((softs->sg_items[i]).sg_handle)) != DDI_SUCCESS) {

			AMRDB_PRINT((CE_WARN,
			    "Cannot alloc dma handle for s/g table"));
			goto error_out;
		}

		if (ddi_dma_mem_alloc((softs->sg_items[i]).sg_handle,
		    sizeof (struct amr_sgentry) * AMR_NSEG,
		    &accattr,
		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
		    DDI_DMA_SLEEP, NULL,
		    (caddr_t *)(&(softs->sg_items[i]).sg_table),
		    &len,
		    &(softs->sg_items[i]).sg_acc_handle)
		    != DDI_SUCCESS) {

			AMRDB_PRINT((CE_WARN,
			    "Cannot allocate DMA memory"));
			goto error_out;
		}

		if (ddi_dma_addr_bind_handle(
		    (softs->sg_items[i]).sg_handle,
		    NULL,
		    (caddr_t)((softs->sg_items[i]).sg_table),
		    len,
		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
		    DDI_DMA_SLEEP,
		    NULL,
		    &cookie,
		    &cookien) != DDI_DMA_MAPPED) {

			AMRDB_PRINT((CE_WARN,
			    "Cannot bind communication area for s/g table"));
			goto error_out;
		}

		if (cookien != 1)
			goto error_out;

		softs->sg_items[i].sg_phyaddr = cookie.dmac_address;
		softs->sg_max_count++;
	}

	return (DDI_SUCCESS);

error_out:
	/*
	 * Couldn't allocate/initialize all of the sg table entries.
	 * Clean up the partially-initialized entry before returning.
	 */
	if (cookien) {
		(void) ddi_dma_unbind_handle((softs->sg_items[i]).sg_handle);
	}
	if ((softs->sg_items[i]).sg_acc_handle) {
		(void) ddi_dma_mem_free(&((softs->sg_items[i]).sg_acc_handle));
		(softs->sg_items[i]).sg_acc_handle = NULL;
	}
	if ((softs->sg_items[i]).sg_handle) {
		(void) ddi_dma_free_handle(&((softs->sg_items[i]).sg_handle));
		(softs->sg_items[i]).sg_handle = NULL;
	}

	/*
	 * At least two sg table entries are needed. One is for regular data
	 * I/O commands, the other is for poll I/O commands.
	 */
	return (softs->sg_max_count > 1 ? DDI_SUCCESS : DDI_FAILURE);
}

/*
 * Map/unmap (ac)'s data in the controller's addressable space as required.
 *
 * These functions may be safely called multiple times on a given command.
 */
static void
amr_setup_dmamap(struct amr_command *ac, ddi_dma_cookie_t *buffer_dma_cookiep,
    int nsegments)
{
	struct amr_sgentry *sg;
	uint32_t i, size;

	sg = ac->sgtable;

	size = 0;

	ac->mailbox.mb_nsgelem = (uint8_t)nsegments;
	for (i = 0; i < nsegments; i++, sg++) {
		sg->sg_addr = buffer_dma_cookiep->dmac_address;
		sg->sg_count = buffer_dma_cookiep->dmac_size;
		size += sg->sg_count;

		/*
		 * Advance to the next cookie unless the end of the
		 * current window has been reached.
		 */
		if ((ac->current_cookie + i + 1) != ac->num_of_cookie)
			ddi_dma_nextcookie(ac->buffer_dma_handle,
			    buffer_dma_cookiep);
	}

	ac->transfer_size = size;
	ac->data_transfered += size;
}


/*
 * map the amr command for enquiry, allocate the DMA resource
 */
static int
amr_enquiry_mapcmd(struct amr_command *ac, uint32_t data_size)
{
	struct amr_softs *softs = ac->ac_softs;
	size_t len;
	uint_t dma_flags;

	AMRDB_PRINT((CE_NOTE, "Amr_enquiry_mapcmd called, ac=%p, flags=%x",
	    (void *)ac, ac->ac_flags));

	if (ac->ac_flags & AMR_CMD_DATAOUT) {
		dma_flags = DDI_DMA_READ;
	} else {
		dma_flags = DDI_DMA_WRITE;
	}

	dma_flags |= DDI_DMA_CONSISTENT;

	/* process the DMA by address bind mode */
	if (ddi_dma_alloc_handle(softs->dev_info_p,
	    &addr_dma_attr, DDI_DMA_SLEEP, NULL,
	    &ac->buffer_dma_handle) !=
	    DDI_SUCCESS) {

		AMRDB_PRINT((CE_WARN,
		    "Cannot allocate addr DMA tag"));
		goto error_out;
	}

	if (ddi_dma_mem_alloc(ac->buffer_dma_handle,
	    data_size,
	    &accattr,
	    dma_flags,
	    DDI_DMA_SLEEP,
	    NULL,
	    (caddr_t *)&ac->ac_data,
	    &len,
	    &ac->buffer_acc_handle) !=
	    DDI_SUCCESS) {

		AMRDB_PRINT((CE_WARN,
		    "Cannot allocate DMA memory"));
		goto error_out;
	}

	if ((ddi_dma_addr_bind_handle(
	    ac->buffer_dma_handle,
	    NULL, ac->ac_data, len, dma_flags,
	    DDI_DMA_SLEEP, NULL, &ac->buffer_dma_cookie,
	    &ac->num_of_cookie)) != DDI_DMA_MAPPED) {

		AMRDB_PRINT((CE_WARN,
		    "Cannot bind addr for dma"));
		goto error_out;
	}

	ac->ac_dataphys = (&ac->buffer_dma_cookie)->dmac_address;

	((struct amr_mailbox *)&(ac->mailbox))->mb_param = 0;
	ac->mailbox.mb_nsgelem = 0;
	ac->mailbox.mb_physaddr = ac->ac_dataphys;

	ac->ac_flags |= AMR_CMD_MAPPED;

	return (DDI_SUCCESS);

error_out:
	if (ac->num_of_cookie)
		(void) ddi_dma_unbind_handle(ac->buffer_dma_handle);
	if (ac->buffer_acc_handle) {
		ddi_dma_mem_free(&ac->buffer_acc_handle);
		ac->buffer_acc_handle = NULL;
	}
	if (ac->buffer_dma_handle) {
		(void) ddi_dma_free_handle(&ac->buffer_dma_handle);
		ac->buffer_dma_handle = NULL;
	}

	return (DDI_FAILURE);
}

/*
 * unmap the amr command for enquiry, free the DMA resource
 */
static void
amr_enquiry_unmapcmd(struct amr_command *ac)
{
	AMRDB_PRINT((CE_NOTE, "Amr_enquiry_unmapcmd called, ac=%p",
	    (void *)ac));

	/* if the command involved data at all and was mapped */
	if ((ac->ac_flags & AMR_CMD_MAPPED) && ac->ac_data) {
		if (ac->buffer_dma_handle)
			(void) ddi_dma_unbind_handle(
			    ac->buffer_dma_handle);
		if (ac->buffer_acc_handle) {
			ddi_dma_mem_free(&ac->buffer_acc_handle);
			ac->buffer_acc_handle = NULL;
		}
		if (ac->buffer_dma_handle) {
			(void) ddi_dma_free_handle(
			    &ac->buffer_dma_handle);
			ac->buffer_dma_handle = NULL;
		}
	}

	ac->ac_flags &= ~AMR_CMD_MAPPED;
}

/*
 * map the amr command, allocate the DMA resource
 */
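/*
 * With DDI_DMA_PARTIAL the bind may cover the buffer with several DMA
 * windows, each described by a run of cookies. A single call below
 * loads at most AMR_NSEG cookies into the command's s/g table and
 * records its position in current_win/current_cookie, so a later call
 * can continue where this one stopped; current_cookie is set to
 * AMR_LAST_COOKIE_TAG once a window has been consumed.
 */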
static int
amr_mapcmd(struct amr_command *ac, int (*callback)(), caddr_t arg)
{
	uint_t dma_flags;
	off_t off;
	size_t len;
	int error;
	int (*cb)(caddr_t);

	AMRDB_PRINT((CE_NOTE, "Amr_mapcmd called, ac=%p, flags=%x",
	    (void *)ac, ac->ac_flags));

	if (ac->ac_flags & AMR_CMD_DATAOUT) {
		dma_flags = DDI_DMA_READ;
	} else {
		dma_flags = DDI_DMA_WRITE;
	}

	if (ac->ac_flags & AMR_CMD_PKT_CONSISTENT) {
		dma_flags |= DDI_DMA_CONSISTENT;
	}
	if (ac->ac_flags & AMR_CMD_PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	if ((!(ac->ac_flags & AMR_CMD_MAPPED)) && (ac->ac_buf == NULL)) {
		ac->ac_flags |= AMR_CMD_MAPPED;
		return (DDI_SUCCESS);
	}

	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	/* if the command involves data at all, and hasn't been mapped */
	if (!(ac->ac_flags & AMR_CMD_MAPPED)) {
		/* process the DMA by buffer bind mode */
		error = ddi_dma_buf_bind_handle(ac->buffer_dma_handle,
		    ac->ac_buf,
		    dma_flags,
		    cb,
		    arg,
		    &ac->buffer_dma_cookie,
		    &ac->num_of_cookie);
		switch (error) {
		case DDI_DMA_PARTIAL_MAP:
			if (ddi_dma_numwin(ac->buffer_dma_handle,
			    &ac->num_of_win) == DDI_FAILURE) {

				AMRDB_PRINT((CE_WARN,
				    "Cannot get dma num win"));
				(void) ddi_dma_unbind_handle(
				    ac->buffer_dma_handle);
				(void) ddi_dma_free_handle(
				    &ac->buffer_dma_handle);
				ac->buffer_dma_handle = NULL;
				return (DDI_FAILURE);
			}
			ac->current_win = 0;
			break;

		case DDI_DMA_MAPPED:
			ac->num_of_win = 1;
			ac->current_win = 0;
			break;

		default:
			AMRDB_PRINT((CE_WARN,
			    "Cannot bind buf for dma"));

			(void) ddi_dma_free_handle(
			    &ac->buffer_dma_handle);
			ac->buffer_dma_handle = NULL;
			return (DDI_FAILURE);
		}

		ac->current_cookie = 0;

		ac->ac_flags |= AMR_CMD_MAPPED;
	} else if (ac->current_cookie == AMR_LAST_COOKIE_TAG) {
		/* get the next window */
		ac->current_win++;
		(void) ddi_dma_getwin(ac->buffer_dma_handle,
		    ac->current_win, &off, &len,
		    &ac->buffer_dma_cookie,
		    &ac->num_of_cookie);
		ac->current_cookie = 0;
	}

	if ((ac->num_of_cookie - ac->current_cookie) > AMR_NSEG) {
		amr_setup_dmamap(ac, &ac->buffer_dma_cookie, AMR_NSEG);
		ac->current_cookie += AMR_NSEG;
	} else {
		amr_setup_dmamap(ac, &ac->buffer_dma_cookie,
		    ac->num_of_cookie - ac->current_cookie);
		ac->current_cookie = AMR_LAST_COOKIE_TAG;
	}

	return (DDI_SUCCESS);
}

/*
 * unmap the amr command, free the DMA resource
 */
static void
amr_unmapcmd(struct amr_command *ac)
{
	AMRDB_PRINT((CE_NOTE, "Amr_unmapcmd called, ac=%p",
	    (void *)ac));

	/* if the command involved data at all and was mapped */
	if ((ac->ac_flags & AMR_CMD_MAPPED) &&
	    ac->ac_buf && ac->buffer_dma_handle)
		(void) ddi_dma_unbind_handle(ac->buffer_dma_handle);

	ac->ac_flags &= ~AMR_CMD_MAPPED;
}

static int
amr_setup_tran(dev_info_t *dip, struct amr_softs *softp)
{
	softp->hba_tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);

	/*
	 * hba_private always points to the amr_softs struct
	 */
	softp->hba_tran->tran_hba_private = softp;
	softp->hba_tran->tran_tgt_init = amr_tran_tgt_init;
	softp->hba_tran->tran_tgt_probe = scsi_hba_probe;
	softp->hba_tran->tran_start = amr_tran_start;
	softp->hba_tran->tran_reset = amr_tran_reset;
	softp->hba_tran->tran_getcap = amr_tran_getcap;
	softp->hba_tran->tran_setcap = amr_tran_setcap;
	softp->hba_tran->tran_init_pkt = amr_tran_init_pkt;
	softp->hba_tran->tran_destroy_pkt = amr_tran_destroy_pkt;
	softp->hba_tran->tran_dmafree = amr_tran_dmafree;
	softp->hba_tran->tran_sync_pkt = amr_tran_sync_pkt;
	softp->hba_tran->tran_abort = NULL;
	softp->hba_tran->tran_tgt_free = NULL;
	softp->hba_tran->tran_quiesce = NULL;
	softp->hba_tran->tran_unquiesce = NULL;
	softp->hba_tran->tran_sd = NULL;

	if (scsi_hba_attach_setup(dip, &buffer_dma_attr, softp->hba_tran,
	    SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
		scsi_hba_tran_free(softp->hba_tran);
		softp->hba_tran = NULL;
		return (DDI_FAILURE);
	} else {
		return (DDI_SUCCESS);
	}
}

/*ARGSUSED*/
static int
amr_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	struct amr_softs *softs;
	ushort_t target = sd->sd_address.a_target;
	uchar_t lun = sd->sd_address.a_lun;

	softs = (struct amr_softs *)
	    (sd->sd_address.a_hba_tran->tran_hba_private);

	if ((lun == 0) && (target < AMR_MAXLD))
		if (softs->logic_drive[target].al_state != AMR_LDRV_OFFLINE)
			return (DDI_SUCCESS);

	return (DDI_FAILURE);
}

static int
amr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct amr_softs *softs;
	struct buf *bp = NULL;
	union scsi_cdb *cdbp = (union scsi_cdb *)pkt->pkt_cdbp;
	int ret;
	uint32_t capacity;
	struct amr_command *ac;

	AMRDB_PRINT((CE_NOTE, "amr_tran_start, cmd=%X,target=%d,lun=%d",
	    cdbp->scc_cmd, ap->a_target, ap->a_lun));

	softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);
	if ((ap->a_lun != 0) || (ap->a_target >= AMR_MAXLD) ||
	    (softs->logic_drive[ap->a_target].al_state ==
	    AMR_LDRV_OFFLINE)) {
		cmn_err(CE_WARN, "target or lun is not correct!");
		ret = TRAN_BADPKT;
		return (ret);
	}

	ac = (struct amr_command *)pkt->pkt_ha_private;
	bp = ac->ac_buf;

	AMRDB_PRINT((CE_NOTE, "scsi cmd accepted, cmd=%X", cdbp->scc_cmd));

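	/*
	 * Only reads and writes are handed to the controller; INQUIRY,
	 * READ CAPACITY, MODE SENSE and the other housekeeping commands
	 * are emulated below from the data gathered at attach time.
	 */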
	switch (cdbp->scc_cmd) {
	case SCMD_READ:			/* read */
	case SCMD_READ_G1:		/* read g1 */
	case SCMD_READ_BUFFER:		/* read buffer */
	case SCMD_WRITE:		/* write */
	case SCMD_WRITE_G1:		/* write g1 */
	case SCMD_WRITE_BUFFER:		/* write buffer */
		amr_rw_command(softs, pkt, ap->a_target);

		if (pkt->pkt_flags & FLAG_NOINTR) {
			(void) amr_poll_command(ac);
			pkt->pkt_state |= (STATE_GOT_BUS
			    | STATE_GOT_TARGET
			    | STATE_SENT_CMD
			    | STATE_XFERRED_DATA);
			*pkt->pkt_scbp = 0;
			pkt->pkt_statistics |= STAT_SYNC;
			pkt->pkt_reason = CMD_CMPLT;
		} else {
			mutex_enter(&softs->queue_mutex);
			if (softs->waiting_q_head == NULL) {
				ac->ac_prev = NULL;
				ac->ac_next = NULL;
				softs->waiting_q_head = ac;
				softs->waiting_q_tail = ac;
			} else {
				ac->ac_next = NULL;
				ac->ac_prev = softs->waiting_q_tail;
				softs->waiting_q_tail->ac_next = ac;
				softs->waiting_q_tail = ac;
			}
			mutex_exit(&softs->queue_mutex);
			amr_start_waiting_queue((void *)softs);
		}
		ret = TRAN_ACCEPT;
		break;

	case SCMD_INQUIRY:		/* inquiry */
		if (bp && bp->b_un.b_addr && bp->b_bcount) {
			struct scsi_inquiry inqp;
			uint8_t *sinq_p = (uint8_t *)&inqp;

			bzero(&inqp, sizeof (struct scsi_inquiry));

			if (((char *)cdbp)[1] || ((char *)cdbp)[2]) {
				/*
				 * EVPD and page codes are not
				 * supported
				 */
				sinq_p[1] = 0xFF;
				sinq_p[2] = 0x0;
			} else {
				inqp.inq_len = AMR_INQ_ADDITIONAL_LEN;
				inqp.inq_ansi = AMR_INQ_ANSI_VER;
				inqp.inq_rdf = AMR_INQ_RESP_DATA_FORMAT;
				/* Enable Tag Queue */
				inqp.inq_cmdque = 1;
				bcopy("MegaRaid", inqp.inq_vid,
				    sizeof (inqp.inq_vid));
				bcopy(softs->amr_product_info.pi_product_name,
				    inqp.inq_pid,
				    AMR_PRODUCT_INFO_SIZE);
				bcopy(softs->amr_product_info.pi_firmware_ver,
				    inqp.inq_revision,
				    AMR_FIRMWARE_VER_SIZE);
			}

			amr_unmapcmd(ac);

			if (bp->b_flags & (B_PHYS | B_PAGEIO))
				bp_mapin(bp);
			bcopy(&inqp, bp->b_un.b_addr,
			    sizeof (struct scsi_inquiry));

			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_state |= (STATE_GOT_BUS
		    | STATE_GOT_TARGET
		    | STATE_SENT_CMD);
		*pkt->pkt_scbp = 0;
		ret = TRAN_ACCEPT;
		if (!(pkt->pkt_flags & FLAG_NOINTR))
			scsi_hba_pkt_comp(pkt);
		break;

	case SCMD_READ_CAPACITY:	/* read capacity */
		if (bp && bp->b_un.b_addr && bp->b_bcount) {
			struct scsi_capacity cp;

			capacity = softs->logic_drive[ap->a_target].al_size - 1;
			cp.capacity = BE_32(capacity);
			cp.lbasize = BE_32(512);

			amr_unmapcmd(ac);

			if (bp->b_flags & (B_PHYS | B_PAGEIO))
				bp_mapin(bp);
			bcopy(&cp, bp->b_un.b_addr, 8);
		}
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_state |= (STATE_GOT_BUS
		    | STATE_GOT_TARGET
		    | STATE_SENT_CMD
		    | STATE_XFERRED_DATA);
		*pkt->pkt_scbp = 0;
		ret = TRAN_ACCEPT;
		if (!(pkt->pkt_flags & FLAG_NOINTR))
			scsi_hba_pkt_comp(pkt);
		break;

	case SCMD_MODE_SENSE:		/* mode sense */
	case SCMD_MODE_SENSE_G1:	/* mode sense g1 */
		amr_unmapcmd(ac);

		capacity = softs->logic_drive[ap->a_target].al_size - 1;
		amr_mode_sense(cdbp, bp, capacity);

		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_state |= (STATE_GOT_BUS
		    | STATE_GOT_TARGET
		    | STATE_SENT_CMD
		    | STATE_XFERRED_DATA);
		*pkt->pkt_scbp = 0;
		ret = TRAN_ACCEPT;
		if (!(pkt->pkt_flags & FLAG_NOINTR))
			scsi_hba_pkt_comp(pkt);
		break;

	case SCMD_TEST_UNIT_READY:	/* test unit ready */
	case SCMD_REQUEST_SENSE:	/* request sense */
	case SCMD_FORMAT:		/* format */
	case SCMD_START_STOP:		/* start stop */
	case SCMD_SYNCHRONIZE_CACHE:	/* synchronize cache */
		if (bp && bp->b_un.b_addr && bp->b_bcount) {
			amr_unmapcmd(ac);

			if (bp->b_flags & (B_PHYS | B_PAGEIO))
				bp_mapin(bp);
			bzero(bp->b_un.b_addr, bp->b_bcount);

			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_state |= (STATE_GOT_BUS
		    | STATE_GOT_TARGET
		    | STATE_SENT_CMD);
		ret = TRAN_ACCEPT;
		*pkt->pkt_scbp = 0;
		if (!(pkt->pkt_flags & FLAG_NOINTR))
			scsi_hba_pkt_comp(pkt);
		break;

	default:			/* any other commands */
		amr_unmapcmd(ac);
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state = (STATE_GOT_BUS
		    | STATE_GOT_TARGET
		    | STATE_SENT_CMD
		    | STATE_GOT_STATUS
		    | STATE_ARQ_DONE);
		ret = TRAN_ACCEPT;
		*pkt->pkt_scbp = 0;
		amr_set_arq_data(pkt, KEY_ILLEGAL_REQUEST);
		if (!(pkt->pkt_flags & FLAG_NOINTR))
			scsi_hba_pkt_comp(pkt);
		break;
	}

	return (ret);
}

/*
 * tran_reset() resets the bus/target/adapter to support fault recovery,
 * according to the "level" in the interface. However, we have
 * confirmation from LSI that these HBA cards do not support any command
 * to reset the bus/target/adapter/channel.
 *
 * If tran_reset() returns FAILURE to sd, the system will not continue
 * to dump the core. But a core dump is a crucial method for analyzing
 * problems in panic. We therefore adopt a workaround, which is to
 * return a fake SUCCESS to sd during panic; this forces the system to
 * continue dumping core, though the core may have problems in some
 * situations because some on-the-fly commands will continue DMAing
 * data to memory. In addition, the workaround core dump method may not
 * be performed successfully if the panic is caused by the HBA itself,
 * so the workaround is not a good example for an implementation of
 * tran_reset(); the most reasonable approach would send a reset
 * command to the adapter.
 */
/*ARGSUSED*/
static int
amr_tran_reset(struct scsi_address *ap, int level)
{
	struct amr_softs *softs;
	volatile uint32_t done_flag;

	if (ddi_in_panic()) {
		softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);

		/* Acknowledge the card while any commands are outstanding */
		while (softs->amr_busyslots > 0) {
			AMR_DELAY((softs->mailbox->mb_busy == 0),
			    AMR_RETRYCOUNT, done_flag);
			if (!done_flag) {
				/*
				 * command not completed; report the
				 * problem and give up
				 */
				cmn_err(CE_WARN,
				    "AMR command is not completed");
				return (0);
			}

			AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_ACK);

			/* wait for the acknowledgement from the hardware */
			AMR_BUSYWAIT(!(AMR_QGET_IDB(softs) & AMR_QIDB_ACK),
			    AMR_RETRYCOUNT, done_flag);
			if (!done_flag) {
				/*
				 * no answer from the hardware; report
				 * the problem and give up
				 */
				cmn_err(CE_WARN, "No answer from the hardware");

				return (0);
			}

			softs->amr_busyslots -= softs->mailbox->mb_nstatus;
		}

		/* flush the controller */
		(void) amr_flush(softs);

		/*
		 * If the system is in panic, tran_reset() returns a fake
		 * SUCCESS to sd, and the system then continues to dump the
		 * core using polled commands. This is a workaround for
		 * dumping core in panic.
		 *
		 * Note: Some on-the-fly commands will continue DMAing data to
		 *	 memory while the core is dumping, which may cause
		 *	 some flaws in the dumped core file, so a cmn_err()
		 *	 is printed out to warn users. However, for most
		 *	 cases, the core file will be fine.
		 */
		cmn_err(CE_WARN, "This system contains a SCSI HBA card/driver "
		    "that doesn't support software reset. This "
		    "means that memory being used by the HBA for "
		    "DMA based reads could have been updated after "
		    "we panic'd.");
		return (1);
	} else {
		/* return failure to sd */
		return (0);
	}
}

/*ARGSUSED*/
static int
amr_tran_getcap(struct scsi_address *ap, char *cap, int whom)
{
	struct amr_softs *softs;

	/*
	 * We don't allow inquiring about capabilities for other targets
	 */
	if (cap == NULL || whom == 0)
		return (-1);

	softs = ((struct amr_softs *)(ap->a_hba_tran)->tran_hba_private);

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_ARQ:
		return (1);
	case SCSI_CAP_GEOMETRY:
		return ((AMR_DEFAULT_HEADS << 16) | AMR_DEFAULT_CYLINDERS);
	case SCSI_CAP_SECTOR_SIZE:
		return (AMR_DEFAULT_SECTORS);
	case SCSI_CAP_TOTAL_SECTORS:
		/* number of sectors */
		return (softs->logic_drive[ap->a_target].al_size);
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_TAGGED_QING:
		return (1);
	default:
		return (-1);
	}
}

/*ARGSUSED*/
static int
amr_tran_setcap(struct scsi_address *ap, char *cap, int value,
    int whom)
{
	/*
	 * We don't allow setting capabilities for other targets
	 */
	if (cap == NULL || whom == 0) {
		AMRDB_PRINT((CE_NOTE,
		    "Set Cap not supported, string = %s, whom=%d",
		    cap, whom));
		return (-1);
	}

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_ARQ:
		return (1);
	case SCSI_CAP_TOTAL_SECTORS:
		return (1);
	case SCSI_CAP_SECTOR_SIZE:
		return (1);
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_TAGGED_QING:
		return ((value == 1) ? 1 : 0);
	default:
		return (0);
	}
}

static struct scsi_pkt *
amr_tran_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg)
{
	struct amr_softs *softs;
	struct amr_command *ac;
	uint32_t slen;

	softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);

	if ((ap->a_lun != 0) || (ap->a_target >= AMR_MAXLD) ||
	    (softs->logic_drive[ap->a_target].al_state ==
	    AMR_LDRV_OFFLINE)) {
		return (NULL);
	}

	if (pkt == NULL) {
		/* force auto request sense */
		slen = MAX(statuslen, sizeof (struct scsi_arq_status));

		pkt = scsi_hba_pkt_alloc(softs->dev_info_p, ap, cmdlen,
		    slen, tgtlen, sizeof (struct amr_command),
		    callback, arg);
		if (pkt == NULL) {
			AMRDB_PRINT((CE_WARN, "scsi_hba_pkt_alloc failed"));
			return (NULL);
		}
		pkt->pkt_address = *ap;
		pkt->pkt_comp = (void (*)())NULL;
		pkt->pkt_time = 0;
		pkt->pkt_resid = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;

		ac = (struct amr_command *)pkt->pkt_ha_private;
		ac->ac_buf = bp;
		ac->cmdlen = cmdlen;
		ac->ac_softs = softs;
		ac->pkt = pkt;
		ac->ac_flags &= ~AMR_CMD_GOT_SLOT;
		ac->ac_flags &= ~AMR_CMD_BUSY;

		if ((bp == NULL) || (bp->b_bcount == 0)) {
			return (pkt);
		}

		if (ddi_dma_alloc_handle(softs->dev_info_p, &buffer_dma_attr,
		    DDI_DMA_SLEEP, NULL,
		    &ac->buffer_dma_handle) != DDI_SUCCESS) {

			AMRDB_PRINT((CE_WARN,
			    "Cannot allocate buffer DMA tag"));
			scsi_hba_pkt_free(ap, pkt);
			return (NULL);

		}

	} else {
		if ((bp == NULL) || (bp->b_bcount == 0)) {
			return (pkt);
		}
		ac = (struct amr_command *)pkt->pkt_ha_private;
	}

	ASSERT(ac != NULL);

	if (bp->b_flags & B_READ) {
		ac->ac_flags |= AMR_CMD_DATAOUT;
	} else {
		ac->ac_flags |= AMR_CMD_DATAIN;
	}

	if (flags & PKT_CONSISTENT) {
		ac->ac_flags |= AMR_CMD_PKT_CONSISTENT;
	}

	if (flags & PKT_DMA_PARTIAL) {
		ac->ac_flags |= AMR_CMD_PKT_DMA_PARTIAL;
	}

	if (amr_mapcmd(ac, callback, arg) != DDI_SUCCESS) {
		scsi_hba_pkt_free(ap, pkt);
		return (NULL);
	}

	pkt->pkt_resid = bp->b_bcount - ac->data_transfered;

	AMRDB_PRINT((CE_NOTE,
	    "init pkt, pkt_resid=%d, b_bcount=%d, data_transfered=%d",
	    (uint32_t)pkt->pkt_resid, (uint32_t)bp->b_bcount,
	    ac->data_transfered));

	ASSERT(pkt->pkt_resid >= 0);

	return (pkt);
}
1937
1938 static void
1939 amr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1940 {
1941 struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;
1942
1943 amr_unmapcmd(ac);
1944
1945 if (ac->buffer_dma_handle) {
1946 (void) ddi_dma_free_handle(&ac->buffer_dma_handle);
1947 ac->buffer_dma_handle = NULL;
1948 }
1949
1950 scsi_hba_pkt_free(ap, pkt);
1951 AMRDB_PRINT((CE_NOTE, "Destroy pkt called"));
1952 }

/*ARGSUSED*/
static void
amr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;

	if (ac->buffer_dma_handle) {
		(void) ddi_dma_sync(ac->buffer_dma_handle, 0, 0,
		    (ac->ac_flags & AMR_CMD_DATAIN) ?
		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
	}
}
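
/*
 * Note on the sync direction: with the flag convention established in
 * amr_tran_init_pkt(), AMR_CMD_DATAIN (a host write) syncs FORDEV so
 * the device observes the CPU's stores, while the read path syncs
 * FORCPU so the CPU observes the device's DMA. A minimal sketch of the
 * equivalent direct calls (the handle name is hypothetical):
 *
 *	(void) ddi_dma_sync(hdl, 0, 0, DDI_DMA_SYNC_FORDEV);  before I/O
 *	(void) ddi_dma_sync(hdl, 0, 0, DDI_DMA_SYNC_FORCPU);  after I/O
 */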

/*ARGSUSED*/
static void
amr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;

	if (ac->ac_flags & AMR_CMD_MAPPED) {
		(void) ddi_dma_unbind_handle(ac->buffer_dma_handle);
		(void) ddi_dma_free_handle(&ac->buffer_dma_handle);
		ac->buffer_dma_handle = NULL;
		ac->ac_flags &= ~AMR_CMD_MAPPED;
	}
}

/*ARGSUSED*/
static void
amr_rw_command(struct amr_softs *softs, struct scsi_pkt *pkt, int target)
{
	struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;
	union scsi_cdb *cdbp = (union scsi_cdb *)pkt->pkt_cdbp;
	uint8_t cmd;

	if (ac->ac_flags & AMR_CMD_DATAOUT) {
		cmd = AMR_CMD_LREAD;
	} else {
		cmd = AMR_CMD_LWRITE;
	}

	ac->mailbox.mb_command = cmd;
	ac->mailbox.mb_blkcount =
	    (ac->transfer_size + AMR_BLKSIZE - 1) / AMR_BLKSIZE;
	ac->mailbox.mb_lba = (ac->cmdlen == 10) ?
	    GETG1ADDR(cdbp) : GETG0ADDR(cdbp);
	ac->mailbox.mb_drive = (uint8_t)target;
}
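
/*
 * CDB decoding sketch (illustrative only): GETG1ADDR() extracts the
 * 32-bit LBA from bytes 2-5 of a 10-byte (group 1) CDB, while
 * GETG0ADDR() extracts the 21-bit LBA packed into bytes 1-3 of a
 * 6-byte (group 0) CDB. For example, a READ(10) CDB of
 *
 *	28 00 00 01 02 03 00 00 08 00
 *
 * yields GETG1ADDR() == 0x00010203 with a transfer length of 8 blocks;
 * with AMR_BLKSIZE == 512 and a 4KB transfer_size, mb_blkcount above
 * rounds up to the same 8 blocks.
 */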

static void
amr_mode_sense(union scsi_cdb *cdbp, struct buf *bp, unsigned int capacity)
{
	uchar_t pagecode;
	struct mode_format *page3p;
	struct mode_geometry *page4p;
	struct mode_header *headerp;
	uint32_t ncyl;

	if (!(bp && bp->b_un.b_addr && bp->b_bcount))
		return;

	if (bp->b_flags & (B_PHYS | B_PAGEIO))
		bp_mapin(bp);

	pagecode = cdbp->cdb_un.sg.scsi[0];
	switch (pagecode) {
	case SD_MODE_SENSE_PAGE3_CODE:
		headerp = (struct mode_header *)(bp->b_un.b_addr);
		headerp->bdesc_length = MODE_BLK_DESC_LENGTH;

		page3p = (struct mode_format *)((caddr_t)headerp +
		    MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
		page3p->mode_page.code = BE_8(SD_MODE_SENSE_PAGE3_CODE);
		page3p->mode_page.length = BE_8(sizeof (struct mode_format));
		page3p->data_bytes_sect = BE_16(AMR_DEFAULT_SECTORS);
		page3p->sect_track = BE_16(AMR_DEFAULT_CYLINDERS);

		return;

	case SD_MODE_SENSE_PAGE4_CODE:
		headerp = (struct mode_header *)(bp->b_un.b_addr);
		headerp->bdesc_length = MODE_BLK_DESC_LENGTH;

		page4p = (struct mode_geometry *)((caddr_t)headerp +
		    MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
		page4p->mode_page.code = BE_8(SD_MODE_SENSE_PAGE4_CODE);
		page4p->mode_page.length = BE_8(sizeof (struct mode_geometry));
		page4p->heads = BE_8(AMR_DEFAULT_HEADS);
		page4p->rpm = BE_16(AMR_DEFAULT_ROTATIONS);

		/*
		 * The geometry is synthetic. Despite its name,
		 * AMR_DEFAULT_CYLINDERS is used as the sectors-per-track
		 * figure (see page 3 above), so dividing the capacity in
		 * blocks by heads * sectors-per-track yields cylinders.
		 */
		ncyl = capacity / (AMR_DEFAULT_HEADS * AMR_DEFAULT_CYLINDERS);
		page4p->cyl_lb = BE_8(ncyl & 0xff);
		page4p->cyl_mb = BE_8((ncyl >> 8) & 0xff);
		page4p->cyl_ub = BE_8((ncyl >> 16) & 0xff);

		return;
	default:
		bzero(bp->b_un.b_addr, bp->b_bcount);
		return;
	}
}
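
/*
 * Resulting buffer layout for both supported pages:
 *
 *	+--------------------------+  offset 0
 *	| struct mode_header       |  MODE_HEADER_LENGTH bytes
 *	+--------------------------+
 *	| block descriptor         |  MODE_BLK_DESC_LENGTH bytes
 *	+--------------------------+
 *	| mode page 3 or 4         |  struct mode_format / mode_geometry
 *	+--------------------------+
 *
 * Callers are expected to size bp to hold the header, descriptor and
 * page; a request for any other page code gets an all-zero buffer.
 */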

static void
amr_set_arq_data(struct scsi_pkt *pkt, uchar_t key)
{
	struct scsi_arq_status *arqstat;

	arqstat = (struct scsi_arq_status *)(pkt->pkt_scbp);
	arqstat->sts_status.sts_chk = 1; /* CHECK CONDITION */
	arqstat->sts_rqpkt_reason = CMD_CMPLT;
	arqstat->sts_rqpkt_resid = 0;
	arqstat->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
	    STATE_SENT_CMD | STATE_XFERRED_DATA;
	arqstat->sts_rqpkt_statistics = 0;
	arqstat->sts_sensedata.es_valid = 1;
	arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
	arqstat->sts_sensedata.es_key = key;
}
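
/*
 * Consumer-side sketch (illustrative only): a target driver would
 * recover this simulated sense data in its completion routine roughly
 * as follows (local names are hypothetical):
 *
 *	struct scsi_arq_status *arq =
 *	    (struct scsi_arq_status *)pkt->pkt_scbp;
 *
 *	if ((pkt->pkt_state & STATE_ARQ_DONE) &&
 *	    arq->sts_sensedata.es_key == KEY_HARDWARE_ERROR) {
 *		... retry or fail the command ...
 *	}
 */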

static void
amr_start_waiting_queue(void *softp)
{
	uint32_t slot;
	struct amr_command *ac;
	volatile uint32_t done_flag;
	struct amr_softs *softs = (struct amr_softs *)softp;

	/* serialize queue manipulation and mailbox submission */
	mutex_enter(&softs->queue_mutex);
	mutex_enter(&softs->cmd_mutex);

	while ((ac = softs->waiting_q_head) != NULL) {
		/*
		 * Find an available slot; the last slot is reserved
		 * for polled I/O commands.
		 */
		for (slot = 0; slot < (softs->sg_max_count - 1); slot++) {
			if (softs->busycmd[slot] == NULL) {
				if (AMR_QGET_IDB(softs) & AMR_QIDB_SUBMIT) {
					/*
					 * The hardware accepts only one
					 * submission at a time; try again
					 * on the next dispatch.
					 */
					mutex_exit(&softs->cmd_mutex);
					mutex_exit(&softs->queue_mutex);
					return;
				}

				ac->ac_timestamp = ddi_get_time();

				if (!(ac->ac_flags & AMR_CMD_GOT_SLOT)) {

					softs->busycmd[slot] = ac;
					ac->ac_slot = slot;
					softs->amr_busyslots++;

					bcopy(ac->sgtable,
					    softs->sg_items[slot].sg_table,
					    sizeof (struct amr_sgentry) *
					    AMR_NSEG);

					(void) ddi_dma_sync(
					    softs->sg_items[slot].sg_handle,
					    0, 0, DDI_DMA_SYNC_FORDEV);

					ac->mailbox.mb_physaddr =
					    softs->sg_items[slot].sg_phyaddr;
				}

				/* take the cmd from the queue */
				softs->waiting_q_head = ac->ac_next;

				ac->mailbox.mb_ident = ac->ac_slot + 1;
				ac->mailbox.mb_busy = 1;
				ac->ac_next = NULL;
				ac->ac_prev = NULL;
				ac->ac_flags |= AMR_CMD_GOT_SLOT;

				/* clear the poll/ack fields in the mailbox */
				softs->mailbox->mb_poll = 0;
				softs->mailbox->mb_ack = 0;

				AMR_DELAY((softs->mailbox->mb_busy == 0),
				    AMR_RETRYCOUNT, done_flag);
				if (!done_flag) {
					/*
					 * The mailbox stayed busy; report
					 * the problem and move on to the
					 * next command.
					 */
					cmn_err(CE_WARN,
					    "AMR command is not completed");
					break;
				}

				bcopy(&ac->mailbox, (void *)softs->mailbox,
				    AMR_MBOX_CMDSIZE);
				ac->ac_flags |= AMR_CMD_BUSY;

				(void) ddi_dma_sync(softs->mbox_dma_handle,
				    0, 0, DDI_DMA_SYNC_FORDEV);

				AMR_QPUT_IDB(softs,
				    softs->mbox_phyaddr | AMR_QIDB_SUBMIT);

				/*
				 * The current ac is submitted; quit the
				 * 'for' loop and pick up the next one.
				 */
				break;
			}
		}

		/*
		 * The loop ran off the end of the slot array without
		 * finding a free slot, so stop; note the comparison must
		 * match the loop bound above.
		 */
		if (slot == (softs->sg_max_count - 1))
			break;
	}

	mutex_exit(&softs->cmd_mutex);
	mutex_exit(&softs->queue_mutex);
}
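
/*
 * Mailbox submission protocol, summarized from the code above:
 *
 *	1. Check AMR_QIDB_SUBMIT: bail out if a submission is already
 *	   pending, since the inbound doorbell handles one at a time.
 *	2. Claim a slot, copy the command's SG list into that slot's
 *	   DMA area, and sync it FORDEV.
 *	3. Wait for mb_busy to clear, copy the mailbox image in, sync
 *	   the mailbox DMA area, then ring the doorbell with
 *	   AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_SUBMIT).
 *
 * Completion is reported asynchronously through amr_done() below.
 */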

static void
amr_done(struct amr_softs *softs)
{
	uint32_t i, idx;
	volatile uint32_t done_flag;
	struct amr_mailbox *mbox, mbsave;
	struct amr_command *ac, *head, *tail;

	head = tail = NULL;

	AMR_QPUT_ODB(softs, AMR_QODB_READY);

	/* acknowledge interrupt */
	(void) AMR_QGET_ODB(softs);

	mutex_enter(&softs->cmd_mutex);

	if (softs->mailbox->mb_nstatus != 0) {
		(void) ddi_dma_sync(softs->mbox_dma_handle,
		    0, 0, DDI_DMA_SYNC_FORCPU);

		/* save mailbox, which contains a list of completed commands */
		bcopy((void *)(uintptr_t)(volatile void *)softs->mailbox,
		    &mbsave, sizeof (mbsave));

		mbox = &mbsave;

		AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_ACK);

		/* wait for the acknowledge from the hardware */
		AMR_BUSYWAIT(!(AMR_QGET_IDB(softs) & AMR_QIDB_ACK),
		    AMR_RETRYCOUNT, done_flag);
		if (!done_flag) {
			/*
			 * The acknowledge never arrived; return from
			 * this interrupt and wait for the next one.
			 */
			cmn_err(CE_WARN, "No answer from the hardware");

			mutex_exit(&softs->cmd_mutex);
			return;
		}

		for (i = 0; i < mbox->mb_nstatus; i++) {
			idx = mbox->mb_completed[i] - 1;
			ac = softs->busycmd[idx];

			if (ac != NULL) {
				/* pull the command from the busy index */
				softs->busycmd[idx] = NULL;
				if (softs->amr_busyslots > 0)
					softs->amr_busyslots--;
				if (softs->amr_busyslots == 0)
					cv_broadcast(&softs->cmd_cv);

				ac->ac_flags &= ~AMR_CMD_BUSY;
				ac->ac_flags &= ~AMR_CMD_GOT_SLOT;
				ac->ac_status = mbox->mb_status;

				/* append to the local completion list */
				if (head) {
					tail->ac_next = ac;
					tail = ac;
				} else {
					tail = head = ac;
				}
				ac->ac_next = NULL;
			} else {
				AMRDB_PRINT((CE_WARN,
				    "ac in mailbox is NULL!"));
			}
		}
	} else {
		AMRDB_PRINT((CE_WARN, "mailbox is not ready for copy out!"));
	}

	mutex_exit(&softs->cmd_mutex);

	if (head != NULL) {
		amr_call_pkt_comp(head);
	}

	/* dispatch a thread to process the pending I/O if there is any */
	if ((ddi_taskq_dispatch(softs->amr_taskq, amr_start_waiting_queue,
	    (void *)softs, DDI_NOSLEEP)) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "No memory available to dispatch taskq");
	}
}

static void
amr_call_pkt_comp(register struct amr_command *head)
{
	register struct scsi_pkt *pkt;
	register struct amr_command *ac, *localhead;

	localhead = head;

	while (localhead) {
		ac = localhead;
		localhead = ac->ac_next;
		ac->ac_next = NULL;

		pkt = ac->pkt;
		*pkt->pkt_scbp = 0;

		if (ac->ac_status == AMR_STATUS_SUCCESS) {
			pkt->pkt_state |= (STATE_GOT_BUS
			    | STATE_GOT_TARGET
			    | STATE_SENT_CMD
			    | STATE_XFERRED_DATA);
			pkt->pkt_reason = CMD_CMPLT;
		} else {
			pkt->pkt_state |= STATE_GOT_BUS
			    | STATE_ARQ_DONE;
			pkt->pkt_reason = CMD_INCOMPLETE;
			amr_set_arq_data(pkt, KEY_HARDWARE_ERROR);
		}
		if (!(pkt->pkt_flags & FLAG_NOINTR)) {
			scsi_hba_pkt_comp(pkt);
		}
	}
}
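
/*
 * Completion note: packets issued with FLAG_NOINTR are polled by their
 * submitter, so no completion callback is made for them here; for all
 * others, scsi_hba_pkt_comp(9F) invokes the target driver's pkt_comp
 * routine. Any controller status other than AMR_STATUS_SUCCESS is
 * folded into a simulated CHECK CONDITION with KEY_HARDWARE_ERROR via
 * amr_set_arq_data() above.
 */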