1 /*
2 * megaraid_sas.c: source for mega_sas driver
3 *
4 * MegaRAID device driver for SAS controllers
5 * Copyright (c) 2005-2008, LSI Logic Corporation.
6 * All rights reserved.
7 *
8 * Version:
9 * Author:
10 * Rajesh Prabhakaran<Rajesh.Prabhakaran@lsil.com>
11 * Seokmann Ju
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright notice,
17 * this list of conditions and the following disclaimer.
18 *
19 * 2. Redistributions in binary form must reproduce the above copyright notice,
20 * this list of conditions and the following disclaimer in the documentation
21 * and/or other materials provided with the distribution.
22 *
23 * 3. Neither the name of the author nor the names of its contributors may be
24 * used to endorse or promote products derived from this software without
25 * specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
30 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
31 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
33 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
34 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
35 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
36 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
37 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 * DAMAGE.
39 */
40
41 /*
42 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
43 * Use is subject to license terms.
44 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
45 */
46
47 #include <sys/types.h>
48 #include <sys/param.h>
49 #include <sys/file.h>
50 #include <sys/errno.h>
51 #include <sys/open.h>
52 #include <sys/cred.h>
53 #include <sys/modctl.h>
54 #include <sys/conf.h>
55 #include <sys/devops.h>
56 #include <sys/cmn_err.h>
57 #include <sys/kmem.h>
58 #include <sys/stat.h>
59 #include <sys/mkdev.h>
60 #include <sys/pci.h>
61 #include <sys/scsi/scsi.h>
62 #include <sys/ddi.h>
63 #include <sys/sunddi.h>
64 #include <sys/atomic.h>
65 #include <sys/signal.h>
66
67 #include "megaraid_sas.h"
68
69 /*
70 * FMA header files
71 */
72 #include <sys/ddifm.h>
73 #include <sys/fm/protocol.h>
74 #include <sys/fm/util.h>
75 #include <sys/fm/io/ddi.h>
76
77 /*
78 * Local static data
79 */
80 static void *megasas_state = NULL;
81 static int debug_level_g = CL_ANN;
82
83 #pragma weak scsi_hba_open
84 #pragma weak scsi_hba_close
85 #pragma weak scsi_hba_ioctl
86
87 static ddi_dma_attr_t megasas_generic_dma_attr = {
88 DMA_ATTR_V0, /* dma_attr_version */
89 0, /* low DMA address range */
90 0xFFFFFFFFU, /* high DMA address range */
91 0xFFFFFFFFU, /* DMA counter register */
92 8, /* DMA address alignment */
93 0x07, /* DMA burstsizes */
94 1, /* min DMA size */
95 0xFFFFFFFFU, /* max DMA size */
96 0xFFFFFFFFU, /* segment boundary */
97 MEGASAS_MAX_SGE_CNT, /* dma_attr_sglen */
98 512, /* granularity of device */
99 0 /* bus specific DMA flags */
100 };
101
102 int32_t megasas_max_cap_maxxfer = 0x1000000;
103
104 /*
105 * cb_ops contains base level routines
106 */
107 static struct cb_ops megasas_cb_ops = {
108 megasas_open, /* open */
109 megasas_close, /* close */
110 nodev, /* strategy */
111 nodev, /* print */
112 nodev, /* dump */
113 nodev, /* read */
114 nodev, /* write */
115 megasas_ioctl, /* ioctl */
116 nodev, /* devmap */
117 nodev, /* mmap */
118 nodev, /* segmap */
119 nochpoll, /* poll */
120 nodev, /* cb_prop_op */
121 0, /* streamtab */
122 D_NEW | D_HOTPLUG, /* cb_flag */
123 CB_REV, /* cb_rev */
124 nodev, /* cb_aread */
125 nodev /* cb_awrite */
126 };
127
128 /*
129 * dev_ops contains configuration routines
130 */
131 static struct dev_ops megasas_ops = {
132 DEVO_REV, /* rev, */
133 0, /* refcnt */
134 megasas_getinfo, /* getinfo */
135 nulldev, /* identify */
136 nulldev, /* probe */
137 megasas_attach, /* attach */
138 megasas_detach, /* detach */
139 megasas_reset, /* reset */
140 &megasas_cb_ops, /* char/block ops */
141 NULL, /* bus ops */
142 NULL, /* power */
143 ddi_quiesce_not_supported, /* devo_quiesce */
144 };
145
146 static struct modldrv modldrv = {
147 &mod_driverops, /* module type - driver */
148 MEGASAS_VERSION,
149 &megasas_ops, /* driver ops */
150 };
151
152 static struct modlinkage modlinkage = {
153 MODREV_1, /* ml_rev - must be MODREV_1 */
154 { &modldrv, NULL } /* ml_linkage */
155 };
156
157 static struct ddi_device_acc_attr endian_attr = {
158 DDI_DEVICE_ATTR_V1,
159 DDI_STRUCTURE_LE_ACC,
160 DDI_STRICTORDER_ACC,
161 DDI_DEFAULT_ACC
162 };
163
164
165 /*
166 * ************************************************************************** *
167 * *
168 * common entry points - for loadable kernel modules *
169 * *
170 * ************************************************************************** *
171 */
172
173 /*
174 * _init - initialize a loadable module
175 * @void
176 *
177 * The driver should perform any one-time resource allocation or data
178 * initialization during driver loading in _init(). For example, the driver
179 * should initialize any mutexes global to the driver in this routine.
180 * The driver should not, however, use _init() to allocate or initialize
181 * anything that has to do with a particular instance of the device.
182 * Per-instance initialization must be done in attach().
183 */
184 int
185 _init(void)
186 {
187 int ret;
188
189 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
190
191 ret = ddi_soft_state_init(&megasas_state,
192 sizeof (struct megasas_instance), 0);
193
194 if (ret != 0) {
195 con_log(CL_ANN, (CE_WARN, "megaraid: could not init state"));
196 return (ret);
197 }
198
199 if ((ret = scsi_hba_init(&modlinkage)) != 0) {
200 con_log(CL_ANN, (CE_WARN, "megaraid: could not init scsi hba"));
201 ddi_soft_state_fini(&megasas_state);
202 return (ret);
203 }
204
205 ret = mod_install(&modlinkage);
206
207 if (ret != 0) {
208 con_log(CL_ANN, (CE_WARN, "megaraid: mod_install failed"));
209 scsi_hba_fini(&modlinkage);
210 ddi_soft_state_fini(&megasas_state);
211 }
212
213 return (ret);
214 }
215
216 /*
217 * _info - returns information about a loadable module.
218 * @void
219 *
220 * _info() is called to return module information. This is a typical entry
221 * point with a predefined role; it simply calls mod_info().
222 */
223 int
224 _info(struct modinfo *modinfop)
225 {
226 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
227
228 return (mod_info(&modlinkage, modinfop));
229 }
230
231 /*
232 * _fini - prepare a loadable module for unloading
233 * @void
234 *
235 * In _fini(), the driver should release any resources that were allocated in
236 * _init(). The driver must remove itself from the system module list.
237 */
238 int
239 _fini(void)
240 {
241 int ret;
242
243 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
244
245 if ((ret = mod_remove(&modlinkage)) != 0)
246 return (ret);
247
248 scsi_hba_fini(&modlinkage);
249
250 ddi_soft_state_fini(&megasas_state);
251
252 return (ret);
253 }
254
255
256 /*
257 * ************************************************************************** *
258 * *
259 * common entry points - for autoconfiguration *
260 * *
261 * ************************************************************************** *
262 */
263 /*
264 * attach - adds a device to the system as part of initialization
265 * @dip:
266 * @cmd:
267 *
268 * The kernel calls a driver's attach() entry point to attach an instance of
269 * a device (for MegaRAID, it is instance of a controller) or to resume
270 * operation for an instance of a device that has been suspended or has been
271 * shut down by the power management framework.
272 * The attach() entry point typically includes the following types of
273 * processing:
274 * - allocate a soft-state structure for the device instance (for MegaRAID,
275 * controller instance)
276 * - initialize per-instance mutexes
277 * - initialize condition variables
278 * - register the device's interrupts (for MegaRAID, controller's interrupts)
279 * - map the registers and memory of the device instance (for MegaRAID,
280 * controller instance)
281 * - create minor device nodes for the device instance (for MegaRAID,
282 * controller instance)
283 * - report that the device instance (for MegaRAID, controller instance) has
284 * attached
285 */
286 static int
287 megasas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
288 {
289 int instance_no;
290 int nregs;
291 uint8_t added_isr_f = 0;
292 uint8_t added_soft_isr_f = 0;
293 uint8_t create_devctl_node_f = 0;
294 uint8_t create_scsi_node_f = 0;
295 uint8_t create_ioc_node_f = 0;
296 uint8_t tran_alloc_f = 0;
297 uint8_t irq;
298 uint16_t vendor_id;
299 uint16_t device_id;
300 uint16_t subsysvid;
301 uint16_t subsysid;
302 uint16_t command;
303
304 scsi_hba_tran_t *tran;
305 ddi_dma_attr_t tran_dma_attr;
306 struct megasas_instance *instance;
307
308 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
309
310 /* CONSTCOND */
311 ASSERT(NO_COMPETING_THREADS);
312
313 instance_no = ddi_get_instance(dip);
314
315 /*
316 * Since we know that some instantiations of this device can be
317 * plugged into slave-only slots, check to see whether this is
318 * one such.
319 */
320 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
321 con_log(CL_ANN, (CE_WARN,
322 "mega%d: Device in slave-only slot, unused", instance_no));
323 return (DDI_FAILURE);
324 }
325
326 switch (cmd) {
327 case DDI_ATTACH:
328 con_log(CL_DLEVEL1, (CE_NOTE, "megasas: DDI_ATTACH"));
329 /* allocate the soft state for the instance */
330 if (ddi_soft_state_zalloc(megasas_state, instance_no)
331 != DDI_SUCCESS) {
332 con_log(CL_ANN, (CE_WARN,
333 "mega%d: Failed to allocate soft state",
334 instance_no));
335
336 return (DDI_FAILURE);
337 }
338
339 instance = (struct megasas_instance *)ddi_get_soft_state
340 (megasas_state, instance_no);
341
342 if (instance == NULL) {
343 con_log(CL_ANN, (CE_WARN,
344 "mega%d: Bad soft state", instance_no));
345
346 ddi_soft_state_free(megasas_state, instance_no);
347
348 return (DDI_FAILURE);
349 }
350
351 bzero((caddr_t)instance,
352 sizeof (struct megasas_instance));
353
354 instance->func_ptr = kmem_zalloc(
355 sizeof (struct megasas_func_ptr), KM_SLEEP);
356 ASSERT(instance->func_ptr);
357
358 /* Setup the PCI configuration space handles */
359 if (pci_config_setup(dip, &instance->pci_handle) !=
360 DDI_SUCCESS) {
361 con_log(CL_ANN, (CE_WARN,
362 "mega%d: pci config setup failed ",
363 instance_no));
364
365 kmem_free(instance->func_ptr,
366 sizeof (struct megasas_func_ptr));
367 ddi_soft_state_free(megasas_state, instance_no);
368
369 return (DDI_FAILURE);
370 }
371
372 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
373 con_log(CL_ANN, (CE_WARN,
374 "megaraid: failed to get registers."));
375
376 pci_config_teardown(&instance->pci_handle);
377 kmem_free(instance->func_ptr,
378 sizeof (struct megasas_func_ptr));
379 ddi_soft_state_free(megasas_state, instance_no);
380
381 return (DDI_FAILURE);
382 }
383
384 vendor_id = pci_config_get16(instance->pci_handle,
385 PCI_CONF_VENID);
386 device_id = pci_config_get16(instance->pci_handle,
387 PCI_CONF_DEVID);
388
389 subsysvid = pci_config_get16(instance->pci_handle,
390 PCI_CONF_SUBVENID);
391 subsysid = pci_config_get16(instance->pci_handle,
392 PCI_CONF_SUBSYSID);
393
394 pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
395 (pci_config_get16(instance->pci_handle,
396 PCI_CONF_COMM) | PCI_COMM_ME));
397 irq = pci_config_get8(instance->pci_handle,
398 PCI_CONF_ILINE);
399
400 con_log(CL_DLEVEL1, (CE_CONT, "megasas%d: "
401 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s\n",
402 instance_no, vendor_id, device_id, subsysvid,
403 subsysid, irq, MEGASAS_VERSION));
404
405 /* enable bus-mastering */
406 command = pci_config_get16(instance->pci_handle,
407 PCI_CONF_COMM);
408
409 if (!(command & PCI_COMM_ME)) {
410 command |= PCI_COMM_ME;
411
412 pci_config_put16(instance->pci_handle,
413 PCI_CONF_COMM, command);
414
415 con_log(CL_ANN, (CE_CONT, "megaraid%d: "
416 "enable bus-mastering\n", instance_no));
417 } else {
418 con_log(CL_DLEVEL1, (CE_CONT, "megaraid%d: "
419 "bus-mastering already set\n", instance_no));
420 }
421
422 /* initialize function pointers */
423 if ((device_id == PCI_DEVICE_ID_LSI_1078) ||
424 (device_id == PCI_DEVICE_ID_LSI_1078DE)) {
425 con_log(CL_DLEVEL1, (CE_CONT, "megasas%d: "
426 "1078R/DE detected\n", instance_no));
427 instance->func_ptr->read_fw_status_reg =
428 read_fw_status_reg_ppc;
429 instance->func_ptr->issue_cmd = issue_cmd_ppc;
430 instance->func_ptr->issue_cmd_in_sync_mode =
431 issue_cmd_in_sync_mode_ppc;
432 instance->func_ptr->issue_cmd_in_poll_mode =
433 issue_cmd_in_poll_mode_ppc;
434 instance->func_ptr->enable_intr =
435 enable_intr_ppc;
436 instance->func_ptr->disable_intr =
437 disable_intr_ppc;
438 instance->func_ptr->intr_ack = intr_ack_ppc;
439 } else {
440 con_log(CL_DLEVEL1, (CE_CONT, "megasas%d: "
441 "1064/8R detected\n", instance_no));
442 instance->func_ptr->read_fw_status_reg =
443 read_fw_status_reg_xscale;
444 instance->func_ptr->issue_cmd =
445 issue_cmd_xscale;
446 instance->func_ptr->issue_cmd_in_sync_mode =
447 issue_cmd_in_sync_mode_xscale;
448 instance->func_ptr->issue_cmd_in_poll_mode =
449 issue_cmd_in_poll_mode_xscale;
450 instance->func_ptr->enable_intr =
451 enable_intr_xscale;
452 instance->func_ptr->disable_intr =
453 disable_intr_xscale;
454 instance->func_ptr->intr_ack =
455 intr_ack_xscale;
456 }
457
458 instance->baseaddress = pci_config_get32(
459 instance->pci_handle, PCI_CONF_BASE0);
460 instance->baseaddress &= 0x0fffc;
461
462 instance->dip = dip;
463 instance->vendor_id = vendor_id;
464 instance->device_id = device_id;
465 instance->subsysvid = subsysvid;
466 instance->subsysid = subsysid;
467
468 /* Initialize FMA */
469 instance->fm_capabilities = ddi_prop_get_int(
470 DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS,
471 "fm-capable", DDI_FM_EREPORT_CAPABLE |
472 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE
473 | DDI_FM_ERRCB_CAPABLE);
474
475 megasas_fm_init(instance);
476
477 /* setup the mfi based low level driver */
478 if (init_mfi(instance) != DDI_SUCCESS) {
479 con_log(CL_ANN, (CE_WARN, "megaraid: "
480 "could not initialize the low level driver"));
481
482 goto fail_attach;
483 }
484
485 /*
486 * Allocate the interrupt blocking cookie.
487 * It represents the information the framework
488 * needs to block interrupts. This cookie will
489 * be used by the locks shared across our ISR.
490 * These locks must be initialized before we
491 * register our ISR.
492 * ddi_add_intr(9F)
493 */
494 if (ddi_get_iblock_cookie(dip, 0,
495 &instance->iblock_cookie) != DDI_SUCCESS) {
496
497 goto fail_attach;
498 }
499
500 if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_HIGH,
501 &instance->soft_iblock_cookie) != DDI_SUCCESS) {
502
503 goto fail_attach;
504 }
505
506 /*
507 * Initialize the driver mutexes common to
508 * normal/high level isr
509 */
510 if (ddi_intr_hilevel(dip, 0)) {
511 instance->isr_level = HIGH_LEVEL_INTR;
512 mutex_init(&instance->cmd_pool_mtx,
513 "cmd_pool_mtx", MUTEX_DRIVER,
514 instance->soft_iblock_cookie);
515 mutex_init(&instance->cmd_pend_mtx,
516 "cmd_pend_mtx", MUTEX_DRIVER,
517 instance->soft_iblock_cookie);
518 } else {
519 /*
520 * Initialize the driver mutexes
521 * for the normal-level isr
522 */
523 instance->isr_level = NORMAL_LEVEL_INTR;
524 mutex_init(&instance->cmd_pool_mtx,
525 "cmd_pool_mtx", MUTEX_DRIVER,
526 instance->iblock_cookie);
527 mutex_init(&instance->cmd_pend_mtx,
528 "cmd_pend_mtx", MUTEX_DRIVER,
529 instance->iblock_cookie);
530 }
531
532 mutex_init(&instance->completed_pool_mtx,
533 "completed_pool_mtx", MUTEX_DRIVER,
534 instance->iblock_cookie);
535 mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx",
536 MUTEX_DRIVER, instance->iblock_cookie);
537 mutex_init(&instance->aen_cmd_mtx, "aen_cmd_mtx",
538 MUTEX_DRIVER, instance->iblock_cookie);
539 mutex_init(&instance->abort_cmd_mtx, "abort_cmd_mtx",
540 MUTEX_DRIVER, instance->iblock_cookie);
541
542 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
543 cv_init(&instance->abort_cmd_cv, NULL, CV_DRIVER, NULL);
544
545 INIT_LIST_HEAD(&instance->completed_pool_list);
546
547 /* Register our isr. */
548 if (ddi_add_intr(dip, 0, NULL, NULL, megasas_isr,
549 (caddr_t)instance) != DDI_SUCCESS) {
550 con_log(CL_ANN, (CE_WARN,
551 " ISR did not register"));
552
553 goto fail_attach;
554 }
555
556 added_isr_f = 1;
557
558 /* Register our soft-isr for highlevel interrupts. */
559 if (instance->isr_level == HIGH_LEVEL_INTR) {
560 if (ddi_add_softintr(dip, DDI_SOFTINT_HIGH,
561 &instance->soft_intr_id, NULL, NULL,
562 megasas_softintr, (caddr_t)instance) !=
563 DDI_SUCCESS) {
564 con_log(CL_ANN, (CE_WARN,
565 " Software ISR did not register"));
566
567 goto fail_attach;
568 }
569
570 added_soft_isr_f = 1;
571 }
572
573 /* Allocate a transport structure */
574 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
575
576 if (tran == NULL) {
577 con_log(CL_ANN, (CE_WARN,
578 "scsi_hba_tran_alloc failed"));
579 goto fail_attach;
580 }
581
582 tran_alloc_f = 1;
583
584 instance->tran = tran;
585
586 tran->tran_hba_private = instance;
587 tran->tran_tgt_private = NULL;
588 tran->tran_tgt_init = megasas_tran_tgt_init;
589 tran->tran_tgt_probe = scsi_hba_probe;
590 tran->tran_tgt_free = (void (*)())NULL;
591 tran->tran_init_pkt = megasas_tran_init_pkt;
592 tran->tran_start = megasas_tran_start;
593 tran->tran_abort = megasas_tran_abort;
594 tran->tran_reset = megasas_tran_reset;
595 tran->tran_bus_reset = megasas_tran_bus_reset;
596 tran->tran_getcap = megasas_tran_getcap;
597 tran->tran_setcap = megasas_tran_setcap;
598 tran->tran_destroy_pkt = megasas_tran_destroy_pkt;
599 tran->tran_dmafree = megasas_tran_dmafree;
600 tran->tran_sync_pkt = megasas_tran_sync_pkt;
601 tran->tran_reset_notify = NULL;
602 tran->tran_quiesce = megasas_tran_quiesce;
603 tran->tran_unquiesce = megasas_tran_unquiesce;
604
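/*
 * Use the generic DMA attributes, but limit the scatter/gather
 * list length to instance->max_num_sge, which was filled in
 * earlier (presumably by init_mfi()) from what the firmware
 * supports.
 */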
605 tran_dma_attr = megasas_generic_dma_attr;
606 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;
607
608 /* Attach this instance of the hba */
609 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
610 != DDI_SUCCESS) {
611 con_log(CL_ANN, (CE_WARN,
612 "scsi_hba_attach failed\n"));
613
614 goto fail_attach;
615 }
616
617 /* create devctl node for cfgadm command */
618 if (ddi_create_minor_node(dip, "devctl",
619 S_IFCHR, INST2DEVCTL(instance_no),
620 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
621 con_log(CL_ANN, (CE_WARN,
622 "megaraid: failed to create devctl node."));
623
624 goto fail_attach;
625 }
626
627 create_devctl_node_f = 1;
628
629 /* create scsi node for cfgadm command */
630 if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
631 INST2SCSI(instance_no),
632 DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
633 DDI_FAILURE) {
634 con_log(CL_ANN, (CE_WARN,
635 "megaraid: failed to create scsi node."));
636
637 goto fail_attach;
638 }
639
640 create_scsi_node_f = 1;
641
642 (void) sprintf(instance->iocnode, "%d:lsirdctl",
643 instance_no);
644
645 /*
646 * Create a node for applications
647 * for issuing ioctl to the driver.
648 */
649 if (ddi_create_minor_node(dip, instance->iocnode,
650 S_IFCHR, INST2LSIRDCTL(instance_no),
651 DDI_PSEUDO, 0) == DDI_FAILURE) {
652 con_log(CL_ANN, (CE_WARN,
653 "megaraid: failed to create ioctl node."));
654
655 goto fail_attach;
656 }
657
658 create_ioc_node_f = 1;
659
660 /* enable interrupt */
661 instance->func_ptr->enable_intr(instance);
662
663 /* initiate AEN */
664 if (start_mfi_aen(instance)) {
665 con_log(CL_ANN, (CE_WARN,
666 "megaraid: failed to initiate AEN."));
667 goto fail_initiate_aen;
668 }
669
670 con_log(CL_DLEVEL1, (CE_NOTE,
671 "AEN started for instance %d.", instance_no));
672
673 /* Finally! We are on the air. */
674 ddi_report_dev(dip);
675
676 if (megasas_check_acc_handle(instance->regmap_handle) !=
677 DDI_SUCCESS) {
678 goto fail_attach;
679 }
680 if (megasas_check_acc_handle(instance->pci_handle) !=
681 DDI_SUCCESS) {
682 goto fail_attach;
683 }
684 break;
685 case DDI_PM_RESUME:
686 con_log(CL_ANN, (CE_NOTE,
687 "megasas: DDI_PM_RESUME"));
688 break;
689 case DDI_RESUME:
690 con_log(CL_ANN, (CE_NOTE,
691 "megasas: DDI_RESUME"));
692 break;
693 default:
694 con_log(CL_ANN, (CE_WARN,
695 "megasas: invalid attach cmd=%x", cmd));
696 return (DDI_FAILURE);
697 }
698
699 return (DDI_SUCCESS);
700
701 fail_initiate_aen:
702 fail_attach:
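/* Undo, in reverse order, whatever was set up before the failure. */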
703 if (create_devctl_node_f) {
704 ddi_remove_minor_node(dip, "devctl");
705 }
706
707 if (create_scsi_node_f) {
708 ddi_remove_minor_node(dip, "scsi");
709 }
710
711 if (create_ioc_node_f) {
712 ddi_remove_minor_node(dip, instance->iocnode);
713 }
714
715 if (tran_alloc_f) {
716 scsi_hba_tran_free(tran);
717 }
718
719
720 if (added_soft_isr_f) {
721 ddi_remove_softintr(instance->soft_intr_id);
722 }
723
724 if (added_isr_f) {
725 ddi_remove_intr(dip, 0, instance->iblock_cookie);
726 }
727
728 megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
729 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
730
731 megasas_fm_fini(instance);
732
733 pci_config_teardown(&instance->pci_handle);
734
735 ddi_soft_state_free(megasas_state, instance_no);
736
737 con_log(CL_ANN, (CE_NOTE,
738 "megasas: return failure from mega_attach\n"));
739
740 return (DDI_FAILURE);
741 }
742
743 /*
744 * getinfo - gets device information
745 * @dip:
746 * @cmd:
747 * @arg:
748 * @resultp:
749 *
750 * The system calls getinfo() to obtain configuration information that only
751 * the driver knows. The mapping of minor numbers to device instances is
752 * entirely under the control of the driver. The system sometimes needs to ask
753 * the driver which device a particular dev_t represents.
754 * Given the device number, return the devinfo pointer from the driver's
755 * per-instance soft state.
756 */
757 /*ARGSUSED*/
758 static int
759 megasas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
760 {
761 int rval;
762 int megasas_minor = getminor((dev_t)arg);
763
764 struct megasas_instance *instance;
765
766 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
767
768 switch (cmd) {
769 case DDI_INFO_DEVT2DEVINFO:
770 instance = (struct megasas_instance *)
771 ddi_get_soft_state(megasas_state,
772 MINOR2INST(megasas_minor));
773
774 if (instance == NULL) {
775 *resultp = NULL;
776 rval = DDI_FAILURE;
777 } else {
778 *resultp = instance->dip;
779 rval = DDI_SUCCESS;
780 }
781 break;
782 case DDI_INFO_DEVT2INSTANCE:
783 *resultp = (void *)(uintptr_t)MINOR2INST(megasas_minor);
784 rval = DDI_SUCCESS;
785 break;
786 default:
787 *resultp = NULL;
788 rval = DDI_FAILURE;
789 }
790
791 return (rval);
792 }
793
794 /*
795 * detach - detaches a device from the system
796 * @dip: pointer to the device's dev_info structure
797 * @cmd: type of detach
798 *
799 * A driver's detach() entry point is called to detach an instance of a device
800 * that is bound to the driver. The entry point is called with the instance of
801 * the device node to be detached and with DDI_DETACH, which is specified as
802 * the cmd argument to the entry point.
803 * This routine is called during driver unload. We free all the allocated
804 * resources and call the corresponding LLD so that it can also release all
805 * its resources.
806 */
807 static int
808 megasas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
809 {
810 int instance_no;
811
812 struct megasas_instance *instance;
813
814 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
815
816 /* CONSTCOND */
817 ASSERT(NO_COMPETING_THREADS);
818
819 instance_no = ddi_get_instance(dip);
820
821 instance = (struct megasas_instance *)ddi_get_soft_state(megasas_state,
822 instance_no);
823
824 if (!instance) {
825 con_log(CL_ANN, (CE_WARN,
826 "megasas:%d could not get instance in detach",
827 instance_no));
828
829 return (DDI_FAILURE);
830 }
831
832 con_log(CL_ANN, (CE_NOTE,
833 "megasas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x\n",
834 instance_no, instance->vendor_id, instance->device_id,
835 instance->subsysvid, instance->subsysid));
836
837 switch (cmd) {
838 case DDI_DETACH:
839 con_log(CL_ANN, (CE_NOTE,
840 "megasas_detach: DDI_DETACH\n"));
841
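/*
 * Tear down in roughly the reverse order of attach: detach from
 * SCSA, abort the outstanding AEN command, remove interrupts,
 * then release the MFI, FMA, and PCI resources.
 */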
842 if (scsi_hba_detach(dip) != DDI_SUCCESS) {
843 con_log(CL_ANN, (CE_WARN,
844 "megasas:%d failed to detach",
845 instance_no));
846
847 return (DDI_FAILURE);
848 }
849
850 scsi_hba_tran_free(instance->tran);
851
852 if (abort_aen_cmd(instance, instance->aen_cmd)) {
853 con_log(CL_ANN, (CE_WARN, "megasas_detach: "
854 "failed to abort previous AEN command\n"));
855
856 return (DDI_FAILURE);
857 }
858
859 instance->func_ptr->disable_intr(instance);
860
861 if (instance->isr_level == HIGH_LEVEL_INTR) {
862 ddi_remove_softintr(instance->soft_intr_id);
863 }
864
865 ddi_remove_intr(dip, 0, instance->iblock_cookie);
866
867 free_space_for_mfi(instance);
868
869 megasas_fm_fini(instance);
870
871 pci_config_teardown(&instance->pci_handle);
872
873 kmem_free(instance->func_ptr,
874 sizeof (struct megasas_func_ptr));
875
876 ddi_soft_state_free(megasas_state, instance_no);
877 break;
878 case DDI_PM_SUSPEND:
879 con_log(CL_ANN, (CE_NOTE,
880 "megasas_detach: DDI_PM_SUSPEND\n"));
881
882 break;
883 case DDI_SUSPEND:
884 con_log(CL_ANN, (CE_NOTE,
885 "megasas_detach: DDI_SUSPEND\n"));
886
887 break;
888 default:
889 con_log(CL_ANN, (CE_WARN,
890 "invalid detach command:0x%x", cmd));
891 return (DDI_FAILURE);
892 }
893
894 return (DDI_SUCCESS);
895 }
896
897 /*
898 * ************************************************************************** *
899 * *
900 * common entry points - for character driver types *
901 * *
902 * ************************************************************************** *
903 */
904 /*
905 * open - gets access to a device
906 * @dev:
907 * @openflags:
908 * @otyp:
909 * @credp:
910 *
911 * Access to a device by one or more application programs is controlled
912 * through the open() and close() entry points. The primary function of
913 * open() is to verify that the open request is allowed.
914 */
915 static int
916 megasas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
917 {
918 int rval = 0;
919
920 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
921
922 /* Check root permissions */
923 if (drv_priv(credp) != 0) {
924 con_log(CL_ANN, (CE_WARN,
925 "megaraid: Non-root ioctl access tried!"));
926 return (EPERM);
927 }
928
929 /* Verify we are being opened as a character device */
930 if (otyp != OTYP_CHR) {
931 con_log(CL_ANN, (CE_WARN,
932 "megaraid: ioctl node must be a char node\n"));
933 return (EINVAL);
934 }
935
936 if (ddi_get_soft_state(megasas_state, MINOR2INST(getminor(*dev)))
937 == NULL) {
938 return (ENXIO);
939 }
940
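/*
 * scsi_hba_open is a weak symbol (see the #pragma weak above);
 * hand the open off to SCSA only if it is present.
 */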
941 if (scsi_hba_open) {
942 rval = scsi_hba_open(dev, openflags, otyp, credp);
943 }
944
945 return (rval);
946 }
947
948 /*
949 * close - gives up access to a device
950 * @dev:
951 * @openflags:
952 * @otyp:
953 * @credp:
954 *
955 * close() should perform any cleanup necessary to finish using the minor
956 * device, and prepare the device (and driver) to be opened again.
957 */
958 static int
959 megasas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
960 {
961 int rval = 0;
962
963 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
964
965 /* no need for locks! */
966
967 if (scsi_hba_close) {
968 rval = scsi_hba_close(dev, openflags, otyp, credp);
969 }
970
971 return (rval);
972 }
973
974 /*
975 * ioctl - performs a range of I/O commands for character drivers
976 * @dev:
977 * @cmd:
978 * @arg:
979 * @mode:
980 * @credp:
981 * @rvalp:
982 *
983 * ioctl() routine must make sure that user data is copied into or out of the
984 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
985 * and ddi_copyout(), as appropriate.
986 * This routine handles driver-private ioctls and passes the rest on to SCSA.
987 * ioctl() should return 0 on success, or the appropriate error number. The
988 * driver may also set the value returned to the calling process through rvalp.
989 */
990 static int
991 megasas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
992 int *rvalp)
993 {
994 int rval = 0;
995
996 struct megasas_instance *instance;
997 struct megasas_ioctl ioctl;
998 struct megasas_aen aen;
999
1000 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1001
1002 instance = ddi_get_soft_state(megasas_state, MINOR2INST(getminor(dev)));
1003
1004 if (instance == NULL) {
1005 /* invalid minor number */
1006 con_log(CL_ANN, (CE_WARN, "megaraid: adapter not found."));
1007 return (ENXIO);
1008 }
1009
1010 switch ((uint_t)cmd) {
1011 case MEGASAS_IOCTL_FIRMWARE:
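/*
 * Firmware/driver passthrough: copy the ioctl packet in, hand it
 * to the driver-private or MFI passthrough handler, then copy the
 * (possibly updated) packet back out to the caller.
 */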
1012 if (ddi_copyin((void *) arg, &ioctl,
1013 sizeof (struct megasas_ioctl), mode)) {
1014 con_log(CL_ANN, (CE_WARN, "megasas_ioctl: "
1015 "ERROR IOCTL copyin"));
1016 return (EFAULT);
1017 }
1018
1019 if (ioctl.control_code == MR_DRIVER_IOCTL_COMMON) {
1020 rval = handle_drv_ioctl(instance, &ioctl, mode);
1021 } else {
1022 rval = handle_mfi_ioctl(instance, &ioctl, mode);
1023 }
1024
1025 if (ddi_copyout((void *) &ioctl, (void *)arg,
1026 (sizeof (struct megasas_ioctl) - 1), mode)) {
1027 con_log(CL_ANN, (CE_WARN,
1028 "megasas_ioctl: copy_to_user failed\n"));
1029 rval = 1;
1030 }
1031
1032 break;
1033 case MEGASAS_IOCTL_AEN:
1034 if (ddi_copyin((void *) arg, &aen,
1035 sizeof (struct megasas_aen), mode)) {
1036 con_log(CL_ANN, (CE_WARN,
1037 "megasas_ioctl: ERROR AEN copyin"));
1038 return (EFAULT);
1039 }
1040
1041 rval = handle_mfi_aen(instance, &aen);
1042
1043 if (ddi_copyout((void *) &aen, (void *)arg,
1044 sizeof (struct megasas_aen), mode)) {
1045 con_log(CL_ANN, (CE_WARN,
1046 "megasas_ioctl: copy_to_user failed\n"));
1047 rval = 1;
1048 }
1049
1050 break;
1051 default:
1052 rval = scsi_hba_ioctl(dev, cmd, arg,
1053 mode, credp, rvalp);
1054
1055 con_log(CL_DLEVEL1, (CE_NOTE, "megasas_ioctl: "
1056 "scsi_hba_ioctl called, ret = %x.", rval));
1057 }
1058
1059 return (rval);
1060 }
1061
1062 /*
1063 * ************************************************************************** *
1064 * *
1065 * common entry points - for block driver types *
1066 * *
1067 * ************************************************************************** *
1068 */
1069 /*
1070 * reset - flush the controller cache
1071 * @dip:
1072 * @cmd:
1073 *
1074 * Called by the framework at reset/reboot time; flushes the adapter cache.
1075 */
1076 /*ARGSUSED*/
1077 static int
1078 megasas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1079 {
1080 int instance_no;
1081
1082 struct megasas_instance *instance;
1083
1084 instance_no = ddi_get_instance(dip);
1085 instance = (struct megasas_instance *)ddi_get_soft_state
1086 (megasas_state, instance_no);
1087
1088 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1089
1090 if (!instance) {
1091 con_log(CL_ANN, (CE_WARN,
1092 "megaraid:%d could not get adapter in reset",
1093 instance_no));
1094 return (DDI_FAILURE);
1095 }
1096
1097 con_log(CL_ANN, (CE_NOTE, "flushing cache for instance %d ..",
1098 instance_no));
1099
1100 flush_cache(instance);
1101
1102 return (DDI_SUCCESS);
1103 }
1104
1105
1106 /*
1107 * ************************************************************************** *
1108 * *
1109 * entry points (SCSI HBA) *
1110 * *
1111 * ************************************************************************** *
1112 */
1113 /*
1114 * tran_tgt_init - initialize a target device instance
1115 * @hba_dip:
1116 * @tgt_dip:
1117 * @tran:
1118 * @sd:
1119 *
1120 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
1121 * any per-target resources. tran_tgt_init() also enables the HBA to qualify
1122 * the device's address as valid and supportable for that particular HBA.
1123 * By returning DDI_FAILURE, the instance of the target driver for that device
1124 * is not probed or attached.
1125 */
1126 /*ARGSUSED*/
1127 static int
1128 megasas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1129 scsi_hba_tran_t *tran, struct scsi_device *sd)
1130 {
1131 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1132
1133 return (DDI_SUCCESS);
1134 }
1135
1136 /*
1137 * tran_init_pkt - allocate & initialize a scsi_pkt structure
1138 * @ap:
1139 * @pkt:
1140 * @bp:
1141 * @cmdlen:
1142 * @statuslen:
1143 * @tgtlen:
1144 * @flags:
1145 * @callback:
1146 *
1147 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1148 * structure and DMA resources for a target driver request. The
1149 * tran_init_pkt() entry point is called when the target driver calls the
1150 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1151 * is a request to perform one or more of three possible services:
1152 * - allocation and initialization of a scsi_pkt structure
1153 * - allocation of DMA resources for data transfer
1154 * - reallocation of DMA resources for the next portion of the data transfer
1155 */
1156 static struct scsi_pkt *
1157 megasas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
1158 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1159 int flags, int (*callback)(), caddr_t arg)
1160 {
1161 struct scsa_cmd *acmd;
1162 struct megasas_instance *instance;
1163 struct scsi_pkt *new_pkt;
1164
1165 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1166
1167 instance = ADDR2MEGA(ap);
1168
1169 /* step #1 : pkt allocation */
1170 if (pkt == NULL) {
1171 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
1172 tgtlen, sizeof (struct scsa_cmd), callback, arg);
1173 if (pkt == NULL) {
1174 return (NULL);
1175 }
1176
1177 acmd = PKT2CMD(pkt);
1178
1179 /*
1180 * Initialize the new pkt - we redundantly initialize
1181 * all the fields for illustrative purposes.
1182 */
1183 acmd->cmd_pkt = pkt;
1184 acmd->cmd_flags = 0;
1185 acmd->cmd_scblen = statuslen;
1186 acmd->cmd_cdblen = cmdlen;
1187 acmd->cmd_dmahandle = NULL;
1188 acmd->cmd_ncookies = 0;
1189 acmd->cmd_cookie = 0;
1190 acmd->cmd_cookiecnt = 0;
1191 acmd->cmd_nwin = 0;
1192
1193 pkt->pkt_address = *ap;
1194 pkt->pkt_comp = (void (*)())NULL;
1195 pkt->pkt_flags = 0;
1196 pkt->pkt_time = 0;
1197 pkt->pkt_resid = 0;
1198 pkt->pkt_state = 0;
1199 pkt->pkt_statistics = 0;
1200 pkt->pkt_reason = 0;
1201 new_pkt = pkt;
1202 } else {
1203 acmd = PKT2CMD(pkt);
1204 new_pkt = NULL;
1205 }
1206
1207 /* step #2 : dma allocation/move */
1208 if (bp && bp->b_bcount != 0) {
1209 if (acmd->cmd_dmahandle == NULL) {
1210 if (megasas_dma_alloc(instance, pkt, bp, flags,
1211 callback) == -1) {
1212 if (new_pkt) {
1213 scsi_hba_pkt_free(ap, new_pkt);
1214 }
1215
1216 return ((struct scsi_pkt *)NULL);
1217 }
1218 } else {
1219 if (megasas_dma_move(instance, pkt, bp) == -1) {
1220 return ((struct scsi_pkt *)NULL);
1221 }
1222 }
1223 }
1224
1225 return (pkt);
1226 }
1227
1228 /*
1229 * tran_start - transport a SCSI command to the addressed target
1230 * @ap:
1231 * @pkt:
1232 *
1233 * The tran_start() entry point for a SCSI HBA driver is called to transport a
1234 * SCSI command to the addressed target. The SCSI command is described
1235 * entirely within the scsi_pkt structure, which the target driver allocated
1236 * through the HBA driver's tran_init_pkt() entry point. If the command
1237 * involves a data transfer, DMA resources must also have been allocated for
1238 * the scsi_pkt structure.
1239 *
1240 * Return Values :
1241 * TRAN_BUSY - request queue is full, no more free scbs
1242 * TRAN_ACCEPT - pkt has been submitted to the instance
1243 */
1244 static int
1245 megasas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
1246 {
1247 uchar_t cmd_done = 0;
1248
1249 struct megasas_instance *instance = ADDR2MEGA(ap);
1250 struct megasas_cmd *cmd;
1251
1252 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d:SCSI CDB[0]=0x%x",
1253 __func__, __LINE__, pkt->pkt_cdbp[0]));
1254
1255 pkt->pkt_reason = CMD_CMPLT;
1256 *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
1257
1258 cmd = build_cmd(instance, ap, pkt, &cmd_done);
1259
1260 /*
1261 * Check if the command was already completed by the build_cmd()
1262 * routine. In that case cmd_done is set, no command needs to be
1263 * issued, and the appropriate reason is provided in pkt_reason.
1264 */
1265 if (cmd_done) {
1266 pkt->pkt_reason = CMD_CMPLT;
1267 pkt->pkt_scbp[0] = STATUS_GOOD;
1268 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1269 | STATE_SENT_CMD;
1270 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1271 scsi_hba_pkt_comp(pkt);
1272 }
1273 return (TRAN_ACCEPT);
1274 }
1275
1276 if (cmd == NULL) {
1277 return (TRAN_BUSY);
1278 }
1279
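/*
 * For interrupt-driven requests, back off with TRAN_BUSY if the
 * firmware already has more than max_fw_cmds outstanding;
 * otherwise sync the frame and issue the command.
 */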
1280 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1281 if (instance->fw_outstanding > instance->max_fw_cmds) {
1282 con_log(CL_ANN, (CE_CONT, "megasas:Firmware busy"));
1283 return_mfi_pkt(instance, cmd);
1284 return (TRAN_BUSY);
1285 }
1286
1287 /* Synchronize the Cmd frame for the controller */
1288 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1289 DDI_DMA_SYNC_FORDEV);
1290
1291 instance->func_ptr->issue_cmd(cmd, instance);
1292
1293 } else {
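/*
 * FLAG_NOINTR request: issue the command in polled mode and
 * translate the MFI completion status into the pkt fields
 * before completing the packet.
 */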
1294 struct megasas_header *hdr = &cmd->frame->hdr;
1295
1296 cmd->sync_cmd = MEGASAS_TRUE;
1297
1298 instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1299
1300 pkt->pkt_reason = CMD_CMPLT;
1301 pkt->pkt_statistics = 0;
1302 pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;
1303
1304 switch (hdr->cmd_status) {
1305 case MFI_STAT_OK:
1306 pkt->pkt_scbp[0] = STATUS_GOOD;
1307 break;
1308
1309 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1310
1311 pkt->pkt_reason = CMD_CMPLT;
1312 pkt->pkt_statistics = 0;
1313
1314 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
1315 break;
1316
1317 case MFI_STAT_DEVICE_NOT_FOUND:
1318 pkt->pkt_reason = CMD_DEV_GONE;
1319 pkt->pkt_statistics = STAT_DISCON;
1320 break;
1321
1322 default:
1323 ((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
1324 }
1325
1326 return_mfi_pkt(instance, cmd);
1327 (void) megasas_common_check(instance, cmd);
1328
1329 scsi_hba_pkt_comp(pkt);
1330
1331 }
1332
1333 return (TRAN_ACCEPT);
1334 }
1335
1336 /*
1337 * tran_abort - Abort any commands that are currently in transport
1338 * @ap:
1339 * @pkt:
1340 *
1341 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
1342 * commands that are currently in transport for a particular target. This entry
1343 * point is called when a target driver calls scsi_abort(). The tran_abort()
1344 * entry point should attempt to abort the command denoted by the pkt
1345 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
1346 * abort all outstanding commands in the transport layer for the particular
1347 * target or logical unit.
1348 */
1349 /*ARGSUSED*/
1350 static int
1351 megasas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1352 {
1353 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1354
1355 /* aborting command not supported by H/W */
1356
1357 return (DDI_FAILURE);
1358 }
1359
1360 /*
1361 * tran_reset - reset either the SCSI bus or target
1362 * @ap:
1363 * @level:
1364 *
1365 * The tran_reset() entry point for a SCSI HBA driver is called to reset either
1366 * the SCSI bus or a particular SCSI target device. This entry point is called
1367 * when a target driver calls scsi_reset(). The tran_reset() entry point must
1368 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
1369 * particular target or logical unit must be reset.
1370 */
1371 /*ARGSUSED*/
1372 static int
1373 megasas_tran_reset(struct scsi_address *ap, int level)
1374 {
1375 struct megasas_instance *instance = ADDR2MEGA(ap);
1376
1377 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1378
1379 if (wait_for_outstanding(instance)) {
1380 return (DDI_FAILURE);
1381 } else {
1382 return (DDI_SUCCESS);
1383 }
1384 }
1385
1386 /*
1387 * tran_bus_reset - reset the SCSI bus
1388 * @dip:
1389 * @level:
1390 *
1391 * The tran_bus_reset() vector in the scsi_hba_tran structure should be
1392 * initialized during the HBA driver's attach(). The vector should point to
1393 * an HBA entry point that is to be called when a user initiates a bus reset.
1394 * Implementation is hardware specific. If the HBA driver cannot reset the
1395 * SCSI bus without affecting the targets, the driver should fail RESET_BUS
1396 * or not initialize this vector.
1397 */
1398 /*ARGSUSED*/
1399 static int
1400 megasas_tran_bus_reset(dev_info_t *dip, int level)
1401 {
1402 int instance_no = ddi_get_instance(dip);
1403
1404 struct megasas_instance *instance = ddi_get_soft_state(megasas_state,
1405 instance_no);
1406
1407 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1408
1409 if (wait_for_outstanding(instance)) {
1410 return (DDI_FAILURE);
1411 } else {
1412 return (DDI_SUCCESS);
1413 }
1414 }
1415
1416 /*
1417 * tran_getcap - get one of a set of SCSA-defined capabilities
1418 * @ap:
1419 * @cap:
1420 * @whom:
1421 *
1422 * The target driver can request the current setting of the capability for a
1423 * particular target by setting the whom parameter to nonzero. A whom value of
1424 * zero indicates a request for the current setting of the general capability
1425 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
1426 * for undefined capabilities or the current value of the requested capability.
1427 */
1428 /*ARGSUSED*/
1429 static int
1430 megasas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
1431 {
1432 int rval = 0;
1433
1434 struct megasas_instance *instance = ADDR2MEGA(ap);
1435
1436 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1437
1438 /* we do allow inquiring about capabilities for other targets */
1439 if (cap == NULL) {
1440 return (-1);
1441 }
1442
1443 switch (scsi_hba_lookup_capstr(cap)) {
1444 case SCSI_CAP_DMA_MAX:
1445 /* Limit to 16MB max transfer */
1446 rval = megasas_max_cap_maxxfer;
1447 break;
1448 case SCSI_CAP_MSG_OUT:
1449 rval = 1;
1450 break;
1451 case SCSI_CAP_DISCONNECT:
1452 rval = 0;
1453 break;
1454 case SCSI_CAP_SYNCHRONOUS:
1455 rval = 0;
1456 break;
1457 case SCSI_CAP_WIDE_XFER:
1458 rval = 1;
1459 break;
1460 case SCSI_CAP_TAGGED_QING:
1461 rval = 1;
1462 break;
1463 case SCSI_CAP_UNTAGGED_QING:
1464 rval = 1;
1465 break;
1466 case SCSI_CAP_PARITY:
1467 rval = 1;
1468 break;
1469 case SCSI_CAP_INITIATOR_ID:
1470 rval = instance->init_id;
1471 break;
1472 case SCSI_CAP_ARQ:
1473 rval = 1;
1474 break;
1475 case SCSI_CAP_LINKED_CMDS:
1476 rval = 0;
1477 break;
1478 case SCSI_CAP_RESET_NOTIFICATION:
1479 rval = 1;
1480 break;
1481 case SCSI_CAP_GEOMETRY:
1482 rval = -1;
1483
1484 break;
1485 default:
1486 con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
1487 scsi_hba_lookup_capstr(cap)));
1488 rval = -1;
1489 break;
1490 }
1491
1492 return (rval);
1493 }
1494
1495 /*
1496 * tran_setcap - set one of a set of SCSA-defined capabilities
1497 * @ap:
1498 * @cap:
1499 * @value:
1500 * @whom:
1501 *
1502 * The target driver might request that the new value be set for a particular
1503 * target by setting the whom parameter to nonzero. A whom value of zero
1504 * means that request is to set the new value for the SCSI bus or for adapter
1505 * hardware in general.
1506 * The tran_setcap() should return the following values as appropriate:
1507 * - -1 for undefined capabilities
1508 * - 0 if the HBA driver cannot set the capability to the requested value
1509 * - 1 if the HBA driver is able to set the capability to the requested value
1510 */
1511 /*ARGSUSED*/
1512 static int
1513 megasas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
1514 {
1515 int rval = 1;
1516
1517 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1518
1519 /* We only allow setting capabilities on a per-target (whom != 0) basis */
1520 if (cap == NULL || whom == 0) {
1521 return (-1);
1522 }
1523
1524 switch (scsi_hba_lookup_capstr(cap)) {
1525 case SCSI_CAP_DMA_MAX:
1526 case SCSI_CAP_MSG_OUT:
1527 case SCSI_CAP_PARITY:
1528 case SCSI_CAP_LINKED_CMDS:
1529 case SCSI_CAP_RESET_NOTIFICATION:
1530 case SCSI_CAP_DISCONNECT:
1531 case SCSI_CAP_SYNCHRONOUS:
1532 case SCSI_CAP_UNTAGGED_QING:
1533 case SCSI_CAP_WIDE_XFER:
1534 case SCSI_CAP_INITIATOR_ID:
1535 case SCSI_CAP_ARQ:
1536 /*
1537 * None of these are settable via
1538 * the capability interface.
1539 */
1540 break;
1541 case SCSI_CAP_TAGGED_QING:
1542 rval = 1;
1543 break;
1544 case SCSI_CAP_SECTOR_SIZE:
1545 rval = 1;
1546 break;
1547
1548 case SCSI_CAP_TOTAL_SECTORS:
1549 rval = 1;
1550 break;
1551 default:
1552 rval = -1;
1553 break;
1554 }
1555
1556 return (rval);
1557 }
1558
1559 /*
1560 * tran_destroy_pkt - deallocate scsi_pkt structure
1561 * @ap:
1562 * @pkt:
1563 *
1564 * The tran_destroy_pkt() entry point is the HBA driver function that
1565 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
1566 * called when the target driver calls scsi_destroy_pkt(). The
1567 * tran_destroy_pkt() entry point must free any DMA resources that have been
1568 * allocated for the packet. An implicit DMA synchronization occurs if the
1569 * DMA resources are freed and any cached data remains after the completion
1570 * of the transfer.
1571 */
1572 static void
1573 megasas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1574 {
1575 struct scsa_cmd *acmd = PKT2CMD(pkt);
1576
1577 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1578
1579 if (acmd->cmd_flags & CFLAG_DMAVALID) {
1580 acmd->cmd_flags &= ~CFLAG_DMAVALID;
1581
1582 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
1583
1584 ddi_dma_free_handle(&acmd->cmd_dmahandle);
1585
1586 acmd->cmd_dmahandle = NULL;
1587 }
1588
1589 /* free the pkt */
1590 scsi_hba_pkt_free(ap, pkt);
1591 }
1592
1593 /*
1594 * tran_dmafree - deallocates DMA resources
1595 * @ap:
1596 * @pkt:
1597 *
1598 * The tran_dmafree() entry point deallocates DMA resources that have been
1599 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
1600 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
1601 * free only DMA resources allocated for a scsi_pkt structure, not the
1602 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
1603 * implicitly performed.
1604 */
1605 /*ARGSUSED*/
1606 static void
1607 megasas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1608 {
1609 register struct scsa_cmd *acmd = PKT2CMD(pkt);
1610
1611 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1612
1613 if (acmd->cmd_flags & CFLAG_DMAVALID) {
1614 acmd->cmd_flags &= ~CFLAG_DMAVALID;
1615
1616 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
1617
1618 ddi_dma_free_handle(&acmd->cmd_dmahandle);
1619
1620 acmd->cmd_dmahandle = NULL;
1621 }
1622 }
1623
1624 /*
1625 * tran_sync_pkt - synchronize the DMA object allocated
1626 * @ap:
1627 * @pkt:
1628 *
1629 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
1630 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
1631 * entry point is called when the target driver calls scsi_sync_pkt(). If the
1632 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
1633 * must synchronize the CPU's view of the data. If the data transfer direction
1634 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
1635 * device's view of the data.
1636 */
1637 /*ARGSUSED*/
1638 static void
1639 megasas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1640 {
1641 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1642
1643 /*
1644 * The ddi_dma_sync() call below is disabled because the sync is
1645 * already performed for each I/O in the ISR.
1646 */
1647 #if 0
1648 int i;
1649
1650 register struct scsa_cmd *acmd = PKT2CMD(pkt);
1651
1652 if (acmd->cmd_flags & CFLAG_DMAVALID) {
1653 (void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
1654 acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
1655 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
1656 }
1657 #endif
1658 }
1659
1660 /*ARGSUSED*/
1661 static int
1662 megasas_tran_quiesce(dev_info_t *dip)
1663 {
1664 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1665
1666 return (1);
1667 }
1668
1669 /*ARGSUSED*/
1670 static int
1671 megasas_tran_unquiesce(dev_info_t *dip)
1672 {
1673 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1674
1675 return (1);
1676 }
1677
1678 /*
1679 * megasas_isr(caddr_t)
1680 *
1681 * The Interrupt Service Routine
1682 *
1683 * Collect status for all completed commands and do callback
1684 *
1685 */
1686 static uint_t
1687 megasas_isr(struct megasas_instance *instance)
1688 {
1689 int need_softintr;
1690 uint32_t producer;
1691 uint32_t consumer;
1692 uint32_t context;
1693
1694 struct megasas_cmd *cmd;
1695
1696 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1697
1698 ASSERT(instance);
1699 if (!instance->func_ptr->intr_ack(instance)) {
1700 return (DDI_INTR_UNCLAIMED);
1701 }
1702
1703 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
1704 0, 0, DDI_DMA_SYNC_FORCPU);
1705
1706 if (megasas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
1707 != DDI_SUCCESS) {
1708 megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
1709 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
1710 return (DDI_INTR_UNCLAIMED);
1711 }
1712
1713 producer = *instance->producer;
1714 consumer = *instance->consumer;
1715
1716 con_log(CL_ANN1, (CE_CONT, " producer %x consumer %x ",
1717 producer, consumer));
1718
1719 mutex_enter(&instance->completed_pool_mtx);
1720
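/*
 * Walk the reply queue from consumer to producer; each entry is
 * the context (index) of a completed command, which is moved onto
 * the completed pool for the soft ISR to finish off.
 */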
1721 while (consumer != producer) {
1722 context = instance->reply_queue[consumer];
1723 cmd = instance->cmd_list[context];
1724 mlist_add_tail(&cmd->list, &instance->completed_pool_list);
1725
1726 consumer++;
1727 if (consumer == (instance->max_fw_cmds + 1)) {
1728 consumer = 0;
1729 }
1730 }
1731
1732 mutex_exit(&instance->completed_pool_mtx);
1733
1734 *instance->consumer = consumer;
1735 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
1736 0, 0, DDI_DMA_SYNC_FORDEV);
1737
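/* Don't re-trigger the soft ISR if it is already running. */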
1738 if (instance->softint_running) {
1739 need_softintr = 0;
1740 } else {
1741 need_softintr = 1;
1742 }
1743
1744 if (instance->isr_level == HIGH_LEVEL_INTR) {
1745 if (need_softintr) {
1746 ddi_trigger_softintr(instance->soft_intr_id);
1747 }
1748 } else {
1749 /*
1750 * Not a high-level interrupt, therefore call the soft level
1751 * interrupt explicitly
1752 */
1753 (void) megasas_softintr(instance);
1754 }
1755
1756 return (DDI_INTR_CLAIMED);
1757 }
1758
1759
1760 /*
1761 * ************************************************************************** *
1762 * *
1763 * libraries *
1764 * *
1765 * ************************************************************************** *
1766 */
1767 /*
1768 * get_mfi_pkt : Get a command from the free pool
1769 */
1770 static struct megasas_cmd *
1771 get_mfi_pkt(struct megasas_instance *instance)
1772 {
1773 mlist_t *head = &instance->cmd_pool_list;
1774 struct megasas_cmd *cmd = NULL;
1775
1776 mutex_enter(&instance->cmd_pool_mtx);
1777 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
1778
1779 if (!mlist_empty(head)) {
1780 cmd = mlist_entry(head->next, struct megasas_cmd, list);
1781 mlist_del_init(head->next);
1782 }
1783 if (cmd != NULL)
1784 cmd->pkt = NULL;
1785 mutex_exit(&instance->cmd_pool_mtx);
1786
1787 return (cmd);
1788 }
1789
1790 /*
1791 * return_mfi_pkt : Return a cmd to free command pool
1792 */
1793 static void
1794 return_mfi_pkt(struct megasas_instance *instance, struct megasas_cmd *cmd)
1795 {
1796 mutex_enter(&instance->cmd_pool_mtx);
1797 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
1798
1799 mlist_add(&cmd->list, &instance->cmd_pool_list);
1800
1801 mutex_exit(&instance->cmd_pool_mtx);
1802 }
1803
1804 /*
1805 * destroy_mfi_frame_pool
1806 */
1807 static void
1808 destroy_mfi_frame_pool(struct megasas_instance *instance)
1809 {
1810 int i;
1811 uint32_t max_cmd = instance->max_fw_cmds;
1812
1813 struct megasas_cmd *cmd;
1814
1815 /* free the frame DMA object of every command */
1816 for (i = 0; i < max_cmd; i++) {
1817
1818 cmd = instance->cmd_list[i];
1819
1820 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
1821 (void) mega_free_dma_obj(instance, cmd->frame_dma_obj);
1822
1823 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
1824 }
1825
1826 }
1827
1828 /*
1829 * create_mfi_frame_pool
1830 */
1831 static int
1832 create_mfi_frame_pool(struct megasas_instance *instance)
1833 {
1834 int i = 0;
1835 int cookie_cnt;
1836 uint16_t max_cmd;
1837 uint16_t sge_sz;
1838 uint32_t sgl_sz;
1839 uint32_t tot_frame_size;
1840
1841 struct megasas_cmd *cmd;
1842
1843 max_cmd = instance->max_fw_cmds;
1844
1845 sge_sz = sizeof (struct megasas_sge64);
1846
1847 /* calculate the number of 64-byte frames required for the SGL */
1848 sgl_sz = sge_sz * instance->max_num_sge;
1849 tot_frame_size = sgl_sz + MEGAMFI_FRAME_SIZE + SENSE_LENGTH;
1850
1851 con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
1852 "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));
1853
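/*
 * Each command gets one contiguous DMA chunk: the MFI frame and
 * SGL at the front, with a SENSE_LENGTH sense buffer carved out
 * of the tail of the chunk.
 */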
1854 while (i < max_cmd) {
1855 cmd = instance->cmd_list[i];
1856
1857 cmd->frame_dma_obj.size = tot_frame_size;
1858 cmd->frame_dma_obj.dma_attr = megasas_generic_dma_attr;
1859 cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1860 cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1861 cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
1862 cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;
1863
1864
1865 cookie_cnt = mega_alloc_dma_obj(instance, &cmd->frame_dma_obj);
1866
1867 if (cookie_cnt == -1 || cookie_cnt > 1) {
1868 con_log(CL_ANN, (CE_WARN,
1869 "create_mfi_frame_pool: could not alloc."));
1870 return (DDI_FAILURE);
1871 }
1872
1873 bzero(cmd->frame_dma_obj.buffer, tot_frame_size);
1874
1875 cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
1876 cmd->frame = (union megasas_frame *)cmd->frame_dma_obj.buffer;
1877 cmd->frame_phys_addr =
1878 cmd->frame_dma_obj.dma_cookie[0].dmac_address;
1879
1880 cmd->sense = (uint8_t *)(((unsigned long)
1881 cmd->frame_dma_obj.buffer) +
1882 tot_frame_size - SENSE_LENGTH);
1883 cmd->sense_phys_addr =
1884 cmd->frame_dma_obj.dma_cookie[0].dmac_address +
1885 tot_frame_size - SENSE_LENGTH;
1886
1887 if (!cmd->frame || !cmd->sense) {
1888 con_log(CL_ANN, (CE_NOTE,
1889 "create_mfi_frame_pool: frame alloc failed\n"));
1890
1891 return (DDI_FAILURE);
1892 }
1893
1894 cmd->frame->io.context = cmd->index;
1895 i++;
1896
1897 con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
1898 cmd->frame->io.context, cmd->frame_phys_addr));
1899 }
1900
1901 return (DDI_SUCCESS);
1902 }
1903
1904 /*
1905 * free_additional_dma_buffer
1906 */
1907 static void
1908 free_additional_dma_buffer(struct megasas_instance *instance)
1909 {
1910 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
1911 (void) mega_free_dma_obj(instance,
1912 instance->mfi_internal_dma_obj);
1913 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
1914 }
1915
1916 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
1917 (void) mega_free_dma_obj(instance,
1918 instance->mfi_evt_detail_obj);
1919 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
1920 }
1921 }
1922
1923 /*
1924 * alloc_additional_dma_buffer
1925 */
1926 static int
1927 alloc_additional_dma_buffer(struct megasas_instance *instance)
1928 {
1929 uint32_t reply_q_sz;
1930 uint32_t internal_buf_size = PAGESIZE*2;
1931
1932 /* max cmds plus 1 + producer & consumer */
1933 reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);
1934
1935 instance->mfi_internal_dma_obj.size = internal_buf_size;
1936 instance->mfi_internal_dma_obj.dma_attr = megasas_generic_dma_attr;
1937 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1938 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
1939 0xFFFFFFFFU;
1940 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
1941
1942 if (mega_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj)
1943 != 1) {
1944 con_log(CL_ANN, (CE_WARN, "megaraid: could not alloc reply Q"));
1945 return (DDI_FAILURE);
1946 }
1947
1948 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
1949
1950 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
1951
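/*
 * Carve up the internal buffer: the producer and consumer indexes
 * occupy the first two words, the reply queue follows, and the
 * remainder is used as a scratch buffer for internal commands.
 */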
1952 instance->producer = (uint32_t *)((unsigned long)
1953 instance->mfi_internal_dma_obj.buffer);
1954 instance->consumer = (uint32_t *)((unsigned long)
1955 instance->mfi_internal_dma_obj.buffer + 4);
1956 instance->reply_queue = (uint32_t *)((unsigned long)
1957 instance->mfi_internal_dma_obj.buffer + 8);
1958 instance->internal_buf = (caddr_t)(((unsigned long)
1959 instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
1960 instance->internal_buf_dmac_add =
1961 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
1962 reply_q_sz;
1963 instance->internal_buf_size = internal_buf_size -
1964 (reply_q_sz + 8);
1965
1966 /* allocate evt_detail */
1967 instance->mfi_evt_detail_obj.size = sizeof (struct megasas_evt_detail);
1968 instance->mfi_evt_detail_obj.dma_attr = megasas_generic_dma_attr;
1969 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1970 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1971 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
1972 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;
1973
1974 if (mega_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj) != 1) {
con_log(CL_ANN, (CE_WARN, "alloc_additional_dma_buffer: "
"could not allocate data transfer buffer."));
1977 return (DDI_FAILURE);
1978 }
1979
1980 bzero(instance->mfi_evt_detail_obj.buffer,
1981 sizeof (struct megasas_evt_detail));
1982
1983 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
1984
1985 return (DDI_SUCCESS);
1986 }
1987
1988 /*
1989 * free_space_for_mfi
1990 */
1991 static void
1992 free_space_for_mfi(struct megasas_instance *instance)
1993 {
1994 int i;
1995 uint32_t max_cmd = instance->max_fw_cmds;
1996
1997 /* already freed */
1998 if (instance->cmd_list == NULL) {
1999 return;
2000 }
2001
2002 free_additional_dma_buffer(instance);
2003
2004 /* first free the MFI frame pool */
2005 destroy_mfi_frame_pool(instance);
2006
2007 /* free all the commands in the cmd_list */
2008 for (i = 0; i < instance->max_fw_cmds; i++) {
2009 kmem_free(instance->cmd_list[i],
2010 sizeof (struct megasas_cmd));
2011
2012 instance->cmd_list[i] = NULL;
2013 }
2014
2015 /* free the cmd_list buffer itself */
2016 kmem_free(instance->cmd_list,
2017 sizeof (struct megasas_cmd *) * max_cmd);
2018
2019 instance->cmd_list = NULL;
2020
2021 INIT_LIST_HEAD(&instance->cmd_pool_list);
2022 }
2023
2024 /*
2025 * alloc_space_for_mfi
2026 */
2027 static int
2028 alloc_space_for_mfi(struct megasas_instance *instance)
2029 {
2030 int i;
2031 uint32_t max_cmd;
2032 size_t sz;
2033
2034 struct megasas_cmd *cmd;
2035
2036 max_cmd = instance->max_fw_cmds;
2037 sz = sizeof (struct megasas_cmd *) * max_cmd;
2038
2039 /*
2040 * instance->cmd_list is an array of struct megasas_cmd pointers.
2041 * Allocate the dynamic array first and then allocate individual
2042 * commands.
2043 */
2044 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
2045 ASSERT(instance->cmd_list);
2046
2047 for (i = 0; i < max_cmd; i++) {
2048 instance->cmd_list[i] = kmem_zalloc(sizeof (struct megasas_cmd),
2049 KM_SLEEP);
2050 ASSERT(instance->cmd_list[i]);
2051 }
2052
2053 INIT_LIST_HEAD(&instance->cmd_pool_list);
2054
2055 /* add all the commands to command pool (instance->cmd_pool) */
2056 for (i = 0; i < max_cmd; i++) {
2057 cmd = instance->cmd_list[i];
2058 cmd->index = i;
2059
2060 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2061 }
2062
2063 /* create a frame pool and assign one frame to each cmd */
2064 if (create_mfi_frame_pool(instance)) {
2065 con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool\n"));
2066 return (DDI_FAILURE);
2067 }
2068
/* allocate the additional DMA buffers (reply queue, internal buf, evt_detail) */
if (alloc_additional_dma_buffer(instance)) {
con_log(CL_ANN, (CE_NOTE, "error allocating additional DMA buffers\n"));
2072 return (DDI_FAILURE);
2073 }
2074
2075 return (DDI_SUCCESS);
2076 }
2077
2078 /*
2079 * get_ctrl_info
2080 */
2081 static int
2082 get_ctrl_info(struct megasas_instance *instance,
2083 struct megasas_ctrl_info *ctrl_info)
2084 {
2085 int ret = 0;
2086
2087 struct megasas_cmd *cmd;
2088 struct megasas_dcmd_frame *dcmd;
2089 struct megasas_ctrl_info *ci;
2090
2091 cmd = get_mfi_pkt(instance);
2092
2093 if (!cmd) {
2094 con_log(CL_ANN, (CE_WARN,
2095 "Failed to get a cmd for ctrl info\n"));
2096 return (DDI_FAILURE);
2097 }
2098
2099 dcmd = &cmd->frame->dcmd;
2100
2101 ci = (struct megasas_ctrl_info *)instance->internal_buf;
2102
2103 if (!ci) {
2104 con_log(CL_ANN, (CE_WARN,
2105 "Failed to alloc mem for ctrl info\n"));
2106 return_mfi_pkt(instance, cmd);
2107 return (DDI_FAILURE);
2108 }
2109
2110 (void) memset(ci, 0, sizeof (struct megasas_ctrl_info));
2111
2112 /* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
2113 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2114
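/*
 * Build a DCMD frame that fetches the controller info in polled mode
 * into the pre-allocated internal buffer through a single SGE.
 */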
2115 dcmd->cmd = MFI_CMD_OP_DCMD;
2116 dcmd->cmd_status = MFI_CMD_STATUS_POLL_MODE;
2117 dcmd->sge_count = 1;
2118 dcmd->flags = MFI_FRAME_DIR_READ;
2119 dcmd->timeout = 0;
2120 dcmd->data_xfer_len = sizeof (struct megasas_ctrl_info);
2121 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
2122 dcmd->sgl.sge32[0].phys_addr = instance->internal_buf_dmac_add;
2123 dcmd->sgl.sge32[0].length = sizeof (struct megasas_ctrl_info);
2124
2125 cmd->frame_count = 1;
2126
2127 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2128 ret = 0;
2129 (void) memcpy(ctrl_info, ci, sizeof (struct megasas_ctrl_info));
2130 } else {
2131 con_log(CL_ANN, (CE_WARN, "get_ctrl_info: Ctrl info failed\n"));
2132 ret = -1;
2133 }
2134
2135 return_mfi_pkt(instance, cmd);
2136 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) {
2137 ret = -1;
2138 }
2139
2140 return (ret);
2141 }
2142
2143 /*
2144 * abort_aen_cmd
2145 */
2146 static int
2147 abort_aen_cmd(struct megasas_instance *instance,
2148 struct megasas_cmd *cmd_to_abort)
2149 {
2150 int ret = 0;
2151
2152 struct megasas_cmd *cmd;
2153 struct megasas_abort_frame *abort_fr;
2154
2155 cmd = get_mfi_pkt(instance);
2156
2157 if (!cmd) {
con_log(CL_ANN, (CE_WARN,
"Failed to get a cmd to abort the AEN\n"));
2160 return (DDI_FAILURE);
2161 }
2162
2163 abort_fr = &cmd->frame->abort;
2164
2165 /* prepare and issue the abort frame */
2166 abort_fr->cmd = MFI_CMD_OP_ABORT;
2167 abort_fr->cmd_status = MFI_CMD_STATUS_SYNC_MODE;
2168 abort_fr->flags = 0;
2169 abort_fr->abort_context = cmd_to_abort->index;
2170 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
2171 abort_fr->abort_mfi_phys_addr_hi = 0;
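/*
 * The firmware identifies the command to abort by its context (the
 * command index) and by the physical address of its MFI frame.
 */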
2172
2173 instance->aen_cmd->abort_aen = 1;
2174
2175 cmd->sync_cmd = MEGASAS_TRUE;
2176 cmd->frame_count = 1;
2177
2178 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
2179 con_log(CL_ANN, (CE_WARN,
2180 "abort_aen_cmd: issue_cmd_in_sync_mode failed\n"));
2181 ret = -1;
2182 } else {
2183 ret = 0;
2184 }
2185
2186 instance->aen_cmd->abort_aen = 1;
2187 instance->aen_cmd = 0;
2188
2189 return_mfi_pkt(instance, cmd);
2190 (void) megasas_common_check(instance, cmd);
2191
2192 return (ret);
2193 }
2194
2195 /*
2196 * init_mfi
2197 */
2198 static int
2199 init_mfi(struct megasas_instance *instance)
2200 {
2201 off_t reglength;
2202 struct megasas_cmd *cmd;
2203 struct megasas_ctrl_info ctrl_info;
2204 struct megasas_init_frame *init_frame;
2205 struct megasas_init_queue_info *initq_info;
2206
if ((ddi_dev_regsize(instance->dip, REGISTER_SET_IO, &reglength)
2208 != DDI_SUCCESS) || reglength < MINIMUM_MFI_MEM_SZ) {
2209 return (DDI_FAILURE);
2210 }
2211
2212 if (reglength > DEFAULT_MFI_MEM_SZ) {
2213 reglength = DEFAULT_MFI_MEM_SZ;
2214 con_log(CL_DLEVEL1, (CE_NOTE,
2215 "mega: register length to map is 0x%lx bytes", reglength));
2216 }
2217
2218 if (ddi_regs_map_setup(instance->dip, REGISTER_SET_IO,
2219 &instance->regmap, 0, reglength, &endian_attr,
2220 &instance->regmap_handle) != DDI_SUCCESS) {
2221 con_log(CL_ANN, (CE_NOTE,
2222 "megaraid: couldn't map control registers"));
2223
2224 goto fail_mfi_reg_setup;
2225 }
2226
2227 /* we expect the FW state to be READY */
2228 if (mfi_state_transition_to_ready(instance)) {
2229 con_log(CL_ANN, (CE_WARN, "megaraid: F/W is not ready"));
2230 goto fail_ready_state;
2231 }
2232
2233 /* get various operational parameters from status register */
2234 instance->max_num_sge =
2235 (instance->func_ptr->read_fw_status_reg(instance) &
2236 0xFF0000) >> 0x10;
2237 /*
2238 * Reduce the max supported cmds by 1. This is to ensure that the
2239 * reply_q_sz (1 more than the max cmd that driver may send)
2240 * does not exceed max cmds that the FW can support
2241 */
2242 instance->max_fw_cmds =
2243 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
2244 instance->max_fw_cmds = instance->max_fw_cmds - 1;
2245
2246 instance->max_num_sge =
2247 (instance->max_num_sge > MEGASAS_MAX_SGE_CNT) ?
2248 MEGASAS_MAX_SGE_CNT : instance->max_num_sge;
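/*
 * The FW status register packs the supported SGE count into bits
 * 16-23 and the supported command count into bits 0-15; e.g. a raw
 * value of 0x00501010 would advertise 0x50 SGEs and 0x1010 commands,
 * before the adjustments applied above.
 */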
2249
2250 /* create a pool of commands */
2251 if (alloc_space_for_mfi(instance))
2252 goto fail_alloc_fw_space;
2253
2254 /* disable interrupt for initial preparation */
2255 instance->func_ptr->disable_intr(instance);
2256
/*
* Prepare an init frame. Note that the init frame points to a queue
* info structure. Each frame has an SGL allocated after the first 64
* bytes. Since this frame needs no SGL, we use the SGL space for the
* queue info structure.
*/
cmd = get_mfi_pkt(instance);

if (cmd == NULL) {
con_log(CL_ANN, (CE_WARN,
"init_mfi: failed to get a cmd for the init frame"));
goto fail_fw_init;
}

2265 init_frame = (struct megasas_init_frame *)cmd->frame;
2266 initq_info = (struct megasas_init_queue_info *)
2267 ((unsigned long)init_frame + 64);
2268
2269 (void) memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
2270 (void) memset(initq_info, 0, sizeof (struct megasas_init_queue_info));
2271
2272 initq_info->init_flags = 0;
2273
2274 initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
2275
2276 initq_info->producer_index_phys_addr_hi = 0;
2277 initq_info->producer_index_phys_addr_lo =
2278 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
2279
2280 initq_info->consumer_index_phys_addr_hi = 0;
2281 initq_info->consumer_index_phys_addr_lo =
2282 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4;
2283
2284 initq_info->reply_queue_start_phys_addr_hi = 0;
2285 initq_info->reply_queue_start_phys_addr_lo =
2286 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8;
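/*
 * The offsets 0, 4 and 8 used above mirror the carving of
 * mfi_internal_dma_obj done in alloc_additional_dma_buffer(), so the
 * FW and the driver agree on where the producer index, the consumer
 * index and the reply queue live.
 */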
2287
2288 init_frame->cmd = MFI_CMD_OP_INIT;
2289 init_frame->cmd_status = MFI_CMD_STATUS_POLL_MODE;
2290 init_frame->flags = 0;
2291 init_frame->queue_info_new_phys_addr_lo =
2292 cmd->frame_phys_addr + 64;
2293 init_frame->queue_info_new_phys_addr_hi = 0;
2294
2295 init_frame->data_xfer_len = sizeof (struct megasas_init_queue_info);
2296
2297 cmd->frame_count = 1;
2298
2299 /* issue the init frame in polled mode */
2300 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2301 con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
2302 goto fail_fw_init;
2303 }
2304
2305 return_mfi_pkt(instance, cmd);
2306 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) {
2307 goto fail_fw_init;
2308 }
2309
2310 /* gather misc FW related information */
2311 if (!get_ctrl_info(instance, &ctrl_info)) {
2312 instance->max_sectors_per_req = ctrl_info.max_request_size;
2313 con_log(CL_ANN1, (CE_NOTE, "product name %s ld present %d",
2314 ctrl_info.product_name, ctrl_info.ld_present_count));
2315 } else {
2316 instance->max_sectors_per_req = instance->max_num_sge *
2317 PAGESIZE / 512;
2318 }
2319
2320 if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2321 goto fail_fw_init;
2322 }
2323
2324 return (0);
2325
2326 fail_fw_init:
2327 fail_alloc_fw_space:
2328
2329 free_space_for_mfi(instance);
2330
2331 fail_ready_state:
2332 ddi_regs_map_free(&instance->regmap_handle);
2333
2334 fail_mfi_reg_setup:
2335 return (DDI_FAILURE);
2336 }
2337
2338 /*
2339 * mfi_state_transition_to_ready : Move the FW to READY state
2340 *
2341 * @reg_set : MFI register set
2342 */
2343 static int
2344 mfi_state_transition_to_ready(struct megasas_instance *instance)
2345 {
2346 int i;
2347 uint8_t max_wait;
2348 uint32_t fw_ctrl;
2349 uint32_t fw_state;
2350 uint32_t cur_state;
2351
2352 fw_state =
2353 instance->func_ptr->read_fw_status_reg(instance) & MFI_STATE_MASK;
2354 con_log(CL_ANN1, (CE_NOTE,
2355 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));
2356
2357 while (fw_state != MFI_STATE_READY) {
2358 con_log(CL_ANN, (CE_NOTE,
2359 "mfi_state_transition_to_ready:FW state%x", fw_state));
2360
2361 switch (fw_state) {
2362 case MFI_STATE_FAULT:
2363 con_log(CL_ANN, (CE_NOTE,
2364 "megasas: FW in FAULT state!!"));
2365
2366 return (-ENODEV);
2367 case MFI_STATE_WAIT_HANDSHAKE:
2368 /* set the CLR bit in IMR0 */
2369 con_log(CL_ANN, (CE_NOTE,
2370 "megasas: FW waiting for HANDSHAKE"));
2371 /*
2372 * PCI_Hot Plug: MFI F/W requires
2373 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
2374 * to be set
2375 */
2376 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
2377 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
2378 MFI_INIT_HOTPLUG, instance);
2379
2380 max_wait = 2;
2381 cur_state = MFI_STATE_WAIT_HANDSHAKE;
2382 break;
2383 case MFI_STATE_BOOT_MESSAGE_PENDING:
2384 /* set the CLR bit in IMR0 */
2385 con_log(CL_ANN, (CE_NOTE,
2386 "megasas: FW state boot message pending"));
2387 /*
2388 * PCI_Hot Plug: MFI F/W requires
2389 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
2390 * to be set
2391 */
2392 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);
2393
2394 max_wait = 10;
2395 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
2396 break;
2397 case MFI_STATE_OPERATIONAL:
2398 /* bring it to READY state; assuming max wait 2 secs */
2399 instance->func_ptr->disable_intr(instance);
2400 con_log(CL_ANN1, (CE_NOTE,
2401 "megasas: FW in OPERATIONAL state"));
2402 /*
2403 * PCI_Hot Plug: MFI F/W requires
2404 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
2405 * to be set
2406 */
2407 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
2408 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
2409
2410 max_wait = 10;
2411 cur_state = MFI_STATE_OPERATIONAL;
2412 break;
2413 case MFI_STATE_UNDEFINED:
2414 /* this state should not last for more than 2 seconds */
2415 con_log(CL_ANN, (CE_NOTE, "FW state undefined\n"));
2416
2417 max_wait = 2;
2418 cur_state = MFI_STATE_UNDEFINED;
2419 break;
2420 case MFI_STATE_BB_INIT:
2421 max_wait = 2;
2422 cur_state = MFI_STATE_BB_INIT;
2423 break;
2424 case MFI_STATE_FW_INIT:
2425 max_wait = 2;
2426 cur_state = MFI_STATE_FW_INIT;
2427 break;
2428 case MFI_STATE_DEVICE_SCAN:
2429 max_wait = 10;
2430 cur_state = MFI_STATE_DEVICE_SCAN;
2431 break;
2432 default:
2433 con_log(CL_ANN, (CE_NOTE,
2434 "megasas: Unknown state 0x%x\n", fw_state));
2435 return (-ENODEV);
2436 }
2437
2438 /* the cur_state should not last for more than max_wait secs */
2439 for (i = 0; i < (max_wait * MILLISEC); i++) {
2440 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
2441 fw_state =
2442 instance->func_ptr->read_fw_status_reg(instance) &
2443 MFI_STATE_MASK;
2444
2445 if (fw_state == cur_state) {
2446 delay(1 * drv_usectohz(MILLISEC));
2447 } else {
2448 break;
2449 }
2450 }
2451
2452 /* return error if fw_state hasn't changed after max_wait */
2453 if (fw_state == cur_state) {
2454 con_log(CL_ANN, (CE_NOTE,
2455 "FW state hasn't changed in %d secs\n", max_wait));
2456 return (-ENODEV);
2457 }
}
2459
2460 fw_ctrl = RD_IB_DOORBELL(instance);
2461
2462 con_log(CL_ANN1, (CE_NOTE,
2463 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
2464
2465 /*
2466 * Write 0xF to the doorbell register to do the following.
2467 * - Abort all outstanding commands (bit 0).
2468 * - Transition from OPERATIONAL to READY state (bit 1).
* - Discard a (possibly) posted low MFA in 64-bit mode (bit 2).
* - Set to release the FW to continue running, i.e. the BIOS
*   handshake (bit 3).
2472 */
2473 WR_IB_DOORBELL(0xF, instance);
2474
2475 if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2476 return (-ENODEV);
2477 }
2478 return (0);
2479 }
2480
2481 /*
2482 * get_seq_num
2483 */
2484 static int
2485 get_seq_num(struct megasas_instance *instance,
2486 struct megasas_evt_log_info *eli)
2487 {
2488 int ret = 0;
2489
2490 dma_obj_t dcmd_dma_obj;
2491 struct megasas_cmd *cmd;
2492 struct megasas_dcmd_frame *dcmd;
2493
2494 cmd = get_mfi_pkt(instance);
2495
2496 if (!cmd) {
2497 cmn_err(CE_WARN, "megasas: failed to get a cmd\n");
2498 return (-ENOMEM);
2499 }
2500
2501 dcmd = &cmd->frame->dcmd;
2502
2503 /* allocate the data transfer buffer */
2504 dcmd_dma_obj.size = sizeof (struct megasas_evt_log_info);
2505 dcmd_dma_obj.dma_attr = megasas_generic_dma_attr;
2506 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2507 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
2508 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
2509 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
2510
2511 if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) {
con_log(CL_ANN, (CE_WARN,
"get_seq_num: could not allocate data transfer buffer."));
return_mfi_pkt(instance, cmd);
return (DDI_FAILURE);
2515 }
2516
2517 (void) memset(dcmd_dma_obj.buffer, 0,
2518 sizeof (struct megasas_evt_log_info));
2519
2520 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2521
2522 dcmd->cmd = MFI_CMD_OP_DCMD;
2523 dcmd->cmd_status = 0;
2524 dcmd->sge_count = 1;
2525 dcmd->flags = MFI_FRAME_DIR_READ;
2526 dcmd->timeout = 0;
2527 dcmd->data_xfer_len = sizeof (struct megasas_evt_log_info);
2528 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
2529 dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_log_info);
2530 dcmd->sgl.sge32[0].phys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
2531
2532 cmd->sync_cmd = MEGASAS_TRUE;
2533 cmd->frame_count = 1;
2534
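/*
 * issue_cmd_in_sync_mode() waits on int_cmd_cv until the soft ISR
 * completes this frame via complete_cmd_in_sync_mode().
 */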
2535 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
2536 cmn_err(CE_WARN, "get_seq_num: "
2537 "failed to issue MR_DCMD_CTRL_EVENT_GET_INFO\n");
2538 ret = -1;
2539 } else {
/* copy the data back into the caller's buffer */
2541 bcopy(dcmd_dma_obj.buffer, eli,
2542 sizeof (struct megasas_evt_log_info));
2543 ret = 0;
2544 }
2545
2546 if (mega_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
2547 ret = -1;
2548
2549 return_mfi_pkt(instance, cmd);
2550 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) {
2551 ret = -1;
2552 }
2553 return (ret);
2554 }
2555
2556 /*
2557 * start_mfi_aen
2558 */
2559 static int
2560 start_mfi_aen(struct megasas_instance *instance)
2561 {
2562 int ret = 0;
2563
2564 struct megasas_evt_log_info eli;
2565 union megasas_evt_class_locale class_locale;
2566
2567 /* get the latest sequence number from FW */
2568 (void) memset(&eli, 0, sizeof (struct megasas_evt_log_info));
2569
2570 if (get_seq_num(instance, &eli)) {
2571 cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num\n");
2572 return (-1);
2573 }
2574
2575 /* register AEN with FW for latest sequence number plus 1 */
2576 class_locale.members.reserved = 0;
2577 class_locale.members.locale = MR_EVT_LOCALE_ALL;
2578 class_locale.members.class = MR_EVT_CLASS_CRITICAL;
2579
2580 ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
2581 class_locale.word);
2582
2583 if (ret) {
2584 cmn_err(CE_WARN, "start_mfi_aen: aen registration failed\n");
2585 return (-1);
2586 }
2587
2588 return (ret);
2589 }
2590
2591 /*
2592 * flush_cache
2593 */
2594 static void
2595 flush_cache(struct megasas_instance *instance)
2596 {
2597 struct megasas_cmd *cmd;
2598 struct megasas_dcmd_frame *dcmd;
2599
2600 if (!(cmd = get_mfi_pkt(instance)))
2601 return;
2602
2603 dcmd = &cmd->frame->dcmd;
2604
2605 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2606
2607 dcmd->cmd = MFI_CMD_OP_DCMD;
2608 dcmd->cmd_status = 0x0;
2609 dcmd->sge_count = 0;
2610 dcmd->flags = MFI_FRAME_DIR_NONE;
2611 dcmd->timeout = 0;
2612 dcmd->data_xfer_len = 0;
2613 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
2614 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
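/*
 * mbox byte 0 asks the FW to flush both the controller cache and
 * the disks' own write caches.
 */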
2615
2616 cmd->frame_count = 1;
2617
2618 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
cmn_err(CE_WARN,
"flush_cache: failed to issue MR_DCMD_CTRL_CACHE_FLUSH\n");
2621 }
2622 con_log(CL_DLEVEL1, (CE_NOTE, "done"));
2623 return_mfi_pkt(instance, cmd);
2624 (void) megasas_common_check(instance, cmd);
2625 }
2626
2627 /*
* service_mfi_aen - Completes an AEN command
2629 * @instance: Adapter soft state
2630 * @cmd: Command to be completed
2631 *
2632 */
2633 static void
2634 service_mfi_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2635 {
2636 uint32_t seq_num;
2637 struct megasas_evt_detail *evt_detail =
2638 (struct megasas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
2639
2640 cmd->cmd_status = cmd->frame->io.cmd_status;
2641
2642 if (cmd->cmd_status == ENODATA) {
2643 cmd->cmd_status = 0;
2644 }
2645
/*
* Log the MFI AEN event to the sysevent queue so that interested
* applications are notified.
*/
2650 if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
2651 NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
2652 int instance_no = ddi_get_instance(instance->dip);
2653 con_log(CL_ANN, (CE_WARN,
2654 "mega%d: Failed to log AEN event", instance_no));
2655 }
2656
2657 /* get copy of seq_num and class/locale for re-registration */
2658 seq_num = evt_detail->seq_num;
2659 seq_num++;
2660 (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
2661 sizeof (struct megasas_evt_detail));
2662
2663 cmd->frame->dcmd.cmd_status = 0x0;
2664 cmd->frame->dcmd.mbox.w[0] = seq_num;
2665
2666 instance->aen_seq_num = seq_num;
2667
2668 cmd->frame_count = 1;
2669
2670 /* Issue the aen registration frame */
2671 instance->func_ptr->issue_cmd(cmd, instance);
2672 }
2673
2674 /*
2675 * complete_cmd_in_sync_mode - Completes an internal command
2676 * @instance: Adapter soft state
2677 * @cmd: Command to be completed
2678 *
2679 * The issue_cmd_in_sync_mode() function waits for a command to complete
2680 * after it issues a command. This function wakes up that waiting routine by
2681 * calling wake_up() on the wait queue.
2682 */
2683 static void
2684 complete_cmd_in_sync_mode(struct megasas_instance *instance,
2685 struct megasas_cmd *cmd)
2686 {
2687 cmd->cmd_status = cmd->frame->io.cmd_status;
2688
2689 cmd->sync_cmd = MEGASAS_FALSE;
2690
2691 if (cmd->cmd_status == ENODATA) {
2692 cmd->cmd_status = 0;
2693 }
2694
2695 cv_broadcast(&instance->int_cmd_cv);
2696 }
2697
2698 /*
* megasas_softintr - the software ISR
* @instance : HBA soft state
*
* Called directly from the interrupt handler when high-level interrupts
* are not in use; otherwise triggered as a soft interrupt.
2704 */
2705 static uint_t
2706 megasas_softintr(struct megasas_instance *instance)
2707 {
2708 struct scsi_pkt *pkt;
2709 struct scsa_cmd *acmd;
2710 struct megasas_cmd *cmd;
2711 struct mlist_head *pos, *next;
2712 mlist_t process_list;
2713 struct megasas_header *hdr;
2714 struct scsi_arq_status *arqstat;
2715
2716 con_log(CL_ANN1, (CE_CONT, "megasas_softintr called"));
2717
2718 ASSERT(instance);
2719 mutex_enter(&instance->completed_pool_mtx);
2720
2721 if (mlist_empty(&instance->completed_pool_list)) {
2722 mutex_exit(&instance->completed_pool_mtx);
2723 return (DDI_INTR_UNCLAIMED);
2724 }
2725
2726 instance->softint_running = 1;
2727
2728 INIT_LIST_HEAD(&process_list);
2729 mlist_splice(&instance->completed_pool_list, &process_list);
2730 INIT_LIST_HEAD(&instance->completed_pool_list);
2731
2732 mutex_exit(&instance->completed_pool_mtx);
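/*
 * The completed commands were spliced onto a private list while
 * completed_pool_mtx was held, so the completion processing below
 * runs without holding that mutex.
 */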
2733
2734 /* perform all callbacks first, before releasing the SCBs */
2735 mlist_for_each_safe(pos, next, &process_list) {
2736 cmd = mlist_entry(pos, struct megasas_cmd, list);
2737
/* sync the cmd frame so the CPU sees the controller's completion */
2739 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
2740 0, 0, DDI_DMA_SYNC_FORCPU);
2741
2742 if (megasas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
2743 DDI_SUCCESS) {
2744 megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2745 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2746 return (DDI_INTR_UNCLAIMED);
2747 }
2748
2749 hdr = &cmd->frame->hdr;
2750
2751 /* remove the internal command from the process list */
2752 mlist_del_init(&cmd->list);
2753
2754 switch (hdr->cmd) {
2755 case MFI_CMD_OP_PD_SCSI:
2756 case MFI_CMD_OP_LD_SCSI:
2757 case MFI_CMD_OP_LD_READ:
2758 case MFI_CMD_OP_LD_WRITE:
2759 /*
2760 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
2761 * could have been issued either through an
2762 * IO path or an IOCTL path. If it was via IOCTL,
2763 * we will send it to internal completion.
2764 */
2765 if (cmd->sync_cmd == MEGASAS_TRUE) {
2766 complete_cmd_in_sync_mode(instance, cmd);
2767 break;
2768 }
2769
2770 /* regular commands */
2771 acmd = cmd->cmd;
2772 pkt = CMD2PKT(acmd);
2773
2774 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2775 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2776 (void) ddi_dma_sync(acmd->cmd_dmahandle,
2777 acmd->cmd_dma_offset,
2778 acmd->cmd_dma_len,
2779 DDI_DMA_SYNC_FORCPU);
2780 }
2781 }
2782
2783 pkt->pkt_reason = CMD_CMPLT;
2784 pkt->pkt_statistics = 0;
2785 pkt->pkt_state = STATE_GOT_BUS
2786 | STATE_GOT_TARGET | STATE_SENT_CMD
2787 | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2788
2789 con_log(CL_ANN1, (CE_CONT,
2790 "CDB[0] = %x completed for %s: size %lx context %x",
2791 pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
2792 acmd->cmd_dmacount, hdr->context));
2793
2794 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2795 struct scsi_inquiry *inq;
2796
2797 if (acmd->cmd_dmacount != 0) {
2798 bp_mapin(acmd->cmd_buf);
2799 inq = (struct scsi_inquiry *)
2800 acmd->cmd_buf->b_un.b_addr;
2801
2802 /* don't expose physical drives to OS */
2803 if (acmd->islogical &&
2804 (hdr->cmd_status == MFI_STAT_OK)) {
2805 display_scsi_inquiry(
2806 (caddr_t)inq);
2807 } else if ((hdr->cmd_status ==
2808 MFI_STAT_OK) && inq->inq_dtype ==
2809 DTYPE_DIRECT) {
2810
2811 display_scsi_inquiry(
2812 (caddr_t)inq);
2813
2814 /* for physical disk */
2815 hdr->cmd_status =
2816 MFI_STAT_DEVICE_NOT_FOUND;
2817 }
2818 }
2819 }
2820
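/* map the MFI completion status onto the SCSA packet and sense data */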
2821 switch (hdr->cmd_status) {
2822 case MFI_STAT_OK:
2823 pkt->pkt_scbp[0] = STATUS_GOOD;
2824 break;
2825 case MFI_STAT_LD_CC_IN_PROGRESS:
2826 case MFI_STAT_LD_RECON_IN_PROGRESS:
/* SJ - this is not the correct way to report these states */
2828 pkt->pkt_scbp[0] = STATUS_GOOD;
2829 break;
2830 case MFI_STAT_LD_INIT_IN_PROGRESS:
2831 con_log(CL_ANN,
2832 (CE_WARN, "Initialization in Progress"));
2833 pkt->pkt_reason = CMD_TRAN_ERR;
2834
2835 break;
2836 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2837 con_log(CL_ANN1, (CE_CONT, "scsi_done error"));
2838
2839 pkt->pkt_reason = CMD_CMPLT;
2840 ((struct scsi_status *)
2841 pkt->pkt_scbp)->sts_chk = 1;
2842
2843 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2844
2845 con_log(CL_ANN,
2846 (CE_WARN, "TEST_UNIT_READY fail"));
2847
2848 } else {
2849 pkt->pkt_state |= STATE_ARQ_DONE;
2850 arqstat = (void *)(pkt->pkt_scbp);
2851 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2852 arqstat->sts_rqpkt_resid = 0;
2853 arqstat->sts_rqpkt_state |=
2854 STATE_GOT_BUS | STATE_GOT_TARGET
2855 | STATE_SENT_CMD
2856 | STATE_XFERRED_DATA;
2857 *(uint8_t *)&arqstat->sts_rqpkt_status =
2858 STATUS_GOOD;
2859
2860 bcopy(cmd->sense,
2861 &(arqstat->sts_sensedata),
2862 acmd->cmd_scblen -
2863 offsetof(struct scsi_arq_status,
2864 sts_sensedata));
2865 }
2866 break;
2867 case MFI_STAT_LD_OFFLINE:
2868 case MFI_STAT_DEVICE_NOT_FOUND:
2869 con_log(CL_ANN1, (CE_CONT,
2870 "device not found error"));
2871 pkt->pkt_reason = CMD_DEV_GONE;
2872 pkt->pkt_statistics = STAT_DISCON;
2873 break;
2874 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2875 pkt->pkt_state |= STATE_ARQ_DONE;
2876 pkt->pkt_reason = CMD_CMPLT;
2877 ((struct scsi_status *)
2878 pkt->pkt_scbp)->sts_chk = 1;
2879
2880 arqstat = (void *)(pkt->pkt_scbp);
2881 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2882 arqstat->sts_rqpkt_resid = 0;
2883 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2884 | STATE_GOT_TARGET | STATE_SENT_CMD
2885 | STATE_XFERRED_DATA;
2886 *(uint8_t *)&arqstat->sts_rqpkt_status =
2887 STATUS_GOOD;
2888
2889 arqstat->sts_sensedata.es_valid = 1;
2890 arqstat->sts_sensedata.es_key =
2891 KEY_ILLEGAL_REQUEST;
2892 arqstat->sts_sensedata.es_class =
2893 CLASS_EXTENDED_SENSE;
2894
2895 /*
2896 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
* ASC: 0x21; ASCQ: 0x00
2898 */
2899 arqstat->sts_sensedata.es_add_code = 0x21;
2900 arqstat->sts_sensedata.es_qual_code = 0x00;
2901
2902 break;
2903
2904 default:
2905 con_log(CL_ANN, (CE_CONT, "Unknown status!"));
2906 pkt->pkt_reason = CMD_TRAN_ERR;
2907
2908 break;
2909 }
2910
2911 atomic_add_16(&instance->fw_outstanding, (-1));
2912
2913 return_mfi_pkt(instance, cmd);
2914
2915 (void) megasas_common_check(instance, cmd);
2916
2917 if (acmd->cmd_dmahandle) {
2918 if (megasas_check_dma_handle(
2919 acmd->cmd_dmahandle) != DDI_SUCCESS) {
2920 ddi_fm_service_impact(instance->dip,
2921 DDI_SERVICE_UNAFFECTED);
2922 pkt->pkt_reason = CMD_TRAN_ERR;
2923 pkt->pkt_statistics = 0;
2924 }
2925 }
2926
2927 /* Call the callback routine */
2928 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
2929 scsi_hba_pkt_comp(pkt);
2930 }
2931
2932 break;
2933 case MFI_CMD_OP_SMP:
2934 case MFI_CMD_OP_STP:
2935 complete_cmd_in_sync_mode(instance, cmd);
2936 break;
2937 case MFI_CMD_OP_DCMD:
2938 /* see if got an event notification */
2939 if (cmd->frame->dcmd.opcode ==
2940 MR_DCMD_CTRL_EVENT_WAIT) {
2941 if ((instance->aen_cmd == cmd) &&
2942 (instance->aen_cmd->abort_aen)) {
2943 con_log(CL_ANN, (CE_WARN,
2944 "megasas_softintr: "
2945 "aborted_aen returned"));
2946 } else {
2947 service_mfi_aen(instance, cmd);
2948
2949 atomic_add_16(&instance->fw_outstanding,
2950 (-1));
2951 }
2952 } else {
2953 complete_cmd_in_sync_mode(instance, cmd);
2954 }
2955
2956 break;
2957 case MFI_CMD_OP_ABORT:
2958 con_log(CL_ANN, (CE_WARN, "MFI_CMD_OP_ABORT complete"));
2959 /*
2960 * MFI_CMD_OP_ABORT successfully completed
2961 * in the synchronous mode
2962 */
2963 complete_cmd_in_sync_mode(instance, cmd);
2964 break;
2965 default:
2966 megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2967 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2968
2969 if (cmd->pkt != NULL) {
2970 pkt = cmd->pkt;
2971 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
2972 scsi_hba_pkt_comp(pkt);
2973 }
2974 }
2975 con_log(CL_ANN, (CE_WARN, "Cmd type unknown !!"));
2976 break;
2977 }
2978 }
2979
2980 instance->softint_running = 0;
2981
2982 return (DDI_INTR_CLAIMED);
2983 }
2984
2985 /*
2986 * mega_alloc_dma_obj
2987 *
* Allocate the memory and other resources for a DMA object.
2989 */
2990 static int
2991 mega_alloc_dma_obj(struct megasas_instance *instance, dma_obj_t *obj)
2992 {
2993 int i;
2994 size_t alen = 0;
2995 uint_t cookie_cnt;
2996 struct ddi_device_acc_attr tmp_endian_attr;
2997
2998 tmp_endian_attr = endian_attr;
2999 tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
3000 i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
3001 DDI_DMA_SLEEP, NULL, &obj->dma_handle);
3002 if (i != DDI_SUCCESS) {
3003
3004 switch (i) {
3005 case DDI_DMA_BADATTR :
con_log(CL_ANN, (CE_WARN,
"Failed ddi_dma_alloc_handle - bad attributes"));
3008 break;
3009 case DDI_DMA_NORESOURCES :
3010 con_log(CL_ANN, (CE_WARN,
3011 "Failed ddi_dma_alloc_handle- No Resources"));
3012 break;
3013 default :
con_log(CL_ANN, (CE_WARN,
"Failed ddi_dma_alloc_handle: unknown %d", i));
3016 break;
3017 }
3018
3019 return (-1);
3020 }
3021
3022 if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
3023 DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
3024 &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
3025 alen < obj->size) {
3026
3027 ddi_dma_free_handle(&obj->dma_handle);
3028
3029 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
3030
3031 return (-1);
3032 }
3033
3034 if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
3035 obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
3036 NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {
3037
3038 ddi_dma_mem_free(&obj->acc_handle);
3039 ddi_dma_free_handle(&obj->dma_handle);
3040
3041 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
3042
3043 return (-1);
3044 }
3045
3046 if (megasas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
3047 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
3048 return (-1);
3049 }
3050
3051 if (megasas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
3052 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
3053 return (-1);
3054 }
3055
3056 return (cookie_cnt);
3057 }
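/*
 * Typical use of the dma_obj_t helpers by the DCMD/IOCTL paths in this
 * file (an illustrative sketch only, mirroring get_seq_num()):
 *
 *     dma_obj_t obj;
 *     obj.size = xferlen;
 *     obj.dma_attr = megasas_generic_dma_attr;
 *     obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
 *     obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
 *     obj.dma_attr.dma_attr_sgllen = 1;
 *     obj.dma_attr.dma_attr_align = 1;
 *     if (mega_alloc_dma_obj(instance, &obj) != 1)
 *         return (DDI_FAILURE);
 *     ... use obj.buffer and obj.dma_cookie[0].dmac_address ...
 *     (void) mega_free_dma_obj(instance, obj);
 */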
3058
3059 /*
3060 * mega_free_dma_obj(struct megasas_instance *, dma_obj_t)
3061 *
* De-allocate the memory and other resources for a DMA object, which must
* have been allocated by a previous call to mega_alloc_dma_obj()
3064 */
3065 static int
3066 mega_free_dma_obj(struct megasas_instance *instance, dma_obj_t obj)
3067 {
3068
3069 if (megasas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
3070 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
3071 return (DDI_FAILURE);
3072 }
3073
3074 if (megasas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
3075 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
3076 return (DDI_FAILURE);
3077 }
3078
3079 (void) ddi_dma_unbind_handle(obj.dma_handle);
3080 ddi_dma_mem_free(&obj.acc_handle);
3081 ddi_dma_free_handle(&obj.dma_handle);
3082
3083 return (DDI_SUCCESS);
3084 }
3085
3086 /*
3087 * megasas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
3088 * int, int (*)())
3089 *
3090 * Allocate dma resources for a new scsi command
3091 */
3092 static int
3093 megasas_dma_alloc(struct megasas_instance *instance, struct scsi_pkt *pkt,
3094 struct buf *bp, int flags, int (*callback)())
3095 {
3096 int dma_flags;
3097 int (*cb)(caddr_t);
3098 int i;
3099
3100 ddi_dma_attr_t tmp_dma_attr = megasas_generic_dma_attr;
3101 struct scsa_cmd *acmd = PKT2CMD(pkt);
3102
3103 acmd->cmd_buf = bp;
3104
3105 if (bp->b_flags & B_READ) {
3106 acmd->cmd_flags &= ~CFLAG_DMASEND;
3107 dma_flags = DDI_DMA_READ;
3108 } else {
3109 acmd->cmd_flags |= CFLAG_DMASEND;
3110 dma_flags = DDI_DMA_WRITE;
3111 }
3112
3113 if (flags & PKT_CONSISTENT) {
3114 acmd->cmd_flags |= CFLAG_CONSISTENT;
3115 dma_flags |= DDI_DMA_CONSISTENT;
3116 }
3117
3118 if (flags & PKT_DMA_PARTIAL) {
3119 dma_flags |= DDI_DMA_PARTIAL;
3120 }
3121
3122 dma_flags |= DDI_DMA_REDZONE;
3123
3124 cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
3125
3126 tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
3127 tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
3128
3129 if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
3130 cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
3131 switch (i) {
3132 case DDI_DMA_BADATTR:
3133 bioerror(bp, EFAULT);
3134 return (-1);
3135
3136 case DDI_DMA_NORESOURCES:
3137 bioerror(bp, 0);
3138 return (-1);
3139
3140 default:
3141 con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
3142 "0x%x impossible\n", i));
3143 bioerror(bp, EFAULT);
3144 return (-1);
3145 }
3146 }
3147
3148 i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
3149 cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);
3150
3151 switch (i) {
3152 case DDI_DMA_PARTIAL_MAP:
3153 if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
3154 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
3155 "DDI_DMA_PARTIAL_MAP impossible\n"));
3156 goto no_dma_cookies;
3157 }
3158
3159 if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
3160 DDI_FAILURE) {
3161 con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed\n"));
3162 goto no_dma_cookies;
3163 }
3164
3165 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
3166 &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
3167 &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
3168 DDI_FAILURE) {
3169
3170 con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed\n"));
3171 goto no_dma_cookies;
3172 }
3173
3174 goto get_dma_cookies;
3175 case DDI_DMA_MAPPED:
3176 acmd->cmd_nwin = 1;
3177 acmd->cmd_dma_len = 0;
3178 acmd->cmd_dma_offset = 0;
3179
3180 get_dma_cookies:
3181 i = 0;
3182 acmd->cmd_dmacount = 0;
3183 for (;;) {
3184 acmd->cmd_dmacount +=
3185 acmd->cmd_dmacookies[i++].dmac_size;
3186
3187 if (i == instance->max_num_sge ||
3188 i == acmd->cmd_ncookies)
3189 break;
3190
3191 ddi_dma_nextcookie(acmd->cmd_dmahandle,
3192 &acmd->cmd_dmacookies[i]);
3193 }
3194
3195 acmd->cmd_cookie = i;
3196 acmd->cmd_cookiecnt = i;
3197
3198 acmd->cmd_flags |= CFLAG_DMAVALID;
3199
3200 if (bp->b_bcount >= acmd->cmd_dmacount) {
3201 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
3202 } else {
3203 pkt->pkt_resid = 0;
3204 }
3205
3206 return (0);
3207 case DDI_DMA_NORESOURCES:
3208 bioerror(bp, 0);
3209 break;
3210 case DDI_DMA_NOMAPPING:
3211 bioerror(bp, EFAULT);
3212 break;
3213 case DDI_DMA_TOOBIG:
3214 bioerror(bp, EINVAL);
3215 break;
3216 case DDI_DMA_INUSE:
3217 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
3218 " DDI_DMA_INUSE impossible\n"));
3219 break;
3220 default:
3221 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
3222 "0x%x impossible\n", i));
3223 break;
3224 }
3225
3226 no_dma_cookies:
3227 ddi_dma_free_handle(&acmd->cmd_dmahandle);
3228 acmd->cmd_dmahandle = NULL;
3229 acmd->cmd_flags &= ~CFLAG_DMAVALID;
3230 return (-1);
3231 }
3232
3233 /*
3234 * megasas_dma_move(struct megasas_instance *, struct scsi_pkt *, struct buf *)
3235 *
3236 * move dma resources to next dma window
3237 *
3238 */
3239 static int
3240 megasas_dma_move(struct megasas_instance *instance, struct scsi_pkt *pkt,
3241 struct buf *bp)
3242 {
3243 int i = 0;
3244
3245 struct scsa_cmd *acmd = PKT2CMD(pkt);
3246
3247 /*
* If there are no more cookies remaining in this window,
* we must move to the next window first.
3250 */
3251 if (acmd->cmd_cookie == acmd->cmd_ncookies) {
3252 if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
3253 return (0);
3254 }
3255
3256 /* at last window, cannot move */
3257 if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
3258 return (-1);
3259 }
3260
3261 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
3262 &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
3263 &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
3264 DDI_FAILURE) {
3265 return (-1);
3266 }
3267
3268 acmd->cmd_cookie = 0;
3269 } else {
3270 /* still more cookies in this window - get the next one */
3271 ddi_dma_nextcookie(acmd->cmd_dmahandle,
3272 &acmd->cmd_dmacookies[0]);
3273 }
3274
3275 /* get remaining cookies in this window, up to our maximum */
3276 for (;;) {
3277 acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
3278 acmd->cmd_cookie++;
3279
3280 if (i == instance->max_num_sge ||
3281 acmd->cmd_cookie == acmd->cmd_ncookies) {
3282 break;
3283 }
3284
3285 ddi_dma_nextcookie(acmd->cmd_dmahandle,
3286 &acmd->cmd_dmacookies[i]);
3287 }
3288
3289 acmd->cmd_cookiecnt = i;
3290
3291 if (bp->b_bcount >= acmd->cmd_dmacount) {
3292 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
3293 } else {
3294 pkt->pkt_resid = 0;
3295 }
3296
3297 return (0);
3298 }
3299
3300 /*
3301 * build_cmd
3302 */
3303 static struct megasas_cmd *
3304 build_cmd(struct megasas_instance *instance, struct scsi_address *ap,
3305 struct scsi_pkt *pkt, uchar_t *cmd_done)
3306 {
3307 uint16_t flags = 0;
3308 uint32_t i;
3309 uint32_t context;
3310 uint32_t sge_bytes;
3311
3312 struct megasas_cmd *cmd;
3313 struct megasas_sge64 *mfi_sgl;
3314 struct scsa_cmd *acmd = PKT2CMD(pkt);
3315 struct megasas_pthru_frame *pthru;
3316 struct megasas_io_frame *ldio;
3317
3318 /* find out if this is logical or physical drive command. */
3319 acmd->islogical = MEGADRV_IS_LOGICAL(ap);
3320 acmd->device_id = MAP_DEVICE_ID(instance, ap);
3321 *cmd_done = 0;
3322
3323 /* get the command packet */
3324 if (!(cmd = get_mfi_pkt(instance))) {
3325 return (NULL);
3326 }
3327
3328 cmd->pkt = pkt;
3329 cmd->cmd = acmd;
3330
3331 /* lets get the command directions */
3332 if (acmd->cmd_flags & CFLAG_DMASEND) {
3333 flags = MFI_FRAME_DIR_WRITE;
3334
3335 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
3336 (void) ddi_dma_sync(acmd->cmd_dmahandle,
3337 acmd->cmd_dma_offset, acmd->cmd_dma_len,
3338 DDI_DMA_SYNC_FORDEV);
3339 }
3340 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
3341 flags = MFI_FRAME_DIR_READ;
3342
3343 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
3344 (void) ddi_dma_sync(acmd->cmd_dmahandle,
3345 acmd->cmd_dma_offset, acmd->cmd_dma_len,
3346 DDI_DMA_SYNC_FORCPU);
3347 }
3348 } else {
3349 flags = MFI_FRAME_DIR_NONE;
3350 }
3351
3352 flags |= MFI_FRAME_SGL64;
3353
3354 switch (pkt->pkt_cdbp[0]) {
3355
3356 /*
3357 * case SCMD_SYNCHRONIZE_CACHE:
3358 * flush_cache(instance);
3359 * return_mfi_pkt(instance, cmd);
3360 * *cmd_done = 1;
3361 *
3362 * return (NULL);
3363 */
3364
3365 case SCMD_READ:
3366 case SCMD_WRITE:
3367 case SCMD_READ_G1:
3368 case SCMD_WRITE_G1:
3369 if (acmd->islogical) {
3370 ldio = (struct megasas_io_frame *)cmd->frame;
3371
3372 /*
* prepare the logical IO frame:
* the 2nd bit is zero for all read cmds
3375 */
3376 ldio->cmd = (pkt->pkt_cdbp[0] & 0x02) ?
3377 MFI_CMD_OP_LD_WRITE : MFI_CMD_OP_LD_READ;
3378 ldio->cmd_status = 0x0;
3379 ldio->scsi_status = 0x0;
3380 ldio->target_id = acmd->device_id;
3381 ldio->timeout = 0;
3382 ldio->reserved_0 = 0;
3383 ldio->pad_0 = 0;
3384 ldio->flags = flags;
3385
3386 /* Initialize sense Information */
3387 bzero(cmd->sense, SENSE_LENGTH);
3388 ldio->sense_len = SENSE_LENGTH;
3389 ldio->sense_buf_phys_addr_hi = 0;
3390 ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
3391
3392 ldio->start_lba_hi = 0;
3393 ldio->access_byte = (acmd->cmd_cdblen != 6) ?
3394 pkt->pkt_cdbp[1] : 0;
3395 ldio->sge_count = acmd->cmd_cookiecnt;
3396 mfi_sgl = (struct megasas_sge64 *)&ldio->sgl;
3397
3398 context = ldio->context;
3399
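/*
 * Decode the transfer length and the big-endian LBA straight from the
 * CDB bytes below; for the largest CDBs the LBA is 64 bits wide, with
 * bytes 2-5 carrying the upper and bytes 6-9 the lower 32 bits.
 */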
3400 if (acmd->cmd_cdblen == CDB_GROUP0) {
3401 ldio->lba_count = host_to_le16(
3402 (uint16_t)(pkt->pkt_cdbp[4]));
3403
3404 ldio->start_lba_lo = host_to_le32(
3405 ((uint32_t)(pkt->pkt_cdbp[3])) |
3406 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
3407 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
3408 << 16));
3409 } else if (acmd->cmd_cdblen == CDB_GROUP1) {
3410 ldio->lba_count = host_to_le16(
3411 ((uint16_t)(pkt->pkt_cdbp[8])) |
3412 ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
3413
3414 ldio->start_lba_lo = host_to_le32(
3415 ((uint32_t)(pkt->pkt_cdbp[5])) |
3416 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
3417 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
3418 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
3419 } else if (acmd->cmd_cdblen == CDB_GROUP2) {
3420 ldio->lba_count = host_to_le16(
3421 ((uint16_t)(pkt->pkt_cdbp[9])) |
3422 ((uint16_t)(pkt->pkt_cdbp[8]) << 8) |
3423 ((uint16_t)(pkt->pkt_cdbp[7]) << 16) |
3424 ((uint16_t)(pkt->pkt_cdbp[6]) << 24));
3425
3426 ldio->start_lba_lo = host_to_le32(
3427 ((uint32_t)(pkt->pkt_cdbp[5])) |
3428 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
3429 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
3430 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
3431 } else if (acmd->cmd_cdblen == CDB_GROUP3) {
3432 ldio->lba_count = host_to_le16(
3433 ((uint16_t)(pkt->pkt_cdbp[13])) |
3434 ((uint16_t)(pkt->pkt_cdbp[12]) << 8) |
3435 ((uint16_t)(pkt->pkt_cdbp[11]) << 16) |
3436 ((uint16_t)(pkt->pkt_cdbp[10]) << 24));
3437
3438 ldio->start_lba_lo = host_to_le32(
3439 ((uint32_t)(pkt->pkt_cdbp[9])) |
3440 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
3441 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
3442 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
3443
ldio->start_lba_hi = host_to_le32(
3445 ((uint32_t)(pkt->pkt_cdbp[5])) |
3446 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
3447 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
3448 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
3449 }
3450
3451 break;
3452 }
/* FALLTHROUGH - for all non-read/write cmds */
3454 default:
3455 pthru = (struct megasas_pthru_frame *)cmd->frame;
3456
3457 /* prepare the DCDB frame */
3458 pthru->cmd = (acmd->islogical) ?
3459 MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI;
3460 pthru->cmd_status = 0x0;
3461 pthru->scsi_status = 0x0;
3462 pthru->target_id = acmd->device_id;
3463 pthru->lun = 0;
3464 pthru->cdb_len = acmd->cmd_cdblen;
3465 pthru->timeout = 0;
3466 pthru->flags = flags;
3467 pthru->data_xfer_len = acmd->cmd_dmacount;
3468 pthru->sge_count = acmd->cmd_cookiecnt;
3469 mfi_sgl = (struct megasas_sge64 *)&pthru->sgl;
3470
3471 bzero(cmd->sense, SENSE_LENGTH);
3472 pthru->sense_len = SENSE_LENGTH;
3473 pthru->sense_buf_phys_addr_hi = 0;
3474 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
3475
3476 context = pthru->context;
3477
3478 bcopy(pkt->pkt_cdbp, pthru->cdb, acmd->cmd_cdblen);
3479
3480 break;
3481 }
3482 #ifdef lint
3483 context = context;
3484 #endif
3485 /* bzero(mfi_sgl, sizeof (struct megasas_sge64) * MAX_SGL); */
3486
3487 /* prepare the scatter-gather list for the firmware */
3488 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) {
3489 mfi_sgl->phys_addr = acmd->cmd_dmacookies[i].dmac_laddress;
3490 mfi_sgl->length = acmd->cmd_dmacookies[i].dmac_size;
3491 }
3492
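/*
 * An MFI command occupies one frame (MEGAMFI_FRAME_SIZE bytes) for the
 * header plus as many extra frames as are needed to hold the
 * scatter-gather list built above; the total is capped at 8 frames.
 */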
3493 sge_bytes = sizeof (struct megasas_sge64)*acmd->cmd_cookiecnt;
3494
3495 cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
3496 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1;
3497
3498 if (cmd->frame_count >= 8) {
3499 cmd->frame_count = 8;
3500 }
3501
3502 return (cmd);
3503 }
3504
3505 /*
3506 * wait_for_outstanding - Wait for all outstanding cmds
3507 * @instance: Adapter soft state
3508 *
* This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for the FW to
3510 * complete all its outstanding commands. Returns error if one or more IOs
3511 * are pending after this time period.
3512 */
3513 static int
3514 wait_for_outstanding(struct megasas_instance *instance)
3515 {
3516 int i;
3517 uint32_t wait_time = 90;
3518
3519 for (i = 0; i < wait_time; i++) {
3520 if (!instance->fw_outstanding) {
3521 break;
3522 }
3523
drv_usecwait(MILLISEC); /* wait for 1000 usecs */
3525 }
3526
3527 if (instance->fw_outstanding) {
3528 return (1);
3529 }
3530
3531 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VERSION);
3532
3533 return (0);
3534 }
3535
3536 /*
3537 * issue_mfi_pthru
3538 */
3539 static int
3540 issue_mfi_pthru(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
3541 struct megasas_cmd *cmd, int mode)
3542 {
3543 void *ubuf;
3544 uint32_t kphys_addr = 0;
3545 uint32_t xferlen = 0;
3546 uint_t model;
3547
3548 dma_obj_t pthru_dma_obj;
3549 struct megasas_pthru_frame *kpthru;
3550 struct megasas_pthru_frame *pthru;
3551
3552 pthru = &cmd->frame->pthru;
3553 kpthru = (struct megasas_pthru_frame *)&ioctl->frame[0];
3554
3555 model = ddi_model_convert_from(mode & FMODELS);
3556 if (model == DDI_MODEL_ILP32) {
con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_ILP32"));
3558
3559 xferlen = kpthru->sgl.sge32[0].length;
3560
3561 /* SJ! - ubuf needs to be virtual address. */
3562 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
3563 } else {
3564 #ifdef _ILP32
con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_ILP32"));
3566 xferlen = kpthru->sgl.sge32[0].length;
3567 /* SJ! - ubuf needs to be virtual address. */
3568 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
3569 #else
3570 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP64"));
3571 xferlen = kpthru->sgl.sge64[0].length;
3572 /* SJ! - ubuf needs to be virtual address. */
3573 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr;
3574 #endif
3575 }
3576
3577 if (xferlen) {
3578 /* means IOCTL requires DMA */
3579 /* allocate the data transfer buffer */
3580 pthru_dma_obj.size = xferlen;
3581 pthru_dma_obj.dma_attr = megasas_generic_dma_attr;
3582 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3583 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3584 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1;
3585 pthru_dma_obj.dma_attr.dma_attr_align = 1;
3586
3587 /* allocate kernel buffer for DMA */
3588 if (mega_alloc_dma_obj(instance, &pthru_dma_obj) != 1) {
con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
"could not allocate data transfer buffer."));
3591 return (DDI_FAILURE);
3592 }
3593
3594 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3595 if (kpthru->flags & MFI_FRAME_DIR_WRITE) {
3596 if (ddi_copyin(ubuf, (void *)pthru_dma_obj.buffer,
3597 xferlen, mode)) {
3598 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
3599 "copy from user space failed\n"));
3600 return (1);
3601 }
3602 }
3603
3604 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address;
3605 }
3606
3607 pthru->cmd = kpthru->cmd;
3608 pthru->sense_len = kpthru->sense_len;
3609 pthru->cmd_status = kpthru->cmd_status;
3610 pthru->scsi_status = kpthru->scsi_status;
3611 pthru->target_id = kpthru->target_id;
3612 pthru->lun = kpthru->lun;
3613 pthru->cdb_len = kpthru->cdb_len;
3614 pthru->sge_count = kpthru->sge_count;
3615 pthru->timeout = kpthru->timeout;
3616 pthru->data_xfer_len = kpthru->data_xfer_len;
3617
3618 pthru->sense_buf_phys_addr_hi = 0;
3619 /* pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; */
3620 pthru->sense_buf_phys_addr_lo = 0;
3621
3622 bcopy((void *)kpthru->cdb, (void *)pthru->cdb, pthru->cdb_len);
3623
3624 pthru->flags = kpthru->flags & ~MFI_FRAME_SGL64;
3625 pthru->sgl.sge32[0].length = xferlen;
3626 pthru->sgl.sge32[0].phys_addr = kphys_addr;
3627
3628 cmd->sync_cmd = MEGASAS_TRUE;
3629 cmd->frame_count = 1;
3630
3631 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
3632 con_log(CL_ANN, (CE_WARN,
3633 "issue_mfi_pthru: fw_ioctl failed\n"));
3634 } else {
3635 if (xferlen && (kpthru->flags & MFI_FRAME_DIR_READ)) {
3636
3637 if (ddi_copyout(pthru_dma_obj.buffer, ubuf,
3638 xferlen, mode)) {
3639 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
3640 "copy to user space failed\n"));
3641 return (1);
3642 }
3643 }
3644 }
3645
3646 kpthru->cmd_status = pthru->cmd_status;
3647 kpthru->scsi_status = pthru->scsi_status;
3648
3649 con_log(CL_ANN, (CE_NOTE, "issue_mfi_pthru: cmd_status %x, "
3650 "scsi_status %x\n", pthru->cmd_status, pthru->scsi_status));
3651
3652 if (xferlen) {
3653 /* free kernel buffer */
3654 if (mega_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
3655 return (1);
3656 }
3657
3658 return (0);
3659 }
3660
3661 /*
3662 * issue_mfi_dcmd
3663 */
3664 static int
3665 issue_mfi_dcmd(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
3666 struct megasas_cmd *cmd, int mode)
3667 {
3668 void *ubuf;
3669 uint32_t kphys_addr = 0;
3670 uint32_t xferlen = 0;
3671 uint32_t model;
3672 dma_obj_t dcmd_dma_obj;
3673 struct megasas_dcmd_frame *kdcmd;
3674 struct megasas_dcmd_frame *dcmd;
3675
3676 dcmd = &cmd->frame->dcmd;
3677 kdcmd = (struct megasas_dcmd_frame *)&ioctl->frame[0];
3678
3679 model = ddi_model_convert_from(mode & FMODELS);
3680 if (model == DDI_MODEL_ILP32) {
3681 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
3682
3683 xferlen = kdcmd->sgl.sge32[0].length;
3684
3685 /* SJ! - ubuf needs to be virtual address. */
3686 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
} else {
3690 #ifdef _ILP32
3691 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
3692 xferlen = kdcmd->sgl.sge32[0].length;
3693 /* SJ! - ubuf needs to be virtual address. */
3694 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
3695 #else
3696 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_LP64"));
3697 xferlen = kdcmd->sgl.sge64[0].length;
3698 /* SJ! - ubuf needs to be virtual address. */
ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
3700 #endif
3701 }
3702 if (xferlen) {
3703 /* means IOCTL requires DMA */
3704 /* allocate the data transfer buffer */
3705 dcmd_dma_obj.size = xferlen;
3706 dcmd_dma_obj.dma_attr = megasas_generic_dma_attr;
3707 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3708 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3709 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3710 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3711
3712 /* allocate kernel buffer for DMA */
3713 if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) {
con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
"could not allocate data transfer buffer."));
3716 return (DDI_FAILURE);
3717 }
3718
3719 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3720 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) {
3721 if (ddi_copyin(ubuf, (void *)dcmd_dma_obj.buffer,
3722 xferlen, mode)) {
3723 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
3724 "copy from user space failed\n"));
3725 return (1);
3726 }
3727 }
3728
3729 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
3730 }
3731
3732 dcmd->cmd = kdcmd->cmd;
3733 dcmd->cmd_status = kdcmd->cmd_status;
3734 dcmd->sge_count = kdcmd->sge_count;
3735 dcmd->timeout = kdcmd->timeout;
3736 dcmd->data_xfer_len = kdcmd->data_xfer_len;
3737 dcmd->opcode = kdcmd->opcode;
3738
3739 bcopy((void *)kdcmd->mbox.b, (void *)dcmd->mbox.b, DCMD_MBOX_SZ);
3740
3741 dcmd->flags = kdcmd->flags & ~MFI_FRAME_SGL64;
3742 dcmd->sgl.sge32[0].length = xferlen;
3743 dcmd->sgl.sge32[0].phys_addr = kphys_addr;
3744
3745 cmd->sync_cmd = MEGASAS_TRUE;
3746 cmd->frame_count = 1;
3747
3748 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
3749 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed\n"));
3750 } else {
3751 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
3752
3753 if (ddi_copyout(dcmd_dma_obj.buffer, ubuf,
3754 xferlen, mode)) {
3755 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
3756 "copy to user space failed\n"));
3757 return (1);
3758 }
3759 }
3760 }
3761
3762 kdcmd->cmd_status = dcmd->cmd_status;
3763
3764 if (xferlen) {
3765 /* free kernel buffer */
3766 if (mega_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
3767 return (1);
3768 }
3769
3770 return (0);
3771 }
3772
3773 /*
3774 * issue_mfi_smp
3775 */
3776 static int
3777 issue_mfi_smp(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
3778 struct megasas_cmd *cmd, int mode)
3779 {
3780 void *request_ubuf;
3781 void *response_ubuf;
3782 uint32_t request_xferlen = 0;
3783 uint32_t response_xferlen = 0;
3784 uint_t model;
3785 dma_obj_t request_dma_obj;
3786 dma_obj_t response_dma_obj;
3787 struct megasas_smp_frame *ksmp;
3788 struct megasas_smp_frame *smp;
3789 struct megasas_sge32 *sge32;
3790 #ifndef _ILP32
3791 struct megasas_sge64 *sge64;
3792 #endif
3793
3794 smp = &cmd->frame->smp;
3795 ksmp = (struct megasas_smp_frame *)&ioctl->frame[0];
3796
3797 model = ddi_model_convert_from(mode & FMODELS);
3798 if (model == DDI_MODEL_ILP32) {
3799 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32"));
3800
3801 sge32 = &ksmp->sgl[0].sge32[0];
3802 response_xferlen = sge32[0].length;
3803 request_xferlen = sge32[1].length;
3804 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: "
3805 "response_xferlen = %x, request_xferlen = %x",
3806 response_xferlen, request_xferlen));
3807
3808 /* SJ! - ubuf needs to be virtual address. */
3809
3810 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
3811 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
3812 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
3813 "response_ubuf = %p, request_ubuf = %p",
3814 response_ubuf, request_ubuf));
3815 } else {
3816 #ifdef _ILP32
3817 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32"));
3818
3819 sge32 = &ksmp->sgl[0].sge32[0];
3820 response_xferlen = sge32[0].length;
3821 request_xferlen = sge32[1].length;
3822 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: "
3823 "response_xferlen = %x, request_xferlen = %x",
3824 response_xferlen, request_xferlen));
3825
3826 /* SJ! - ubuf needs to be virtual address. */
3827
3828 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
3829 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
3830 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
3831 "response_ubuf = %p, request_ubuf = %p",
3832 response_ubuf, request_ubuf));
3833 #else
3834 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_LP64"));
3835
3836 sge64 = &ksmp->sgl[0].sge64[0];
3837 response_xferlen = sge64[0].length;
3838 request_xferlen = sge64[1].length;
3839
3840 /* SJ! - ubuf needs to be virtual address. */
3841 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr;
3842 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr;
3843 #endif
3844 }
3845 if (request_xferlen) {
3846 /* means IOCTL requires DMA */
3847 /* allocate the data transfer buffer */
3848 request_dma_obj.size = request_xferlen;
3849 request_dma_obj.dma_attr = megasas_generic_dma_attr;
3850 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3851 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3852 request_dma_obj.dma_attr.dma_attr_sgllen = 1;
3853 request_dma_obj.dma_attr.dma_attr_align = 1;
3854
3855 /* allocate kernel buffer for DMA */
3856 if (mega_alloc_dma_obj(instance, &request_dma_obj) != 1) {
con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
"could not allocate data transfer buffer."));
3859 return (DDI_FAILURE);
3860 }
3861
3862 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3863 if (ddi_copyin(request_ubuf, (void *) request_dma_obj.buffer,
3864 request_xferlen, mode)) {
3865 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3866 "copy from user space failed\n"));
3867 return (1);
3868 }
3869 }
3870
3871 if (response_xferlen) {
3872 /* means IOCTL requires DMA */
3873 /* allocate the data transfer buffer */
3874 response_dma_obj.size = response_xferlen;
3875 response_dma_obj.dma_attr = megasas_generic_dma_attr;
3876 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3877 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3878 response_dma_obj.dma_attr.dma_attr_sgllen = 1;
3879 response_dma_obj.dma_attr.dma_attr_align = 1;
3880
3881 /* allocate kernel buffer for DMA */
3882 if (mega_alloc_dma_obj(instance, &response_dma_obj) != 1) {
3883 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3884 "could not allocate data transfer buffer."));
3885 return (DDI_FAILURE);
3886 }
3887
3888 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3889 if (ddi_copyin(response_ubuf, (void *) response_dma_obj.buffer,
3890 response_xferlen, mode)) {
3891 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3892 "copy from user space failed\n"));
3893 return (1);
3894 }
3895 }
3896
3897 smp->cmd = ksmp->cmd;
3898 smp->cmd_status = ksmp->cmd_status;
3899 smp->connection_status = ksmp->connection_status;
3900 smp->sge_count = ksmp->sge_count;
3901 /* smp->context = ksmp->context; */
3902 smp->timeout = ksmp->timeout;
3903 smp->data_xfer_len = ksmp->data_xfer_len;
3904
3905 bcopy((void *)&ksmp->sas_addr, (void *)&smp->sas_addr,
3906 sizeof (uint64_t));
3907
3908 smp->flags = ksmp->flags & ~MFI_FRAME_SGL64;
3909
3910 model = ddi_model_convert_from(mode & FMODELS);
3911 if (model == DDI_MODEL_ILP32) {
3912 con_log(CL_ANN1, (CE_NOTE,
3913 "issue_mfi_smp: DDI_MODEL_ILP32"));
3914
3915 sge32 = &smp->sgl[0].sge32[0];
3916 sge32[0].length = response_xferlen;
3917 sge32[0].phys_addr =
3918 response_dma_obj.dma_cookie[0].dmac_address;
3919 sge32[1].length = request_xferlen;
3920 sge32[1].phys_addr =
3921 request_dma_obj.dma_cookie[0].dmac_address;
3922 } else {
3923 #ifdef _ILP32
3924 con_log(CL_ANN1, (CE_NOTE,
3925 "issue_mfi_smp: DDI_MODEL_ILP32"));
3926 sge32 = &smp->sgl[0].sge32[0];
3927 sge32[0].length = response_xferlen;
3928 sge32[0].phys_addr =
3929 response_dma_obj.dma_cookie[0].dmac_address;
3930 sge32[1].length = request_xferlen;
3931 sge32[1].phys_addr =
3932 request_dma_obj.dma_cookie[0].dmac_address;
3933 #else
3934 con_log(CL_ANN1, (CE_NOTE,
3935 "issue_mfi_smp: DDI_MODEL_LP64"));
3936 sge64 = &smp->sgl[0].sge64[0];
3937 sge64[0].length = response_xferlen;
3938 sge64[0].phys_addr =
3939 response_dma_obj.dma_cookie[0].dmac_address;
3940 sge64[1].length = request_xferlen;
3941 sge64[1].phys_addr =
3942 request_dma_obj.dma_cookie[0].dmac_address;
3943 #endif
3944 }
3945 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
3946 "response_xferlen = %d, request_xferlen = %d "
3947 "smp->data_xfer_len = %d", response_xferlen, request_xferlen,
3948 smp->data_xfer_len));
3949
3950 cmd->sync_cmd = MEGASAS_TRUE;
3951 cmd->frame_count = 1;
3952
3953 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
3954 con_log(CL_ANN, (CE_WARN,
3955 "issue_mfi_smp: fw_ioctl failed\n"));
3956 } else {
3957 con_log(CL_ANN1, (CE_NOTE,
3958 "issue_mfi_smp: copy to user space\n"));
3959
3960 if (request_xferlen) {
3961 if (ddi_copyout(request_dma_obj.buffer, request_ubuf,
3962 request_xferlen, mode)) {
3963 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3964 "copy to user space failed\n"));
3965 return (1);
3966 }
3967 }
3968
3969 if (response_xferlen) {
3970 if (ddi_copyout(response_dma_obj.buffer, response_ubuf,
3971 response_xferlen, mode)) {
3972 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3973 "copy to user space failed\n"));
3974 return (1);
3975 }
3976 }
3977 }
3978
3979 ksmp->cmd_status = smp->cmd_status;
3980 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
3981 smp->cmd_status));
3982
3983
3984 if (request_xferlen) {
3985 /* free kernel buffer */
3986 if (mega_free_dma_obj(instance, request_dma_obj) != DDI_SUCCESS)
3987 return (1);
3988 }
3989
3990 if (response_xferlen) {
3991 /* free kernel buffer */
3992 if (mega_free_dma_obj(instance, response_dma_obj) !=
3993 DDI_SUCCESS)
3994 return (1);
3995 }
3996
3997 return (0);
3998 }
3999
4000 /*
4001 * issue_mfi_stp
4002 */
4003 static int
4004 issue_mfi_stp(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
4005 struct megasas_cmd *cmd, int mode)
4006 {
4007 void *fis_ubuf;
4008 void *data_ubuf;
4009 uint32_t fis_xferlen = 0;
4010 uint32_t data_xferlen = 0;
4011 uint_t model;
4012 dma_obj_t fis_dma_obj;
4013 dma_obj_t data_dma_obj;
4014 struct megasas_stp_frame *kstp;
4015 struct megasas_stp_frame *stp;
4016
4017 stp = &cmd->frame->stp;
4018 kstp = (struct megasas_stp_frame *)&ioctl->frame[0];
4019
4020 model = ddi_model_convert_from(mode & FMODELS);
4021 if (model == DDI_MODEL_ILP32) {
4022 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));
4023
4024 fis_xferlen = kstp->sgl.sge32[0].length;
4025 data_xferlen = kstp->sgl.sge32[1].length;
4026
4027 /* SJ! - ubuf needs to be virtual address. */
4028 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
4029 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
4030 } else {
4033 #ifdef _ILP32
4034 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));
4035
4036 fis_xferlen = kstp->sgl.sge32[0].length;
4037 data_xferlen = kstp->sgl.sge32[1].length;
4038
4039 /* SJ! - ubuf needs to be virtual address. */
4040 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
4041 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
4042 #else
4043 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_LP64"));
4044
4045 fis_xferlen = kstp->sgl.sge64[0].length;
4046 data_xferlen = kstp->sgl.sge64[1].length;
4047
4048 /* SJ! - ubuf needs to be virtual address. */
4049 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
4050 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
4051 #endif
4052 }
4053
4054
4055 if (fis_xferlen) {
4056 con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: "
4057 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));
4058
4059 /* means IOCTL requires DMA */
4060 /* allocate the data transfer buffer */
4061 fis_dma_obj.size = fis_xferlen;
4062 fis_dma_obj.dma_attr = megasas_generic_dma_attr;
4063 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
4064 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
4065 fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
4066 fis_dma_obj.dma_attr.dma_attr_align = 1;
4067
4068 /* allocate kernel buffer for DMA */
4069 if (mega_alloc_dma_obj(instance, &fis_dma_obj) != 1) {
4070 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4071 "could not allocate data transfer buffer."));
4072 return (DDI_FAILURE);
4073 }
4074
4075 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
4076 if (ddi_copyin(fis_ubuf, (void *)fis_dma_obj.buffer,
4077 fis_xferlen, mode)) {
4078 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4079 "copy from user space failed\n"));
4080 return (1);
4081 }
4082 }
4083
4084 if (data_xferlen) {
4085 con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: data_ubuf = %p "
4086 "data_xferlen = %x", data_ubuf, data_xferlen));
4087
4088 /* means IOCTL requires DMA */
4089 /* allocate the data transfer buffer */
4090 data_dma_obj.size = data_xferlen;
4091 data_dma_obj.dma_attr = megasas_generic_dma_attr;
4092 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
4093 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
4094 data_dma_obj.dma_attr.dma_attr_sgllen = 1;
4095 data_dma_obj.dma_attr.dma_attr_align = 1;
4096
4097 /* allocate kernel buffer for DMA */
4098 if (mega_alloc_dma_obj(instance, &data_dma_obj) != 1) {
4099 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4100 "could not allocate data transfer buffer."));
4101 return (DDI_FAILURE);
4102 }
4103
4104 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
4105 if (ddi_copyin(data_ubuf, (void *) data_dma_obj.buffer,
4106 data_xferlen, mode)) {
4107 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4108 "copy from user space failed\n"));
4109 return (1);
4110 }
4111 }
4112
4113 stp->cmd = kstp->cmd;
4114 stp->cmd_status = kstp->cmd_status;
4115 stp->connection_status = kstp->connection_status;
4116 stp->target_id = kstp->target_id;
4117 stp->sge_count = kstp->sge_count;
4118 /* stp->context = kstp->context; */
4119 stp->timeout = kstp->timeout;
4120 stp->data_xfer_len = kstp->data_xfer_len;
4121
4122 bcopy((void *)kstp->fis, (void *)stp->fis, 10);
4123
4124 stp->flags = kstp->flags & ~MFI_FRAME_SGL64;
4125 stp->stp_flags = kstp->stp_flags;
4126 stp->sgl.sge32[0].length = fis_xferlen;
4127 stp->sgl.sge32[0].phys_addr = fis_dma_obj.dma_cookie[0].dmac_address;
4128 stp->sgl.sge32[1].length = data_xferlen;
4129 stp->sgl.sge32[1].phys_addr = data_dma_obj.dma_cookie[0].dmac_address;
4130
4131 cmd->sync_cmd = MEGASAS_TRUE;
4132 cmd->frame_count = 1;
4133
4134 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
4135 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed\n"));
4136 } else {
4137
4138 if (fis_xferlen) {
4139 if (ddi_copyout(fis_dma_obj.buffer, fis_ubuf,
4140 fis_xferlen, mode)) {
4141 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4142 "copy to user space failed\n"));
4143 return (1);
4144 }
4145 }
4146
4147 if (data_xferlen) {
4148 if (ddi_copyout(data_dma_obj.buffer, data_ubuf,
4149 data_xferlen, mode)) {
4150 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4151 "copy to user space failed\n"));
4152 return (1);
4153 }
4154 }
4155 }
4156
4157 kstp->cmd_status = stp->cmd_status;
4158
4159 if (fis_xferlen) {
4160 /* free kernel buffer */
4161 if (mega_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
4162 return (1);
4163 }
4164
4165 if (data_xferlen) {
4166 /* free kernel buffer */
4167 if (mega_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
4168 return (1);
4169 }
4170
4171 return (0);
4172 }
4173
4174 /*
4175 * fill_up_drv_ver
4176 */
4177 static void
4178 fill_up_drv_ver(struct megasas_drv_ver *dv)
4179 {
4180 (void) memset(dv, 0, sizeof (struct megasas_drv_ver));
4181
4182 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
4183 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
4184 (void) memcpy(dv->drv_name, "megaraid_sas", strlen("megaraid_sas"));
4185 (void) memcpy(dv->drv_ver, MEGASAS_VERSION, strlen(MEGASAS_VERSION));
4186 (void) memcpy(dv->drv_rel_date, MEGASAS_RELDATE,
4187 strlen(MEGASAS_RELDATE));
4188 }
4189
4190 /*
4191 * handle_drv_ioctl
4192 */
4193 static int
4194 handle_drv_ioctl(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
4195 int mode)
4196 {
4197 int i;
4198 int rval = 0;
4199 int *props = NULL;
4200 void *ubuf;
4201
4202 uint8_t *pci_conf_buf;
4203 uint32_t xferlen;
4204 uint32_t num_props;
4205 uint_t model;
4206 struct megasas_dcmd_frame *kdcmd;
4207 struct megasas_drv_ver dv;
4208 struct megasas_pci_information pi;
4209
4210 kdcmd = (struct megasas_dcmd_frame *)&ioctl->frame[0];
4211
4212 model = ddi_model_convert_from(mode & FMODELS);
4213 if (model == DDI_MODEL_ILP32) {
4214 con_log(CL_ANN1, (CE_NOTE,
4215 "handle_drv_ioctl: DDI_MODEL_ILP32"));
4216
4217 xferlen = kdcmd->sgl.sge32[0].length;
4218
4219 /* SJ! - ubuf needs to be virtual address. */
4220 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
4221 } else {
4222 #ifdef _ILP32
4223 con_log(CL_ANN1, (CE_NOTE,
4224 "handle_drv_ioctl: DDI_MODEL_ILP32"));
4225 xferlen = kdcmd->sgl.sge32[0].length;
4226 /* SJ! - ubuf needs to be virtual address. */
4227 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
4228 #else
4229 con_log(CL_ANN1, (CE_NOTE,
4230 "handle_drv_ioctl: DDI_MODEL_LP64"));
4231 xferlen = kdcmd->sgl.sge64[0].length;
4232 /* SJ! - ubuf needs to be virtual address. */
4233 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
4234 #endif
4235 }
4236 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
4237 "dataBuf=%p size=%d bytes", ubuf, xferlen));
4238
4239 switch (kdcmd->opcode) {
4240 case MR_DRIVER_IOCTL_DRIVER_VERSION:
4241 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
4242 "MR_DRIVER_IOCTL_DRIVER_VERSION"));
4243
4244 fill_up_drv_ver(&dv);
4245
4246 if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
4247 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4248 "MR_DRIVER_IOCTL_DRIVER_VERSION : "
4249 "copy to user space failed\n"));
4250 kdcmd->cmd_status = 1;
4251 rval = 1;
4252 } else {
4253 kdcmd->cmd_status = 0;
4254 }
4255 break;
4256 case MR_DRIVER_IOCTL_PCI_INFORMATION:
4257 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
4258 "MR_DRIVER_IOCTL_PCI_INFORMATION"));
4259
4260 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
4261 0, "reg", &props, &num_props)) {
4262 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4263 "MR_DRIVER_IOCTL_PCI_INFORMATION : "
4264 "ddi_prop_lookup_int_array failed\n"));
4265 rval = 1;
4266 } else {
4267
4268 pi.busNumber = (props[0] >> 16) & 0xFF;
4269 pi.deviceNumber = (props[0] >> 11) & 0x1f;
4270 pi.functionNumber = (props[0] >> 8) & 0x7;
4271 ddi_prop_free((void *)props);
4272 }
4273
4274 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;
4275
4276 for (i = 0; i < (sizeof (struct megasas_pci_information) -
4277 offsetof(struct megasas_pci_information, pciHeaderInfo));
4278 i++) {
4279 pci_conf_buf[i] =
4280 pci_config_get8(instance->pci_handle, i);
4281 }
4282
4283 if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
4284 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4285 "MR_DRIVER_IOCTL_PCI_INFORMATION : "
4286 "copy to user space failed\n"));
4287 kdcmd->cmd_status = 1;
4288 rval = 1;
4289 } else {
4290 kdcmd->cmd_status = 0;
4291 }
4292 break;
4293 default:
4294 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4295 "invalid driver specific IOCTL opcode = 0x%x",
4296 kdcmd->opcode));
4297 kdcmd->cmd_status = 1;
4298 rval = 1;
4299 break;
4300 }
4301
4302 return (rval);
4303 }
4304
4305 /*
4306 * handle_mfi_ioctl
4307 */
4308 static int
4309 handle_mfi_ioctl(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
4310 int mode)
4311 {
4312 int rval = 0;
4313
4314 struct megasas_header *hdr;
4315 struct megasas_cmd *cmd;
4316
4317 cmd = get_mfi_pkt(instance);
4318
4319 if (!cmd) {
4320 con_log(CL_ANN, (CE_WARN, "megasas: "
4321 "failed to get a cmd packet\n"));
4322 return (1);
4323 }
4324
4325 hdr = (struct megasas_header *)&ioctl->frame[0];
4326
4327 switch (hdr->cmd) {
4328 case MFI_CMD_OP_DCMD:
4329 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
4330 break;
4331 case MFI_CMD_OP_SMP:
4332 rval = issue_mfi_smp(instance, ioctl, cmd, mode);
4333 break;
4334 case MFI_CMD_OP_STP:
4335 rval = issue_mfi_stp(instance, ioctl, cmd, mode);
4336 break;
4337 case MFI_CMD_OP_LD_SCSI:
4338 case MFI_CMD_OP_PD_SCSI:
4339 rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
4340 break;
4341 default:
4342 con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
4343 "invalid mfi ioctl hdr->cmd = %d\n", hdr->cmd));
4344 rval = 1;
4345 break;
4346 }
4347
4348
4349 return_mfi_pkt(instance, cmd);
4350 if (megasas_common_check(instance, cmd) != DDI_SUCCESS)
4351 rval = 1;
4352 return (rval);
4353 }
4354
4355 /*
4356 * AEN
4357 */
4358 static int
4359 handle_mfi_aen(struct megasas_instance *instance, struct megasas_aen *aen)
4360 {
4361 int rval = 0;
4362
4363 rval = register_mfi_aen(instance, instance->aen_seq_num,
4364 aen->class_locale_word);
4365
4366 aen->cmd_status = (uint8_t)rval;
4367
4368 return (rval);
4369 }
4370
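/*
 * register_mfi_aen
 *
 * Register with the firmware for asynchronous event notification. If an
 * AEN command is already outstanding and its class/locale does not cover
 * the new request, the old command is aborted and a new registration is
 * issued with the superset class/locale.
 */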
4371 static int
4372 register_mfi_aen(struct megasas_instance *instance, uint32_t seq_num,
4373 uint32_t class_locale_word)
4374 {
4375 int ret_val;
4376
4377 struct megasas_cmd *cmd;
4378 struct megasas_dcmd_frame *dcmd;
4379 union megasas_evt_class_locale curr_aen;
4380 union megasas_evt_class_locale prev_aen;
4381
4382 /*
4383 * If there is an AEN pending already (aen_cmd), check if the
4384 * class_locale of that pending AEN is inclusive of the new
4385 * AEN request we currently have. If it is, then we don't have
4386 * to do anything. In other words, whichever events the current
4387 * AEN request is subscribing to, have already been subscribed
4388 * to.
4389 *
4390 * If the old_cmd is _not_ inclusive, then we have to abort
4391 * that command, form a class_locale that is superset of both
4392 * old and current and re-issue to the FW
4393 */
4394
4395 curr_aen.word = class_locale_word;
4396
4397 if (instance->aen_cmd) {
4398 prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
4399
4400 /*
4401 * A class whose enum value is smaller is inclusive of all
4402 * higher values. If a PROGRESS (= -1) was previously
4403 * registered, then new registration requests for higher
4404 * classes need not be sent to FW. They are automatically
4405 * included.
4406 *
4407 * Locale numbers don't have such hierarchy. They are bitmap
4408 * values
4409 */
4410 if ((prev_aen.members.class <= curr_aen.members.class) &&
4411 !((prev_aen.members.locale & curr_aen.members.locale) ^
4412 curr_aen.members.locale)) {
4413 /*
4414 * Previously issued event registration includes
4415 * current request. Nothing to do.
4416 */
4417
4418 return (0);
4419 } else {
4420 curr_aen.members.locale |= prev_aen.members.locale;
4421
4422 if (prev_aen.members.class < curr_aen.members.class)
4423 curr_aen.members.class = prev_aen.members.class;
4424
4425 ret_val = abort_aen_cmd(instance, instance->aen_cmd);
4426
4427 if (ret_val) {
4428 con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
4429 "failed to abort previous AEN command\n"));
4430
4431 return (ret_val);
4432 }
4433 }
4434 } else {
4435 curr_aen.word = class_locale_word;
4436 }
4437
4438 cmd = get_mfi_pkt(instance);
4439
4440 if (!cmd)
4441 return (-ENOMEM);
4442
4443 dcmd = &cmd->frame->dcmd;
4444
4445 /* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
4446 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4447
4448 (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
4449 sizeof (struct megasas_evt_detail));
4450
4451 /* Prepare DCMD for aen registration */
4452 dcmd->cmd = MFI_CMD_OP_DCMD;
4453 dcmd->cmd_status = 0x0;
4454 dcmd->sge_count = 1;
4455 dcmd->flags = MFI_FRAME_DIR_READ;
4456 dcmd->timeout = 0;
4457 dcmd->data_xfer_len = sizeof (struct megasas_evt_detail);
4458 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
4459 dcmd->mbox.w[0] = seq_num;
4460 dcmd->mbox.w[1] = curr_aen.word;
4461 dcmd->sgl.sge32[0].phys_addr =
4462 instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address;
4463 dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_detail);
4464
4465 instance->aen_seq_num = seq_num;
4466
4467 /*
4468 * Store reference to the cmd used to register for AEN. When an
4469 * application wants us to register for AEN, we have to abort this
4470 * cmd and re-register with a new EVENT LOCALE supplied by that app
4471 */
4472 instance->aen_cmd = cmd;
4473
4474 cmd->frame_count = 1;
4475
4476 /* Issue the aen registration frame */
4477 /* atomic_add_16 (&instance->fw_outstanding, 1); */
4478 instance->func_ptr->issue_cmd(cmd, instance);
4479
4480 return (0);
4481 }
4482
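/*
 * display_scsi_inquiry
 *
 * Log the vendor, model, revision, device type and ANSI SCSI revision
 * decoded from a standard INQUIRY response.
 */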
4483 static void
4484 display_scsi_inquiry(caddr_t scsi_inq)
4485 {
4486 #define MAX_SCSI_DEVICE_CODE 14
4487 int i;
4488 char inquiry_buf[256] = {0};
4489 int len;
4490 const char *const scsi_device_types[] = {
4491 "Direct-Access ",
4492 "Sequential-Access",
4493 "Printer ",
4494 "Processor ",
4495 "WORM ",
4496 "CD-ROM ",
4497 "Scanner ",
4498 "Optical Device ",
4499 "Medium Changer ",
4500 "Communications ",
4501 "Unknown ",
4502 "Unknown ",
4503 "Unknown ",
4504 "Enclosure ",
4505 };
4506
4507 len = 0;
4508
4509 len += snprintf(inquiry_buf + len, 256 - len, " Vendor: ");
4510 for (i = 8; i < 16; i++) {
4511 len += snprintf(inquiry_buf + len, 256 - len, "%c",
4512 scsi_inq[i]);
4513 }
4514
4515 len += snprintf(inquiry_buf + len, 256 - len, " Model: ");
4516
4517 for (i = 16; i < 32; i++) {
4518 len += snprintf(inquiry_buf + len, 256 - len, "%c",
4519 scsi_inq[i]);
4520 }
4521
4522 len += snprintf(inquiry_buf + len, 256 - len, " Rev: ");
4523
4524 for (i = 32; i < 36; i++) {
4525 len += snprintf(inquiry_buf + len, 256 - len, "%c",
4526 scsi_inq[i]);
4527 }
4528
4529 len += snprintf(inquiry_buf + len, 256 - len, "\n");
4530
4531
4532 i = scsi_inq[0] & 0x1f;
4533
4534
4535 len += snprintf(inquiry_buf + len, 256 - len, " Type: %s ",
4536 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
4537 "Unknown ");
4538
4539
4540 len += snprintf(inquiry_buf + len, 256 - len,
4541 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
4542
4543 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
4544 len += snprintf(inquiry_buf + len, 256 - len, " CCS\n");
4545 } else {
4546 len += snprintf(inquiry_buf + len, 256 - len, "\n");
4547 }
4548
4549 con_log(CL_ANN1, (CE_CONT, "%s", inquiry_buf));
4550 }
4551
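/*
 * read_fw_status_reg
 *
 * Return the firmware status register: outbound message 0 on xscale
 * controllers, outbound scratch pad 0 on ppc (1078) controllers.
 */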
4552 static int
4553 read_fw_status_reg_xscale(struct megasas_instance *instance)
4554 {
4555 return ((int)RD_OB_MSG_0(instance));
4556 }
4557
4558 static int
4559 read_fw_status_reg_ppc(struct megasas_instance *instance)
4560 {
4561 return ((int)RD_OB_SCRATCH_PAD_0(instance));
4562 }
4563
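/*
 * issue_cmd
 *
 * Fire-and-forget submission of an MFI frame to the inbound queue port.
 * The xscale variant posts the frame address shifted right by 3 with
 * (frame_count - 1) ORed into the low bits; the ppc variant ORs
 * ((frame_count - 1) << 1) | 1 into the unshifted frame address.
 */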
4564 static void
4565 issue_cmd_xscale(struct megasas_cmd *cmd, struct megasas_instance *instance)
4566 {
4567 atomic_inc_16(&instance->fw_outstanding);
4568
4569 /* Issue the command to the FW */
4570 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
4571 (cmd->frame_count - 1), instance);
4572 }
4573
4574 static void
4575 issue_cmd_ppc(struct megasas_cmd *cmd, struct megasas_instance *instance)
4576 {
4577 atomic_inc_16(&instance->fw_outstanding);
4578
4579 /* Issue the command to the FW */
4580 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
4581 (((cmd->frame_count - 1) << 1) | 1), instance);
4582 }
4583
4584 /*
4585 * issue_cmd_in_sync_mode
4586 */
4587 static int
4588 issue_cmd_in_sync_mode_xscale(struct megasas_instance *instance,
4589 struct megasas_cmd *cmd)
4590 {
4591 int i;
4592 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);
4593
4594 cmd->cmd_status = ENODATA;
4595
4596 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
4597 (cmd->frame_count - 1), instance);
4598
4599 mutex_enter(&instance->int_cmd_mtx);
4600
4601 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
4602 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
4603 }
4604
4605 mutex_exit(&instance->int_cmd_mtx);
4606
4607 if (i < (msecs - 1)) {
4608 return (0);
4609 } else {
4610 return (1);
4611 }
4612 }
4613
4614 static int
4615 issue_cmd_in_sync_mode_ppc(struct megasas_instance *instance,
4616 struct megasas_cmd *cmd)
4617 {
4618 int i;
4619 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);
4620
4621 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called\n"));
4622
4623 cmd->cmd_status = ENODATA;
4624
4625 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
4626 (((cmd->frame_count - 1) << 1) | 1), instance);
4627
4628 mutex_enter(&instance->int_cmd_mtx);
4629
4630 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
4631 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
4632 }
4633
4634 mutex_exit(&instance->int_cmd_mtx);
4635
4636 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done\n"));
4637
4638 if (i < (msecs - 1)) {
4639 return (0);
4640 } else {
4641 return (1);
4642 }
4643 }
4644
4645 /*
4646 * issue_cmd_in_poll_mode
4647 */
4648 static int
4649 issue_cmd_in_poll_mode_xscale(struct megasas_instance *instance,
4650 struct megasas_cmd *cmd)
4651 {
4652 int i;
4653 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
4654 struct megasas_header *frame_hdr;
4655
4656 frame_hdr = (struct megasas_header *)cmd->frame;
4657 frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
4658 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
4659
4660 /* issue the frame using inbound queue port */
4661 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
4662 (cmd->frame_count - 1), instance);
4663
4664 /* wait for cmd_status to change from 0xFF */
4665 for (i = 0; i < msecs && (frame_hdr->cmd_status ==
4666 MFI_CMD_STATUS_POLL_MODE); i++) {
4667 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
4668 }
4669
4670 if (frame_hdr->cmd_status == MFI_CMD_STATUS_POLL_MODE) {
4671 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
4672 "cmd polling timed out"));
4673 return (DDI_FAILURE);
4674 }
4675
4676 return (DDI_SUCCESS);
4677 }
4678
4679 static int
4680 issue_cmd_in_poll_mode_ppc(struct megasas_instance *instance,
4681 struct megasas_cmd *cmd)
4682 {
4683 int i;
4684 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
4685 struct megasas_header *frame_hdr;
4686
4687 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called\n"));
4688
4689 frame_hdr = (struct megasas_header *)cmd->frame;
4690 frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
4691 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
4692
4693 /* issue the frame using inbound queue port */
4694 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
4695 (((cmd->frame_count - 1) << 1) | 1), instance);
4696
4697 /* wait for cmd_status to change from 0xFF */
4698 for (i = 0; i < msecs && (frame_hdr->cmd_status ==
4699 MFI_CMD_STATUS_POLL_MODE); i++) {
4700 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
4701 }
4702
4703 if (frame_hdr->cmd_status == MFI_CMD_STATUS_POLL_MODE) {
4704 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
4705 "cmd polling timed out"));
4706 return (DDI_FAILURE);
4707 }
4708
4709 return (DDI_SUCCESS);
4710 }
4711
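/*
 * enable_intr
 *
 * Unmask controller interrupts. The ppc variant first clears the outbound
 * doorbell, unmasks only the 1078 reply-message interrupt, and reads the
 * mask register back to force the PCI write to complete.
 */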
4712 static void
4713 enable_intr_xscale(struct megasas_instance *instance)
4714 {
4715 MFI_ENABLE_INTR(instance);
4716 }
4717
4718 static void
4719 enable_intr_ppc(struct megasas_instance *instance)
4720 {
4721 uint32_t mask;
4722
4723 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called\n"));
4724
4725 /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
4726 WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);
4727
4728 /*
4729 * As 1078DE is same as 1078 chip, the interrupt mask
4730 * remains the same.
4731 */
4732 /* WR_OB_INTR_MASK(~0x80000000, instance); */
4733 WR_OB_INTR_MASK(~(MFI_REPLY_1078_MESSAGE_INTR), instance);
4734
4735 /* dummy read to force PCI flush */
4736 mask = RD_OB_INTR_MASK(instance);
4737
4738 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
4739 "outbound_intr_mask = 0x%x\n", mask));
4740 }
4741
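/*
 * disable_intr
 *
 * Mask controller interrupts. The ppc variant writes the full interrupt
 * mask and reads it back to force the PCI write to complete.
 */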
4742 static void
4743 disable_intr_xscale(struct megasas_instance *instance)
4744 {
4745 MFI_DISABLE_INTR(instance);
4746 }
4747
4748 static void
4749 disable_intr_ppc(struct megasas_instance *instance)
4750 {
4751 uint32_t mask;
4752
4753 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called\n"));
4754
4755 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
4756 "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance)));
4757
4758 /* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */
4759 WR_OB_INTR_MASK(OB_INTR_MASK, instance);
4760
4761 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
4762 "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance)));
4763
4764 /* dummy read to force PCI flush */
4765 mask = RD_OB_INTR_MASK(instance);
4766 #ifdef lint
4767 mask = mask;
4768 #endif
4769 }
4770
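/*
 * intr_ack
 *
 * Check the outbound interrupt status register to decide whether the
 * interrupt is ours; if so, clear it (xscale: write the status back,
 * ppc: write the doorbell-clear register and do a dummy status read)
 * and return DDI_INTR_CLAIMED, otherwise return DDI_INTR_UNCLAIMED.
 */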
4771 static int
4772 intr_ack_xscale(struct megasas_instance *instance)
4773 {
4774 uint32_t status;
4775
4776 /* check if it is our interrupt */
4777 status = RD_OB_INTR_STATUS(instance);
4778
4779 if (!(status & MFI_OB_INTR_STATUS_MASK)) {
4780 return (DDI_INTR_UNCLAIMED);
4781 }
4782
4783 /* clear the interrupt by writing back the same value */
4784 WR_OB_INTR_STATUS(status, instance);
4785
4786 return (DDI_INTR_CLAIMED);
4787 }
4788
4789 static int
4790 intr_ack_ppc(struct megasas_instance *instance)
4791 {
4792 uint32_t status;
4793
4794 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called\n"));
4795
4796 /* check if it is our interrupt */
4797 status = RD_OB_INTR_STATUS(instance);
4798
4799 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x\n", status));
4800
4801 /*
4802 * As 1078DE is same as 1078 chip, the status field
4803 * remains the same.
4804 */
4805 if (!(status & MFI_REPLY_1078_MESSAGE_INTR)) {
4806 return (DDI_INTR_UNCLAIMED);
4807 }
4808
4809 /* clear the interrupt by writing back the same value */
4810 WR_OB_DOORBELL_CLEAR(status, instance);
4811
4812 /* dummy READ */
4813 status = RD_OB_INTR_STATUS(instance);
4814
4815 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared\n"));
4816
4817 return (DDI_INTR_CLAIMED);
4818 }
4819
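/*
 * megasas_common_check
 *
 * Post-command FMA check: verify the frame, internal and event-detail DMA
 * handles and the register access handle. On any error, report a
 * DDI_SERVICE_UNAFFECTED impact, mark the packet with CMD_TRAN_ERR and
 * return DDI_FAILURE.
 */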
4820 static int
4821 megasas_common_check(struct megasas_instance *instance,
4822 struct megasas_cmd *cmd)
4823 {
4824 int ret = DDI_SUCCESS;
4825
4826 if (megasas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
4827 DDI_SUCCESS) {
4828 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4829 if (cmd->pkt != NULL) {
4830 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4831 cmd->pkt->pkt_statistics = 0;
4832 }
4833 ret = DDI_FAILURE;
4834 }
4835 if (megasas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
4836 != DDI_SUCCESS) {
4837 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4838 if (cmd->pkt != NULL) {
4839 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4840 cmd->pkt->pkt_statistics = 0;
4841 }
4842 ret = DDI_FAILURE;
4843 }
4844 if (megasas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) !=
4845 DDI_SUCCESS) {
4846 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4847 if (cmd->pkt != NULL) {
4848 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4849 cmd->pkt->pkt_statistics = 0;
4850 }
4851 ret = DDI_FAILURE;
4852 }
4853 if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
4854 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4855 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0);
4856 if (cmd->pkt != NULL) {
4857 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4858 cmd->pkt->pkt_statistics = 0;
4859 }
4860 ret = DDI_FAILURE;
4861 }
4862
4863 return (ret);
4864 }
4865
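/*
 * megasas_fm_error_cb
 *
 * FMA error callback registered via ddi_fm_handler_register(); posts a
 * PCI ereport and returns the framework's error status.
 */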
4866 /*ARGSUSED*/
4867 static int
4868 megasas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
4869 {
4870 /*
4871 * as the driver can always deal with an error in any dma or
4872 * access handle, we can just return the fme_status value.
4873 */
4874 pci_ereport_post(dip, err, NULL);
4875 return (err->fme_status);
4876 }
4877
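/*
 * megasas_fm_init
 *
 * Register the requested fm_capabilities with the IO Fault Services
 * framework, set up PCI ereports and the error callback as capable, and
 * adjust the access and DMA attributes for FMA.
 */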
4878 static void
4879 megasas_fm_init(struct megasas_instance *instance)
4880 {
4881 /* Need to change iblock to priority for new MSI intr */
4882 ddi_iblock_cookie_t fm_ibc;
4883
4884 /* Only register with IO Fault Services if we have some capability */
4885 if (instance->fm_capabilities) {
4886 /* Adjust access and dma attributes for FMA */
4887 endian_attr.devacc_attr_access = DDI_FLAGERR_ACC;
4888 megasas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
4889
4890 /*
4891 * Register capabilities with IO Fault Services.
4892 * fm_capabilities will be updated to indicate
4893 * capabilities actually supported (not requested.)
4894 */
4895
4896 ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc);
4897
4898 /*
4899 * Initialize pci ereport capabilities if ereport
4900 * capable (should always be.)
4901 */
4902
4903 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
4904 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4905 pci_ereport_setup(instance->dip);
4906 }
4907
4908 /*
4909 * Register error callback if error callback capable.
4910 */
4911 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4912 ddi_fm_handler_register(instance->dip,
4913 megasas_fm_error_cb, (void*) instance);
4914 }
4915 } else {
4916 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4917 megasas_generic_dma_attr.dma_attr_flags = 0;
4918 }
4919 }
4920
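/*
 * megasas_fm_fini
 *
 * Undo megasas_fm_init(): unregister the error callback, tear down PCI
 * ereports, unregister from IO Fault Services and restore the default
 * access and DMA attributes.
 */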
4921 static void
4922 megasas_fm_fini(struct megasas_instance *instance)
4923 {
4924 /* Only unregister FMA capabilities if registered */
4925 if (instance->fm_capabilities) {
4926 /*
4927 * Un-register error callback if error callback capable.
4928 */
4929 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4930 ddi_fm_handler_unregister(instance->dip);
4931 }
4932
4933 /*
4934 * Release any resources allocated by pci_ereport_setup()
4935 */
4936 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
4937 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4938 pci_ereport_teardown(instance->dip);
4939 }
4940
4941 /* Unregister from IO Fault Services */
4942 ddi_fm_fini(instance->dip);
4943
4944 /* Adjust access and dma attributes for FMA */
4945 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4946 megasas_generic_dma_attr.dma_attr_flags = 0;
4947 }
4948 }
4949
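/*
 * megasas_check_acc_handle
 *
 * Return the FMA error status (fme_status) recorded against the given
 * register access handle, or DDI_FAILURE for a NULL handle.
 */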
4950 int
4951 megasas_check_acc_handle(ddi_acc_handle_t handle)
4952 {
4953 ddi_fm_error_t de;
4954
4955 if (handle == NULL) {
4956 return (DDI_FAILURE);
4957 }
4958
4959 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
4960
4961 return (de.fme_status);
4962 }
4963
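/*
 * megasas_check_dma_handle
 *
 * Return the FMA error status (fme_status) recorded against the given
 * DMA handle, or DDI_FAILURE for a NULL handle.
 */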
4964 int
4965 megasas_check_dma_handle(ddi_dma_handle_t handle)
4966 {
4967 ddi_fm_error_t de;
4968
4969 if (handle == NULL) {
4970 return (DDI_FAILURE);
4971 }
4972
4973 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
4974
4975 return (de.fme_status);
4976 }
4977
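/*
 * megasas_fm_ereport
 *
 * Post a DDI_FM_DEVICE ereport with the supplied detail class when the
 * instance is ereport capable.
 */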
4978 void
4979 megasas_fm_ereport(struct megasas_instance *instance, char *detail)
4980 {
4981 uint64_t ena;
4982 char buf[FM_MAX_CLASS];
4983
4984 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
4985 ena = fm_ena_generate(0, FM_ENA_FMT1);
4986 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) {
4987 ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP,
4988 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
4989 }
4990 }