1 /*
2 * mr_sas.c: source for mr_sas driver
3 *
4 * Solaris MegaRAID device driver for SAS2.0 controllers
5 * Copyright (c) 2008-2012, LSI Logic Corporation.
6 * All rights reserved.
7 *
8 * Version:
9 * Author:
10 * Swaminathan K S
11 * Arun Chandrashekhar
12 * Manju R
13 * Rasheed
14 * Shakeel Bukhari
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions are met:
18 *
19 * 1. Redistributions of source code must retain the above copyright notice,
20 * this list of conditions and the following disclaimer.
21 *
22 * 2. Redistributions in binary form must reproduce the above copyright notice,
23 * this list of conditions and the following disclaimer in the documentation
24 * and/or other materials provided with the distribution.
25 *
26 * 3. Neither the name of the author nor the names of its contributors may be
27 * used to endorse or promote products derived from this software without
28 * specific prior written permission.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
33 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
34 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
35 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
36 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
37 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
38 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
39 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
40 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
41 * DAMAGE.
42 */
43
44 /*
45 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
46 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
47 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
48 * Copyright 2015 Garrett D'Amore <garrett@damore.org>
49 */
50
51 #include <sys/types.h>
52 #include <sys/param.h>
53 #include <sys/file.h>
54 #include <sys/errno.h>
55 #include <sys/open.h>
56 #include <sys/cred.h>
57 #include <sys/modctl.h>
58 #include <sys/conf.h>
59 #include <sys/devops.h>
60 #include <sys/cmn_err.h>
61 #include <sys/kmem.h>
62 #include <sys/stat.h>
63 #include <sys/mkdev.h>
64 #include <sys/pci.h>
65 #include <sys/scsi/scsi.h>
66 #include <sys/ddi.h>
67 #include <sys/sunddi.h>
68 #include <sys/atomic.h>
69 #include <sys/signal.h>
70 #include <sys/byteorder.h>
71 #include <sys/sdt.h>
72 #include <sys/fs/dv_node.h> /* devfs_clean */
73
74 #include "mr_sas.h"
75
76 /*
77 * FMA header files
78 */
79 #include <sys/ddifm.h>
80 #include <sys/fm/protocol.h>
81 #include <sys/fm/util.h>
82 #include <sys/fm/io/ddi.h>
83
/*
 * Macros to help Skinny and stock 2108/MFI live together.
 *
 * Skinny HBAs expose a split 64-bit inbound queue port (low/high halves),
 * while stock 2108/MFI controllers take a single 32-bit inbound queue
 * write; this macro routes the doorbell write to the correct register set
 * based on the per-instance "skinny" flag.
 */
#define	WR_IB_PICK_QPORT(addr, instance) \
	if ((instance)->skinny) { \
		WR_IB_LOW_QPORT((addr), (instance)); \
		WR_IB_HIGH_QPORT(0, (instance)); \
	} else { \
		WR_IB_QPORT((addr), (instance)); \
	}
92
93 /*
94 * Local static data
95 */
96 static void *mrsas_state = NULL;
97 static volatile boolean_t mrsas_relaxed_ordering = B_TRUE;
98 volatile int debug_level_g = CL_NONE;
99 static volatile int msi_enable = 1;
100 static volatile int ctio_enable = 1;
101
102 /* Default Timeout value to issue online controller reset */
103 volatile int debug_timeout_g = 0xF0; /* 0xB4; */
104 /* Simulate consecutive firmware fault */
105 static volatile int debug_fw_faults_after_ocr_g = 0;
106 #ifdef OCRDEBUG
107 /* Simulate three consecutive timeout for an IO */
108 static volatile int debug_consecutive_timeout_after_ocr_g = 0;
109 #endif
110
111 #pragma weak scsi_hba_open
112 #pragma weak scsi_hba_close
113 #pragma weak scsi_hba_ioctl
114
115 /* Local static prototypes. */
116 static int mrsas_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
117 static int mrsas_attach(dev_info_t *, ddi_attach_cmd_t);
118 #ifdef __sparc
119 static int mrsas_reset(dev_info_t *, ddi_reset_cmd_t);
120 #else
121 static int mrsas_quiesce(dev_info_t *);
122 #endif
123 static int mrsas_detach(dev_info_t *, ddi_detach_cmd_t);
124 static int mrsas_open(dev_t *, int, int, cred_t *);
125 static int mrsas_close(dev_t, int, int, cred_t *);
126 static int mrsas_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
127
128 static int mrsas_tran_tgt_init(dev_info_t *, dev_info_t *,
129 scsi_hba_tran_t *, struct scsi_device *);
130 static struct scsi_pkt *mrsas_tran_init_pkt(struct scsi_address *, register
131 struct scsi_pkt *, struct buf *, int, int, int, int,
132 int (*)(), caddr_t);
133 static int mrsas_tran_start(struct scsi_address *,
134 register struct scsi_pkt *);
135 static int mrsas_tran_abort(struct scsi_address *, struct scsi_pkt *);
136 static int mrsas_tran_reset(struct scsi_address *, int);
137 static int mrsas_tran_getcap(struct scsi_address *, char *, int);
138 static int mrsas_tran_setcap(struct scsi_address *, char *, int, int);
139 static void mrsas_tran_destroy_pkt(struct scsi_address *,
140 struct scsi_pkt *);
141 static void mrsas_tran_dmafree(struct scsi_address *, struct scsi_pkt *);
142 static void mrsas_tran_sync_pkt(struct scsi_address *, struct scsi_pkt *);
143 static int mrsas_tran_quiesce(dev_info_t *dip);
144 static int mrsas_tran_unquiesce(dev_info_t *dip);
145 static uint_t mrsas_isr();
146 static uint_t mrsas_softintr();
147 static void mrsas_undo_resources(dev_info_t *, struct mrsas_instance *);
148
149 static void free_space_for_mfi(struct mrsas_instance *);
150 static uint32_t read_fw_status_reg_ppc(struct mrsas_instance *);
151 static void issue_cmd_ppc(struct mrsas_cmd *, struct mrsas_instance *);
152 static int issue_cmd_in_poll_mode_ppc(struct mrsas_instance *,
153 struct mrsas_cmd *);
154 static int issue_cmd_in_sync_mode_ppc(struct mrsas_instance *,
155 struct mrsas_cmd *);
156 static void enable_intr_ppc(struct mrsas_instance *);
157 static void disable_intr_ppc(struct mrsas_instance *);
158 static int intr_ack_ppc(struct mrsas_instance *);
159 static void flush_cache(struct mrsas_instance *instance);
160 void display_scsi_inquiry(caddr_t);
161 static int start_mfi_aen(struct mrsas_instance *instance);
162 static int handle_drv_ioctl(struct mrsas_instance *instance,
163 struct mrsas_ioctl *ioctl, int mode);
164 static int handle_mfi_ioctl(struct mrsas_instance *instance,
165 struct mrsas_ioctl *ioctl, int mode);
166 static int handle_mfi_aen(struct mrsas_instance *instance,
167 struct mrsas_aen *aen);
168 static struct mrsas_cmd *build_cmd(struct mrsas_instance *,
169 struct scsi_address *, struct scsi_pkt *, uchar_t *);
170 static int alloc_additional_dma_buffer(struct mrsas_instance *);
171 static void complete_cmd_in_sync_mode(struct mrsas_instance *,
172 struct mrsas_cmd *);
173 static int mrsas_kill_adapter(struct mrsas_instance *);
174 static int mrsas_issue_init_mfi(struct mrsas_instance *);
175 static int mrsas_reset_ppc(struct mrsas_instance *);
176 static uint32_t mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *);
177 static int wait_for_outstanding(struct mrsas_instance *instance);
178 static int register_mfi_aen(struct mrsas_instance *instance,
179 uint32_t seq_num, uint32_t class_locale_word);
180 static int issue_mfi_pthru(struct mrsas_instance *instance, struct
181 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
182 static int issue_mfi_dcmd(struct mrsas_instance *instance, struct
183 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
184 static int issue_mfi_smp(struct mrsas_instance *instance, struct
185 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
186 static int issue_mfi_stp(struct mrsas_instance *instance, struct
187 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
188 static int abort_aen_cmd(struct mrsas_instance *instance,
189 struct mrsas_cmd *cmd_to_abort);
190
191 static void mrsas_rem_intrs(struct mrsas_instance *instance);
192 static int mrsas_add_intrs(struct mrsas_instance *instance, int intr_type);
193
194 static void mrsas_tran_tgt_free(dev_info_t *, dev_info_t *,
195 scsi_hba_tran_t *, struct scsi_device *);
196 static int mrsas_tran_bus_config(dev_info_t *, uint_t,
197 ddi_bus_config_op_t, void *, dev_info_t **);
198 static int mrsas_parse_devname(char *, int *, int *);
199 static int mrsas_config_all_devices(struct mrsas_instance *);
200 static int mrsas_config_ld(struct mrsas_instance *, uint16_t,
201 uint8_t, dev_info_t **);
202 static int mrsas_name_node(dev_info_t *, char *, int);
203 static void mrsas_issue_evt_taskq(struct mrsas_eventinfo *);
204 static void free_additional_dma_buffer(struct mrsas_instance *);
205 static void io_timeout_checker(void *);
206 static void mrsas_fm_init(struct mrsas_instance *);
207 static void mrsas_fm_fini(struct mrsas_instance *);
208
/*
 * Dispatch table for 2108 ("Liberator") and Skinny controllers; these all
 * use the PPC-style MFI register interface (see *_ppc helpers below).
 */
static struct mrsas_function_template mrsas_function_template_ppc = {
	.read_fw_status_reg = read_fw_status_reg_ppc,
	.issue_cmd = issue_cmd_ppc,
	.issue_cmd_in_sync_mode = issue_cmd_in_sync_mode_ppc,
	.issue_cmd_in_poll_mode = issue_cmd_in_poll_mode_ppc,
	.enable_intr = enable_intr_ppc,
	.disable_intr = disable_intr_ppc,
	.intr_ack = intr_ack_ppc,
	.init_adapter = mrsas_init_adapter_ppc
};
219
220
/*
 * Dispatch table for 2208 Thunderbolt/Invader/Fury (Fusion) controllers;
 * these use the tbolt_* register interface implemented elsewhere.
 */
static struct mrsas_function_template mrsas_function_template_fusion = {
	.read_fw_status_reg = tbolt_read_fw_status_reg,
	.issue_cmd = tbolt_issue_cmd,
	.issue_cmd_in_sync_mode = tbolt_issue_cmd_in_sync_mode,
	.issue_cmd_in_poll_mode = tbolt_issue_cmd_in_poll_mode,
	.enable_intr = tbolt_enable_intr,
	.disable_intr = tbolt_disable_intr,
	.intr_ack = tbolt_intr_ack,
	.init_adapter = mrsas_init_adapter_tbolt
};
231
232
/*
 * Baseline DMA attributes for the HBA; a per-instance copy is made in
 * mrsas_attach() with dma_attr_sgllen adjusted to the firmware-reported
 * maximum SGE count (and optionally relaxed ordering enabled).
 */
ddi_dma_attr_t mrsas_generic_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xFFFFFFFFU,		/* high DMA address range */
	0xFFFFFFFFU,		/* DMA counter register */
	8,			/* DMA address alignment */
	0x07,			/* DMA burstsizes */
	1,			/* min DMA size */
	0xFFFFFFFFU,		/* max DMA size */
	0xFFFFFFFFU,		/* segment boundary */
	MRSAS_MAX_SGE_CNT,	/* dma_attr_sglen */
	512,			/* granularity of device */
	0			/* bus specific DMA flags */
};
247
/* Maximum transfer size (bytes) advertised for non-Thunderbolt adapters. */
int32_t mrsas_max_cap_maxxfer = 0x1000000;

/*
 * Fix for: Thunderbolt controller IO timeout when IO write size is 1MEG,
 * Limit size to 256K
 */
uint32_t mrsas_tbolt_max_cap_maxxfer = (512 * 512);
255
256 /*
257 * cb_ops contains base level routines
258 */
/*
 * Character-device entry points; only open/close/ioctl are implemented,
 * everything else is nodev since this is an HBA driver (actual I/O flows
 * through the SCSA tran_* interface, not the cb_ops vector).
 */
static struct cb_ops mrsas_cb_ops = {
	mrsas_open,		/* open */
	mrsas_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	mrsas_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	nodev,			/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
279
280 /*
281 * dev_ops contains configuration routines
282 */
/*
 * Autoconfiguration entry points.  On SPARC the legacy reset(9E) entry is
 * provided and quiesce is not needed; on other platforms mrsas_quiesce()
 * supports fast reboot instead.
 */
static struct dev_ops mrsas_ops = {
	DEVO_REV,		/* rev, */
	0,			/* refcnt */
	mrsas_getinfo,		/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	mrsas_attach,		/* attach */
	mrsas_detach,		/* detach */
#ifdef	__sparc
	mrsas_reset,		/* reset */
#else	/* __sparc */
	nodev,
#endif	/* __sparc */
	&mrsas_cb_ops,		/* char/block ops */
	NULL,			/* bus ops */
	NULL,			/* power */
#ifdef	__sparc
	ddi_quiesce_not_needed
#else	/* __sparc */
	mrsas_quiesce		/* quiesce */
#endif	/* __sparc */
};
305
/* Loadable-module linkage: this module is a device driver. */
static struct modldrv modldrv = {
	&mod_driverops,		/* module type - driver */
	MRSAS_VERSION,
	&mrsas_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,		/* ml_rev - must be MODREV_1 */
	&modldrv,		/* ml_linkage */
	NULL			/* end of driver linkage */
};

/*
 * Register access attributes: the controller's registers are little-endian
 * and accesses must not be reordered (DDI_STRICTORDER_ACC).
 */
static struct ddi_device_acc_attr endian_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/* Use the LSI Fast Path for the 2208 (tbolt) commands. */
unsigned int enable_fp = 1;
327
328
329 /*
330 * ************************************************************************** *
331 * *
332 * common entry points - for loadable kernel modules *
333 * *
334 * ************************************************************************** *
335 */
336
337 /*
338 * _init - initialize a loadable module
339 * @void
340 *
341 * The driver should perform any one-time resource allocation or data
342 * initialization during driver loading in _init(). For example, the driver
343 * should initialize any mutexes global to the driver in this routine.
344 * The driver should not, however, use _init() to allocate or initialize
345 * anything that has to do with a particular instance of the device.
346 * Per-instance initialization must be done in attach().
347 */
348 int
349 _init(void)
350 {
351 int ret;
352
353 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
354
355 ret = ddi_soft_state_init(&mrsas_state,
356 sizeof (struct mrsas_instance), 0);
357
358 if (ret != DDI_SUCCESS) {
359 cmn_err(CE_WARN, "mr_sas: could not init state");
360 return (ret);
361 }
362
363 if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) {
364 cmn_err(CE_WARN, "mr_sas: could not init scsi hba");
365 ddi_soft_state_fini(&mrsas_state);
366 return (ret);
367 }
368
369 ret = mod_install(&modlinkage);
370
371 if (ret != DDI_SUCCESS) {
372 cmn_err(CE_WARN, "mr_sas: mod_install failed");
373 scsi_hba_fini(&modlinkage);
374 ddi_soft_state_fini(&mrsas_state);
375 }
376
377 return (ret);
378 }
379
380 /*
381 * _info - returns information about a loadable module.
382 * @void
383 *
384 * _info() is called to return module information. This is a typical entry
385 * point that does predefined role. It simply calls mod_info().
386 */
387 int
388 _info(struct modinfo *modinfop)
389 {
390 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
391
392 return (mod_info(&modlinkage, modinfop));
393 }
394
395 /*
396 * _fini - prepare a loadable module for unloading
397 * @void
398 *
399 * In _fini(), the driver should release any resources that were allocated in
400 * _init(). The driver must remove itself from the system module list.
401 */
402 int
403 _fini(void)
404 {
405 int ret;
406
407 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
408
409 if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS) {
410 con_log(CL_ANN1,
411 (CE_WARN, "_fini: mod_remove() failed, error 0x%X", ret));
412 return (ret);
413 }
414
415 scsi_hba_fini(&modlinkage);
416 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: scsi_hba_fini() done."));
417
418 ddi_soft_state_fini(&mrsas_state);
419 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: ddi_soft_state_fini() done."));
420
421 return (ret);
422 }
423
424
425 /*
426 * ************************************************************************** *
427 * *
428 * common entry points - for autoconfiguration *
429 * *
430 * ************************************************************************** *
431 */
432 /*
433 * attach - adds a device to the system as part of initialization
434 * @dip:
435 * @cmd:
436 *
437 * The kernel calls a driver's attach() entry point to attach an instance of
438 * a device (for MegaRAID, it is instance of a controller) or to resume
439 * operation for an instance of a device that has been suspended or has been
440 * shut down by the power management framework
441 * The attach() entry point typically includes the following types of
442 * processing:
443 * - allocate a soft-state structure for the device instance (for MegaRAID,
444 * controller instance)
445 * - initialize per-instance mutexes
446 * - initialize condition variables
447 * - register the device's interrupts (for MegaRAID, controller's interrupts)
448 * - map the registers and memory of the device instance (for MegaRAID,
449 * controller instance)
450 * - create minor device nodes for the device instance (for MegaRAID,
451 * controller instance)
452 * - report that the device instance (for MegaRAID, controller instance) has
453 * attached
454 */
455 static int
456 mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
457 {
458 int instance_no;
459 int nregs;
460 int i = 0;
461 uint8_t irq;
462 uint16_t vendor_id;
463 uint16_t device_id;
464 uint16_t subsysvid;
465 uint16_t subsysid;
466 uint16_t command;
467 off_t reglength = 0;
468 int intr_types = 0;
469 char *data;
470
471 scsi_hba_tran_t *tran;
472 ddi_dma_attr_t tran_dma_attr;
473 struct mrsas_instance *instance;
474
475 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
476
477 /* CONSTCOND */
478 ASSERT(NO_COMPETING_THREADS);
479
480 instance_no = ddi_get_instance(dip);
481
482 /*
483 * check to see whether this device is in a DMA-capable slot.
484 */
485 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
486 cmn_err(CE_WARN,
487 "mr_sas%d: Device in slave-only slot, unused",
488 instance_no);
489 return (DDI_FAILURE);
490 }
491
492 switch (cmd) {
493 case DDI_ATTACH:
494 /* allocate the soft state for the instance */
495 if (ddi_soft_state_zalloc(mrsas_state, instance_no)
496 != DDI_SUCCESS) {
497 cmn_err(CE_WARN,
498 "mr_sas%d: Failed to allocate soft state",
499 instance_no);
500 return (DDI_FAILURE);
501 }
502
503 instance = (struct mrsas_instance *)ddi_get_soft_state
504 (mrsas_state, instance_no);
505
506 if (instance == NULL) {
507 cmn_err(CE_WARN,
508 "mr_sas%d: Bad soft state", instance_no);
509 ddi_soft_state_free(mrsas_state, instance_no);
510 return (DDI_FAILURE);
511 }
512
513 instance->unroll.softs = 1;
514
515 /* Setup the PCI configuration space handles */
516 if (pci_config_setup(dip, &instance->pci_handle) !=
517 DDI_SUCCESS) {
518 cmn_err(CE_WARN,
519 "mr_sas%d: pci config setup failed ",
520 instance_no);
521
522 ddi_soft_state_free(mrsas_state, instance_no);
523 return (DDI_FAILURE);
524 }
525
526 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
527 cmn_err(CE_WARN,
528 "mr_sas: failed to get registers.");
529
530 pci_config_teardown(&instance->pci_handle);
531 ddi_soft_state_free(mrsas_state, instance_no);
532 return (DDI_FAILURE);
533 }
534
535 vendor_id = pci_config_get16(instance->pci_handle,
536 PCI_CONF_VENID);
537 device_id = pci_config_get16(instance->pci_handle,
538 PCI_CONF_DEVID);
539
540 subsysvid = pci_config_get16(instance->pci_handle,
541 PCI_CONF_SUBVENID);
542 subsysid = pci_config_get16(instance->pci_handle,
543 PCI_CONF_SUBSYSID);
544
545 pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
546 (pci_config_get16(instance->pci_handle,
547 PCI_CONF_COMM) | PCI_COMM_ME));
548 irq = pci_config_get8(instance->pci_handle,
549 PCI_CONF_ILINE);
550
551 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
552 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s",
553 instance_no, vendor_id, device_id, subsysvid,
554 subsysid, irq, MRSAS_VERSION));
555
556 /* enable bus-mastering */
557 command = pci_config_get16(instance->pci_handle,
558 PCI_CONF_COMM);
559
560 if (!(command & PCI_COMM_ME)) {
561 command |= PCI_COMM_ME;
562
563 pci_config_put16(instance->pci_handle,
564 PCI_CONF_COMM, command);
565
566 con_log(CL_ANN, (CE_CONT, "mr_sas%d: "
567 "enable bus-mastering", instance_no));
568 } else {
569 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
570 "bus-mastering already set", instance_no));
571 }
572
573 /* initialize function pointers */
574 switch (device_id) {
575 case PCI_DEVICE_ID_LSI_TBOLT:
576 case PCI_DEVICE_ID_LSI_INVADER:
577 case PCI_DEVICE_ID_LSI_FURY:
578 con_log(CL_ANN, (CE_NOTE,
579 "mr_sas: 2208 T.B. device detected"));
580
581 instance->func_ptr =
582 &mrsas_function_template_fusion;
583 instance->tbolt = 1;
584 break;
585
586 case PCI_DEVICE_ID_LSI_SKINNY:
587 case PCI_DEVICE_ID_LSI_SKINNY_NEW:
588 /*
589 * FALLTHRU to PPC-style functions, but mark this
590 * instance as Skinny, because the register set is
591 * slightly different (See WR_IB_PICK_QPORT), and
592 * certain other features are available to a Skinny
593 * HBA.
594 */
595 instance->skinny = 1;
596 /* FALLTHRU */
597
598 case PCI_DEVICE_ID_LSI_2108VDE:
599 case PCI_DEVICE_ID_LSI_2108V:
600 con_log(CL_ANN, (CE_NOTE,
601 "mr_sas: 2108 Liberator device detected"));
602
603 instance->func_ptr =
604 &mrsas_function_template_ppc;
605 break;
606
607 default:
608 cmn_err(CE_WARN,
609 "mr_sas: Invalid device detected");
610
611 pci_config_teardown(&instance->pci_handle);
612 ddi_soft_state_free(mrsas_state, instance_no);
613 return (DDI_FAILURE);
614 }
615
616 instance->baseaddress = pci_config_get32(
617 instance->pci_handle, PCI_CONF_BASE0);
618 instance->baseaddress &= 0x0fffc;
619
620 instance->dip = dip;
621 instance->vendor_id = vendor_id;
622 instance->device_id = device_id;
623 instance->subsysvid = subsysvid;
624 instance->subsysid = subsysid;
625 instance->instance = instance_no;
626
627 /* Initialize FMA */
628 instance->fm_capabilities = ddi_prop_get_int(
629 DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS,
630 "fm-capable", DDI_FM_EREPORT_CAPABLE |
631 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE
632 | DDI_FM_ERRCB_CAPABLE);
633
634 mrsas_fm_init(instance);
635
636 /* Setup register map */
637 if ((ddi_dev_regsize(instance->dip,
638 REGISTER_SET_IO_2108, ®length) != DDI_SUCCESS) ||
639 reglength < MINIMUM_MFI_MEM_SZ) {
640 goto fail_attach;
641 }
642 if (reglength > DEFAULT_MFI_MEM_SZ) {
643 reglength = DEFAULT_MFI_MEM_SZ;
644 con_log(CL_DLEVEL1, (CE_NOTE,
645 "mr_sas: register length to map is 0x%lx bytes",
646 reglength));
647 }
648 if (ddi_regs_map_setup(instance->dip,
649 REGISTER_SET_IO_2108, &instance->regmap, 0,
650 reglength, &endian_attr, &instance->regmap_handle)
651 != DDI_SUCCESS) {
652 cmn_err(CE_WARN,
653 "mr_sas: couldn't map control registers");
654 goto fail_attach;
655 }
656
657 instance->unroll.regs = 1;
658
659 /*
660 * Disable Interrupt Now.
661 * Setup Software interrupt
662 */
663 instance->func_ptr->disable_intr(instance);
664
665 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
666 "mrsas-enable-msi", &data) == DDI_SUCCESS) {
667 if (strncmp(data, "no", 3) == 0) {
668 msi_enable = 0;
669 con_log(CL_ANN1, (CE_WARN,
670 "msi_enable = %d disabled", msi_enable));
671 }
672 ddi_prop_free(data);
673 }
674
675 con_log(CL_DLEVEL1, (CE_NOTE, "msi_enable = %d", msi_enable));
676
677 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
678 "mrsas-enable-fp", &data) == DDI_SUCCESS) {
679 if (strncmp(data, "no", 3) == 0) {
680 enable_fp = 0;
681 cmn_err(CE_NOTE,
682 "enable_fp = %d, Fast-Path disabled.\n",
683 enable_fp);
684 }
685
686 ddi_prop_free(data);
687 }
688
689 con_log(CL_DLEVEL1, (CE_NOTE, "enable_fp = %d\n", enable_fp));
690
691 /* Check for all supported interrupt types */
692 if (ddi_intr_get_supported_types(
693 dip, &intr_types) != DDI_SUCCESS) {
694 cmn_err(CE_WARN,
695 "ddi_intr_get_supported_types() failed");
696 goto fail_attach;
697 }
698
699 con_log(CL_DLEVEL1, (CE_NOTE,
700 "ddi_intr_get_supported_types() ret: 0x%x", intr_types));
701
702 /* Initialize and Setup Interrupt handler */
703 if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) {
704 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSIX) !=
705 DDI_SUCCESS) {
706 cmn_err(CE_WARN,
707 "MSIX interrupt query failed");
708 goto fail_attach;
709 }
710 instance->intr_type = DDI_INTR_TYPE_MSIX;
711 } else if (msi_enable && (intr_types & DDI_INTR_TYPE_MSI)) {
712 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSI) !=
713 DDI_SUCCESS) {
714 cmn_err(CE_WARN,
715 "MSI interrupt query failed");
716 goto fail_attach;
717 }
718 instance->intr_type = DDI_INTR_TYPE_MSI;
719 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
720 msi_enable = 0;
721 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_FIXED) !=
722 DDI_SUCCESS) {
723 cmn_err(CE_WARN,
724 "FIXED interrupt query failed");
725 goto fail_attach;
726 }
727 instance->intr_type = DDI_INTR_TYPE_FIXED;
728 } else {
729 cmn_err(CE_WARN, "Device cannot "
730 "suppport either FIXED or MSI/X "
731 "interrupts");
732 goto fail_attach;
733 }
734
735 instance->unroll.intr = 1;
736
737 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
738 "mrsas-enable-ctio", &data) == DDI_SUCCESS) {
739 if (strncmp(data, "no", 3) == 0) {
740 ctio_enable = 0;
741 con_log(CL_ANN1, (CE_WARN,
742 "ctio_enable = %d disabled", ctio_enable));
743 }
744 ddi_prop_free(data);
745 }
746
747 con_log(CL_DLEVEL1, (CE_WARN, "ctio_enable = %d", ctio_enable));
748
749 /* setup the mfi based low level driver */
750 if (mrsas_init_adapter(instance) != DDI_SUCCESS) {
751 cmn_err(CE_WARN, "mr_sas: "
752 "could not initialize the low level driver");
753
754 goto fail_attach;
755 }
756
757 /* Initialize all Mutex */
758 INIT_LIST_HEAD(&instance->completed_pool_list);
759 mutex_init(&instance->completed_pool_mtx, NULL,
760 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
761
762 mutex_init(&instance->sync_map_mtx, NULL,
763 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
764
765 mutex_init(&instance->app_cmd_pool_mtx, NULL,
766 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
767
768 mutex_init(&instance->config_dev_mtx, NULL,
769 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
770
771 mutex_init(&instance->cmd_pend_mtx, NULL,
772 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
773
774 mutex_init(&instance->ocr_flags_mtx, NULL,
775 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
776
777 mutex_init(&instance->int_cmd_mtx, NULL,
778 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
779 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
780
781 mutex_init(&instance->cmd_pool_mtx, NULL,
782 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
783
784 mutex_init(&instance->reg_write_mtx, NULL,
785 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
786
787 if (instance->tbolt) {
788 mutex_init(&instance->cmd_app_pool_mtx, NULL,
789 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
790
791 mutex_init(&instance->chip_mtx, NULL,
792 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
793
794 }
795
796 instance->unroll.mutexs = 1;
797
798 instance->timeout_id = (timeout_id_t)-1;
799
800 /* Register our soft-isr for highlevel interrupts. */
801 instance->isr_level = instance->intr_pri;
802 if (!(instance->tbolt)) {
803 if (instance->isr_level == HIGH_LEVEL_INTR) {
804 if (ddi_add_softintr(dip,
805 DDI_SOFTINT_HIGH,
806 &instance->soft_intr_id, NULL, NULL,
807 mrsas_softintr, (caddr_t)instance) !=
808 DDI_SUCCESS) {
809 cmn_err(CE_WARN,
810 "Software ISR did not register");
811
812 goto fail_attach;
813 }
814
815 instance->unroll.soft_isr = 1;
816
817 }
818 }
819
820 instance->softint_running = 0;
821
822 /* Allocate a transport structure */
823 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
824
825 if (tran == NULL) {
826 cmn_err(CE_WARN,
827 "scsi_hba_tran_alloc failed");
828 goto fail_attach;
829 }
830
831 instance->tran = tran;
832 instance->unroll.tran = 1;
833
834 tran->tran_hba_private = instance;
835 tran->tran_tgt_init = mrsas_tran_tgt_init;
836 tran->tran_tgt_probe = scsi_hba_probe;
837 tran->tran_tgt_free = mrsas_tran_tgt_free;
838 tran->tran_init_pkt = mrsas_tran_init_pkt;
839 if (instance->tbolt)
840 tran->tran_start = mrsas_tbolt_tran_start;
841 else
842 tran->tran_start = mrsas_tran_start;
843 tran->tran_abort = mrsas_tran_abort;
844 tran->tran_reset = mrsas_tran_reset;
845 tran->tran_getcap = mrsas_tran_getcap;
846 tran->tran_setcap = mrsas_tran_setcap;
847 tran->tran_destroy_pkt = mrsas_tran_destroy_pkt;
848 tran->tran_dmafree = mrsas_tran_dmafree;
849 tran->tran_sync_pkt = mrsas_tran_sync_pkt;
850 tran->tran_quiesce = mrsas_tran_quiesce;
851 tran->tran_unquiesce = mrsas_tran_unquiesce;
852 tran->tran_bus_config = mrsas_tran_bus_config;
853
854 if (mrsas_relaxed_ordering)
855 mrsas_generic_dma_attr.dma_attr_flags |=
856 DDI_DMA_RELAXED_ORDERING;
857
858
859 tran_dma_attr = mrsas_generic_dma_attr;
860 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;
861
862 /* Attach this instance of the hba */
863 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
864 != DDI_SUCCESS) {
865 cmn_err(CE_WARN,
866 "scsi_hba_attach failed");
867
868 goto fail_attach;
869 }
870 instance->unroll.tranSetup = 1;
871 con_log(CL_ANN1,
872 (CE_CONT, "scsi_hba_attach_setup() done."));
873
874 /* create devctl node for cfgadm command */
875 if (ddi_create_minor_node(dip, "devctl",
876 S_IFCHR, INST2DEVCTL(instance_no),
877 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
878 cmn_err(CE_WARN,
879 "mr_sas: failed to create devctl node.");
880
881 goto fail_attach;
882 }
883
884 instance->unroll.devctl = 1;
885
886 /* create scsi node for cfgadm command */
887 if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
888 INST2SCSI(instance_no), DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
889 DDI_FAILURE) {
890 cmn_err(CE_WARN,
891 "mr_sas: failed to create scsi node.");
892
893 goto fail_attach;
894 }
895
896 instance->unroll.scsictl = 1;
897
898 (void) sprintf(instance->iocnode, "%d:lsirdctl",
899 instance_no);
900
901 /*
902 * Create a node for applications
903 * for issuing ioctl to the driver.
904 */
905 if (ddi_create_minor_node(dip, instance->iocnode,
906 S_IFCHR, INST2LSIRDCTL(instance_no), DDI_PSEUDO, 0) ==
907 DDI_FAILURE) {
908 cmn_err(CE_WARN,
909 "mr_sas: failed to create ioctl node.");
910
911 goto fail_attach;
912 }
913
914 instance->unroll.ioctl = 1;
915
916 /* Create a taskq to handle dr events */
917 if ((instance->taskq = ddi_taskq_create(dip,
918 "mrsas_dr_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
919 cmn_err(CE_WARN,
920 "mr_sas: failed to create taskq ");
921 instance->taskq = NULL;
922 goto fail_attach;
923 }
924 instance->unroll.taskq = 1;
925 con_log(CL_ANN1, (CE_CONT, "ddi_taskq_create() done."));
926
927 /* enable interrupt */
928 instance->func_ptr->enable_intr(instance);
929
930 /* initiate AEN */
931 if (start_mfi_aen(instance)) {
932 cmn_err(CE_WARN,
933 "mr_sas: failed to initiate AEN.");
934 goto fail_attach;
935 }
936 instance->unroll.aenPend = 1;
937 con_log(CL_ANN1,
938 (CE_CONT, "AEN started for instance %d.", instance_no));
939
940 /* Finally! We are on the air. */
941 ddi_report_dev(dip);
942
943 /* FMA handle checking. */
944 if (mrsas_check_acc_handle(instance->regmap_handle) !=
945 DDI_SUCCESS) {
946 goto fail_attach;
947 }
948 if (mrsas_check_acc_handle(instance->pci_handle) !=
949 DDI_SUCCESS) {
950 goto fail_attach;
951 }
952
953 instance->mr_ld_list =
954 kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld),
955 KM_SLEEP);
956 instance->unroll.ldlist_buff = 1;
957
958 #ifdef PDSUPPORT
959 if (instance->tbolt || instance->skinny) {
960 instance->mr_tbolt_pd_max = MRSAS_TBOLT_PD_TGT_MAX;
961 instance->mr_tbolt_pd_list =
962 kmem_zalloc(MRSAS_TBOLT_GET_PD_MAX(instance) *
963 sizeof (struct mrsas_tbolt_pd), KM_SLEEP);
964 ASSERT(instance->mr_tbolt_pd_list);
965 for (i = 0; i < instance->mr_tbolt_pd_max; i++) {
966 instance->mr_tbolt_pd_list[i].lun_type =
967 MRSAS_TBOLT_PD_LUN;
968 instance->mr_tbolt_pd_list[i].dev_id =
969 (uint8_t)i;
970 }
971
972 instance->unroll.pdlist_buff = 1;
973 }
974 #endif
975 break;
976 case DDI_PM_RESUME:
977 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_PM_RESUME"));
978 break;
979 case DDI_RESUME:
980 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_RESUME"));
981 break;
982 default:
983 con_log(CL_ANN,
984 (CE_WARN, "mr_sas: invalid attach cmd=%x", cmd));
985 return (DDI_FAILURE);
986 }
987
988
989 con_log(CL_DLEVEL1,
990 (CE_NOTE, "mrsas_attach() return SUCCESS instance_num %d",
991 instance_no));
992 return (DDI_SUCCESS);
993
994 fail_attach:
995
996 mrsas_undo_resources(dip, instance);
997
998 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
999 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
1000
1001 mrsas_fm_fini(instance);
1002
1003 pci_config_teardown(&instance->pci_handle);
1004 ddi_soft_state_free(mrsas_state, instance_no);
1005
1006 con_log(CL_ANN, (CE_WARN, "mr_sas: return failure from mrsas_attach"));
1007
1008 cmn_err(CE_WARN, "mrsas_attach() return FAILURE instance_num %d",
1009 instance_no);
1010
1011 return (DDI_FAILURE);
1012 }
1013
1014 /*
1015 * getinfo - gets device information
1016 * @dip:
1017 * @cmd:
1018 * @arg:
1019 * @resultp:
1020 *
1021 * The system calls getinfo() to obtain configuration information that only
1022 * the driver knows. The mapping of minor numbers to device instance is
1023 * entirely under the control of the driver. The system sometimes needs to ask
1024 * the driver which device a particular dev_t represents.
1025 * Given the device number return the devinfo pointer from the scsi_device
1026 * structure.
1027 */
1028 /*ARGSUSED*/
1029 static int
1030 mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
1031 {
1032 int rval;
1033 int mrsas_minor = getminor((dev_t)arg);
1034
1035 struct mrsas_instance *instance;
1036
1037 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1038
1039 switch (cmd) {
1040 case DDI_INFO_DEVT2DEVINFO:
1041 instance = (struct mrsas_instance *)
1042 ddi_get_soft_state(mrsas_state,
1043 MINOR2INST(mrsas_minor));
1044
1045 if (instance == NULL) {
1046 *resultp = NULL;
1047 rval = DDI_FAILURE;
1048 } else {
1049 *resultp = instance->dip;
1050 rval = DDI_SUCCESS;
1051 }
1052 break;
1053 case DDI_INFO_DEVT2INSTANCE:
1054 *resultp = (void *)(intptr_t)
1055 (MINOR2INST(getminor((dev_t)arg)));
1056 rval = DDI_SUCCESS;
1057 break;
1058 default:
1059 *resultp = NULL;
1060 rval = DDI_FAILURE;
1061 }
1062
1063 return (rval);
1064 }
1065
1066 /*
1067 * detach - detaches a device from the system
1068 * @dip: pointer to the device's dev_info structure
1069 * @cmd: type of detach
1070 *
1071 * A driver's detach() entry point is called to detach an instance of a device
1072 * that is bound to the driver. The entry point is called with the instance of
1073 * the device node to be detached and with DDI_DETACH, which is specified as
1074 * the cmd argument to the entry point.
1075 * This routine is called during driver unload. We free all the allocated
1076 * resources and call the corresponding LLD so that it can also release all
1077 * its resources.
1078 */
1079 static int
1080 mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1081 {
1082 int instance_no;
1083
1084 struct mrsas_instance *instance;
1085
1086 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1087
1088
1089 /* CONSTCOND */
1090 ASSERT(NO_COMPETING_THREADS);
1091
1092 instance_no = ddi_get_instance(dip);
1093
1094 instance = (struct mrsas_instance *)ddi_get_soft_state(mrsas_state,
1095 instance_no);
1096
1097 if (!instance) {
1098 cmn_err(CE_WARN,
1099 "mr_sas:%d could not get instance in detach",
1100 instance_no);
1101
1102 return (DDI_FAILURE);
1103 }
1104
1105 con_log(CL_ANN, (CE_NOTE,
1106 "mr_sas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x",
1107 instance_no, instance->vendor_id, instance->device_id,
1108 instance->subsysvid, instance->subsysid));
1109
1110 switch (cmd) {
1111 case DDI_DETACH:
1112 con_log(CL_ANN, (CE_NOTE,
1113 "mrsas_detach: DDI_DETACH"));
1114
1115 mutex_enter(&instance->config_dev_mtx);
1116 if (instance->timeout_id != (timeout_id_t)-1) {
1117 mutex_exit(&instance->config_dev_mtx);
1118 (void) untimeout(instance->timeout_id);
1119 instance->timeout_id = (timeout_id_t)-1;
1120 mutex_enter(&instance->config_dev_mtx);
1121 instance->unroll.timer = 0;
1122 }
1123 mutex_exit(&instance->config_dev_mtx);
1124
1125 if (instance->unroll.tranSetup == 1) {
1126 if (scsi_hba_detach(dip) != DDI_SUCCESS) {
1127 cmn_err(CE_WARN,
1128 "mr_sas2%d: failed to detach",
1129 instance_no);
1130 return (DDI_FAILURE);
1131 }
1132 instance->unroll.tranSetup = 0;
1133 con_log(CL_ANN1,
1134 (CE_CONT, "scsi_hba_dettach() done."));
1135 }
1136
1137 flush_cache(instance);
1138
1139 mrsas_undo_resources(dip, instance);
1140
1141 mrsas_fm_fini(instance);
1142
1143 pci_config_teardown(&instance->pci_handle);
1144 ddi_soft_state_free(mrsas_state, instance_no);
1145 break;
1146
1147 case DDI_PM_SUSPEND:
1148 con_log(CL_ANN, (CE_NOTE,
1149 "mrsas_detach: DDI_PM_SUSPEND"));
1150
1151 break;
1152 case DDI_SUSPEND:
1153 con_log(CL_ANN, (CE_NOTE,
1154 "mrsas_detach: DDI_SUSPEND"));
1155
1156 break;
1157 default:
1158 con_log(CL_ANN, (CE_WARN,
1159 "invalid detach command:0x%x", cmd));
1160 return (DDI_FAILURE);
1161 }
1162
1163 return (DDI_SUCCESS);
1164 }
1165
1166
1167 static void
1168 mrsas_undo_resources(dev_info_t *dip, struct mrsas_instance *instance)
1169 {
1170 int instance_no;
1171
1172 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1173
1174
1175 instance_no = ddi_get_instance(dip);
1176
1177
1178 if (instance->unroll.ioctl == 1) {
1179 ddi_remove_minor_node(dip, instance->iocnode);
1180 instance->unroll.ioctl = 0;
1181 }
1182
1183 if (instance->unroll.scsictl == 1) {
1184 ddi_remove_minor_node(dip, "scsi");
1185 instance->unroll.scsictl = 0;
1186 }
1187
1188 if (instance->unroll.devctl == 1) {
1189 ddi_remove_minor_node(dip, "devctl");
1190 instance->unroll.devctl = 0;
1191 }
1192
1193 if (instance->unroll.tranSetup == 1) {
1194 if (scsi_hba_detach(dip) != DDI_SUCCESS) {
1195 cmn_err(CE_WARN,
1196 "mr_sas2%d: failed to detach", instance_no);
1197 return; /* DDI_FAILURE */
1198 }
1199 instance->unroll.tranSetup = 0;
1200 con_log(CL_ANN1, (CE_CONT, "scsi_hba_dettach() done."));
1201 }
1202
1203 if (instance->unroll.tran == 1) {
1204 scsi_hba_tran_free(instance->tran);
1205 instance->unroll.tran = 0;
1206 con_log(CL_ANN1, (CE_CONT, "scsi_hba_tran_free() done."));
1207 }
1208
1209 if (instance->unroll.syncCmd == 1) {
1210 if (instance->tbolt) {
1211 if (abort_syncmap_cmd(instance,
1212 instance->map_update_cmd)) {
1213 cmn_err(CE_WARN, "mrsas_detach: "
1214 "failed to abort previous syncmap command");
1215 }
1216
1217 instance->unroll.syncCmd = 0;
1218 con_log(CL_ANN1, (CE_CONT, "sync cmd aborted, done."));
1219 }
1220 }
1221
1222 if (instance->unroll.aenPend == 1) {
1223 if (abort_aen_cmd(instance, instance->aen_cmd))
1224 cmn_err(CE_WARN, "mrsas_detach: "
1225 "failed to abort prevous AEN command");
1226
1227 instance->unroll.aenPend = 0;
1228 con_log(CL_ANN1, (CE_CONT, "aen cmd aborted, done."));
1229 /* This means the controller is fully initialized and running */
1230 /* Shutdown should be a last command to controller. */
1231 /* shutdown_controller(); */
1232 }
1233
1234
1235 if (instance->unroll.timer == 1) {
1236 if (instance->timeout_id != (timeout_id_t)-1) {
1237 (void) untimeout(instance->timeout_id);
1238 instance->timeout_id = (timeout_id_t)-1;
1239
1240 instance->unroll.timer = 0;
1241 }
1242 }
1243
1244 instance->func_ptr->disable_intr(instance);
1245
1246
1247 if (instance->unroll.mutexs == 1) {
1248 mutex_destroy(&instance->cmd_pool_mtx);
1249 mutex_destroy(&instance->app_cmd_pool_mtx);
1250 mutex_destroy(&instance->cmd_pend_mtx);
1251 mutex_destroy(&instance->completed_pool_mtx);
1252 mutex_destroy(&instance->sync_map_mtx);
1253 mutex_destroy(&instance->int_cmd_mtx);
1254 cv_destroy(&instance->int_cmd_cv);
1255 mutex_destroy(&instance->config_dev_mtx);
1256 mutex_destroy(&instance->ocr_flags_mtx);
1257 mutex_destroy(&instance->reg_write_mtx);
1258
1259 if (instance->tbolt) {
1260 mutex_destroy(&instance->cmd_app_pool_mtx);
1261 mutex_destroy(&instance->chip_mtx);
1262 }
1263
1264 instance->unroll.mutexs = 0;
1265 con_log(CL_ANN1, (CE_CONT, "Destroy mutex & cv, done."));
1266 }
1267
1268
1269 if (instance->unroll.soft_isr == 1) {
1270 ddi_remove_softintr(instance->soft_intr_id);
1271 instance->unroll.soft_isr = 0;
1272 }
1273
1274 if (instance->unroll.intr == 1) {
1275 mrsas_rem_intrs(instance);
1276 instance->unroll.intr = 0;
1277 }
1278
1279
1280 if (instance->unroll.taskq == 1) {
1281 if (instance->taskq) {
1282 ddi_taskq_destroy(instance->taskq);
1283 instance->unroll.taskq = 0;
1284 }
1285
1286 }
1287
1288 /*
1289 * free dma memory allocated for
1290 * cmds/frames/queues/driver version etc
1291 */
1292 if (instance->unroll.verBuff == 1) {
1293 (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1294 instance->unroll.verBuff = 0;
1295 }
1296
1297 if (instance->unroll.pdlist_buff == 1) {
1298 if (instance->mr_tbolt_pd_list != NULL) {
1299 kmem_free(instance->mr_tbolt_pd_list,
1300 MRSAS_TBOLT_GET_PD_MAX(instance) *
1301 sizeof (struct mrsas_tbolt_pd));
1302 }
1303
1304 instance->mr_tbolt_pd_list = NULL;
1305 instance->unroll.pdlist_buff = 0;
1306 }
1307
1308 if (instance->unroll.ldlist_buff == 1) {
1309 if (instance->mr_ld_list != NULL) {
1310 kmem_free(instance->mr_ld_list, MRDRV_MAX_LD
1311 * sizeof (struct mrsas_ld));
1312 }
1313
1314 instance->mr_ld_list = NULL;
1315 instance->unroll.ldlist_buff = 0;
1316 }
1317
1318 if (instance->tbolt) {
1319 if (instance->unroll.alloc_space_mpi2 == 1) {
1320 free_space_for_mpi2(instance);
1321 instance->unroll.alloc_space_mpi2 = 0;
1322 }
1323 } else {
1324 if (instance->unroll.alloc_space_mfi == 1) {
1325 free_space_for_mfi(instance);
1326 instance->unroll.alloc_space_mfi = 0;
1327 }
1328 }
1329
1330 if (instance->unroll.regs == 1) {
1331 ddi_regs_map_free(&instance->regmap_handle);
1332 instance->unroll.regs = 0;
1333 con_log(CL_ANN1, (CE_CONT, "ddi_regs_map_free() done."));
1334 }
1335 }
1336
1337
1338
1339 /*
1340 * ************************************************************************** *
1341 * *
1342 * common entry points - for character driver types *
1343 * *
1344 * ************************************************************************** *
1345 */
1346 /*
1347 * open - gets access to a device
1348 * @dev:
1349 * @openflags:
1350 * @otyp:
1351 * @credp:
1352 *
1353 * Access to a device by one or more application programs is controlled
1354 * through the open() and close() entry points. The primary function of
1355 * open() is to verify that the open request is allowed.
1356 */
1357 static int
1358 mrsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
1359 {
1360 int rval = 0;
1361
1362 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1363
1364 /* Check root permissions */
1365 if (drv_priv(credp) != 0) {
1366 con_log(CL_ANN, (CE_WARN,
1367 "mr_sas: Non-root ioctl access denied!"));
1368 return (EPERM);
1369 }
1370
1371 /* Verify we are being opened as a character device */
1372 if (otyp != OTYP_CHR) {
1373 con_log(CL_ANN, (CE_WARN,
1374 "mr_sas: ioctl node must be a char node"));
1375 return (EINVAL);
1376 }
1377
1378 if (ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(*dev)))
1379 == NULL) {
1380 return (ENXIO);
1381 }
1382
1383 if (scsi_hba_open) {
1384 rval = scsi_hba_open(dev, openflags, otyp, credp);
1385 }
1386
1387 return (rval);
1388 }
1389
1390 /*
1391 * close - gives up access to a device
1392 * @dev:
1393 * @openflags:
1394 * @otyp:
1395 * @credp:
1396 *
1397 * close() should perform any cleanup necessary to finish using the minor
1398 * device, and prepare the device (and driver) to be opened again.
1399 */
1400 static int
1401 mrsas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
1402 {
1403 int rval = 0;
1404
1405 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1406
1407 /* no need for locks! */
1408
1409 if (scsi_hba_close) {
1410 rval = scsi_hba_close(dev, openflags, otyp, credp);
1411 }
1412
1413 return (rval);
1414 }
1415
1416 /*
1417 * ioctl - performs a range of I/O commands for character drivers
1418 * @dev:
1419 * @cmd:
1420 * @arg:
1421 * @mode:
1422 * @credp:
1423 * @rvalp:
1424 *
1425 * ioctl() routine must make sure that user data is copied into or out of the
1426 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
1427 * and ddi_copyout(), as appropriate.
1428 * This is a wrapper routine to serialize access to the actual ioctl routine.
1429 * ioctl() should return 0 on success, or the appropriate error number. The
1430 * driver may also set the value returned to the calling process through rvalp.
1431 */
1432
1433 static int
1434 mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1435 int *rvalp)
1436 {
1437 int rval = 0;
1438
1439 struct mrsas_instance *instance;
1440 struct mrsas_ioctl *ioctl;
1441 struct mrsas_aen aen;
1442 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1443
1444 instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev)));
1445
1446 if (instance == NULL) {
1447 /* invalid minor number */
1448 con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found."));
1449 return (ENXIO);
1450 }
1451
1452 ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl),
1453 KM_SLEEP);
1454 ASSERT(ioctl);
1455
1456 switch ((uint_t)cmd) {
1457 case MRSAS_IOCTL_FIRMWARE:
1458 if (ddi_copyin((void *)arg, ioctl,
1459 sizeof (struct mrsas_ioctl), mode)) {
1460 con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: "
1461 "ERROR IOCTL copyin"));
1462 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1463 return (EFAULT);
1464 }
1465
1466 if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) {
1467 rval = handle_drv_ioctl(instance, ioctl, mode);
1468 } else {
1469 rval = handle_mfi_ioctl(instance, ioctl, mode);
1470 }
1471
1472 if (ddi_copyout((void *)ioctl, (void *)arg,
1473 (sizeof (struct mrsas_ioctl) - 1), mode)) {
1474 con_log(CL_ANN, (CE_WARN,
1475 "mrsas_ioctl: copy_to_user failed"));
1476 rval = 1;
1477 }
1478
1479 break;
1480 case MRSAS_IOCTL_AEN:
1481 if (ddi_copyin((void *) arg, &aen,
1482 sizeof (struct mrsas_aen), mode)) {
1483 con_log(CL_ANN, (CE_WARN,
1484 "mrsas_ioctl: ERROR AEN copyin"));
1485 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1486 return (EFAULT);
1487 }
1488
1489 rval = handle_mfi_aen(instance, &aen);
1490
1491 if (ddi_copyout((void *) &aen, (void *)arg,
1492 sizeof (struct mrsas_aen), mode)) {
1493 con_log(CL_ANN, (CE_WARN,
1494 "mrsas_ioctl: copy_to_user failed"));
1495 rval = 1;
1496 }
1497
1498 break;
1499 default:
1500 rval = scsi_hba_ioctl(dev, cmd, arg,
1501 mode, credp, rvalp);
1502
1503 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_ioctl: "
1504 "scsi_hba_ioctl called, ret = %x.", rval));
1505 }
1506
1507 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1508 return (rval);
1509 }
1510
1511 /*
1512 * ************************************************************************** *
1513 * *
1514 * common entry points - for block driver types *
1515 * *
1516 * ************************************************************************** *
1517 */
1518 #ifdef __sparc
1519 /*
1520 * reset - TBD
1521 * @dip:
1522 * @cmd:
1523 *
1524 * TBD
1525 */
1526 /*ARGSUSED*/
1527 static int
1528 mrsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1529 {
1530 int instance_no;
1531
1532 struct mrsas_instance *instance;
1533
1534 instance_no = ddi_get_instance(dip);
1535 instance = (struct mrsas_instance *)ddi_get_soft_state
1536 (mrsas_state, instance_no);
1537
1538 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1539
1540 if (!instance) {
1541 con_log(CL_ANN, (CE_WARN, "mr_sas:%d could not get adapter "
1542 "in reset", instance_no));
1543 return (DDI_FAILURE);
1544 }
1545
1546 instance->func_ptr->disable_intr(instance);
1547
1548 con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1549 instance_no));
1550
1551 flush_cache(instance);
1552
1553 return (DDI_SUCCESS);
1554 }
1555 #else /* __sparc */
1556 /*ARGSUSED*/
1557 static int
1558 mrsas_quiesce(dev_info_t *dip)
1559 {
1560 int instance_no;
1561
1562 struct mrsas_instance *instance;
1563
1564 instance_no = ddi_get_instance(dip);
1565 instance = (struct mrsas_instance *)ddi_get_soft_state
1566 (mrsas_state, instance_no);
1567
1568 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1569
1570 if (!instance) {
1571 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d could not get adapter "
1572 "in quiesce", instance_no));
1573 return (DDI_FAILURE);
1574 }
1575 if (instance->deadadapter || instance->adapterresetinprogress) {
1576 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d adapter is not in "
1577 "healthy state", instance_no));
1578 return (DDI_FAILURE);
1579 }
1580
1581 if (abort_aen_cmd(instance, instance->aen_cmd)) {
1582 con_log(CL_ANN1, (CE_WARN, "mrsas_quiesce: "
1583 "failed to abort prevous AEN command QUIESCE"));
1584 }
1585
1586 if (instance->tbolt) {
1587 if (abort_syncmap_cmd(instance,
1588 instance->map_update_cmd)) {
1589 cmn_err(CE_WARN,
1590 "mrsas_detach: failed to abort "
1591 "previous syncmap command");
1592 return (DDI_FAILURE);
1593 }
1594 }
1595
1596 instance->func_ptr->disable_intr(instance);
1597
1598 con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1599 instance_no));
1600
1601 flush_cache(instance);
1602
1603 if (wait_for_outstanding(instance)) {
1604 con_log(CL_ANN1,
1605 (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
1606 return (DDI_FAILURE);
1607 }
1608 return (DDI_SUCCESS);
1609 }
1610 #endif /* __sparc */
1611
1612 /*
1613 * ************************************************************************** *
1614 * *
1615 * entry points (SCSI HBA) *
1616 * *
1617 * ************************************************************************** *
1618 */
1619 /*
1620 * tran_tgt_init - initialize a target device instance
1621 * @hba_dip:
1622 * @tgt_dip:
1623 * @tran:
1624 * @sd:
1625 *
1626 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
1627 * any per-target resources. tran_tgt_init() also enables the HBA to qualify
1628 * the device's address as valid and supportable for that particular HBA.
1629 * By returning DDI_FAILURE, the instance of the target driver for that device
1630 * is not probed or attached.
1631 */
1632 /*ARGSUSED*/
1633 static int
1634 mrsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1635 scsi_hba_tran_t *tran, struct scsi_device *sd)
1636 {
1637 struct mrsas_instance *instance;
1638 uint16_t tgt = sd->sd_address.a_target;
1639 uint8_t lun = sd->sd_address.a_lun;
1640 dev_info_t *child = NULL;
1641
1642 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init target %d lun %d",
1643 tgt, lun));
1644
1645 instance = ADDR2MR(&sd->sd_address);
1646
1647 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
1648 /*
1649 * If no persistent node exists, we don't allow .conf node
1650 * to be created.
1651 */
1652 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
1653 con_log(CL_DLEVEL2,
1654 (CE_NOTE, "mrsas_tgt_init find child ="
1655 " %p t = %d l = %d", (void *)child, tgt, lun));
1656 if (ndi_merge_node(tgt_dip, mrsas_name_node) !=
1657 DDI_SUCCESS)
1658 /* Create this .conf node */
1659 return (DDI_SUCCESS);
1660 }
1661 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init in ndi_per "
1662 "DDI_FAILURE t = %d l = %d", tgt, lun));
1663 return (DDI_FAILURE);
1664
1665 }
1666
1667 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p",
1668 (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip));
1669
1670 if (tgt < MRDRV_MAX_LD && lun == 0) {
1671 if (instance->mr_ld_list[tgt].dip == NULL &&
1672 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) {
1673 mutex_enter(&instance->config_dev_mtx);
1674 instance->mr_ld_list[tgt].dip = tgt_dip;
1675 instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN;
1676 instance->mr_ld_list[tgt].flag = MRDRV_TGT_VALID;
1677 mutex_exit(&instance->config_dev_mtx);
1678 }
1679 }
1680
1681 #ifdef PDSUPPORT
1682 else if (instance->tbolt || instance->skinny) {
1683 if (instance->mr_tbolt_pd_list[tgt].dip == NULL) {
1684 mutex_enter(&instance->config_dev_mtx);
1685 instance->mr_tbolt_pd_list[tgt].dip = tgt_dip;
1686 instance->mr_tbolt_pd_list[tgt].flag =
1687 MRDRV_TGT_VALID;
1688 mutex_exit(&instance->config_dev_mtx);
1689 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_tgt_init:"
1690 "t%xl%x", tgt, lun));
1691 }
1692 }
1693 #endif
1694
1695 return (DDI_SUCCESS);
1696 }
1697
1698 /*ARGSUSED*/
static void
mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	/* Drop the per-target dip cached by mrsas_tran_tgt_init(). */
	struct mrsas_instance *instance;
	int tgt = sd->sd_address.a_target;
	int lun = sd->sd_address.a_lun;

	instance = ADDR2MR(&sd->sd_address);

	con_log(CL_DLEVEL2, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun));

	/* Logical-drive target: clear only if this dip is the cached one. */
	if (tgt < MRDRV_MAX_LD && lun == 0) {
		if (instance->mr_ld_list[tgt].dip == tgt_dip) {
			mutex_enter(&instance->config_dev_mtx);
			instance->mr_ld_list[tgt].dip = NULL;
			mutex_exit(&instance->config_dev_mtx);
		}
	}

#ifdef PDSUPPORT
	else if (instance->tbolt || instance->skinny) {
		/*
		 * NOTE(review): unlike the LD branch, this clears dip
		 * without verifying it equals tgt_dip, and tgt is not
		 * bounds-checked against the PD list size — confirm the
		 * framework guarantees both before changing.
		 */
		mutex_enter(&instance->config_dev_mtx);
		instance->mr_tbolt_pd_list[tgt].dip = NULL;
		mutex_exit(&instance->config_dev_mtx);
		con_log(CL_ANN1, (CE_NOTE, "tgt_free: Setting dip = NULL"
		    "for tgt:%x", tgt));
	}
#endif

}
1730
1731 dev_info_t *
1732 mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun)
1733 {
1734 dev_info_t *child = NULL;
1735 char addr[SCSI_MAXNAMELEN];
1736 char tmp[MAXNAMELEN];
1737
1738 (void) sprintf(addr, "%x,%x", tgt, lun);
1739 for (child = ddi_get_child(instance->dip); child;
1740 child = ddi_get_next_sibling(child)) {
1741
1742 if (ndi_dev_is_persistent_node(child) == 0) {
1743 continue;
1744 }
1745
1746 if (mrsas_name_node(child, tmp, MAXNAMELEN) !=
1747 DDI_SUCCESS) {
1748 continue;
1749 }
1750
1751 if (strcmp(addr, tmp) == 0) {
1752 break;
1753 }
1754 }
1755 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_find_child: return child = %p",
1756 (void *)child));
1757 return (child);
1758 }
1759
1760 /*
1761 * mrsas_name_node -
1762 * @dip:
1763 * @name:
1764 * @len:
1765 */
1766 static int
1767 mrsas_name_node(dev_info_t *dip, char *name, int len)
1768 {
1769 int tgt, lun;
1770
1771 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1772 DDI_PROP_DONTPASS, "target", -1);
1773 con_log(CL_DLEVEL2, (CE_NOTE,
1774 "mrsas_name_node: dip %p tgt %d", (void *)dip, tgt));
1775 if (tgt == -1) {
1776 return (DDI_FAILURE);
1777 }
1778 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1779 "lun", -1);
1780 con_log(CL_DLEVEL2,
1781 (CE_NOTE, "mrsas_name_node: tgt %d lun %d", tgt, lun));
1782 if (lun == -1) {
1783 return (DDI_FAILURE);
1784 }
1785 (void) snprintf(name, len, "%x,%x", tgt, lun);
1786 return (DDI_SUCCESS);
1787 }
1788
1789 /*
1790 * tran_init_pkt - allocate & initialize a scsi_pkt structure
1791 * @ap:
1792 * @pkt:
1793 * @bp:
1794 * @cmdlen:
1795 * @statuslen:
1796 * @tgtlen:
1797 * @flags:
1798 * @callback:
1799 *
1800 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1801 * structure and DMA resources for a target driver request. The
1802 * tran_init_pkt() entry point is called when the target driver calls the
1803 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1804 * is a request to perform one or more of three possible services:
1805 * - allocation and initialization of a scsi_pkt structure
1806 * - allocation of DMA resources for data transfer
1807 * - reallocation of DMA resources for the next portion of the data transfer
1808 */
1809 static struct scsi_pkt *
1810 mrsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
1811 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1812 int flags, int (*callback)(), caddr_t arg)
1813 {
1814 struct scsa_cmd *acmd;
1815 struct mrsas_instance *instance;
1816 struct scsi_pkt *new_pkt;
1817
1818 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1819
1820 instance = ADDR2MR(ap);
1821
1822 /* step #1 : pkt allocation */
1823 if (pkt == NULL) {
1824 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
1825 tgtlen, sizeof (struct scsa_cmd), callback, arg);
1826 if (pkt == NULL) {
1827 return (NULL);
1828 }
1829
1830 acmd = PKT2CMD(pkt);
1831
1832 /*
1833 * Initialize the new pkt - we redundantly initialize
1834 * all the fields for illustrative purposes.
1835 */
1836 acmd->cmd_pkt = pkt;
1837 acmd->cmd_flags = 0;
1838 acmd->cmd_scblen = statuslen;
1839 acmd->cmd_cdblen = cmdlen;
1840 acmd->cmd_dmahandle = NULL;
1841 acmd->cmd_ncookies = 0;
1842 acmd->cmd_cookie = 0;
1843 acmd->cmd_cookiecnt = 0;
1844 acmd->cmd_nwin = 0;
1845
1846 pkt->pkt_address = *ap;
1847 pkt->pkt_comp = (void (*)())NULL;
1848 pkt->pkt_flags = 0;
1849 pkt->pkt_time = 0;
1850 pkt->pkt_resid = 0;
1851 pkt->pkt_state = 0;
1852 pkt->pkt_statistics = 0;
1853 pkt->pkt_reason = 0;
1854 new_pkt = pkt;
1855 } else {
1856 acmd = PKT2CMD(pkt);
1857 new_pkt = NULL;
1858 }
1859
1860 /* step #2 : dma allocation/move */
1861 if (bp && bp->b_bcount != 0) {
1862 if (acmd->cmd_dmahandle == NULL) {
1863 if (mrsas_dma_alloc(instance, pkt, bp, flags,
1864 callback) == DDI_FAILURE) {
1865 if (new_pkt) {
1866 scsi_hba_pkt_free(ap, new_pkt);
1867 }
1868 return ((struct scsi_pkt *)NULL);
1869 }
1870 } else {
1871 if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
1872 return ((struct scsi_pkt *)NULL);
1873 }
1874 }
1875 }
1876
1877 return (pkt);
1878 }
1879
1880 /*
1881 * tran_start - transport a SCSI command to the addressed target
1882 * @ap:
1883 * @pkt:
1884 *
1885 * The tran_start() entry point for a SCSI HBA driver is called to transport a
1886 * SCSI command to the addressed target. The SCSI command is described
1887 * entirely within the scsi_pkt structure, which the target driver allocated
1888 * through the HBA driver's tran_init_pkt() entry point. If the command
1889 * involves a data transfer, DMA resources must also have been allocated for
1890 * the scsi_pkt structure.
1891 *
1892 * Return Values :
1893 * TRAN_BUSY - request queue is full, no more free scbs
1894 * TRAN_ACCEPT - pkt has been submitted to the instance
1895 */
1896 static int
1897 mrsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
1898 {
1899 uchar_t cmd_done = 0;
1900
1901 struct mrsas_instance *instance = ADDR2MR(ap);
1902 struct mrsas_cmd *cmd;
1903
1904 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1905 if (instance->deadadapter == 1) {
1906 con_log(CL_ANN1, (CE_WARN,
1907 "mrsas_tran_start: return TRAN_FATAL_ERROR "
1908 "for IO, as the HBA doesnt take any more IOs"));
1909 if (pkt) {
1910 pkt->pkt_reason = CMD_DEV_GONE;
1911 pkt->pkt_statistics = STAT_DISCON;
1912 }
1913 return (TRAN_FATAL_ERROR);
1914 }
1915
1916 if (instance->adapterresetinprogress) {
1917 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_start: Reset flag set, "
1918 "returning mfi_pkt and setting TRAN_BUSY\n"));
1919 return (TRAN_BUSY);
1920 }
1921
1922 con_log(CL_ANN1, (CE_CONT, "chkpnt:%s:%d:SCSI CDB[0]=0x%x time:%x",
1923 __func__, __LINE__, pkt->pkt_cdbp[0], pkt->pkt_time));
1924
1925 pkt->pkt_reason = CMD_CMPLT;
1926 *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
1927
1928 cmd = build_cmd(instance, ap, pkt, &cmd_done);
1929
1930 /*
1931 * Check if the command is already completed by the mrsas_build_cmd()
1932 * routine. In which case the busy_flag would be clear and scb will be
1933 * NULL and appropriate reason provided in pkt_reason field
1934 */
1935 if (cmd_done) {
1936 pkt->pkt_reason = CMD_CMPLT;
1937 pkt->pkt_scbp[0] = STATUS_GOOD;
1938 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1939 | STATE_SENT_CMD;
1940 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1941 (*pkt->pkt_comp)(pkt);
1942 }
1943
1944 return (TRAN_ACCEPT);
1945 }
1946
1947 if (cmd == NULL) {
1948 return (TRAN_BUSY);
1949 }
1950
1951 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1952 if (instance->fw_outstanding > instance->max_fw_cmds) {
1953 con_log(CL_ANN, (CE_CONT, "mr_sas:Firmware busy"));
1954 DTRACE_PROBE2(start_tran_err,
1955 uint16_t, instance->fw_outstanding,
1956 uint16_t, instance->max_fw_cmds);
1957 mrsas_return_mfi_pkt(instance, cmd);
1958 return (TRAN_BUSY);
1959 }
1960
1961 /* Synchronize the Cmd frame for the controller */
1962 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1963 DDI_DMA_SYNC_FORDEV);
1964 con_log(CL_ANN, (CE_CONT, "issue_cmd_ppc: SCSI CDB[0]=0x%x"
1965 "cmd->index:%x\n", pkt->pkt_cdbp[0], cmd->index));
1966 instance->func_ptr->issue_cmd(cmd, instance);
1967
1968 } else {
1969 struct mrsas_header *hdr = &cmd->frame->hdr;
1970
1971 instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1972
1973 pkt->pkt_reason = CMD_CMPLT;
1974 pkt->pkt_statistics = 0;
1975 pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;
1976
1977 switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
1978 &hdr->cmd_status)) {
1979 case MFI_STAT_OK:
1980 pkt->pkt_scbp[0] = STATUS_GOOD;
1981 break;
1982
1983 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1984 con_log(CL_ANN, (CE_CONT,
1985 "mrsas_tran_start: scsi done with error"));
1986 pkt->pkt_reason = CMD_CMPLT;
1987 pkt->pkt_statistics = 0;
1988
1989 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
1990 break;
1991
1992 case MFI_STAT_DEVICE_NOT_FOUND:
1993 con_log(CL_ANN, (CE_CONT,
1994 "mrsas_tran_start: device not found error"));
1995 pkt->pkt_reason = CMD_DEV_GONE;
1996 pkt->pkt_statistics = STAT_DISCON;
1997 break;
1998
1999 default:
2000 ((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
2001 }
2002
2003 (void) mrsas_common_check(instance, cmd);
2004 DTRACE_PROBE2(start_nointr_done, uint8_t, hdr->cmd,
2005 uint8_t, hdr->cmd_status);
2006 mrsas_return_mfi_pkt(instance, cmd);
2007
2008 if (pkt->pkt_comp) {
2009 (*pkt->pkt_comp)(pkt);
2010 }
2011
2012 }
2013
2014 return (TRAN_ACCEPT);
2015 }
2016
2017 /*
2018 * tran_abort - Abort any commands that are currently in transport
2019 * @ap:
2020 * @pkt:
2021 *
2022 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
2023 * commands that are currently in transport for a particular target. This entry
2024 * point is called when a target driver calls scsi_abort(). The tran_abort()
2025 * entry point should attempt to abort the command denoted by the pkt
2026 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
2027 * abort all outstanding commands in the transport layer for the particular
2028 * target or logical unit.
2029 */
2030 /*ARGSUSED*/
static int
mrsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/*
	 * The hardware provides no interface to abort an individual
	 * outstanding command, so every abort request is refused.
	 */

	return (DDI_FAILURE);
}
2040
2041 /*
2042 * tran_reset - reset either the SCSI bus or target
2043 * @ap:
2044 * @level:
2045 *
2046 * The tran_reset() entry point for a SCSI HBA driver is called to reset either
2047 * the SCSI bus or a particular SCSI target device. This entry point is called
2048 * when a target driver calls scsi_reset(). The tran_reset() entry point must
2049 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
2050 * particular target or logical unit must be reset.
2051 */
2052 /*ARGSUSED*/
2053 static int
2054 mrsas_tran_reset(struct scsi_address *ap, int level)
2055 {
2056 struct mrsas_instance *instance = ADDR2MR(ap);
2057
2058 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2059
2060 if (wait_for_outstanding(instance)) {
2061 con_log(CL_ANN1,
2062 (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
2063 return (DDI_FAILURE);
2064 } else {
2065 return (DDI_SUCCESS);
2066 }
2067 }
2068
2069 /*
2070 * tran_getcap - get one of a set of SCSA-defined capabilities
2071 * @ap:
2072 * @cap:
2073 * @whom:
2074 *
2075 * The target driver can request the current setting of the capability for a
2076 * particular target by setting the whom parameter to nonzero. A whom value of
2077 * zero indicates a request for the current setting of the general capability
2078 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
2079 * for undefined capabilities or the current value of the requested capability.
2080 */
2081 /*ARGSUSED*/
2082 static int
2083 mrsas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
2084 {
2085 int rval = 0;
2086
2087 struct mrsas_instance *instance = ADDR2MR(ap);
2088
2089 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2090
2091 /* we do allow inquiring about capabilities for other targets */
2092 if (cap == NULL) {
2093 return (-1);
2094 }
2095
2096 switch (scsi_hba_lookup_capstr(cap)) {
2097 case SCSI_CAP_DMA_MAX:
2098 if (instance->tbolt) {
2099 /* Limit to 256k max transfer */
2100 rval = mrsas_tbolt_max_cap_maxxfer;
2101 } else {
2102 /* Limit to 16MB max transfer */
2103 rval = mrsas_max_cap_maxxfer;
2104 }
2105 break;
2106 case SCSI_CAP_MSG_OUT:
2107 rval = 1;
2108 break;
2109 case SCSI_CAP_DISCONNECT:
2110 rval = 0;
2111 break;
2112 case SCSI_CAP_SYNCHRONOUS:
2113 rval = 0;
2114 break;
2115 case SCSI_CAP_WIDE_XFER:
2116 rval = 1;
2117 break;
2118 case SCSI_CAP_TAGGED_QING:
2119 rval = 1;
2120 break;
2121 case SCSI_CAP_UNTAGGED_QING:
2122 rval = 1;
2123 break;
2124 case SCSI_CAP_PARITY:
2125 rval = 1;
2126 break;
2127 case SCSI_CAP_INITIATOR_ID:
2128 rval = instance->init_id;
2129 break;
2130 case SCSI_CAP_ARQ:
2131 rval = 1;
2132 break;
2133 case SCSI_CAP_LINKED_CMDS:
2134 rval = 0;
2135 break;
2136 case SCSI_CAP_RESET_NOTIFICATION:
2137 rval = 1;
2138 break;
2139 case SCSI_CAP_GEOMETRY:
2140 rval = -1;
2141
2142 break;
2143 default:
2144 con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
2145 scsi_hba_lookup_capstr(cap)));
2146 rval = -1;
2147 break;
2148 }
2149
2150 return (rval);
2151 }
2152
2153 /*
2154 * tran_setcap - set one of a set of SCSA-defined capabilities
2155 * @ap:
2156 * @cap:
2157 * @value:
2158 * @whom:
2159 *
2160 * The target driver might request that the new value be set for a particular
2161 * target by setting the whom parameter to nonzero. A whom value of zero
2162 * means that request is to set the new value for the SCSI bus or for adapter
2163 * hardware in general.
2164 * The tran_setcap() should return the following values as appropriate:
2165 * - -1 for undefined capabilities
2166 * - 0 if the HBA driver cannot set the capability to the requested value
2167 * - 1 if the HBA driver is able to set the capability to the requested value
2168 */
2169 /*ARGSUSED*/
2170 static int
2171 mrsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
2172 {
2173 int rval = 1;
2174
2175 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2176
2177 /* We don't allow setting capabilities for other targets */
2178 if (cap == NULL || whom == 0) {
2179 return (-1);
2180 }
2181
2182 switch (scsi_hba_lookup_capstr(cap)) {
2183 case SCSI_CAP_DMA_MAX:
2184 case SCSI_CAP_MSG_OUT:
2185 case SCSI_CAP_PARITY:
2186 case SCSI_CAP_LINKED_CMDS:
2187 case SCSI_CAP_RESET_NOTIFICATION:
2188 case SCSI_CAP_DISCONNECT:
2189 case SCSI_CAP_SYNCHRONOUS:
2190 case SCSI_CAP_UNTAGGED_QING:
2191 case SCSI_CAP_WIDE_XFER:
2192 case SCSI_CAP_INITIATOR_ID:
2193 case SCSI_CAP_ARQ:
2194 /*
2195 * None of these are settable via
2196 * the capability interface.
2197 */
2198 break;
2199 case SCSI_CAP_TAGGED_QING:
2200 rval = 1;
2201 break;
2202 case SCSI_CAP_SECTOR_SIZE:
2203 rval = 1;
2204 break;
2205
2206 case SCSI_CAP_TOTAL_SECTORS:
2207 rval = 1;
2208 break;
2209 default:
2210 rval = -1;
2211 break;
2212 }
2213
2214 return (rval);
2215 }
2216
2217 /*
2218 * tran_destroy_pkt - deallocate scsi_pkt structure
2219 * @ap:
2220 * @pkt:
2221 *
2222 * The tran_destroy_pkt() entry point is the HBA driver function that
2223 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
2224 * called when the target driver calls scsi_destroy_pkt(). The
2225 * tran_destroy_pkt() entry point must free any DMA resources that have been
2226 * allocated for the packet. An implicit DMA synchronization occurs if the
2227 * DMA resources are freed and any cached data remains after the completion
2228 * of the transfer.
2229 */
2230 static void
2231 mrsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2232 {
2233 struct scsa_cmd *acmd = PKT2CMD(pkt);
2234
2235 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2236
2237 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2238 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2239
2240 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2241
2242 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2243
2244 acmd->cmd_dmahandle = NULL;
2245 }
2246
2247 /* free the pkt */
2248 scsi_hba_pkt_free(ap, pkt);
2249 }
2250
2251 /*
2252 * tran_dmafree - deallocates DMA resources
2253 * @ap:
2254 * @pkt:
2255 *
 * The tran_dmafree() entry point deallocates DMA resources that have been
2257 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
2258 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
2259 * free only DMA resources allocated for a scsi_pkt structure, not the
2260 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
2261 * implicitly performed.
2262 */
2263 /*ARGSUSED*/
2264 static void
2265 mrsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2266 {
2267 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2268
2269 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2270
2271 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2272 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2273
2274 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2275
2276 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2277
2278 acmd->cmd_dmahandle = NULL;
2279 }
2280 }
2281
2282 /*
2283 * tran_sync_pkt - synchronize the DMA object allocated
2284 * @ap:
2285 * @pkt:
2286 *
2287 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
2288 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
2289 * entry point is called when the target driver calls scsi_sync_pkt(). If the
2290 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
2291 * must synchronize the CPU's view of the data. If the data transfer direction
2292 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
2293 * device's view of the data.
2294 */
2295 /*ARGSUSED*/
2296 static void
2297 mrsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2298 {
2299 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2300
2301 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2302
2303 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2304 (void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
2305 acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
2306 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
2307 }
2308 }
2309
2310 /*ARGSUSED*/
2311 static int
2312 mrsas_tran_quiesce(dev_info_t *dip)
2313 {
2314 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2315
2316 return (1);
2317 }
2318
2319 /*ARGSUSED*/
2320 static int
2321 mrsas_tran_unquiesce(dev_info_t *dip)
2322 {
2323 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2324
2325 return (1);
2326 }
2327
2328
2329 /*
2330 * mrsas_isr(caddr_t)
2331 *
2332 * The Interrupt Service Routine
2333 *
2334 * Collect status for all completed commands and do callback
2335 *
2336 */
2337 static uint_t
2338 mrsas_isr(struct mrsas_instance *instance)
2339 {
2340 int need_softintr;
2341 uint32_t producer;
2342 uint32_t consumer;
2343 uint32_t context;
2344 int retval;
2345
2346 struct mrsas_cmd *cmd;
2347 struct mrsas_header *hdr;
2348 struct scsi_pkt *pkt;
2349
2350 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2351 ASSERT(instance);
2352 if (instance->tbolt) {
2353 mutex_enter(&instance->chip_mtx);
2354 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
2355 !(instance->func_ptr->intr_ack(instance))) {
2356 mutex_exit(&instance->chip_mtx);
2357 return (DDI_INTR_UNCLAIMED);
2358 }
2359 retval = mr_sas_tbolt_process_outstanding_cmd(instance);
2360 mutex_exit(&instance->chip_mtx);
2361 return (retval);
2362 } else {
2363 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
2364 !instance->func_ptr->intr_ack(instance)) {
2365 return (DDI_INTR_UNCLAIMED);
2366 }
2367 }
2368
2369 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
2370 0, 0, DDI_DMA_SYNC_FORCPU);
2371
2372 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2373 != DDI_SUCCESS) {
2374 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2375 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2376 con_log(CL_ANN1, (CE_WARN,
2377 "mr_sas_isr(): FMA check, returning DDI_INTR_UNCLAIMED"));
2378 return (DDI_INTR_CLAIMED);
2379 }
2380 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2381
2382 #ifdef OCRDEBUG
2383 if (debug_consecutive_timeout_after_ocr_g == 1) {
2384 con_log(CL_ANN1, (CE_NOTE,
2385 "simulating consecutive timeout after ocr"));
2386 return (DDI_INTR_CLAIMED);
2387 }
2388 #endif
2389
2390 mutex_enter(&instance->completed_pool_mtx);
2391 mutex_enter(&instance->cmd_pend_mtx);
2392
2393 producer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2394 instance->producer);
2395 consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2396 instance->consumer);
2397
2398 con_log(CL_ANN, (CE_CONT, " producer %x consumer %x ",
2399 producer, consumer));
2400 if (producer == consumer) {
2401 con_log(CL_ANN, (CE_WARN, "producer == consumer case"));
2402 DTRACE_PROBE2(isr_pc_err, uint32_t, producer,
2403 uint32_t, consumer);
2404 mutex_exit(&instance->cmd_pend_mtx);
2405 mutex_exit(&instance->completed_pool_mtx);
2406 return (DDI_INTR_CLAIMED);
2407 }
2408
2409 while (consumer != producer) {
2410 context = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2411 &instance->reply_queue[consumer]);
2412 cmd = instance->cmd_list[context];
2413
2414 if (cmd->sync_cmd == MRSAS_TRUE) {
2415 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2416 if (hdr) {
2417 mlist_del_init(&cmd->list);
2418 }
2419 } else {
2420 pkt = cmd->pkt;
2421 if (pkt) {
2422 mlist_del_init(&cmd->list);
2423 }
2424 }
2425
2426 mlist_add_tail(&cmd->list, &instance->completed_pool_list);
2427
2428 consumer++;
2429 if (consumer == (instance->max_fw_cmds + 1)) {
2430 consumer = 0;
2431 }
2432 }
2433 ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
2434 instance->consumer, consumer);
2435 mutex_exit(&instance->cmd_pend_mtx);
2436 mutex_exit(&instance->completed_pool_mtx);
2437
2438 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
2439 0, 0, DDI_DMA_SYNC_FORDEV);
2440
2441 if (instance->softint_running) {
2442 need_softintr = 0;
2443 } else {
2444 need_softintr = 1;
2445 }
2446
2447 if (instance->isr_level == HIGH_LEVEL_INTR) {
2448 if (need_softintr) {
2449 ddi_trigger_softintr(instance->soft_intr_id);
2450 }
2451 } else {
2452 /*
2453 * Not a high-level interrupt, therefore call the soft level
2454 * interrupt explicitly
2455 */
2456 (void) mrsas_softintr(instance);
2457 }
2458
2459 return (DDI_INTR_CLAIMED);
2460 }
2461
2462
2463 /*
2464 * ************************************************************************** *
2465 * *
2466 * libraries *
2467 * *
2468 * ************************************************************************** *
2469 */
2470 /*
2471 * get_mfi_pkt : Get a command from the free pool
2472 * After successful allocation, the caller of this routine
2473 * must clear the frame buffer (memset to zero) before
2474 * using the packet further.
2475 *
2476 * ***** Note *****
2477 * After clearing the frame buffer the context id of the
2478 * frame buffer SHOULD be restored back.
2479 */
2480 struct mrsas_cmd *
2481 mrsas_get_mfi_pkt(struct mrsas_instance *instance)
2482 {
2483 mlist_t *head = &instance->cmd_pool_list;
2484 struct mrsas_cmd *cmd = NULL;
2485
2486 mutex_enter(&instance->cmd_pool_mtx);
2487
2488 if (!mlist_empty(head)) {
2489 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2490 mlist_del_init(head->next);
2491 }
2492 if (cmd != NULL) {
2493 cmd->pkt = NULL;
2494 cmd->retry_count_for_ocr = 0;
2495 cmd->drv_pkt_time = 0;
2496
2497 }
2498 mutex_exit(&instance->cmd_pool_mtx);
2499
2500 return (cmd);
2501 }
2502
2503 static struct mrsas_cmd *
2504 get_mfi_app_pkt(struct mrsas_instance *instance)
2505 {
2506 mlist_t *head = &instance->app_cmd_pool_list;
2507 struct mrsas_cmd *cmd = NULL;
2508
2509 mutex_enter(&instance->app_cmd_pool_mtx);
2510
2511 if (!mlist_empty(head)) {
2512 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2513 mlist_del_init(head->next);
2514 }
2515 if (cmd != NULL) {
2516 cmd->pkt = NULL;
2517 cmd->retry_count_for_ocr = 0;
2518 cmd->drv_pkt_time = 0;
2519 }
2520
2521 mutex_exit(&instance->app_cmd_pool_mtx);
2522
2523 return (cmd);
2524 }
2525 /*
2526 * return_mfi_pkt : Return a cmd to free command pool
2527 */
2528 void
2529 mrsas_return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2530 {
2531 mutex_enter(&instance->cmd_pool_mtx);
2532 /* use mlist_add_tail for debug assistance */
2533 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2534
2535 mutex_exit(&instance->cmd_pool_mtx);
2536 }
2537
2538 static void
2539 return_mfi_app_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2540 {
2541 mutex_enter(&instance->app_cmd_pool_mtx);
2542
2543 mlist_add(&cmd->list, &instance->app_cmd_pool_list);
2544
2545 mutex_exit(&instance->app_cmd_pool_mtx);
2546 }
/*
 * Move a command onto the pending list and arm its timeout tracking.
 *
 * For sync (DCMD) commands the timeout is taken from the frame header
 * (floored at debug_timeout_g); for packet commands it is simply
 * debug_timeout_g.  The first pending command also starts the periodic
 * io_timeout_checker() callout if it is not already running.
 */
void
push_pending_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	struct scsi_pkt *pkt;
	struct mrsas_header *hdr;
	con_log(CL_DLEVEL2, (CE_NOTE, "push_pending_pkt(): Called\n"));
	mutex_enter(&instance->cmd_pend_mtx);
	/* unlink from any list it is on, then append to the pending list */
	mlist_del_init(&cmd->list);
	mlist_add_tail(&cmd->list, &instance->cmd_pend_list);
	if (cmd->sync_cmd == MRSAS_TRUE) {
		/*
		 * NOTE(review): hdr is the address of an embedded member,
		 * so the "if (hdr)" checks below are effectively always
		 * true unless cmd->frame itself is NULL.
		 */
		hdr = (struct mrsas_header *)&cmd->frame->hdr;
		if (hdr) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x "
			    "time %llx",
			    (void *)cmd, cmd->index,
			    gethrtime()));
			/* Wait for specified interval */
			cmd->drv_pkt_time = ddi_get16(
			    cmd->frame_dma_obj.acc_handle, &hdr->timeout);
			/* never track with a shorter timeout than the floor */
			if (cmd->drv_pkt_time < debug_timeout_g)
				cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_pkt(): "
			    "Called IO Timeout Value %x\n",
			    cmd->drv_pkt_time));
		}
		/* start the 1-second timeout checker on first use */
		if (hdr && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	} else {
		pkt = cmd->pkt;
		if (pkt) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x pkt %p, "
			    "time %llx",
			    (void *)cmd, cmd->index, (void *)pkt,
			    gethrtime()));
			/* packet commands always use the default timeout */
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
		}
		/* start the 1-second timeout checker on first use */
		if (pkt && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	}

	mutex_exit(&instance->cmd_pend_mtx);

}
2599
/*
 * Debug helper: log every command currently on cmd_pend_list.
 *
 * Temporarily raises debug_level_g to CL_ANN1 so the con_log() calls
 * below are emitted, then restores the caller's level.  The first
 * command is dumped in full detail (0xDD); the rest in brief.
 *
 * Always returns DDI_SUCCESS.
 */
int
mrsas_print_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head;
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_header *hdr;
	unsigned int flag = 1;
	struct scsi_pkt *pkt;
	int saved_level;
	int cmd_count = 0;

	/* force debug output on for the duration of this dump */
	saved_level = debug_level_g;
	debug_level_g = CL_ANN1;

	cmn_err(CE_NOTE, "mrsas_print_pending_cmds(): Called\n");

	/*
	 * NOTE(review): cmd_pend_mtx is dropped between fetching each
	 * entry and logging it, so the list may change underneath this
	 * walk; acceptable for a diagnostic dump.
	 */
	while (flag) {
		mutex_enter(&instance->cmd_pend_mtx);
		tmp = tmp->next;
		if (tmp == head) {
			/* wrapped back to the list head: done */
			mutex_exit(&instance->cmd_pend_mtx);
			flag = 0;
			con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds():"
			    " NO MORE CMDS PENDING....\n"));
			break;
		} else {
			cmd = mlist_entry(tmp, struct mrsas_cmd, list);
			mutex_exit(&instance->cmd_pend_mtx);
			if (cmd) {
				if (cmd->sync_cmd == MRSAS_TRUE) {
					/* sync (DCMD) command: no scsi_pkt */
					hdr = (struct mrsas_header *)
					    &cmd->frame->hdr;
					if (hdr) {
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index 0x%x "
						    "drv_pkt_time 0x%x (NO-PKT)"
						    " hdr %p\n", (void *)cmd,
						    cmd->index,
						    cmd->drv_pkt_time,
						    (void *)hdr));
					}
				} else {
					pkt = cmd->pkt;
					if (pkt) {
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index 0x%x "
						    "drv_pkt_time 0x%x pkt %p \n",
						    (void *)cmd, cmd->index,
						    cmd->drv_pkt_time, (void *)pkt));
					}
				}

				/* first command gets the full detail dump */
				if (++cmd_count == 1) {
					mrsas_print_cmd_details(instance, cmd,
					    0xDD);
				} else {
					mrsas_print_cmd_details(instance, cmd,
					    1);
				}

			}
		}
	}
	con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds(): Done\n"));


	/* restore the caller's debug level */
	debug_level_g = saved_level;

	return (DDI_SUCCESS);
}
2671
2672
/*
 * Fail every command left on cmd_pend_list (used when the adapter is
 * being taken down or has been reset).
 *
 * Packet (I/O) commands are completed back to SCSA with CMD_DEV_GONE;
 * sync (DCMD) commands are completed to the waiting application thread
 * with MFI_STAT_INVALID_STATUS.  Each command is unlinked from the
 * pending list as it is processed.
 *
 * Always returns DDI_SUCCESS.
 */
int
mrsas_complete_pending_cmds(struct mrsas_instance *instance)
{

	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;
	struct mrsas_header *hdr;

	struct mlist_head *pos, *next;

	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_complete_pending_cmds(): Called"));

	/* _safe iteration: entries are removed while walking the list */
	mutex_enter(&instance->cmd_pend_mtx);
	mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);
		if (cmd) {
			pkt = cmd->pkt;
			if (pkt) { /* for IO */
				/* only complete pkts with a callback */
				if (((pkt->pkt_flags & FLAG_NOINTR)
				    == 0) && pkt->pkt_comp) {
					pkt->pkt_reason
					    = CMD_DEV_GONE;
					pkt->pkt_statistics
					    = STAT_DISCON;
					con_log(CL_ANN1, (CE_CONT,
					    "fail and posting to scsa "
					    "cmd %p index %x"
					    " pkt %p "
					    "time : %llx",
					    (void *)cmd, cmd->index,
					    (void *)pkt, gethrtime()));
					/*
					 * NOTE(review): the completion
					 * callback runs with cmd_pend_mtx
					 * held here.
					 */
					(*pkt->pkt_comp)(pkt);
				}
			} else { /* for DCMDS */
				if (cmd->sync_cmd == MRSAS_TRUE) {
					hdr = (struct mrsas_header *)&cmd->frame->hdr;
					con_log(CL_ANN1, (CE_CONT,
					    "posting invalid status to application "
					    "cmd %p index %x"
					    " hdr %p "
					    "time : %llx",
					    (void *)cmd, cmd->index,
					    (void *)hdr, gethrtime()));
					hdr->cmd_status = MFI_STAT_INVALID_STATUS;
					/* wake the thread waiting on this cmd */
					complete_cmd_in_sync_mode(instance, cmd);
				}
			}
			mlist_del_init(&cmd->list);
		} else {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_complete_pending_cmds:"
			    "NULL command\n"));
		}
		con_log(CL_ANN1, (CE_CONT,
		    "mrsas_complete_pending_cmds:"
		    "looping for more commands\n"));
	}
	mutex_exit(&instance->cmd_pend_mtx);

	con_log(CL_ANN1, (CE_CONT, "mrsas_complete_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}
2736
/*
 * Debug helper: dump the state of one command.
 *
 * detail == 0xDD requests a full dump (and temporarily forces
 * debug_level_g to CL_ANN1 so the output is emitted); any other value
 * gives a brief dump at the current debug level.  The Thunderbolt-only
 * RAID_SCSI_IO_REQUEST/RAID-CONTEXT sections are read from the MPI2
 * frame via its DMA access handle.
 */
void
mrsas_print_cmd_details(struct mrsas_instance *instance, struct mrsas_cmd *cmd,
    int detail)
{
	struct scsi_pkt *pkt = cmd->pkt;
	Mpi2RaidSCSIIORequest_t *scsi_io = cmd->scsi_io_request;
	int i;
	/* only saved/restored when detail == 0xDD (see below) */
	int saved_level;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;

	if (detail == 0xDD) {
		saved_level = debug_level_g;
		debug_level_g = CL_ANN1;
	}


	if (instance->tbolt) {
		con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p "
		    "cmd->index 0x%x SMID 0x%x timer 0x%x sec\n",
		    (void *)cmd, cmd->index, cmd->SMID, cmd->drv_pkt_time));
	} else {
		con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p "
		    "cmd->index 0x%x timer 0x%x sec\n",
		    (void *)cmd, cmd->index, cmd->drv_pkt_time));
	}

	if (pkt) {
		con_log(CL_ANN1, (CE_CONT, "scsi_pkt CDB[0]=0x%x",
		    pkt->pkt_cdbp[0]));
	} else {
		con_log(CL_ANN1, (CE_CONT, "NO-PKT"));
	}

	/* full dump of the MPI2 request, Thunderbolt only */
	if ((detail == 0xDD) && instance->tbolt) {
		con_log(CL_ANN1, (CE_CONT, "RAID_SCSI_IO_REQUEST\n"));
		con_log(CL_ANN1, (CE_CONT, "DevHandle=0x%X Function=0x%X "
		    "IoFlags=0x%X SGLFlags=0x%X DataLength=0x%X\n",
		    ddi_get16(acc_handle, &scsi_io->DevHandle),
		    ddi_get8(acc_handle, &scsi_io->Function),
		    ddi_get16(acc_handle, &scsi_io->IoFlags),
		    ddi_get16(acc_handle, &scsi_io->SGLFlags),
		    ddi_get32(acc_handle, &scsi_io->DataLength)));

		/* dump the whole 32-byte CDB area */
		for (i = 0; i < 32; i++) {
			con_log(CL_ANN1, (CE_CONT, "CDB[%d]=0x%x ", i,
			    ddi_get8(acc_handle, &scsi_io->CDB.CDB32[i])));
		}

		con_log(CL_ANN1, (CE_CONT, "RAID-CONTEXT\n"));
		con_log(CL_ANN1, (CE_CONT, "status=0x%X extStatus=0x%X "
		    "ldTargetId=0x%X timeoutValue=0x%X regLockFlags=0x%X "
		    "RAIDFlags=0x%X regLockRowLBA=0x%" PRIu64
		    " regLockLength=0x%X spanArm=0x%X\n",
		    ddi_get8(acc_handle, &scsi_io->RaidContext.status),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.extStatus),
		    ddi_get16(acc_handle, &scsi_io->RaidContext.ldTargetId),
		    ddi_get16(acc_handle, &scsi_io->RaidContext.timeoutValue),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.regLockFlags),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.RAIDFlags),
		    ddi_get64(acc_handle, &scsi_io->RaidContext.regLockRowLBA),
		    ddi_get32(acc_handle, &scsi_io->RaidContext.regLockLength),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.spanArm)));
	}

	/* restore the debug level forced on at entry */
	if (detail == 0xDD) {
		debug_level_g = saved_level;
	}
}
2806
2807
/*
 * Re-issue every command on cmd_pend_list (used after an online
 * controller reset).
 *
 * Each command's timeout is re-armed and its OCR retry count bumped;
 * a command that has been retried more than IO_RETRY_COUNT times
 * causes the adapter to be killed and DDI_FAILURE to be returned.
 * Otherwise returns DDI_SUCCESS.
 */
int
mrsas_issue_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head->next;
	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_pending_cmds(): Called"));
	/*
	 * NOTE(review): cmd_pend_mtx protects only the fetch of each
	 * entry; it is dropped before the command is re-issued.
	 */
	while (tmp != head) {
		mutex_enter(&instance->cmd_pend_mtx);
		cmd = mlist_entry(tmp, struct mrsas_cmd, list);
		tmp = tmp->next;
		mutex_exit(&instance->cmd_pend_mtx);
		if (cmd) {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_issue_pending_cmds(): "
			    "Got a cmd: cmd %p index 0x%x drv_pkt_time 0x%x ",
			    (void *)cmd, cmd->index, cmd->drv_pkt_time));

			/* Reset command timeout value */
			if (cmd->drv_pkt_time < debug_timeout_g)
				cmd->drv_pkt_time = (uint16_t)debug_timeout_g;

			cmd->retry_count_for_ocr++;

			cmn_err(CE_CONT, "cmd retry count = %d\n",
			    cmd->retry_count_for_ocr);

			/* too many OCR retries: give up on the adapter */
			if (cmd->retry_count_for_ocr > IO_RETRY_COUNT) {
				cmn_err(CE_WARN, "mrsas_issue_pending_cmds(): "
				    "cmd->retry_count exceeded limit >%d\n",
				    IO_RETRY_COUNT);
				mrsas_print_cmd_details(instance, cmd, 0xDD);

				cmn_err(CE_WARN,
				    "mrsas_issue_pending_cmds():"
				    "Calling KILL Adapter\n");
				if (instance->tbolt)
					mrsas_tbolt_kill_adapter(instance);
				else
					(void) mrsas_kill_adapter(instance);
				return (DDI_FAILURE);
			}

			pkt = cmd->pkt;
			if (pkt) {
				con_log(CL_ANN1, (CE_CONT,
				    "PENDING PKT-CMD ISSUE: cmd %p index %x "
				    "pkt %p time %llx",
				    (void *)cmd, cmd->index,
				    (void *)pkt,
				    gethrtime()));

			} else {
				cmn_err(CE_CONT,
				    "mrsas_issue_pending_cmds(): NO-PKT, "
				    "cmd %p index 0x%x drv_pkt_time 0x%x ",
				    (void *)cmd, cmd->index, cmd->drv_pkt_time);
			}


			/* sync cmds block the caller; pkt cmds are async */
			if (cmd->sync_cmd == MRSAS_TRUE) {
				cmn_err(CE_CONT, "mrsas_issue_pending_cmds(): "
				    "SYNC_CMD == TRUE \n");
				instance->func_ptr->issue_cmd_in_sync_mode(
				    instance, cmd);
			} else {
				instance->func_ptr->issue_cmd(cmd, instance);
			}
		} else {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_issue_pending_cmds: NULL command\n"));
		}
		con_log(CL_ANN1, (CE_CONT,
		    "mrsas_issue_pending_cmds:"
		    "looping for more commands"));
	}
	con_log(CL_ANN1, (CE_CONT, "mrsas_issue_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}
2889
2890
2891
2892 /*
2893 * destroy_mfi_frame_pool
2894 */
2895 void
2896 destroy_mfi_frame_pool(struct mrsas_instance *instance)
2897 {
2898 int i;
2899 uint32_t max_cmd = instance->max_fw_cmds;
2900
2901 struct mrsas_cmd *cmd;
2902
2903 /* return all frames to pool */
2904
2905 for (i = 0; i < max_cmd; i++) {
2906
2907 cmd = instance->cmd_list[i];
2908
2909 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
2910 (void) mrsas_free_dma_obj(instance, cmd->frame_dma_obj);
2911
2912 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
2913 }
2914
2915 }
2916
2917 /*
2918 * create_mfi_frame_pool
2919 */
2920 int
2921 create_mfi_frame_pool(struct mrsas_instance *instance)
2922 {
2923 int i = 0;
2924 int cookie_cnt;
2925 uint16_t max_cmd;
2926 uint16_t sge_sz;
2927 uint32_t sgl_sz;
2928 uint32_t tot_frame_size;
2929 struct mrsas_cmd *cmd;
2930 int retval = DDI_SUCCESS;
2931
2932 max_cmd = instance->max_fw_cmds;
2933 sge_sz = sizeof (struct mrsas_sge_ieee);
2934 /* calculated the number of 64byte frames required for SGL */
2935 sgl_sz = sge_sz * instance->max_num_sge;
2936 tot_frame_size = sgl_sz + MRMFI_FRAME_SIZE + SENSE_LENGTH;
2937
2938 con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
2939 "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));
2940
2941 while (i < max_cmd) {
2942 cmd = instance->cmd_list[i];
2943
2944 cmd->frame_dma_obj.size = tot_frame_size;
2945 cmd->frame_dma_obj.dma_attr = mrsas_generic_dma_attr;
2946 cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2947 cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
2948 cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
2949 cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;
2950
2951 cookie_cnt = mrsas_alloc_dma_obj(instance, &cmd->frame_dma_obj,
2952 (uchar_t)DDI_STRUCTURE_LE_ACC);
2953
2954 if (cookie_cnt == -1 || cookie_cnt > 1) {
2955 cmn_err(CE_WARN,
2956 "create_mfi_frame_pool: could not alloc.");
2957 retval = DDI_FAILURE;
2958 goto mrsas_undo_frame_pool;
2959 }
2960
2961 bzero(cmd->frame_dma_obj.buffer, tot_frame_size);
2962
2963 cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
2964 cmd->frame = (union mrsas_frame *)cmd->frame_dma_obj.buffer;
2965 cmd->frame_phys_addr =
2966 cmd->frame_dma_obj.dma_cookie[0].dmac_address;
2967
2968 cmd->sense = (uint8_t *)(((unsigned long)
2969 cmd->frame_dma_obj.buffer) +
2970 tot_frame_size - SENSE_LENGTH);
2971 cmd->sense_phys_addr =
2972 cmd->frame_dma_obj.dma_cookie[0].dmac_address +
2973 tot_frame_size - SENSE_LENGTH;
2974
2975 if (!cmd->frame || !cmd->sense) {
2976 cmn_err(CE_WARN,
2977 "mr_sas: pci_pool_alloc failed");
2978 retval = ENOMEM;
2979 goto mrsas_undo_frame_pool;
2980 }
2981
2982 ddi_put32(cmd->frame_dma_obj.acc_handle,
2983 &cmd->frame->io.context, cmd->index);
2984 i++;
2985
2986 con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
2987 cmd->index, cmd->frame_phys_addr));
2988 }
2989
2990 return (DDI_SUCCESS);
2991
2992 mrsas_undo_frame_pool:
2993 if (i > 0)
2994 destroy_mfi_frame_pool(instance);
2995
2996 return (retval);
2997 }
2998
2999 /*
3000 * free_additional_dma_buffer
3001 */
/*
 * Free the auxiliary DMA buffers allocated by
 * alloc_additional_dma_buffer(): the internal buffer that holds the
 * producer/consumer indices and reply queue, and the event-detail
 * buffer.  Each is freed only if currently marked allocated, and then
 * marked freed, so this is safe to call on a partially set up instance.
 */
static void
free_additional_dma_buffer(struct mrsas_instance *instance)
{
	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}

	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_evt_detail_obj);
		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	}
}
3017
3018 /*
3019 * alloc_additional_dma_buffer
3020 */
/*
 * Allocate the auxiliary DMA buffers used by the MFI path:
 *
 * 1. A two-page internal buffer carved up as:
 *      [0..3]              producer index
 *      [4..7]              consumer index
 *      [8..8+reply_q_sz)   reply queue (max_fw_cmds + 1 slots)
 *      [8+reply_q_sz..end] general-purpose internal buffer
 * 2. A separate buffer for asynchronous event details.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE (with the internal buffer undone).
 */
static int
alloc_additional_dma_buffer(struct mrsas_instance *instance)
{
	uint32_t reply_q_sz;
	uint32_t internal_buf_size = PAGESIZE*2;

	/* max cmds plus 1 + producer & consumer */
	reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);

	/* single-cookie, 32-bit addressable allocation */
	instance->mfi_internal_dma_obj.size = internal_buf_size;
	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN,
		    "mr_sas: could not alloc reply queue");
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);

	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* carve the buffer into the regions described above */
	instance->producer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer);
	instance->consumer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 4);
	instance->reply_queue = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 8);
	instance->internal_buf = (caddr_t)(((unsigned long)
	    instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
	/* physical address of internal_buf, for handing to firmware */
	instance->internal_buf_dmac_add =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
	    (reply_q_sz + 8);
	/*
	 * NOTE(review): assumes reply_q_sz + 8 <= 2*PAGESIZE; otherwise
	 * this size would underflow -- confirm against max_fw_cmds bounds.
	 */
	instance->internal_buf_size = internal_buf_size -
	    (reply_q_sz + 8);

	/* allocate evt_detail */
	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN, "alloc_additional_dma_buffer: "
		    "could not allocate data transfer buffer.");
		goto mrsas_undo_internal_buff;
	}

	bzero(instance->mfi_evt_detail_obj.buffer,
	    sizeof (struct mrsas_evt_detail));

	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;

	return (DDI_SUCCESS);

mrsas_undo_internal_buff:
	/* roll back the internal buffer allocated above */
	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}

	return (DDI_FAILURE);
}
3093
3094
3095 void
3096 mrsas_free_cmd_pool(struct mrsas_instance *instance)
3097 {
3098 int i;
3099 uint32_t max_cmd;
3100 size_t sz;
3101
3102 /* already freed */
3103 if (instance->cmd_list == NULL) {
3104 return;
3105 }
3106
3107 max_cmd = instance->max_fw_cmds;
3108
3109 /* size of cmd_list array */
3110 sz = sizeof (struct mrsas_cmd *) * max_cmd;
3111
3112 /* First free each cmd */
3113 for (i = 0; i < max_cmd; i++) {
3114 if (instance->cmd_list[i] != NULL) {
3115 kmem_free(instance->cmd_list[i],
3116 sizeof (struct mrsas_cmd));
3117 }
3118
3119 instance->cmd_list[i] = NULL;
3120 }
3121
3122 /* Now, free cmd_list array */
3123 if (instance->cmd_list != NULL)
3124 kmem_free(instance->cmd_list, sz);
3125
3126 instance->cmd_list = NULL;
3127
3128 INIT_LIST_HEAD(&instance->cmd_pool_list);
3129 INIT_LIST_HEAD(&instance->cmd_pend_list);
3130 if (instance->tbolt) {
3131 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
3132 } else {
3133 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3134 }
3135
3136 }
3137
3138
3139 /*
3140 * mrsas_alloc_cmd_pool
3141 */
3142 int
3143 mrsas_alloc_cmd_pool(struct mrsas_instance *instance)
3144 {
3145 int i;
3146 int count;
3147 uint32_t max_cmd;
3148 uint32_t reserve_cmd;
3149 size_t sz;
3150
3151 struct mrsas_cmd *cmd;
3152
3153 max_cmd = instance->max_fw_cmds;
3154 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
3155 "max_cmd %x", max_cmd));
3156
3157
3158 sz = sizeof (struct mrsas_cmd *) * max_cmd;
3159
3160 /*
3161 * instance->cmd_list is an array of struct mrsas_cmd pointers.
3162 * Allocate the dynamic array first and then allocate individual
3163 * commands.
3164 */
3165 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
3166 ASSERT(instance->cmd_list);
3167
3168 /* create a frame pool and assign one frame to each cmd */
3169 for (count = 0; count < max_cmd; count++) {
3170 instance->cmd_list[count] =
3171 kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
3172 ASSERT(instance->cmd_list[count]);
3173 }
3174
3175 /* add all the commands to command pool */
3176
3177 INIT_LIST_HEAD(&instance->cmd_pool_list);
3178 INIT_LIST_HEAD(&instance->cmd_pend_list);
3179 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3180
3181 /*
3182 * When max_cmd is lower than MRSAS_APP_RESERVED_CMDS, how do I split
3183 * into app_cmd and regular cmd? For now, just take
3184 * max(1/8th of max, 4);
3185 */
3186 reserve_cmd = min(MRSAS_APP_RESERVED_CMDS,
3187 max(max_cmd >> 3, MRSAS_APP_MIN_RESERVED_CMDS));
3188
3189 for (i = 0; i < reserve_cmd; i++) {
3190 cmd = instance->cmd_list[i];
3191 cmd->index = i;
3192 mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list);
3193 }
3194
3195
3196 for (i = reserve_cmd; i < max_cmd; i++) {
3197 cmd = instance->cmd_list[i];
3198 cmd->index = i;
3199 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
3200 }
3201
3202 return (DDI_SUCCESS);
3203
3204 mrsas_undo_cmds:
3205 if (count > 0) {
3206 /* free each cmd */
3207 for (i = 0; i < count; i++) {
3208 if (instance->cmd_list[i] != NULL) {
3209 kmem_free(instance->cmd_list[i],
3210 sizeof (struct mrsas_cmd));
3211 }
3212 instance->cmd_list[i] = NULL;
3213 }
3214 }
3215
3216 mrsas_undo_cmd_list:
3217 if (instance->cmd_list != NULL)
3218 kmem_free(instance->cmd_list, sz);
3219 instance->cmd_list = NULL;
3220
3221 return (DDI_FAILURE);
3222 }
3223
3224
3225 /*
3226 * free_space_for_mfi
3227 */
3228 static void
3229 free_space_for_mfi(struct mrsas_instance *instance)
3230 {
3231
3232 /* already freed */
3233 if (instance->cmd_list == NULL) {
3234 return;
3235 }
3236
3237 /* Free additional dma buffer */
3238 free_additional_dma_buffer(instance);
3239
3240 /* Free the MFI frame pool */
3241 destroy_mfi_frame_pool(instance);
3242
3243 /* Free all the commands in the cmd_list */
3244 /* Free the cmd_list buffer itself */
3245 mrsas_free_cmd_pool(instance);
3246 }
3247
3248 /*
3249 * alloc_space_for_mfi
3250 */
3251 static int
3252 alloc_space_for_mfi(struct mrsas_instance *instance)
3253 {
3254 /* Allocate command pool (memory for cmd_list & individual commands) */
3255 if (mrsas_alloc_cmd_pool(instance)) {
3256 cmn_err(CE_WARN, "error creating cmd pool");
3257 return (DDI_FAILURE);
3258 }
3259
3260 /* Allocate MFI Frame pool */
3261 if (create_mfi_frame_pool(instance)) {
3262 cmn_err(CE_WARN, "error creating frame DMA pool");
3263 goto mfi_undo_cmd_pool;
3264 }
3265
3266 /* Allocate additional DMA buffer */
3267 if (alloc_additional_dma_buffer(instance)) {
3268 cmn_err(CE_WARN, "error creating frame DMA pool");
3269 goto mfi_undo_frame_pool;
3270 }
3271
3272 return (DDI_SUCCESS);
3273
3274 mfi_undo_frame_pool:
3275 destroy_mfi_frame_pool(instance);
3276
3277 mfi_undo_cmd_pool:
3278 mrsas_free_cmd_pool(instance);
3279
3280 return (DDI_FAILURE);
3281 }
3282
3283
3284
3285 /*
3286 * get_ctrl_info
3287 */
3288 static int
3289 get_ctrl_info(struct mrsas_instance *instance,
3290 struct mrsas_ctrl_info *ctrl_info)
3291 {
3292 int ret = 0;
3293
3294 struct mrsas_cmd *cmd;
3295 struct mrsas_dcmd_frame *dcmd;
3296 struct mrsas_ctrl_info *ci;
3297
3298 if (instance->tbolt) {
3299 cmd = get_raid_msg_mfi_pkt(instance);
3300 } else {
3301 cmd = mrsas_get_mfi_pkt(instance);
3302 }
3303
3304 if (!cmd) {
3305 con_log(CL_ANN, (CE_WARN,
3306 "Failed to get a cmd for ctrl info"));
3307 DTRACE_PROBE2(info_mfi_err, uint16_t, instance->fw_outstanding,
3308 uint16_t, instance->max_fw_cmds);
3309 return (DDI_FAILURE);
3310 }
3311
3312 /* Clear the frame buffer and assign back the context id */
3313 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3314 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3315 cmd->index);
3316
3317 dcmd = &cmd->frame->dcmd;
3318
3319 ci = (struct mrsas_ctrl_info *)instance->internal_buf;
3320
3321 if (!ci) {
3322 cmn_err(CE_WARN,
3323 "Failed to alloc mem for ctrl info");
3324 mrsas_return_mfi_pkt(instance, cmd);
3325 return (DDI_FAILURE);
3326 }
3327
3328 (void) memset(ci, 0, sizeof (struct mrsas_ctrl_info));
3329
3330 /* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
3331 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3332
3333 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3334 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
3335 MFI_CMD_STATUS_POLL_MODE);
3336 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3337 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3338 MFI_FRAME_DIR_READ);
3339 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3340 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3341 sizeof (struct mrsas_ctrl_info));
3342 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3343 MR_DCMD_CTRL_GET_INFO);
3344 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3345 instance->internal_buf_dmac_add);
3346 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3347 sizeof (struct mrsas_ctrl_info));
3348
3349 cmd->frame_count = 1;
3350
3351 if (instance->tbolt) {
3352 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3353 }
3354
3355 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3356 ret = 0;
3357
3358 ctrl_info->max_request_size = ddi_get32(
3359 cmd->frame_dma_obj.acc_handle, &ci->max_request_size);
3360
3361 ctrl_info->ld_present_count = ddi_get16(
3362 cmd->frame_dma_obj.acc_handle, &ci->ld_present_count);
3363
3364 ctrl_info->properties.on_off_properties = ddi_get32(
3365 cmd->frame_dma_obj.acc_handle,
3366 &ci->properties.on_off_properties);
3367 ddi_rep_get8(cmd->frame_dma_obj.acc_handle,
3368 (uint8_t *)(ctrl_info->product_name),
3369 (uint8_t *)(ci->product_name), 80 * sizeof (char),
3370 DDI_DEV_AUTOINCR);
3371 /* should get more members of ci with ddi_get when needed */
3372 } else {
3373 cmn_err(CE_WARN, "get_ctrl_info: Ctrl info failed");
3374 ret = -1;
3375 }
3376
3377 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
3378 ret = -1;
3379 }
3380 if (instance->tbolt) {
3381 return_raid_msg_mfi_pkt(instance, cmd);
3382 } else {
3383 mrsas_return_mfi_pkt(instance, cmd);
3384 }
3385
3386 return (ret);
3387 }
3388
3389 /*
3390 * abort_aen_cmd
3391 */
3392 static int
3393 abort_aen_cmd(struct mrsas_instance *instance,
3394 struct mrsas_cmd *cmd_to_abort)
3395 {
3396 int ret = 0;
3397
3398 struct mrsas_cmd *cmd;
3399 struct mrsas_abort_frame *abort_fr;
3400
3401 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_aen:%d", __LINE__));
3402
3403 if (instance->tbolt) {
3404 cmd = get_raid_msg_mfi_pkt(instance);
3405 } else {
3406 cmd = mrsas_get_mfi_pkt(instance);
3407 }
3408
3409 if (!cmd) {
3410 con_log(CL_ANN1, (CE_WARN,
3411 "abort_aen_cmd():Failed to get a cmd for abort_aen_cmd"));
3412 DTRACE_PROBE2(abort_mfi_err, uint16_t, instance->fw_outstanding,
3413 uint16_t, instance->max_fw_cmds);
3414 return (DDI_FAILURE);
3415 }
3416
3417 /* Clear the frame buffer and assign back the context id */
3418 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3419 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3420 cmd->index);
3421
3422 abort_fr = &cmd->frame->abort;
3423
3424 /* prepare and issue the abort frame */
3425 ddi_put8(cmd->frame_dma_obj.acc_handle,
3426 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3427 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3428 MFI_CMD_STATUS_SYNC_MODE);
3429 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3430 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3431 cmd_to_abort->index);
3432 ddi_put32(cmd->frame_dma_obj.acc_handle,
3433 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3434 ddi_put32(cmd->frame_dma_obj.acc_handle,
3435 &abort_fr->abort_mfi_phys_addr_hi, 0);
3436
3437 instance->aen_cmd->abort_aen = 1;
3438
3439 cmd->frame_count = 1;
3440
3441 if (instance->tbolt) {
3442 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3443 }
3444
3445 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3446 con_log(CL_ANN1, (CE_WARN,
3447 "abort_aen_cmd: issue_cmd_in_poll_mode failed"));
3448 ret = -1;
3449 } else {
3450 ret = 0;
3451 }
3452
3453 instance->aen_cmd->abort_aen = 1;
3454 instance->aen_cmd = 0;
3455
3456 if (instance->tbolt) {
3457 return_raid_msg_mfi_pkt(instance, cmd);
3458 } else {
3459 mrsas_return_mfi_pkt(instance, cmd);
3460 }
3461
3462 atomic_add_16(&instance->fw_outstanding, (-1));
3463
3464 return (ret);
3465 }
3466
3467
/*
 * mrsas_build_init_cmd - populate an MFI INIT frame in *cmd_ptr.
 *
 * The INIT frame carries a mrsas_init_queue_info structure describing
 * the reply queue and the producer/consumer index locations.  Since the
 * INIT frame needs no SGL, the queue info is placed in the SGL area,
 * 64 bytes into the frame, and the frame's queue_info_new_phys_addr is
 * pointed at frame_phys_addr + 64.  A small DMA buffer holding the
 * driver version string is also allocated and linked into the frame.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if the driver-version DMA buffer
 * could not be allocated (the cmd itself is left for the caller to
 * return to its pool).
 */
static int
mrsas_build_init_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd **cmd_ptr)
{
	struct mrsas_cmd *cmd;
	struct mrsas_init_frame *init_frame;
	struct mrsas_init_queue_info *initq_info;
	struct mrsas_drv_ver drv_ver_info;


	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes. For
	 * this frame - since we don't need any SGL - we use SGL's space as
	 * queue info structure
	 */
	cmd = *cmd_ptr;


	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	init_frame = (struct mrsas_init_frame *)cmd->frame;
	/* queue info lives in the SGL area, 64 bytes into the frame */
	initq_info = (struct mrsas_init_queue_info *)
	    ((unsigned long)init_frame + 64);

	(void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
	(void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));

	ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);

	/* reply queue holds one entry more than the max outstanding cmds */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);

	/*
	 * Producer index, consumer index and the reply queue itself live
	 * back to back in mfi_internal_dma_obj (at offsets 0, 4 and 8).
	 */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);

	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);

	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);

	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &init_frame->cmd, MFI_CMD_OP_INIT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
	/* point the frame at the queue info embedded in its own SGL area */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_lo,
	    cmd->frame_phys_addr + 64);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_hi, 0);


	/* fill driver version information */
	fill_up_drv_ver(&drv_ver_info);

	/* allocate the driver version data transfer buffer */
	instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
	instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		con_log(CL_ANN, (CE_WARN,
		    "init_mfi : Could not allocate driver version buffer."));
		return (DDI_FAILURE);
	}
	/* copy driver version to dma buffer */
	(void) memset(instance->drv_ver_dma_obj.buffer, 0,
	    sizeof (drv_ver_info.drv_ver));
	/*
	 * NOTE(review): this writes drv_ver_dma_obj.buffer through the
	 * frame's access handle rather than drv_ver_dma_obj.acc_handle;
	 * looks mismatched but is kept as-is — confirm before changing.
	 */
	ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
	    (uint8_t *)drv_ver_info.drv_ver,
	    (uint8_t *)instance->drv_ver_dma_obj.buffer,
	    sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);


	/* copy driver version physical address to init frame */
	ddi_put64(cmd->frame_dma_obj.acc_handle, &init_frame->driverversion,
	    instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);

	ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
	    sizeof (struct mrsas_init_queue_info));

	cmd->frame_count = 1;

	*cmd_ptr = cmd;

	return (DDI_SUCCESS);
}
3573
3574
3575 /*
3576 * mrsas_init_adapter_ppc - Initialize MFI interface adapter.
3577 */
3578 int
3579 mrsas_init_adapter_ppc(struct mrsas_instance *instance)
3580 {
3581 struct mrsas_cmd *cmd;
3582
3583 /*
3584 * allocate memory for mfi adapter(cmd pool, individual commands, mfi
3585 * frames etc
3586 */
3587 if (alloc_space_for_mfi(instance) != DDI_SUCCESS) {
3588 con_log(CL_ANN, (CE_NOTE,
3589 "Error, failed to allocate memory for MFI adapter"));
3590 return (DDI_FAILURE);
3591 }
3592
3593 /* Build INIT command */
3594 cmd = mrsas_get_mfi_pkt(instance);
3595 if (cmd == NULL) {
3596 DTRACE_PROBE2(init_adapter_mfi_err, uint16_t,
3597 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
3598 return (DDI_FAILURE);
3599 }
3600
3601 if (mrsas_build_init_cmd(instance, &cmd) != DDI_SUCCESS) {
3602 con_log(CL_ANN,
3603 (CE_NOTE, "Error, failed to build INIT command"));
3604
3605 goto fail_undo_alloc_mfi_space;
3606 }
3607
3608 /*
3609 * Disable interrupt before sending init frame ( see linux driver code)
3610 * send INIT MFI frame in polled mode
3611 */
3612 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3613 con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
3614 goto fail_fw_init;
3615 }
3616
3617 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
3618 goto fail_fw_init;
3619 mrsas_return_mfi_pkt(instance, cmd);
3620
3621 if (ctio_enable &&
3622 (instance->func_ptr->read_fw_status_reg(instance) & 0x04000000)) {
3623 con_log(CL_ANN, (CE_NOTE, "mr_sas: IEEE SGL's supported"));
3624 instance->flag_ieee = 1;
3625 } else {
3626 instance->flag_ieee = 0;
3627 }
3628
3629 ASSERT(!instance->skinny || instance->flag_ieee);
3630
3631 instance->unroll.alloc_space_mfi = 1;
3632 instance->unroll.verBuff = 1;
3633
3634 return (DDI_SUCCESS);
3635
3636
3637 fail_fw_init:
3638 (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
3639
3640 fail_undo_alloc_mfi_space:
3641 mrsas_return_mfi_pkt(instance, cmd);
3642 free_space_for_mfi(instance);
3643
3644 return (DDI_FAILURE);
3645
3646 }
3647
3648 /*
3649 * mrsas_init_adapter - Initialize adapter.
3650 */
3651 int
3652 mrsas_init_adapter(struct mrsas_instance *instance)
3653 {
3654 struct mrsas_ctrl_info ctrl_info;
3655
3656
3657 /* we expect the FW state to be READY */
3658 if (mfi_state_transition_to_ready(instance)) {
3659 con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready"));
3660 return (DDI_FAILURE);
3661 }
3662
3663 /* get various operational parameters from status register */
3664 instance->max_num_sge =
3665 (instance->func_ptr->read_fw_status_reg(instance) &
3666 0xFF0000) >> 0x10;
3667 instance->max_num_sge =
3668 (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ?
3669 MRSAS_MAX_SGE_CNT : instance->max_num_sge;
3670
3671 /*
3672 * Reduce the max supported cmds by 1. This is to ensure that the
3673 * reply_q_sz (1 more than the max cmd that driver may send)
3674 * does not exceed max cmds that the FW can support
3675 */
3676 instance->max_fw_cmds =
3677 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
3678 instance->max_fw_cmds = instance->max_fw_cmds - 1;
3679
3680
3681
3682 /* Initialize adapter */
3683 if (instance->func_ptr->init_adapter(instance) != DDI_SUCCESS) {
3684 con_log(CL_ANN,
3685 (CE_WARN, "mr_sas: could not initialize adapter"));
3686 return (DDI_FAILURE);
3687 }
3688
3689 /* gather misc FW related information */
3690 instance->disable_online_ctrl_reset = 0;
3691
3692 if (!get_ctrl_info(instance, &ctrl_info)) {
3693 instance->max_sectors_per_req = ctrl_info.max_request_size;
3694 con_log(CL_ANN1, (CE_NOTE,
3695 "product name %s ld present %d",
3696 ctrl_info.product_name, ctrl_info.ld_present_count));
3697 } else {
3698 instance->max_sectors_per_req = instance->max_num_sge *
3699 PAGESIZE / 512;
3700 }
3701
3702 if (ctrl_info.properties.on_off_properties & DISABLE_OCR_PROP_FLAG)
3703 instance->disable_online_ctrl_reset = 1;
3704
3705 return (DDI_SUCCESS);
3706
3707 }
3708
3709
3710
/*
 * mrsas_issue_init_mfi - build and issue an MFI INIT frame using a
 * command from the application pool.
 *
 * Same frame layout as mrsas_build_init_cmd(): the queue info lives in
 * the frame's SGL area (64 bytes in), describing the reply queue and
 * the producer/consumer index locations inside mfi_internal_dma_obj.
 * Unlike mrsas_build_init_cmd(), no driver-version buffer is attached.
 * The frame is issued in polled mode and the command is returned to
 * the app pool on every exit path.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mrsas_issue_init_mfi(struct mrsas_instance *instance)
{
	struct mrsas_cmd		*cmd;
	struct mrsas_init_frame		*init_frame;
	struct mrsas_init_queue_info	*initq_info;

	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes. For
	 * this frame - since we don't need any SGL - we use SGL's space as
	 * queue info structure
	 */
	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_issue_init_mfi: entry\n"));
	cmd = get_mfi_app_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_issue_init_mfi: get_pkt failed\n"));
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	init_frame = (struct mrsas_init_frame *)cmd->frame;
	/* queue info occupies the SGL area, 64 bytes into the frame */
	initq_info = (struct mrsas_init_queue_info *)
	    ((unsigned long)init_frame + 64);

	(void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
	(void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));

	ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);

	/* reply queue holds one entry more than the max outstanding cmds */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);
	/*
	 * Producer index, consumer index and reply queue live back to
	 * back in mfi_internal_dma_obj (offsets 0, 4 and 8).
	 */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);

	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);

	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &init_frame->cmd, MFI_CMD_OP_INIT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
	/* point the frame at the queue info embedded in its own SGL area */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_lo,
	    cmd->frame_phys_addr + 64);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_hi, 0);

	ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
	    sizeof (struct mrsas_init_queue_info));

	cmd->frame_count = 1;

	/* issue the init frame in polled mode */
	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_issue_init_mfi():failed to "
		    "init firmware"));
		return_mfi_app_pkt(instance, cmd);
		return (DDI_FAILURE);
	}

	/* verify access/DMA handles before declaring success */
	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
		return_mfi_app_pkt(instance, cmd);
		return (DDI_FAILURE);
	}

	return_mfi_app_pkt(instance, cmd);
	con_log(CL_ANN1, (CE_CONT, "mrsas_issue_init_mfi: Done"));

	return (DDI_SUCCESS);
}
3802 /*
3803 * mfi_state_transition_to_ready : Move the FW to READY state
3804 *
3805 * @reg_set : MFI register set
3806 */
3807 int
3808 mfi_state_transition_to_ready(struct mrsas_instance *instance)
3809 {
3810 int i;
3811 uint8_t max_wait;
3812 uint32_t fw_ctrl = 0;
3813 uint32_t fw_state;
3814 uint32_t cur_state;
3815 uint32_t cur_abs_reg_val;
3816 uint32_t prev_abs_reg_val;
3817 uint32_t status;
3818
3819 cur_abs_reg_val =
3820 instance->func_ptr->read_fw_status_reg(instance);
3821 fw_state =
3822 cur_abs_reg_val & MFI_STATE_MASK;
3823 con_log(CL_ANN1, (CE_CONT,
3824 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));
3825
3826 while (fw_state != MFI_STATE_READY) {
3827 con_log(CL_ANN, (CE_CONT,
3828 "mfi_state_transition_to_ready:FW state%x", fw_state));
3829
3830 switch (fw_state) {
3831 case MFI_STATE_FAULT:
3832 con_log(CL_ANN, (CE_NOTE,
3833 "mr_sas: FW in FAULT state!!"));
3834
3835 return (ENODEV);
3836 case MFI_STATE_WAIT_HANDSHAKE:
3837 /* set the CLR bit in IMR0 */
3838 con_log(CL_ANN1, (CE_NOTE,
3839 "mr_sas: FW waiting for HANDSHAKE"));
3840 /*
3841 * PCI_Hot Plug: MFI F/W requires
3842 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3843 * to be set
3844 */
3845 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
3846 if (!instance->tbolt && !instance->skinny) {
3847 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
3848 MFI_INIT_HOTPLUG, instance);
3849 } else {
3850 WR_RESERVED0_REGISTER(MFI_INIT_CLEAR_HANDSHAKE |
3851 MFI_INIT_HOTPLUG, instance);
3852 }
3853 max_wait = (instance->tbolt == 1) ? 180 : 2;
3854 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3855 break;
3856 case MFI_STATE_BOOT_MESSAGE_PENDING:
3857 /* set the CLR bit in IMR0 */
3858 con_log(CL_ANN1, (CE_NOTE,
3859 "mr_sas: FW state boot message pending"));
3860 /*
3861 * PCI_Hot Plug: MFI F/W requires
3862 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3863 * to be set
3864 */
3865 if (!instance->tbolt && !instance->skinny) {
3866 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);
3867 } else {
3868 WR_RESERVED0_REGISTER(MFI_INIT_HOTPLUG,
3869 instance);
3870 }
3871 max_wait = (instance->tbolt == 1) ? 180 : 10;
3872 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3873 break;
3874 case MFI_STATE_OPERATIONAL:
3875 /* bring it to READY state; assuming max wait 2 secs */
3876 instance->func_ptr->disable_intr(instance);
3877 con_log(CL_ANN1, (CE_NOTE,
3878 "mr_sas: FW in OPERATIONAL state"));
3879 /*
3880 * PCI_Hot Plug: MFI F/W requires
3881 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
3882 * to be set
3883 */
3884 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
3885 if (!instance->tbolt && !instance->skinny) {
3886 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
3887 } else {
3888 WR_RESERVED0_REGISTER(MFI_RESET_FLAGS,
3889 instance);
3890
3891 for (i = 0; i < (10 * 1000); i++) {
3892 status =
3893 RD_RESERVED0_REGISTER(instance);
3894 if (status & 1) {
3895 delay(1 *
3896 drv_usectohz(MILLISEC));
3897 } else {
3898 break;
3899 }
3900 }
3901
3902 }
3903 max_wait = (instance->tbolt == 1) ? 180 : 10;
3904 cur_state = MFI_STATE_OPERATIONAL;
3905 break;
3906 case MFI_STATE_UNDEFINED:
3907 /* this state should not last for more than 2 seconds */
3908 con_log(CL_ANN1, (CE_NOTE, "FW state undefined"));
3909
3910 max_wait = (instance->tbolt == 1) ? 180 : 2;
3911 cur_state = MFI_STATE_UNDEFINED;
3912 break;
3913 case MFI_STATE_BB_INIT:
3914 max_wait = (instance->tbolt == 1) ? 180 : 2;
3915 cur_state = MFI_STATE_BB_INIT;
3916 break;
3917 case MFI_STATE_FW_INIT:
3918 max_wait = (instance->tbolt == 1) ? 180 : 2;
3919 cur_state = MFI_STATE_FW_INIT;
3920 break;
3921 case MFI_STATE_FW_INIT_2:
3922 max_wait = 180;
3923 cur_state = MFI_STATE_FW_INIT_2;
3924 break;
3925 case MFI_STATE_DEVICE_SCAN:
3926 max_wait = 180;
3927 cur_state = MFI_STATE_DEVICE_SCAN;
3928 prev_abs_reg_val = cur_abs_reg_val;
3929 con_log(CL_NONE, (CE_NOTE,
3930 "Device scan in progress ...\n"));
3931 break;
3932 case MFI_STATE_FLUSH_CACHE:
3933 max_wait = 180;
3934 cur_state = MFI_STATE_FLUSH_CACHE;
3935 break;
3936 default:
3937 con_log(CL_ANN1, (CE_NOTE,
3938 "mr_sas: Unknown state 0x%x", fw_state));
3939 return (ENODEV);
3940 }
3941
3942 /* the cur_state should not last for more than max_wait secs */
3943 for (i = 0; i < (max_wait * MILLISEC); i++) {
3944 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
3945 cur_abs_reg_val =
3946 instance->func_ptr->read_fw_status_reg(instance);
3947 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3948
3949 if (fw_state == cur_state) {
3950 delay(1 * drv_usectohz(MILLISEC));
3951 } else {
3952 break;
3953 }
3954 }
3955 if (fw_state == MFI_STATE_DEVICE_SCAN) {
3956 if (prev_abs_reg_val != cur_abs_reg_val) {
3957 continue;
3958 }
3959 }
3960
3961 /* return error if fw_state hasn't changed after max_wait */
3962 if (fw_state == cur_state) {
3963 con_log(CL_ANN1, (CE_WARN,
3964 "FW state hasn't changed in %d secs", max_wait));
3965 return (ENODEV);
3966 }
3967 };
3968
3969 /* This may also need to apply to Skinny, but for now, don't worry. */
3970 if (!instance->tbolt && !instance->skinny) {
3971 fw_ctrl = RD_IB_DOORBELL(instance);
3972 con_log(CL_ANN1, (CE_CONT,
3973 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
3974
3975 /*
3976 * Write 0xF to the doorbell register to do the following.
3977 * - Abort all outstanding commands (bit 0).
3978 * - Transition from OPERATIONAL to READY state (bit 1).
3979 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
3980 * - Set to release FW to continue running (i.e. BIOS handshake
3981 * (bit 3).
3982 */
3983 WR_IB_DOORBELL(0xF, instance);
3984 }
3985
3986 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
3987 return (EIO);
3988 }
3989
3990 return (DDI_SUCCESS);
3991 }
3992
3993 /*
3994 * get_seq_num
3995 */
3996 static int
3997 get_seq_num(struct mrsas_instance *instance,
3998 struct mrsas_evt_log_info *eli)
3999 {
4000 int ret = DDI_SUCCESS;
4001
4002 dma_obj_t dcmd_dma_obj;
4003 struct mrsas_cmd *cmd;
4004 struct mrsas_dcmd_frame *dcmd;
4005 struct mrsas_evt_log_info *eli_tmp;
4006 if (instance->tbolt) {
4007 cmd = get_raid_msg_mfi_pkt(instance);
4008 } else {
4009 cmd = mrsas_get_mfi_pkt(instance);
4010 }
4011
4012 if (!cmd) {
4013 cmn_err(CE_WARN, "mr_sas: failed to get a cmd");
4014 DTRACE_PROBE2(seq_num_mfi_err, uint16_t,
4015 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
4016 return (ENOMEM);
4017 }
4018
4019 /* Clear the frame buffer and assign back the context id */
4020 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
4021 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
4022 cmd->index);
4023
4024 dcmd = &cmd->frame->dcmd;
4025
4026 /* allocate the data transfer buffer */
4027 dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info);
4028 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
4029 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
4030 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
4031 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
4032 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
4033
4034 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
4035 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
4036 cmn_err(CE_WARN,
4037 "get_seq_num: could not allocate data transfer buffer.");
4038 return (DDI_FAILURE);
4039 }
4040
4041 (void) memset(dcmd_dma_obj.buffer, 0,
4042 sizeof (struct mrsas_evt_log_info));
4043
4044 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4045
4046 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
4047 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
4048 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
4049 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
4050 MFI_FRAME_DIR_READ);
4051 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
4052 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
4053 sizeof (struct mrsas_evt_log_info));
4054 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4055 MR_DCMD_CTRL_EVENT_GET_INFO);
4056 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
4057 sizeof (struct mrsas_evt_log_info));
4058 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
4059 dcmd_dma_obj.dma_cookie[0].dmac_address);
4060
4061 cmd->sync_cmd = MRSAS_TRUE;
4062 cmd->frame_count = 1;
4063
4064 if (instance->tbolt) {
4065 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4066 }
4067
4068 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
4069 cmn_err(CE_WARN, "get_seq_num: "
4070 "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO");
4071 ret = DDI_FAILURE;
4072 } else {
4073 eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer;
4074 eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle,
4075 &eli_tmp->newest_seq_num);
4076 ret = DDI_SUCCESS;
4077 }
4078
4079 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
4080 ret = DDI_FAILURE;
4081
4082 if (instance->tbolt) {
4083 return_raid_msg_mfi_pkt(instance, cmd);
4084 } else {
4085 mrsas_return_mfi_pkt(instance, cmd);
4086 }
4087
4088 return (ret);
4089 }
4090
4091 /*
4092 * start_mfi_aen
4093 */
4094 static int
4095 start_mfi_aen(struct mrsas_instance *instance)
4096 {
4097 int ret = 0;
4098
4099 struct mrsas_evt_log_info eli;
4100 union mrsas_evt_class_locale class_locale;
4101
4102 /* get the latest sequence number from FW */
4103 (void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info));
4104
4105 if (get_seq_num(instance, &eli)) {
4106 cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num");
4107 return (-1);
4108 }
4109
4110 /* register AEN with FW for latest sequence number plus 1 */
4111 class_locale.members.reserved = 0;
4112 class_locale.members.locale = LE_16(MR_EVT_LOCALE_ALL);
4113 class_locale.members.class = MR_EVT_CLASS_INFO;
4114 class_locale.word = LE_32(class_locale.word);
4115 ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
4116 class_locale.word);
4117
4118 if (ret) {
4119 cmn_err(CE_WARN, "start_mfi_aen: aen registration failed");
4120 return (-1);
4121 }
4122
4123
4124 return (ret);
4125 }
4126
4127 /*
4128 * flush_cache
4129 */
4130 static void
4131 flush_cache(struct mrsas_instance *instance)
4132 {
4133 struct mrsas_cmd *cmd = NULL;
4134 struct mrsas_dcmd_frame *dcmd;
4135 if (instance->tbolt) {
4136 cmd = get_raid_msg_mfi_pkt(instance);
4137 } else {
4138 cmd = mrsas_get_mfi_pkt(instance);
4139 }
4140
4141 if (!cmd) {
4142 con_log(CL_ANN1, (CE_WARN,
4143 "flush_cache():Failed to get a cmd for flush_cache"));
4144 DTRACE_PROBE2(flush_cache_err, uint16_t,
4145 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
4146 return;
4147 }
4148
4149 /* Clear the frame buffer and assign back the context id */
4150 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
4151 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
4152 cmd->index);
4153
4154 dcmd = &cmd->frame->dcmd;
4155
4156 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4157
4158 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
4159 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
4160 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 0);
4161 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
4162 MFI_FRAME_DIR_NONE);
4163 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
4164 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0);
4165 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4166 MR_DCMD_CTRL_CACHE_FLUSH);
4167 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0],
4168 MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE);
4169
4170 cmd->frame_count = 1;
4171
4172 if (instance->tbolt) {
4173 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4174 }
4175
4176 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
4177 con_log(CL_ANN1, (CE_WARN,
4178 "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
4179 }
4180 con_log(CL_ANN1, (CE_CONT, "flush_cache done"));
4181 if (instance->tbolt) {
4182 return_raid_msg_mfi_pkt(instance, cmd);
4183 } else {
4184 mrsas_return_mfi_pkt(instance, cmd);
4185 }
4186
4187 }
4188
4189 /*
4190 * service_mfi_aen- Completes an AEN command
4191 * @instance: Adapter soft state
4192 * @cmd: Command to be completed
4193 *
4194 */
4195 void
4196 service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
4197 {
4198 uint32_t seq_num;
4199 struct mrsas_evt_detail *evt_detail =
4200 (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
4201 int rval = 0;
4202 int tgt = 0;
4203 uint8_t dtype;
4204 #ifdef PDSUPPORT
4205 mrsas_pd_address_t *pd_addr;
4206 #endif
4207 ddi_acc_handle_t acc_handle;
4208
4209 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
4210
4211 acc_handle = cmd->frame_dma_obj.acc_handle;
4212 cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status);
4213 if (cmd->cmd_status == ENODATA) {
4214 cmd->cmd_status = 0;
4215 }
4216
4217 /*
4218 * log the MFI AEN event to the sysevent queue so that
4219 * application will get noticed
4220 */
4221 if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
4222 NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
4223 int instance_no = ddi_get_instance(instance->dip);
4224 con_log(CL_ANN, (CE_WARN,
4225 "mr_sas%d: Failed to log AEN event", instance_no));
4226 }
4227 /*
4228 * Check for any ld devices that has changed state. i.e. online
4229 * or offline.
4230 */
4231 con_log(CL_ANN1, (CE_CONT,
4232 "AEN: code = %x class = %x locale = %x args = %x",
4233 ddi_get32(acc_handle, &evt_detail->code),
4234 evt_detail->cl.members.class,
4235 ddi_get16(acc_handle, &evt_detail->cl.members.locale),
4236 ddi_get8(acc_handle, &evt_detail->arg_type)));
4237
4238 switch (ddi_get32(acc_handle, &evt_detail->code)) {
4239 case MR_EVT_CFG_CLEARED: {
4240 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
4241 if (instance->mr_ld_list[tgt].dip != NULL) {
4242 mutex_enter(&instance->config_dev_mtx);
4243 instance->mr_ld_list[tgt].flag =
4244 (uint8_t)~MRDRV_TGT_VALID;
4245 mutex_exit(&instance->config_dev_mtx);
4246 rval = mrsas_service_evt(instance, tgt, 0,
4247 MRSAS_EVT_UNCONFIG_TGT, NULL);
4248 con_log(CL_ANN1, (CE_WARN,
4249 "mr_sas: CFG CLEARED AEN rval = %d "
4250 "tgt id = %d", rval, tgt));
4251 }
4252 }
4253 break;
4254 }
4255
4256 case MR_EVT_LD_DELETED: {
4257 tgt = ddi_get16(acc_handle, &evt_detail->args.ld.target_id);
4258 mutex_enter(&instance->config_dev_mtx);
4259 instance->mr_ld_list[tgt].flag = (uint8_t)~MRDRV_TGT_VALID;
4260 mutex_exit(&instance->config_dev_mtx);
4261 rval = mrsas_service_evt(instance,
4262 ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
4263 MRSAS_EVT_UNCONFIG_TGT, NULL);
4264 con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d "
4265 "tgt id = %d index = %d", rval,
4266 ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
4267 ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
4268 break;
4269 } /* End of MR_EVT_LD_DELETED */
4270
4271 case MR_EVT_LD_CREATED: {
4272 rval = mrsas_service_evt(instance,
4273 ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
4274 MRSAS_EVT_CONFIG_TGT, NULL);
4275 con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d "
4276 "tgt id = %d index = %d", rval,
4277 ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
4278 ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
4279 break;
4280 } /* End of MR_EVT_LD_CREATED */
4281
4282 #ifdef PDSUPPORT
4283 case MR_EVT_PD_REMOVED_EXT: {
4284 if (instance->tbolt || instance->skinny) {
4285 pd_addr = &evt_detail->args.pd_addr;
4286 dtype = pd_addr->scsi_dev_type;
4287 con_log(CL_DLEVEL1, (CE_NOTE,
4288 " MR_EVT_PD_REMOVED_EXT: dtype = %x,"
4289 " arg_type = %d ", dtype, evt_detail->arg_type));
4290 tgt = ddi_get16(acc_handle,
4291 &evt_detail->args.pd.device_id);
4292 mutex_enter(&instance->config_dev_mtx);
4293 instance->mr_tbolt_pd_list[tgt].flag =
4294 (uint8_t)~MRDRV_TGT_VALID;
4295 mutex_exit(&instance->config_dev_mtx);
4296 rval = mrsas_service_evt(instance, ddi_get16(
4297 acc_handle, &evt_detail->args.pd.device_id),
4298 1, MRSAS_EVT_UNCONFIG_TGT, NULL);
4299 con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
4300 "rval = %d tgt id = %d ", rval,
4301 ddi_get16(acc_handle,
4302 &evt_detail->args.pd.device_id)));
4303 }
4304 break;
4305 } /* End of MR_EVT_PD_REMOVED_EXT */
4306
4307 case MR_EVT_PD_INSERTED_EXT: {
4308 if (instance->tbolt || instance->skinny) {
4309 rval = mrsas_service_evt(instance,
4310 ddi_get16(acc_handle,
4311 &evt_detail->args.pd.device_id),
4312 1, MRSAS_EVT_CONFIG_TGT, NULL);
4313 con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_INSERTEDi_EXT:"
4314 "rval = %d tgt id = %d ", rval,
4315 ddi_get16(acc_handle,
4316 &evt_detail->args.pd.device_id)));
4317 }
4318 break;
4319 } /* End of MR_EVT_PD_INSERTED_EXT */
4320
4321 case MR_EVT_PD_STATE_CHANGE: {
4322 if (instance->tbolt || instance->skinny) {
4323 tgt = ddi_get16(acc_handle,
4324 &evt_detail->args.pd.device_id);
4325 if ((evt_detail->args.pd_state.prevState ==
4326 PD_SYSTEM) &&
4327 (evt_detail->args.pd_state.newState != PD_SYSTEM)) {
4328 mutex_enter(&instance->config_dev_mtx);
4329 instance->mr_tbolt_pd_list[tgt].flag =
4330 (uint8_t)~MRDRV_TGT_VALID;
4331 mutex_exit(&instance->config_dev_mtx);
4332 rval = mrsas_service_evt(instance,
4333 ddi_get16(acc_handle,
4334 &evt_detail->args.pd.device_id),
4335 1, MRSAS_EVT_UNCONFIG_TGT, NULL);
4336 con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
4337 "rval = %d tgt id = %d ", rval,
4338 ddi_get16(acc_handle,
4339 &evt_detail->args.pd.device_id)));
4340 break;
4341 }
4342 if ((evt_detail->args.pd_state.prevState
4343 == UNCONFIGURED_GOOD) &&
4344 (evt_detail->args.pd_state.newState == PD_SYSTEM)) {
4345 rval = mrsas_service_evt(instance,
4346 ddi_get16(acc_handle,
4347 &evt_detail->args.pd.device_id),
4348 1, MRSAS_EVT_CONFIG_TGT, NULL);
4349 con_log(CL_ANN1, (CE_WARN,
4350 "mr_sas: PD_INSERTED: rval = %d "
4351 " tgt id = %d ", rval,
4352 ddi_get16(acc_handle,
4353 &evt_detail->args.pd.device_id)));
4354 break;
4355 }
4356 }
4357 break;
4358 }
4359 #endif
4360
4361 } /* End of Main Switch */
4362
4363 /* get copy of seq_num and class/locale for re-registration */
4364 seq_num = ddi_get32(acc_handle, &evt_detail->seq_num);
4365 seq_num++;
4366 (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
4367 sizeof (struct mrsas_evt_detail));
4368
4369 ddi_put8(acc_handle, &cmd->frame->dcmd.cmd_status, 0x0);
4370 ddi_put32(acc_handle, &cmd->frame->dcmd.mbox.w[0], seq_num);
4371
4372 instance->aen_seq_num = seq_num;
4373
4374 cmd->frame_count = 1;
4375
4376 cmd->retry_count_for_ocr = 0;
4377 cmd->drv_pkt_time = 0;
4378
4379 /* Issue the aen registration frame */
4380 instance->func_ptr->issue_cmd(cmd, instance);
4381 }
4382
4383 /*
4384 * complete_cmd_in_sync_mode - Completes an internal command
4385 * @instance: Adapter soft state
4386 * @cmd: Command to be completed
4387 *
4388 * The issue_cmd_in_sync_mode() function waits for a command to complete
4389 * after it issues a command. This function wakes up that waiting routine by
4390 * calling wake_up() on the wait queue.
4391 */
4392 static void
4393 complete_cmd_in_sync_mode(struct mrsas_instance *instance,
4394 struct mrsas_cmd *cmd)
4395 {
4396 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
4397 &cmd->frame->io.cmd_status);
4398
4399 cmd->sync_cmd = MRSAS_FALSE;
4400
4401 con_log(CL_ANN1, (CE_NOTE, "complete_cmd_in_sync_mode called %p \n",
4402 (void *)cmd));
4403
4404 mutex_enter(&instance->int_cmd_mtx);
4405 if (cmd->cmd_status == ENODATA) {
4406 cmd->cmd_status = 0;
4407 }
4408 cv_broadcast(&instance->int_cmd_cv);
4409 mutex_exit(&instance->int_cmd_mtx);
4410
4411 }
4412
4413 /*
4414 * Call this function inside mrsas_softintr.
4415 * mrsas_initiate_ocr_if_fw_is_faulty - Initiates OCR if FW status is faulty
4416 * @instance: Adapter soft state
4417 */
4418
4419 static uint32_t
4420 mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *instance)
4421 {
4422 uint32_t cur_abs_reg_val;
4423 uint32_t fw_state;
4424
4425 cur_abs_reg_val = instance->func_ptr->read_fw_status_reg(instance);
4426 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
4427 if (fw_state == MFI_STATE_FAULT) {
4428 if (instance->disable_online_ctrl_reset == 1) {
4429 cmn_err(CE_WARN,
4430 "mrsas_initiate_ocr_if_fw_is_faulty: "
4431 "FW in Fault state, detected in ISR: "
4432 "FW doesn't support ocr ");
4433
4434 return (ADAPTER_RESET_NOT_REQUIRED);
4435 } else {
4436 con_log(CL_ANN, (CE_NOTE,
4437 "mrsas_initiate_ocr_if_fw_is_faulty: FW in Fault "
4438 "state, detected in ISR: FW supports ocr "));
4439
4440 return (ADAPTER_RESET_REQUIRED);
4441 }
4442 }
4443
4444 return (ADAPTER_RESET_NOT_REQUIRED);
4445 }
4446
4447 /*
4448 * mrsas_softintr - The Software ISR
4449 * @param arg : HBA soft state
4450 *
4451 * called from high-level interrupt if hi-level interrupt are not there,
4452 * otherwise triggered as a soft interrupt
4453 */
4454 static uint_t
4455 mrsas_softintr(struct mrsas_instance *instance)
4456 {
4457 struct scsi_pkt *pkt;
4458 struct scsa_cmd *acmd;
4459 struct mrsas_cmd *cmd;
4460 struct mlist_head *pos, *next;
4461 mlist_t process_list;
4462 struct mrsas_header *hdr;
4463 struct scsi_arq_status *arqstat;
4464
4465 con_log(CL_ANN1, (CE_NOTE, "mrsas_softintr() called."));
4466
4467 ASSERT(instance);
4468
4469 mutex_enter(&instance->completed_pool_mtx);
4470
4471 if (mlist_empty(&instance->completed_pool_list)) {
4472 mutex_exit(&instance->completed_pool_mtx);
4473 return (DDI_INTR_CLAIMED);
4474 }
4475
4476 instance->softint_running = 1;
4477
4478 INIT_LIST_HEAD(&process_list);
4479 mlist_splice(&instance->completed_pool_list, &process_list);
4480 INIT_LIST_HEAD(&instance->completed_pool_list);
4481
4482 mutex_exit(&instance->completed_pool_mtx);
4483
4484 /* perform all callbacks first, before releasing the SCBs */
4485 mlist_for_each_safe(pos, next, &process_list) {
4486 cmd = mlist_entry(pos, struct mrsas_cmd, list);
4487
4488 /* syncronize the Cmd frame for the controller */
4489 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
4490 0, 0, DDI_DMA_SYNC_FORCPU);
4491
4492 if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
4493 DDI_SUCCESS) {
4494 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
4495 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4496 con_log(CL_ANN1, (CE_WARN,
4497 "mrsas_softintr: "
4498 "FMA check reports DMA handle failure"));
4499 return (DDI_INTR_CLAIMED);
4500 }
4501
4502 hdr = &cmd->frame->hdr;
4503
4504 /* remove the internal command from the process list */
4505 mlist_del_init(&cmd->list);
4506
4507 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
4508 case MFI_CMD_OP_PD_SCSI:
4509 case MFI_CMD_OP_LD_SCSI:
4510 case MFI_CMD_OP_LD_READ:
4511 case MFI_CMD_OP_LD_WRITE:
4512 /*
4513 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
4514 * could have been issued either through an
4515 * IO path or an IOCTL path. If it was via IOCTL,
4516 * we will send it to internal completion.
4517 */
4518 if (cmd->sync_cmd == MRSAS_TRUE) {
4519 complete_cmd_in_sync_mode(instance, cmd);
4520 break;
4521 }
4522
4523 /* regular commands */
4524 acmd = cmd->cmd;
4525 pkt = CMD2PKT(acmd);
4526
4527 if (acmd->cmd_flags & CFLAG_DMAVALID) {
4528 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
4529 (void) ddi_dma_sync(acmd->cmd_dmahandle,
4530 acmd->cmd_dma_offset,
4531 acmd->cmd_dma_len,
4532 DDI_DMA_SYNC_FORCPU);
4533 }
4534 }
4535
4536 pkt->pkt_reason = CMD_CMPLT;
4537 pkt->pkt_statistics = 0;
4538 pkt->pkt_state = STATE_GOT_BUS
4539 | STATE_GOT_TARGET | STATE_SENT_CMD
4540 | STATE_XFERRED_DATA | STATE_GOT_STATUS;
4541
4542 con_log(CL_ANN, (CE_CONT,
4543 "CDB[0] = %x completed for %s: size %lx context %x",
4544 pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
4545 acmd->cmd_dmacount, hdr->context));
4546 DTRACE_PROBE3(softintr_cdb, uint8_t, pkt->pkt_cdbp[0],
4547 uint_t, acmd->cmd_cdblen, ulong_t,
4548 acmd->cmd_dmacount);
4549
4550 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
4551 struct scsi_inquiry *inq;
4552
4553 if (acmd->cmd_dmacount != 0) {
4554 bp_mapin(acmd->cmd_buf);
4555 inq = (struct scsi_inquiry *)
4556 acmd->cmd_buf->b_un.b_addr;
4557
4558 #ifdef PDSUPPORT
4559 if (hdr->cmd_status == MFI_STAT_OK) {
4560 display_scsi_inquiry(
4561 (caddr_t)inq);
4562 }
4563 #else
4564 /* don't expose physical drives to OS */
4565 if (acmd->islogical &&
4566 (hdr->cmd_status == MFI_STAT_OK)) {
4567 display_scsi_inquiry(
4568 (caddr_t)inq);
4569 } else if ((hdr->cmd_status ==
4570 MFI_STAT_OK) && inq->inq_dtype ==
4571 DTYPE_DIRECT) {
4572
4573 display_scsi_inquiry(
4574 (caddr_t)inq);
4575
4576 /* for physical disk */
4577 hdr->cmd_status =
4578 MFI_STAT_DEVICE_NOT_FOUND;
4579 }
4580 #endif /* PDSUPPORT */
4581 }
4582 }
4583
4584 DTRACE_PROBE2(softintr_done, uint8_t, hdr->cmd,
4585 uint8_t, hdr->cmd_status);
4586
4587 switch (hdr->cmd_status) {
4588 case MFI_STAT_OK:
4589 pkt->pkt_scbp[0] = STATUS_GOOD;
4590 break;
4591 case MFI_STAT_LD_CC_IN_PROGRESS:
4592 case MFI_STAT_LD_RECON_IN_PROGRESS:
4593 pkt->pkt_scbp[0] = STATUS_GOOD;
4594 break;
4595 case MFI_STAT_LD_INIT_IN_PROGRESS:
4596 con_log(CL_ANN,
4597 (CE_WARN, "Initialization in Progress"));
4598 pkt->pkt_reason = CMD_TRAN_ERR;
4599
4600 break;
4601 case MFI_STAT_SCSI_DONE_WITH_ERROR:
4602 con_log(CL_ANN, (CE_CONT, "scsi_done error"));
4603
4604 pkt->pkt_reason = CMD_CMPLT;
4605 ((struct scsi_status *)
4606 pkt->pkt_scbp)->sts_chk = 1;
4607
4608 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
4609 con_log(CL_ANN,
4610 (CE_WARN, "TEST_UNIT_READY fail"));
4611 } else {
4612 pkt->pkt_state |= STATE_ARQ_DONE;
4613 arqstat = (void *)(pkt->pkt_scbp);
4614 arqstat->sts_rqpkt_reason = CMD_CMPLT;
4615 arqstat->sts_rqpkt_resid = 0;
4616 arqstat->sts_rqpkt_state |=
4617 STATE_GOT_BUS | STATE_GOT_TARGET
4618 | STATE_SENT_CMD
4619 | STATE_XFERRED_DATA;
4620 *(uint8_t *)&arqstat->sts_rqpkt_status =
4621 STATUS_GOOD;
4622 ddi_rep_get8(
4623 cmd->frame_dma_obj.acc_handle,
4624 (uint8_t *)
4625 &(arqstat->sts_sensedata),
4626 cmd->sense,
4627 sizeof (struct scsi_extended_sense),
4628 DDI_DEV_AUTOINCR);
4629 }
4630 break;
4631 case MFI_STAT_LD_OFFLINE:
4632 case MFI_STAT_DEVICE_NOT_FOUND:
4633 con_log(CL_ANN, (CE_CONT,
4634 "mrsas_softintr:device not found error"));
4635 pkt->pkt_reason = CMD_DEV_GONE;
4636 pkt->pkt_statistics = STAT_DISCON;
4637 break;
4638 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
4639 pkt->pkt_state |= STATE_ARQ_DONE;
4640 pkt->pkt_reason = CMD_CMPLT;
4641 ((struct scsi_status *)
4642 pkt->pkt_scbp)->sts_chk = 1;
4643
4644 arqstat = (void *)(pkt->pkt_scbp);
4645 arqstat->sts_rqpkt_reason = CMD_CMPLT;
4646 arqstat->sts_rqpkt_resid = 0;
4647 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
4648 | STATE_GOT_TARGET | STATE_SENT_CMD
4649 | STATE_XFERRED_DATA;
4650 *(uint8_t *)&arqstat->sts_rqpkt_status =
4651 STATUS_GOOD;
4652
4653 arqstat->sts_sensedata.es_valid = 1;
4654 arqstat->sts_sensedata.es_key =
4655 KEY_ILLEGAL_REQUEST;
4656 arqstat->sts_sensedata.es_class =
4657 CLASS_EXTENDED_SENSE;
4658
4659 /*
4660 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
4661 * ASC: 0x21h; ASCQ: 0x00h;
4662 */
4663 arqstat->sts_sensedata.es_add_code = 0x21;
4664 arqstat->sts_sensedata.es_qual_code = 0x00;
4665
4666 break;
4667
4668 default:
4669 con_log(CL_ANN, (CE_CONT, "Unknown status!"));
4670 pkt->pkt_reason = CMD_TRAN_ERR;
4671
4672 break;
4673 }
4674
4675 atomic_add_16(&instance->fw_outstanding, (-1));
4676
4677 (void) mrsas_common_check(instance, cmd);
4678
4679 if (acmd->cmd_dmahandle) {
4680 if (mrsas_check_dma_handle(
4681 acmd->cmd_dmahandle) != DDI_SUCCESS) {
4682 ddi_fm_service_impact(instance->dip,
4683 DDI_SERVICE_UNAFFECTED);
4684 pkt->pkt_reason = CMD_TRAN_ERR;
4685 pkt->pkt_statistics = 0;
4686 }
4687 }
4688
4689 mrsas_return_mfi_pkt(instance, cmd);
4690
4691 /* Call the callback routine */
4692 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
4693 pkt->pkt_comp) {
4694 (*pkt->pkt_comp)(pkt);
4695 }
4696
4697 break;
4698
4699 case MFI_CMD_OP_SMP:
4700 case MFI_CMD_OP_STP:
4701 complete_cmd_in_sync_mode(instance, cmd);
4702 break;
4703
4704 case MFI_CMD_OP_DCMD:
4705 /* see if got an event notification */
4706 if (ddi_get32(cmd->frame_dma_obj.acc_handle,
4707 &cmd->frame->dcmd.opcode) ==
4708 MR_DCMD_CTRL_EVENT_WAIT) {
4709 if ((instance->aen_cmd == cmd) &&
4710 (instance->aen_cmd->abort_aen)) {
4711 con_log(CL_ANN, (CE_WARN,
4712 "mrsas_softintr: "
4713 "aborted_aen returned"));
4714 } else {
4715 atomic_add_16(&instance->fw_outstanding,
4716 (-1));
4717 service_mfi_aen(instance, cmd);
4718 }
4719 } else {
4720 complete_cmd_in_sync_mode(instance, cmd);
4721 }
4722
4723 break;
4724
4725 case MFI_CMD_OP_ABORT:
4726 con_log(CL_ANN, (CE_NOTE, "MFI_CMD_OP_ABORT complete"));
4727 /*
4728 * MFI_CMD_OP_ABORT successfully completed
4729 * in the synchronous mode
4730 */
4731 complete_cmd_in_sync_mode(instance, cmd);
4732 break;
4733
4734 default:
4735 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
4736 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4737
4738 if (cmd->pkt != NULL) {
4739 pkt = cmd->pkt;
4740 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
4741 pkt->pkt_comp) {
4742
4743 con_log(CL_ANN1, (CE_CONT, "posting to "
4744 "scsa cmd %p index %x pkt %p"
4745 "time %llx, default ", (void *)cmd,
4746 cmd->index, (void *)pkt,
4747 gethrtime()));
4748
4749 (*pkt->pkt_comp)(pkt);
4750
4751 }
4752 }
4753 con_log(CL_ANN, (CE_WARN, "Cmd type unknown !"));
4754 break;
4755 }
4756 }
4757
4758 instance->softint_running = 0;
4759
4760 return (DDI_INTR_CLAIMED);
4761 }
4762
4763 /*
4764 * mrsas_alloc_dma_obj
4765 *
4766 * Allocate the memory and other resources for an dma object.
4767 */
4768 int
4769 mrsas_alloc_dma_obj(struct mrsas_instance *instance, dma_obj_t *obj,
4770 uchar_t endian_flags)
4771 {
4772 int i;
4773 size_t alen = 0;
4774 uint_t cookie_cnt;
4775 struct ddi_device_acc_attr tmp_endian_attr;
4776
4777 tmp_endian_attr = endian_attr;
4778 tmp_endian_attr.devacc_attr_endian_flags = endian_flags;
4779 tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4780
4781 i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
4782 DDI_DMA_SLEEP, NULL, &obj->dma_handle);
4783 if (i != DDI_SUCCESS) {
4784
4785 switch (i) {
4786 case DDI_DMA_BADATTR :
4787 con_log(CL_ANN, (CE_WARN,
4788 "Failed ddi_dma_alloc_handle- Bad attribute"));
4789 break;
4790 case DDI_DMA_NORESOURCES :
4791 con_log(CL_ANN, (CE_WARN,
4792 "Failed ddi_dma_alloc_handle- No Resources"));
4793 break;
4794 default :
4795 con_log(CL_ANN, (CE_WARN,
4796 "Failed ddi_dma_alloc_handle: "
4797 "unknown status %d", i));
4798 break;
4799 }
4800
4801 return (-1);
4802 }
4803
4804 if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
4805 DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
4806 &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
4807 alen < obj->size) {
4808
4809 ddi_dma_free_handle(&obj->dma_handle);
4810
4811 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
4812
4813 return (-1);
4814 }
4815
4816 if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
4817 obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
4818 NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {
4819
4820 ddi_dma_mem_free(&obj->acc_handle);
4821 ddi_dma_free_handle(&obj->dma_handle);
4822
4823 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
4824
4825 return (-1);
4826 }
4827
4828 if (mrsas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
4829 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4830 return (-1);
4831 }
4832
4833 if (mrsas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
4834 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4835 return (-1);
4836 }
4837
4838 return (cookie_cnt);
4839 }
4840
4841 /*
4842 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
4843 *
4844 * De-allocate the memory and other resources for an dma object, which must
4845 * have been alloated by a previous call to mrsas_alloc_dma_obj()
4846 */
4847 int
4848 mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj)
4849 {
4850
4851 if ((obj.dma_handle == NULL) || (obj.acc_handle == NULL)) {
4852 return (DDI_SUCCESS);
4853 }
4854
4855 /*
4856 * NOTE: These check-handle functions fail if *_handle == NULL, but
4857 * this function succeeds because of the previous check.
4858 */
4859 if (mrsas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
4860 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4861 return (DDI_FAILURE);
4862 }
4863
4864 if (mrsas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
4865 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4866 return (DDI_FAILURE);
4867 }
4868
4869 (void) ddi_dma_unbind_handle(obj.dma_handle);
4870 ddi_dma_mem_free(&obj.acc_handle);
4871 ddi_dma_free_handle(&obj.dma_handle);
4872 obj.acc_handle = NULL;
4873 return (DDI_SUCCESS);
4874 }
4875
4876 /*
4877 * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
4878 * int, int (*)())
4879 *
4880 * Allocate dma resources for a new scsi command
4881 */
4882 int
4883 mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt,
4884 struct buf *bp, int flags, int (*callback)())
4885 {
4886 int dma_flags;
4887 int (*cb)(caddr_t);
4888 int i;
4889
4890 ddi_dma_attr_t tmp_dma_attr = mrsas_generic_dma_attr;
4891 struct scsa_cmd *acmd = PKT2CMD(pkt);
4892
4893 acmd->cmd_buf = bp;
4894
4895 if (bp->b_flags & B_READ) {
4896 acmd->cmd_flags &= ~CFLAG_DMASEND;
4897 dma_flags = DDI_DMA_READ;
4898 } else {
4899 acmd->cmd_flags |= CFLAG_DMASEND;
4900 dma_flags = DDI_DMA_WRITE;
4901 }
4902
4903 if (flags & PKT_CONSISTENT) {
4904 acmd->cmd_flags |= CFLAG_CONSISTENT;
4905 dma_flags |= DDI_DMA_CONSISTENT;
4906 }
4907
4908 if (flags & PKT_DMA_PARTIAL) {
4909 dma_flags |= DDI_DMA_PARTIAL;
4910 }
4911
4912 dma_flags |= DDI_DMA_REDZONE;
4913
4914 cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
4915
4916 tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
4917 tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
4918 if (instance->tbolt) {
4919 /* OCR-RESET FIX */
4920 tmp_dma_attr.dma_attr_count_max =
4921 (U64)mrsas_tbolt_max_cap_maxxfer; /* limit to 256K */
4922 tmp_dma_attr.dma_attr_maxxfer =
4923 (U64)mrsas_tbolt_max_cap_maxxfer; /* limit to 256K */
4924 }
4925
4926 if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
4927 cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
4928 switch (i) {
4929 case DDI_DMA_BADATTR:
4930 bioerror(bp, EFAULT);
4931 return (DDI_FAILURE);
4932
4933 case DDI_DMA_NORESOURCES:
4934 bioerror(bp, 0);
4935 return (DDI_FAILURE);
4936
4937 default:
4938 con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
4939 "impossible result (0x%x)", i));
4940 bioerror(bp, EFAULT);
4941 return (DDI_FAILURE);
4942 }
4943 }
4944
4945 i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
4946 cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);
4947
4948 switch (i) {
4949 case DDI_DMA_PARTIAL_MAP:
4950 if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
4951 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
4952 "DDI_DMA_PARTIAL_MAP impossible"));
4953 goto no_dma_cookies;
4954 }
4955
4956 if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
4957 DDI_FAILURE) {
4958 con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed"));
4959 goto no_dma_cookies;
4960 }
4961
4962 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
4963 &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
4964 &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
4965 DDI_FAILURE) {
4966
4967 con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed"));
4968 goto no_dma_cookies;
4969 }
4970
4971 goto get_dma_cookies;
4972 case DDI_DMA_MAPPED:
4973 acmd->cmd_nwin = 1;
4974 acmd->cmd_dma_len = 0;
4975 acmd->cmd_dma_offset = 0;
4976
4977 get_dma_cookies:
4978 i = 0;
4979 acmd->cmd_dmacount = 0;
4980 for (;;) {
4981 acmd->cmd_dmacount +=
4982 acmd->cmd_dmacookies[i++].dmac_size;
4983
4984 if (i == instance->max_num_sge ||
4985 i == acmd->cmd_ncookies)
4986 break;
4987
4988 ddi_dma_nextcookie(acmd->cmd_dmahandle,
4989 &acmd->cmd_dmacookies[i]);
4990 }
4991
4992 acmd->cmd_cookie = i;
4993 acmd->cmd_cookiecnt = i;
4994
4995 acmd->cmd_flags |= CFLAG_DMAVALID;
4996
4997 if (bp->b_bcount >= acmd->cmd_dmacount) {
4998 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
4999 } else {
5000 pkt->pkt_resid = 0;
5001 }
5002
5003 return (DDI_SUCCESS);
5004 case DDI_DMA_NORESOURCES:
5005 bioerror(bp, 0);
5006 break;
5007 case DDI_DMA_NOMAPPING:
5008 bioerror(bp, EFAULT);
5009 break;
5010 case DDI_DMA_TOOBIG:
5011 bioerror(bp, EINVAL);
5012 break;
5013 case DDI_DMA_INUSE:
5014 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
5015 " DDI_DMA_INUSE impossible"));
5016 break;
5017 default:
5018 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
5019 "impossible result (0x%x)", i));
5020 break;
5021 }
5022
5023 no_dma_cookies:
5024 ddi_dma_free_handle(&acmd->cmd_dmahandle);
5025 acmd->cmd_dmahandle = NULL;
5026 acmd->cmd_flags &= ~CFLAG_DMAVALID;
5027 return (DDI_FAILURE);
5028 }
5029
5030 /*
5031 * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *)
5032 *
5033 * move dma resources to next dma window
5034 *
5035 */
5036 int
5037 mrsas_dma_move(struct mrsas_instance *instance, struct scsi_pkt *pkt,
5038 struct buf *bp)
5039 {
5040 int i = 0;
5041
5042 struct scsa_cmd *acmd = PKT2CMD(pkt);
5043
5044 /*
5045 * If there are no more cookies remaining in this window,
5046 * must move to the next window first.
5047 */
5048 if (acmd->cmd_cookie == acmd->cmd_ncookies) {
5049 if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
5050 return (DDI_SUCCESS);
5051 }
5052
5053 /* at last window, cannot move */
5054 if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
5055 return (DDI_FAILURE);
5056 }
5057
5058 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
5059 &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
5060 &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
5061 DDI_FAILURE) {
5062 return (DDI_FAILURE);
5063 }
5064
5065 acmd->cmd_cookie = 0;
5066 } else {
5067 /* still more cookies in this window - get the next one */
5068 ddi_dma_nextcookie(acmd->cmd_dmahandle,
5069 &acmd->cmd_dmacookies[0]);
5070 }
5071
5072 /* get remaining cookies in this window, up to our maximum */
5073 for (;;) {
5074 acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
5075 acmd->cmd_cookie++;
5076
5077 if (i == instance->max_num_sge ||
5078 acmd->cmd_cookie == acmd->cmd_ncookies) {
5079 break;
5080 }
5081
5082 ddi_dma_nextcookie(acmd->cmd_dmahandle,
5083 &acmd->cmd_dmacookies[i]);
5084 }
5085
5086 acmd->cmd_cookiecnt = i;
5087
5088 if (bp->b_bcount >= acmd->cmd_dmacount) {
5089 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
5090 } else {
5091 pkt->pkt_resid = 0;
5092 }
5093
5094 return (DDI_SUCCESS);
5095 }
5096
5097 /*
5098 * build_cmd
5099 */
5100 static struct mrsas_cmd *
5101 build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
5102 struct scsi_pkt *pkt, uchar_t *cmd_done)
5103 {
5104 uint16_t flags = 0;
5105 uint32_t i;
5106 uint32_t context;
5107 uint32_t sge_bytes;
5108 uint32_t tmp_data_xfer_len;
5109 ddi_acc_handle_t acc_handle;
5110 struct mrsas_cmd *cmd;
5111 struct mrsas_sge64 *mfi_sgl;
5112 struct mrsas_sge_ieee *mfi_sgl_ieee;
5113 struct scsa_cmd *acmd = PKT2CMD(pkt);
5114 struct mrsas_pthru_frame *pthru;
5115 struct mrsas_io_frame *ldio;
5116
5117 /* find out if this is logical or physical drive command. */
5118 acmd->islogical = MRDRV_IS_LOGICAL(ap);
5119 acmd->device_id = MAP_DEVICE_ID(instance, ap);
5120 *cmd_done = 0;
5121
5122 /* get the command packet */
5123 if (!(cmd = mrsas_get_mfi_pkt(instance))) {
5124 DTRACE_PROBE2(build_cmd_mfi_err, uint16_t,
5125 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
5126 return (NULL);
5127 }
5128
5129 acc_handle = cmd->frame_dma_obj.acc_handle;
5130
5131 /* Clear the frame buffer and assign back the context id */
5132 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
5133 ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index);
5134
5135 cmd->pkt = pkt;
5136 cmd->cmd = acmd;
5137 DTRACE_PROBE3(build_cmds, uint8_t, pkt->pkt_cdbp[0],
5138 ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len);
5139
5140 /* lets get the command directions */
5141 if (acmd->cmd_flags & CFLAG_DMASEND) {
5142 flags = MFI_FRAME_DIR_WRITE;
5143
5144 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
5145 (void) ddi_dma_sync(acmd->cmd_dmahandle,
5146 acmd->cmd_dma_offset, acmd->cmd_dma_len,
5147 DDI_DMA_SYNC_FORDEV);
5148 }
5149 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
5150 flags = MFI_FRAME_DIR_READ;
5151
5152 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
5153 (void) ddi_dma_sync(acmd->cmd_dmahandle,
5154 acmd->cmd_dma_offset, acmd->cmd_dma_len,
5155 DDI_DMA_SYNC_FORCPU);
5156 }
5157 } else {
5158 flags = MFI_FRAME_DIR_NONE;
5159 }
5160
5161 if (instance->flag_ieee) {
5162 flags |= MFI_FRAME_IEEE;
5163 }
5164 flags |= MFI_FRAME_SGL64;
5165
5166 switch (pkt->pkt_cdbp[0]) {
5167
5168 /*
5169 * case SCMD_SYNCHRONIZE_CACHE:
5170 * flush_cache(instance);
5171 * mrsas_return_mfi_pkt(instance, cmd);
5172 * *cmd_done = 1;
5173 *
5174 * return (NULL);
5175 */
5176
5177 case SCMD_READ:
5178 case SCMD_WRITE:
5179 case SCMD_READ_G1:
5180 case SCMD_WRITE_G1:
5181 case SCMD_READ_G4:
5182 case SCMD_WRITE_G4:
5183 case SCMD_READ_G5:
5184 case SCMD_WRITE_G5:
5185 if (acmd->islogical) {
5186 ldio = (struct mrsas_io_frame *)cmd->frame;
5187
5188 /*
5189 * preare the Logical IO frame:
5190 * 2nd bit is zero for all read cmds
5191 */
5192 ddi_put8(acc_handle, &ldio->cmd,
5193 (pkt->pkt_cdbp[0] & 0x02) ? MFI_CMD_OP_LD_WRITE
5194 : MFI_CMD_OP_LD_READ);
5195 ddi_put8(acc_handle, &ldio->cmd_status, 0x0);
5196 ddi_put8(acc_handle, &ldio->scsi_status, 0x0);
5197 ddi_put8(acc_handle, &ldio->target_id, acmd->device_id);
5198 ddi_put16(acc_handle, &ldio->timeout, 0);
5199 ddi_put8(acc_handle, &ldio->reserved_0, 0);
5200 ddi_put16(acc_handle, &ldio->pad_0, 0);
5201 ddi_put16(acc_handle, &ldio->flags, flags);
5202
5203 /* Initialize sense Information */
5204 bzero(cmd->sense, SENSE_LENGTH);
5205 ddi_put8(acc_handle, &ldio->sense_len, SENSE_LENGTH);
5206 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_hi, 0);
5207 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_lo,
5208 cmd->sense_phys_addr);
5209 ddi_put32(acc_handle, &ldio->start_lba_hi, 0);
5210 ddi_put8(acc_handle, &ldio->access_byte,
5211 (acmd->cmd_cdblen != 6) ? pkt->pkt_cdbp[1] : 0);
5212 ddi_put8(acc_handle, &ldio->sge_count,
5213 acmd->cmd_cookiecnt);
5214 if (instance->flag_ieee) {
5215 mfi_sgl_ieee =
5216 (struct mrsas_sge_ieee *)&ldio->sgl;
5217 } else {
5218 mfi_sgl = (struct mrsas_sge64 *)&ldio->sgl;
5219 }
5220
5221 context = ddi_get32(acc_handle, &ldio->context);
5222
5223 if (acmd->cmd_cdblen == CDB_GROUP0) {
5224 /* 6-byte cdb */
5225 ddi_put32(acc_handle, &ldio->lba_count, (
5226 (uint16_t)(pkt->pkt_cdbp[4])));
5227
5228 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5229 ((uint32_t)(pkt->pkt_cdbp[3])) |
5230 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
5231 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
5232 << 16)));
5233 } else if (acmd->cmd_cdblen == CDB_GROUP1) {
5234 /* 10-byte cdb */
5235 ddi_put32(acc_handle, &ldio->lba_count, (
5236 ((uint16_t)(pkt->pkt_cdbp[8])) |
5237 ((uint16_t)(pkt->pkt_cdbp[7]) << 8)));
5238
5239 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5240 ((uint32_t)(pkt->pkt_cdbp[5])) |
5241 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
5242 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
5243 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
5244 } else if (acmd->cmd_cdblen == CDB_GROUP5) {
5245 /* 12-byte cdb */
5246 ddi_put32(acc_handle, &ldio->lba_count, (
5247 ((uint32_t)(pkt->pkt_cdbp[9])) |
5248 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
5249 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
5250 ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));
5251
5252 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5253 ((uint32_t)(pkt->pkt_cdbp[5])) |
5254 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
5255 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
5256 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
5257 } else if (acmd->cmd_cdblen == CDB_GROUP4) {
5258 /* 16-byte cdb */
5259 ddi_put32(acc_handle, &ldio->lba_count, (
5260 ((uint32_t)(pkt->pkt_cdbp[13])) |
5261 ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
5262 ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
5263 ((uint32_t)(pkt->pkt_cdbp[10]) << 24)));
5264
5265 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5266 ((uint32_t)(pkt->pkt_cdbp[9])) |
5267 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
5268 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
5269 ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));
5270
5271 ddi_put32(acc_handle, &ldio->start_lba_hi, (
5272 ((uint32_t)(pkt->pkt_cdbp[5])) |
5273 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
5274 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
5275 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
5276 }
5277
5278 break;
5279 }
5280 /* fall through For all non-rd/wr and physical disk cmds */
5281 default:
5282
5283 switch (pkt->pkt_cdbp[0]) {
5284 case SCMD_MODE_SENSE:
5285 case SCMD_MODE_SENSE_G1: {
5286 union scsi_cdb *cdbp;
5287 uint16_t page_code;
5288
5289 cdbp = (void *)pkt->pkt_cdbp;
5290 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
5291 switch (page_code) {
5292 case 0x3:
5293 case 0x4:
5294 (void) mrsas_mode_sense_build(pkt);
5295 mrsas_return_mfi_pkt(instance, cmd);
5296 *cmd_done = 1;
5297 return (NULL);
5298 }
5299 break;
5300 }
5301 default:
5302 break;
5303 }
5304
5305 pthru = (struct mrsas_pthru_frame *)cmd->frame;
5306
5307 /* prepare the DCDB frame */
5308 ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ?
5309 MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI);
5310 ddi_put8(acc_handle, &pthru->cmd_status, 0x0);
5311 ddi_put8(acc_handle, &pthru->scsi_status, 0x0);
5312 ddi_put8(acc_handle, &pthru->target_id, acmd->device_id);
5313 ddi_put8(acc_handle, &pthru->lun, 0);
5314 ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen);
5315 ddi_put16(acc_handle, &pthru->timeout, 0);
5316 ddi_put16(acc_handle, &pthru->flags, flags);
5317 tmp_data_xfer_len = 0;
5318 for (i = 0; i < acmd->cmd_cookiecnt; i++) {
5319 tmp_data_xfer_len += acmd->cmd_dmacookies[i].dmac_size;
5320 }
5321 ddi_put32(acc_handle, &pthru->data_xfer_len,
5322 tmp_data_xfer_len);
5323 ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt);
5324 if (instance->flag_ieee) {
5325 mfi_sgl_ieee = (struct mrsas_sge_ieee *)&pthru->sgl;
5326 } else {
5327 mfi_sgl = (struct mrsas_sge64 *)&pthru->sgl;
5328 }
5329
5330 bzero(cmd->sense, SENSE_LENGTH);
5331 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
5332 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
5333 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo,
5334 cmd->sense_phys_addr);
5335
5336 context = ddi_get32(acc_handle, &pthru->context);
5337 ddi_rep_put8(acc_handle, (uint8_t *)pkt->pkt_cdbp,
5338 (uint8_t *)pthru->cdb, acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
5339
5340 break;
5341 }
5342 #ifdef lint
5343 context = context;
5344 #endif
5345 /* prepare the scatter-gather list for the firmware */
5346 if (instance->flag_ieee) {
5347 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl_ieee++) {
5348 ddi_put64(acc_handle, &mfi_sgl_ieee->phys_addr,
5349 acmd->cmd_dmacookies[i].dmac_laddress);
5350 ddi_put32(acc_handle, &mfi_sgl_ieee->length,
5351 acmd->cmd_dmacookies[i].dmac_size);
5352 }
5353 sge_bytes = sizeof (struct mrsas_sge_ieee)*acmd->cmd_cookiecnt;
5354 } else {
5355 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) {
5356 ddi_put64(acc_handle, &mfi_sgl->phys_addr,
5357 acmd->cmd_dmacookies[i].dmac_laddress);
5358 ddi_put32(acc_handle, &mfi_sgl->length,
5359 acmd->cmd_dmacookies[i].dmac_size);
5360 }
5361 sge_bytes = sizeof (struct mrsas_sge64)*acmd->cmd_cookiecnt;
5362 }
5363
5364 cmd->frame_count = (sge_bytes / MRMFI_FRAME_SIZE) +
5365 ((sge_bytes % MRMFI_FRAME_SIZE) ? 1 : 0) + 1;
5366
5367 if (cmd->frame_count >= 8) {
5368 cmd->frame_count = 8;
5369 }
5370
5371 return (cmd);
5372 }
5373
5374 /*
5375 * wait_for_outstanding - Wait for all outstanding cmds
5376 * @instance: Adapter soft state
5377 *
5378 * This function waits for upto MRDRV_RESET_WAIT_TIME seconds for FW to
5379 * complete all its outstanding commands. Returns error if one or more IOs
5380 * are pending after this time period.
5381 */
5382 static int
5383 wait_for_outstanding(struct mrsas_instance *instance)
5384 {
5385 int i;
5386 uint32_t wait_time = 90;
5387
5388 for (i = 0; i < wait_time; i++) {
5389 if (!instance->fw_outstanding) {
5390 break;
5391 }
5392
5393 drv_usecwait(MILLISEC); /* wait for 1000 usecs */;
5394 }
5395
5396 if (instance->fw_outstanding) {
5397 return (1);
5398 }
5399
5400 return (0);
5401 }
5402
5403 /*
5404 * issue_mfi_pthru
5405 */
5406 static int
5407 issue_mfi_pthru(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5408 struct mrsas_cmd *cmd, int mode)
5409 {
5410 void *ubuf;
5411 uint32_t kphys_addr = 0;
5412 uint32_t xferlen = 0;
5413 uint32_t new_xfer_length = 0;
5414 uint_t model;
5415 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5416 dma_obj_t pthru_dma_obj;
5417 struct mrsas_pthru_frame *kpthru;
5418 struct mrsas_pthru_frame *pthru;
5419 int i;
5420 pthru = &cmd->frame->pthru;
5421 kpthru = (struct mrsas_pthru_frame *)&ioctl->frame[0];
5422
5423 if (instance->adapterresetinprogress) {
5424 con_log(CL_ANN1, (CE_WARN, "issue_mfi_pthru: Reset flag set, "
5425 "returning mfi_pkt and setting TRAN_BUSY\n"));
5426 return (DDI_FAILURE);
5427 }
5428 model = ddi_model_convert_from(mode & FMODELS);
5429 if (model == DDI_MODEL_ILP32) {
5430 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5431
5432 xferlen = kpthru->sgl.sge32[0].length;
5433
5434 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5435 } else {
5436 #ifdef _ILP32
5437 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5438 xferlen = kpthru->sgl.sge32[0].length;
5439 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5440 #else
5441 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP64"));
5442 xferlen = kpthru->sgl.sge64[0].length;
5443 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr;
5444 #endif
5445 }
5446
5447 if (xferlen) {
5448 /* means IOCTL requires DMA */
5449 /* allocate the data transfer buffer */
5450 /* pthru_dma_obj.size = xferlen; */
5451 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length,
5452 PAGESIZE);
5453 pthru_dma_obj.size = new_xfer_length;
5454 pthru_dma_obj.dma_attr = mrsas_generic_dma_attr;
5455 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5456 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5457 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1;
5458 pthru_dma_obj.dma_attr.dma_attr_align = 1;
5459
5460 /* allocate kernel buffer for DMA */
5461 if (mrsas_alloc_dma_obj(instance, &pthru_dma_obj,
5462 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5463 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
5464 "could not allocate data transfer buffer."));
5465 return (DDI_FAILURE);
5466 }
5467 (void) memset(pthru_dma_obj.buffer, 0, xferlen);
5468
5469 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5470 if (kpthru->flags & MFI_FRAME_DIR_WRITE) {
5471 for (i = 0; i < xferlen; i++) {
5472 if (ddi_copyin((uint8_t *)ubuf+i,
5473 (uint8_t *)pthru_dma_obj.buffer+i,
5474 1, mode)) {
5475 con_log(CL_ANN, (CE_WARN,
5476 "issue_mfi_pthru : "
5477 "copy from user space failed"));
5478 return (DDI_FAILURE);
5479 }
5480 }
5481 }
5482
5483 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address;
5484 }
5485
5486 ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd);
5487 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
5488 ddi_put8(acc_handle, &pthru->cmd_status, 0);
5489 ddi_put8(acc_handle, &pthru->scsi_status, 0);
5490 ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id);
5491 ddi_put8(acc_handle, &pthru->lun, kpthru->lun);
5492 ddi_put8(acc_handle, &pthru->cdb_len, kpthru->cdb_len);
5493 ddi_put8(acc_handle, &pthru->sge_count, kpthru->sge_count);
5494 ddi_put16(acc_handle, &pthru->timeout, kpthru->timeout);
5495 ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len);
5496
5497 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
5498 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
5499 /* ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); */
5500
5501 ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb,
5502 pthru->cdb_len, DDI_DEV_AUTOINCR);
5503
5504 ddi_put16(acc_handle, &pthru->flags, kpthru->flags & ~MFI_FRAME_SGL64);
5505 ddi_put32(acc_handle, &pthru->sgl.sge32[0].length, xferlen);
5506 ddi_put32(acc_handle, &pthru->sgl.sge32[0].phys_addr, kphys_addr);
5507
5508 cmd->sync_cmd = MRSAS_TRUE;
5509 cmd->frame_count = 1;
5510
5511 if (instance->tbolt) {
5512 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5513 }
5514
5515 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5516 con_log(CL_ANN, (CE_WARN,
5517 "issue_mfi_pthru: fw_ioctl failed"));
5518 } else {
5519 if (xferlen && kpthru->flags & MFI_FRAME_DIR_READ) {
5520 for (i = 0; i < xferlen; i++) {
5521 if (ddi_copyout(
5522 (uint8_t *)pthru_dma_obj.buffer+i,
5523 (uint8_t *)ubuf+i, 1, mode)) {
5524 con_log(CL_ANN, (CE_WARN,
5525 "issue_mfi_pthru : "
5526 "copy to user space failed"));
5527 return (DDI_FAILURE);
5528 }
5529 }
5530 }
5531 }
5532
5533 kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status);
5534 kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status);
5535
5536 con_log(CL_ANN, (CE_CONT, "issue_mfi_pthru: cmd_status %x, "
5537 "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status));
5538 DTRACE_PROBE3(issue_pthru, uint8_t, kpthru->cmd, uint8_t,
5539 kpthru->cmd_status, uint8_t, kpthru->scsi_status);
5540
5541 if (kpthru->sense_len) {
5542 uint_t sense_len = SENSE_LENGTH;
5543 void *sense_ubuf =
5544 (void *)(ulong_t)kpthru->sense_buf_phys_addr_lo;
5545 if (kpthru->sense_len <= SENSE_LENGTH) {
5546 sense_len = kpthru->sense_len;
5547 }
5548
5549 for (i = 0; i < sense_len; i++) {
5550 if (ddi_copyout(
5551 (uint8_t *)cmd->sense+i,
5552 (uint8_t *)sense_ubuf+i, 1, mode)) {
5553 con_log(CL_ANN, (CE_WARN,
5554 "issue_mfi_pthru : "
5555 "copy to user space failed"));
5556 }
5557 con_log(CL_DLEVEL1, (CE_WARN,
5558 "Copying Sense info sense_buff[%d] = 0x%X",
5559 i, *((uint8_t *)cmd->sense + i)));
5560 }
5561 }
5562 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
5563 DDI_DMA_SYNC_FORDEV);
5564
5565 if (xferlen) {
5566 /* free kernel buffer */
5567 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
5568 return (DDI_FAILURE);
5569 }
5570
5571 return (DDI_SUCCESS);
5572 }
5573
5574 /*
5575 * issue_mfi_dcmd
5576 */
5577 static int
5578 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5579 struct mrsas_cmd *cmd, int mode)
5580 {
5581 void *ubuf;
5582 uint32_t kphys_addr = 0;
5583 uint32_t xferlen = 0;
5584 uint32_t new_xfer_length = 0;
5585 uint32_t model;
5586 dma_obj_t dcmd_dma_obj;
5587 struct mrsas_dcmd_frame *kdcmd;
5588 struct mrsas_dcmd_frame *dcmd;
5589 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5590 int i;
5591 dcmd = &cmd->frame->dcmd;
5592 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
5593
5594 if (instance->adapterresetinprogress) {
5595 con_log(CL_ANN1, (CE_NOTE, "Reset flag set, "
5596 "returning mfi_pkt and setting TRAN_BUSY"));
5597 return (DDI_FAILURE);
5598 }
5599 model = ddi_model_convert_from(mode & FMODELS);
5600 if (model == DDI_MODEL_ILP32) {
5601 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5602
5603 xferlen = kdcmd->sgl.sge32[0].length;
5604
5605 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5606 } else {
5607 #ifdef _ILP32
5608 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5609 xferlen = kdcmd->sgl.sge32[0].length;
5610 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5611 #else
5612 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_LP64"));
5613 xferlen = kdcmd->sgl.sge64[0].length;
5614 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
5615 #endif
5616 }
5617 if (xferlen) {
5618 /* means IOCTL requires DMA */
5619 /* allocate the data transfer buffer */
5620 /* dcmd_dma_obj.size = xferlen; */
5621 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length,
5622 PAGESIZE);
5623 dcmd_dma_obj.size = new_xfer_length;
5624 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
5625 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5626 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5627 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
5628 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
5629
5630 /* allocate kernel buffer for DMA */
5631 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
5632 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5633 con_log(CL_ANN,
5634 (CE_WARN, "issue_mfi_dcmd: could not "
5635 "allocate data transfer buffer."));
5636 return (DDI_FAILURE);
5637 }
5638 (void) memset(dcmd_dma_obj.buffer, 0, xferlen);
5639
5640 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5641 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) {
5642 for (i = 0; i < xferlen; i++) {
5643 if (ddi_copyin((uint8_t *)ubuf + i,
5644 (uint8_t *)dcmd_dma_obj.buffer + i,
5645 1, mode)) {
5646 con_log(CL_ANN, (CE_WARN,
5647 "issue_mfi_dcmd : "
5648 "copy from user space failed"));
5649 return (DDI_FAILURE);
5650 }
5651 }
5652 }
5653
5654 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
5655 }
5656
5657 ddi_put8(acc_handle, &dcmd->cmd, kdcmd->cmd);
5658 ddi_put8(acc_handle, &dcmd->cmd_status, 0);
5659 ddi_put8(acc_handle, &dcmd->sge_count, kdcmd->sge_count);
5660 ddi_put16(acc_handle, &dcmd->timeout, kdcmd->timeout);
5661 ddi_put32(acc_handle, &dcmd->data_xfer_len, kdcmd->data_xfer_len);
5662 ddi_put32(acc_handle, &dcmd->opcode, kdcmd->opcode);
5663
5664 ddi_rep_put8(acc_handle, (uint8_t *)kdcmd->mbox.b,
5665 (uint8_t *)dcmd->mbox.b, DCMD_MBOX_SZ, DDI_DEV_AUTOINCR);
5666
5667 ddi_put16(acc_handle, &dcmd->flags, kdcmd->flags & ~MFI_FRAME_SGL64);
5668 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].length, xferlen);
5669 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].phys_addr, kphys_addr);
5670
5671 cmd->sync_cmd = MRSAS_TRUE;
5672 cmd->frame_count = 1;
5673
5674 if (instance->tbolt) {
5675 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5676 }
5677
5678 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5679 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed"));
5680 } else {
5681 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
5682 for (i = 0; i < xferlen; i++) {
5683 if (ddi_copyout(
5684 (uint8_t *)dcmd_dma_obj.buffer + i,
5685 (uint8_t *)ubuf + i,
5686 1, mode)) {
5687 con_log(CL_ANN, (CE_WARN,
5688 "issue_mfi_dcmd : "
5689 "copy to user space failed"));
5690 return (DDI_FAILURE);
5691 }
5692 }
5693 }
5694 }
5695
5696 kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status);
5697 con_log(CL_ANN,
5698 (CE_CONT, "issue_mfi_dcmd: cmd_status %x", kdcmd->cmd_status));
5699 DTRACE_PROBE3(issue_dcmd, uint32_t, kdcmd->opcode, uint8_t,
5700 kdcmd->cmd, uint8_t, kdcmd->cmd_status);
5701
5702 if (xferlen) {
5703 /* free kernel buffer */
5704 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
5705 return (DDI_FAILURE);
5706 }
5707
5708 return (DDI_SUCCESS);
5709 }
5710
5711 /*
5712 * issue_mfi_smp
5713 */
5714 static int
5715 issue_mfi_smp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5716 struct mrsas_cmd *cmd, int mode)
5717 {
5718 void *request_ubuf;
5719 void *response_ubuf;
5720 uint32_t request_xferlen = 0;
5721 uint32_t response_xferlen = 0;
5722 uint32_t new_xfer_length1 = 0;
5723 uint32_t new_xfer_length2 = 0;
5724 uint_t model;
5725 dma_obj_t request_dma_obj;
5726 dma_obj_t response_dma_obj;
5727 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5728 struct mrsas_smp_frame *ksmp;
5729 struct mrsas_smp_frame *smp;
5730 struct mrsas_sge32 *sge32;
5731 #ifndef _ILP32
5732 struct mrsas_sge64 *sge64;
5733 #endif
5734 int i;
5735 uint64_t tmp_sas_addr;
5736
5737 smp = &cmd->frame->smp;
5738 ksmp = (struct mrsas_smp_frame *)&ioctl->frame[0];
5739
5740 if (instance->adapterresetinprogress) {
5741 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5742 "returning mfi_pkt and setting TRAN_BUSY\n"));
5743 return (DDI_FAILURE);
5744 }
5745 model = ddi_model_convert_from(mode & FMODELS);
5746 if (model == DDI_MODEL_ILP32) {
5747 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5748
5749 sge32 = &ksmp->sgl[0].sge32[0];
5750 response_xferlen = sge32[0].length;
5751 request_xferlen = sge32[1].length;
5752 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5753 "response_xferlen = %x, request_xferlen = %x",
5754 response_xferlen, request_xferlen));
5755
5756 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5757 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5758 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5759 "response_ubuf = %p, request_ubuf = %p",
5760 response_ubuf, request_ubuf));
5761 } else {
5762 #ifdef _ILP32
5763 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5764
5765 sge32 = &ksmp->sgl[0].sge32[0];
5766 response_xferlen = sge32[0].length;
5767 request_xferlen = sge32[1].length;
5768 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5769 "response_xferlen = %x, request_xferlen = %x",
5770 response_xferlen, request_xferlen));
5771
5772 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5773 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5774 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5775 "response_ubuf = %p, request_ubuf = %p",
5776 response_ubuf, request_ubuf));
5777 #else
5778 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_LP64"));
5779
5780 sge64 = &ksmp->sgl[0].sge64[0];
5781 response_xferlen = sge64[0].length;
5782 request_xferlen = sge64[1].length;
5783
5784 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr;
5785 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr;
5786 #endif
5787 }
5788 if (request_xferlen) {
5789 /* means IOCTL requires DMA */
5790 /* allocate the data transfer buffer */
5791 /* request_dma_obj.size = request_xferlen; */
5792 MRSAS_GET_BOUNDARY_ALIGNED_LEN(request_xferlen,
5793 new_xfer_length1, PAGESIZE);
5794 request_dma_obj.size = new_xfer_length1;
5795 request_dma_obj.dma_attr = mrsas_generic_dma_attr;
5796 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5797 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5798 request_dma_obj.dma_attr.dma_attr_sgllen = 1;
5799 request_dma_obj.dma_attr.dma_attr_align = 1;
5800
5801 /* allocate kernel buffer for DMA */
5802 if (mrsas_alloc_dma_obj(instance, &request_dma_obj,
5803 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5804 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5805 "could not allocate data transfer buffer."));
5806 return (DDI_FAILURE);
5807 }
5808 (void) memset(request_dma_obj.buffer, 0, request_xferlen);
5809
5810 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5811 for (i = 0; i < request_xferlen; i++) {
5812 if (ddi_copyin((uint8_t *)request_ubuf + i,
5813 (uint8_t *)request_dma_obj.buffer + i,
5814 1, mode)) {
5815 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5816 "copy from user space failed"));
5817 return (DDI_FAILURE);
5818 }
5819 }
5820 }
5821
5822 if (response_xferlen) {
5823 /* means IOCTL requires DMA */
5824 /* allocate the data transfer buffer */
5825 /* response_dma_obj.size = response_xferlen; */
5826 MRSAS_GET_BOUNDARY_ALIGNED_LEN(response_xferlen,
5827 new_xfer_length2, PAGESIZE);
5828 response_dma_obj.size = new_xfer_length2;
5829 response_dma_obj.dma_attr = mrsas_generic_dma_attr;
5830 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5831 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5832 response_dma_obj.dma_attr.dma_attr_sgllen = 1;
5833 response_dma_obj.dma_attr.dma_attr_align = 1;
5834
5835 /* allocate kernel buffer for DMA */
5836 if (mrsas_alloc_dma_obj(instance, &response_dma_obj,
5837 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5838 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5839 "could not allocate data transfer buffer."));
5840 return (DDI_FAILURE);
5841 }
5842 (void) memset(response_dma_obj.buffer, 0, response_xferlen);
5843
5844 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5845 for (i = 0; i < response_xferlen; i++) {
5846 if (ddi_copyin((uint8_t *)response_ubuf + i,
5847 (uint8_t *)response_dma_obj.buffer + i,
5848 1, mode)) {
5849 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5850 "copy from user space failed"));
5851 return (DDI_FAILURE);
5852 }
5853 }
5854 }
5855
5856 ddi_put8(acc_handle, &smp->cmd, ksmp->cmd);
5857 ddi_put8(acc_handle, &smp->cmd_status, 0);
5858 ddi_put8(acc_handle, &smp->connection_status, 0);
5859 ddi_put8(acc_handle, &smp->sge_count, ksmp->sge_count);
5860 /* smp->context = ksmp->context; */
5861 ddi_put16(acc_handle, &smp->timeout, ksmp->timeout);
5862 ddi_put32(acc_handle, &smp->data_xfer_len, ksmp->data_xfer_len);
5863
5864 bcopy((void *)&ksmp->sas_addr, (void *)&tmp_sas_addr,
5865 sizeof (uint64_t));
5866 ddi_put64(acc_handle, &smp->sas_addr, tmp_sas_addr);
5867
5868 ddi_put16(acc_handle, &smp->flags, ksmp->flags & ~MFI_FRAME_SGL64);
5869
5870 model = ddi_model_convert_from(mode & FMODELS);
5871 if (model == DDI_MODEL_ILP32) {
5872 con_log(CL_ANN1, (CE_CONT,
5873 "issue_mfi_smp: DDI_MODEL_ILP32"));
5874
5875 sge32 = &smp->sgl[0].sge32[0];
5876 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5877 ddi_put32(acc_handle, &sge32[0].phys_addr,
5878 response_dma_obj.dma_cookie[0].dmac_address);
5879 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5880 ddi_put32(acc_handle, &sge32[1].phys_addr,
5881 request_dma_obj.dma_cookie[0].dmac_address);
5882 } else {
5883 #ifdef _ILP32
5884 con_log(CL_ANN1, (CE_CONT,
5885 "issue_mfi_smp: DDI_MODEL_ILP32"));
5886 sge32 = &smp->sgl[0].sge32[0];
5887 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5888 ddi_put32(acc_handle, &sge32[0].phys_addr,
5889 response_dma_obj.dma_cookie[0].dmac_address);
5890 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5891 ddi_put32(acc_handle, &sge32[1].phys_addr,
5892 request_dma_obj.dma_cookie[0].dmac_address);
5893 #else
5894 con_log(CL_ANN1, (CE_CONT,
5895 "issue_mfi_smp: DDI_MODEL_LP64"));
5896 sge64 = &smp->sgl[0].sge64[0];
5897 ddi_put32(acc_handle, &sge64[0].length, response_xferlen);
5898 ddi_put64(acc_handle, &sge64[0].phys_addr,
5899 response_dma_obj.dma_cookie[0].dmac_address);
5900 ddi_put32(acc_handle, &sge64[1].length, request_xferlen);
5901 ddi_put64(acc_handle, &sge64[1].phys_addr,
5902 request_dma_obj.dma_cookie[0].dmac_address);
5903 #endif
5904 }
5905 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp : "
5906 "smp->response_xferlen = %d, smp->request_xferlen = %d "
5907 "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length),
5908 ddi_get32(acc_handle, &sge32[1].length),
5909 ddi_get32(acc_handle, &smp->data_xfer_len)));
5910
5911 cmd->sync_cmd = MRSAS_TRUE;
5912 cmd->frame_count = 1;
5913
5914 if (instance->tbolt) {
5915 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5916 }
5917
5918 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5919 con_log(CL_ANN, (CE_WARN,
5920 "issue_mfi_smp: fw_ioctl failed"));
5921 } else {
5922 con_log(CL_ANN1, (CE_CONT,
5923 "issue_mfi_smp: copy to user space"));
5924
5925 if (request_xferlen) {
5926 for (i = 0; i < request_xferlen; i++) {
5927 if (ddi_copyout(
5928 (uint8_t *)request_dma_obj.buffer +
5929 i, (uint8_t *)request_ubuf + i,
5930 1, mode)) {
5931 con_log(CL_ANN, (CE_WARN,
5932 "issue_mfi_smp : copy to user space"
5933 " failed"));
5934 return (DDI_FAILURE);
5935 }
5936 }
5937 }
5938
5939 if (response_xferlen) {
5940 for (i = 0; i < response_xferlen; i++) {
5941 if (ddi_copyout(
5942 (uint8_t *)response_dma_obj.buffer
5943 + i, (uint8_t *)response_ubuf
5944 + i, 1, mode)) {
5945 con_log(CL_ANN, (CE_WARN,
5946 "issue_mfi_smp : copy to "
5947 "user space failed"));
5948 return (DDI_FAILURE);
5949 }
5950 }
5951 }
5952 }
5953
5954 ksmp->cmd_status = ddi_get8(acc_handle, &smp->cmd_status);
5955 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
5956 ksmp->cmd_status));
5957 DTRACE_PROBE2(issue_smp, uint8_t, ksmp->cmd, uint8_t, ksmp->cmd_status);
5958
5959 if (request_xferlen) {
5960 /* free kernel buffer */
5961 if (mrsas_free_dma_obj(instance, request_dma_obj) !=
5962 DDI_SUCCESS)
5963 return (DDI_FAILURE);
5964 }
5965
5966 if (response_xferlen) {
5967 /* free kernel buffer */
5968 if (mrsas_free_dma_obj(instance, response_dma_obj) !=
5969 DDI_SUCCESS)
5970 return (DDI_FAILURE);
5971 }
5972
5973 return (DDI_SUCCESS);
5974 }
5975
5976 /*
5977 * issue_mfi_stp
5978 */
5979 static int
5980 issue_mfi_stp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5981 struct mrsas_cmd *cmd, int mode)
5982 {
5983 void *fis_ubuf;
5984 void *data_ubuf;
5985 uint32_t fis_xferlen = 0;
5986 uint32_t new_xfer_length1 = 0;
5987 uint32_t new_xfer_length2 = 0;
5988 uint32_t data_xferlen = 0;
5989 uint_t model;
5990 dma_obj_t fis_dma_obj;
5991 dma_obj_t data_dma_obj;
5992 struct mrsas_stp_frame *kstp;
5993 struct mrsas_stp_frame *stp;
5994 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5995 int i;
5996
5997 stp = &cmd->frame->stp;
5998 kstp = (struct mrsas_stp_frame *)&ioctl->frame[0];
5999
6000 if (instance->adapterresetinprogress) {
6001 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
6002 "returning mfi_pkt and setting TRAN_BUSY\n"));
6003 return (DDI_FAILURE);
6004 }
6005 model = ddi_model_convert_from(mode & FMODELS);
6006 if (model == DDI_MODEL_ILP32) {
6007 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
6008
6009 fis_xferlen = kstp->sgl.sge32[0].length;
6010 data_xferlen = kstp->sgl.sge32[1].length;
6011
6012 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
6013 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
6014 } else {
6015 #ifdef _ILP32
6016 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
6017
6018 fis_xferlen = kstp->sgl.sge32[0].length;
6019 data_xferlen = kstp->sgl.sge32[1].length;
6020
6021 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
6022 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
6023 #else
6024 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_LP64"));
6025
6026 fis_xferlen = kstp->sgl.sge64[0].length;
6027 data_xferlen = kstp->sgl.sge64[1].length;
6028
6029 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
6030 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
6031 #endif
6032 }
6033
6034
6035 if (fis_xferlen) {
6036 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: "
6037 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));
6038
6039 /* means IOCTL requires DMA */
6040 /* allocate the data transfer buffer */
6041 /* fis_dma_obj.size = fis_xferlen; */
6042 MRSAS_GET_BOUNDARY_ALIGNED_LEN(fis_xferlen,
6043 new_xfer_length1, PAGESIZE);
6044 fis_dma_obj.size = new_xfer_length1;
6045 fis_dma_obj.dma_attr = mrsas_generic_dma_attr;
6046 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
6047 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
6048 fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
6049 fis_dma_obj.dma_attr.dma_attr_align = 1;
6050
6051 /* allocate kernel buffer for DMA */
6052 if (mrsas_alloc_dma_obj(instance, &fis_dma_obj,
6053 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
6054 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp : "
6055 "could not allocate data transfer buffer."));
6056 return (DDI_FAILURE);
6057 }
6058 (void) memset(fis_dma_obj.buffer, 0, fis_xferlen);
6059
6060 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
6061 for (i = 0; i < fis_xferlen; i++) {
6062 if (ddi_copyin((uint8_t *)fis_ubuf + i,
6063 (uint8_t *)fis_dma_obj.buffer + i, 1, mode)) {
6064 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6065 "copy from user space failed"));
6066 return (DDI_FAILURE);
6067 }
6068 }
6069 }
6070
6071 if (data_xferlen) {
6072 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: data_ubuf = %p "
6073 "data_xferlen = %x", data_ubuf, data_xferlen));
6074
6075 /* means IOCTL requires DMA */
6076 /* allocate the data transfer buffer */
6077 /* data_dma_obj.size = data_xferlen; */
6078 MRSAS_GET_BOUNDARY_ALIGNED_LEN(data_xferlen, new_xfer_length2,
6079 PAGESIZE);
6080 data_dma_obj.size = new_xfer_length2;
6081 data_dma_obj.dma_attr = mrsas_generic_dma_attr;
6082 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
6083 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
6084 data_dma_obj.dma_attr.dma_attr_sgllen = 1;
6085 data_dma_obj.dma_attr.dma_attr_align = 1;
6086
6087 /* allocate kernel buffer for DMA */
6088 if (mrsas_alloc_dma_obj(instance, &data_dma_obj,
6089 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
6090 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6091 "could not allocate data transfer buffer."));
6092 return (DDI_FAILURE);
6093 }
6094 (void) memset(data_dma_obj.buffer, 0, data_xferlen);
6095
6096 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
6097 for (i = 0; i < data_xferlen; i++) {
6098 if (ddi_copyin((uint8_t *)data_ubuf + i,
6099 (uint8_t *)data_dma_obj.buffer + i, 1, mode)) {
6100 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6101 "copy from user space failed"));
6102 return (DDI_FAILURE);
6103 }
6104 }
6105 }
6106
6107 ddi_put8(acc_handle, &stp->cmd, kstp->cmd);
6108 ddi_put8(acc_handle, &stp->cmd_status, 0);
6109 ddi_put8(acc_handle, &stp->connection_status, 0);
6110 ddi_put8(acc_handle, &stp->target_id, kstp->target_id);
6111 ddi_put8(acc_handle, &stp->sge_count, kstp->sge_count);
6112
6113 ddi_put16(acc_handle, &stp->timeout, kstp->timeout);
6114 ddi_put32(acc_handle, &stp->data_xfer_len, kstp->data_xfer_len);
6115
6116 ddi_rep_put8(acc_handle, (uint8_t *)kstp->fis, (uint8_t *)stp->fis, 10,
6117 DDI_DEV_AUTOINCR);
6118
6119 ddi_put16(acc_handle, &stp->flags, kstp->flags & ~MFI_FRAME_SGL64);
6120 ddi_put32(acc_handle, &stp->stp_flags, kstp->stp_flags);
6121 ddi_put32(acc_handle, &stp->sgl.sge32[0].length, fis_xferlen);
6122 ddi_put32(acc_handle, &stp->sgl.sge32[0].phys_addr,
6123 fis_dma_obj.dma_cookie[0].dmac_address);
6124 ddi_put32(acc_handle, &stp->sgl.sge32[1].length, data_xferlen);
6125 ddi_put32(acc_handle, &stp->sgl.sge32[1].phys_addr,
6126 data_dma_obj.dma_cookie[0].dmac_address);
6127
6128 cmd->sync_cmd = MRSAS_TRUE;
6129 cmd->frame_count = 1;
6130
6131 if (instance->tbolt) {
6132 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
6133 }
6134
6135 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
6136 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed"));
6137 } else {
6138
6139 if (fis_xferlen) {
6140 for (i = 0; i < fis_xferlen; i++) {
6141 if (ddi_copyout(
6142 (uint8_t *)fis_dma_obj.buffer + i,
6143 (uint8_t *)fis_ubuf + i, 1, mode)) {
6144 con_log(CL_ANN, (CE_WARN,
6145 "issue_mfi_stp : copy to "
6146 "user space failed"));
6147 return (DDI_FAILURE);
6148 }
6149 }
6150 }
6151 }
6152 if (data_xferlen) {
6153 for (i = 0; i < data_xferlen; i++) {
6154 if (ddi_copyout(
6155 (uint8_t *)data_dma_obj.buffer + i,
6156 (uint8_t *)data_ubuf + i, 1, mode)) {
6157 con_log(CL_ANN, (CE_WARN,
6158 "issue_mfi_stp : copy to"
6159 " user space failed"));
6160 return (DDI_FAILURE);
6161 }
6162 }
6163 }
6164
6165 kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status);
6166 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: stp->cmd_status = %d",
6167 kstp->cmd_status));
6168 DTRACE_PROBE2(issue_stp, uint8_t, kstp->cmd, uint8_t, kstp->cmd_status);
6169
6170 if (fis_xferlen) {
6171 /* free kernel buffer */
6172 if (mrsas_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
6173 return (DDI_FAILURE);
6174 }
6175
6176 if (data_xferlen) {
6177 /* free kernel buffer */
6178 if (mrsas_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
6179 return (DDI_FAILURE);
6180 }
6181
6182 return (DDI_SUCCESS);
6183 }
6184
6185 /*
6186 * fill_up_drv_ver
6187 */
6188 void
6189 fill_up_drv_ver(struct mrsas_drv_ver *dv)
6190 {
6191 (void) memset(dv, 0, sizeof (struct mrsas_drv_ver));
6192
6193 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
6194 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
6195 (void) memcpy(dv->drv_name, "mr_sas", strlen("mr_sas"));
6196 (void) memcpy(dv->drv_ver, MRSAS_VERSION, strlen(MRSAS_VERSION));
6197 (void) memcpy(dv->drv_rel_date, MRSAS_RELDATE,
6198 strlen(MRSAS_RELDATE));
6199
6200 }
6201
6202 /*
6203 * handle_drv_ioctl
6204 */
6205 static int
6206 handle_drv_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6207 int mode)
6208 {
6209 int i;
6210 int rval = DDI_SUCCESS;
6211 int *props = NULL;
6212 void *ubuf;
6213
6214 uint8_t *pci_conf_buf;
6215 uint32_t xferlen;
6216 uint32_t num_props;
6217 uint_t model;
6218 struct mrsas_dcmd_frame *kdcmd;
6219 struct mrsas_drv_ver dv;
6220 struct mrsas_pci_information pi;
6221
6222 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
6223
6224 model = ddi_model_convert_from(mode & FMODELS);
6225 if (model == DDI_MODEL_ILP32) {
6226 con_log(CL_ANN1, (CE_CONT,
6227 "handle_drv_ioctl: DDI_MODEL_ILP32"));
6228
6229 xferlen = kdcmd->sgl.sge32[0].length;
6230
6231 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6232 } else {
6233 #ifdef _ILP32
6234 con_log(CL_ANN1, (CE_CONT,
6235 "handle_drv_ioctl: DDI_MODEL_ILP32"));
6236 xferlen = kdcmd->sgl.sge32[0].length;
6237 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6238 #else
6239 con_log(CL_ANN1, (CE_CONT,
6240 "handle_drv_ioctl: DDI_MODEL_LP64"));
6241 xferlen = kdcmd->sgl.sge64[0].length;
6242 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
6243 #endif
6244 }
6245 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6246 "dataBuf=%p size=%d bytes", ubuf, xferlen));
6247
6248 switch (kdcmd->opcode) {
6249 case MRSAS_DRIVER_IOCTL_DRIVER_VERSION:
6250 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6251 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION"));
6252
6253 fill_up_drv_ver(&dv);
6254
6255 if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
6256 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6257 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION : "
6258 "copy to user space failed"));
6259 kdcmd->cmd_status = 1;
6260 rval = 1;
6261 } else {
6262 kdcmd->cmd_status = 0;
6263 }
6264 break;
6265 case MRSAS_DRIVER_IOCTL_PCI_INFORMATION:
6266 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
6267 "MRSAS_DRIVER_IOCTL_PCI_INFORMAITON"));
6268
6269 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
6270 0, "reg", &props, &num_props)) {
6271 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6272 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6273 "ddi_prop_look_int_array failed"));
6274 rval = DDI_FAILURE;
6275 } else {
6276
6277 pi.busNumber = (props[0] >> 16) & 0xFF;
6278 pi.deviceNumber = (props[0] >> 11) & 0x1f;
6279 pi.functionNumber = (props[0] >> 8) & 0x7;
6280 ddi_prop_free((void *)props);
6281 }
6282
6283 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;
6284
6285 for (i = 0; i < (sizeof (struct mrsas_pci_information) -
6286 offsetof(struct mrsas_pci_information, pciHeaderInfo));
6287 i++) {
6288 pci_conf_buf[i] =
6289 pci_config_get8(instance->pci_handle, i);
6290 }
6291
6292 if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
6293 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6294 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6295 "copy to user space failed"));
6296 kdcmd->cmd_status = 1;
6297 rval = 1;
6298 } else {
6299 kdcmd->cmd_status = 0;
6300 }
6301 break;
6302 default:
6303 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6304 "invalid driver specific IOCTL opcode = 0x%x",
6305 kdcmd->opcode));
6306 kdcmd->cmd_status = 1;
6307 rval = DDI_FAILURE;
6308 break;
6309 }
6310
6311 return (rval);
6312 }
6313
6314 /*
6315 * handle_mfi_ioctl
6316 */
6317 static int
6318 handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6319 int mode)
6320 {
6321 int rval = DDI_SUCCESS;
6322
6323 struct mrsas_header *hdr;
6324 struct mrsas_cmd *cmd;
6325
6326 if (instance->tbolt) {
6327 cmd = get_raid_msg_mfi_pkt(instance);
6328 } else {
6329 cmd = mrsas_get_mfi_pkt(instance);
6330 }
6331 if (!cmd) {
6332 con_log(CL_ANN, (CE_WARN, "mr_sas: "
6333 "failed to get a cmd packet"));
6334 DTRACE_PROBE2(mfi_ioctl_err, uint16_t,
6335 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
6336 return (DDI_FAILURE);
6337 }
6338
6339 /* Clear the frame buffer and assign back the context id */
6340 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
6341 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
6342 cmd->index);
6343
6344 hdr = (struct mrsas_header *)&ioctl->frame[0];
6345
6346 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
6347 case MFI_CMD_OP_DCMD:
6348 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
6349 break;
6350 case MFI_CMD_OP_SMP:
6351 rval = issue_mfi_smp(instance, ioctl, cmd, mode);
6352 break;
6353 case MFI_CMD_OP_STP:
6354 rval = issue_mfi_stp(instance, ioctl, cmd, mode);
6355 break;
6356 case MFI_CMD_OP_LD_SCSI:
6357 case MFI_CMD_OP_PD_SCSI:
6358 rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
6359 break;
6360 default:
6361 con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
6362 "invalid mfi ioctl hdr->cmd = %d", hdr->cmd));
6363 rval = DDI_FAILURE;
6364 break;
6365 }
6366
6367 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
6368 rval = DDI_FAILURE;
6369
6370 if (instance->tbolt) {
6371 return_raid_msg_mfi_pkt(instance, cmd);
6372 } else {
6373 mrsas_return_mfi_pkt(instance, cmd);
6374 }
6375
6376 return (rval);
6377 }
6378
6379 /*
6380 * AEN
6381 */
6382 static int
6383 handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen)
6384 {
6385 int rval = 0;
6386
6387 rval = register_mfi_aen(instance, instance->aen_seq_num,
6388 aen->class_locale_word);
6389
6390 aen->cmd_status = (uint8_t)rval;
6391
6392 return (rval);
6393 }
6394
/*
 * register_mfi_aen - register with firmware for Asynchronous Event
 * Notifications matching the given class/locale word, starting from
 * sequence number 'seq_num'.
 *
 * If a registration is already outstanding and subsumes the new request,
 * nothing is done.  Otherwise the outstanding command is aborted and a
 * new one is issued covering the union of both subscriptions.
 *
 * Returns 0 on success, ENOMEM if no command packet was available, or the
 * non-zero result of abort_aen_cmd() if the previous AEN couldn't be
 * aborted.
 */
static int
register_mfi_aen(struct mrsas_instance *instance, uint32_t seq_num,
    uint32_t class_locale_word)
{
	int ret_val;

	struct mrsas_cmd *cmd, *aen_cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new
	 * AEN request we currently have. If it is, then we don't have
	 * to do anything. In other words, whichever events the current
	 * AEN request is subscribing to, have already been subscribed
	 * to.
	 *
	 * If the old_cmd is _not_ inclusive, then we have to abort
	 * that command, form a class_locale that is superset of both
	 * old and current and re-issue to the FW
	 */

	/* Normalize the requested class/locale to host byte order. */
	curr_aen.word = LE_32(class_locale_word);
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	aen_cmd = instance->aen_cmd;
	if (aen_cmd) {
		/* Recover the class/locale of the pending registration. */
		prev_aen.word = ddi_get32(aen_cmd->frame_dma_obj.acc_handle,
		    &aen_cmd->frame->dcmd.mbox.w[1]);
		prev_aen.word = LE_32(prev_aen.word);
		prev_aen.members.locale = LE_16(prev_aen.members.locale);
		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included.
		 *
		 * Locale numbers don't have such hierarchy. They are bitmap
		 * values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */

			return (0);
		} else {
			/* Build the superset of old and new subscriptions. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			ret_val = abort_aen_cmd(instance, aen_cmd);

			if (ret_val) {
				con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
				    "failed to abort prevous AEN command"));

				return (ret_val);
			}
		}
	} else {
		/* No pending AEN: register exactly what was requested. */
		curr_aen.word = LE_32(class_locale_word);
		curr_aen.members.locale = LE_16(curr_aen.members.locale);
	}

	if (instance->tbolt) {
		cmd = get_raid_msg_mfi_pkt(instance);
	} else {
		cmd = mrsas_get_mfi_pkt(instance);
	}

	if (!cmd) {
		DTRACE_PROBE2(mfi_aen_err, uint16_t, instance->fw_outstanding,
		    uint16_t, instance->max_fw_cmds);
		return (ENOMEM);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	/* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	/* Event details land in this pre-allocated DMA buffer. */
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	/* Prepare DCMD for aen registration */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_evt_detail));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_EVENT_WAIT);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], seq_num);
	/* Convert back to wire (little-endian) order before writing. */
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	curr_aen.word = LE_32(curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[1],
	    curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_evt_detail));

	instance->aen_seq_num = seq_num;


	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	instance->aen_cmd = cmd;

	cmd->frame_count = 1;

	/* Issue the aen registration frame */
	/* atomic_add_16 (&instance->fw_outstanding, 1); */
	if (instance->tbolt) {
		mr_sas_tbolt_build_mfi_cmd(instance, cmd);
	}
	instance->func_ptr->issue_cmd(cmd, instance);

	return (0);
}
6534
6535 void
6536 display_scsi_inquiry(caddr_t scsi_inq)
6537 {
6538 #define MAX_SCSI_DEVICE_CODE 14
6539 int i;
6540 char inquiry_buf[256] = {0};
6541 int len;
6542 const char *const scsi_device_types[] = {
6543 "Direct-Access ",
6544 "Sequential-Access",
6545 "Printer ",
6546 "Processor ",
6547 "WORM ",
6548 "CD-ROM ",
6549 "Scanner ",
6550 "Optical Device ",
6551 "Medium Changer ",
6552 "Communications ",
6553 "Unknown ",
6554 "Unknown ",
6555 "Unknown ",
6556 "Enclosure ",
6557 };
6558
6559 len = 0;
6560
6561 len += snprintf(inquiry_buf + len, 265 - len, " Vendor: ");
6562 for (i = 8; i < 16; i++) {
6563 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6564 scsi_inq[i]);
6565 }
6566
6567 len += snprintf(inquiry_buf + len, 265 - len, " Model: ");
6568
6569 for (i = 16; i < 32; i++) {
6570 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6571 scsi_inq[i]);
6572 }
6573
6574 len += snprintf(inquiry_buf + len, 265 - len, " Rev: ");
6575
6576 for (i = 32; i < 36; i++) {
6577 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6578 scsi_inq[i]);
6579 }
6580
6581 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6582
6583
6584 i = scsi_inq[0] & 0x1f;
6585
6586
6587 len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ",
6588 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
6589 "Unknown ");
6590
6591
6592 len += snprintf(inquiry_buf + len, 265 - len,
6593 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
6594
6595 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
6596 len += snprintf(inquiry_buf + len, 265 - len, " CCS\n");
6597 } else {
6598 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6599 }
6600
6601 con_log(CL_DLEVEL2, (CE_CONT, inquiry_buf));
6602 }
6603
/*
 * io_timeout_checker - one-second watchdog (timeout(9F) callback) that
 * decrements the per-command timers of every pending command and, when a
 * command has expired, triggers an online controller reset (OCR) or, if
 * OCR is unsupported/exhausted, kills the adapter.  Re-arms itself on
 * every exit path except adapter kill.
 */
static void
io_timeout_checker(void *arg)
{
	struct scsi_pkt *pkt;
	struct mrsas_instance *instance = arg;
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_header *hdr;
	int time = 0;
	int counter = 0;	/* non-zero when a command has timed out */
	struct mlist_head *pos, *next;
	mlist_t process_list;	/* NOTE(review): initialized, never used */

	/* While an OCR is already running, just re-arm for next second. */
	if (instance->adapterresetinprogress == 1) {
		con_log(CL_ANN, (CE_NOTE, "io_timeout_checker:"
		    " reset in progress"));

		instance->timeout_id = timeout(io_timeout_checker,
		    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		return;
	}

	/* See if this check needs to be in the beginning or last in ISR */
	if (mrsas_initiate_ocr_if_fw_is_faulty(instance) == 1) {
		cmn_err(CE_WARN, "io_timeout_checker: "
		    "FW Fault, calling reset adapter");
		cmn_err(CE_CONT, "io_timeout_checker: "
		    "fw_outstanding 0x%X max_fw_cmds 0x%X",
		    instance->fw_outstanding, instance->max_fw_cmds);
		if (instance->adapterresetinprogress == 0) {
			instance->adapterresetinprogress = 1;
			if (instance->tbolt)
				(void) mrsas_tbolt_reset_ppc(instance);
			else
				(void) mrsas_reset_ppc(instance);
			instance->adapterresetinprogress = 0;
		}
		instance->timeout_id = timeout(io_timeout_checker,
		    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		return;
	}

	INIT_LIST_HEAD(&process_list);

	/*
	 * Walk the pending list under its lock, age every command by one
	 * tick, and stop at the first one whose timer has expired.
	 */
	mutex_enter(&instance->cmd_pend_mtx);
	mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);

		if (cmd == NULL) {
			continue;
		}

		if (cmd->sync_cmd == MRSAS_TRUE) {
			hdr = (struct mrsas_header *)&cmd->frame->hdr;
			if (hdr == NULL) {
				continue;
			}
			time = --cmd->drv_pkt_time;
		} else {
			pkt = cmd->pkt;
			if (pkt == NULL) {
				continue;
			}
			time = --cmd->drv_pkt_time;
		}
		if (time <= 0) {
			/* Only the first expired command is reported. */
			cmn_err(CE_WARN, "%llx: "
			    "io_timeout_checker: TIMING OUT: pkt: %p, "
			    "cmd %p fw_outstanding 0x%X max_fw_cmds 0x%X\n",
			    gethrtime(), (void *)pkt, (void *)cmd,
			    instance->fw_outstanding, instance->max_fw_cmds);

			counter++;
			break;
		}
	}
	mutex_exit(&instance->cmd_pend_mtx);

	if (counter) {
		/* A command expired: reset the adapter, or kill it. */
		if (instance->disable_online_ctrl_reset == 1) {
			cmn_err(CE_WARN, "mr_sas %d: %s(): OCR is NOT "
			    "supported by Firmware, KILL adapter!!!",
			    instance->instance, __func__);

			if (instance->tbolt)
				mrsas_tbolt_kill_adapter(instance);
			else
				(void) mrsas_kill_adapter(instance);

			/* Adapter dead: do not re-arm the watchdog. */
			return;
		} else {
			if (cmd->retry_count_for_ocr <= IO_RETRY_COUNT) {
				if (instance->adapterresetinprogress == 0) {
					if (instance->tbolt) {
						(void) mrsas_tbolt_reset_ppc(
						    instance);
					} else {
						(void) mrsas_reset_ppc(
						    instance);
					}
				}
			} else {
				cmn_err(CE_WARN,
				    "io_timeout_checker: "
				    "cmd %p cmd->index %d "
				    "timed out even after 3 resets: "
				    "so KILL adapter", (void *)cmd, cmd->index);

				mrsas_print_cmd_details(instance, cmd, 0xDD);

				if (instance->tbolt)
					mrsas_tbolt_kill_adapter(instance);
				else
					(void) mrsas_kill_adapter(instance);
				return;
			}
		}
	}
	con_log(CL_ANN, (CE_NOTE, "mrsas: "
	    "schedule next timeout check: "
	    "do timeout \n"));
	instance->timeout_id =
	    timeout(io_timeout_checker, (void *)instance,
	    drv_usectohz(MRSAS_1_SECOND));
}
6728
6729 static uint32_t
6730 read_fw_status_reg_ppc(struct mrsas_instance *instance)
6731 {
6732 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
6733 }
6734
6735 static void
6736 issue_cmd_ppc(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
6737 {
6738 struct scsi_pkt *pkt;
6739 atomic_inc_16(&instance->fw_outstanding);
6740
6741 pkt = cmd->pkt;
6742 if (pkt) {
6743 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
6744 "ISSUED CMD TO FW : called : cmd:"
6745 ": %p instance : %p pkt : %p pkt_time : %x\n",
6746 gethrtime(), (void *)cmd, (void *)instance,
6747 (void *)pkt, cmd->drv_pkt_time));
6748 if (instance->adapterresetinprogress) {
6749 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
6750 con_log(CL_ANN1, (CE_NOTE, "Reset the scsi_pkt timer"));
6751 } else {
6752 push_pending_mfi_pkt(instance, cmd);
6753 }
6754
6755 } else {
6756 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
6757 "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
6758 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
6759 }
6760
6761 mutex_enter(&instance->reg_write_mtx);
6762 /* Issue the command to the FW */
6763 WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
6764 (((cmd->frame_count - 1) << 1) | 1), instance);
6765 mutex_exit(&instance->reg_write_mtx);
6766
6767 }
6768
/*
 * issue_cmd_in_sync_mode_ppc - post an MFI frame to the inbound queue
 * port and block on int_cmd_cv until the interrupt path marks the
 * command complete (cmd_status != ENODATA) or the wait loop gives up.
 *
 * During an adapter reset the frame is issued without waiting and
 * DDI_SUCCESS is returned immediately.
 *
 * Returns DDI_SUCCESS on completion, DDI_FAILURE on wait exhaustion.
 */
static int
issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	int i;
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
	struct mrsas_header *hdr = &cmd->frame->hdr;

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called"));

	if (instance->adapterresetinprogress) {
		/* Give the command at least the debug timeout to finish. */
		cmd->drv_pkt_time = ddi_get16(
		    cmd->frame_dma_obj.acc_handle, &hdr->timeout);
		if (cmd->drv_pkt_time < debug_timeout_g)
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;

		con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: "
		    "issue and return in reset case\n"));
		WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
		    (((cmd->frame_count - 1) << 1) | 1), instance);

		return (DDI_SUCCESS);
	} else {
		con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: pushing the pkt\n"));
		push_pending_mfi_pkt(instance, cmd);
	}

	/* Sentinel: interrupt completion overwrites this status. */
	cmd->cmd_status = ENODATA;

	mutex_enter(&instance->reg_write_mtx);
	/* Issue the command to the FW */
	WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);
	mutex_exit(&instance->reg_write_mtx);

	mutex_enter(&instance->int_cmd_mtx);
	/*
	 * NOTE(review): cv_wait() has no timeout, so 'i' counts cv
	 * wakeups (including spurious ones) rather than milliseconds;
	 * 'msecs' only bounds the number of wakeups tolerated.  Confirm
	 * whether cv_timedwait() was intended here.
	 */
	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
	}
	mutex_exit(&instance->int_cmd_mtx);

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done"));

	if (i < (msecs -1)) {
		return (DDI_SUCCESS);
	} else {
		return (DDI_FAILURE);
	}
}
6821
6822 /*
6823 * issue_cmd_in_poll_mode
6824 */
6825 static int
6826 issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance,
6827 struct mrsas_cmd *cmd)
6828 {
6829 int i;
6830 uint16_t flags;
6831 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
6832 struct mrsas_header *frame_hdr;
6833
6834 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called"));
6835
6836 frame_hdr = (struct mrsas_header *)cmd->frame;
6837 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
6838 MFI_CMD_STATUS_POLL_MODE);
6839 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
6840 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
6841
6842 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
6843
6844 /* issue the frame using inbound queue port */
6845 WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
6846 (((cmd->frame_count - 1) << 1) | 1), instance);
6847
6848 /* wait for cmd_status to change from 0xFF */
6849 for (i = 0; i < msecs && (
6850 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6851 == MFI_CMD_STATUS_POLL_MODE); i++) {
6852 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
6853 }
6854
6855 if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6856 == MFI_CMD_STATUS_POLL_MODE) {
6857 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
6858 "cmd polling timed out"));
6859 return (DDI_FAILURE);
6860 }
6861
6862 return (DDI_SUCCESS);
6863 }
6864
6865 static void
6866 enable_intr_ppc(struct mrsas_instance *instance)
6867 {
6868 uint32_t mask;
6869
6870 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called"));
6871
6872 if (instance->skinny) {
6873 /* For SKINNY, write ~0x1, from BSD's mfi driver. */
6874 WR_OB_INTR_MASK(0xfffffffe, instance);
6875 } else {
6876 /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
6877 WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);
6878
6879 /* WR_OB_INTR_MASK(~0x80000000, instance); */
6880 WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance);
6881 }
6882
6883 /* dummy read to force PCI flush */
6884 mask = RD_OB_INTR_MASK(instance);
6885
6886 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
6887 "outbound_intr_mask = 0x%x", mask));
6888 }
6889
6890 static void
6891 disable_intr_ppc(struct mrsas_instance *instance)
6892 {
6893 uint32_t mask;
6894
6895 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called"));
6896
6897 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
6898 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
6899
6900 /* For now, assume there are no extras needed for Skinny support. */
6901
6902 WR_OB_INTR_MASK(OB_INTR_MASK, instance);
6903
6904 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
6905 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
6906
6907 /* dummy read to force PCI flush */
6908 mask = RD_OB_INTR_MASK(instance);
6909 #ifdef lint
6910 mask = mask;
6911 #endif
6912 }
6913
6914 static int
6915 intr_ack_ppc(struct mrsas_instance *instance)
6916 {
6917 uint32_t status;
6918 int ret = DDI_INTR_CLAIMED;
6919
6920 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called"));
6921
6922 /* check if it is our interrupt */
6923 status = RD_OB_INTR_STATUS(instance);
6924
6925 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status));
6926
6927 /*
6928 * NOTE: Some drivers call out SKINNY here, but the return is the same
6929 * for SKINNY and 2108.
6930 */
6931 if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) {
6932 ret = DDI_INTR_UNCLAIMED;
6933 }
6934
6935 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
6936 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
6937 ret = DDI_INTR_UNCLAIMED;
6938 }
6939
6940 if (ret == DDI_INTR_UNCLAIMED) {
6941 return (ret);
6942 }
6943
6944 /*
6945 * Clear the interrupt by writing back the same value.
6946 * Another case where SKINNY is slightly different.
6947 */
6948 if (instance->skinny) {
6949 WR_OB_INTR_STATUS(status, instance);
6950 } else {
6951 WR_OB_DOORBELL_CLEAR(status, instance);
6952 }
6953
6954 /* dummy READ */
6955 status = RD_OB_INTR_STATUS(instance);
6956
6957 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared"));
6958
6959 return (ret);
6960 }
6961
6962 /*
6963 * Marks HBA as bad. This will be called either when an
6964 * IO packet times out even after 3 FW resets
6965 * or FW is found to be fault even after 3 continuous resets.
6966 */
6967
6968 static int
6969 mrsas_kill_adapter(struct mrsas_instance *instance)
6970 {
6971 if (instance->deadadapter == 1)
6972 return (DDI_FAILURE);
6973
6974 con_log(CL_ANN1, (CE_NOTE, "mrsas_kill_adapter: "
6975 "Writing to doorbell with MFI_STOP_ADP "));
6976 mutex_enter(&instance->ocr_flags_mtx);
6977 instance->deadadapter = 1;
6978 mutex_exit(&instance->ocr_flags_mtx);
6979 instance->func_ptr->disable_intr(instance);
6980 WR_IB_DOORBELL(MFI_STOP_ADP, instance);
6981 (void) mrsas_complete_pending_cmds(instance);
6982 return (DDI_SUCCESS);
6983 }
6984
6985
6986 static int
6987 mrsas_reset_ppc(struct mrsas_instance *instance)
6988 {
6989 uint32_t status;
6990 uint32_t retry = 0;
6991 uint32_t cur_abs_reg_val;
6992 uint32_t fw_state;
6993
6994 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
6995
6996 if (instance->deadadapter == 1) {
6997 cmn_err(CE_WARN, "mrsas_reset_ppc: "
6998 "no more resets as HBA has been marked dead ");
6999 return (DDI_FAILURE);
7000 }
7001 mutex_enter(&instance->ocr_flags_mtx);
7002 instance->adapterresetinprogress = 1;
7003 mutex_exit(&instance->ocr_flags_mtx);
7004 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: adpterresetinprogress "
7005 "flag set, time %llx", gethrtime()));
7006
7007 instance->func_ptr->disable_intr(instance);
7008 retry_reset:
7009 WR_IB_WRITE_SEQ(0, instance);
7010 WR_IB_WRITE_SEQ(4, instance);
7011 WR_IB_WRITE_SEQ(0xb, instance);
7012 WR_IB_WRITE_SEQ(2, instance);
7013 WR_IB_WRITE_SEQ(7, instance);
7014 WR_IB_WRITE_SEQ(0xd, instance);
7015 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: magic number written "
7016 "to write sequence register\n"));
7017 delay(100 * drv_usectohz(MILLISEC));
7018 status = RD_OB_DRWE(instance);
7019
7020 while (!(status & DIAG_WRITE_ENABLE)) {
7021 delay(100 * drv_usectohz(MILLISEC));
7022 status = RD_OB_DRWE(instance);
7023 if (retry++ == 100) {
7024 cmn_err(CE_WARN, "mrsas_reset_ppc: DRWE bit "
7025 "check retry count %d", retry);
7026 return (DDI_FAILURE);
7027 }
7028 }
7029 WR_IB_DRWE(status | DIAG_RESET_ADAPTER, instance);
7030 delay(100 * drv_usectohz(MILLISEC));
7031 status = RD_OB_DRWE(instance);
7032 while (status & DIAG_RESET_ADAPTER) {
7033 delay(100 * drv_usectohz(MILLISEC));
7034 status = RD_OB_DRWE(instance);
7035 if (retry++ == 100) {
7036 cmn_err(CE_WARN, "mrsas_reset_ppc: "
7037 "RESET FAILED. KILL adapter called.");
7038
7039 (void) mrsas_kill_adapter(instance);
7040 return (DDI_FAILURE);
7041 }
7042 }
7043 con_log(CL_ANN, (CE_NOTE, "mrsas_reset_ppc: Adapter reset complete"));
7044 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7045 "Calling mfi_state_transition_to_ready"));
7046
7047 /* Mark HBA as bad, if FW is fault after 3 continuous resets */
7048 if (mfi_state_transition_to_ready(instance) ||
7049 debug_fw_faults_after_ocr_g == 1) {
7050 cur_abs_reg_val =
7051 instance->func_ptr->read_fw_status_reg(instance);
7052 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
7053
7054 #ifdef OCRDEBUG
7055 con_log(CL_ANN1, (CE_NOTE,
7056 "mrsas_reset_ppc :before fake: FW is not ready "
7057 "FW state = 0x%x", fw_state));
7058 if (debug_fw_faults_after_ocr_g == 1)
7059 fw_state = MFI_STATE_FAULT;
7060 #endif
7061
7062 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc : FW is not ready "
7063 "FW state = 0x%x", fw_state));
7064
7065 if (fw_state == MFI_STATE_FAULT) {
7066 /* increment the count */
7067 instance->fw_fault_count_after_ocr++;
7068 if (instance->fw_fault_count_after_ocr
7069 < MAX_FW_RESET_COUNT) {
7070 cmn_err(CE_WARN, "mrsas_reset_ppc: "
7071 "FW is in fault after OCR count %d "
7072 "Retry Reset",
7073 instance->fw_fault_count_after_ocr);
7074 goto retry_reset;
7075
7076 } else {
7077 cmn_err(CE_WARN, "mrsas_reset_ppc: "
7078 "Max Reset Count exceeded >%d"
7079 "Mark HBA as bad, KILL adapter",
7080 MAX_FW_RESET_COUNT);
7081
7082 (void) mrsas_kill_adapter(instance);
7083 return (DDI_FAILURE);
7084 }
7085 }
7086 }
7087 /* reset the counter as FW is up after OCR */
7088 instance->fw_fault_count_after_ocr = 0;
7089
7090
7091 ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
7092 instance->producer, 0);
7093
7094 ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
7095 instance->consumer, 0);
7096
7097 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7098 " after resetting produconsumer chck indexs:"
7099 "producer %x consumer %x", *instance->producer,
7100 *instance->consumer));
7101
7102 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7103 "Calling mrsas_issue_init_mfi"));
7104 (void) mrsas_issue_init_mfi(instance);
7105 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7106 "mrsas_issue_init_mfi Done"));
7107
7108 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7109 "Calling mrsas_print_pending_cmd\n"));
7110 (void) mrsas_print_pending_cmds(instance);
7111 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7112 "mrsas_print_pending_cmd done\n"));
7113
7114 instance->func_ptr->enable_intr(instance);
7115 instance->fw_outstanding = 0;
7116
7117 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7118 "Calling mrsas_issue_pending_cmds"));
7119 (void) mrsas_issue_pending_cmds(instance);
7120 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7121 "issue_pending_cmds done.\n"));
7122
7123 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7124 "Calling aen registration"));
7125
7126
7127 instance->aen_cmd->retry_count_for_ocr = 0;
7128 instance->aen_cmd->drv_pkt_time = 0;
7129
7130 instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
7131 con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag.\n"));
7132
7133 mutex_enter(&instance->ocr_flags_mtx);
7134 instance->adapterresetinprogress = 0;
7135 mutex_exit(&instance->ocr_flags_mtx);
7136 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7137 "adpterresetinprogress flag unset"));
7138
7139 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc done\n"));
7140 return (DDI_SUCCESS);
7141 }
7142
7143 /*
7144 * FMA functions.
7145 */
7146 int
7147 mrsas_common_check(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
7148 {
7149 int ret = DDI_SUCCESS;
7150
7151 if (cmd != NULL &&
7152 mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
7153 DDI_SUCCESS) {
7154 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7155 if (cmd->pkt != NULL) {
7156 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7157 cmd->pkt->pkt_statistics = 0;
7158 }
7159 ret = DDI_FAILURE;
7160 }
7161 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
7162 != DDI_SUCCESS) {
7163 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7164 if (cmd != NULL && cmd->pkt != NULL) {
7165 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7166 cmd->pkt->pkt_statistics = 0;
7167 }
7168 ret = DDI_FAILURE;
7169 }
7170 if (mrsas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) !=
7171 DDI_SUCCESS) {
7172 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7173 if (cmd != NULL && cmd->pkt != NULL) {
7174 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7175 cmd->pkt->pkt_statistics = 0;
7176 }
7177 ret = DDI_FAILURE;
7178 }
7179 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
7180 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7181
7182 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0);
7183
7184 if (cmd != NULL && cmd->pkt != NULL) {
7185 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7186 cmd->pkt->pkt_statistics = 0;
7187 }
7188 ret = DDI_FAILURE;
7189 }
7190
7191 return (ret);
7192 }
7193
7194 /*ARGSUSED*/
7195 static int
7196 mrsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
7197 {
7198 /*
7199 * as the driver can always deal with an error in any dma or
7200 * access handle, we can just return the fme_status value.
7201 */
7202 pci_ereport_post(dip, err, NULL);
7203 return (err->fme_status);
7204 }
7205
7206 static void
7207 mrsas_fm_init(struct mrsas_instance *instance)
7208 {
7209 /* Need to change iblock to priority for new MSI intr */
7210 ddi_iblock_cookie_t fm_ibc;
7211
7212 /* Only register with IO Fault Services if we have some capability */
7213 if (instance->fm_capabilities) {
7214 /* Adjust access and dma attributes for FMA */
7215 endian_attr.devacc_attr_access = DDI_FLAGERR_ACC;
7216 mrsas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
7217
7218 /*
7219 * Register capabilities with IO Fault Services.
7220 * fm_capabilities will be updated to indicate
7221 * capabilities actually supported (not requested.)
7222 */
7223
7224 ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc);
7225
7226 /*
7227 * Initialize pci ereport capabilities if ereport
7228 * capable (should always be.)
7229 */
7230
7231 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
7232 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7233 pci_ereport_setup(instance->dip);
7234 }
7235
7236 /*
7237 * Register error callback if error callback capable.
7238 */
7239 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7240 ddi_fm_handler_register(instance->dip,
7241 mrsas_fm_error_cb, (void*) instance);
7242 }
7243 } else {
7244 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
7245 mrsas_generic_dma_attr.dma_attr_flags = 0;
7246 }
7247 }
7248
7249 static void
7250 mrsas_fm_fini(struct mrsas_instance *instance)
7251 {
7252 /* Only unregister FMA capabilities if registered */
7253 if (instance->fm_capabilities) {
7254 /*
7255 * Un-register error callback if error callback capable.
7256 */
7257 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7258 ddi_fm_handler_unregister(instance->dip);
7259 }
7260
7261 /*
7262 * Release any resources allocated by pci_ereport_setup()
7263 */
7264 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
7265 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7266 pci_ereport_teardown(instance->dip);
7267 }
7268
7269 /* Unregister from IO Fault Services */
7270 ddi_fm_fini(instance->dip);
7271
7272 /* Adjust access and dma attributes for FMA */
7273 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
7274 mrsas_generic_dma_attr.dma_attr_flags = 0;
7275 }
7276 }
7277
7278 int
7279 mrsas_check_acc_handle(ddi_acc_handle_t handle)
7280 {
7281 ddi_fm_error_t de;
7282
7283 if (handle == NULL) {
7284 return (DDI_FAILURE);
7285 }
7286
7287 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
7288
7289 return (de.fme_status);
7290 }
7291
7292 int
7293 mrsas_check_dma_handle(ddi_dma_handle_t handle)
7294 {
7295 ddi_fm_error_t de;
7296
7297 if (handle == NULL) {
7298 return (DDI_FAILURE);
7299 }
7300
7301 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
7302
7303 return (de.fme_status);
7304 }
7305
7306 void
7307 mrsas_fm_ereport(struct mrsas_instance *instance, char *detail)
7308 {
7309 uint64_t ena;
7310 char buf[FM_MAX_CLASS];
7311
7312 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
7313 ena = fm_ena_generate(0, FM_ENA_FMT1);
7314 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) {
7315 ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP,
7316 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
7317 }
7318 }
7319
7320 static int
7321 mrsas_add_intrs(struct mrsas_instance *instance, int intr_type)
7322 {
7323
7324 dev_info_t *dip = instance->dip;
7325 int avail, actual, count;
7326 int i, flag, ret;
7327
7328 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_type = %x",
7329 intr_type));
7330
7331 /* Get number of interrupts */
7332 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
7333 if ((ret != DDI_SUCCESS) || (count == 0)) {
7334 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_nintrs() failed:"
7335 "ret %d count %d", ret, count));
7336
7337 return (DDI_FAILURE);
7338 }
7339
7340 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: count = %d ", count));
7341
7342 /* Get number of available interrupts */
7343 ret = ddi_intr_get_navail(dip, intr_type, &avail);
7344 if ((ret != DDI_SUCCESS) || (avail == 0)) {
7345 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed:"
7346 "ret %d avail %d", ret, avail));
7347
7348 return (DDI_FAILURE);
7349 }
7350 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: avail = %d ", avail));
7351
7352 /* Only one interrupt routine. So limit the count to 1 */
7353 if (count > 1) {
7354 count = 1;
7355 }
7356
7357 /*
7358 * Allocate an array of interrupt handlers. Currently we support
7359 * only one interrupt. The framework can be extended later.
7360 */
7361 instance->intr_htable_size = count * sizeof (ddi_intr_handle_t);
7362 instance->intr_htable = kmem_zalloc(instance->intr_htable_size,
7363 KM_SLEEP);
7364 ASSERT(instance->intr_htable);
7365
7366 flag = ((intr_type == DDI_INTR_TYPE_MSI) ||
7367 (intr_type == DDI_INTR_TYPE_MSIX)) ?
7368 DDI_INTR_ALLOC_STRICT : DDI_INTR_ALLOC_NORMAL;
7369
7370 /* Allocate interrupt */
7371 ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0,
7372 count, &actual, flag);
7373
7374 if ((ret != DDI_SUCCESS) || (actual == 0)) {
7375 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7376 "avail = %d", avail));
7377 goto mrsas_free_htable;
7378 }
7379
7380 if (actual < count) {
7381 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7382 "Requested = %d Received = %d", count, actual));
7383 }
7384 instance->intr_cnt = actual;
7385
7386 /*
7387 * Get the priority of the interrupt allocated.
7388 */
7389 if ((ret = ddi_intr_get_pri(instance->intr_htable[0],
7390 &instance->intr_pri)) != DDI_SUCCESS) {
7391 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7392 "get priority call failed"));
7393 goto mrsas_free_handles;
7394 }
7395
7396 /*
7397 * Test for high level mutex. we don't support them.
7398 */
7399 if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) {
7400 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7401 "High level interrupts not supported."));
7402 goto mrsas_free_handles;
7403 }
7404
7405 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_pri = 0x%x ",
7406 instance->intr_pri));
7407
7408 /* Call ddi_intr_add_handler() */
7409 for (i = 0; i < actual; i++) {
7410 ret = ddi_intr_add_handler(instance->intr_htable[i],
7411 (ddi_intr_handler_t *)mrsas_isr, (caddr_t)instance,
7412 (caddr_t)(uintptr_t)i);
7413
7414 if (ret != DDI_SUCCESS) {
7415 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs:"
7416 "failed %d", ret));
7417 goto mrsas_free_handles;
7418 }
7419
7420 }
7421
7422 con_log(CL_DLEVEL1, (CE_NOTE, " ddi_intr_add_handler done"));
7423
7424 if ((ret = ddi_intr_get_cap(instance->intr_htable[0],
7425 &instance->intr_cap)) != DDI_SUCCESS) {
7426 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d",
7427 ret));
7428 goto mrsas_free_handlers;
7429 }
7430
7431 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
7432 con_log(CL_ANN, (CE_WARN, "Calling ddi_intr_block _enable"));
7433
7434 (void) ddi_intr_block_enable(instance->intr_htable,
7435 instance->intr_cnt);
7436 } else {
7437 con_log(CL_ANN, (CE_NOTE, " calling ddi_intr_enable"));
7438
7439 for (i = 0; i < instance->intr_cnt; i++) {
7440 (void) ddi_intr_enable(instance->intr_htable[i]);
7441 con_log(CL_ANN, (CE_NOTE, "ddi intr enable returns "
7442 "%d", i));
7443 }
7444 }
7445
7446 return (DDI_SUCCESS);
7447
7448 mrsas_free_handlers:
7449 for (i = 0; i < actual; i++)
7450 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7451
7452 mrsas_free_handles:
7453 for (i = 0; i < actual; i++)
7454 (void) ddi_intr_free(instance->intr_htable[i]);
7455
7456 mrsas_free_htable:
7457 if (instance->intr_htable != NULL)
7458 kmem_free(instance->intr_htable, instance->intr_htable_size);
7459
7460 instance->intr_htable = NULL;
7461 instance->intr_htable_size = 0;
7462
7463 return (DDI_FAILURE);
7464
7465 }
7466
7467
7468 static void
7469 mrsas_rem_intrs(struct mrsas_instance *instance)
7470 {
7471 int i;
7472
7473 con_log(CL_ANN, (CE_NOTE, "mrsas_rem_intrs called"));
7474
7475 /* Disable all interrupts first */
7476 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
7477 (void) ddi_intr_block_disable(instance->intr_htable,
7478 instance->intr_cnt);
7479 } else {
7480 for (i = 0; i < instance->intr_cnt; i++) {
7481 (void) ddi_intr_disable(instance->intr_htable[i]);
7482 }
7483 }
7484
7485 /* Remove all the handlers */
7486
7487 for (i = 0; i < instance->intr_cnt; i++) {
7488 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7489 (void) ddi_intr_free(instance->intr_htable[i]);
7490 }
7491
7492 if (instance->intr_htable != NULL)
7493 kmem_free(instance->intr_htable, instance->intr_htable_size);
7494
7495 instance->intr_htable = NULL;
7496 instance->intr_htable_size = 0;
7497
7498 }
7499
/*
 * SCSA bus_config(9E) entry point: configure one child (BUS_CONFIG_ONE)
 * or all children (BUS_CONFIG_DRIVER/BUS_CONFIG_ALL) of this HBA nexus,
 * then hand off to the framework via ndi_busop_bus_config().
 */
static int
mrsas_tran_bus_config(dev_info_t *parent, uint_t flags,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	struct mrsas_instance *instance;
	int config;
	int rval = NDI_SUCCESS;

	char *ptr = NULL;
	int tgt, lun;

	con_log(CL_ANN1, (CE_NOTE, "Bus config called for op = %x", op));

	if ((instance = ddi_get_soft_state(mrsas_state,
	    ddi_get_instance(parent))) == NULL) {
		return (NDI_FAILURE);
	}

	/* Hold nexus during bus_config */
	ndi_devi_enter(parent, &config);
	switch (op) {
	case BUS_CONFIG_ONE: {

		/* parse wwid/target name out of name given */
		/* ptr only validates that an '@<addr>' component exists. */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			rval = NDI_FAILURE;
			break;
		}
		ptr++;

		/* Extract hex target and lun from "<name>@<tgt>,<lun>". */
		if (mrsas_parse_devname(arg, &tgt, &lun) != 0) {
			rval = NDI_FAILURE;
			break;
		}

		/* LUN 0 is a logical drive; non-zero LUNs are PDs. */
		if (lun == 0) {
			rval = mrsas_config_ld(instance, tgt, lun, childp);
#ifdef PDSUPPORT
		} else if ((instance->tbolt || instance->skinny) && lun != 0) {
			rval = mrsas_tbolt_config_pd(instance,
			    tgt, lun, childp);
#endif
		} else {
			rval = NDI_FAILURE;
		}

		break;
	}
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL: {

		/*
		 * NOTE(review): rval is deliberately forced to NDI_SUCCESS
		 * below — enumeration is best-effort and individual child
		 * failures do not fail the whole bus_config.
		 */
		rval = mrsas_config_all_devices(instance);

		rval = NDI_SUCCESS;
		break;
	}
	}

	/* Let the NDI framework finish/online whatever was configured. */
	if (rval == NDI_SUCCESS) {
		rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);

	}
	ndi_devi_exit(parent, config);

	con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x",
	    rval));
	return (rval);
}
7568
7569 static int
7570 mrsas_config_all_devices(struct mrsas_instance *instance)
7571 {
7572 int rval, tgt;
7573
7574 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
7575 (void) mrsas_config_ld(instance, tgt, 0, NULL);
7576
7577 }
7578
7579 #ifdef PDSUPPORT
7580 /* Config PD devices connected to the card */
7581 if (instance->tbolt || instance->skinny) {
7582 for (tgt = 0; tgt < instance->mr_tbolt_pd_max; tgt++) {
7583 (void) mrsas_tbolt_config_pd(instance, tgt, 1, NULL);
7584 }
7585 }
7586 #endif
7587
7588 rval = NDI_SUCCESS;
7589 return (rval);
7590 }
7591
7592 static int
7593 mrsas_parse_devname(char *devnm, int *tgt, int *lun)
7594 {
7595 char devbuf[SCSI_MAXNAMELEN];
7596 char *addr;
7597 char *p, *tp, *lp;
7598 long num;
7599
7600 /* Parse dev name and address */
7601 (void) strcpy(devbuf, devnm);
7602 addr = "";
7603 for (p = devbuf; *p != '\0'; p++) {
7604 if (*p == '@') {
7605 addr = p + 1;
7606 *p = '\0';
7607 } else if (*p == ':') {
7608 *p = '\0';
7609 break;
7610 }
7611 }
7612
7613 /* Parse target and lun */
7614 for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
7615 if (*p == ',') {
7616 lp = p + 1;
7617 *p = '\0';
7618 break;
7619 }
7620 }
7621 if (tgt && tp) {
7622 if (ddi_strtol(tp, NULL, 0x10, &num)) {
7623 return (DDI_FAILURE); /* Can declare this as constant */
7624 }
7625 *tgt = (int)num;
7626 }
7627 if (lun && lp) {
7628 if (ddi_strtol(lp, NULL, 0x10, &num)) {
7629 return (DDI_FAILURE);
7630 }
7631 *lun = (int)num;
7632 }
7633 return (DDI_SUCCESS); /* Success case */
7634 }
7635
7636 static int
7637 mrsas_config_ld(struct mrsas_instance *instance, uint16_t tgt,
7638 uint8_t lun, dev_info_t **ldip)
7639 {
7640 struct scsi_device *sd;
7641 dev_info_t *child;
7642 int rval;
7643
7644 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d",
7645 tgt, lun));
7646
7647 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
7648 if (ldip) {
7649 *ldip = child;
7650 }
7651 if (instance->mr_ld_list[tgt].flag != MRDRV_TGT_VALID) {
7652 rval = mrsas_service_evt(instance, tgt, 0,
7653 MRSAS_EVT_UNCONFIG_TGT, NULL);
7654 con_log(CL_ANN1, (CE_WARN,
7655 "mr_sas: DELETING STALE ENTRY rval = %d "
7656 "tgt id = %d ", rval, tgt));
7657 return (NDI_FAILURE);
7658 }
7659 return (NDI_SUCCESS);
7660 }
7661
7662 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
7663 sd->sd_address.a_hba_tran = instance->tran;
7664 sd->sd_address.a_target = (uint16_t)tgt;
7665 sd->sd_address.a_lun = (uint8_t)lun;
7666
7667 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS)
7668 rval = mrsas_config_scsi_device(instance, sd, ldip);
7669 else
7670 rval = NDI_FAILURE;
7671
7672 /* sd_unprobe is blank now. Free buffer manually */
7673 if (sd->sd_inq) {
7674 kmem_free(sd->sd_inq, SUN_INQSIZE);
7675 sd->sd_inq = (struct scsi_inquiry *)NULL;
7676 }
7677
7678 kmem_free(sd, sizeof (struct scsi_device));
7679 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: return rval = %d",
7680 rval));
7681 return (rval);
7682 }
7683
7684 int
7685 mrsas_config_scsi_device(struct mrsas_instance *instance,
7686 struct scsi_device *sd, dev_info_t **dipp)
7687 {
7688 char *nodename = NULL;
7689 char **compatible = NULL;
7690 int ncompatible = 0;
7691 char *childname;
7692 dev_info_t *ldip = NULL;
7693 int tgt = sd->sd_address.a_target;
7694 int lun = sd->sd_address.a_lun;
7695 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
7696 int rval;
7697
7698 con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: scsi_device t%dL%d", tgt, lun));
7699 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
7700 NULL, &nodename, &compatible, &ncompatible);
7701
7702 if (nodename == NULL) {
7703 con_log(CL_ANN1, (CE_WARN, "mr_sas: Found no compatible driver "
7704 "for t%dL%d", tgt, lun));
7705 rval = NDI_FAILURE;
7706 goto finish;
7707 }
7708
7709 childname = (dtype == DTYPE_DIRECT) ? "sd" : nodename;
7710 con_log(CL_DLEVEL1, (CE_NOTE,
7711 "mr_sas: Childname = %2s nodename = %s", childname, nodename));
7712
7713 /* Create a dev node */
7714 rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip);
7715 con_log(CL_DLEVEL1, (CE_NOTE,
7716 "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval));
7717 if (rval == NDI_SUCCESS) {
7718 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) !=
7719 DDI_PROP_SUCCESS) {
7720 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7721 "property for t%dl%d target", tgt, lun));
7722 rval = NDI_FAILURE;
7723 goto finish;
7724 }
7725 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) !=
7726 DDI_PROP_SUCCESS) {
7727 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7728 "property for t%dl%d lun", tgt, lun));
7729 rval = NDI_FAILURE;
7730 goto finish;
7731 }
7732
7733 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
7734 "compatible", compatible, ncompatible) !=
7735 DDI_PROP_SUCCESS) {
7736 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7737 "property for t%dl%d compatible", tgt, lun));
7738 rval = NDI_FAILURE;
7739 goto finish;
7740 }
7741
7742 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
7743 if (rval != NDI_SUCCESS) {
7744 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to online "
7745 "t%dl%d", tgt, lun));
7746 ndi_prop_remove_all(ldip);
7747 (void) ndi_devi_free(ldip);
7748 } else {
7749 con_log(CL_ANN1, (CE_CONT, "mr_sas: online Done :"
7750 "0 t%dl%d", tgt, lun));
7751 }
7752
7753 }
7754 finish:
7755 if (dipp) {
7756 *dipp = ldip;
7757 }
7758
7759 con_log(CL_DLEVEL1, (CE_NOTE,
7760 "mr_sas: config_scsi_device rval = %d t%dL%d",
7761 rval, tgt, lun));
7762 scsi_hba_nodename_compatible_free(nodename, compatible);
7763 return (rval);
7764 }
7765
7766 /*ARGSUSED*/
7767 int
7768 mrsas_service_evt(struct mrsas_instance *instance, int tgt, int lun, int event,
7769 uint64_t wwn)
7770 {
7771 struct mrsas_eventinfo *mrevt = NULL;
7772
7773 con_log(CL_ANN1, (CE_NOTE,
7774 "mrsas_service_evt called for t%dl%d event = %d",
7775 tgt, lun, event));
7776
7777 if ((instance->taskq == NULL) || (mrevt =
7778 kmem_zalloc(sizeof (struct mrsas_eventinfo), KM_NOSLEEP)) == NULL) {
7779 return (ENOMEM);
7780 }
7781
7782 mrevt->instance = instance;
7783 mrevt->tgt = tgt;
7784 mrevt->lun = lun;
7785 mrevt->event = event;
7786 mrevt->wwn = wwn;
7787
7788 if ((ddi_taskq_dispatch(instance->taskq,
7789 (void (*)(void *))mrsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) !=
7790 DDI_SUCCESS) {
7791 con_log(CL_ANN1, (CE_NOTE,
7792 "mr_sas: Event task failed for t%dl%d event = %d",
7793 tgt, lun, event));
7794 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7795 return (DDI_FAILURE);
7796 }
7797 DTRACE_PROBE3(service_evt, int, tgt, int, lun, int, event);
7798 return (DDI_SUCCESS);
7799 }
7800
7801 static void
7802 mrsas_issue_evt_taskq(struct mrsas_eventinfo *mrevt)
7803 {
7804 struct mrsas_instance *instance = mrevt->instance;
7805 dev_info_t *dip, *pdip;
7806 int circ1 = 0;
7807 char *devname;
7808
7809 con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_evt_taskq: called for"
7810 " tgt %d lun %d event %d",
7811 mrevt->tgt, mrevt->lun, mrevt->event));
7812
7813 if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) {
7814 mutex_enter(&instance->config_dev_mtx);
7815 dip = instance->mr_ld_list[mrevt->tgt].dip;
7816 mutex_exit(&instance->config_dev_mtx);
7817 #ifdef PDSUPPORT
7818 } else {
7819 mutex_enter(&instance->config_dev_mtx);
7820 dip = instance->mr_tbolt_pd_list[mrevt->tgt].dip;
7821 mutex_exit(&instance->config_dev_mtx);
7822 #endif
7823 }
7824
7825
7826 ndi_devi_enter(instance->dip, &circ1);
7827 switch (mrevt->event) {
7828 case MRSAS_EVT_CONFIG_TGT:
7829 if (dip == NULL) {
7830
7831 if (mrevt->lun == 0) {
7832 (void) mrsas_config_ld(instance, mrevt->tgt,
7833 0, NULL);
7834 #ifdef PDSUPPORT
7835 } else if (instance->tbolt || instance->skinny) {
7836 (void) mrsas_tbolt_config_pd(instance,
7837 mrevt->tgt,
7838 1, NULL);
7839 #endif
7840 }
7841 con_log(CL_ANN1, (CE_NOTE,
7842 "mr_sas: EVT_CONFIG_TGT called:"
7843 " for tgt %d lun %d event %d",
7844 mrevt->tgt, mrevt->lun, mrevt->event));
7845
7846 } else {
7847 con_log(CL_ANN1, (CE_NOTE,
7848 "mr_sas: EVT_CONFIG_TGT dip != NULL:"
7849 " for tgt %d lun %d event %d",
7850 mrevt->tgt, mrevt->lun, mrevt->event));
7851 }
7852 break;
7853 case MRSAS_EVT_UNCONFIG_TGT:
7854 if (dip) {
7855 if (i_ddi_devi_attached(dip)) {
7856
7857 pdip = ddi_get_parent(dip);
7858
7859 devname = kmem_zalloc(MAXNAMELEN + 1, KM_SLEEP);
7860 (void) ddi_deviname(dip, devname);
7861
7862 (void) devfs_clean(pdip, devname + 1,
7863 DV_CLEAN_FORCE);
7864 kmem_free(devname, MAXNAMELEN + 1);
7865 }
7866 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
7867 con_log(CL_ANN1, (CE_NOTE,
7868 "mr_sas: EVT_UNCONFIG_TGT called:"
7869 " for tgt %d lun %d event %d",
7870 mrevt->tgt, mrevt->lun, mrevt->event));
7871 } else {
7872 con_log(CL_ANN1, (CE_NOTE,
7873 "mr_sas: EVT_UNCONFIG_TGT dip == NULL:"
7874 " for tgt %d lun %d event %d",
7875 mrevt->tgt, mrevt->lun, mrevt->event));
7876 }
7877 break;
7878 }
7879 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7880 ndi_devi_exit(instance->dip, circ1);
7881 }
7882
7883
7884 int
7885 mrsas_mode_sense_build(struct scsi_pkt *pkt)
7886 {
7887 union scsi_cdb *cdbp;
7888 uint16_t page_code;
7889 struct scsa_cmd *acmd;
7890 struct buf *bp;
7891 struct mode_header *modehdrp;
7892
7893 cdbp = (void *)pkt->pkt_cdbp;
7894 page_code = cdbp->cdb_un.sg.scsi[0];
7895 acmd = PKT2CMD(pkt);
7896 bp = acmd->cmd_buf;
7897 if ((!bp) && bp->b_un.b_addr && bp->b_bcount && acmd->cmd_dmacount) {
7898 con_log(CL_ANN1, (CE_WARN, "Failing MODESENSE Command"));
7899 /* ADD pkt statistics as Command failed. */
7900 return (NULL);
7901 }
7902
7903 bp_mapin(bp);
7904 bzero(bp->b_un.b_addr, bp->b_bcount);
7905
7906 switch (page_code) {
7907 case 0x3: {
7908 struct mode_format *page3p = NULL;
7909 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7910 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7911
7912 page3p = (void *)((caddr_t)modehdrp +
7913 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7914 page3p->mode_page.code = 0x3;
7915 page3p->mode_page.length =
7916 (uchar_t)(sizeof (struct mode_format));
7917 page3p->data_bytes_sect = 512;
7918 page3p->sect_track = 63;
7919 break;
7920 }
7921 case 0x4: {
7922 struct mode_geometry *page4p = NULL;
7923 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7924 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7925
7926 page4p = (void *)((caddr_t)modehdrp +
7927 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7928 page4p->mode_page.code = 0x4;
7929 page4p->mode_page.length =
7930 (uchar_t)(sizeof (struct mode_geometry));
7931 page4p->heads = 255;
7932 page4p->rpm = 10000;
7933 break;
7934 }
7935 default:
7936 break;
7937 }
7938 return (NULL);
7939 }