7127 remove -Wno-missing-braces from Makefile.uts
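With -Wno-missing-braces dropped from Makefile.uts, gcc's -Wmissing-braces warning takes effect, and the modlinkage initializer hunk below adds the inner braces that warning asks for around the ml_linkage array member. The following is an illustrative sketch only -- the struct names are stand-ins, not the real <sys/modctl.h> definitions:

/*
 * Illustrative sketch, not the driver's code: an array member inside a
 * struct needs its own braces to avoid -Wmissing-braces.
 */
struct modldrv_like {
	int	placeholder;
};

struct modlinkage_like {
	int	 ml_rev;
	void	*ml_linkage[4];		/* NULL-terminated list of linkage structs */
};

static struct modldrv_like mld;

/* Flat initializer: gcc -Wmissing-braces warns that the array lacks inner braces. */
static struct modlinkage_like before = { 1, &mld, NULL };

/* Fully braced initializer, the form the hunk below switches to; compiles cleanly. */
static struct modlinkage_like after = { 1, { &mld, NULL } };

Either form sets ml_linkage[0] to &mld and ml_linkage[1] to NULL; the change is purely to satisfy the warning.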
--- old/usr/src/uts/common/io/mega_sas/megaraid_sas.c
+++ new/usr/src/uts/common/io/mega_sas/megaraid_sas.c
1 1 /*
2 2 * megaraid_sas.c: source for mega_sas driver
3 3 *
4 4 * MegaRAID device driver for SAS controllers
5 5 * Copyright (c) 2005-2008, LSI Logic Corporation.
6 6 * All rights reserved.
7 7 *
8 8 * Version:
9 9 * Author:
10 10 * Rajesh Prabhakaran<Rajesh.Prabhakaran@lsil.com>
11 11 * Seokmann Ju
12 12 *
13 13 * Redistribution and use in source and binary forms, with or without
14 14 * modification, are permitted provided that the following conditions are met:
15 15 *
16 16 * 1. Redistributions of source code must retain the above copyright notice,
17 17 * this list of conditions and the following disclaimer.
18 18 *
19 19 * 2. Redistributions in binary form must reproduce the above copyright notice,
20 20 * this list of conditions and the following disclaimer in the documentation
21 21 * and/or other materials provided with the distribution.
22 22 *
23 23 * 3. Neither the name of the author nor the names of its contributors may be
24 24 * used to endorse or promote products derived from this software without
25 25 * specific prior written permission.
26 26 *
27 27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
30 30 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
31 31 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
32 32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
33 33 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
34 34 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
35 35 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
36 36 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
37 37 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 38 * DAMAGE.
39 39 */
40 40
41 41 /*
42 42 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
43 43 * Use is subject to license terms.
44 44 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
45 45 */
46 46
47 47 #include <sys/types.h>
48 48 #include <sys/param.h>
49 49 #include <sys/file.h>
50 50 #include <sys/errno.h>
51 51 #include <sys/open.h>
52 52 #include <sys/cred.h>
53 53 #include <sys/modctl.h>
54 54 #include <sys/conf.h>
55 55 #include <sys/devops.h>
56 56 #include <sys/cmn_err.h>
57 57 #include <sys/kmem.h>
58 58 #include <sys/stat.h>
59 59 #include <sys/mkdev.h>
60 60 #include <sys/pci.h>
61 61 #include <sys/scsi/scsi.h>
62 62 #include <sys/ddi.h>
63 63 #include <sys/sunddi.h>
64 64 #include <sys/atomic.h>
65 65 #include <sys/signal.h>
66 66
67 67 #include "megaraid_sas.h"
68 68
69 69 /*
70 70 * FMA header files
71 71 */
72 72 #include <sys/ddifm.h>
73 73 #include <sys/fm/protocol.h>
74 74 #include <sys/fm/util.h>
75 75 #include <sys/fm/io/ddi.h>
76 76
77 77 /*
78 78 * Local static data
79 79 */
80 80 static void *megasas_state = NULL;
81 81 static int debug_level_g = CL_ANN;
82 82
83 83 #pragma weak scsi_hba_open
84 84 #pragma weak scsi_hba_close
85 85 #pragma weak scsi_hba_ioctl
86 86
87 87 static ddi_dma_attr_t megasas_generic_dma_attr = {
88 88 DMA_ATTR_V0, /* dma_attr_version */
89 89 0, /* low DMA address range */
90 90 0xFFFFFFFFU, /* high DMA address range */
91 91 0xFFFFFFFFU, /* DMA counter register */
92 92 8, /* DMA address alignment */
93 93 0x07, /* DMA burstsizes */
94 94 1, /* min DMA size */
95 95 0xFFFFFFFFU, /* max DMA size */
96 96 0xFFFFFFFFU, /* segment boundary */
97 97 MEGASAS_MAX_SGE_CNT, /* dma_attr_sglen */
98 98 512, /* granularity of device */
99 99 0 /* bus specific DMA flags */
100 100 };
101 101
102 102 int32_t megasas_max_cap_maxxfer = 0x1000000;
103 103
104 104 /*
105 105 * cb_ops contains base level routines
106 106 */
107 107 static struct cb_ops megasas_cb_ops = {
108 108 megasas_open, /* open */
109 109 megasas_close, /* close */
110 110 nodev, /* strategy */
111 111 nodev, /* print */
112 112 nodev, /* dump */
113 113 nodev, /* read */
114 114 nodev, /* write */
115 115 megasas_ioctl, /* ioctl */
116 116 nodev, /* devmap */
117 117 nodev, /* mmap */
118 118 nodev, /* segmap */
119 119 nochpoll, /* poll */
120 120 nodev, /* cb_prop_op */
121 121 0, /* streamtab */
122 122 D_NEW | D_HOTPLUG, /* cb_flag */
123 123 CB_REV, /* cb_rev */
124 124 nodev, /* cb_aread */
125 125 nodev /* cb_awrite */
126 126 };
127 127
128 128 /*
129 129 * dev_ops contains configuration routines
130 130 */
131 131 static struct dev_ops megasas_ops = {
132 132 DEVO_REV, /* rev, */
133 133 0, /* refcnt */
134 134 megasas_getinfo, /* getinfo */
135 135 nulldev, /* identify */
136 136 nulldev, /* probe */
137 137 megasas_attach, /* attach */
138 138 megasas_detach, /* detach */
139 139 megasas_reset, /* reset */
140 140 &megasas_cb_ops, /* char/block ops */
141 141 NULL, /* bus ops */
142 142 NULL, /* power */
143 143 ddi_quiesce_not_supported, /* devo_quiesce */
144 144 };
145 145
146 146 static struct modldrv modldrv = {
147 147 &mod_driverops, /* module type - driver */
148 148 MEGASAS_VERSION,
149 149 &megasas_ops, /* driver ops */
150 150 };
151 151
152 152 static struct modlinkage modlinkage = {
153 - MODREV_1, /* ml_rev - must be MODREV_1 */
154 - &modldrv, /* ml_linkage */
155 - NULL /* end of driver linkage */
153 + MODREV_1, /* ml_rev - must be MODREV_1 */
154 + { &modldrv, NULL } /* ml_linkage */
156 155 };
157 156
158 157 static struct ddi_device_acc_attr endian_attr = {
159 158 DDI_DEVICE_ATTR_V1,
160 159 DDI_STRUCTURE_LE_ACC,
161 160 DDI_STRICTORDER_ACC,
162 161 DDI_DEFAULT_ACC
163 162 };
164 163
165 164
166 165 /*
167 166 * ************************************************************************** *
168 167 * *
169 168 * common entry points - for loadable kernel modules *
170 169 * *
171 170 * ************************************************************************** *
172 171 */
173 172
174 173 /*
175 174 * _init - initialize a loadable module
176 175 * @void
177 176 *
178 177 * The driver should perform any one-time resource allocation or data
179 178 * initialization during driver loading in _init(). For example, the driver
180 179 * should initialize any mutexes global to the driver in this routine.
181 180 * The driver should not, however, use _init() to allocate or initialize
182 181 * anything that has to do with a particular instance of the device.
183 182 * Per-instance initialization must be done in attach().
184 183 */
185 184 int
186 185 _init(void)
187 186 {
188 187 int ret;
189 188
190 189 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
191 190
192 191 ret = ddi_soft_state_init(&megasas_state,
193 192 sizeof (struct megasas_instance), 0);
194 193
195 194 if (ret != 0) {
196 195 con_log(CL_ANN, (CE_WARN, "megaraid: could not init state"));
197 196 return (ret);
198 197 }
199 198
200 199 if ((ret = scsi_hba_init(&modlinkage)) != 0) {
201 200 con_log(CL_ANN, (CE_WARN, "megaraid: could not init scsi hba"));
202 201 ddi_soft_state_fini(&megasas_state);
203 202 return (ret);
204 203 }
205 204
206 205 ret = mod_install(&modlinkage);
207 206
208 207 if (ret != 0) {
209 208 con_log(CL_ANN, (CE_WARN, "megaraid: mod_install failed"));
210 209 scsi_hba_fini(&modlinkage);
211 210 ddi_soft_state_fini(&megasas_state);
212 211 }
213 212
214 213 return (ret);
215 214 }
216 215
217 216 /*
218 217 * _info - returns information about a loadable module.
219 218 * @void
220 219 *
221 220 * _info() is called to return module information. This is a typical entry
222 221 * point that does predefined role. It simply calls mod_info().
223 222 */
224 223 int
225 224 _info(struct modinfo *modinfop)
226 225 {
227 226 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
228 227
229 228 return (mod_info(&modlinkage, modinfop));
230 229 }
231 230
232 231 /*
233 232 * _fini - prepare a loadable module for unloading
234 233 * @void
235 234 *
236 235 * In _fini(), the driver should release any resources that were allocated in
237 236 * _init(). The driver must remove itself from the system module list.
238 237 */
239 238 int
240 239 _fini(void)
241 240 {
242 241 int ret;
243 242
244 243 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
245 244
246 245 if ((ret = mod_remove(&modlinkage)) != 0)
247 246 return (ret);
248 247
249 248 scsi_hba_fini(&modlinkage);
250 249
251 250 ddi_soft_state_fini(&megasas_state);
252 251
253 252 return (ret);
254 253 }
255 254
256 255
257 256 /*
258 257 * ************************************************************************** *
259 258 * *
260 259 * common entry points - for autoconfiguration *
261 260 * *
262 261 * ************************************************************************** *
263 262 */
264 263 /*
265 264 * attach - adds a device to the system as part of initialization
266 265 * @dip:
267 266 * @cmd:
268 267 *
269 268 * The kernel calls a driver's attach() entry point to attach an instance of
270 269 * a device (for MegaRAID, it is instance of a controller) or to resume
271 270 * operation for an instance of a device that has been suspended or has been
272 271 * shut down by the power management framework
273 272 * The attach() entry point typically includes the following types of
274 273 * processing:
275 274 * - allocate a soft-state structure for the device instance (for MegaRAID,
276 275 * controller instance)
277 276 * - initialize per-instance mutexes
278 277 * - initialize condition variables
279 278 * - register the device's interrupts (for MegaRAID, controller's interrupts)
280 279 * - map the registers and memory of the device instance (for MegaRAID,
281 280 * controller instance)
282 281 * - create minor device nodes for the device instance (for MegaRAID,
283 282 * controller instance)
284 283 * - report that the device instance (for MegaRAID, controller instance) has
285 284 * attached
286 285 */
287 286 static int
288 287 megasas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
289 288 {
290 289 int instance_no;
291 290 int nregs;
292 291 uint8_t added_isr_f = 0;
293 292 uint8_t added_soft_isr_f = 0;
294 293 uint8_t create_devctl_node_f = 0;
295 294 uint8_t create_scsi_node_f = 0;
296 295 uint8_t create_ioc_node_f = 0;
297 296 uint8_t tran_alloc_f = 0;
298 297 uint8_t irq;
299 298 uint16_t vendor_id;
300 299 uint16_t device_id;
301 300 uint16_t subsysvid;
302 301 uint16_t subsysid;
303 302 uint16_t command;
304 303
305 304 scsi_hba_tran_t *tran;
306 305 ddi_dma_attr_t tran_dma_attr;
307 306 struct megasas_instance *instance;
308 307
309 308 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
310 309
311 310 /* CONSTCOND */
312 311 ASSERT(NO_COMPETING_THREADS);
313 312
314 313 instance_no = ddi_get_instance(dip);
315 314
316 315 /*
317 316 * Since we know that some instantiations of this device can be
318 317 * plugged into slave-only SBus slots, check to see whether this is
319 318 * one such.
320 319 */
321 320 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
322 321 con_log(CL_ANN, (CE_WARN,
323 322 "mega%d: Device in slave-only slot, unused", instance_no));
324 323 return (DDI_FAILURE);
325 324 }
326 325
327 326 switch (cmd) {
328 327 case DDI_ATTACH:
329 328 con_log(CL_DLEVEL1, (CE_NOTE, "megasas: DDI_ATTACH"));
330 329 /* allocate the soft state for the instance */
331 330 if (ddi_soft_state_zalloc(megasas_state, instance_no)
332 331 != DDI_SUCCESS) {
333 332 con_log(CL_ANN, (CE_WARN,
334 333 "mega%d: Failed to allocate soft state",
335 334 instance_no));
336 335
337 336 return (DDI_FAILURE);
338 337 }
339 338
340 339 instance = (struct megasas_instance *)ddi_get_soft_state
341 340 (megasas_state, instance_no);
342 341
343 342 if (instance == NULL) {
344 343 con_log(CL_ANN, (CE_WARN,
345 344 "mega%d: Bad soft state", instance_no));
346 345
347 346 ddi_soft_state_free(megasas_state, instance_no);
348 347
349 348 return (DDI_FAILURE);
350 349 }
351 350
352 351 bzero((caddr_t)instance,
353 352 sizeof (struct megasas_instance));
354 353
355 354 instance->func_ptr = kmem_zalloc(
356 355 sizeof (struct megasas_func_ptr), KM_SLEEP);
357 356 ASSERT(instance->func_ptr);
358 357
359 358 /* Setup the PCI configuration space handles */
360 359 if (pci_config_setup(dip, &instance->pci_handle) !=
361 360 DDI_SUCCESS) {
362 361 con_log(CL_ANN, (CE_WARN,
363 362 "mega%d: pci config setup failed ",
364 363 instance_no));
365 364
366 365 kmem_free(instance->func_ptr,
367 366 sizeof (struct megasas_func_ptr));
368 367 ddi_soft_state_free(megasas_state, instance_no);
369 368
370 369 return (DDI_FAILURE);
371 370 }
372 371
373 372 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
374 373 con_log(CL_ANN, (CE_WARN,
375 374 "megaraid: failed to get registers."));
376 375
377 376 pci_config_teardown(&instance->pci_handle);
378 377 kmem_free(instance->func_ptr,
379 378 sizeof (struct megasas_func_ptr));
380 379 ddi_soft_state_free(megasas_state, instance_no);
381 380
382 381 return (DDI_FAILURE);
383 382 }
384 383
385 384 vendor_id = pci_config_get16(instance->pci_handle,
386 385 PCI_CONF_VENID);
387 386 device_id = pci_config_get16(instance->pci_handle,
388 387 PCI_CONF_DEVID);
389 388
390 389 subsysvid = pci_config_get16(instance->pci_handle,
391 390 PCI_CONF_SUBVENID);
392 391 subsysid = pci_config_get16(instance->pci_handle,
393 392 PCI_CONF_SUBSYSID);
394 393
395 394 pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
396 395 (pci_config_get16(instance->pci_handle,
397 396 PCI_CONF_COMM) | PCI_COMM_ME));
398 397 irq = pci_config_get8(instance->pci_handle,
399 398 PCI_CONF_ILINE);
400 399
401 400 con_log(CL_DLEVEL1, (CE_CONT, "megasas%d: "
402 401 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s\n",
403 402 instance_no, vendor_id, device_id, subsysvid,
404 403 subsysid, irq, MEGASAS_VERSION));
405 404
406 405 /* enable bus-mastering */
407 406 command = pci_config_get16(instance->pci_handle,
408 407 PCI_CONF_COMM);
409 408
410 409 if (!(command & PCI_COMM_ME)) {
411 410 command |= PCI_COMM_ME;
412 411
413 412 pci_config_put16(instance->pci_handle,
414 413 PCI_CONF_COMM, command);
415 414
416 415 con_log(CL_ANN, (CE_CONT, "megaraid%d: "
417 416 "enable bus-mastering\n", instance_no));
418 417 } else {
419 418 con_log(CL_DLEVEL1, (CE_CONT, "megaraid%d: "
420 419 "bus-mastering already set\n", instance_no));
421 420 }
422 421
423 422 /* initialize function pointers */
424 423 if ((device_id == PCI_DEVICE_ID_LSI_1078) ||
425 424 (device_id == PCI_DEVICE_ID_LSI_1078DE)) {
426 425 con_log(CL_DLEVEL1, (CE_CONT, "megasas%d: "
427 426 "1078R/DE detected\n", instance_no));
428 427 instance->func_ptr->read_fw_status_reg =
429 428 read_fw_status_reg_ppc;
430 429 instance->func_ptr->issue_cmd = issue_cmd_ppc;
431 430 instance->func_ptr->issue_cmd_in_sync_mode =
432 431 issue_cmd_in_sync_mode_ppc;
433 432 instance->func_ptr->issue_cmd_in_poll_mode =
434 433 issue_cmd_in_poll_mode_ppc;
435 434 instance->func_ptr->enable_intr =
436 435 enable_intr_ppc;
437 436 instance->func_ptr->disable_intr =
438 437 disable_intr_ppc;
439 438 instance->func_ptr->intr_ack = intr_ack_ppc;
440 439 } else {
441 440 con_log(CL_DLEVEL1, (CE_CONT, "megasas%d: "
442 441 "1064/8R detected\n", instance_no));
443 442 instance->func_ptr->read_fw_status_reg =
444 443 read_fw_status_reg_xscale;
445 444 instance->func_ptr->issue_cmd =
446 445 issue_cmd_xscale;
447 446 instance->func_ptr->issue_cmd_in_sync_mode =
448 447 issue_cmd_in_sync_mode_xscale;
449 448 instance->func_ptr->issue_cmd_in_poll_mode =
450 449 issue_cmd_in_poll_mode_xscale;
451 450 instance->func_ptr->enable_intr =
452 451 enable_intr_xscale;
453 452 instance->func_ptr->disable_intr =
454 453 disable_intr_xscale;
455 454 instance->func_ptr->intr_ack =
456 455 intr_ack_xscale;
457 456 }
458 457
459 458 instance->baseaddress = pci_config_get32(
460 459 instance->pci_handle, PCI_CONF_BASE0);
461 460 instance->baseaddress &= 0x0fffc;
462 461
463 462 instance->dip = dip;
464 463 instance->vendor_id = vendor_id;
465 464 instance->device_id = device_id;
466 465 instance->subsysvid = subsysvid;
467 466 instance->subsysid = subsysid;
468 467
469 468 /* Initialize FMA */
470 469 instance->fm_capabilities = ddi_prop_get_int(
471 470 DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS,
472 471 "fm-capable", DDI_FM_EREPORT_CAPABLE |
473 472 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE
474 473 | DDI_FM_ERRCB_CAPABLE);
475 474
476 475 megasas_fm_init(instance);
477 476
478 477 /* setup the mfi based low level driver */
479 478 if (init_mfi(instance) != DDI_SUCCESS) {
480 479 con_log(CL_ANN, (CE_WARN, "megaraid: "
481 480 "could not initialize the low level driver"));
482 481
483 482 goto fail_attach;
484 483 }
485 484
486 485 /*
487 486 * Allocate the interrupt blocking cookie.
488 487 * It represents the information the framework
489 488 * needs to block interrupts. This cookie will
490 489  * be used by the locks shared across our ISR.
491 490 * These locks must be initialized before we
492 491 * register our ISR.
493 492 * ddi_add_intr(9F)
494 493 */
495 494 if (ddi_get_iblock_cookie(dip, 0,
496 495 &instance->iblock_cookie) != DDI_SUCCESS) {
497 496
498 497 goto fail_attach;
499 498 }
500 499
501 500 if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_HIGH,
502 501 &instance->soft_iblock_cookie) != DDI_SUCCESS) {
503 502
504 503 goto fail_attach;
505 504 }
506 505
507 506 /*
508 507 * Initialize the driver mutexes common to
509 508 * normal/high level isr
510 509 */
511 510 if (ddi_intr_hilevel(dip, 0)) {
512 511 instance->isr_level = HIGH_LEVEL_INTR;
513 512 mutex_init(&instance->cmd_pool_mtx,
514 513 "cmd_pool_mtx", MUTEX_DRIVER,
515 514 instance->soft_iblock_cookie);
516 515 mutex_init(&instance->cmd_pend_mtx,
517 516 "cmd_pend_mtx", MUTEX_DRIVER,
518 517 instance->soft_iblock_cookie);
519 518 } else {
520 519 /*
521 520 * Initialize the driver mutexes
522 521 * specific to soft-isr
523 522 */
524 523 instance->isr_level = NORMAL_LEVEL_INTR;
525 524 mutex_init(&instance->cmd_pool_mtx,
526 525 "cmd_pool_mtx", MUTEX_DRIVER,
527 526 instance->iblock_cookie);
528 527 mutex_init(&instance->cmd_pend_mtx,
529 528 "cmd_pend_mtx", MUTEX_DRIVER,
530 529 instance->iblock_cookie);
531 530 }
532 531
533 532 mutex_init(&instance->completed_pool_mtx,
534 533 "completed_pool_mtx", MUTEX_DRIVER,
535 534 instance->iblock_cookie);
536 535 mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx",
537 536 MUTEX_DRIVER, instance->iblock_cookie);
538 537 mutex_init(&instance->aen_cmd_mtx, "aen_cmd_mtx",
539 538 MUTEX_DRIVER, instance->iblock_cookie);
540 539 mutex_init(&instance->abort_cmd_mtx, "abort_cmd_mtx",
541 540 MUTEX_DRIVER, instance->iblock_cookie);
542 541
543 542 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
544 543 cv_init(&instance->abort_cmd_cv, NULL, CV_DRIVER, NULL);
545 544
546 545 INIT_LIST_HEAD(&instance->completed_pool_list);
547 546
548 547 /* Register our isr. */
549 548 if (ddi_add_intr(dip, 0, NULL, NULL, megasas_isr,
550 549 (caddr_t)instance) != DDI_SUCCESS) {
551 550 con_log(CL_ANN, (CE_WARN,
552 551 " ISR did not register"));
553 552
554 553 goto fail_attach;
555 554 }
556 555
557 556 added_isr_f = 1;
558 557
559 558 /* Register our soft-isr for highlevel interrupts. */
560 559 if (instance->isr_level == HIGH_LEVEL_INTR) {
561 560 if (ddi_add_softintr(dip, DDI_SOFTINT_HIGH,
562 561 &instance->soft_intr_id, NULL, NULL,
563 562 megasas_softintr, (caddr_t)instance) !=
564 563 DDI_SUCCESS) {
565 564 con_log(CL_ANN, (CE_WARN,
566 565 " Software ISR did not register"));
567 566
568 567 goto fail_attach;
569 568 }
570 569
571 570 added_soft_isr_f = 1;
572 571 }
573 572
574 573 /* Allocate a transport structure */
575 574 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
576 575
577 576 if (tran == NULL) {
578 577 con_log(CL_ANN, (CE_WARN,
579 578 "scsi_hba_tran_alloc failed"));
580 579 goto fail_attach;
581 580 }
582 581
583 582 tran_alloc_f = 1;
584 583
585 584 instance->tran = tran;
586 585
587 586 tran->tran_hba_private = instance;
588 587 tran->tran_tgt_private = NULL;
589 588 tran->tran_tgt_init = megasas_tran_tgt_init;
590 589 tran->tran_tgt_probe = scsi_hba_probe;
591 590 tran->tran_tgt_free = (void (*)())NULL;
592 591 tran->tran_init_pkt = megasas_tran_init_pkt;
593 592 tran->tran_start = megasas_tran_start;
594 593 tran->tran_abort = megasas_tran_abort;
595 594 tran->tran_reset = megasas_tran_reset;
596 595 tran->tran_bus_reset = megasas_tran_bus_reset;
597 596 tran->tran_getcap = megasas_tran_getcap;
598 597 tran->tran_setcap = megasas_tran_setcap;
599 598 tran->tran_destroy_pkt = megasas_tran_destroy_pkt;
600 599 tran->tran_dmafree = megasas_tran_dmafree;
601 600 tran->tran_sync_pkt = megasas_tran_sync_pkt;
602 601 tran->tran_reset_notify = NULL;
603 602 tran->tran_quiesce = megasas_tran_quiesce;
604 603 tran->tran_unquiesce = megasas_tran_unquiesce;
605 604
606 605 tran_dma_attr = megasas_generic_dma_attr;
607 606 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;
608 607
609 608 /* Attach this instance of the hba */
610 609 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
611 610 != DDI_SUCCESS) {
612 611 con_log(CL_ANN, (CE_WARN,
613 612 "scsi_hba_attach failed\n"));
614 613
615 614 goto fail_attach;
616 615 }
617 616
618 617 /* create devctl node for cfgadm command */
619 618 if (ddi_create_minor_node(dip, "devctl",
620 619 S_IFCHR, INST2DEVCTL(instance_no),
621 620 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
622 621 con_log(CL_ANN, (CE_WARN,
623 622 "megaraid: failed to create devctl node."));
624 623
625 624 goto fail_attach;
626 625 }
627 626
628 627 create_devctl_node_f = 1;
629 628
630 629 /* create scsi node for cfgadm command */
631 630 if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
632 631 INST2SCSI(instance_no),
633 632 DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
634 633 DDI_FAILURE) {
635 634 con_log(CL_ANN, (CE_WARN,
636 635 "megaraid: failed to create scsi node."));
637 636
638 637 goto fail_attach;
639 638 }
640 639
641 640 create_scsi_node_f = 1;
642 641
643 642 (void) sprintf(instance->iocnode, "%d:lsirdctl",
644 643 instance_no);
645 644
646 645 /*
647 646 * Create a node for applications
648 647 * for issuing ioctl to the driver.
649 648 */
650 649 if (ddi_create_minor_node(dip, instance->iocnode,
651 650 S_IFCHR, INST2LSIRDCTL(instance_no),
652 651 DDI_PSEUDO, 0) == DDI_FAILURE) {
653 652 con_log(CL_ANN, (CE_WARN,
654 653 "megaraid: failed to create ioctl node."));
655 654
656 655 goto fail_attach;
657 656 }
658 657
659 658 create_ioc_node_f = 1;
660 659
661 660 /* enable interrupt */
662 661 instance->func_ptr->enable_intr(instance);
663 662
664 663 /* initiate AEN */
665 664 if (start_mfi_aen(instance)) {
666 665 con_log(CL_ANN, (CE_WARN,
667 666 "megaraid: failed to initiate AEN."));
668 667 goto fail_initiate_aen;
669 668 }
670 669
671 670 con_log(CL_DLEVEL1, (CE_NOTE,
672 671 "AEN started for instance %d.", instance_no));
673 672
674 673 /* Finally! We are on the air. */
675 674 ddi_report_dev(dip);
676 675
677 676 if (megasas_check_acc_handle(instance->regmap_handle) !=
678 677 DDI_SUCCESS) {
679 678 goto fail_attach;
680 679 }
681 680 if (megasas_check_acc_handle(instance->pci_handle) !=
682 681 DDI_SUCCESS) {
683 682 goto fail_attach;
684 683 }
685 684 break;
686 685 case DDI_PM_RESUME:
687 686 con_log(CL_ANN, (CE_NOTE,
688 687 "megasas: DDI_PM_RESUME"));
689 688 break;
690 689 case DDI_RESUME:
691 690 con_log(CL_ANN, (CE_NOTE,
692 691 "megasas: DDI_RESUME"));
693 692 break;
694 693 default:
695 694 con_log(CL_ANN, (CE_WARN,
696 695 "megasas: invalid attach cmd=%x", cmd));
697 696 return (DDI_FAILURE);
698 697 }
699 698
700 699 return (DDI_SUCCESS);
701 700
702 701 fail_initiate_aen:
703 702 fail_attach:
704 703 if (create_devctl_node_f) {
705 704 ddi_remove_minor_node(dip, "devctl");
706 705 }
707 706
708 707 if (create_scsi_node_f) {
709 708 ddi_remove_minor_node(dip, "scsi");
710 709 }
711 710
712 711 if (create_ioc_node_f) {
713 712 ddi_remove_minor_node(dip, instance->iocnode);
714 713 }
715 714
716 715 if (tran_alloc_f) {
717 716 scsi_hba_tran_free(tran);
718 717 }
719 718
720 719
721 720 if (added_soft_isr_f) {
722 721 ddi_remove_softintr(instance->soft_intr_id);
723 722 }
724 723
725 724 if (added_isr_f) {
726 725 ddi_remove_intr(dip, 0, instance->iblock_cookie);
727 726 }
728 727
729 728 megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
730 729 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
731 730
732 731 megasas_fm_fini(instance);
733 732
734 733 pci_config_teardown(&instance->pci_handle);
735 734
736 735 ddi_soft_state_free(megasas_state, instance_no);
737 736
738 737 con_log(CL_ANN, (CE_NOTE,
739 738 "megasas: return failure from mega_attach\n"));
740 739
741 740 return (DDI_FAILURE);
742 741 }
743 742
744 743 /*
745 744 * getinfo - gets device information
746 745 * @dip:
747 746 * @cmd:
748 747 * @arg:
749 748 * @resultp:
750 749 *
751 750 * The system calls getinfo() to obtain configuration information that only
752 751 * the driver knows. The mapping of minor numbers to device instance is
753 752 * entirely under the control of the driver. The system sometimes needs to ask
754 753 * the driver which device a particular dev_t represents.
755 754 * Given the device number return the devinfo pointer from the scsi_device
756 755 * structure.
757 756 */
758 757 /*ARGSUSED*/
759 758 static int
760 759 megasas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
761 760 {
762 761 int rval;
763 762 int megasas_minor = getminor((dev_t)arg);
764 763
765 764 struct megasas_instance *instance;
766 765
767 766 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
768 767
769 768 switch (cmd) {
770 769 case DDI_INFO_DEVT2DEVINFO:
771 770 instance = (struct megasas_instance *)
772 771 ddi_get_soft_state(megasas_state,
773 772 MINOR2INST(megasas_minor));
774 773
775 774 if (instance == NULL) {
776 775 *resultp = NULL;
777 776 rval = DDI_FAILURE;
778 777 } else {
779 778 *resultp = instance->dip;
780 779 rval = DDI_SUCCESS;
781 780 }
782 781 break;
783 782 case DDI_INFO_DEVT2INSTANCE:
784 783 *resultp = (void *)instance;
785 784 rval = DDI_SUCCESS;
786 785 break;
787 786 default:
788 787 *resultp = NULL;
789 788 rval = DDI_FAILURE;
790 789 }
791 790
792 791 return (rval);
793 792 }
794 793
795 794 /*
796 795 * detach - detaches a device from the system
797 796 * @dip: pointer to the device's dev_info structure
798 797 * @cmd: type of detach
799 798 *
800 799 * A driver's detach() entry point is called to detach an instance of a device
801 800 * that is bound to the driver. The entry point is called with the instance of
802 801 * the device node to be detached and with DDI_DETACH, which is specified as
803 802 * the cmd argument to the entry point.
804 803 * This routine is called during driver unload. We free all the allocated
805 804 * resources and call the corresponding LLD so that it can also release all
806 805 * its resources.
807 806 */
808 807 static int
809 808 megasas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
810 809 {
811 810 int instance_no;
812 811
813 812 struct megasas_instance *instance;
814 813
815 814 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
816 815
817 816 /* CONSTCOND */
818 817 ASSERT(NO_COMPETING_THREADS);
819 818
820 819 instance_no = ddi_get_instance(dip);
821 820
822 821 instance = (struct megasas_instance *)ddi_get_soft_state(megasas_state,
823 822 instance_no);
824 823
825 824 if (!instance) {
826 825 con_log(CL_ANN, (CE_WARN,
827 826 "megasas:%d could not get instance in detach",
828 827 instance_no));
829 828
830 829 return (DDI_FAILURE);
831 830 }
832 831
833 832 con_log(CL_ANN, (CE_NOTE,
834 833 "megasas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x\n",
835 834 instance_no, instance->vendor_id, instance->device_id,
836 835 instance->subsysvid, instance->subsysid));
837 836
838 837 switch (cmd) {
839 838 case DDI_DETACH:
840 839 con_log(CL_ANN, (CE_NOTE,
841 840 "megasas_detach: DDI_DETACH\n"));
842 841
843 842 if (scsi_hba_detach(dip) != DDI_SUCCESS) {
844 843 con_log(CL_ANN, (CE_WARN,
845 844 "megasas:%d failed to detach",
846 845 instance_no));
847 846
848 847 return (DDI_FAILURE);
849 848 }
850 849
851 850 scsi_hba_tran_free(instance->tran);
852 851
853 852 if (abort_aen_cmd(instance, instance->aen_cmd)) {
854 853 con_log(CL_ANN, (CE_WARN, "megasas_detach: "
855 854 "failed to abort prevous AEN command\n"));
856 855
857 856 return (DDI_FAILURE);
858 857 }
859 858
860 859 instance->func_ptr->disable_intr(instance);
861 860
862 861 if (instance->isr_level == HIGH_LEVEL_INTR) {
863 862 ddi_remove_softintr(instance->soft_intr_id);
864 863 }
865 864
866 865 ddi_remove_intr(dip, 0, instance->iblock_cookie);
867 866
868 867 free_space_for_mfi(instance);
869 868
870 869 megasas_fm_fini(instance);
871 870
872 871 pci_config_teardown(&instance->pci_handle);
873 872
874 873 kmem_free(instance->func_ptr,
875 874 sizeof (struct megasas_func_ptr));
876 875
877 876 ddi_soft_state_free(megasas_state, instance_no);
878 877 break;
879 878 case DDI_PM_SUSPEND:
880 879 con_log(CL_ANN, (CE_NOTE,
881 880 "megasas_detach: DDI_PM_SUSPEND\n"));
882 881
883 882 break;
884 883 case DDI_SUSPEND:
885 884 con_log(CL_ANN, (CE_NOTE,
886 885 "megasas_detach: DDI_SUSPEND\n"));
887 886
888 887 break;
889 888 default:
890 889 con_log(CL_ANN, (CE_WARN,
891 890 "invalid detach command:0x%x", cmd));
892 891 return (DDI_FAILURE);
893 892 }
894 893
895 894 return (DDI_SUCCESS);
896 895 }
897 896
898 897 /*
899 898 * ************************************************************************** *
900 899 * *
901 900 * common entry points - for character driver types *
902 901 * *
903 902 * ************************************************************************** *
904 903 */
905 904 /*
906 905 * open - gets access to a device
907 906 * @dev:
908 907 * @openflags:
909 908 * @otyp:
910 909 * @credp:
911 910 *
912 911 * Access to a device by one or more application programs is controlled
913 912 * through the open() and close() entry points. The primary function of
914 913 * open() is to verify that the open request is allowed.
915 914 */
916 915 static int
917 916 megasas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
918 917 {
919 918 int rval = 0;
920 919
921 920 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
922 921
923 922 /* Check root permissions */
924 923 if (drv_priv(credp) != 0) {
925 924 con_log(CL_ANN, (CE_WARN,
926 925 "megaraid: Non-root ioctl access tried!"));
927 926 return (EPERM);
928 927 }
929 928
930 929 /* Verify we are being opened as a character device */
931 930 if (otyp != OTYP_CHR) {
932 931 con_log(CL_ANN, (CE_WARN,
933 932 "megaraid: ioctl node must be a char node\n"));
934 933 return (EINVAL);
935 934 }
936 935
937 936 if (ddi_get_soft_state(megasas_state, MINOR2INST(getminor(*dev)))
938 937 == NULL) {
939 938 return (ENXIO);
940 939 }
941 940
942 941 if (scsi_hba_open) {
943 942 rval = scsi_hba_open(dev, openflags, otyp, credp);
944 943 }
945 944
946 945 return (rval);
947 946 }
948 947
949 948 /*
950 949 * close - gives up access to a device
951 950 * @dev:
952 951 * @openflags:
953 952 * @otyp:
954 953 * @credp:
955 954 *
956 955 * close() should perform any cleanup necessary to finish using the minor
957 956 * device, and prepare the device (and driver) to be opened again.
958 957 */
959 958 static int
960 959 megasas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
961 960 {
962 961 int rval = 0;
963 962
964 963 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
965 964
966 965 /* no need for locks! */
967 966
968 967 if (scsi_hba_close) {
969 968 rval = scsi_hba_close(dev, openflags, otyp, credp);
970 969 }
971 970
972 971 return (rval);
973 972 }
974 973
975 974 /*
976 975 * ioctl - performs a range of I/O commands for character drivers
977 976 * @dev:
978 977 * @cmd:
979 978 * @arg:
980 979 * @mode:
981 980 * @credp:
982 981 * @rvalp:
983 982 *
984 983 * ioctl() routine must make sure that user data is copied into or out of the
985 984 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
986 985 * and ddi_copyout(), as appropriate.
987 986 * This is a wrapper routine to serialize access to the actual ioctl routine.
988 987 * ioctl() should return 0 on success, or the appropriate error number. The
989 988 * driver may also set the value returned to the calling process through rvalp.
990 989 */
991 990 static int
992 991 megasas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
993 992 int *rvalp)
994 993 {
995 994 int rval = 0;
996 995
997 996 struct megasas_instance *instance;
998 997 struct megasas_ioctl ioctl;
999 998 struct megasas_aen aen;
1000 999
1001 1000 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1002 1001
1003 1002 instance = ddi_get_soft_state(megasas_state, MINOR2INST(getminor(dev)));
1004 1003
1005 1004 if (instance == NULL) {
1006 1005 /* invalid minor number */
1007 1006 con_log(CL_ANN, (CE_WARN, "megaraid: adapter not found."));
1008 1007 return (ENXIO);
1009 1008 }
1010 1009
1011 1010 switch ((uint_t)cmd) {
1012 1011 case MEGASAS_IOCTL_FIRMWARE:
1013 1012 if (ddi_copyin((void *) arg, &ioctl,
1014 1013 sizeof (struct megasas_ioctl), mode)) {
1015 1014 con_log(CL_ANN, (CE_WARN, "megasas_ioctl: "
1016 1015 "ERROR IOCTL copyin"));
1017 1016 return (EFAULT);
1018 1017 }
1019 1018
1020 1019 if (ioctl.control_code == MR_DRIVER_IOCTL_COMMON) {
1021 1020 rval = handle_drv_ioctl(instance, &ioctl, mode);
1022 1021 } else {
1023 1022 rval = handle_mfi_ioctl(instance, &ioctl, mode);
1024 1023 }
1025 1024
1026 1025 if (ddi_copyout((void *) &ioctl, (void *)arg,
1027 1026 (sizeof (struct megasas_ioctl) - 1), mode)) {
1028 1027 con_log(CL_ANN, (CE_WARN,
1029 1028 "megasas_ioctl: copy_to_user failed\n"));
1030 1029 rval = 1;
1031 1030 }
1032 1031
1033 1032 break;
1034 1033 case MEGASAS_IOCTL_AEN:
1035 1034 if (ddi_copyin((void *) arg, &aen,
1036 1035 sizeof (struct megasas_aen), mode)) {
1037 1036 con_log(CL_ANN, (CE_WARN,
1038 1037 "megasas_ioctl: ERROR AEN copyin"));
1039 1038 return (EFAULT);
1040 1039 }
1041 1040
1042 1041 rval = handle_mfi_aen(instance, &aen);
1043 1042
1044 1043 if (ddi_copyout((void *) &aen, (void *)arg,
1045 1044 sizeof (struct megasas_aen), mode)) {
1046 1045 con_log(CL_ANN, (CE_WARN,
1047 1046 "megasas_ioctl: copy_to_user failed\n"));
1048 1047 rval = 1;
1049 1048 }
1050 1049
1051 1050 break;
1052 1051 default:
1053 1052 rval = scsi_hba_ioctl(dev, cmd, arg,
1054 1053 mode, credp, rvalp);
1055 1054
1056 1055 con_log(CL_DLEVEL1, (CE_NOTE, "megasas_ioctl: "
1057 1056 "scsi_hba_ioctl called, ret = %x.", rval));
1058 1057 }
1059 1058
1060 1059 return (rval);
1061 1060 }
1062 1061
1063 1062 /*
1064 1063 * ************************************************************************** *
1065 1064 * *
1066 1065 * common entry points - for block driver types *
1067 1066 * *
1068 1067 * ************************************************************************** *
1069 1068 */
1070 1069 /*
1071 1070 * reset - TBD
1072 1071 * @dip:
1073 1072 * @cmd:
1074 1073 *
1075 1074 * TBD
1076 1075 */
1077 1076 /*ARGSUSED*/
1078 1077 static int
1079 1078 megasas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1080 1079 {
1081 1080 int instance_no;
1082 1081
1083 1082 struct megasas_instance *instance;
1084 1083
1085 1084 instance_no = ddi_get_instance(dip);
1086 1085 instance = (struct megasas_instance *)ddi_get_soft_state
1087 1086 (megasas_state, instance_no);
1088 1087
1089 1088 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1090 1089
1091 1090 if (!instance) {
1092 1091 con_log(CL_ANN, (CE_WARN,
1093 1092 "megaraid:%d could not get adapter in reset",
1094 1093 instance_no));
1095 1094 return (DDI_FAILURE);
1096 1095 }
1097 1096
1098 1097 con_log(CL_ANN, (CE_NOTE, "flushing cache for instance %d ..",
1099 1098 instance_no));
1100 1099
1101 1100 flush_cache(instance);
1102 1101
1103 1102 return (DDI_SUCCESS);
1104 1103 }
1105 1104
1106 1105
1107 1106 /*
1108 1107 * ************************************************************************** *
1109 1108 * *
1110 1109 * entry points (SCSI HBA) *
1111 1110 * *
1112 1111 * ************************************************************************** *
1113 1112 */
1114 1113 /*
1115 1114 * tran_tgt_init - initialize a target device instance
1116 1115 * @hba_dip:
1117 1116 * @tgt_dip:
1118 1117 * @tran:
1119 1118 * @sd:
1120 1119 *
1121 1120 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
1122 1121 * any per-target resources. tran_tgt_init() also enables the HBA to qualify
1123 1122 * the device's address as valid and supportable for that particular HBA.
1124 1123 * By returning DDI_FAILURE, the instance of the target driver for that device
1125 1124 * is not probed or attached.
1126 1125 */
1127 1126 /*ARGSUSED*/
1128 1127 static int
1129 1128 megasas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1130 1129 scsi_hba_tran_t *tran, struct scsi_device *sd)
1131 1130 {
1132 1131 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1133 1132
1134 1133 return (DDI_SUCCESS);
1135 1134 }
1136 1135
1137 1136 /*
1138 1137 * tran_init_pkt - allocate & initialize a scsi_pkt structure
1139 1138 * @ap:
1140 1139 * @pkt:
1141 1140 * @bp:
1142 1141 * @cmdlen:
1143 1142 * @statuslen:
1144 1143 * @tgtlen:
1145 1144 * @flags:
1146 1145 * @callback:
1147 1146 *
1148 1147 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1149 1148 * structure and DMA resources for a target driver request. The
1150 1149 * tran_init_pkt() entry point is called when the target driver calls the
1151 1150 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1152 1151 * is a request to perform one or more of three possible services:
1153 1152 * - allocation and initialization of a scsi_pkt structure
1154 1153 * - allocation of DMA resources for data transfer
1155 1154 * - reallocation of DMA resources for the next portion of the data transfer
1156 1155 */
1157 1156 static struct scsi_pkt *
1158 1157 megasas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
1159 1158 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1160 1159 int flags, int (*callback)(), caddr_t arg)
1161 1160 {
1162 1161 struct scsa_cmd *acmd;
1163 1162 struct megasas_instance *instance;
1164 1163 struct scsi_pkt *new_pkt;
1165 1164
1166 1165 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1167 1166
1168 1167 instance = ADDR2MEGA(ap);
1169 1168
1170 1169 /* step #1 : pkt allocation */
1171 1170 if (pkt == NULL) {
1172 1171 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
1173 1172 tgtlen, sizeof (struct scsa_cmd), callback, arg);
1174 1173 if (pkt == NULL) {
1175 1174 return (NULL);
1176 1175 }
1177 1176
1178 1177 acmd = PKT2CMD(pkt);
1179 1178
1180 1179 /*
1181 1180 * Initialize the new pkt - we redundantly initialize
1182 1181 * all the fields for illustrative purposes.
1183 1182 */
1184 1183 acmd->cmd_pkt = pkt;
1185 1184 acmd->cmd_flags = 0;
1186 1185 acmd->cmd_scblen = statuslen;
1187 1186 acmd->cmd_cdblen = cmdlen;
1188 1187 acmd->cmd_dmahandle = NULL;
1189 1188 acmd->cmd_ncookies = 0;
1190 1189 acmd->cmd_cookie = 0;
1191 1190 acmd->cmd_cookiecnt = 0;
1192 1191 acmd->cmd_nwin = 0;
1193 1192
1194 1193 pkt->pkt_address = *ap;
1195 1194 pkt->pkt_comp = (void (*)())NULL;
1196 1195 pkt->pkt_flags = 0;
1197 1196 pkt->pkt_time = 0;
1198 1197 pkt->pkt_resid = 0;
1199 1198 pkt->pkt_state = 0;
1200 1199 pkt->pkt_statistics = 0;
1201 1200 pkt->pkt_reason = 0;
1202 1201 new_pkt = pkt;
1203 1202 } else {
1204 1203 acmd = PKT2CMD(pkt);
1205 1204 new_pkt = NULL;
1206 1205 }
1207 1206
1208 1207 /* step #2 : dma allocation/move */
1209 1208 if (bp && bp->b_bcount != 0) {
1210 1209 if (acmd->cmd_dmahandle == NULL) {
1211 1210 if (megasas_dma_alloc(instance, pkt, bp, flags,
1212 1211 callback) == -1) {
1213 1212 if (new_pkt) {
1214 1213 scsi_hba_pkt_free(ap, new_pkt);
1215 1214 }
1216 1215
1217 1216 return ((struct scsi_pkt *)NULL);
1218 1217 }
1219 1218 } else {
1220 1219 if (megasas_dma_move(instance, pkt, bp) == -1) {
1221 1220 return ((struct scsi_pkt *)NULL);
1222 1221 }
1223 1222 }
1224 1223 }
1225 1224
1226 1225 return (pkt);
1227 1226 }
1228 1227
1229 1228 /*
1230 1229 * tran_start - transport a SCSI command to the addressed target
1231 1230 * @ap:
1232 1231 * @pkt:
1233 1232 *
1234 1233 * The tran_start() entry point for a SCSI HBA driver is called to transport a
1235 1234 * SCSI command to the addressed target. The SCSI command is described
1236 1235 * entirely within the scsi_pkt structure, which the target driver allocated
1237 1236 * through the HBA driver's tran_init_pkt() entry point. If the command
1238 1237 * involves a data transfer, DMA resources must also have been allocated for
1239 1238 * the scsi_pkt structure.
1240 1239 *
1241 1240 * Return Values :
1242 1241 * TRAN_BUSY - request queue is full, no more free scbs
1243 1242 * TRAN_ACCEPT - pkt has been submitted to the instance
1244 1243 */
1245 1244 static int
1246 1245 megasas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
1247 1246 {
1248 1247 uchar_t cmd_done = 0;
1249 1248
1250 1249 struct megasas_instance *instance = ADDR2MEGA(ap);
1251 1250 struct megasas_cmd *cmd;
1252 1251
1253 1252 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d:SCSI CDB[0]=0x%x",
1254 1253 __func__, __LINE__, pkt->pkt_cdbp[0]));
1255 1254
1256 1255 pkt->pkt_reason = CMD_CMPLT;
1257 1256 *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
1258 1257
1259 1258 cmd = build_cmd(instance, ap, pkt, &cmd_done);
1260 1259
1261 1260 /*
1262 1261 * Check if the command is already completed by the mega_build_cmd()
1263 1262 * routine. In which case the busy_flag would be clear and scb will be
1264 1263 * NULL and appropriate reason provided in pkt_reason field
1265 1264 */
1266 1265 if (cmd_done) {
1267 1266 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1268 1267 scsi_hba_pkt_comp(pkt);
1269 1268 }
1270 1269 pkt->pkt_reason = CMD_CMPLT;
1271 1270 pkt->pkt_scbp[0] = STATUS_GOOD;
1272 1271 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1273 1272 | STATE_SENT_CMD;
1274 1273 return (TRAN_ACCEPT);
1275 1274 }
1276 1275
1277 1276 if (cmd == NULL) {
1278 1277 return (TRAN_BUSY);
1279 1278 }
1280 1279
1281 1280 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1282 1281 if (instance->fw_outstanding > instance->max_fw_cmds) {
1283 1282 con_log(CL_ANN, (CE_CONT, "megasas:Firmware busy"));
1284 1283 return_mfi_pkt(instance, cmd);
1285 1284 return (TRAN_BUSY);
1286 1285 }
1287 1286
1288 1287 /* Synchronize the Cmd frame for the controller */
1289 1288 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1290 1289 DDI_DMA_SYNC_FORDEV);
1291 1290
1292 1291 instance->func_ptr->issue_cmd(cmd, instance);
1293 1292
1294 1293 } else {
1295 1294 struct megasas_header *hdr = &cmd->frame->hdr;
1296 1295
1297 1296 cmd->sync_cmd = MEGASAS_TRUE;
1298 1297
1299 1298 instance->func_ptr-> issue_cmd_in_poll_mode(instance, cmd);
1300 1299
1301 1300 pkt->pkt_reason = CMD_CMPLT;
1302 1301 pkt->pkt_statistics = 0;
1303 1302 pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;
1304 1303
1305 1304 switch (hdr->cmd_status) {
1306 1305 case MFI_STAT_OK:
1307 1306 pkt->pkt_scbp[0] = STATUS_GOOD;
1308 1307 break;
1309 1308
1310 1309 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1311 1310
1312 1311 pkt->pkt_reason = CMD_CMPLT;
1313 1312 pkt->pkt_statistics = 0;
1314 1313
1315 1314 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
1316 1315 break;
1317 1316
1318 1317 case MFI_STAT_DEVICE_NOT_FOUND:
1319 1318 pkt->pkt_reason = CMD_DEV_GONE;
1320 1319 pkt->pkt_statistics = STAT_DISCON;
1321 1320 break;
1322 1321
1323 1322 default:
1324 1323 ((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
1325 1324 }
1326 1325
1327 1326 return_mfi_pkt(instance, cmd);
1328 1327 (void) megasas_common_check(instance, cmd);
1329 1328
1330 1329 scsi_hba_pkt_comp(pkt);
1331 1330
1332 1331 }
1333 1332
1334 1333 return (TRAN_ACCEPT);
1335 1334 }
1336 1335
1337 1336 /*
1338 1337 * tran_abort - Abort any commands that are currently in transport
1339 1338 * @ap:
1340 1339 * @pkt:
1341 1340 *
1342 1341 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
1343 1342 * commands that are currently in transport for a particular target. This entry
1344 1343 * point is called when a target driver calls scsi_abort(). The tran_abort()
1345 1344 * entry point should attempt to abort the command denoted by the pkt
1346 1345 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
1347 1346 * abort all outstanding commands in the transport layer for the particular
1348 1347 * target or logical unit.
1349 1348 */
1350 1349 /*ARGSUSED*/
1351 1350 static int
1352 1351 megasas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1353 1352 {
1354 1353 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1355 1354
1356 1355 /* aborting command not supported by H/W */
1357 1356
1358 1357 return (DDI_FAILURE);
1359 1358 }
1360 1359
1361 1360 /*
1362 1361 * tran_reset - reset either the SCSI bus or target
1363 1362 * @ap:
1364 1363 * @level:
1365 1364 *
1366 1365 * The tran_reset() entry point for a SCSI HBA driver is called to reset either
1367 1366 * the SCSI bus or a particular SCSI target device. This entry point is called
1368 1367 * when a target driver calls scsi_reset(). The tran_reset() entry point must
1369 1368 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
1370 1369 * particular target or logical unit must be reset.
1371 1370 */
1372 1371 /*ARGSUSED*/
1373 1372 static int
1374 1373 megasas_tran_reset(struct scsi_address *ap, int level)
1375 1374 {
1376 1375 struct megasas_instance *instance = ADDR2MEGA(ap);
1377 1376
1378 1377 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1379 1378
1380 1379 if (wait_for_outstanding(instance)) {
1381 1380 return (DDI_FAILURE);
1382 1381 } else {
1383 1382 return (DDI_SUCCESS);
1384 1383 }
1385 1384 }
1386 1385
1387 1386 /*
1388 1387 * tran_bus_reset - reset the SCSI bus
1389 1388 * @dip:
1390 1389 * @level:
1391 1390 *
1392 1391 * The tran_bus_reset() vector in the scsi_hba_tran structure should be
1393 1392 * initialized during the HBA driver's attach(). The vector should point to
1394 1393 * an HBA entry point that is to be called when a user initiates a bus reset.
1395 1394 * Implementation is hardware specific. If the HBA driver cannot reset the
1396 1395 * SCSI bus without affecting the targets, the driver should fail RESET_BUS
1397 1396 * or not initialize this vector.
1398 1397 */
1399 1398 /*ARGSUSED*/
1400 1399 static int
1401 1400 megasas_tran_bus_reset(dev_info_t *dip, int level)
1402 1401 {
1403 1402 int instance_no = ddi_get_instance(dip);
1404 1403
1405 1404 struct megasas_instance *instance = ddi_get_soft_state(megasas_state,
1406 1405 instance_no);
1407 1406
1408 1407 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1409 1408
1410 1409 if (wait_for_outstanding(instance)) {
1411 1410 return (DDI_FAILURE);
1412 1411 } else {
1413 1412 return (DDI_SUCCESS);
1414 1413 }
1415 1414 }
1416 1415
1417 1416 /*
1418 1417 * tran_getcap - get one of a set of SCSA-defined capabilities
1419 1418 * @ap:
1420 1419 * @cap:
1421 1420 * @whom:
1422 1421 *
1423 1422 * The target driver can request the current setting of the capability for a
1424 1423 * particular target by setting the whom parameter to nonzero. A whom value of
1425 1424 * zero indicates a request for the current setting of the general capability
1426 1425 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
1427 1426 * for undefined capabilities or the current value of the requested capability.
1428 1427 */
1429 1428 /*ARGSUSED*/
1430 1429 static int
1431 1430 megasas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
1432 1431 {
1433 1432 int rval = 0;
1434 1433
1435 1434 struct megasas_instance *instance = ADDR2MEGA(ap);
1436 1435
1437 1436 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1438 1437
1439 1438 /* we do allow inquiring about capabilities for other targets */
1440 1439 if (cap == NULL) {
1441 1440 return (-1);
1442 1441 }
1443 1442
1444 1443 switch (scsi_hba_lookup_capstr(cap)) {
1445 1444 case SCSI_CAP_DMA_MAX:
1446 1445 /* Limit to 16MB max transfer */
1447 1446 rval = megasas_max_cap_maxxfer;
1448 1447 break;
1449 1448 case SCSI_CAP_MSG_OUT:
1450 1449 rval = 1;
1451 1450 break;
1452 1451 case SCSI_CAP_DISCONNECT:
1453 1452 rval = 0;
1454 1453 break;
1455 1454 case SCSI_CAP_SYNCHRONOUS:
1456 1455 rval = 0;
1457 1456 break;
1458 1457 case SCSI_CAP_WIDE_XFER:
1459 1458 rval = 1;
1460 1459 break;
1461 1460 case SCSI_CAP_TAGGED_QING:
1462 1461 rval = 1;
1463 1462 break;
1464 1463 case SCSI_CAP_UNTAGGED_QING:
1465 1464 rval = 1;
1466 1465 break;
1467 1466 case SCSI_CAP_PARITY:
1468 1467 rval = 1;
1469 1468 break;
1470 1469 case SCSI_CAP_INITIATOR_ID:
1471 1470 rval = instance->init_id;
1472 1471 break;
1473 1472 case SCSI_CAP_ARQ:
1474 1473 rval = 1;
1475 1474 break;
1476 1475 case SCSI_CAP_LINKED_CMDS:
1477 1476 rval = 0;
1478 1477 break;
1479 1478 case SCSI_CAP_RESET_NOTIFICATION:
1480 1479 rval = 1;
1481 1480 break;
1482 1481 case SCSI_CAP_GEOMETRY:
1483 1482 rval = -1;
1484 1483
1485 1484 break;
1486 1485 default:
1487 1486 con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
1488 1487 scsi_hba_lookup_capstr(cap)));
1489 1488 rval = -1;
1490 1489 break;
1491 1490 }
1492 1491
1493 1492 return (rval);
1494 1493 }
1495 1494
1496 1495 /*
1497 1496 * tran_setcap - set one of a set of SCSA-defined capabilities
1498 1497 * @ap:
1499 1498 * @cap:
1500 1499 * @value:
1501 1500 * @whom:
1502 1501 *
1503 1502 * The target driver might request that the new value be set for a particular
1504 1503 * target by setting the whom parameter to nonzero. A whom value of zero
1505 1504 * means that request is to set the new value for the SCSI bus or for adapter
1506 1505 * hardware in general.
1507 1506 * The tran_setcap() should return the following values as appropriate:
1508 1507 * - -1 for undefined capabilities
1509 1508 * - 0 if the HBA driver cannot set the capability to the requested value
1510 1509 * - 1 if the HBA driver is able to set the capability to the requested value
1511 1510 */
1512 1511 /*ARGSUSED*/
1513 1512 static int
1514 1513 megasas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
1515 1514 {
1516 1515 int rval = 1;
1517 1516
1518 1517 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1519 1518
1520 1519 /* We don't allow setting capabilities for other targets */
1521 1520 if (cap == NULL || whom == 0) {
1522 1521 return (-1);
1523 1522 }
1524 1523
1525 1524 switch (scsi_hba_lookup_capstr(cap)) {
1526 1525 case SCSI_CAP_DMA_MAX:
1527 1526 case SCSI_CAP_MSG_OUT:
1528 1527 case SCSI_CAP_PARITY:
1529 1528 case SCSI_CAP_LINKED_CMDS:
1530 1529 case SCSI_CAP_RESET_NOTIFICATION:
1531 1530 case SCSI_CAP_DISCONNECT:
1532 1531 case SCSI_CAP_SYNCHRONOUS:
1533 1532 case SCSI_CAP_UNTAGGED_QING:
1534 1533 case SCSI_CAP_WIDE_XFER:
1535 1534 case SCSI_CAP_INITIATOR_ID:
1536 1535 case SCSI_CAP_ARQ:
1537 1536 /*
1538 1537 * None of these are settable via
1539 1538 * the capability interface.
1540 1539 */
1541 1540 break;
1542 1541 case SCSI_CAP_TAGGED_QING:
1543 1542 rval = 1;
1544 1543 break;
1545 1544 case SCSI_CAP_SECTOR_SIZE:
1546 1545 rval = 1;
1547 1546 break;
1548 1547
1549 1548 case SCSI_CAP_TOTAL_SECTORS:
1550 1549 rval = 1;
1551 1550 break;
1552 1551 default:
1553 1552 rval = -1;
1554 1553 break;
1555 1554 }
1556 1555
1557 1556 return (rval);
1558 1557 }
1559 1558
1560 1559 /*
1561 1560 * tran_destroy_pkt - deallocate scsi_pkt structure
1562 1561 * @ap:
1563 1562 * @pkt:
1564 1563 *
1565 1564 * The tran_destroy_pkt() entry point is the HBA driver function that
1566 1565 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
1567 1566 * called when the target driver calls scsi_destroy_pkt(). The
1568 1567 * tran_destroy_pkt() entry point must free any DMA resources that have been
1569 1568 * allocated for the packet. An implicit DMA synchronization occurs if the
1570 1569 * DMA resources are freed and any cached data remains after the completion
1571 1570 * of the transfer.
1572 1571 */
1573 1572 static void
1574 1573 megasas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1575 1574 {
1576 1575 struct scsa_cmd *acmd = PKT2CMD(pkt);
1577 1576
1578 1577 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1579 1578
1580 1579 if (acmd->cmd_flags & CFLAG_DMAVALID) {
1581 1580 acmd->cmd_flags &= ~CFLAG_DMAVALID;
1582 1581
1583 1582 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
1584 1583
1585 1584 ddi_dma_free_handle(&acmd->cmd_dmahandle);
1586 1585
1587 1586 acmd->cmd_dmahandle = NULL;
1588 1587 }
1589 1588
1590 1589 /* free the pkt */
1591 1590 scsi_hba_pkt_free(ap, pkt);
1592 1591 }
1593 1592
1594 1593 /*
1595 1594 * tran_dmafree - deallocates DMA resources
1596 1595 * @ap:
1597 1596 * @pkt:
1598 1597 *
1599 1598  * The tran_dmafree() entry point deallocates DMA resources that have been
1600 1599 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
1601 1600 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
1602 1601 * free only DMA resources allocated for a scsi_pkt structure, not the
1603 1602 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
1604 1603 * implicitly performed.
1605 1604 */
1606 1605 /*ARGSUSED*/
1607 1606 static void
1608 1607 megasas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1609 1608 {
1610 1609 register struct scsa_cmd *acmd = PKT2CMD(pkt);
1611 1610
1612 1611 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1613 1612
1614 1613 if (acmd->cmd_flags & CFLAG_DMAVALID) {
1615 1614 acmd->cmd_flags &= ~CFLAG_DMAVALID;
1616 1615
1617 1616 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
1618 1617
1619 1618 ddi_dma_free_handle(&acmd->cmd_dmahandle);
1620 1619
1621 1620 acmd->cmd_dmahandle = NULL;
1622 1621 }
1623 1622 }
1624 1623
1625 1624 /*
1626 1625 * tran_sync_pkt - synchronize the DMA object allocated
1627 1626 * @ap:
1628 1627 * @pkt:
1629 1628 *
1630 1629 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
1631 1630 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
1632 1631 * entry point is called when the target driver calls scsi_sync_pkt(). If the
1633 1632 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
1634 1633 * must synchronize the CPU's view of the data. If the data transfer direction
1635 1634 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
1636 1635 * device's view of the data.
1637 1636 */
1638 1637 /*ARGSUSED*/
1639 1638 static void
1640 1639 megasas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1641 1640 {
1642 1641 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1643 1642
1644 1643 /*
1645 1644 * following 'ddi_dma_sync()' API call
1646 1645 * already called for each I/O in the ISR
1647 1646 */
1648 1647 #if 0
1649 1648 int i;
1650 1649
1651 1650 register struct scsa_cmd *acmd = PKT2CMD(pkt);
1652 1651
1653 1652 if (acmd->cmd_flags & CFLAG_DMAVALID) {
1654 1653 (void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
1655 1654 acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
1656 1655 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
1657 1656 }
1658 1657 #endif
1659 1658 }
1660 1659
1661 1660 /*ARGSUSED*/
1662 1661 static int
1663 1662 megasas_tran_quiesce(dev_info_t *dip)
1664 1663 {
1665 1664 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1666 1665
1667 1666 return (1);
1668 1667 }
1669 1668
1670 1669 /*ARGSUSED*/
1671 1670 static int
1672 1671 megasas_tran_unquiesce(dev_info_t *dip)
1673 1672 {
1674 1673 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1675 1674
1676 1675 return (1);
1677 1676 }
1678 1677
1679 1678 /*
1680 1679 * megasas_isr(caddr_t)
1681 1680 *
1682 1681 * The Interrupt Service Routine
1683 1682 *
1684 1683 * Collect status for all completed commands and do callback
1685 1684 *
1686 1685 */
1687 1686 static uint_t
1688 1687 megasas_isr(struct megasas_instance *instance)
1689 1688 {
1690 1689 int need_softintr;
1691 1690 uint32_t producer;
1692 1691 uint32_t consumer;
1693 1692 uint32_t context;
1694 1693
1695 1694 struct megasas_cmd *cmd;
1696 1695
1697 1696 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1698 1697
1699 1698 ASSERT(instance);
1700 1699 if (!instance->func_ptr->intr_ack(instance)) {
1701 1700 return (DDI_INTR_UNCLAIMED);
1702 1701 }
1703 1702
1704 1703 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
1705 1704 0, 0, DDI_DMA_SYNC_FORCPU);
1706 1705
1707 1706 if (megasas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
1708 1707 != DDI_SUCCESS) {
1709 1708 megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
1710 1709 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
1711 1710 return (DDI_INTR_UNCLAIMED);
1712 1711 }
1713 1712
1714 1713 producer = *instance->producer;
1715 1714 consumer = *instance->consumer;
1716 1715
1717 1716 con_log(CL_ANN1, (CE_CONT, " producer %x consumer %x ",
1718 1717 producer, consumer));
1719 1718
1720 1719 mutex_enter(&instance->completed_pool_mtx);
1721 1720
1722 1721 while (consumer != producer) {
1723 1722 context = instance->reply_queue[consumer];
1724 1723 cmd = instance->cmd_list[context];
1725 1724 mlist_add_tail(&cmd->list, &instance->completed_pool_list);
1726 1725
1727 1726 consumer++;
1728 1727 if (consumer == (instance->max_fw_cmds + 1)) {
1729 1728 consumer = 0;
1730 1729 }
1731 1730 }
1732 1731
1733 1732 mutex_exit(&instance->completed_pool_mtx);
1734 1733
1735 1734 *instance->consumer = consumer;
1736 1735 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
1737 1736 0, 0, DDI_DMA_SYNC_FORDEV);
1738 1737
1739 1738 if (instance->softint_running) {
1740 1739 need_softintr = 0;
1741 1740 } else {
1742 1741 need_softintr = 1;
1743 1742 }
1744 1743
1745 1744 if (instance->isr_level == HIGH_LEVEL_INTR) {
1746 1745 if (need_softintr) {
1747 1746 ddi_trigger_softintr(instance->soft_intr_id);
1748 1747 }
1749 1748 } else {
1750 1749 /*
1751 1750 * Not a high-level interrupt, therefore call the soft level
1752 1751 * interrupt explicitly
1753 1752 */
1754 1753 (void) megasas_softintr(instance);
1755 1754 }
1756 1755
1757 1756 return (DDI_INTR_CLAIMED);
1758 1757 }
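/*
 * Editor's illustration (not part of the reviewed source): the reply queue
 * drained by megasas_isr() above is a ring of max_fw_cmds + 1 context words
 * shared with the firmware.  A minimal, stand-alone sketch of the wrap
 * arithmetic, assuming max_fw_cmds == 4 (so five reply slots, 0..4):
 */
#if 0	/* illustration only */
	uint32_t max_fw_cmds = 4;
	uint32_t producer = 1;		/* firmware has already wrapped */
	uint32_t consumer = 4;

	while (consumer != producer) {
		/* reply_queue[consumer] identifies the completed command */
		consumer++;
		if (consumer == (max_fw_cmds + 1))
			consumer = 0;	/* slot 4 is followed by slot 0 */
	}
	/* the loop visits slots 4 and 0, then stops with consumer == 1 */
#endif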
1759 1758
1760 1759
1761 1760 /*
1762 1761 * ************************************************************************** *
1763 1762 * *
1764 1763 * libraries *
1765 1764 * *
1766 1765 * ************************************************************************** *
1767 1766 */
1768 1767 /*
1769 1768 * get_mfi_pkt : Get a command from the free pool
1770 1769 */
1771 1770 static struct megasas_cmd *
1772 1771 get_mfi_pkt(struct megasas_instance *instance)
1773 1772 {
1774 1773 mlist_t *head = &instance->cmd_pool_list;
1775 1774 struct megasas_cmd *cmd = NULL;
1776 1775
1777 1776 mutex_enter(&instance->cmd_pool_mtx);
1778 1777 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
1779 1778
1780 1779 if (!mlist_empty(head)) {
1781 1780 cmd = mlist_entry(head->next, struct megasas_cmd, list);
1782 1781 mlist_del_init(head->next);
1783 1782 }
1784 1783 if (cmd != NULL)
1785 1784 cmd->pkt = NULL;
1786 1785 mutex_exit(&instance->cmd_pool_mtx);
1787 1786
1788 1787 return (cmd);
1789 1788 }
1790 1789
1791 1790 /*
1792 1791 * return_mfi_pkt : Return a cmd to free command pool
1793 1792 */
1794 1793 static void
1795 1794 return_mfi_pkt(struct megasas_instance *instance, struct megasas_cmd *cmd)
1796 1795 {
1797 1796 mutex_enter(&instance->cmd_pool_mtx);
1798 1797 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
1799 1798
1800 1799 mlist_add(&cmd->list, &instance->cmd_pool_list);
1801 1800
1802 1801 mutex_exit(&instance->cmd_pool_mtx);
1803 1802 }
1804 1803
1805 1804 /*
1806 1805 * destroy_mfi_frame_pool
1807 1806 */
1808 1807 static void
1809 1808 destroy_mfi_frame_pool(struct megasas_instance *instance)
1810 1809 {
1811 1810 int i;
1812 1811 uint32_t max_cmd = instance->max_fw_cmds;
1813 1812
1814 1813 struct megasas_cmd *cmd;
1815 1814
1816 1815 /* free the frame DMA object of every command */
1817 1816 for (i = 0; i < max_cmd; i++) {
1818 1817
1819 1818 cmd = instance->cmd_list[i];
1820 1819
1821 1820 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
1822 1821 (void) mega_free_dma_obj(instance, cmd->frame_dma_obj);
1823 1822
1824 1823 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
1825 1824 }
1826 1825
1827 1826 }
1828 1827
1829 1828 /*
1830 1829 * create_mfi_frame_pool
1831 1830 */
1832 1831 static int
1833 1832 create_mfi_frame_pool(struct megasas_instance *instance)
1834 1833 {
1835 1834 int i = 0;
1836 1835 int cookie_cnt;
1837 1836 uint16_t max_cmd;
1838 1837 uint16_t sge_sz;
1839 1838 uint32_t sgl_sz;
1840 1839 uint32_t tot_frame_size;
1841 1840
1842 1841 struct megasas_cmd *cmd;
1843 1842
1844 1843 max_cmd = instance->max_fw_cmds;
1845 1844
1846 1845 sge_sz = sizeof (struct megasas_sge64);
1847 1846
1848 1847 /* calculate the number of 64-byte frames required for the SGL */
1849 1848 sgl_sz = sge_sz * instance->max_num_sge;
1850 1849 tot_frame_size = sgl_sz + MEGAMFI_FRAME_SIZE + SENSE_LENGTH;
1851 1850
1852 1851 con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
1853 1852 "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));
1854 1853
1855 1854 while (i < max_cmd) {
1856 1855 cmd = instance->cmd_list[i];
1857 1856
1858 1857 cmd->frame_dma_obj.size = tot_frame_size;
1859 1858 cmd->frame_dma_obj.dma_attr = megasas_generic_dma_attr;
1860 1859 cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1861 1860 cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1862 1861 cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
1863 1862 cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;
1864 1863
1865 1864
1866 1865 cookie_cnt = mega_alloc_dma_obj(instance, &cmd->frame_dma_obj);
1867 1866
1868 1867 if (cookie_cnt == -1 || cookie_cnt > 1) {
1869 1868 con_log(CL_ANN, (CE_WARN,
1870 1869 "create_mfi_frame_pool: could not alloc."));
1871 1870 return (DDI_FAILURE);
1872 1871 }
1873 1872
1874 1873 bzero(cmd->frame_dma_obj.buffer, tot_frame_size);
1875 1874
1876 1875 cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
1877 1876 cmd->frame = (union megasas_frame *)cmd->frame_dma_obj.buffer;
1878 1877 cmd->frame_phys_addr =
1879 1878 cmd->frame_dma_obj.dma_cookie[0].dmac_address;
1880 1879
1881 1880 cmd->sense = (uint8_t *)(((unsigned long)
1882 1881 cmd->frame_dma_obj.buffer) +
1883 1882 tot_frame_size - SENSE_LENGTH);
1884 1883 cmd->sense_phys_addr =
1885 1884 cmd->frame_dma_obj.dma_cookie[0].dmac_address +
1886 1885 tot_frame_size - SENSE_LENGTH;
1887 1886
1888 1887 if (!cmd->frame || !cmd->sense) {
1889 1888 con_log(CL_ANN, (CE_NOTE,
1890 1889 "megasas: frame pool allocation failed\n"));
1891 1890
1892 1891 return (-ENOMEM);
1893 1892 }
1894 1893
1895 1894 cmd->frame->io.context = cmd->index;
1896 1895 i++;
1897 1896
1898 1897 con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
1899 1898 cmd->frame->io.context, cmd->frame_phys_addr));
1900 1899 }
1901 1900
1902 1901 return (DDI_SUCCESS);
1903 1902 }
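/*
 * Editor's note (illustration, derived from the arithmetic above): each
 * per-command frame buffer allocated by create_mfi_frame_pool() appears to
 * be laid out as
 *
 *	offset 0                              MFI frame (cmd->frame),
 *	                                      MEGAMFI_FRAME_SIZE bytes
 *	                                      (64 bytes per the init_mfi()
 *	                                      comment further down)
 *	offset MEGAMFI_FRAME_SIZE             SGL space: max_num_sge entries
 *	                                      of struct megasas_sge64 (sgl_sz)
 *	offset tot_frame_size - SENSE_LENGTH  sense buffer (cmd->sense)
 *
 * with tot_frame_size = sgl_sz + MEGAMFI_FRAME_SIZE + SENSE_LENGTH, so the
 * frame, its SGL and its sense data share one 64-byte-aligned DMA object.
 */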
1904 1903
1905 1904 /*
1906 1905 * free_additional_dma_buffer
1907 1906 */
1908 1907 static void
1909 1908 free_additional_dma_buffer(struct megasas_instance *instance)
1910 1909 {
1911 1910 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
1912 1911 (void) mega_free_dma_obj(instance,
1913 1912 instance->mfi_internal_dma_obj);
1914 1913 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
1915 1914 }
1916 1915
1917 1916 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
1918 1917 (void) mega_free_dma_obj(instance,
1919 1918 instance->mfi_evt_detail_obj);
1920 1919 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
1921 1920 }
1922 1921 }
1923 1922
1924 1923 /*
1925 1924 * alloc_additional_dma_buffer
1926 1925 */
1927 1926 static int
1928 1927 alloc_additional_dma_buffer(struct megasas_instance *instance)
1929 1928 {
1930 1929 uint32_t reply_q_sz;
1931 1930 uint32_t internal_buf_size = PAGESIZE*2;
1932 1931
1933 1932 /* max cmds plus 1 + producer & consumer */
1934 1933 reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);
1935 1934
1936 1935 instance->mfi_internal_dma_obj.size = internal_buf_size;
1937 1936 instance->mfi_internal_dma_obj.dma_attr = megasas_generic_dma_attr;
1938 1937 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1939 1938 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
1940 1939 0xFFFFFFFFU;
1941 1940 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
1942 1941
1943 1942 if (mega_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj)
1944 1943 != 1) {
1945 1944 con_log(CL_ANN, (CE_WARN, "megaraid: could not alloc reply Q"));
1946 1945 return (DDI_FAILURE);
1947 1946 }
1948 1947
1949 1948 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
1950 1949
1951 1950 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
1952 1951
1953 1952 instance->producer = (uint32_t *)((unsigned long)
1954 1953 instance->mfi_internal_dma_obj.buffer);
1955 1954 instance->consumer = (uint32_t *)((unsigned long)
1956 1955 instance->mfi_internal_dma_obj.buffer + 4);
1957 1956 instance->reply_queue = (uint32_t *)((unsigned long)
1958 1957 instance->mfi_internal_dma_obj.buffer + 8);
1959 1958 instance->internal_buf = (caddr_t)(((unsigned long)
1960 1959 instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
1961 1960 instance->internal_buf_dmac_add =
1962 1961 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
1963 1962 reply_q_sz;
1964 1963 instance->internal_buf_size = internal_buf_size -
1965 1964 (reply_q_sz + 8);
1966 1965
1967 1966 /* allocate evt_detail */
1968 1967 instance->mfi_evt_detail_obj.size = sizeof (struct megasas_evt_detail);
1969 1968 instance->mfi_evt_detail_obj.dma_attr = megasas_generic_dma_attr;
1970 1969 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1971 1970 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1972 1971 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
1973 1972 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;
1974 1973
1975 1974 if (mega_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj) != 1) {
1976 1975 con_log(CL_ANN, (CE_WARN, "alloc_additional_dma_buffer: "
1977 1976 "could not allocate data transfer buffer."));
1978 1977 return (DDI_FAILURE);
1979 1978 }
1980 1979
1981 1980 bzero(instance->mfi_evt_detail_obj.buffer,
1982 1981 sizeof (struct megasas_evt_detail));
1983 1982
1984 1983 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
1985 1984
1986 1985 return (DDI_SUCCESS);
1987 1986 }
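/*
 * Editor's note (illustration): the offsets assigned above carve the single
 * 2 * PAGESIZE internal DMA buffer up as follows:
 *
 *	+0			producer index (uint32_t)
 *	+4			consumer index (uint32_t)
 *	+8			reply queue: max_fw_cmds + 1 context words
 *	+reply_q_sz + 8		internal_buf scratch area (used e.g. by
 *				get_ctrl_info()), internal_buf_size bytes
 *
 * where reply_q_sz = sizeof (uint32_t) * (max_fw_cmds + 1 + 2).
 */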
1988 1987
1989 1988 /*
1990 1989 * free_space_for_mfi
1991 1990 */
1992 1991 static void
1993 1992 free_space_for_mfi(struct megasas_instance *instance)
1994 1993 {
1995 1994 int i;
1996 1995 uint32_t max_cmd = instance->max_fw_cmds;
1997 1996
1998 1997 /* already freed */
1999 1998 if (instance->cmd_list == NULL) {
2000 1999 return;
2001 2000 }
2002 2001
2003 2002 free_additional_dma_buffer(instance);
2004 2003
2005 2004 /* first free the MFI frame pool */
2006 2005 destroy_mfi_frame_pool(instance);
2007 2006
2008 2007 /* free all the commands in the cmd_list */
2009 2008 for (i = 0; i < instance->max_fw_cmds; i++) {
2010 2009 kmem_free(instance->cmd_list[i],
2011 2010 sizeof (struct megasas_cmd));
2012 2011
2013 2012 instance->cmd_list[i] = NULL;
2014 2013 }
2015 2014
2016 2015 /* free the cmd_list buffer itself */
2017 2016 kmem_free(instance->cmd_list,
2018 2017 sizeof (struct megasas_cmd *) * max_cmd);
2019 2018
2020 2019 instance->cmd_list = NULL;
2021 2020
2022 2021 INIT_LIST_HEAD(&instance->cmd_pool_list);
2023 2022 }
2024 2023
2025 2024 /*
2026 2025 * alloc_space_for_mfi
2027 2026 */
2028 2027 static int
2029 2028 alloc_space_for_mfi(struct megasas_instance *instance)
2030 2029 {
2031 2030 int i;
2032 2031 uint32_t max_cmd;
2033 2032 size_t sz;
2034 2033
2035 2034 struct megasas_cmd *cmd;
2036 2035
2037 2036 max_cmd = instance->max_fw_cmds;
2038 2037 sz = sizeof (struct megasas_cmd *) * max_cmd;
2039 2038
2040 2039 /*
2041 2040 * instance->cmd_list is an array of struct megasas_cmd pointers.
2042 2041 * Allocate the dynamic array first and then allocate individual
2043 2042 * commands.
2044 2043 */
2045 2044 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
2046 2045 ASSERT(instance->cmd_list);
2047 2046
2048 2047 for (i = 0; i < max_cmd; i++) {
2049 2048 instance->cmd_list[i] = kmem_zalloc(sizeof (struct megasas_cmd),
2050 2049 KM_SLEEP);
2051 2050 ASSERT(instance->cmd_list[i]);
2052 2051 }
2053 2052
2054 2053 INIT_LIST_HEAD(&instance->cmd_pool_list);
2055 2054
2056 2055 /* add all the commands to command pool (instance->cmd_pool) */
2057 2056 for (i = 0; i < max_cmd; i++) {
2058 2057 cmd = instance->cmd_list[i];
2059 2058 cmd->index = i;
2060 2059
2061 2060 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2062 2061 }
2063 2062
2064 2063 /* create a frame pool and assign one frame to each cmd */
2065 2064 if (create_mfi_frame_pool(instance)) {
2066 2065 con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool\n"));
2067 2066 return (DDI_FAILURE);
2068 2067 }
2069 2068
2070 2069 /* allocate the additional internal DMA buffers (reply queue, evt_detail) */
2071 2070 if (alloc_additional_dma_buffer(instance)) {
2072 2071 con_log(CL_ANN, (CE_NOTE, "error allocating additional DMA buffer\n"));
2073 2072 return (DDI_FAILURE);
2074 2073 }
2075 2074
2076 2075 return (DDI_SUCCESS);
2077 2076 }
2078 2077
2079 2078 /*
2080 2079 * get_ctrl_info
2081 2080 */
2082 2081 static int
2083 2082 get_ctrl_info(struct megasas_instance *instance,
2084 2083 struct megasas_ctrl_info *ctrl_info)
2085 2084 {
2086 2085 int ret = 0;
2087 2086
2088 2087 struct megasas_cmd *cmd;
2089 2088 struct megasas_dcmd_frame *dcmd;
2090 2089 struct megasas_ctrl_info *ci;
2091 2090
2092 2091 cmd = get_mfi_pkt(instance);
2093 2092
2094 2093 if (!cmd) {
2095 2094 con_log(CL_ANN, (CE_WARN,
2096 2095 "Failed to get a cmd for ctrl info\n"));
2097 2096 return (DDI_FAILURE);
2098 2097 }
2099 2098
2100 2099 dcmd = &cmd->frame->dcmd;
2101 2100
2102 2101 ci = (struct megasas_ctrl_info *)instance->internal_buf;
2103 2102
2104 2103 if (!ci) {
2105 2104 con_log(CL_ANN, (CE_WARN,
2106 2105 "Failed to alloc mem for ctrl info\n"));
2107 2106 return_mfi_pkt(instance, cmd);
2108 2107 return (DDI_FAILURE);
2109 2108 }
2110 2109
2111 2110 (void) memset(ci, 0, sizeof (struct megasas_ctrl_info));
2112 2111
2113 2112 /* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
2114 2113 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2115 2114
2116 2115 dcmd->cmd = MFI_CMD_OP_DCMD;
2117 2116 dcmd->cmd_status = MFI_CMD_STATUS_POLL_MODE;
2118 2117 dcmd->sge_count = 1;
2119 2118 dcmd->flags = MFI_FRAME_DIR_READ;
2120 2119 dcmd->timeout = 0;
2121 2120 dcmd->data_xfer_len = sizeof (struct megasas_ctrl_info);
2122 2121 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
2123 2122 dcmd->sgl.sge32[0].phys_addr = instance->internal_buf_dmac_add;
2124 2123 dcmd->sgl.sge32[0].length = sizeof (struct megasas_ctrl_info);
2125 2124
2126 2125 cmd->frame_count = 1;
2127 2126
2128 2127 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2129 2128 ret = 0;
2130 2129 (void) memcpy(ctrl_info, ci, sizeof (struct megasas_ctrl_info));
2131 2130 } else {
2132 2131 con_log(CL_ANN, (CE_WARN, "get_ctrl_info: Ctrl info failed\n"));
2133 2132 ret = -1;
2134 2133 }
2135 2134
2136 2135 return_mfi_pkt(instance, cmd);
2137 2136 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) {
2138 2137 ret = -1;
2139 2138 }
2140 2139
2141 2140 return (ret);
2142 2141 }
2143 2142
2144 2143 /*
2145 2144 * abort_aen_cmd
2146 2145 */
2147 2146 static int
2148 2147 abort_aen_cmd(struct megasas_instance *instance,
2149 2148 struct megasas_cmd *cmd_to_abort)
2150 2149 {
2151 2150 int ret = 0;
2152 2151
2153 2152 struct megasas_cmd *cmd;
2154 2153 struct megasas_abort_frame *abort_fr;
2155 2154
2156 2155 cmd = get_mfi_pkt(instance);
2157 2156
2158 2157 if (!cmd) {
2159 2158 con_log(CL_ANN, (CE_WARN,
2160 2159 "Failed to get a cmd to abort the AEN\n"));
2161 2160 return (DDI_FAILURE);
2162 2161 }
2163 2162
2164 2163 abort_fr = &cmd->frame->abort;
2165 2164
2166 2165 /* prepare and issue the abort frame */
2167 2166 abort_fr->cmd = MFI_CMD_OP_ABORT;
2168 2167 abort_fr->cmd_status = MFI_CMD_STATUS_SYNC_MODE;
2169 2168 abort_fr->flags = 0;
2170 2169 abort_fr->abort_context = cmd_to_abort->index;
2171 2170 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
2172 2171 abort_fr->abort_mfi_phys_addr_hi = 0;
2173 2172
2174 2173 instance->aen_cmd->abort_aen = 1;
2175 2174
2176 2175 cmd->sync_cmd = MEGASAS_TRUE;
2177 2176 cmd->frame_count = 1;
2178 2177
2179 2178 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
2180 2179 con_log(CL_ANN, (CE_WARN,
2181 2180 "abort_aen_cmd: issue_cmd_in_sync_mode failed\n"));
2182 2181 ret = -1;
2183 2182 } else {
2184 2183 ret = 0;
2185 2184 }
2186 2185
2187 2186 instance->aen_cmd->abort_aen = 1;
2188 2187 instance->aen_cmd = 0;
2189 2188
2190 2189 return_mfi_pkt(instance, cmd);
2191 2190 (void) megasas_common_check(instance, cmd);
2192 2191
2193 2192 return (ret);
2194 2193 }
2195 2194
2196 2195 /*
2197 2196 * init_mfi
2198 2197 */
2199 2198 static int
2200 2199 init_mfi(struct megasas_instance *instance)
2201 2200 {
2202 2201 off_t reglength;
2203 2202 struct megasas_cmd *cmd;
2204 2203 struct megasas_ctrl_info ctrl_info;
2205 2204 struct megasas_init_frame *init_frame;
2206 2205 struct megasas_init_queue_info *initq_info;
2207 2206
2208 2207 if ((ddi_dev_regsize(instance->dip, REGISTER_SET_IO, &reglength)
2209 2208 != DDI_SUCCESS) || reglength < MINIMUM_MFI_MEM_SZ) {
2210 2209 return (DDI_FAILURE);
2211 2210 }
2212 2211
2213 2212 if (reglength > DEFAULT_MFI_MEM_SZ) {
2214 2213 reglength = DEFAULT_MFI_MEM_SZ;
2215 2214 con_log(CL_DLEVEL1, (CE_NOTE,
2216 2215 "mega: register length to map is 0x%lx bytes", reglength));
2217 2216 }
2218 2217
2219 2218 if (ddi_regs_map_setup(instance->dip, REGISTER_SET_IO,
2220 2219 &instance->regmap, 0, reglength, &endian_attr,
2221 2220 &instance->regmap_handle) != DDI_SUCCESS) {
2222 2221 con_log(CL_ANN, (CE_NOTE,
2223 2222 "megaraid: couldn't map control registers"));
2224 2223
2225 2224 goto fail_mfi_reg_setup;
2226 2225 }
2227 2226
2228 2227 /* we expect the FW state to be READY */
2229 2228 if (mfi_state_transition_to_ready(instance)) {
2230 2229 con_log(CL_ANN, (CE_WARN, "megaraid: F/W is not ready"));
2231 2230 goto fail_ready_state;
2232 2231 }
2233 2232
2234 2233 /* get various operational parameters from status register */
2235 2234 instance->max_num_sge =
2236 2235 (instance->func_ptr->read_fw_status_reg(instance) &
2237 2236 0xFF0000) >> 0x10;
2238 2237 /*
2239 2238 * Reduce the max supported cmds by 1. This is to ensure that the
2240 2239 * reply_q_sz (1 more than the max cmd that driver may send)
2241 2240 * does not exceed max cmds that the FW can support
2242 2241 */
2243 2242 instance->max_fw_cmds =
2244 2243 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
2245 2244 instance->max_fw_cmds = instance->max_fw_cmds - 1;
2246 2245
2247 2246 instance->max_num_sge =
2248 2247 (instance->max_num_sge > MEGASAS_MAX_SGE_CNT) ?
2249 2248 MEGASAS_MAX_SGE_CNT : instance->max_num_sge;
2250 2249
2251 2250 /* create a pool of commands */
2252 2251 if (alloc_space_for_mfi(instance))
2253 2252 goto fail_alloc_fw_space;
2254 2253
2255 2254 /* disable interrupt for initial preparation */
2256 2255 instance->func_ptr->disable_intr(instance);
2257 2256
2258 2257 /*
2259 2258 * Prepare an init frame. Note that the init frame points to the queue info
2260 2259 * structure. Each frame has SGL allocated after first 64 bytes. For
2261 2260 * this frame - since we don't need any SGL - we use SGL's space as
2262 2261 * queue info structure
2263 2262 */
2264 2263 cmd = get_mfi_pkt(instance);
2265 2264
2266 2265 init_frame = (struct megasas_init_frame *)cmd->frame;
2267 2266 initq_info = (struct megasas_init_queue_info *)
2268 2267 ((unsigned long)init_frame + 64);
2269 2268
2270 2269 (void) memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
2271 2270 (void) memset(initq_info, 0, sizeof (struct megasas_init_queue_info));
2272 2271
2273 2272 initq_info->init_flags = 0;
2274 2273
2275 2274 initq_info->reply_queue_entries = instance->max_fw_cmds + 1;
2276 2275
2277 2276 initq_info->producer_index_phys_addr_hi = 0;
2278 2277 initq_info->producer_index_phys_addr_lo =
2279 2278 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
2280 2279
2281 2280 initq_info->consumer_index_phys_addr_hi = 0;
2282 2281 initq_info->consumer_index_phys_addr_lo =
2283 2282 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4;
2284 2283
2285 2284 initq_info->reply_queue_start_phys_addr_hi = 0;
2286 2285 initq_info->reply_queue_start_phys_addr_lo =
2287 2286 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8;
2288 2287
2289 2288 init_frame->cmd = MFI_CMD_OP_INIT;
2290 2289 init_frame->cmd_status = MFI_CMD_STATUS_POLL_MODE;
2291 2290 init_frame->flags = 0;
2292 2291 init_frame->queue_info_new_phys_addr_lo =
2293 2292 cmd->frame_phys_addr + 64;
2294 2293 init_frame->queue_info_new_phys_addr_hi = 0;
2295 2294
2296 2295 init_frame->data_xfer_len = sizeof (struct megasas_init_queue_info);
2297 2296
2298 2297 cmd->frame_count = 1;
2299 2298
2300 2299 /* issue the init frame in polled mode */
2301 2300 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2302 2301 con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
2303 2302 goto fail_fw_init;
2304 2303 }
2305 2304
2306 2305 return_mfi_pkt(instance, cmd);
2307 2306 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) {
2308 2307 goto fail_fw_init;
2309 2308 }
2310 2309
2311 2310 /* gather misc FW related information */
2312 2311 if (!get_ctrl_info(instance, &ctrl_info)) {
2313 2312 instance->max_sectors_per_req = ctrl_info.max_request_size;
2314 2313 con_log(CL_ANN1, (CE_NOTE, "product name %s ld present %d",
2315 2314 ctrl_info.product_name, ctrl_info.ld_present_count));
2316 2315 } else {
2317 2316 instance->max_sectors_per_req = instance->max_num_sge *
2318 2317 PAGESIZE / 512;
2319 2318 }
2320 2319
2321 2320 if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2322 2321 goto fail_fw_init;
2323 2322 }
2324 2323
2325 2324 return (0);
2326 2325
2327 2326 fail_fw_init:
2328 2327 fail_alloc_fw_space:
2329 2328
2330 2329 free_space_for_mfi(instance);
2331 2330
2332 2331 fail_ready_state:
2333 2332 ddi_regs_map_free(&instance->regmap_handle);
2334 2333
2335 2334 fail_mfi_reg_setup:
2336 2335 return (DDI_FAILURE);
2337 2336 }
2338 2337
2339 2338 /*
2340 2339 * mfi_state_transition_to_ready : Move the FW to READY state
2341 2340 *
2342 2341 * @reg_set : MFI register set
2343 2342 */
2344 2343 static int
2345 2344 mfi_state_transition_to_ready(struct megasas_instance *instance)
2346 2345 {
2347 2346 int i;
2348 2347 uint8_t max_wait;
2349 2348 uint32_t fw_ctrl;
2350 2349 uint32_t fw_state;
2351 2350 uint32_t cur_state;
2352 2351
2353 2352 fw_state =
2354 2353 instance->func_ptr->read_fw_status_reg(instance) & MFI_STATE_MASK;
2355 2354 con_log(CL_ANN1, (CE_NOTE,
2356 2355 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));
2357 2356
2358 2357 while (fw_state != MFI_STATE_READY) {
2359 2358 con_log(CL_ANN, (CE_NOTE,
2360 2359 "mfi_state_transition_to_ready:FW state%x", fw_state));
2361 2360
2362 2361 switch (fw_state) {
2363 2362 case MFI_STATE_FAULT:
2364 2363 con_log(CL_ANN, (CE_NOTE,
2365 2364 "megasas: FW in FAULT state!!"));
2366 2365
2367 2366 return (-ENODEV);
2368 2367 case MFI_STATE_WAIT_HANDSHAKE:
2369 2368 /* set the CLR bit in IMR0 */
2370 2369 con_log(CL_ANN, (CE_NOTE,
2371 2370 "megasas: FW waiting for HANDSHAKE"));
2372 2371 /*
2373 2372 * PCI_Hot Plug: MFI F/W requires
2374 2373 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
2375 2374 * to be set
2376 2375 */
2377 2376 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
2378 2377 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
2379 2378 MFI_INIT_HOTPLUG, instance);
2380 2379
2381 2380 max_wait = 2;
2382 2381 cur_state = MFI_STATE_WAIT_HANDSHAKE;
2383 2382 break;
2384 2383 case MFI_STATE_BOOT_MESSAGE_PENDING:
2385 2384 /* set the CLR bit in IMR0 */
2386 2385 con_log(CL_ANN, (CE_NOTE,
2387 2386 "megasas: FW state boot message pending"));
2388 2387 /*
2389 2388 * PCI_Hot Plug: MFI F/W requires
2390 2389 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
2391 2390 * to be set
2392 2391 */
2393 2392 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);
2394 2393
2395 2394 max_wait = 10;
2396 2395 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
2397 2396 break;
2398 2397 case MFI_STATE_OPERATIONAL:
2399 2398 /* bring it to READY state; assuming max wait 2 secs */
2400 2399 instance->func_ptr->disable_intr(instance);
2401 2400 con_log(CL_ANN1, (CE_NOTE,
2402 2401 "megasas: FW in OPERATIONAL state"));
2403 2402 /*
2404 2403 * PCI_Hot Plug: MFI F/W requires
2405 2404 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
2406 2405 * to be set
2407 2406 */
2408 2407 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
2409 2408 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
2410 2409
2411 2410 max_wait = 10;
2412 2411 cur_state = MFI_STATE_OPERATIONAL;
2413 2412 break;
2414 2413 case MFI_STATE_UNDEFINED:
2415 2414 /* this state should not last for more than 2 seconds */
2416 2415 con_log(CL_ANN, (CE_NOTE, "FW state undefined\n"));
2417 2416
2418 2417 max_wait = 2;
2419 2418 cur_state = MFI_STATE_UNDEFINED;
2420 2419 break;
2421 2420 case MFI_STATE_BB_INIT:
2422 2421 max_wait = 2;
2423 2422 cur_state = MFI_STATE_BB_INIT;
2424 2423 break;
2425 2424 case MFI_STATE_FW_INIT:
2426 2425 max_wait = 2;
2427 2426 cur_state = MFI_STATE_FW_INIT;
2428 2427 break;
2429 2428 case MFI_STATE_DEVICE_SCAN:
2430 2429 max_wait = 10;
2431 2430 cur_state = MFI_STATE_DEVICE_SCAN;
2432 2431 break;
2433 2432 default:
2434 2433 con_log(CL_ANN, (CE_NOTE,
2435 2434 "megasas: Unknown state 0x%x\n", fw_state));
2436 2435 return (-ENODEV);
2437 2436 }
2438 2437
2439 2438 /* the cur_state should not last for more than max_wait secs */
2440 2439 for (i = 0; i < (max_wait * MILLISEC); i++) {
2441 2440 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
2442 2441 fw_state =
2443 2442 instance->func_ptr->read_fw_status_reg(instance) &
2444 2443 MFI_STATE_MASK;
2445 2444
2446 2445 if (fw_state == cur_state) {
2447 2446 delay(1 * drv_usectohz(MILLISEC));
2448 2447 } else {
2449 2448 break;
2450 2449 }
2451 2450 }
2452 2451
2453 2452 /* return error if fw_state hasn't changed after max_wait */
2454 2453 if (fw_state == cur_state) {
2455 2454 con_log(CL_ANN, (CE_NOTE,
2456 2455 "FW state hasn't changed in %d secs\n", max_wait));
2457 2456 return (-ENODEV);
2458 2457 }
2459 2458 }
2460 2459
2461 2460 fw_ctrl = RD_IB_DOORBELL(instance);
2462 2461
2463 2462 con_log(CL_ANN1, (CE_NOTE,
2464 2463 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
2465 2464
2466 2465 /*
2467 2466 * Write 0xF to the doorbell register to do the following.
2468 2467 * - Abort all outstanding commands (bit 0).
2469 2468 * - Transition from OPERATIONAL to READY state (bit 1).
2470 2469 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
2471 2470 * - Set to release FW to continue running (i.e. BIOS handshake)
2472 2471 * (bit 3).
2473 2472 */
2474 2473 WR_IB_DOORBELL(0xF, instance);
2475 2474
2476 2475 if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2477 2476 return (-ENODEV);
2478 2477 }
2479 2478 return (0);
2480 2479 }
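/*
 * Editor's note (worked example of the wait loop above): for each
 * intermediate state the loop spins up to max_wait * MILLISEC times,
 * delaying drv_usectohz(MILLISEC) ticks (about one millisecond) per pass,
 * i.e. roughly max_wait seconds in total.  With max_wait == 10
 * (BOOT_MESSAGE_PENDING, OPERATIONAL, DEVICE_SCAN) the driver therefore
 * waits up to ~10 seconds for the firmware to leave that state before
 * giving up with -ENODEV.
 */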
2481 2480
2482 2481 /*
2483 2482 * get_seq_num
2484 2483 */
2485 2484 static int
2486 2485 get_seq_num(struct megasas_instance *instance,
2487 2486 struct megasas_evt_log_info *eli)
2488 2487 {
2489 2488 int ret = 0;
2490 2489
2491 2490 dma_obj_t dcmd_dma_obj;
2492 2491 struct megasas_cmd *cmd;
2493 2492 struct megasas_dcmd_frame *dcmd;
2494 2493
2495 2494 cmd = get_mfi_pkt(instance);
2496 2495
2497 2496 if (!cmd) {
2498 2497 cmn_err(CE_WARN, "megasas: failed to get a cmd\n");
2499 2498 return (-ENOMEM);
2500 2499 }
2501 2500
2502 2501 dcmd = &cmd->frame->dcmd;
2503 2502
2504 2503 /* allocate the data transfer buffer */
2505 2504 dcmd_dma_obj.size = sizeof (struct megasas_evt_log_info);
2506 2505 dcmd_dma_obj.dma_attr = megasas_generic_dma_attr;
2507 2506 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2508 2507 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
2509 2508 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
2510 2509 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
2511 2510
2512 2511 if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) {
2513 2512 con_log(CL_ANN, (CE_WARN,
2514 2513 "get_seq_num: could not allocate data transfer buffer."));
2515 2514 return (DDI_FAILURE);
2516 2515 }
2517 2516
2518 2517 (void) memset(dcmd_dma_obj.buffer, 0,
2519 2518 sizeof (struct megasas_evt_log_info));
2520 2519
2521 2520 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2522 2521
2523 2522 dcmd->cmd = MFI_CMD_OP_DCMD;
2524 2523 dcmd->cmd_status = 0;
2525 2524 dcmd->sge_count = 1;
2526 2525 dcmd->flags = MFI_FRAME_DIR_READ;
2527 2526 dcmd->timeout = 0;
2528 2527 dcmd->data_xfer_len = sizeof (struct megasas_evt_log_info);
2529 2528 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
2530 2529 dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_log_info);
2531 2530 dcmd->sgl.sge32[0].phys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
2532 2531
2533 2532 cmd->sync_cmd = MEGASAS_TRUE;
2534 2533 cmd->frame_count = 1;
2535 2534
2536 2535 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
2537 2536 cmn_err(CE_WARN, "get_seq_num: "
2538 2537 "failed to issue MR_DCMD_CTRL_EVENT_GET_INFO\n");
2539 2538 ret = -1;
2540 2539 } else {
2541 2540 /* copy the data back into the caller's buffer */
2542 2541 bcopy(dcmd_dma_obj.buffer, eli,
2543 2542 sizeof (struct megasas_evt_log_info));
2544 2543 ret = 0;
2545 2544 }
2546 2545
2547 2546 if (mega_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
2548 2547 ret = -1;
2549 2548
2550 2549 return_mfi_pkt(instance, cmd);
2551 2550 if (megasas_common_check(instance, cmd) != DDI_SUCCESS) {
2552 2551 ret = -1;
2553 2552 }
2554 2553 return (ret);
2555 2554 }
2556 2555
2557 2556 /*
2558 2557 * start_mfi_aen
2559 2558 */
2560 2559 static int
2561 2560 start_mfi_aen(struct megasas_instance *instance)
2562 2561 {
2563 2562 int ret = 0;
2564 2563
2565 2564 struct megasas_evt_log_info eli;
2566 2565 union megasas_evt_class_locale class_locale;
2567 2566
2568 2567 /* get the latest sequence number from FW */
2569 2568 (void) memset(&eli, 0, sizeof (struct megasas_evt_log_info));
2570 2569
2571 2570 if (get_seq_num(instance, &eli)) {
2572 2571 cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num\n");
2573 2572 return (-1);
2574 2573 }
2575 2574
2576 2575 /* register AEN with FW for latest sequence number plus 1 */
2577 2576 class_locale.members.reserved = 0;
2578 2577 class_locale.members.locale = MR_EVT_LOCALE_ALL;
2579 2578 class_locale.members.class = MR_EVT_CLASS_CRITICAL;
2580 2579
2581 2580 ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
2582 2581 class_locale.word);
2583 2582
2584 2583 if (ret) {
2585 2584 cmn_err(CE_WARN, "start_mfi_aen: aen registration failed\n");
2586 2585 return (-1);
2587 2586 }
2588 2587
2589 2588 return (ret);
2590 2589 }
2591 2590
2592 2591 /*
2593 2592 * flush_cache
2594 2593 */
2595 2594 static void
2596 2595 flush_cache(struct megasas_instance *instance)
2597 2596 {
2598 2597 struct megasas_cmd *cmd;
2599 2598 struct megasas_dcmd_frame *dcmd;
2600 2599
2601 2600 if (!(cmd = get_mfi_pkt(instance)))
2602 2601 return;
2603 2602
2604 2603 dcmd = &cmd->frame->dcmd;
2605 2604
2606 2605 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2607 2606
2608 2607 dcmd->cmd = MFI_CMD_OP_DCMD;
2609 2608 dcmd->cmd_status = 0x0;
2610 2609 dcmd->sge_count = 0;
2611 2610 dcmd->flags = MFI_FRAME_DIR_NONE;
2612 2611 dcmd->timeout = 0;
2613 2612 dcmd->data_xfer_len = 0;
2614 2613 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
2615 2614 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
2616 2615
2617 2616 cmd->frame_count = 1;
2618 2617
2619 2618 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2620 2619 cmn_err(CE_WARN,
2621 2620 "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH\n");
2622 2621 }
2623 2622 con_log(CL_DLEVEL1, (CE_NOTE, "done"));
2624 2623 return_mfi_pkt(instance, cmd);
2625 2624 (void) megasas_common_check(instance, cmd);
2626 2625 }
2627 2626
2628 2627 /*
2629 2628 * service_mfi_aen - Completes an AEN command
2630 2629 * @instance: Adapter soft state
2631 2630 * @cmd: Command to be completed
2632 2631 *
2633 2632 */
2634 2633 static void
2635 2634 service_mfi_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2636 2635 {
2637 2636 uint32_t seq_num;
2638 2637 struct megasas_evt_detail *evt_detail =
2639 2638 (struct megasas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
2640 2639
2641 2640 cmd->cmd_status = cmd->frame->io.cmd_status;
2642 2641
2643 2642 if (cmd->cmd_status == ENODATA) {
2644 2643 cmd->cmd_status = 0;
2645 2644 }
2646 2645
2647 2646 /*
2648 2647 * log the MFI AEN event to the sysevent queue so that
2649 2648 * applications will be notified
2650 2649 */
2651 2650 if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
2652 2651 NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
2653 2652 int instance_no = ddi_get_instance(instance->dip);
2654 2653 con_log(CL_ANN, (CE_WARN,
2655 2654 "mega%d: Failed to log AEN event", instance_no));
2656 2655 }
2657 2656
2658 2657 /* get copy of seq_num and class/locale for re-registration */
2659 2658 seq_num = evt_detail->seq_num;
2660 2659 seq_num++;
2661 2660 (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
2662 2661 sizeof (struct megasas_evt_detail));
2663 2662
2664 2663 cmd->frame->dcmd.cmd_status = 0x0;
2665 2664 cmd->frame->dcmd.mbox.w[0] = seq_num;
2666 2665
2667 2666 instance->aen_seq_num = seq_num;
2668 2667
2669 2668 cmd->frame_count = 1;
2670 2669
2671 2670 /* Issue the aen registration frame */
2672 2671 instance->func_ptr->issue_cmd(cmd, instance);
2673 2672 }
2674 2673
2675 2674 /*
2676 2675 * complete_cmd_in_sync_mode - Completes an internal command
2677 2676 * @instance: Adapter soft state
2678 2677 * @cmd: Command to be completed
2679 2678 *
2680 2679 * The issue_cmd_in_sync_mode() function waits for a command to complete
2681 2680 * after it issues a command. This function wakes that waiting routine up by
2682 2681 * broadcasting on the instance's int_cmd_cv condition variable.
2683 2682 */
2684 2683 static void
2685 2684 complete_cmd_in_sync_mode(struct megasas_instance *instance,
2686 2685 struct megasas_cmd *cmd)
2687 2686 {
2688 2687 cmd->cmd_status = cmd->frame->io.cmd_status;
2689 2688
2690 2689 cmd->sync_cmd = MEGASAS_FALSE;
2691 2690
2692 2691 if (cmd->cmd_status == ENODATA) {
2693 2692 cmd->cmd_status = 0;
2694 2693 }
2695 2694
2696 2695 cv_broadcast(&instance->int_cmd_cv);
2697 2696 }
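/*
 * Editor's sketch (assumption -- issue_cmd_in_sync_mode() is outside this
 * hunk): the waiting side of this handshake is presumably the usual
 * condition-variable pattern, blocking until the completion path above
 * clears cmd->sync_cmd and broadcasts on int_cmd_cv.  The mutex name used
 * below is hypothetical.
 */
#if 0	/* illustration only */
	mutex_enter(&instance->int_cmd_mtx);
	while (cmd->sync_cmd == MEGASAS_TRUE)
		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
	mutex_exit(&instance->int_cmd_mtx);
#endif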
2698 2697
2699 2698 /*
2700 2699 * megasas_softintr - The Software ISR
2701 2700 * @param arg : HBA soft state
2702 2701 *
2703 2702 * called from high-level interrupt if hi-level interrupt are not there,
2704 2703 * otherwise triggered as a soft interrupt
2705 2704 */
2706 2705 static uint_t
2707 2706 megasas_softintr(struct megasas_instance *instance)
2708 2707 {
2709 2708 struct scsi_pkt *pkt;
2710 2709 struct scsa_cmd *acmd;
2711 2710 struct megasas_cmd *cmd;
2712 2711 struct mlist_head *pos, *next;
2713 2712 mlist_t process_list;
2714 2713 struct megasas_header *hdr;
2715 2714 struct scsi_arq_status *arqstat;
2716 2715
2717 2716 con_log(CL_ANN1, (CE_CONT, "megasas_softintr called"));
2718 2717
2719 2718 ASSERT(instance);
2720 2719 mutex_enter(&instance->completed_pool_mtx);
2721 2720
2722 2721 if (mlist_empty(&instance->completed_pool_list)) {
2723 2722 mutex_exit(&instance->completed_pool_mtx);
2724 2723 return (DDI_INTR_UNCLAIMED);
2725 2724 }
2726 2725
2727 2726 instance->softint_running = 1;
2728 2727
2729 2728 INIT_LIST_HEAD(&process_list);
2730 2729 mlist_splice(&instance->completed_pool_list, &process_list);
2731 2730 INIT_LIST_HEAD(&instance->completed_pool_list);
2732 2731
2733 2732 mutex_exit(&instance->completed_pool_mtx);
2734 2733
2735 2734 /* perform all callbacks first, before releasing the SCBs */
2736 2735 mlist_for_each_safe(pos, next, &process_list) {
2737 2736 cmd = mlist_entry(pos, struct megasas_cmd, list);
2738 2737
2739 2738 /* synchronize the Cmd frame for the CPU */
2740 2739 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
2741 2740 0, 0, DDI_DMA_SYNC_FORCPU);
2742 2741
2743 2742 if (megasas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
2744 2743 DDI_SUCCESS) {
2745 2744 megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2746 2745 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2747 2746 return (DDI_INTR_UNCLAIMED);
2748 2747 }
2749 2748
2750 2749 hdr = &cmd->frame->hdr;
2751 2750
2752 2751 /* remove the internal command from the process list */
2753 2752 mlist_del_init(&cmd->list);
2754 2753
2755 2754 switch (hdr->cmd) {
2756 2755 case MFI_CMD_OP_PD_SCSI:
2757 2756 case MFI_CMD_OP_LD_SCSI:
2758 2757 case MFI_CMD_OP_LD_READ:
2759 2758 case MFI_CMD_OP_LD_WRITE:
2760 2759 /*
2761 2760 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
2762 2761 * could have been issued either through an
2763 2762 * IO path or an IOCTL path. If it was via IOCTL,
2764 2763 * we will send it to internal completion.
2765 2764 */
2766 2765 if (cmd->sync_cmd == MEGASAS_TRUE) {
2767 2766 complete_cmd_in_sync_mode(instance, cmd);
2768 2767 break;
2769 2768 }
2770 2769
2771 2770 /* regular commands */
2772 2771 acmd = cmd->cmd;
2773 2772 pkt = CMD2PKT(acmd);
2774 2773
2775 2774 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2776 2775 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2777 2776 (void) ddi_dma_sync(acmd->cmd_dmahandle,
2778 2777 acmd->cmd_dma_offset,
2779 2778 acmd->cmd_dma_len,
2780 2779 DDI_DMA_SYNC_FORCPU);
2781 2780 }
2782 2781 }
2783 2782
2784 2783 pkt->pkt_reason = CMD_CMPLT;
2785 2784 pkt->pkt_statistics = 0;
2786 2785 pkt->pkt_state = STATE_GOT_BUS
2787 2786 | STATE_GOT_TARGET | STATE_SENT_CMD
2788 2787 | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2789 2788
2790 2789 con_log(CL_ANN1, (CE_CONT,
2791 2790 "CDB[0] = %x completed for %s: size %lx context %x",
2792 2791 pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
2793 2792 acmd->cmd_dmacount, hdr->context));
2794 2793
2795 2794 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2796 2795 struct scsi_inquiry *inq;
2797 2796
2798 2797 if (acmd->cmd_dmacount != 0) {
2799 2798 bp_mapin(acmd->cmd_buf);
2800 2799 inq = (struct scsi_inquiry *)
2801 2800 acmd->cmd_buf->b_un.b_addr;
2802 2801
2803 2802 /* don't expose physical drives to OS */
2804 2803 if (acmd->islogical &&
2805 2804 (hdr->cmd_status == MFI_STAT_OK)) {
2806 2805 display_scsi_inquiry(
2807 2806 (caddr_t)inq);
2808 2807 } else if ((hdr->cmd_status ==
2809 2808 MFI_STAT_OK) && inq->inq_dtype ==
2810 2809 DTYPE_DIRECT) {
2811 2810
2812 2811 display_scsi_inquiry(
2813 2812 (caddr_t)inq);
2814 2813
2815 2814 /* for physical disk */
2816 2815 hdr->cmd_status =
2817 2816 MFI_STAT_DEVICE_NOT_FOUND;
2818 2817 }
2819 2818 }
2820 2819 }
2821 2820
2822 2821 switch (hdr->cmd_status) {
2823 2822 case MFI_STAT_OK:
2824 2823 pkt->pkt_scbp[0] = STATUS_GOOD;
2825 2824 break;
2826 2825 case MFI_STAT_LD_CC_IN_PROGRESS:
2827 2826 case MFI_STAT_LD_RECON_IN_PROGRESS:
2828 2827 /* SJ - this is not the correct way */
2829 2828 pkt->pkt_scbp[0] = STATUS_GOOD;
2830 2829 break;
2831 2830 case MFI_STAT_LD_INIT_IN_PROGRESS:
2832 2831 con_log(CL_ANN,
2833 2832 (CE_WARN, "Initialization in Progress"));
2834 2833 pkt->pkt_reason = CMD_TRAN_ERR;
2835 2834
2836 2835 break;
2837 2836 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2838 2837 con_log(CL_ANN1, (CE_CONT, "scsi_done error"));
2839 2838
2840 2839 pkt->pkt_reason = CMD_CMPLT;
2841 2840 ((struct scsi_status *)
2842 2841 pkt->pkt_scbp)->sts_chk = 1;
2843 2842
2844 2843 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2845 2844
2846 2845 con_log(CL_ANN,
2847 2846 (CE_WARN, "TEST_UNIT_READY fail"));
2848 2847
2849 2848 } else {
2850 2849 pkt->pkt_state |= STATE_ARQ_DONE;
2851 2850 arqstat = (void *)(pkt->pkt_scbp);
2852 2851 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2853 2852 arqstat->sts_rqpkt_resid = 0;
2854 2853 arqstat->sts_rqpkt_state |=
2855 2854 STATE_GOT_BUS | STATE_GOT_TARGET
2856 2855 | STATE_SENT_CMD
2857 2856 | STATE_XFERRED_DATA;
2858 2857 *(uint8_t *)&arqstat->sts_rqpkt_status =
2859 2858 STATUS_GOOD;
2860 2859
2861 2860 bcopy(cmd->sense,
2862 2861 &(arqstat->sts_sensedata),
2863 2862 acmd->cmd_scblen -
2864 2863 offsetof(struct scsi_arq_status,
2865 2864 sts_sensedata));
2866 2865 }
2867 2866 break;
2868 2867 case MFI_STAT_LD_OFFLINE:
2869 2868 case MFI_STAT_DEVICE_NOT_FOUND:
2870 2869 con_log(CL_ANN1, (CE_CONT,
2871 2870 "device not found error"));
2872 2871 pkt->pkt_reason = CMD_DEV_GONE;
2873 2872 pkt->pkt_statistics = STAT_DISCON;
2874 2873 break;
2875 2874 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2876 2875 pkt->pkt_state |= STATE_ARQ_DONE;
2877 2876 pkt->pkt_reason = CMD_CMPLT;
2878 2877 ((struct scsi_status *)
2879 2878 pkt->pkt_scbp)->sts_chk = 1;
2880 2879
2881 2880 arqstat = (void *)(pkt->pkt_scbp);
2882 2881 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2883 2882 arqstat->sts_rqpkt_resid = 0;
2884 2883 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2885 2884 | STATE_GOT_TARGET | STATE_SENT_CMD
2886 2885 | STATE_XFERRED_DATA;
2887 2886 *(uint8_t *)&arqstat->sts_rqpkt_status =
2888 2887 STATUS_GOOD;
2889 2888
2890 2889 arqstat->sts_sensedata.es_valid = 1;
2891 2890 arqstat->sts_sensedata.es_key =
2892 2891 KEY_ILLEGAL_REQUEST;
2893 2892 arqstat->sts_sensedata.es_class =
2894 2893 CLASS_EXTENDED_SENSE;
2895 2894
2896 2895 /*
2897 2896 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2898 2897 * ASC: 0x21; ASCQ: 0x00;
2899 2898 */
2900 2899 arqstat->sts_sensedata.es_add_code = 0x21;
2901 2900 arqstat->sts_sensedata.es_qual_code = 0x00;
2902 2901
2903 2902 break;
2904 2903
2905 2904 default:
2906 2905 con_log(CL_ANN, (CE_CONT, "Unknown status!"));
2907 2906 pkt->pkt_reason = CMD_TRAN_ERR;
2908 2907
2909 2908 break;
2910 2909 }
2911 2910
2912 2911 atomic_add_16(&instance->fw_outstanding, (-1));
2913 2912
2914 2913 return_mfi_pkt(instance, cmd);
2915 2914
2916 2915 (void) megasas_common_check(instance, cmd);
2917 2916
2918 2917 if (acmd->cmd_dmahandle) {
2919 2918 if (megasas_check_dma_handle(
2920 2919 acmd->cmd_dmahandle) != DDI_SUCCESS) {
2921 2920 ddi_fm_service_impact(instance->dip,
2922 2921 DDI_SERVICE_UNAFFECTED);
2923 2922 pkt->pkt_reason = CMD_TRAN_ERR;
2924 2923 pkt->pkt_statistics = 0;
2925 2924 }
2926 2925 }
2927 2926
2928 2927 /* Call the callback routine */
2929 2928 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
2930 2929 scsi_hba_pkt_comp(pkt);
2931 2930 }
2932 2931
2933 2932 break;
2934 2933 case MFI_CMD_OP_SMP:
2935 2934 case MFI_CMD_OP_STP:
2936 2935 complete_cmd_in_sync_mode(instance, cmd);
2937 2936 break;
2938 2937 case MFI_CMD_OP_DCMD:
2939 2938 /* see if got an event notification */
2940 2939 if (cmd->frame->dcmd.opcode ==
2941 2940 MR_DCMD_CTRL_EVENT_WAIT) {
2942 2941 if ((instance->aen_cmd == cmd) &&
2943 2942 (instance->aen_cmd->abort_aen)) {
2944 2943 con_log(CL_ANN, (CE_WARN,
2945 2944 "megasas_softintr: "
2946 2945 "aborted_aen returned"));
2947 2946 } else {
2948 2947 service_mfi_aen(instance, cmd);
2949 2948
2950 2949 atomic_add_16(&instance->fw_outstanding,
2951 2950 (-1));
2952 2951 }
2953 2952 } else {
2954 2953 complete_cmd_in_sync_mode(instance, cmd);
2955 2954 }
2956 2955
2957 2956 break;
2958 2957 case MFI_CMD_OP_ABORT:
2959 2958 con_log(CL_ANN, (CE_WARN, "MFI_CMD_OP_ABORT complete"));
2960 2959 /*
2961 2960 * MFI_CMD_OP_ABORT successfully completed
2962 2961 * in the synchronous mode
2963 2962 */
2964 2963 complete_cmd_in_sync_mode(instance, cmd);
2965 2964 break;
2966 2965 default:
2967 2966 megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2968 2967 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2969 2968
2970 2969 if (cmd->pkt != NULL) {
2971 2970 pkt = cmd->pkt;
2972 2971 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
2973 2972 scsi_hba_pkt_comp(pkt);
2974 2973 }
2975 2974 }
2976 2975 con_log(CL_ANN, (CE_WARN, "Cmd type unknown !!"));
2977 2976 break;
2978 2977 }
2979 2978 }
2980 2979
2981 2980 instance->softint_running = 0;
2982 2981
2983 2982 return (DDI_INTR_CLAIMED);
2984 2983 }
2985 2984
2986 2985 /*
2987 2986 * mega_alloc_dma_obj
2988 2987 *
2989 2988 * Allocate the memory and other resources for a DMA object.
2990 2989 */
2991 2990 static int
2992 2991 mega_alloc_dma_obj(struct megasas_instance *instance, dma_obj_t *obj)
2993 2992 {
2994 2993 int i;
2995 2994 size_t alen = 0;
2996 2995 uint_t cookie_cnt;
2997 2996 struct ddi_device_acc_attr tmp_endian_attr;
2998 2997
2999 2998 tmp_endian_attr = endian_attr;
3000 2999 tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
3001 3000 i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
3002 3001 DDI_DMA_SLEEP, NULL, &obj->dma_handle);
3003 3002 if (i != DDI_SUCCESS) {
3004 3003
3005 3004 switch (i) {
3006 3005 case DDI_DMA_BADATTR :
3007 3006 con_log(CL_ANN, (CE_WARN,
3008 3007 "Failed ddi_dma_alloc_handle- Bad attribute"));
3009 3008 break;
3010 3009 case DDI_DMA_NORESOURCES :
3011 3010 con_log(CL_ANN, (CE_WARN,
3012 3011 "Failed ddi_dma_alloc_handle- No Resources"));
3013 3012 break;
3014 3013 default :
3015 3014 con_log(CL_ANN, (CE_WARN,
3016 3015 "Failed ddi_dma_alloc_handle :unknown %d", i));
3017 3016 break;
3018 3017 }
3019 3018
3020 3019 return (-1);
3021 3020 }
3022 3021
3023 3022 if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
3024 3023 DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
3025 3024 &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
3026 3025 alen < obj->size) {
3027 3026
3028 3027 ddi_dma_free_handle(&obj->dma_handle);
3029 3028
3030 3029 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
3031 3030
3032 3031 return (-1);
3033 3032 }
3034 3033
3035 3034 if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
3036 3035 obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
3037 3036 NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {
3038 3037
3039 3038 ddi_dma_mem_free(&obj->acc_handle);
3040 3039 ddi_dma_free_handle(&obj->dma_handle);
3041 3040
3042 3041 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
3043 3042
3044 3043 return (-1);
3045 3044 }
3046 3045
3047 3046 if (megasas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
3048 3047 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
3049 3048 return (-1);
3050 3049 }
3051 3050
3052 3051 if (megasas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
3053 3052 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
3054 3053 return (-1);
3055 3054 }
3056 3055
3057 3056 return (cookie_cnt);
3058 3057 }
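/*
 * Editor's note: a minimal caller sketch for the helper above, following
 * the pattern the driver uses elsewhere in this file (e.g. get_seq_num()):
 * fill in the size and DMA attributes, require exactly one cookie, and
 * release the object with mega_free_dma_obj() when done.  The size used
 * here is arbitrary.
 */
#if 0	/* illustration only */
	dma_obj_t obj;

	obj.size = 1024;
	obj.dma_attr = megasas_generic_dma_attr;
	obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	obj.dma_attr.dma_attr_sgllen = 1;
	obj.dma_attr.dma_attr_align = 1;

	if (mega_alloc_dma_obj(instance, &obj) != 1)
		return (DDI_FAILURE);

	/* obj.buffer is the kernel virtual address; */
	/* obj.dma_cookie[0].dmac_address is the device-visible address */

	(void) mega_free_dma_obj(instance, obj);
#endif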
3059 3058
3060 3059 /*
3061 3060 * mega_free_dma_obj(struct megasas_instance *, dma_obj_t)
3062 3061 *
3063 3062 * De-allocate the memory and other resources for a DMA object, which must
3064 3063 * have been allocated by a previous call to mega_alloc_dma_obj()
3065 3064 */
3066 3065 static int
3067 3066 mega_free_dma_obj(struct megasas_instance *instance, dma_obj_t obj)
3068 3067 {
3069 3068
3070 3069 if (megasas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
3071 3070 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
3072 3071 return (DDI_FAILURE);
3073 3072 }
3074 3073
3075 3074 if (megasas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
3076 3075 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
3077 3076 return (DDI_FAILURE);
3078 3077 }
3079 3078
3080 3079 (void) ddi_dma_unbind_handle(obj.dma_handle);
3081 3080 ddi_dma_mem_free(&obj.acc_handle);
3082 3081 ddi_dma_free_handle(&obj.dma_handle);
3083 3082
3084 3083 return (DDI_SUCCESS);
3085 3084 }
3086 3085
3087 3086 /*
3088 3087 * megasas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
3089 3088 * int, int (*)())
3090 3089 *
3091 3090 * Allocate DMA resources for a new SCSI command
3092 3091 */
3093 3092 static int
3094 3093 megasas_dma_alloc(struct megasas_instance *instance, struct scsi_pkt *pkt,
3095 3094 struct buf *bp, int flags, int (*callback)())
3096 3095 {
3097 3096 int dma_flags;
3098 3097 int (*cb)(caddr_t);
3099 3098 int i;
3100 3099
3101 3100 ddi_dma_attr_t tmp_dma_attr = megasas_generic_dma_attr;
3102 3101 struct scsa_cmd *acmd = PKT2CMD(pkt);
3103 3102
3104 3103 acmd->cmd_buf = bp;
3105 3104
3106 3105 if (bp->b_flags & B_READ) {
3107 3106 acmd->cmd_flags &= ~CFLAG_DMASEND;
3108 3107 dma_flags = DDI_DMA_READ;
3109 3108 } else {
3110 3109 acmd->cmd_flags |= CFLAG_DMASEND;
3111 3110 dma_flags = DDI_DMA_WRITE;
3112 3111 }
3113 3112
3114 3113 if (flags & PKT_CONSISTENT) {
3115 3114 acmd->cmd_flags |= CFLAG_CONSISTENT;
3116 3115 dma_flags |= DDI_DMA_CONSISTENT;
3117 3116 }
3118 3117
3119 3118 if (flags & PKT_DMA_PARTIAL) {
3120 3119 dma_flags |= DDI_DMA_PARTIAL;
3121 3120 }
3122 3121
3123 3122 dma_flags |= DDI_DMA_REDZONE;
3124 3123
3125 3124 cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
3126 3125
3127 3126 tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
3128 3127 tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
3129 3128
3130 3129 if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
3131 3130 cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
3132 3131 switch (i) {
3133 3132 case DDI_DMA_BADATTR:
3134 3133 bioerror(bp, EFAULT);
3135 3134 return (-1);
3136 3135
3137 3136 case DDI_DMA_NORESOURCES:
3138 3137 bioerror(bp, 0);
3139 3138 return (-1);
3140 3139
3141 3140 default:
3142 3141 con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
3143 3142 "0x%x impossible\n", i));
3144 3143 bioerror(bp, EFAULT);
3145 3144 return (-1);
3146 3145 }
3147 3146 }
3148 3147
3149 3148 i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
3150 3149 cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);
3151 3150
3152 3151 switch (i) {
3153 3152 case DDI_DMA_PARTIAL_MAP:
3154 3153 if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
3155 3154 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
3156 3155 "DDI_DMA_PARTIAL_MAP impossible\n"));
3157 3156 goto no_dma_cookies;
3158 3157 }
3159 3158
3160 3159 if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
3161 3160 DDI_FAILURE) {
3162 3161 con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed\n"));
3163 3162 goto no_dma_cookies;
3164 3163 }
3165 3164
3166 3165 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
3167 3166 &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
3168 3167 &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
3169 3168 DDI_FAILURE) {
3170 3169
3171 3170 con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed\n"));
3172 3171 goto no_dma_cookies;
3173 3172 }
3174 3173
3175 3174 goto get_dma_cookies;
3176 3175 case DDI_DMA_MAPPED:
3177 3176 acmd->cmd_nwin = 1;
3178 3177 acmd->cmd_dma_len = 0;
3179 3178 acmd->cmd_dma_offset = 0;
3180 3179
3181 3180 get_dma_cookies:
3182 3181 i = 0;
3183 3182 acmd->cmd_dmacount = 0;
3184 3183 for (;;) {
3185 3184 acmd->cmd_dmacount +=
3186 3185 acmd->cmd_dmacookies[i++].dmac_size;
3187 3186
3188 3187 if (i == instance->max_num_sge ||
3189 3188 i == acmd->cmd_ncookies)
3190 3189 break;
3191 3190
3192 3191 ddi_dma_nextcookie(acmd->cmd_dmahandle,
3193 3192 &acmd->cmd_dmacookies[i]);
3194 3193 }
3195 3194
3196 3195 acmd->cmd_cookie = i;
3197 3196 acmd->cmd_cookiecnt = i;
3198 3197
3199 3198 acmd->cmd_flags |= CFLAG_DMAVALID;
3200 3199
3201 3200 if (bp->b_bcount >= acmd->cmd_dmacount) {
3202 3201 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
3203 3202 } else {
3204 3203 pkt->pkt_resid = 0;
3205 3204 }
3206 3205
3207 3206 return (0);
3208 3207 case DDI_DMA_NORESOURCES:
3209 3208 bioerror(bp, 0);
3210 3209 break;
3211 3210 case DDI_DMA_NOMAPPING:
3212 3211 bioerror(bp, EFAULT);
3213 3212 break;
3214 3213 case DDI_DMA_TOOBIG:
3215 3214 bioerror(bp, EINVAL);
3216 3215 break;
3217 3216 case DDI_DMA_INUSE:
3218 3217 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
3219 3218 " DDI_DMA_INUSE impossible\n"));
3220 3219 break;
3221 3220 default:
3222 3221 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
3223 3222 "0x%x impossible\n", i));
3224 3223 break;
3225 3224 }
3226 3225
3227 3226 no_dma_cookies:
3228 3227 ddi_dma_free_handle(&acmd->cmd_dmahandle);
3229 3228 acmd->cmd_dmahandle = NULL;
3230 3229 acmd->cmd_flags &= ~CFLAG_DMAVALID;
3231 3230 return (-1);
3232 3231 }
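/*
 * Editor's note (worked example of the residual arithmetic above): if
 * bp->b_bcount is 1 MB but the cookies gathered for the current window only
 * cover 768 KB (acmd->cmd_dmacount), pkt_resid is set to 256 KB; when the
 * target driver requested PKT_DMA_PARTIAL, the remaining bytes are mapped
 * later by megasas_dma_move() stepping through the next DMA window.
 */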
3233 3232
3234 3233 /*
3235 3234 * megasas_dma_move(struct megasas_instance *, struct scsi_pkt *, struct buf *)
3236 3235 *
3237 3236 * Move DMA resources to the next DMA window
3238 3237 *
3239 3238 */
3240 3239 static int
3241 3240 megasas_dma_move(struct megasas_instance *instance, struct scsi_pkt *pkt,
3242 3241 struct buf *bp)
3243 3242 {
3244 3243 int i = 0;
3245 3244
3246 3245 struct scsa_cmd *acmd = PKT2CMD(pkt);
3247 3246
3248 3247 /*
3249 3248 * If there are no more cookies remaining in this window,
3250 3249 * must move to the next window first.
3251 3250 */
3252 3251 if (acmd->cmd_cookie == acmd->cmd_ncookies) {
3253 3252 if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
3254 3253 return (0);
3255 3254 }
3256 3255
3257 3256 /* at last window, cannot move */
3258 3257 if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
3259 3258 return (-1);
3260 3259 }
3261 3260
3262 3261 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
3263 3262 &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
3264 3263 &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
3265 3264 DDI_FAILURE) {
3266 3265 return (-1);
3267 3266 }
3268 3267
3269 3268 acmd->cmd_cookie = 0;
3270 3269 } else {
3271 3270 /* still more cookies in this window - get the next one */
3272 3271 ddi_dma_nextcookie(acmd->cmd_dmahandle,
3273 3272 &acmd->cmd_dmacookies[0]);
3274 3273 }
3275 3274
3276 3275 /* get remaining cookies in this window, up to our maximum */
3277 3276 for (;;) {
3278 3277 acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
3279 3278 acmd->cmd_cookie++;
3280 3279
3281 3280 if (i == instance->max_num_sge ||
3282 3281 acmd->cmd_cookie == acmd->cmd_ncookies) {
3283 3282 break;
3284 3283 }
3285 3284
3286 3285 ddi_dma_nextcookie(acmd->cmd_dmahandle,
3287 3286 &acmd->cmd_dmacookies[i]);
3288 3287 }
3289 3288
3290 3289 acmd->cmd_cookiecnt = i;
3291 3290
3292 3291 if (bp->b_bcount >= acmd->cmd_dmacount) {
3293 3292 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
3294 3293 } else {
3295 3294 pkt->pkt_resid = 0;
3296 3295 }
3297 3296
3298 3297 return (0);
3299 3298 }
3300 3299
3301 3300 /*
3302 3301 * build_cmd
3303 3302 */
3304 3303 static struct megasas_cmd *
3305 3304 build_cmd(struct megasas_instance *instance, struct scsi_address *ap,
3306 3305 struct scsi_pkt *pkt, uchar_t *cmd_done)
3307 3306 {
3308 3307 uint16_t flags = 0;
3309 3308 uint32_t i;
3310 3309 uint32_t context;
3311 3310 uint32_t sge_bytes;
3312 3311
3313 3312 struct megasas_cmd *cmd;
3314 3313 struct megasas_sge64 *mfi_sgl;
3315 3314 struct scsa_cmd *acmd = PKT2CMD(pkt);
3316 3315 struct megasas_pthru_frame *pthru;
3317 3316 struct megasas_io_frame *ldio;
3318 3317
3319 3318 /* find out if this is a logical or a physical drive command */
3320 3319 acmd->islogical = MEGADRV_IS_LOGICAL(ap);
3321 3320 acmd->device_id = MAP_DEVICE_ID(instance, ap);
3322 3321 *cmd_done = 0;
3323 3322
3324 3323 /* get the command packet */
3325 3324 if (!(cmd = get_mfi_pkt(instance))) {
3326 3325 return (NULL);
3327 3326 }
3328 3327
3329 3328 cmd->pkt = pkt;
3330 3329 cmd->cmd = acmd;
3331 3330
3332 3331 /* let's get the command direction */
3333 3332 if (acmd->cmd_flags & CFLAG_DMASEND) {
3334 3333 flags = MFI_FRAME_DIR_WRITE;
3335 3334
3336 3335 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
3337 3336 (void) ddi_dma_sync(acmd->cmd_dmahandle,
3338 3337 acmd->cmd_dma_offset, acmd->cmd_dma_len,
3339 3338 DDI_DMA_SYNC_FORDEV);
3340 3339 }
3341 3340 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
3342 3341 flags = MFI_FRAME_DIR_READ;
3343 3342
3344 3343 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
3345 3344 (void) ddi_dma_sync(acmd->cmd_dmahandle,
3346 3345 acmd->cmd_dma_offset, acmd->cmd_dma_len,
3347 3346 DDI_DMA_SYNC_FORCPU);
3348 3347 }
3349 3348 } else {
3350 3349 flags = MFI_FRAME_DIR_NONE;
3351 3350 }
3352 3351
3353 3352 flags |= MFI_FRAME_SGL64;
3354 3353
3355 3354 switch (pkt->pkt_cdbp[0]) {
3356 3355
3357 3356 /*
3358 3357 * case SCMD_SYNCHRONIZE_CACHE:
3359 3358 * flush_cache(instance);
3360 3359 * return_mfi_pkt(instance, cmd);
3361 3360 * *cmd_done = 1;
3362 3361 *
3363 3362 * return (NULL);
3364 3363 */
3365 3364
3366 3365 case SCMD_READ:
3367 3366 case SCMD_WRITE:
3368 3367 case SCMD_READ_G1:
3369 3368 case SCMD_WRITE_G1:
3370 3369 if (acmd->islogical) {
3371 3370 ldio = (struct megasas_io_frame *)cmd->frame;
3372 3371
3373 3372 /*
3374 3373 * prepare the Logical IO frame:
3375 3374 * 2nd bit is zero for all read cmds
3376 3375 */
3377 3376 ldio->cmd = (pkt->pkt_cdbp[0] & 0x02) ?
3378 3377 MFI_CMD_OP_LD_WRITE : MFI_CMD_OP_LD_READ;
3379 3378 ldio->cmd_status = 0x0;
3380 3379 ldio->scsi_status = 0x0;
3381 3380 ldio->target_id = acmd->device_id;
3382 3381 ldio->timeout = 0;
3383 3382 ldio->reserved_0 = 0;
3384 3383 ldio->pad_0 = 0;
3385 3384 ldio->flags = flags;
3386 3385
3387 3386 /* Initialize sense Information */
3388 3387 bzero(cmd->sense, SENSE_LENGTH);
3389 3388 ldio->sense_len = SENSE_LENGTH;
3390 3389 ldio->sense_buf_phys_addr_hi = 0;
3391 3390 ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
3392 3391
3393 3392 ldio->start_lba_hi = 0;
3394 3393 ldio->access_byte = (acmd->cmd_cdblen != 6) ?
3395 3394 pkt->pkt_cdbp[1] : 0;
3396 3395 ldio->sge_count = acmd->cmd_cookiecnt;
3397 3396 mfi_sgl = (struct megasas_sge64 *)&ldio->sgl;
3398 3397
3399 3398 context = ldio->context;
3400 3399
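			/*
			 * Decode the LBA and transfer length from the CDB.
			 * Their position depends on the CDB length (6-, 10-,
			 * 12- or 16-byte CDBs); the multi-byte CDB fields are
			 * big-endian, so they are reassembled byte by byte
			 * and converted to the FW's little-endian frame
			 * format with host_to_le16/host_to_le32.
			 */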
3401 3400 if (acmd->cmd_cdblen == CDB_GROUP0) {
3402 3401 ldio->lba_count = host_to_le16(
3403 3402 (uint16_t)(pkt->pkt_cdbp[4]));
3404 3403
3405 3404 ldio->start_lba_lo = host_to_le32(
3406 3405 ((uint32_t)(pkt->pkt_cdbp[3])) |
3407 3406 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
3408 3407 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
3409 3408 << 16));
3410 3409 } else if (acmd->cmd_cdblen == CDB_GROUP1) {
3411 3410 ldio->lba_count = host_to_le16(
3412 3411 ((uint16_t)(pkt->pkt_cdbp[8])) |
3413 3412 ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
3414 3413
3415 3414 ldio->start_lba_lo = host_to_le32(
3416 3415 ((uint32_t)(pkt->pkt_cdbp[5])) |
3417 3416 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
3418 3417 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
3419 3418 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
3420 3419 } else if (acmd->cmd_cdblen == CDB_GROUP2) {
3421 3420 ldio->lba_count = host_to_le16(
3422 3421 ((uint16_t)(pkt->pkt_cdbp[9])) |
3423 3422 ((uint16_t)(pkt->pkt_cdbp[8]) << 8) |
3424 3423 ((uint16_t)(pkt->pkt_cdbp[7]) << 16) |
3425 3424 ((uint16_t)(pkt->pkt_cdbp[6]) << 24));
3426 3425
3427 3426 ldio->start_lba_lo = host_to_le32(
3428 3427 ((uint32_t)(pkt->pkt_cdbp[5])) |
3429 3428 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
3430 3429 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
3431 3430 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
3432 3431 } else if (acmd->cmd_cdblen == CDB_GROUP3) {
3433 3432 ldio->lba_count = host_to_le16(
3434 3433 ((uint16_t)(pkt->pkt_cdbp[13])) |
3435 3434 ((uint16_t)(pkt->pkt_cdbp[12]) << 8) |
3436 3435 ((uint16_t)(pkt->pkt_cdbp[11]) << 16) |
3437 3436 ((uint16_t)(pkt->pkt_cdbp[10]) << 24));
3438 3437
3439 3438 ldio->start_lba_lo = host_to_le32(
3440 3439 ((uint32_t)(pkt->pkt_cdbp[9])) |
3441 3440 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
3442 3441 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
3443 3442 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
3444 3443
3445 3444 				ldio->start_lba_hi = host_to_le32(
3446 3445 ((uint32_t)(pkt->pkt_cdbp[5])) |
3447 3446 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
3448 3447 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
3449 3448 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
3450 3449 }
3451 3450
3452 3451 break;
3453 3452 }
3454 3453 		/* fall through for all non-rd/wr cmds */
3455 3454 default:
3456 3455 pthru = (struct megasas_pthru_frame *)cmd->frame;
3457 3456
3458 3457 /* prepare the DCDB frame */
3459 3458 pthru->cmd = (acmd->islogical) ?
3460 3459 MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI;
3461 3460 pthru->cmd_status = 0x0;
3462 3461 pthru->scsi_status = 0x0;
3463 3462 pthru->target_id = acmd->device_id;
3464 3463 pthru->lun = 0;
3465 3464 pthru->cdb_len = acmd->cmd_cdblen;
3466 3465 pthru->timeout = 0;
3467 3466 pthru->flags = flags;
3468 3467 pthru->data_xfer_len = acmd->cmd_dmacount;
3469 3468 pthru->sge_count = acmd->cmd_cookiecnt;
3470 3469 mfi_sgl = (struct megasas_sge64 *)&pthru->sgl;
3471 3470
3472 3471 bzero(cmd->sense, SENSE_LENGTH);
3473 3472 pthru->sense_len = SENSE_LENGTH;
3474 3473 pthru->sense_buf_phys_addr_hi = 0;
3475 3474 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
3476 3475
3477 3476 context = pthru->context;
3478 3477
3479 3478 bcopy(pkt->pkt_cdbp, pthru->cdb, acmd->cmd_cdblen);
3480 3479
3481 3480 break;
3482 3481 }
3483 3482 #ifdef lint
3484 3483 context = context;
3485 3484 #endif
3486 3485 /* bzero(mfi_sgl, sizeof (struct megasas_sge64) * MAX_SGL); */
3487 3486
3488 3487 /* prepare the scatter-gather list for the firmware */
3489 3488 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) {
3490 3489 mfi_sgl->phys_addr = acmd->cmd_dmacookies[i].dmac_laddress;
3491 3490 mfi_sgl->length = acmd->cmd_dmacookies[i].dmac_size;
3492 3491 }
3493 3492
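	/*
	 * The first MFI frame carries the command itself plus as many SGEs
	 * as fit in it; any remaining SGEs presumably spill into additional
	 * frames, so the total frame count below is
	 * 1 + ceil(sge_bytes / MEGAMFI_FRAME_SIZE), capped at 8.
	 */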
3494 3493 sge_bytes = sizeof (struct megasas_sge64)*acmd->cmd_cookiecnt;
3495 3494
3496 3495 cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
3497 3496 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1;
3498 3497
3499 3498 if (cmd->frame_count >= 8) {
3500 3499 cmd->frame_count = 8;
3501 3500 }
3502 3501
3503 3502 return (cmd);
3504 3503 }
3505 3504
3506 3505 /*
3507 3506 * wait_for_outstanding - Wait for all outstanding cmds
3508 3507 * @instance: Adapter soft state
3509 3508 *
3510 3509 	 * This function waits up to wait_time milliseconds for the FW to
3511 3510 	 * complete all its outstanding commands. Returns error if one or more IOs
3512 3511 	 * are pending after this time period.
3513 3512 */
3514 3513 static int
3515 3514 wait_for_outstanding(struct megasas_instance *instance)
3516 3515 {
3517 3516 int i;
3518 3517 uint32_t wait_time = 90;
3519 3518
3520 3519 for (i = 0; i < wait_time; i++) {
3521 3520 if (!instance->fw_outstanding) {
3522 3521 break;
3523 3522 }
3524 3523
3525 3524 		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
3526 3525 }
3527 3526
3528 3527 if (instance->fw_outstanding) {
3529 3528 return (1);
3530 3529 }
3531 3530
3532 3531 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VERSION);
3533 3532
3534 3533 return (0);
3535 3534 }
3536 3535
3537 3536 /*
3538 3537 * issue_mfi_pthru
3539 3538 */
3540 3539 static int
3541 3540 issue_mfi_pthru(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
3542 3541 struct megasas_cmd *cmd, int mode)
3543 3542 {
3544 3543 void *ubuf;
3545 3544 uint32_t kphys_addr = 0;
3546 3545 uint32_t xferlen = 0;
3547 3546 uint_t model;
3548 3547
3549 3548 dma_obj_t pthru_dma_obj;
3550 3549 struct megasas_pthru_frame *kpthru;
3551 3550 struct megasas_pthru_frame *pthru;
3552 3551
3553 3552 pthru = &cmd->frame->pthru;
3554 3553 kpthru = (struct megasas_pthru_frame *)&ioctl->frame[0];
3555 3554
3556 3555 model = ddi_model_convert_from(mode & FMODELS);
3557 3556 if (model == DDI_MODEL_ILP32) {
3558 3557 		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_ILP32"));
3559 3558
3560 3559 xferlen = kpthru->sgl.sge32[0].length;
3561 3560
3562 3561 /* SJ! - ubuf needs to be virtual address. */
3563 3562 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
3564 3563 } else {
3565 3564 #ifdef _ILP32
3566 3565 		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_ILP32"));
3567 3566 xferlen = kpthru->sgl.sge32[0].length;
3568 3567 /* SJ! - ubuf needs to be virtual address. */
3569 3568 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
3570 3569 #else
3571 3570 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP64"));
3572 3571 xferlen = kpthru->sgl.sge64[0].length;
3573 3572 /* SJ! - ubuf needs to be virtual address. */
3574 3573 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr;
3575 3574 #endif
3576 3575 }
3577 3576
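	/*
	 * The application stores the user virtual address of its data buffer
	 * in the SGE phys_addr field of the ioctl frame.  When a transfer
	 * length is given, that buffer is bounced through a kernel DMA
	 * object: copied in before issuing the command for writes and copied
	 * back out after completion for reads.
	 */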
3578 3577 if (xferlen) {
3579 3578 /* means IOCTL requires DMA */
3580 3579 /* allocate the data transfer buffer */
3581 3580 pthru_dma_obj.size = xferlen;
3582 3581 pthru_dma_obj.dma_attr = megasas_generic_dma_attr;
3583 3582 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3584 3583 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3585 3584 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1;
3586 3585 pthru_dma_obj.dma_attr.dma_attr_align = 1;
3587 3586
3588 3587 /* allocate kernel buffer for DMA */
3589 3588 if (mega_alloc_dma_obj(instance, &pthru_dma_obj) != 1) {
3590 3589 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
3591 3590 			    "could not allocate data transfer buffer."));
3592 3591 return (DDI_FAILURE);
3593 3592 }
3594 3593
3595 3594 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3596 3595 if (kpthru->flags & MFI_FRAME_DIR_WRITE) {
3597 3596 if (ddi_copyin(ubuf, (void *)pthru_dma_obj.buffer,
3598 3597 xferlen, mode)) {
3599 3598 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
3600 3599 "copy from user space failed\n"));
3601 3600 return (1);
3602 3601 }
3603 3602 }
3604 3603
3605 3604 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address;
3606 3605 }
3607 3606
3608 3607 pthru->cmd = kpthru->cmd;
3609 3608 pthru->sense_len = kpthru->sense_len;
3610 3609 pthru->cmd_status = kpthru->cmd_status;
3611 3610 pthru->scsi_status = kpthru->scsi_status;
3612 3611 pthru->target_id = kpthru->target_id;
3613 3612 pthru->lun = kpthru->lun;
3614 3613 pthru->cdb_len = kpthru->cdb_len;
3615 3614 pthru->sge_count = kpthru->sge_count;
3616 3615 pthru->timeout = kpthru->timeout;
3617 3616 pthru->data_xfer_len = kpthru->data_xfer_len;
3618 3617
3619 3618 pthru->sense_buf_phys_addr_hi = 0;
3620 3619 /* pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; */
3621 3620 pthru->sense_buf_phys_addr_lo = 0;
3622 3621
3623 3622 bcopy((void *)kpthru->cdb, (void *)pthru->cdb, pthru->cdb_len);
3624 3623
3625 3624 pthru->flags = kpthru->flags & ~MFI_FRAME_SGL64;
3626 3625 pthru->sgl.sge32[0].length = xferlen;
3627 3626 pthru->sgl.sge32[0].phys_addr = kphys_addr;
3628 3627
3629 3628 cmd->sync_cmd = MEGASAS_TRUE;
3630 3629 cmd->frame_count = 1;
3631 3630
3632 3631 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
3633 3632 con_log(CL_ANN, (CE_WARN,
3634 3633 "issue_mfi_pthru: fw_ioctl failed\n"));
3635 3634 } else {
3636 3635 if (xferlen && (kpthru->flags & MFI_FRAME_DIR_READ)) {
3637 3636
3638 3637 if (ddi_copyout(pthru_dma_obj.buffer, ubuf,
3639 3638 xferlen, mode)) {
3640 3639 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
3641 3640 "copy to user space failed\n"));
3642 3641 return (1);
3643 3642 }
3644 3643 }
3645 3644 }
3646 3645
3647 3646 kpthru->cmd_status = pthru->cmd_status;
3648 3647 kpthru->scsi_status = pthru->scsi_status;
3649 3648
3650 3649 con_log(CL_ANN, (CE_NOTE, "issue_mfi_pthru: cmd_status %x, "
3651 3650 "scsi_status %x\n", pthru->cmd_status, pthru->scsi_status));
3652 3651
3653 3652 if (xferlen) {
3654 3653 /* free kernel buffer */
3655 3654 if (mega_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
3656 3655 return (1);
3657 3656 }
3658 3657
3659 3658 return (0);
3660 3659 }
3661 3660
3662 3661 /*
3663 3662 * issue_mfi_dcmd
3664 3663 */
3665 3664 static int
3666 3665 issue_mfi_dcmd(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
3667 3666 struct megasas_cmd *cmd, int mode)
3668 3667 {
3669 3668 void *ubuf;
3670 3669 uint32_t kphys_addr = 0;
3671 3670 uint32_t xferlen = 0;
3672 3671 uint32_t model;
3673 3672 dma_obj_t dcmd_dma_obj;
3674 3673 struct megasas_dcmd_frame *kdcmd;
3675 3674 struct megasas_dcmd_frame *dcmd;
3676 3675
3677 3676 dcmd = &cmd->frame->dcmd;
3678 3677 kdcmd = (struct megasas_dcmd_frame *)&ioctl->frame[0];
3679 3678
3680 3679 model = ddi_model_convert_from(mode & FMODELS);
3681 3680 if (model == DDI_MODEL_ILP32) {
3682 3681 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
3683 3682
3684 3683 xferlen = kdcmd->sgl.sge32[0].length;
3685 3684
3686 3685 /* SJ! - ubuf needs to be virtual address. */
3687 3686 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
3688 3687 }
3689 3688 else
3690 3689 {
3691 3690 #ifdef _ILP32
3692 3691 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
3693 3692 xferlen = kdcmd->sgl.sge32[0].length;
3694 3693 /* SJ! - ubuf needs to be virtual address. */
3695 3694 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
3696 3695 #else
3697 3696 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_LP64"));
3698 3697 xferlen = kdcmd->sgl.sge64[0].length;
3699 3698 /* SJ! - ubuf needs to be virtual address. */
3700 3699 		ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
3701 3700 #endif
3702 3701 }
3703 3702 if (xferlen) {
3704 3703 /* means IOCTL requires DMA */
3705 3704 /* allocate the data transfer buffer */
3706 3705 dcmd_dma_obj.size = xferlen;
3707 3706 dcmd_dma_obj.dma_attr = megasas_generic_dma_attr;
3708 3707 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3709 3708 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3710 3709 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3711 3710 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3712 3711
3713 3712 /* allocate kernel buffer for DMA */
3714 3713 if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) {
3715 3714 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
3716 3715 			    "could not allocate data transfer buffer."));
3717 3716 return (DDI_FAILURE);
3718 3717 }
3719 3718
3720 3719 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3721 3720 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) {
3722 3721 if (ddi_copyin(ubuf, (void *)dcmd_dma_obj.buffer,
3723 3722 xferlen, mode)) {
3724 3723 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
3725 3724 "copy from user space failed\n"));
3726 3725 return (1);
3727 3726 }
3728 3727 }
3729 3728
3730 3729 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
3731 3730 }
3732 3731
3733 3732 dcmd->cmd = kdcmd->cmd;
3734 3733 dcmd->cmd_status = kdcmd->cmd_status;
3735 3734 dcmd->sge_count = kdcmd->sge_count;
3736 3735 dcmd->timeout = kdcmd->timeout;
3737 3736 dcmd->data_xfer_len = kdcmd->data_xfer_len;
3738 3737 dcmd->opcode = kdcmd->opcode;
3739 3738
3740 3739 bcopy((void *)kdcmd->mbox.b, (void *)dcmd->mbox.b, DCMD_MBOX_SZ);
3741 3740
3742 3741 dcmd->flags = kdcmd->flags & ~MFI_FRAME_SGL64;
3743 3742 dcmd->sgl.sge32[0].length = xferlen;
3744 3743 dcmd->sgl.sge32[0].phys_addr = kphys_addr;
3745 3744
3746 3745 cmd->sync_cmd = MEGASAS_TRUE;
3747 3746 cmd->frame_count = 1;
3748 3747
3749 3748 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
3750 3749 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed\n"));
3751 3750 } else {
3752 3751 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
3753 3752
3754 3753 if (ddi_copyout(dcmd_dma_obj.buffer, ubuf,
3755 3754 xferlen, mode)) {
3756 3755 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
3757 3756 "copy to user space failed\n"));
3758 3757 return (1);
3759 3758 }
3760 3759 }
3761 3760 }
3762 3761
3763 3762 kdcmd->cmd_status = dcmd->cmd_status;
3764 3763
3765 3764 if (xferlen) {
3766 3765 /* free kernel buffer */
3767 3766 if (mega_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
3768 3767 return (1);
3769 3768 }
3770 3769
3771 3770 return (0);
3772 3771 }
3773 3772
3774 3773 /*
3775 3774 * issue_mfi_smp
3776 3775 */
3777 3776 static int
3778 3777 issue_mfi_smp(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
3779 3778 struct megasas_cmd *cmd, int mode)
3780 3779 {
3781 3780 void *request_ubuf;
3782 3781 void *response_ubuf;
3783 3782 uint32_t request_xferlen = 0;
3784 3783 uint32_t response_xferlen = 0;
3785 3784 uint_t model;
3786 3785 dma_obj_t request_dma_obj;
3787 3786 dma_obj_t response_dma_obj;
3788 3787 struct megasas_smp_frame *ksmp;
3789 3788 struct megasas_smp_frame *smp;
3790 3789 struct megasas_sge32 *sge32;
3791 3790 #ifndef _ILP32
3792 3791 struct megasas_sge64 *sge64;
3793 3792 #endif
3794 3793
3795 3794 smp = &cmd->frame->smp;
3796 3795 ksmp = (struct megasas_smp_frame *)&ioctl->frame[0];
3797 3796
3798 3797 model = ddi_model_convert_from(mode & FMODELS);
3799 3798 if (model == DDI_MODEL_ILP32) {
3800 3799 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32"));
3801 3800
3802 3801 sge32 = &ksmp->sgl[0].sge32[0];
3803 3802 response_xferlen = sge32[0].length;
3804 3803 request_xferlen = sge32[1].length;
3805 3804 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: "
3806 3805 "response_xferlen = %x, request_xferlen = %x",
3807 3806 response_xferlen, request_xferlen));
3808 3807
3809 3808 /* SJ! - ubuf needs to be virtual address. */
3810 3809
3811 3810 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
3812 3811 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
3813 3812 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
3814 3813 "response_ubuf = %p, request_ubuf = %p",
3815 3814 response_ubuf, request_ubuf));
3816 3815 } else {
3817 3816 #ifdef _ILP32
3818 3817 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32"));
3819 3818
3820 3819 sge32 = &ksmp->sgl[0].sge32[0];
3821 3820 response_xferlen = sge32[0].length;
3822 3821 request_xferlen = sge32[1].length;
3823 3822 con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: "
3824 3823 "response_xferlen = %x, request_xferlen = %x",
3825 3824 response_xferlen, request_xferlen));
3826 3825
3827 3826 /* SJ! - ubuf needs to be virtual address. */
3828 3827
3829 3828 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
3830 3829 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
3831 3830 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
3832 3831 "response_ubuf = %p, request_ubuf = %p",
3833 3832 response_ubuf, request_ubuf));
3834 3833 #else
3835 3834 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_LP64"));
3836 3835
3837 3836 sge64 = &ksmp->sgl[0].sge64[0];
3838 3837 response_xferlen = sge64[0].length;
3839 3838 request_xferlen = sge64[1].length;
3840 3839
3841 3840 /* SJ! - ubuf needs to be virtual address. */
3842 3841 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr;
3843 3842 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr;
3844 3843 #endif
3845 3844 }
3846 3845 if (request_xferlen) {
3847 3846 /* means IOCTL requires DMA */
3848 3847 /* allocate the data transfer buffer */
3849 3848 request_dma_obj.size = request_xferlen;
3850 3849 request_dma_obj.dma_attr = megasas_generic_dma_attr;
3851 3850 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3852 3851 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3853 3852 request_dma_obj.dma_attr.dma_attr_sgllen = 1;
3854 3853 request_dma_obj.dma_attr.dma_attr_align = 1;
3855 3854
3856 3855 /* allocate kernel buffer for DMA */
3857 3856 if (mega_alloc_dma_obj(instance, &request_dma_obj) != 1) {
3858 3857 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3859 3858 			    "could not allocate data transfer buffer."));
3860 3859 return (DDI_FAILURE);
3861 3860 }
3862 3861
3863 3862 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3864 3863 if (ddi_copyin(request_ubuf, (void *) request_dma_obj.buffer,
3865 3864 request_xferlen, mode)) {
3866 3865 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3867 3866 "copy from user space failed\n"));
3868 3867 return (1);
3869 3868 }
3870 3869 }
3871 3870
3872 3871 if (response_xferlen) {
3873 3872 /* means IOCTL requires DMA */
3874 3873 /* allocate the data transfer buffer */
3875 3874 response_dma_obj.size = response_xferlen;
3876 3875 response_dma_obj.dma_attr = megasas_generic_dma_attr;
3877 3876 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3878 3877 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3879 3878 response_dma_obj.dma_attr.dma_attr_sgllen = 1;
3880 3879 response_dma_obj.dma_attr.dma_attr_align = 1;
3881 3880
3882 3881 /* allocate kernel buffer for DMA */
3883 3882 if (mega_alloc_dma_obj(instance, &response_dma_obj) != 1) {
3884 3883 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3885 3884 			    "could not allocate data transfer buffer."));
3886 3885 return (DDI_FAILURE);
3887 3886 }
3888 3887
3889 3888 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3890 3889 if (ddi_copyin(response_ubuf, (void *) response_dma_obj.buffer,
3891 3890 response_xferlen, mode)) {
3892 3891 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3893 3892 "copy from user space failed\n"));
3894 3893 return (1);
3895 3894 }
3896 3895 }
3897 3896
3898 3897 smp->cmd = ksmp->cmd;
3899 3898 smp->cmd_status = ksmp->cmd_status;
3900 3899 smp->connection_status = ksmp->connection_status;
3901 3900 smp->sge_count = ksmp->sge_count;
3902 3901 /* smp->context = ksmp->context; */
3903 3902 smp->timeout = ksmp->timeout;
3904 3903 smp->data_xfer_len = ksmp->data_xfer_len;
3905 3904
3906 3905 bcopy((void *)&ksmp->sas_addr, (void *)&smp->sas_addr,
3907 3906 sizeof (uint64_t));
3908 3907
3909 3908 smp->flags = ksmp->flags & ~MFI_FRAME_SGL64;
3910 3909
3911 3910 model = ddi_model_convert_from(mode & FMODELS);
3912 3911 if (model == DDI_MODEL_ILP32) {
3913 3912 con_log(CL_ANN1, (CE_NOTE,
3914 3913 		    "issue_mfi_smp: DDI_MODEL_ILP32"));
3915 3914
3916 3915 sge32 = &smp->sgl[0].sge32[0];
3917 3916 sge32[0].length = response_xferlen;
3918 3917 sge32[0].phys_addr =
3919 3918 response_dma_obj.dma_cookie[0].dmac_address;
3920 3919 sge32[1].length = request_xferlen;
3921 3920 sge32[1].phys_addr =
3922 3921 request_dma_obj.dma_cookie[0].dmac_address;
3923 3922 } else {
3924 3923 #ifdef _ILP32
3925 3924 con_log(CL_ANN1, (CE_NOTE,
3926 3925 		    "issue_mfi_smp: DDI_MODEL_ILP32"));
3927 3926 sge32 = &smp->sgl[0].sge32[0];
3928 3927 sge32[0].length = response_xferlen;
3929 3928 sge32[0].phys_addr =
3930 3929 response_dma_obj.dma_cookie[0].dmac_address;
3931 3930 sge32[1].length = request_xferlen;
3932 3931 sge32[1].phys_addr =
3933 3932 request_dma_obj.dma_cookie[0].dmac_address;
3934 3933 #else
3935 3934 con_log(CL_ANN1, (CE_NOTE,
3936 3935 "issue_mfi_smp: DDI_MODEL_LP64"));
3937 3936 sge64 = &smp->sgl[0].sge64[0];
3938 3937 sge64[0].length = response_xferlen;
3939 3938 sge64[0].phys_addr =
3940 3939 response_dma_obj.dma_cookie[0].dmac_address;
3941 3940 sge64[1].length = request_xferlen;
3942 3941 sge64[1].phys_addr =
3943 3942 request_dma_obj.dma_cookie[0].dmac_address;
3944 3943 #endif
3945 3944 }
3946 3945 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
3947 3946 "smp->response_xferlen = %d, smp->request_xferlen = %d "
3948 3947 	    "smp->data_xfer_len = %d", response_xferlen, request_xferlen,
3949 3948 smp->data_xfer_len));
3950 3949
3951 3950 cmd->sync_cmd = MEGASAS_TRUE;
3952 3951 cmd->frame_count = 1;
3953 3952
3954 3953 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
3955 3954 con_log(CL_ANN, (CE_WARN,
3956 3955 "issue_mfi_smp: fw_ioctl failed\n"));
3957 3956 } else {
3958 3957 con_log(CL_ANN1, (CE_NOTE,
3959 3958 "issue_mfi_smp: copy to user space\n"));
3960 3959
3961 3960 if (request_xferlen) {
3962 3961 if (ddi_copyout(request_dma_obj.buffer, request_ubuf,
3963 3962 request_xferlen, mode)) {
3964 3963 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3965 3964 "copy to user space failed\n"));
3966 3965 return (1);
3967 3966 }
3968 3967 }
3969 3968
3970 3969 if (response_xferlen) {
3971 3970 if (ddi_copyout(response_dma_obj.buffer, response_ubuf,
3972 3971 response_xferlen, mode)) {
3973 3972 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3974 3973 "copy to user space failed\n"));
3975 3974 return (1);
3976 3975 }
3977 3976 }
3978 3977 }
3979 3978
3980 3979 ksmp->cmd_status = smp->cmd_status;
3981 3980 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
3982 3981 smp->cmd_status));
3983 3982
3984 3983
3985 3984 if (request_xferlen) {
3986 3985 /* free kernel buffer */
3987 3986 if (mega_free_dma_obj(instance, request_dma_obj) != DDI_SUCCESS)
3988 3987 return (1);
3989 3988 }
3990 3989
3991 3990 if (response_xferlen) {
3992 3991 /* free kernel buffer */
3993 3992 if (mega_free_dma_obj(instance, response_dma_obj) !=
3994 3993 DDI_SUCCESS)
3995 3994 return (1);
3996 3995 }
3997 3996
3998 3997 return (0);
3999 3998 }
4000 3999
4001 4000 /*
4002 4001 * issue_mfi_stp
4003 4002 */
4004 4003 static int
4005 4004 issue_mfi_stp(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
4006 4005 struct megasas_cmd *cmd, int mode)
4007 4006 {
4008 4007 void *fis_ubuf;
4009 4008 void *data_ubuf;
4010 4009 uint32_t fis_xferlen = 0;
4011 4010 uint32_t data_xferlen = 0;
4012 4011 uint_t model;
4013 4012 dma_obj_t fis_dma_obj;
4014 4013 dma_obj_t data_dma_obj;
4015 4014 struct megasas_stp_frame *kstp;
4016 4015 struct megasas_stp_frame *stp;
4017 4016
4018 4017 stp = &cmd->frame->stp;
4019 4018 kstp = (struct megasas_stp_frame *)&ioctl->frame[0];
4020 4019
4021 4020 model = ddi_model_convert_from(mode & FMODELS);
4022 4021 if (model == DDI_MODEL_ILP32) {
4023 4022 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));
4024 4023
4025 4024 fis_xferlen = kstp->sgl.sge32[0].length;
4026 4025 data_xferlen = kstp->sgl.sge32[1].length;
4027 4026
4028 4027 /* SJ! - ubuf needs to be virtual address. */
4029 4028 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
4030 4029 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
4031 4030 }
4032 4031 else
4033 4032 {
4034 4033 #ifdef _ILP32
4035 4034 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));
4036 4035
4037 4036 fis_xferlen = kstp->sgl.sge32[0].length;
4038 4037 data_xferlen = kstp->sgl.sge32[1].length;
4039 4038
4040 4039 /* SJ! - ubuf needs to be virtual address. */
4041 4040 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
4042 4041 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
4043 4042 #else
4044 4043 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_LP64"));
4045 4044
4046 4045 fis_xferlen = kstp->sgl.sge64[0].length;
4047 4046 data_xferlen = kstp->sgl.sge64[1].length;
4048 4047
4049 4048 /* SJ! - ubuf needs to be virtual address. */
4050 4049 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
4051 4050 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
4052 4051 #endif
4053 4052 }
4054 4053
4055 4054
4056 4055 if (fis_xferlen) {
4057 4056 con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: "
4058 4057 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));
4059 4058
4060 4059 /* means IOCTL requires DMA */
4061 4060 /* allocate the data transfer buffer */
4062 4061 fis_dma_obj.size = fis_xferlen;
4063 4062 fis_dma_obj.dma_attr = megasas_generic_dma_attr;
4064 4063 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
4065 4064 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
4066 4065 fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
4067 4066 fis_dma_obj.dma_attr.dma_attr_align = 1;
4068 4067
4069 4068 /* allocate kernel buffer for DMA */
4070 4069 if (mega_alloc_dma_obj(instance, &fis_dma_obj) != 1) {
4071 4070 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4072 4071 			    "could not allocate data transfer buffer."));
4073 4072 return (DDI_FAILURE);
4074 4073 }
4075 4074
4076 4075 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
4077 4076 if (ddi_copyin(fis_ubuf, (void *)fis_dma_obj.buffer,
4078 4077 fis_xferlen, mode)) {
4079 4078 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4080 4079 "copy from user space failed\n"));
4081 4080 return (1);
4082 4081 }
4083 4082 }
4084 4083
4085 4084 if (data_xferlen) {
4086 4085 con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: data_ubuf = %p "
4087 4086 "data_xferlen = %x", data_ubuf, data_xferlen));
4088 4087
4089 4088 /* means IOCTL requires DMA */
4090 4089 /* allocate the data transfer buffer */
4091 4090 data_dma_obj.size = data_xferlen;
4092 4091 data_dma_obj.dma_attr = megasas_generic_dma_attr;
4093 4092 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
4094 4093 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
4095 4094 data_dma_obj.dma_attr.dma_attr_sgllen = 1;
4096 4095 data_dma_obj.dma_attr.dma_attr_align = 1;
4097 4096
4098 4097 /* allocate kernel buffer for DMA */
4099 4098 if (mega_alloc_dma_obj(instance, &data_dma_obj) != 1) {
4100 4099 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4101 4100 			    "could not allocate data transfer buffer."));
4102 4101 return (DDI_FAILURE);
4103 4102 }
4104 4103
4105 4104 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
4106 4105 if (ddi_copyin(data_ubuf, (void *) data_dma_obj.buffer,
4107 4106 data_xferlen, mode)) {
4108 4107 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4109 4108 "copy from user space failed\n"));
4110 4109 return (1);
4111 4110 }
4112 4111 }
4113 4112
4114 4113 stp->cmd = kstp->cmd;
4115 4114 stp->cmd_status = kstp->cmd_status;
4116 4115 stp->connection_status = kstp->connection_status;
4117 4116 stp->target_id = kstp->target_id;
4118 4117 stp->sge_count = kstp->sge_count;
4119 4118 /* stp->context = kstp->context; */
4120 4119 stp->timeout = kstp->timeout;
4121 4120 stp->data_xfer_len = kstp->data_xfer_len;
4122 4121
4123 4122 bcopy((void *)kstp->fis, (void *)stp->fis, 10);
4124 4123
4125 4124 stp->flags = kstp->flags & ~MFI_FRAME_SGL64;
4126 4125 stp->stp_flags = kstp->stp_flags;
4127 4126 stp->sgl.sge32[0].length = fis_xferlen;
4128 4127 stp->sgl.sge32[0].phys_addr = fis_dma_obj.dma_cookie[0].dmac_address;
4129 4128 stp->sgl.sge32[1].length = data_xferlen;
4130 4129 stp->sgl.sge32[1].phys_addr = data_dma_obj.dma_cookie[0].dmac_address;
4131 4130
4132 4131 cmd->sync_cmd = MEGASAS_TRUE;
4133 4132 cmd->frame_count = 1;
4134 4133
4135 4134 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
4136 4135 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed\n"));
4137 4136 } else {
4138 4137
4139 4138 if (fis_xferlen) {
4140 4139 if (ddi_copyout(fis_dma_obj.buffer, fis_ubuf,
4141 4140 fis_xferlen, mode)) {
4142 4141 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4143 4142 "copy to user space failed\n"));
4144 4143 return (1);
4145 4144 }
4146 4145 }
4147 4146
4148 4147 if (data_xferlen) {
4149 4148 if (ddi_copyout(data_dma_obj.buffer, data_ubuf,
4150 4149 data_xferlen, mode)) {
4151 4150 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4152 4151 "copy to user space failed\n"));
4153 4152 return (1);
4154 4153 }
4155 4154 }
4156 4155 }
4157 4156
4158 4157 kstp->cmd_status = stp->cmd_status;
4159 4158
4160 4159 if (fis_xferlen) {
4161 4160 /* free kernel buffer */
4162 4161 if (mega_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
4163 4162 return (1);
4164 4163 }
4165 4164
4166 4165 if (data_xferlen) {
4167 4166 /* free kernel buffer */
4168 4167 if (mega_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
4169 4168 return (1);
4170 4169 }
4171 4170
4172 4171 return (0);
4173 4172 }
4174 4173
4175 4174 /*
4176 4175 * fill_up_drv_ver
4177 4176 */
4178 4177 static void
4179 4178 fill_up_drv_ver(struct megasas_drv_ver *dv)
4180 4179 {
4181 4180 (void) memset(dv, 0, sizeof (struct megasas_drv_ver));
4182 4181
4183 4182 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
4184 4183 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
4185 4184 (void) memcpy(dv->drv_name, "megaraid_sas", strlen("megaraid_sas"));
4186 4185 (void) memcpy(dv->drv_ver, MEGASAS_VERSION, strlen(MEGASAS_VERSION));
4187 4186 (void) memcpy(dv->drv_rel_date, MEGASAS_RELDATE,
4188 4187 strlen(MEGASAS_RELDATE));
4189 4188 }
4190 4189
4191 4190 /*
4192 4191 * handle_drv_ioctl
4193 4192 */
4194 4193 static int
4195 4194 handle_drv_ioctl(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
4196 4195 int mode)
4197 4196 {
4198 4197 int i;
4199 4198 int rval = 0;
4200 4199 int *props = NULL;
4201 4200 void *ubuf;
4202 4201
4203 4202 uint8_t *pci_conf_buf;
4204 4203 uint32_t xferlen;
4205 4204 uint32_t num_props;
4206 4205 uint_t model;
4207 4206 struct megasas_dcmd_frame *kdcmd;
4208 4207 struct megasas_drv_ver dv;
4209 4208 struct megasas_pci_information pi;
4210 4209
4211 4210 kdcmd = (struct megasas_dcmd_frame *)&ioctl->frame[0];
4212 4211
4213 4212 model = ddi_model_convert_from(mode & FMODELS);
4214 4213 if (model == DDI_MODEL_ILP32) {
4215 4214 con_log(CL_ANN1, (CE_NOTE,
4216 4215 "handle_drv_ioctl: DDI_MODEL_ILP32"));
4217 4216
4218 4217 xferlen = kdcmd->sgl.sge32[0].length;
4219 4218
4220 4219 /* SJ! - ubuf needs to be virtual address. */
4221 4220 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
4222 4221 } else {
4223 4222 #ifdef _ILP32
4224 4223 con_log(CL_ANN1, (CE_NOTE,
4225 4224 "handle_drv_ioctl: DDI_MODEL_ILP32"));
4226 4225 xferlen = kdcmd->sgl.sge32[0].length;
4227 4226 /* SJ! - ubuf needs to be virtual address. */
4228 4227 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
4229 4228 #else
4230 4229 con_log(CL_ANN1, (CE_NOTE,
4231 4230 "handle_drv_ioctl: DDI_MODEL_LP64"));
4232 4231 xferlen = kdcmd->sgl.sge64[0].length;
4233 4232 /* SJ! - ubuf needs to be virtual address. */
4234 4233 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
4235 4234 #endif
4236 4235 }
4237 4236 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
4238 4237 "dataBuf=%p size=%d bytes", ubuf, xferlen));
4239 4238
4240 4239 switch (kdcmd->opcode) {
4241 4240 case MR_DRIVER_IOCTL_DRIVER_VERSION:
4242 4241 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
4243 4242 "MR_DRIVER_IOCTL_DRIVER_VERSION"));
4244 4243
4245 4244 fill_up_drv_ver(&dv);
4246 4245
4247 4246 if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
4248 4247 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4249 4248 "MR_DRIVER_IOCTL_DRIVER_VERSION : "
4250 4249 "copy to user space failed\n"));
4251 4250 kdcmd->cmd_status = 1;
4252 4251 rval = 1;
4253 4252 } else {
4254 4253 kdcmd->cmd_status = 0;
4255 4254 }
4256 4255 break;
4257 4256 case MR_DRIVER_IOCTL_PCI_INFORMATION:
4258 4257 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
4259 4258 		    "MR_DRIVER_IOCTL_PCI_INFORMATION"));
4260 4259
4261 4260 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
4262 4261 0, "reg", &props, &num_props)) {
4263 4262 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4264 4263 "MR_DRIVER_IOCTL_PCI_INFORMATION : "
4265 4264 			    "ddi_prop_lookup_int_array failed\n"));
4266 4265 rval = 1;
4267 4266 } else {
4268 4267
4269 4268 pi.busNumber = (props[0] >> 16) & 0xFF;
4270 4269 pi.deviceNumber = (props[0] >> 11) & 0x1f;
4271 4270 pi.functionNumber = (props[0] >> 8) & 0x7;
4272 4271 ddi_prop_free((void *)props);
4273 4272 }
4274 4273
4275 4274 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;
4276 4275
4277 4276 for (i = 0; i < (sizeof (struct megasas_pci_information) -
4278 4277 offsetof(struct megasas_pci_information, pciHeaderInfo));
4279 4278 i++) {
4280 4279 pci_conf_buf[i] =
4281 4280 pci_config_get8(instance->pci_handle, i);
4282 4281 }
4283 4282
4284 4283 if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
4285 4284 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4286 4285 "MR_DRIVER_IOCTL_PCI_INFORMATION : "
4287 4286 "copy to user space failed\n"));
4288 4287 kdcmd->cmd_status = 1;
4289 4288 rval = 1;
4290 4289 } else {
4291 4290 kdcmd->cmd_status = 0;
4292 4291 }
4293 4292 break;
4294 4293 default:
4295 4294 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4296 4295 "invalid driver specific IOCTL opcode = 0x%x",
4297 4296 kdcmd->opcode));
4298 4297 kdcmd->cmd_status = 1;
4299 4298 rval = 1;
4300 4299 break;
4301 4300 }
4302 4301
4303 4302 return (rval);
4304 4303 }
4305 4304
4306 4305 /*
4307 4306 * handle_mfi_ioctl
4308 4307 */
4309 4308 static int
4310 4309 handle_mfi_ioctl(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
4311 4310 int mode)
4312 4311 {
4313 4312 int rval = 0;
4314 4313
4315 4314 struct megasas_header *hdr;
4316 4315 struct megasas_cmd *cmd;
4317 4316
4318 4317 cmd = get_mfi_pkt(instance);
4319 4318
4320 4319 if (!cmd) {
4321 4320 con_log(CL_ANN, (CE_WARN, "megasas: "
4322 4321 "failed to get a cmd packet\n"));
4323 4322 return (1);
4324 4323 }
4325 4324
4326 4325 hdr = (struct megasas_header *)&ioctl->frame[0];
4327 4326
4328 4327 switch (hdr->cmd) {
4329 4328 case MFI_CMD_OP_DCMD:
4330 4329 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
4331 4330 break;
4332 4331 case MFI_CMD_OP_SMP:
4333 4332 rval = issue_mfi_smp(instance, ioctl, cmd, mode);
4334 4333 break;
4335 4334 case MFI_CMD_OP_STP:
4336 4335 rval = issue_mfi_stp(instance, ioctl, cmd, mode);
4337 4336 break;
4338 4337 case MFI_CMD_OP_LD_SCSI:
4339 4338 case MFI_CMD_OP_PD_SCSI:
4340 4339 rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
4341 4340 break;
4342 4341 default:
4343 4342 con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
4344 4343 "invalid mfi ioctl hdr->cmd = %d\n", hdr->cmd));
4345 4344 rval = 1;
4346 4345 break;
4347 4346 }
4348 4347
4349 4348
4350 4349 return_mfi_pkt(instance, cmd);
4351 4350 if (megasas_common_check(instance, cmd) != DDI_SUCCESS)
4352 4351 rval = 1;
4353 4352 return (rval);
4354 4353 }
4355 4354
4356 4355 /*
4357 4356 * AEN
4358 4357 */
4359 4358 static int
4360 4359 handle_mfi_aen(struct megasas_instance *instance, struct megasas_aen *aen)
4361 4360 {
4362 4361 int rval = 0;
4363 4362
4364 4363 rval = register_mfi_aen(instance, instance->aen_seq_num,
4365 4364 aen->class_locale_word);
4366 4365
4367 4366 aen->cmd_status = (uint8_t)rval;
4368 4367
4369 4368 return (rval);
4370 4369 }
4371 4370
4372 4371 static int
4373 4372 register_mfi_aen(struct megasas_instance *instance, uint32_t seq_num,
4374 4373 uint32_t class_locale_word)
4375 4374 {
4376 4375 int ret_val;
4377 4376
4378 4377 struct megasas_cmd *cmd;
4379 4378 struct megasas_dcmd_frame *dcmd;
4380 4379 union megasas_evt_class_locale curr_aen;
4381 4380 union megasas_evt_class_locale prev_aen;
4382 4381
4383 4382 /*
4384 4383 	 * If there is an AEN pending already (aen_cmd), check if the
4385 4384 * class_locale of that pending AEN is inclusive of the new
4386 4385 * AEN request we currently have. If it is, then we don't have
4387 4386 * to do anything. In other words, whichever events the current
4388 4387 	 * AEN request is subscribing to have already been subscribed
4389 4388 * to.
4390 4389 *
4391 4390 * If the old_cmd is _not_ inclusive, then we have to abort
4392 4391 * that command, form a class_locale that is superset of both
4393 4392 * old and current and re-issue to the FW
4394 4393 */
4395 4394
4396 4395 curr_aen.word = class_locale_word;
4397 4396
4398 4397 if (instance->aen_cmd) {
4399 4398 prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
4400 4399
4401 4400 /*
4402 4401 * A class whose enum value is smaller is inclusive of all
4403 4402 * higher values. If a PROGRESS (= -1) was previously
4404 4403 	 * registered, then new registration requests for higher
4405 4404 	 * classes need not be sent to the FW. They are automatically
4406 4405 * included.
4407 4406 *
4408 4407 * Locale numbers don't have such hierarchy. They are bitmap
4409 4408 * values
4410 4409 */
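		/*
		 * The locale check below verifies that the pending
		 * registration's locale bitmap is a superset of the new one:
		 * (prev & curr) ^ curr is zero only when every bit of curr
		 * is already present in prev.
		 */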
4411 4410 if ((prev_aen.members.class <= curr_aen.members.class) &&
4412 4411 !((prev_aen.members.locale & curr_aen.members.locale) ^
4413 4412 curr_aen.members.locale)) {
4414 4413 /*
4415 4414 * Previously issued event registration includes
4416 4415 * current request. Nothing to do.
4417 4416 */
4418 4417
4419 4418 return (0);
4420 4419 } else {
4421 4420 curr_aen.members.locale |= prev_aen.members.locale;
4422 4421
4423 4422 if (prev_aen.members.class < curr_aen.members.class)
4424 4423 curr_aen.members.class = prev_aen.members.class;
4425 4424
4426 4425 ret_val = abort_aen_cmd(instance, instance->aen_cmd);
4427 4426
4428 4427 if (ret_val) {
4429 4428 con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
4430 4429 			    "failed to abort previous AEN command\n"));
4431 4430
4432 4431 return (ret_val);
4433 4432 }
4434 4433 }
4435 4434 } else {
4436 4435 curr_aen.word = class_locale_word;
4437 4436 }
4438 4437
4439 4438 cmd = get_mfi_pkt(instance);
4440 4439
4441 4440 if (!cmd)
4442 4441 return (-ENOMEM);
4443 4442
4444 4443 dcmd = &cmd->frame->dcmd;
4445 4444
4446 4445 /* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
4447 4446 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4448 4447
4449 4448 (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
4450 4449 sizeof (struct megasas_evt_detail));
4451 4450
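	/*
	 * The event-wait DCMD stays outstanding in the FW: mbox.w[0] carries
	 * the starting sequence number and mbox.w[1] the class/locale
	 * filter, and the FW presumably completes the command only when a
	 * matching event newer than seq_num is available, filling
	 * mfi_evt_detail_obj with its details.
	 */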
4452 4451 /* Prepare DCMD for aen registration */
4453 4452 dcmd->cmd = MFI_CMD_OP_DCMD;
4454 4453 dcmd->cmd_status = 0x0;
4455 4454 dcmd->sge_count = 1;
4456 4455 dcmd->flags = MFI_FRAME_DIR_READ;
4457 4456 dcmd->timeout = 0;
4458 4457 dcmd->data_xfer_len = sizeof (struct megasas_evt_detail);
4459 4458 dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
4460 4459 dcmd->mbox.w[0] = seq_num;
4461 4460 dcmd->mbox.w[1] = curr_aen.word;
4462 4461 dcmd->sgl.sge32[0].phys_addr =
4463 4462 instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address;
4464 4463 dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_detail);
4465 4464
4466 4465 instance->aen_seq_num = seq_num;
4467 4466
4468 4467 /*
4469 4468 * Store reference to the cmd used to register for AEN. When an
4470 4469 * application wants us to register for AEN, we have to abort this
4471 4470 * cmd and re-register with a new EVENT LOCALE supplied by that app
4472 4471 */
4473 4472 instance->aen_cmd = cmd;
4474 4473
4475 4474 cmd->frame_count = 1;
4476 4475
4477 4476 /* Issue the aen registration frame */
4478 4477 /* atomic_add_16 (&instance->fw_outstanding, 1); */
4479 4478 instance->func_ptr->issue_cmd(cmd, instance);
4480 4479
4481 4480 return (0);
4482 4481 }
4483 4482
4484 4483 static void
4485 4484 display_scsi_inquiry(caddr_t scsi_inq)
4486 4485 {
4487 4486 #define MAX_SCSI_DEVICE_CODE 14
4488 4487 int i;
4489 4488 char inquiry_buf[256] = {0};
4490 4489 int len;
4491 4490 const char *const scsi_device_types[] = {
4492 4491 "Direct-Access ",
4493 4492 "Sequential-Access",
4494 4493 "Printer ",
4495 4494 "Processor ",
4496 4495 "WORM ",
4497 4496 "CD-ROM ",
4498 4497 "Scanner ",
4499 4498 "Optical Device ",
4500 4499 "Medium Changer ",
4501 4500 "Communications ",
4502 4501 "Unknown ",
4503 4502 "Unknown ",
4504 4503 "Unknown ",
4505 4504 "Enclosure ",
4506 4505 };
4507 4506
4508 4507 len = 0;
4509 4508
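	/*
	 * Standard INQUIRY data layout: vendor id in bytes 8-15, product id
	 * in bytes 16-31, revision in bytes 32-35, and the peripheral device
	 * type in the low five bits of byte 0.
	 */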
4510 4509 	len += snprintf(inquiry_buf + len, 256 - len, " Vendor: ");
4511 4510 for (i = 8; i < 16; i++) {
4512 4511 		len += snprintf(inquiry_buf + len, 256 - len, "%c",
4513 4512 scsi_inq[i]);
4514 4513 }
4515 4514
4516 4515 	len += snprintf(inquiry_buf + len, 256 - len, " Model: ");
4517 4516
4518 4517 for (i = 16; i < 32; i++) {
4519 4518 		len += snprintf(inquiry_buf + len, 256 - len, "%c",
4520 4519 scsi_inq[i]);
4521 4520 }
4522 4521
4523 4522 	len += snprintf(inquiry_buf + len, 256 - len, " Rev: ");
4524 4523
4525 4524 for (i = 32; i < 36; i++) {
4526 4525 		len += snprintf(inquiry_buf + len, 256 - len, "%c",
4527 4526 scsi_inq[i]);
4528 4527 }
4529 4528
4530 4529 	len += snprintf(inquiry_buf + len, 256 - len, "\n");
4531 4530
4532 4531
4533 4532 i = scsi_inq[0] & 0x1f;
4534 4533
4535 4534
4536 4535 	len += snprintf(inquiry_buf + len, 256 - len, " Type:   %s ",
4537 4536 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
4538 4537 "Unknown ");
4539 4538
4540 4539
4541 4540 	len += snprintf(inquiry_buf + len, 256 - len,
4542 4541 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
4543 4542
4544 4543 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
4545 4544 		len += snprintf(inquiry_buf + len, 256 - len, " CCS\n");
4546 4545 } else {
4547 4546 		len += snprintf(inquiry_buf + len, 256 - len, "\n");
4548 4547 }
4549 4548
4550 4549 	con_log(CL_ANN1, (CE_CONT, "%s", inquiry_buf));
4551 4550 }
4552 4551
4553 4552 static int
4554 4553 read_fw_status_reg_xscale(struct megasas_instance *instance)
4555 4554 {
4556 4555 return ((int)RD_OB_MSG_0(instance));
4557 4556 }
4558 4557
4559 4558 static int
4560 4559 read_fw_status_reg_ppc(struct megasas_instance *instance)
4561 4560 {
4562 4561 return ((int)RD_OB_SCRATCH_PAD_0(instance));
4563 4562 }
4564 4563
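/*
 * issue_cmd_xscale/issue_cmd_ppc
 *
 * Both variants post a command by writing its frame physical address to the
 * inbound queue port with the extra-frame count packed into the low bits:
 * the xscale port takes (addr >> 3) | (frame_count - 1), while the ppc port
 * takes addr | ((frame_count - 1) << 1) | 1.  This packing presumably relies
 * on the frame allocations being sufficiently aligned.
 */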
4565 4564 static void
4566 4565 issue_cmd_xscale(struct megasas_cmd *cmd, struct megasas_instance *instance)
4567 4566 {
4568 4567 atomic_inc_16(&instance->fw_outstanding);
4569 4568
4570 4569 /* Issue the command to the FW */
4571 4570 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
4572 4571 (cmd->frame_count - 1), instance);
4573 4572 }
4574 4573
4575 4574 static void
4576 4575 issue_cmd_ppc(struct megasas_cmd *cmd, struct megasas_instance *instance)
4577 4576 {
4578 4577 atomic_inc_16(&instance->fw_outstanding);
4579 4578
4580 4579 /* Issue the command to the FW */
4581 4580 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
4582 4581 (((cmd->frame_count - 1) << 1) | 1), instance);
4583 4582 }
4584 4583
4585 4584 /*
4586 4585 * issue_cmd_in_sync_mode
4587 4586 */
4588 4587 static int
4589 4588 issue_cmd_in_sync_mode_xscale(struct megasas_instance *instance,
4590 4589 struct megasas_cmd *cmd)
4591 4590 {
4592 4591 int i;
4593 4592 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);
4594 4593
4595 4594 cmd->cmd_status = ENODATA;
4596 4595
4597 4596 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
4598 4597 (cmd->frame_count - 1), instance);
4599 4598
4600 4599 mutex_enter(&instance->int_cmd_mtx);
4601 4600
4602 4601 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
4603 4602 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
4604 4603 }
4605 4604
4606 4605 mutex_exit(&instance->int_cmd_mtx);
4607 4606
4608 4607 	if (i < (msecs - 1)) {
4609 4608 return (0);
4610 4609 } else {
4611 4610 return (1);
4612 4611 }
4613 4612 }
4614 4613
4615 4614 static int
4616 4615 issue_cmd_in_sync_mode_ppc(struct megasas_instance *instance,
4617 4616 struct megasas_cmd *cmd)
4618 4617 {
4619 4618 int i;
4620 4619 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);
4621 4620
4622 4621 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called\n"));
4623 4622
4624 4623 cmd->cmd_status = ENODATA;
4625 4624
4626 4625 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
4627 4626 (((cmd->frame_count - 1) << 1) | 1), instance);
4628 4627
4629 4628 mutex_enter(&instance->int_cmd_mtx);
4630 4629
4631 4630 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
4632 4631 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
4633 4632 }
4634 4633
4635 4634 mutex_exit(&instance->int_cmd_mtx);
4636 4635
4637 4636 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done\n"));
4638 4637
4639 4638 	if (i < (msecs - 1)) {
4640 4639 return (0);
4641 4640 } else {
4642 4641 return (1);
4643 4642 }
4644 4643 }
4645 4644
4646 4645 /*
4647 4646 * issue_cmd_in_poll_mode
4648 4647 */
4649 4648 static int
4650 4649 issue_cmd_in_poll_mode_xscale(struct megasas_instance *instance,
4651 4650 struct megasas_cmd *cmd)
4652 4651 {
4653 4652 int i;
4654 4653 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
4655 4654 struct megasas_header *frame_hdr;
4656 4655
4657 4656 frame_hdr = (struct megasas_header *)cmd->frame;
4658 4657 frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
4659 4658 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
4660 4659
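	/*
	 * Poll mode: cmd_status is preset to the 0xFF sentinel and
	 * MFI_FRAME_DONT_POST_IN_REPLY_QUEUE keeps the completion out of the
	 * reply queue, so the loop below simply watches cmd_status until the
	 * FW overwrites it or the timeout expires.
	 */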
4661 4660 /* issue the frame using inbound queue port */
4662 4661 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
4663 4662 (cmd->frame_count - 1), instance);
4664 4663
4665 4664 /* wait for cmd_status to change from 0xFF */
4666 4665 for (i = 0; i < msecs && (frame_hdr->cmd_status ==
4667 4666 MFI_CMD_STATUS_POLL_MODE); i++) {
4668 4667 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
4669 4668 }
4670 4669
4671 4670 if (frame_hdr->cmd_status == MFI_CMD_STATUS_POLL_MODE) {
4672 4671 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
4673 4672 "cmd polling timed out"));
4674 4673 return (DDI_FAILURE);
4675 4674 }
4676 4675
4677 4676 return (DDI_SUCCESS);
4678 4677 }
4679 4678
4680 4679 static int
4681 4680 issue_cmd_in_poll_mode_ppc(struct megasas_instance *instance,
4682 4681 struct megasas_cmd *cmd)
4683 4682 {
4684 4683 int i;
4685 4684 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
4686 4685 struct megasas_header *frame_hdr;
4687 4686
4688 4687 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called\n"));
4689 4688
4690 4689 frame_hdr = (struct megasas_header *)cmd->frame;
4691 4690 frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
4692 4691 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
4693 4692
4694 4693 /* issue the frame using inbound queue port */
4695 4694 WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
4696 4695 (((cmd->frame_count - 1) << 1) | 1), instance);
4697 4696
4698 4697 /* wait for cmd_status to change from 0xFF */
4699 4698 for (i = 0; i < msecs && (frame_hdr->cmd_status ==
4700 4699 MFI_CMD_STATUS_POLL_MODE); i++) {
4701 4700 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
4702 4701 }
4703 4702
4704 4703 if (frame_hdr->cmd_status == MFI_CMD_STATUS_POLL_MODE) {
4705 4704 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
4706 4705 "cmd polling timed out"));
4707 4706 return (DDI_FAILURE);
4708 4707 }
4709 4708
4710 4709 return (DDI_SUCCESS);
4711 4710 }
4712 4711
4713 4712 static void
4714 4713 enable_intr_xscale(struct megasas_instance *instance)
4715 4714 {
4716 4715 MFI_ENABLE_INTR(instance);
4717 4716 }
4718 4717
4719 4718 static void
4720 4719 enable_intr_ppc(struct megasas_instance *instance)
4721 4720 {
4722 4721 uint32_t mask;
4723 4722
4724 4723 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called\n"));
4725 4724
4726 4725 /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
4727 4726 WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);
4728 4727
4729 4728 /*
4730 4729 	 * As the 1078DE is the same as the 1078 chip, the interrupt mask
4731 4730 * remains the same.
4732 4731 */
4733 4732 /* WR_OB_INTR_MASK(~0x80000000, instance); */
4734 4733 WR_OB_INTR_MASK(~(MFI_REPLY_1078_MESSAGE_INTR), instance);
4735 4734
4736 4735 /* dummy read to force PCI flush */
4737 4736 mask = RD_OB_INTR_MASK(instance);
4738 4737
4739 4738 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
4740 4739 "outbound_intr_mask = 0x%x\n", mask));
4741 4740 }
4742 4741
4743 4742 static void
4744 4743 disable_intr_xscale(struct megasas_instance *instance)
4745 4744 {
4746 4745 MFI_DISABLE_INTR(instance);
4747 4746 }
4748 4747
4749 4748 static void
4750 4749 disable_intr_ppc(struct megasas_instance *instance)
4751 4750 {
4752 4751 uint32_t mask;
4753 4752
4754 4753 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called\n"));
4755 4754
4756 4755 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
4757 4756 "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance)));
4758 4757
4759 4758 /* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */
4760 4759 WR_OB_INTR_MASK(OB_INTR_MASK, instance);
4761 4760
4762 4761 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
4763 4762 "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance)));
4764 4763
4765 4764 /* dummy read to force PCI flush */
4766 4765 mask = RD_OB_INTR_MASK(instance);
4767 4766 #ifdef lint
4768 4767 mask = mask;
4769 4768 #endif
4770 4769 }
4771 4770
4772 4771 static int
4773 4772 intr_ack_xscale(struct megasas_instance *instance)
4774 4773 {
4775 4774 uint32_t status;
4776 4775
4777 4776 /* check if it is our interrupt */
4778 4777 status = RD_OB_INTR_STATUS(instance);
4779 4778
4780 4779 if (!(status & MFI_OB_INTR_STATUS_MASK)) {
4781 4780 return (DDI_INTR_UNCLAIMED);
4782 4781 }
4783 4782
4784 4783 /* clear the interrupt by writing back the same value */
4785 4784 WR_OB_INTR_STATUS(status, instance);
4786 4785
4787 4786 return (DDI_INTR_CLAIMED);
4788 4787 }
4789 4788
4790 4789 static int
4791 4790 intr_ack_ppc(struct megasas_instance *instance)
4792 4791 {
4793 4792 uint32_t status;
4794 4793
4795 4794 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called\n"));
4796 4795
4797 4796 /* check if it is our interrupt */
4798 4797 status = RD_OB_INTR_STATUS(instance);
4799 4798
4800 4799 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x\n", status));
4801 4800
4802 4801 /*
4803 4802 	 * As the 1078DE is the same as the 1078 chip, the status field
4804 4803 * remains the same.
4805 4804 */
4806 4805 if (!(status & MFI_REPLY_1078_MESSAGE_INTR)) {
4807 4806 return (DDI_INTR_UNCLAIMED);
4808 4807 }
4809 4808
4810 4809 /* clear the interrupt by writing back the same value */
4811 4810 WR_OB_DOORBELL_CLEAR(status, instance);
4812 4811
4813 4812 /* dummy READ */
4814 4813 status = RD_OB_INTR_STATUS(instance);
4815 4814
4816 4815 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared\n"));
4817 4816
4818 4817 return (DDI_INTR_CLAIMED);
4819 4818 }
4820 4819
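/*
 * megasas_common_check
 *
 * Sweeps the FMA state of the command's frame DMA handle, the shared
 * internal and event-detail DMA handles, and the register access handle
 * after a command completes; any fault is reported as
 * DDI_SERVICE_UNAFFECTED and the packet, if any, is marked CMD_TRAN_ERR.
 */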
4821 4820 static int
4822 4821 megasas_common_check(struct megasas_instance *instance,
4823 4822 struct megasas_cmd *cmd)
4824 4823 {
4825 4824 int ret = DDI_SUCCESS;
4826 4825
4827 4826 if (megasas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
4828 4827 DDI_SUCCESS) {
4829 4828 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4830 4829 if (cmd->pkt != NULL) {
4831 4830 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4832 4831 cmd->pkt->pkt_statistics = 0;
4833 4832 }
4834 4833 ret = DDI_FAILURE;
4835 4834 }
4836 4835 if (megasas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
4837 4836 != DDI_SUCCESS) {
4838 4837 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4839 4838 if (cmd->pkt != NULL) {
4840 4839 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4841 4840 cmd->pkt->pkt_statistics = 0;
4842 4841 }
4843 4842 ret = DDI_FAILURE;
4844 4843 }
4845 4844 if (megasas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) !=
4846 4845 DDI_SUCCESS) {
4847 4846 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4848 4847 if (cmd->pkt != NULL) {
4849 4848 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4850 4849 cmd->pkt->pkt_statistics = 0;
4851 4850 }
4852 4851 ret = DDI_FAILURE;
4853 4852 }
4854 4853 if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
4855 4854 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4856 4855 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0);
4857 4856 if (cmd->pkt != NULL) {
4858 4857 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4859 4858 cmd->pkt->pkt_statistics = 0;
4860 4859 }
4861 4860 ret = DDI_FAILURE;
4862 4861 }
4863 4862
4864 4863 return (ret);
4865 4864 }
4866 4865
4867 4866 /*ARGSUSED*/
4868 4867 static int
4869 4868 megasas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
4870 4869 {
4871 4870 /*
4872 4871 * as the driver can always deal with an error in any dma or
4873 4872 * access handle, we can just return the fme_status value.
4874 4873 */
4875 4874 pci_ereport_post(dip, err, NULL);
4876 4875 return (err->fme_status);
4877 4876 }
4878 4877
4879 4878 static void
4880 4879 megasas_fm_init(struct megasas_instance *instance)
4881 4880 {
4882 4881 /* Need to change iblock to priority for new MSI intr */
4883 4882 ddi_iblock_cookie_t fm_ibc;
4884 4883
4885 4884 /* Only register with IO Fault Services if we have some capability */
4886 4885 if (instance->fm_capabilities) {
4887 4886 /* Adjust access and dma attributes for FMA */
4888 4887 endian_attr.devacc_attr_access = DDI_FLAGERR_ACC;
4889 4888 megasas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
4890 4889
4891 4890 /*
4892 4891 * Register capabilities with IO Fault Services.
4893 4892 * fm_capabilities will be updated to indicate
4894 4893 * capabilities actually supported (not requested.)
4895 4894 */
4896 4895
4897 4896 ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc);
4898 4897
4899 4898 /*
4900 4899 * Initialize pci ereport capabilities if ereport
4901 4900 * capable (should always be.)
4902 4901 */
4903 4902
4904 4903 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
4905 4904 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4906 4905 pci_ereport_setup(instance->dip);
4907 4906 }
4908 4907
4909 4908 /*
4910 4909 * Register error callback if error callback capable.
4911 4910 */
4912 4911 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4913 4912 ddi_fm_handler_register(instance->dip,
4914 4913 megasas_fm_error_cb, (void*) instance);
4915 4914 }
4916 4915 } else {
4917 4916 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4918 4917 megasas_generic_dma_attr.dma_attr_flags = 0;
4919 4918 }
4920 4919 }
4921 4920
4922 4921 static void
4923 4922 megasas_fm_fini(struct megasas_instance *instance)
4924 4923 {
4925 4924 /* Only unregister FMA capabilities if registered */
4926 4925 if (instance->fm_capabilities) {
4927 4926 /*
4928 4927 * Un-register error callback if error callback capable.
4929 4928 */
4930 4929 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4931 4930 ddi_fm_handler_unregister(instance->dip);
4932 4931 }
4933 4932
4934 4933 /*
4935 4934 * Release any resources allocated by pci_ereport_setup()
4936 4935 */
4937 4936 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
4938 4937 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4939 4938 pci_ereport_teardown(instance->dip);
4940 4939 }
4941 4940
4942 4941 /* Unregister from IO Fault Services */
4943 4942 ddi_fm_fini(instance->dip);
4944 4943
4945 4944 /* Adjust access and dma attributes for FMA */
4946 4945 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4947 4946 megasas_generic_dma_attr.dma_attr_flags = 0;
4948 4947 }
4949 4948 }
4950 4949
4951 4950 int
4952 4951 megasas_check_acc_handle(ddi_acc_handle_t handle)
4953 4952 {
4954 4953 ddi_fm_error_t de;
4955 4954
4956 4955 if (handle == NULL) {
4957 4956 return (DDI_FAILURE);
4958 4957 }
4959 4958
4960 4959 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
4961 4960
4962 4961 return (de.fme_status);
4963 4962 }
4964 4963
4965 4964 int
4966 4965 megasas_check_dma_handle(ddi_dma_handle_t handle)
4967 4966 {
4968 4967 ddi_fm_error_t de;
4969 4968
4970 4969 if (handle == NULL) {
4971 4970 return (DDI_FAILURE);
4972 4971 }
4973 4972
4974 4973 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
4975 4974
4976 4975 return (de.fme_status);
4977 4976 }
4978 4977
4979 4978 void
4980 4979 megasas_fm_ereport(struct megasas_instance *instance, char *detail)
4981 4980 {
4982 4981 uint64_t ena;
4983 4982 char buf[FM_MAX_CLASS];
4984 4983
4985 4984 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
4986 4985 ena = fm_ena_generate(0, FM_ENA_FMT1);
4987 4986 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) {
4988 4987 ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP,
4989 4988 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
4990 4989 }
4991 4990 }