Print this page
9702 HBA drivers don't need the redundant devfs_clean step
Reviewed by: Dan Fields <dan.fields@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/mr_sas/mr_sas.c
+++ new/usr/src/uts/common/io/mr_sas/mr_sas.c
1 1 /*
2 2 * mr_sas.c: source for mr_sas driver
3 3 *
4 4 * Solaris MegaRAID device driver for SAS2.0 controllers
5 5 * Copyright (c) 2008-2012, LSI Logic Corporation.
6 6 * All rights reserved.
7 7 *
8 8 * Version:
9 9 * Author:
10 10 * Swaminathan K S
11 11 * Arun Chandrashekhar
12 12 * Manju R
13 13 * Rasheed
14 14 * Shakeel Bukhari
15 15 *
16 16 * Redistribution and use in source and binary forms, with or without
17 17 * modification, are permitted provided that the following conditions are met:
18 18 *
19 19 * 1. Redistributions of source code must retain the above copyright notice,
20 20 * this list of conditions and the following disclaimer.
21 21 *
22 22 * 2. Redistributions in binary form must reproduce the above copyright notice,
23 23 * this list of conditions and the following disclaimer in the documentation
24 24 * and/or other materials provided with the distribution.
25 25 *
26 26 * 3. Neither the name of the author nor the names of its contributors may be
27 27 * used to endorse or promote products derived from this software without
28 28 * specific prior written permission.
29 29 *
30 30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31 31 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32 32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
33 33 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
34 34 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
35 35 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
36 36 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
↓ open down ↓ |
36 lines elided |
↑ open up ↑ |
37 37 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
38 38 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
39 39 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
40 40 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
41 41 * DAMAGE.
42 42 */
43 43
44 44 /*
45 45 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
46 46 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
47 - * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
47 + * Copyright 2018 Nexenta Systems, Inc.
48 48 * Copyright 2015, 2017 Citrus IT Limited. All rights reserved.
49 49 * Copyright 2015 Garrett D'Amore <garrett@damore.org>
50 50 */
51 51
52 52 #include <sys/types.h>
53 53 #include <sys/param.h>
54 54 #include <sys/file.h>
55 55 #include <sys/errno.h>
56 56 #include <sys/open.h>
57 57 #include <sys/cred.h>
58 58 #include <sys/modctl.h>
59 59 #include <sys/conf.h>
60 60 #include <sys/devops.h>
61 61 #include <sys/cmn_err.h>
62 62 #include <sys/kmem.h>
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
63 63 #include <sys/stat.h>
64 64 #include <sys/mkdev.h>
65 65 #include <sys/pci.h>
66 66 #include <sys/scsi/scsi.h>
67 67 #include <sys/ddi.h>
68 68 #include <sys/sunddi.h>
69 69 #include <sys/atomic.h>
70 70 #include <sys/signal.h>
71 71 #include <sys/byteorder.h>
72 72 #include <sys/sdt.h>
73 -#include <sys/fs/dv_node.h> /* devfs_clean */
74 73
75 74 #include "mr_sas.h"
76 75
77 76 /*
78 77 * FMA header files
79 78 */
80 79 #include <sys/ddifm.h>
81 80 #include <sys/fm/protocol.h>
82 81 #include <sys/fm/util.h>
83 82 #include <sys/fm/io/ddi.h>
84 83
85 84 /* Macros to help Skinny and stock 2108/MFI live together. */
86 85 #define WR_IB_PICK_QPORT(addr, instance) \
87 86 if ((instance)->skinny) { \
88 87 WR_IB_LOW_QPORT((addr), (instance)); \
89 88 WR_IB_HIGH_QPORT(0, (instance)); \
90 89 } else { \
91 90 WR_IB_QPORT((addr), (instance)); \
92 91 }
93 92
94 93 /*
95 94 * Local static data
96 95 */
97 96 static void *mrsas_state = NULL;
98 97 static volatile boolean_t mrsas_relaxed_ordering = B_TRUE;
99 98 volatile int debug_level_g = CL_NONE;
100 99 static volatile int msi_enable = 1;
101 100 static volatile int ctio_enable = 1;
102 101
103 102 /* Default Timeout value to issue online controller reset */
104 103 volatile int debug_timeout_g = 0xF0; /* 0xB4; */
105 104 /* Simulate consecutive firmware fault */
106 105 static volatile int debug_fw_faults_after_ocr_g = 0;
107 106 #ifdef OCRDEBUG
108 107 /* Simulate three consecutive timeout for an IO */
109 108 static volatile int debug_consecutive_timeout_after_ocr_g = 0;
110 109 #endif
111 110
112 111 #pragma weak scsi_hba_open
113 112 #pragma weak scsi_hba_close
114 113 #pragma weak scsi_hba_ioctl
115 114
116 115 /* Local static prototypes. */
117 116 static int mrsas_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
118 117 static int mrsas_attach(dev_info_t *, ddi_attach_cmd_t);
119 118 #ifdef __sparc
120 119 static int mrsas_reset(dev_info_t *, ddi_reset_cmd_t);
121 120 #else
122 121 static int mrsas_quiesce(dev_info_t *);
123 122 #endif
124 123 static int mrsas_detach(dev_info_t *, ddi_detach_cmd_t);
125 124 static int mrsas_open(dev_t *, int, int, cred_t *);
126 125 static int mrsas_close(dev_t, int, int, cred_t *);
127 126 static int mrsas_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
128 127
129 128 static int mrsas_tran_tgt_init(dev_info_t *, dev_info_t *,
130 129 scsi_hba_tran_t *, struct scsi_device *);
131 130 static struct scsi_pkt *mrsas_tran_init_pkt(struct scsi_address *, register
132 131 struct scsi_pkt *, struct buf *, int, int, int, int,
133 132 int (*)(), caddr_t);
134 133 static int mrsas_tran_start(struct scsi_address *,
135 134 register struct scsi_pkt *);
136 135 static int mrsas_tran_abort(struct scsi_address *, struct scsi_pkt *);
137 136 static int mrsas_tran_reset(struct scsi_address *, int);
138 137 static int mrsas_tran_getcap(struct scsi_address *, char *, int);
139 138 static int mrsas_tran_setcap(struct scsi_address *, char *, int, int);
140 139 static void mrsas_tran_destroy_pkt(struct scsi_address *,
141 140 struct scsi_pkt *);
142 141 static void mrsas_tran_dmafree(struct scsi_address *, struct scsi_pkt *);
143 142 static void mrsas_tran_sync_pkt(struct scsi_address *, struct scsi_pkt *);
144 143 static int mrsas_tran_quiesce(dev_info_t *dip);
145 144 static int mrsas_tran_unquiesce(dev_info_t *dip);
146 145 static uint_t mrsas_isr();
147 146 static uint_t mrsas_softintr();
148 147 static void mrsas_undo_resources(dev_info_t *, struct mrsas_instance *);
149 148
150 149 static void free_space_for_mfi(struct mrsas_instance *);
151 150 static uint32_t read_fw_status_reg_ppc(struct mrsas_instance *);
152 151 static void issue_cmd_ppc(struct mrsas_cmd *, struct mrsas_instance *);
153 152 static int issue_cmd_in_poll_mode_ppc(struct mrsas_instance *,
154 153 struct mrsas_cmd *);
155 154 static int issue_cmd_in_sync_mode_ppc(struct mrsas_instance *,
156 155 struct mrsas_cmd *);
157 156 static void enable_intr_ppc(struct mrsas_instance *);
158 157 static void disable_intr_ppc(struct mrsas_instance *);
159 158 static int intr_ack_ppc(struct mrsas_instance *);
160 159 static void flush_cache(struct mrsas_instance *instance);
161 160 void display_scsi_inquiry(caddr_t);
162 161 static int start_mfi_aen(struct mrsas_instance *instance);
163 162 static int handle_drv_ioctl(struct mrsas_instance *instance,
164 163 struct mrsas_ioctl *ioctl, int mode);
165 164 static int handle_mfi_ioctl(struct mrsas_instance *instance,
166 165 struct mrsas_ioctl *ioctl, int mode);
167 166 static int handle_mfi_aen(struct mrsas_instance *instance,
168 167 struct mrsas_aen *aen);
169 168 static struct mrsas_cmd *build_cmd(struct mrsas_instance *,
170 169 struct scsi_address *, struct scsi_pkt *, uchar_t *);
171 170 static int alloc_additional_dma_buffer(struct mrsas_instance *);
172 171 static void complete_cmd_in_sync_mode(struct mrsas_instance *,
173 172 struct mrsas_cmd *);
174 173 static int mrsas_kill_adapter(struct mrsas_instance *);
175 174 static int mrsas_issue_init_mfi(struct mrsas_instance *);
176 175 static int mrsas_reset_ppc(struct mrsas_instance *);
177 176 static uint32_t mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *);
178 177 static int wait_for_outstanding(struct mrsas_instance *instance);
179 178 static int register_mfi_aen(struct mrsas_instance *instance,
180 179 uint32_t seq_num, uint32_t class_locale_word);
181 180 static int issue_mfi_pthru(struct mrsas_instance *instance, struct
182 181 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
183 182 static int issue_mfi_dcmd(struct mrsas_instance *instance, struct
184 183 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
185 184 static int issue_mfi_smp(struct mrsas_instance *instance, struct
186 185 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
187 186 static int issue_mfi_stp(struct mrsas_instance *instance, struct
188 187 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
189 188 static int abort_aen_cmd(struct mrsas_instance *instance,
190 189 struct mrsas_cmd *cmd_to_abort);
191 190
192 191 static void mrsas_rem_intrs(struct mrsas_instance *instance);
193 192 static int mrsas_add_intrs(struct mrsas_instance *instance, int intr_type);
194 193
195 194 static void mrsas_tran_tgt_free(dev_info_t *, dev_info_t *,
196 195 scsi_hba_tran_t *, struct scsi_device *);
197 196 static int mrsas_tran_bus_config(dev_info_t *, uint_t,
198 197 ddi_bus_config_op_t, void *, dev_info_t **);
199 198 static int mrsas_parse_devname(char *, int *, int *);
200 199 static int mrsas_config_all_devices(struct mrsas_instance *);
201 200 static int mrsas_config_ld(struct mrsas_instance *, uint16_t,
202 201 uint8_t, dev_info_t **);
203 202 static int mrsas_name_node(dev_info_t *, char *, int);
204 203 static void mrsas_issue_evt_taskq(struct mrsas_eventinfo *);
205 204 static void free_additional_dma_buffer(struct mrsas_instance *);
206 205 static void io_timeout_checker(void *);
207 206 static void mrsas_fm_init(struct mrsas_instance *);
208 207 static void mrsas_fm_fini(struct mrsas_instance *);
209 208
/*
 * Controller-access vector for MFI (PPC-style) register sets.  Selected in
 * mrsas_attach() for the 2108 Liberator and Skinny device IDs.
 */
static struct mrsas_function_template mrsas_function_template_ppc = {
	.read_fw_status_reg = read_fw_status_reg_ppc,
	.issue_cmd = issue_cmd_ppc,
	.issue_cmd_in_sync_mode = issue_cmd_in_sync_mode_ppc,
	.issue_cmd_in_poll_mode = issue_cmd_in_poll_mode_ppc,
	.enable_intr = enable_intr_ppc,
	.disable_intr = disable_intr_ppc,
	.intr_ack = intr_ack_ppc,
	.init_adapter = mrsas_init_adapter_ppc
};
220 219
221 220
/*
 * Controller-access vector for Fusion (Thunderbolt/Gen3) register sets.
 * Selected in mrsas_attach() for the TBOLT, Invader, Fury, Intruder and
 * Cutlass device IDs.
 */
static struct mrsas_function_template mrsas_function_template_fusion = {
	.read_fw_status_reg = tbolt_read_fw_status_reg,
	.issue_cmd = tbolt_issue_cmd,
	.issue_cmd_in_sync_mode = tbolt_issue_cmd_in_sync_mode,
	.issue_cmd_in_poll_mode = tbolt_issue_cmd_in_poll_mode,
	.enable_intr = tbolt_enable_intr,
	.disable_intr = tbolt_disable_intr,
	.intr_ack = tbolt_intr_ack,
	.init_adapter = mrsas_init_adapter_tbolt
};
232 231
233 232
/*
 * Baseline DMA attributes for the HBA.  mrsas_attach() copies this into
 * tran_dma_attr (adjusting dma_attr_sgllen to the instance's max_num_sge)
 * before calling scsi_hba_attach_setup(); DDI_DMA_RELAXED_ORDERING may be
 * OR-ed into dma_attr_flags when mrsas_relaxed_ordering is set.
 */
ddi_dma_attr_t mrsas_generic_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xFFFFFFFFU,		/* high DMA address range */
	0xFFFFFFFFU,		/* DMA counter register */
	8,			/* DMA address alignment */
	0x07,			/* DMA burstsizes */
	1,			/* min DMA size */
	0xFFFFFFFFU,		/* max DMA size */
	0xFFFFFFFFU,		/* segment boundary */
	MRSAS_MAX_SGE_CNT,	/* dma_attr_sglen */
	512,			/* granularity of device */
	0			/* bus specific DMA flags */
};
248 247
249 248 int32_t mrsas_max_cap_maxxfer = 0x1000000;
250 249
251 250 /*
252 251 * Fix for: Thunderbolt controller IO timeout when IO write size is 1MEG,
253 252 * Limit size to 256K
254 253 */
255 254 uint32_t mrsas_tbolt_max_cap_maxxfer = (512 * 512);
256 255
/*
 * cb_ops contains base level routines: character-device entry points for the
 * devctl/scsi/ioctl minor nodes created in mrsas_attach().  Positional
 * initializer -- field order must match struct cb_ops.
 */
static struct cb_ops mrsas_cb_ops = {
	mrsas_open,		/* open */
	mrsas_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	mrsas_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	nodev,			/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
280 279
/*
 * dev_ops contains configuration routines.  SPARC builds supply a reset
 * entry and skip quiesce; non-SPARC builds supply quiesce for fast reboot.
 * Positional initializer -- field order must match struct dev_ops.
 */
static struct dev_ops mrsas_ops = {
	DEVO_REV,		/* rev, */
	0,			/* refcnt */
	mrsas_getinfo,		/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	mrsas_attach,		/* attach */
	mrsas_detach,		/* detach */
#ifdef __sparc
	mrsas_reset,		/* reset */
#else	/* __sparc */
	nodev,
#endif	/* __sparc */
	&mrsas_cb_ops,		/* char/block ops */
	NULL,			/* bus ops */
	NULL,			/* power */
#ifdef __sparc
	ddi_quiesce_not_needed
#else	/* __sparc */
	mrsas_quiesce		/* quiesce */
#endif	/* __sparc */
};
306 305
/* Module linkage: this is a device driver with the dev_ops above. */
static struct modldrv modldrv = {
	&mod_driverops,		/* module type - driver */
	MRSAS_VERSION,
	&mrsas_ops,		/* driver ops */
};
312 311
/* Module linkage list passed to mod_install()/mod_remove()/mod_info(). */
static struct modlinkage modlinkage = {
	MODREV_1,		/* ml_rev - must be MODREV_1 */
	&modldrv,		/* ml_linkage */
	NULL			/* end of driver linkage */
};
318 317
/*
 * Register access attributes: little-endian structural access with strict
 * ordering; used when mapping the controller register set in mrsas_attach().
 */
static struct ddi_device_acc_attr endian_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
325 324
326 325 /* Use the LSI Fast Path for the 2208 (tbolt) commands. */
327 326 unsigned int enable_fp = 1;
328 327
329 328
330 329 /*
331 330 * ************************************************************************** *
332 331 * *
333 332 * common entry points - for loadable kernel modules *
334 333 * *
335 334 * ************************************************************************** *
336 335 */
337 336
338 337 /*
339 338 * _init - initialize a loadable module
340 339 * @void
341 340 *
342 341 * The driver should perform any one-time resource allocation or data
343 342 * initialization during driver loading in _init(). For example, the driver
344 343 * should initialize any mutexes global to the driver in this routine.
345 344 * The driver should not, however, use _init() to allocate or initialize
346 345 * anything that has to do with a particular instance of the device.
347 346 * Per-instance initialization must be done in attach().
348 347 */
349 348 int
350 349 _init(void)
351 350 {
352 351 int ret;
353 352
354 353 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
355 354
356 355 ret = ddi_soft_state_init(&mrsas_state,
357 356 sizeof (struct mrsas_instance), 0);
358 357
359 358 if (ret != DDI_SUCCESS) {
360 359 cmn_err(CE_WARN, "mr_sas: could not init state");
361 360 return (ret);
362 361 }
363 362
364 363 if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) {
365 364 cmn_err(CE_WARN, "mr_sas: could not init scsi hba");
366 365 ddi_soft_state_fini(&mrsas_state);
367 366 return (ret);
368 367 }
369 368
370 369 ret = mod_install(&modlinkage);
371 370
372 371 if (ret != DDI_SUCCESS) {
373 372 cmn_err(CE_WARN, "mr_sas: mod_install failed");
374 373 scsi_hba_fini(&modlinkage);
375 374 ddi_soft_state_fini(&mrsas_state);
376 375 }
377 376
378 377 return (ret);
379 378 }
380 379
/*
 * _info - returns information about a loadable module.
 * @modinfop: module information structure filled in by mod_info()
 *
 * _info() is called to return module information.  This is a typical entry
 * point that does a predefined role.  It simply calls mod_info() with the
 * driver's modlinkage.
 */
int
_info(struct modinfo *modinfop)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	return (mod_info(&modlinkage, modinfop));
}
395 394
396 395 /*
397 396 * _fini - prepare a loadable module for unloading
398 397 * @void
399 398 *
400 399 * In _fini(), the driver should release any resources that were allocated in
401 400 * _init(). The driver must remove itself from the system module list.
402 401 */
403 402 int
404 403 _fini(void)
405 404 {
406 405 int ret;
407 406
408 407 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
409 408
410 409 if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS) {
411 410 con_log(CL_ANN1,
412 411 (CE_WARN, "_fini: mod_remove() failed, error 0x%X", ret));
413 412 return (ret);
414 413 }
415 414
416 415 scsi_hba_fini(&modlinkage);
417 416 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: scsi_hba_fini() done."));
418 417
419 418 ddi_soft_state_fini(&mrsas_state);
420 419 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: ddi_soft_state_fini() done."));
421 420
422 421 return (ret);
423 422 }
424 423
425 424
426 425 /*
427 426 * ************************************************************************** *
428 427 * *
429 428 * common entry points - for autoconfiguration *
430 429 * *
431 430 * ************************************************************************** *
432 431 */
433 432 /*
434 433 * attach - adds a device to the system as part of initialization
435 434 * @dip:
436 435 * @cmd:
437 436 *
438 437 * The kernel calls a driver's attach() entry point to attach an instance of
439 438 * a device (for MegaRAID, it is instance of a controller) or to resume
440 439 * operation for an instance of a device that has been suspended or has been
441 440 * shut down by the power management framework
442 441 * The attach() entry point typically includes the following types of
443 442 * processing:
444 443 * - allocate a soft-state structure for the device instance (for MegaRAID,
445 444 * controller instance)
446 445 * - initialize per-instance mutexes
447 446 * - initialize condition variables
448 447 * - register the device's interrupts (for MegaRAID, controller's interrupts)
449 448 * - map the registers and memory of the device instance (for MegaRAID,
450 449 * controller instance)
451 450 * - create minor device nodes for the device instance (for MegaRAID,
452 451 * controller instance)
453 452 * - report that the device instance (for MegaRAID, controller instance) has
454 453 * attached
455 454 */
456 455 static int
457 456 mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
458 457 {
459 458 int instance_no;
460 459 int nregs;
461 460 int i = 0;
462 461 uint8_t irq;
463 462 uint16_t vendor_id;
464 463 uint16_t device_id;
465 464 uint16_t subsysvid;
466 465 uint16_t subsysid;
467 466 uint16_t command;
468 467 off_t reglength = 0;
469 468 int intr_types = 0;
470 469 char *data;
471 470
472 471 scsi_hba_tran_t *tran;
473 472 ddi_dma_attr_t tran_dma_attr;
474 473 struct mrsas_instance *instance;
475 474
476 475 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
477 476
478 477 /* CONSTCOND */
479 478 ASSERT(NO_COMPETING_THREADS);
480 479
481 480 instance_no = ddi_get_instance(dip);
482 481
483 482 /*
484 483 * check to see whether this device is in a DMA-capable slot.
485 484 */
486 485 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
487 486 dev_err(dip, CE_WARN, "Device in slave-only slot, unused");
488 487 return (DDI_FAILURE);
489 488 }
490 489
491 490 switch (cmd) {
492 491 case DDI_ATTACH:
493 492 /* allocate the soft state for the instance */
494 493 if (ddi_soft_state_zalloc(mrsas_state, instance_no)
495 494 != DDI_SUCCESS) {
496 495 dev_err(dip, CE_WARN, "Failed to allocate soft state");
497 496 return (DDI_FAILURE);
498 497 }
499 498
500 499 instance = (struct mrsas_instance *)ddi_get_soft_state
501 500 (mrsas_state, instance_no);
502 501
503 502 if (instance == NULL) {
504 503 dev_err(dip, CE_WARN, "Bad soft state");
505 504 ddi_soft_state_free(mrsas_state, instance_no);
506 505 return (DDI_FAILURE);
507 506 }
508 507
509 508 instance->unroll.softs = 1;
510 509
511 510 /* Setup the PCI configuration space handles */
512 511 if (pci_config_setup(dip, &instance->pci_handle) !=
513 512 DDI_SUCCESS) {
514 513 dev_err(dip, CE_WARN, "pci config setup failed");
515 514
516 515 ddi_soft_state_free(mrsas_state, instance_no);
517 516 return (DDI_FAILURE);
518 517 }
519 518
520 519 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
521 520 dev_err(dip, CE_WARN, "Failed to get registers");
522 521
523 522 pci_config_teardown(&instance->pci_handle);
524 523 ddi_soft_state_free(mrsas_state, instance_no);
525 524 return (DDI_FAILURE);
526 525 }
527 526
528 527 vendor_id = pci_config_get16(instance->pci_handle,
529 528 PCI_CONF_VENID);
530 529 device_id = pci_config_get16(instance->pci_handle,
531 530 PCI_CONF_DEVID);
532 531
533 532 subsysvid = pci_config_get16(instance->pci_handle,
534 533 PCI_CONF_SUBVENID);
535 534 subsysid = pci_config_get16(instance->pci_handle,
536 535 PCI_CONF_SUBSYSID);
537 536
538 537 pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
539 538 (pci_config_get16(instance->pci_handle,
540 539 PCI_CONF_COMM) | PCI_COMM_ME));
541 540 irq = pci_config_get8(instance->pci_handle,
542 541 PCI_CONF_ILINE);
543 542
544 543 dev_err(dip, CE_CONT,
545 544 "?0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s\n",
546 545 vendor_id, device_id, subsysvid,
547 546 subsysid, irq, MRSAS_VERSION);
548 547
549 548 /* enable bus-mastering */
550 549 command = pci_config_get16(instance->pci_handle,
551 550 PCI_CONF_COMM);
552 551
553 552 if (!(command & PCI_COMM_ME)) {
554 553 command |= PCI_COMM_ME;
555 554
556 555 pci_config_put16(instance->pci_handle,
557 556 PCI_CONF_COMM, command);
558 557
559 558 con_log(CL_ANN, (CE_CONT, "mr_sas%d: "
560 559 "enable bus-mastering", instance_no));
561 560 } else {
562 561 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
563 562 "bus-mastering already set", instance_no));
564 563 }
565 564
566 565 /* initialize function pointers */
567 566 switch (device_id) {
568 567 case PCI_DEVICE_ID_LSI_INVADER:
569 568 case PCI_DEVICE_ID_LSI_FURY:
570 569 case PCI_DEVICE_ID_LSI_INTRUDER:
571 570 case PCI_DEVICE_ID_LSI_INTRUDER_24:
572 571 case PCI_DEVICE_ID_LSI_CUTLASS_52:
573 572 case PCI_DEVICE_ID_LSI_CUTLASS_53:
574 573 dev_err(dip, CE_CONT, "?Gen3 device detected\n");
575 574 instance->gen3 = 1;
576 575 /* FALLTHROUGH */
577 576 case PCI_DEVICE_ID_LSI_TBOLT:
578 577 dev_err(dip, CE_CONT, "?TBOLT device detected\n");
579 578
580 579 instance->func_ptr =
581 580 &mrsas_function_template_fusion;
582 581 instance->tbolt = 1;
583 582 break;
584 583
585 584 case PCI_DEVICE_ID_LSI_SKINNY:
586 585 case PCI_DEVICE_ID_LSI_SKINNY_NEW:
587 586 /*
588 587 * FALLTHRU to PPC-style functions, but mark this
589 588 * instance as Skinny, because the register set is
590 589 * slightly different (See WR_IB_PICK_QPORT), and
591 590 * certain other features are available to a Skinny
592 591 * HBA.
593 592 */
594 593 dev_err(dip, CE_CONT, "?Skinny device detected\n");
595 594 instance->skinny = 1;
596 595 /* FALLTHRU */
597 596
598 597 case PCI_DEVICE_ID_LSI_2108VDE:
599 598 case PCI_DEVICE_ID_LSI_2108V:
600 599 dev_err(dip, CE_CONT,
601 600 "?2108 Liberator device detected\n");
602 601
603 602 instance->func_ptr =
604 603 &mrsas_function_template_ppc;
605 604 break;
606 605
607 606 default:
608 607 dev_err(dip, CE_WARN, "Invalid device detected");
609 608
610 609 pci_config_teardown(&instance->pci_handle);
611 610 ddi_soft_state_free(mrsas_state, instance_no);
612 611 return (DDI_FAILURE);
613 612 }
614 613
615 614 instance->baseaddress = pci_config_get32(
616 615 instance->pci_handle, PCI_CONF_BASE0);
617 616 instance->baseaddress &= 0x0fffc;
618 617
619 618 instance->dip = dip;
620 619 instance->vendor_id = vendor_id;
621 620 instance->device_id = device_id;
622 621 instance->subsysvid = subsysvid;
623 622 instance->subsysid = subsysid;
624 623 instance->instance = instance_no;
625 624
626 625 /* Initialize FMA */
627 626 instance->fm_capabilities = ddi_prop_get_int(
628 627 DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS,
629 628 "fm-capable", DDI_FM_EREPORT_CAPABLE |
630 629 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE
631 630 | DDI_FM_ERRCB_CAPABLE);
632 631
633 632 mrsas_fm_init(instance);
634 633
635 634 /* Setup register map */
636 635 if ((ddi_dev_regsize(instance->dip,
637 636 REGISTER_SET_IO_2108, ®length) != DDI_SUCCESS) ||
638 637 reglength < MINIMUM_MFI_MEM_SZ) {
639 638 goto fail_attach;
640 639 }
641 640 if (reglength > DEFAULT_MFI_MEM_SZ) {
642 641 reglength = DEFAULT_MFI_MEM_SZ;
643 642 con_log(CL_DLEVEL1, (CE_NOTE,
644 643 "mr_sas: register length to map is 0x%lx bytes",
645 644 reglength));
646 645 }
647 646 if (ddi_regs_map_setup(instance->dip,
648 647 REGISTER_SET_IO_2108, &instance->regmap, 0,
649 648 reglength, &endian_attr, &instance->regmap_handle)
650 649 != DDI_SUCCESS) {
651 650 dev_err(dip, CE_WARN, "couldn't map control registers");
652 651 goto fail_attach;
653 652 }
654 653
655 654 instance->unroll.regs = 1;
656 655
657 656 /*
658 657 * Disable Interrupt Now.
659 658 * Setup Software interrupt
660 659 */
661 660 instance->func_ptr->disable_intr(instance);
662 661
663 662 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
664 663 "mrsas-enable-msi", &data) == DDI_SUCCESS) {
665 664 if (strncmp(data, "no", 3) == 0) {
666 665 msi_enable = 0;
667 666 con_log(CL_ANN1, (CE_WARN,
668 667 "msi_enable = %d disabled", msi_enable));
669 668 }
670 669 ddi_prop_free(data);
671 670 }
672 671
673 672 dev_err(dip, CE_CONT, "?msi_enable = %d\n", msi_enable);
674 673
675 674 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
676 675 "mrsas-enable-fp", &data) == DDI_SUCCESS) {
677 676 if (strncmp(data, "no", 3) == 0) {
678 677 enable_fp = 0;
679 678 dev_err(dip, CE_NOTE,
680 679 "enable_fp = %d, Fast-Path disabled.\n",
681 680 enable_fp);
682 681 }
683 682
684 683 ddi_prop_free(data);
685 684 }
686 685
687 686 dev_err(dip, CE_CONT, "?enable_fp = %d\n", enable_fp);
688 687
689 688 /* Check for all supported interrupt types */
690 689 if (ddi_intr_get_supported_types(
691 690 dip, &intr_types) != DDI_SUCCESS) {
692 691 dev_err(dip, CE_WARN,
693 692 "ddi_intr_get_supported_types() failed");
694 693 goto fail_attach;
695 694 }
696 695
697 696 con_log(CL_DLEVEL1, (CE_NOTE,
698 697 "ddi_intr_get_supported_types() ret: 0x%x", intr_types));
699 698
700 699 /* Initialize and Setup Interrupt handler */
701 700 if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) {
702 701 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSIX) !=
703 702 DDI_SUCCESS) {
704 703 dev_err(dip, CE_WARN,
705 704 "MSIX interrupt query failed");
706 705 goto fail_attach;
707 706 }
708 707 instance->intr_type = DDI_INTR_TYPE_MSIX;
709 708 } else if (msi_enable && (intr_types & DDI_INTR_TYPE_MSI)) {
710 709 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSI) !=
711 710 DDI_SUCCESS) {
712 711 dev_err(dip, CE_WARN,
713 712 "MSI interrupt query failed");
714 713 goto fail_attach;
715 714 }
716 715 instance->intr_type = DDI_INTR_TYPE_MSI;
717 716 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
718 717 msi_enable = 0;
719 718 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_FIXED) !=
720 719 DDI_SUCCESS) {
721 720 dev_err(dip, CE_WARN,
722 721 "FIXED interrupt query failed");
723 722 goto fail_attach;
724 723 }
725 724 instance->intr_type = DDI_INTR_TYPE_FIXED;
726 725 } else {
727 726 dev_err(dip, CE_WARN, "Device cannot "
728 727 "suppport either FIXED or MSI/X "
729 728 "interrupts");
730 729 goto fail_attach;
731 730 }
732 731
733 732 instance->unroll.intr = 1;
734 733
735 734 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
736 735 "mrsas-enable-ctio", &data) == DDI_SUCCESS) {
737 736 if (strncmp(data, "no", 3) == 0) {
738 737 ctio_enable = 0;
739 738 con_log(CL_ANN1, (CE_WARN,
740 739 "ctio_enable = %d disabled", ctio_enable));
741 740 }
742 741 ddi_prop_free(data);
743 742 }
744 743
745 744 dev_err(dip, CE_CONT, "?ctio_enable = %d\n", ctio_enable);
746 745
747 746 /* setup the mfi based low level driver */
748 747 if (mrsas_init_adapter(instance) != DDI_SUCCESS) {
749 748 dev_err(dip, CE_WARN,
750 749 "could not initialize the low level driver");
751 750
752 751 goto fail_attach;
753 752 }
754 753
755 754 /* Initialize all Mutex */
756 755 INIT_LIST_HEAD(&instance->completed_pool_list);
757 756 mutex_init(&instance->completed_pool_mtx, NULL,
758 757 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
759 758
760 759 mutex_init(&instance->sync_map_mtx, NULL,
761 760 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
762 761
763 762 mutex_init(&instance->app_cmd_pool_mtx, NULL,
764 763 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
765 764
766 765 mutex_init(&instance->config_dev_mtx, NULL,
767 766 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
768 767
769 768 mutex_init(&instance->cmd_pend_mtx, NULL,
770 769 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
771 770
772 771 mutex_init(&instance->ocr_flags_mtx, NULL,
773 772 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
774 773
775 774 mutex_init(&instance->int_cmd_mtx, NULL,
776 775 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
777 776 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
778 777
779 778 mutex_init(&instance->cmd_pool_mtx, NULL,
780 779 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
781 780
782 781 mutex_init(&instance->reg_write_mtx, NULL,
783 782 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
784 783
785 784 if (instance->tbolt) {
786 785 mutex_init(&instance->cmd_app_pool_mtx, NULL,
787 786 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
788 787
789 788 mutex_init(&instance->chip_mtx, NULL,
790 789 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
791 790
792 791 }
793 792
794 793 instance->unroll.mutexs = 1;
795 794
796 795 instance->timeout_id = (timeout_id_t)-1;
797 796
798 797 /* Register our soft-isr for highlevel interrupts. */
799 798 instance->isr_level = instance->intr_pri;
800 799 if (!(instance->tbolt)) {
801 800 if (instance->isr_level == HIGH_LEVEL_INTR) {
802 801 if (ddi_add_softintr(dip,
803 802 DDI_SOFTINT_HIGH,
804 803 &instance->soft_intr_id, NULL, NULL,
805 804 mrsas_softintr, (caddr_t)instance) !=
806 805 DDI_SUCCESS) {
807 806 dev_err(dip, CE_WARN,
808 807 "Software ISR did not register");
809 808
810 809 goto fail_attach;
811 810 }
812 811
813 812 instance->unroll.soft_isr = 1;
814 813
815 814 }
816 815 }
817 816
818 817 instance->softint_running = 0;
819 818
820 819 /* Allocate a transport structure */
821 820 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
822 821
823 822 if (tran == NULL) {
824 823 dev_err(dip, CE_WARN,
825 824 "scsi_hba_tran_alloc failed");
826 825 goto fail_attach;
827 826 }
828 827
829 828 instance->tran = tran;
830 829 instance->unroll.tran = 1;
831 830
832 831 tran->tran_hba_private = instance;
833 832 tran->tran_tgt_init = mrsas_tran_tgt_init;
834 833 tran->tran_tgt_probe = scsi_hba_probe;
835 834 tran->tran_tgt_free = mrsas_tran_tgt_free;
836 835 tran->tran_init_pkt = mrsas_tran_init_pkt;
837 836 if (instance->tbolt)
838 837 tran->tran_start = mrsas_tbolt_tran_start;
839 838 else
840 839 tran->tran_start = mrsas_tran_start;
841 840 tran->tran_abort = mrsas_tran_abort;
842 841 tran->tran_reset = mrsas_tran_reset;
843 842 tran->tran_getcap = mrsas_tran_getcap;
844 843 tran->tran_setcap = mrsas_tran_setcap;
845 844 tran->tran_destroy_pkt = mrsas_tran_destroy_pkt;
846 845 tran->tran_dmafree = mrsas_tran_dmafree;
847 846 tran->tran_sync_pkt = mrsas_tran_sync_pkt;
848 847 tran->tran_quiesce = mrsas_tran_quiesce;
849 848 tran->tran_unquiesce = mrsas_tran_unquiesce;
850 849 tran->tran_bus_config = mrsas_tran_bus_config;
851 850
852 851 if (mrsas_relaxed_ordering)
853 852 mrsas_generic_dma_attr.dma_attr_flags |=
854 853 DDI_DMA_RELAXED_ORDERING;
855 854
856 855
857 856 tran_dma_attr = mrsas_generic_dma_attr;
858 857 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;
859 858
860 859 /* Attach this instance of the hba */
861 860 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
862 861 != DDI_SUCCESS) {
863 862 dev_err(dip, CE_WARN,
864 863 "scsi_hba_attach failed");
865 864
866 865 goto fail_attach;
867 866 }
868 867 instance->unroll.tranSetup = 1;
869 868 con_log(CL_ANN1,
870 869 (CE_CONT, "scsi_hba_attach_setup() done."));
871 870
872 871 /* create devctl node for cfgadm command */
873 872 if (ddi_create_minor_node(dip, "devctl",
874 873 S_IFCHR, INST2DEVCTL(instance_no),
875 874 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
876 875 dev_err(dip, CE_WARN, "failed to create devctl node.");
877 876
878 877 goto fail_attach;
879 878 }
880 879
881 880 instance->unroll.devctl = 1;
882 881
883 882 /* create scsi node for cfgadm command */
884 883 if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
885 884 INST2SCSI(instance_no), DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
886 885 DDI_FAILURE) {
887 886 dev_err(dip, CE_WARN, "failed to create scsi node.");
888 887
889 888 goto fail_attach;
890 889 }
891 890
892 891 instance->unroll.scsictl = 1;
893 892
894 893 (void) snprintf(instance->iocnode, sizeof (instance->iocnode),
895 894 "%d:lsirdctl", instance_no);
896 895
897 896 /*
898 897 * Create a node for applications
899 898 * for issuing ioctl to the driver.
900 899 */
901 900 if (ddi_create_minor_node(dip, instance->iocnode,
902 901 S_IFCHR, INST2LSIRDCTL(instance_no), DDI_PSEUDO, 0) ==
903 902 DDI_FAILURE) {
904 903 dev_err(dip, CE_WARN, "failed to create ioctl node.");
905 904
906 905 goto fail_attach;
907 906 }
908 907
909 908 instance->unroll.ioctl = 1;
910 909
911 910 /* Create a taskq to handle dr events */
912 911 if ((instance->taskq = ddi_taskq_create(dip,
913 912 "mrsas_dr_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
914 913 dev_err(dip, CE_WARN, "failed to create taskq.");
915 914 instance->taskq = NULL;
916 915 goto fail_attach;
917 916 }
918 917 instance->unroll.taskq = 1;
919 918 con_log(CL_ANN1, (CE_CONT, "ddi_taskq_create() done."));
920 919
921 920 /* enable interrupt */
922 921 instance->func_ptr->enable_intr(instance);
923 922
924 923 /* initiate AEN */
925 924 if (start_mfi_aen(instance)) {
926 925 dev_err(dip, CE_WARN, "failed to initiate AEN.");
927 926 goto fail_attach;
928 927 }
929 928 instance->unroll.aenPend = 1;
930 929 con_log(CL_ANN1,
931 930 (CE_CONT, "AEN started for instance %d.", instance_no));
932 931
933 932 /* Finally! We are on the air. */
934 933 ddi_report_dev(dip);
935 934
936 935 /* FMA handle checking. */
937 936 if (mrsas_check_acc_handle(instance->regmap_handle) !=
938 937 DDI_SUCCESS) {
939 938 goto fail_attach;
940 939 }
941 940 if (mrsas_check_acc_handle(instance->pci_handle) !=
942 941 DDI_SUCCESS) {
943 942 goto fail_attach;
944 943 }
945 944
946 945 instance->mr_ld_list =
947 946 kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld),
948 947 KM_SLEEP);
949 948 instance->unroll.ldlist_buff = 1;
950 949
951 950 if (instance->tbolt || instance->skinny) {
952 951 instance->mr_tbolt_pd_max = MRSAS_TBOLT_PD_TGT_MAX;
953 952 instance->mr_tbolt_pd_list =
954 953 kmem_zalloc(MRSAS_TBOLT_GET_PD_MAX(instance) *
955 954 sizeof (struct mrsas_tbolt_pd), KM_SLEEP);
956 955 ASSERT(instance->mr_tbolt_pd_list);
957 956 for (i = 0; i < instance->mr_tbolt_pd_max; i++) {
958 957 instance->mr_tbolt_pd_list[i].lun_type =
959 958 MRSAS_TBOLT_PD_LUN;
960 959 instance->mr_tbolt_pd_list[i].dev_id =
961 960 (uint8_t)i;
962 961 }
963 962
964 963 instance->unroll.pdlist_buff = 1;
965 964 }
966 965 break;
967 966 case DDI_PM_RESUME:
968 967 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_PM_RESUME"));
969 968 break;
970 969 case DDI_RESUME:
971 970 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_RESUME"));
972 971 break;
973 972 default:
974 973 con_log(CL_ANN,
975 974 (CE_WARN, "mr_sas: invalid attach cmd=%x", cmd));
976 975 return (DDI_FAILURE);
977 976 }
978 977
979 978
980 979 con_log(CL_DLEVEL1,
981 980 (CE_NOTE, "mrsas_attach() return SUCCESS instance_num %d",
982 981 instance_no));
983 982 return (DDI_SUCCESS);
984 983
985 984 fail_attach:
986 985
987 986 mrsas_undo_resources(dip, instance);
988 987
989 988 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
990 989 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
991 990
992 991 mrsas_fm_fini(instance);
993 992
994 993 pci_config_teardown(&instance->pci_handle);
995 994 ddi_soft_state_free(mrsas_state, instance_no);
996 995
997 996 return (DDI_FAILURE);
998 997 }
999 998
1000 999 /*
1001 1000 * getinfo - gets device information
1002 1001 * @dip:
1003 1002 * @cmd:
1004 1003 * @arg:
1005 1004 * @resultp:
1006 1005 *
1007 1006 * The system calls getinfo() to obtain configuration information that only
1008 1007 * the driver knows. The mapping of minor numbers to device instance is
1009 1008 * entirely under the control of the driver. The system sometimes needs to ask
1010 1009 * the driver which device a particular dev_t represents.
1011 1010 * Given the device number return the devinfo pointer from the scsi_device
1012 1011 * structure.
1013 1012 */
1014 1013 /*ARGSUSED*/
1015 1014 static int
1016 1015 mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
1017 1016 {
1018 1017 int rval;
1019 1018 int mrsas_minor = getminor((dev_t)arg);
1020 1019
1021 1020 struct mrsas_instance *instance;
1022 1021
1023 1022 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1024 1023
1025 1024 switch (cmd) {
1026 1025 case DDI_INFO_DEVT2DEVINFO:
1027 1026 instance = (struct mrsas_instance *)
1028 1027 ddi_get_soft_state(mrsas_state,
1029 1028 MINOR2INST(mrsas_minor));
1030 1029
1031 1030 if (instance == NULL) {
1032 1031 *resultp = NULL;
1033 1032 rval = DDI_FAILURE;
1034 1033 } else {
1035 1034 *resultp = instance->dip;
1036 1035 rval = DDI_SUCCESS;
1037 1036 }
1038 1037 break;
1039 1038 case DDI_INFO_DEVT2INSTANCE:
1040 1039 *resultp = (void *)(intptr_t)
1041 1040 (MINOR2INST(getminor((dev_t)arg)));
1042 1041 rval = DDI_SUCCESS;
1043 1042 break;
1044 1043 default:
1045 1044 *resultp = NULL;
1046 1045 rval = DDI_FAILURE;
1047 1046 }
1048 1047
1049 1048 return (rval);
1050 1049 }
1051 1050
1052 1051 /*
1053 1052 * detach - detaches a device from the system
1054 1053 * @dip: pointer to the device's dev_info structure
1055 1054 * @cmd: type of detach
1056 1055 *
1057 1056 * A driver's detach() entry point is called to detach an instance of a device
1058 1057 * that is bound to the driver. The entry point is called with the instance of
1059 1058 * the device node to be detached and with DDI_DETACH, which is specified as
1060 1059 * the cmd argument to the entry point.
1061 1060 * This routine is called during driver unload. We free all the allocated
1062 1061 * resources and call the corresponding LLD so that it can also release all
1063 1062 * its resources.
1064 1063 */
static int
mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int	instance_no;

	struct mrsas_instance	*instance;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));


	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	instance_no = ddi_get_instance(dip);

	instance = (struct mrsas_instance *)ddi_get_soft_state(mrsas_state,
	    instance_no);

	if (!instance) {
		dev_err(dip, CE_WARN, "could not get instance in detach");

		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_DETACH"));

		/*
		 * Cancel any pending config-device timeout.  The mutex is
		 * deliberately dropped around untimeout(): the timeout
		 * handler may itself block on config_dev_mtx, so calling
		 * untimeout() with the lock held could deadlock.
		 */
		mutex_enter(&instance->config_dev_mtx);
		if (instance->timeout_id != (timeout_id_t)-1) {
			mutex_exit(&instance->config_dev_mtx);
			(void) untimeout(instance->timeout_id);
			instance->timeout_id = (timeout_id_t)-1;
			mutex_enter(&instance->config_dev_mtx);
			instance->unroll.timer = 0;
		}
		mutex_exit(&instance->config_dev_mtx);

		/*
		 * Detach from SCSA first; if the framework refuses (for
		 * example, targets are still attached) fail the detach
		 * without tearing anything else down, leaving the instance
		 * fully operational.
		 */
		if (instance->unroll.tranSetup == 1) {
			if (scsi_hba_detach(dip) != DDI_SUCCESS) {
				dev_err(dip, CE_WARN,
				    "failed to detach");
				return (DDI_FAILURE);
			}
			instance->unroll.tranSetup = 0;
			con_log(CL_ANN1,
			    (CE_CONT, "scsi_hba_dettach() done."));
		}

		/* Ask the firmware to flush its cache before teardown. */
		flush_cache(instance);

		/*
		 * Release everything attach() set up, guided by the
		 * per-resource unroll flags (reverse order of acquisition).
		 */
		mrsas_undo_resources(dip, instance);

		mrsas_fm_fini(instance);

		pci_config_teardown(&instance->pci_handle);
		ddi_soft_state_free(mrsas_state, instance_no);
		break;

	case DDI_PM_SUSPEND:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_PM_SUSPEND"));

		break;
	case DDI_SUSPEND:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_SUSPEND"));

		break;
	default:
		con_log(CL_ANN, (CE_WARN,
		    "invalid detach command:0x%x", cmd));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
1143 1142
1144 1143
/*
 * mrsas_undo_resources - release every resource attach() acquired.
 *
 * Each resource has a matching instance->unroll.* flag that attach() sets
 * after acquisition; this routine tears down only what is flagged, and in
 * roughly the reverse order of acquisition, so it can safely be called
 * both from a partially-failed attach and from a full detach.
 */
static void
mrsas_undo_resources(dev_info_t *dip, struct mrsas_instance *instance)
{
	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* Remove the minor nodes (ioctl, scsi, devctl) first. */
	if (instance->unroll.ioctl == 1) {
		ddi_remove_minor_node(dip, instance->iocnode);
		instance->unroll.ioctl = 0;
	}

	if (instance->unroll.scsictl == 1) {
		ddi_remove_minor_node(dip, "scsi");
		instance->unroll.scsictl = 0;
	}

	if (instance->unroll.devctl == 1) {
		ddi_remove_minor_node(dip, "devctl");
		instance->unroll.devctl = 0;
	}

	/*
	 * Detach from SCSA.  If the framework refuses, abort the unroll
	 * here; the remaining resources are still in use.
	 */
	if (instance->unroll.tranSetup == 1) {
		if (scsi_hba_detach(dip) != DDI_SUCCESS) {
			dev_err(dip, CE_WARN, "failed to detach");
			return;	/* DDI_FAILURE */
		}
		instance->unroll.tranSetup = 0;
		con_log(CL_ANN1, (CE_CONT, "scsi_hba_dettach() done."));
	}

	if (instance->unroll.tran == 1) {
		scsi_hba_tran_free(instance->tran);
		instance->unroll.tran = 0;
		con_log(CL_ANN1, (CE_CONT, "scsi_hba_tran_free() done."));
	}

	/* Abort the outstanding map-sync command (Thunderbolt only). */
	if (instance->unroll.syncCmd == 1) {
		if (instance->tbolt) {
			if (abort_syncmap_cmd(instance,
			    instance->map_update_cmd)) {
				dev_err(dip, CE_WARN, "mrsas_detach: "
				    "failed to abort previous syncmap command");
			}

			instance->unroll.syncCmd = 0;
			con_log(CL_ANN1, (CE_CONT, "sync cmd aborted, done."));
		}
	}

	/* Abort the outstanding AEN command. */
	if (instance->unroll.aenPend == 1) {
		if (abort_aen_cmd(instance, instance->aen_cmd))
			dev_err(dip, CE_WARN, "mrsas_detach: "
			    "failed to abort prevous AEN command");

		instance->unroll.aenPend = 0;
		con_log(CL_ANN1, (CE_CONT, "aen cmd aborted, done."));
		/* This means the controller is fully initialized and running */
		/* Shutdown should be a last command to controller. */
		/* shutdown_controller(); */
	}


	if (instance->unroll.timer == 1) {
		if (instance->timeout_id != (timeout_id_t)-1) {
			(void) untimeout(instance->timeout_id);
			instance->timeout_id = (timeout_id_t)-1;

			instance->unroll.timer = 0;
		}
	}

	/*
	 * Quiesce interrupts before tearing down mutexes and handlers.
	 * Done unconditionally: disabling is harmless even if interrupts
	 * were never enabled.
	 */
	instance->func_ptr->disable_intr(instance);


	if (instance->unroll.mutexs == 1) {
		mutex_destroy(&instance->cmd_pool_mtx);
		mutex_destroy(&instance->app_cmd_pool_mtx);
		mutex_destroy(&instance->cmd_pend_mtx);
		mutex_destroy(&instance->completed_pool_mtx);
		mutex_destroy(&instance->sync_map_mtx);
		mutex_destroy(&instance->int_cmd_mtx);
		cv_destroy(&instance->int_cmd_cv);
		mutex_destroy(&instance->config_dev_mtx);
		mutex_destroy(&instance->ocr_flags_mtx);
		mutex_destroy(&instance->reg_write_mtx);

		if (instance->tbolt) {
			mutex_destroy(&instance->cmd_app_pool_mtx);
			mutex_destroy(&instance->chip_mtx);
		}

		instance->unroll.mutexs = 0;
		con_log(CL_ANN1, (CE_CONT, "Destroy mutex & cv, done."));
	}


	/* Remove soft interrupt, then hardware interrupt handlers. */
	if (instance->unroll.soft_isr == 1) {
		ddi_remove_softintr(instance->soft_intr_id);
		instance->unroll.soft_isr = 0;
	}

	if (instance->unroll.intr == 1) {
		mrsas_rem_intrs(instance);
		instance->unroll.intr = 0;
	}


	if (instance->unroll.taskq == 1) {
		if (instance->taskq) {
			ddi_taskq_destroy(instance->taskq);
			instance->unroll.taskq = 0;
		}

	}

	/*
	 * free dma memory allocated for
	 * cmds/frames/queues/driver version etc
	 */
	if (instance->unroll.verBuff == 1) {
		(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
		instance->unroll.verBuff = 0;
	}

	if (instance->unroll.pdlist_buff == 1) {
		if (instance->mr_tbolt_pd_list != NULL) {
			kmem_free(instance->mr_tbolt_pd_list,
			    MRSAS_TBOLT_GET_PD_MAX(instance) *
			    sizeof (struct mrsas_tbolt_pd));
		}

		instance->mr_tbolt_pd_list = NULL;
		instance->unroll.pdlist_buff = 0;
	}

	if (instance->unroll.ldlist_buff == 1) {
		if (instance->mr_ld_list != NULL) {
			kmem_free(instance->mr_ld_list, MRDRV_MAX_LD
			    * sizeof (struct mrsas_ld));
		}

		instance->mr_ld_list = NULL;
		instance->unroll.ldlist_buff = 0;
	}

	/* Per-generation command/frame pools: MPI2 (Thunderbolt) vs MFI. */
	if (instance->tbolt) {
		if (instance->unroll.alloc_space_mpi2 == 1) {
			free_space_for_mpi2(instance);
			instance->unroll.alloc_space_mpi2 = 0;
		}
	} else {
		if (instance->unroll.alloc_space_mfi == 1) {
			free_space_for_mfi(instance);
			instance->unroll.alloc_space_mfi = 0;
		}
	}

	/* Finally unmap the controller register space. */
	if (instance->unroll.regs == 1) {
		ddi_regs_map_free(&instance->regmap_handle);
		instance->unroll.regs = 0;
		con_log(CL_ANN1, (CE_CONT, "ddi_regs_map_free() done."));
	}
}
1307 1306
1308 1307
1309 1308
1310 1309 /*
1311 1310 * ************************************************************************** *
1312 1311 * *
1313 1312 * common entry points - for character driver types *
1314 1313 * *
1315 1314 * ************************************************************************** *
1316 1315 */
1317 1316 /*
1318 1317 * open - gets access to a device
1319 1318 * @dev:
1320 1319 * @openflags:
1321 1320 * @otyp:
1322 1321 * @credp:
1323 1322 *
1324 1323 * Access to a device by one or more application programs is controlled
1325 1324 * through the open() and close() entry points. The primary function of
1326 1325 * open() is to verify that the open request is allowed.
1327 1326 */
1328 1327 static int
1329 1328 mrsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
1330 1329 {
1331 1330 int rval = 0;
1332 1331
1333 1332 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1334 1333
1335 1334 /* Check root permissions */
1336 1335 if (drv_priv(credp) != 0) {
1337 1336 con_log(CL_ANN, (CE_WARN,
1338 1337 "mr_sas: Non-root ioctl access denied!"));
1339 1338 return (EPERM);
1340 1339 }
1341 1340
1342 1341 /* Verify we are being opened as a character device */
1343 1342 if (otyp != OTYP_CHR) {
1344 1343 con_log(CL_ANN, (CE_WARN,
1345 1344 "mr_sas: ioctl node must be a char node"));
1346 1345 return (EINVAL);
1347 1346 }
1348 1347
1349 1348 if (ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(*dev)))
1350 1349 == NULL) {
1351 1350 return (ENXIO);
1352 1351 }
1353 1352
1354 1353 if (scsi_hba_open) {
1355 1354 rval = scsi_hba_open(dev, openflags, otyp, credp);
1356 1355 }
1357 1356
1358 1357 return (rval);
1359 1358 }
1360 1359
1361 1360 /*
1362 1361 * close - gives up access to a device
1363 1362 * @dev:
1364 1363 * @openflags:
1365 1364 * @otyp:
1366 1365 * @credp:
1367 1366 *
1368 1367 * close() should perform any cleanup necessary to finish using the minor
1369 1368 * device, and prepare the device (and driver) to be opened again.
1370 1369 */
1371 1370 static int
1372 1371 mrsas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
1373 1372 {
1374 1373 int rval = 0;
1375 1374
1376 1375 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1377 1376
1378 1377 /* no need for locks! */
1379 1378
1380 1379 if (scsi_hba_close) {
1381 1380 rval = scsi_hba_close(dev, openflags, otyp, credp);
1382 1381 }
1383 1382
1384 1383 return (rval);
1385 1384 }
1386 1385
1387 1386 /*
1388 1387 * ioctl - performs a range of I/O commands for character drivers
1389 1388 * @dev:
1390 1389 * @cmd:
1391 1390 * @arg:
1392 1391 * @mode:
1393 1392 * @credp:
1394 1393 * @rvalp:
1395 1394 *
1396 1395 * ioctl() routine must make sure that user data is copied into or out of the
1397 1396 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
1398 1397 * and ddi_copyout(), as appropriate.
1399 1398 * This is a wrapper routine to serialize access to the actual ioctl routine.
1400 1399 * ioctl() should return 0 on success, or the appropriate error number. The
1401 1400 * driver may also set the value returned to the calling process through rvalp.
1402 1401 */
1403 1402
1404 1403 static int
1405 1404 mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1406 1405 int *rvalp)
1407 1406 {
1408 1407 int rval = 0;
1409 1408
1410 1409 struct mrsas_instance *instance;
1411 1410 struct mrsas_ioctl *ioctl;
1412 1411 struct mrsas_aen aen;
1413 1412 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1414 1413
1415 1414 instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev)));
1416 1415
1417 1416 if (instance == NULL) {
1418 1417 /* invalid minor number */
1419 1418 con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found."));
1420 1419 return (ENXIO);
1421 1420 }
1422 1421
1423 1422 ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl),
1424 1423 KM_SLEEP);
1425 1424 ASSERT(ioctl);
1426 1425
1427 1426 switch ((uint_t)cmd) {
1428 1427 case MRSAS_IOCTL_FIRMWARE:
1429 1428 if (ddi_copyin((void *)arg, ioctl,
1430 1429 sizeof (struct mrsas_ioctl), mode)) {
1431 1430 con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: "
1432 1431 "ERROR IOCTL copyin"));
1433 1432 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1434 1433 return (EFAULT);
1435 1434 }
1436 1435
1437 1436 if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) {
1438 1437 rval = handle_drv_ioctl(instance, ioctl, mode);
1439 1438 } else {
1440 1439 rval = handle_mfi_ioctl(instance, ioctl, mode);
1441 1440 }
1442 1441
1443 1442 if (ddi_copyout((void *)ioctl, (void *)arg,
1444 1443 (sizeof (struct mrsas_ioctl) - 1), mode)) {
1445 1444 con_log(CL_ANN, (CE_WARN,
1446 1445 "mrsas_ioctl: copy_to_user failed"));
1447 1446 rval = 1;
1448 1447 }
1449 1448
1450 1449 break;
1451 1450 case MRSAS_IOCTL_AEN:
1452 1451 if (ddi_copyin((void *) arg, &aen,
1453 1452 sizeof (struct mrsas_aen), mode)) {
1454 1453 con_log(CL_ANN, (CE_WARN,
1455 1454 "mrsas_ioctl: ERROR AEN copyin"));
1456 1455 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1457 1456 return (EFAULT);
1458 1457 }
1459 1458
1460 1459 rval = handle_mfi_aen(instance, &aen);
1461 1460
1462 1461 if (ddi_copyout((void *) &aen, (void *)arg,
1463 1462 sizeof (struct mrsas_aen), mode)) {
1464 1463 con_log(CL_ANN, (CE_WARN,
1465 1464 "mrsas_ioctl: copy_to_user failed"));
1466 1465 rval = 1;
1467 1466 }
1468 1467
1469 1468 break;
1470 1469 default:
1471 1470 rval = scsi_hba_ioctl(dev, cmd, arg,
1472 1471 mode, credp, rvalp);
1473 1472
1474 1473 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_ioctl: "
1475 1474 "scsi_hba_ioctl called, ret = %x.", rval));
1476 1475 }
1477 1476
1478 1477 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1479 1478 return (rval);
1480 1479 }
1481 1480
1482 1481 /*
1483 1482 * ************************************************************************** *
1484 1483 * *
1485 1484 * common entry points - for block driver types *
1486 1485 * *
1487 1486 * ************************************************************************** *
1488 1487 */
1489 1488 #ifdef __sparc
1490 1489 /*
1491 1490 * reset - TBD
1492 1491 * @dip:
1493 1492 * @cmd:
1494 1493 *
1495 1494 * TBD
1496 1495 */
1497 1496 /*ARGSUSED*/
1498 1497 static int
1499 1498 mrsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1500 1499 {
1501 1500 int instance_no;
1502 1501
1503 1502 struct mrsas_instance *instance;
1504 1503
1505 1504 instance_no = ddi_get_instance(dip);
1506 1505 instance = (struct mrsas_instance *)ddi_get_soft_state
1507 1506 (mrsas_state, instance_no);
1508 1507
1509 1508 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1510 1509
1511 1510 if (!instance) {
1512 1511 con_log(CL_ANN, (CE_WARN, "mr_sas:%d could not get adapter "
1513 1512 "in reset", instance_no));
1514 1513 return (DDI_FAILURE);
1515 1514 }
1516 1515
1517 1516 instance->func_ptr->disable_intr(instance);
1518 1517
1519 1518 con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1520 1519 instance_no));
1521 1520
1522 1521 flush_cache(instance);
1523 1522
1524 1523 return (DDI_SUCCESS);
1525 1524 }
1526 1525 #else /* __sparc */
1527 1526 /*ARGSUSED*/
1528 1527 static int
1529 1528 mrsas_quiesce(dev_info_t *dip)
1530 1529 {
1531 1530 int instance_no;
1532 1531
1533 1532 struct mrsas_instance *instance;
1534 1533
1535 1534 instance_no = ddi_get_instance(dip);
1536 1535 instance = (struct mrsas_instance *)ddi_get_soft_state
1537 1536 (mrsas_state, instance_no);
1538 1537
1539 1538 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1540 1539
1541 1540 if (!instance) {
1542 1541 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d could not get adapter "
1543 1542 "in quiesce", instance_no));
1544 1543 return (DDI_FAILURE);
1545 1544 }
1546 1545 if (instance->deadadapter || instance->adapterresetinprogress) {
1547 1546 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d adapter is not in "
1548 1547 "healthy state", instance_no));
1549 1548 return (DDI_FAILURE);
1550 1549 }
1551 1550
1552 1551 if (abort_aen_cmd(instance, instance->aen_cmd)) {
1553 1552 con_log(CL_ANN1, (CE_WARN, "mrsas_quiesce: "
1554 1553 "failed to abort prevous AEN command QUIESCE"));
1555 1554 }
1556 1555
1557 1556 if (instance->tbolt) {
1558 1557 if (abort_syncmap_cmd(instance,
1559 1558 instance->map_update_cmd)) {
1560 1559 dev_err(dip, CE_WARN,
1561 1560 "mrsas_detach: failed to abort "
1562 1561 "previous syncmap command");
1563 1562 return (DDI_FAILURE);
1564 1563 }
1565 1564 }
1566 1565
1567 1566 instance->func_ptr->disable_intr(instance);
1568 1567
1569 1568 con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1570 1569 instance_no));
1571 1570
1572 1571 flush_cache(instance);
1573 1572
1574 1573 if (wait_for_outstanding(instance)) {
1575 1574 con_log(CL_ANN1,
1576 1575 (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
1577 1576 return (DDI_FAILURE);
1578 1577 }
1579 1578 return (DDI_SUCCESS);
1580 1579 }
1581 1580 #endif /* __sparc */
1582 1581
1583 1582 /*
1584 1583 * ************************************************************************** *
1585 1584 * *
1586 1585 * entry points (SCSI HBA) *
1587 1586 * *
1588 1587 * ************************************************************************** *
1589 1588 */
1590 1589 /*
1591 1590 * tran_tgt_init - initialize a target device instance
1592 1591 * @hba_dip:
1593 1592 * @tgt_dip:
1594 1593 * @tran:
1595 1594 * @sd:
1596 1595 *
1597 1596 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
1598 1597 * any per-target resources. tran_tgt_init() also enables the HBA to qualify
1599 1598 * the device's address as valid and supportable for that particular HBA.
1600 1599 * By returning DDI_FAILURE, the instance of the target driver for that device
1601 1600 * is not probed or attached.
1602 1601 */
1603 1602 /*ARGSUSED*/
1604 1603 static int
1605 1604 mrsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1606 1605 scsi_hba_tran_t *tran, struct scsi_device *sd)
1607 1606 {
1608 1607 struct mrsas_instance *instance;
1609 1608 uint16_t tgt = sd->sd_address.a_target;
1610 1609 uint8_t lun = sd->sd_address.a_lun;
1611 1610 dev_info_t *child = NULL;
1612 1611
1613 1612 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init target %d lun %d",
1614 1613 tgt, lun));
1615 1614
1616 1615 instance = ADDR2MR(&sd->sd_address);
1617 1616
1618 1617 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
1619 1618 /*
1620 1619 * If no persistent node exists, we don't allow .conf node
1621 1620 * to be created.
1622 1621 */
1623 1622 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
1624 1623 con_log(CL_DLEVEL2,
1625 1624 (CE_NOTE, "mrsas_tgt_init find child ="
1626 1625 " %p t = %d l = %d", (void *)child, tgt, lun));
1627 1626 if (ndi_merge_node(tgt_dip, mrsas_name_node) !=
1628 1627 DDI_SUCCESS)
1629 1628 /* Create this .conf node */
1630 1629 return (DDI_SUCCESS);
1631 1630 }
1632 1631 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init in ndi_per "
1633 1632 "DDI_FAILURE t = %d l = %d", tgt, lun));
1634 1633 return (DDI_FAILURE);
1635 1634
1636 1635 }
1637 1636
1638 1637 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p",
1639 1638 (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip));
1640 1639
1641 1640 if (tgt < MRDRV_MAX_LD && lun == 0) {
1642 1641 if (instance->mr_ld_list[tgt].dip == NULL &&
1643 1642 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) {
1644 1643 mutex_enter(&instance->config_dev_mtx);
1645 1644 instance->mr_ld_list[tgt].dip = tgt_dip;
1646 1645 instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN;
1647 1646 instance->mr_ld_list[tgt].flag = MRDRV_TGT_VALID;
1648 1647 mutex_exit(&instance->config_dev_mtx);
1649 1648 }
1650 1649 } else if (instance->tbolt || instance->skinny) {
1651 1650 if (instance->mr_tbolt_pd_list[tgt].dip == NULL) {
1652 1651 mutex_enter(&instance->config_dev_mtx);
1653 1652 instance->mr_tbolt_pd_list[tgt].dip = tgt_dip;
1654 1653 instance->mr_tbolt_pd_list[tgt].flag =
1655 1654 MRDRV_TGT_VALID;
1656 1655 mutex_exit(&instance->config_dev_mtx);
1657 1656 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_tgt_init:"
1658 1657 "t%xl%x", tgt, lun));
1659 1658 }
1660 1659 }
1661 1660
1662 1661 return (DDI_SUCCESS);
1663 1662 }
1664 1663
/*ARGSUSED*/
static void
mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	struct mrsas_instance *instance;
	int tgt = sd->sd_address.a_target;
	int lun = sd->sd_address.a_lun;

	instance = ADDR2MR(&sd->sd_address);

	con_log(CL_DLEVEL2, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun));

	/* Logical drives occupy targets [0, MRDRV_MAX_LD) at LUN 0. */
	if (tgt < MRDRV_MAX_LD && lun == 0) {
		/* Only clear the slot if it still refers to this dip. */
		if (instance->mr_ld_list[tgt].dip == tgt_dip) {
			mutex_enter(&instance->config_dev_mtx);
			instance->mr_ld_list[tgt].dip = NULL;
			mutex_exit(&instance->config_dev_mtx);
		}
	} else if (instance->tbolt || instance->skinny) {
		/*
		 * NOTE(review): tgt indexes mr_tbolt_pd_list with no upper
		 * bound check here; presumably SCSA only frees targets it
		 * initialized within MRSAS_TBOLT_GET_PD_MAX() — verify.
		 */
		mutex_enter(&instance->config_dev_mtx);
		instance->mr_tbolt_pd_list[tgt].dip = NULL;
		mutex_exit(&instance->config_dev_mtx);
		con_log(CL_ANN1, (CE_NOTE, "tgt_free: Setting dip = NULL"
		    "for tgt:%x", tgt));
	}
}
1692 1691
1693 1692 dev_info_t *
1694 1693 mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun)
1695 1694 {
1696 1695 dev_info_t *child = NULL;
1697 1696 char addr[SCSI_MAXNAMELEN];
1698 1697 char tmp[MAXNAMELEN];
1699 1698
1700 1699 (void) snprintf(addr, sizeof (addr), "%x,%x", tgt, lun);
1701 1700 for (child = ddi_get_child(instance->dip); child;
1702 1701 child = ddi_get_next_sibling(child)) {
1703 1702
1704 1703 if (ndi_dev_is_persistent_node(child) == 0) {
1705 1704 continue;
1706 1705 }
1707 1706
1708 1707 if (mrsas_name_node(child, tmp, MAXNAMELEN) !=
1709 1708 DDI_SUCCESS) {
1710 1709 continue;
1711 1710 }
1712 1711
1713 1712 if (strcmp(addr, tmp) == 0) {
1714 1713 break;
1715 1714 }
1716 1715 }
1717 1716 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_find_child: return child = %p",
1718 1717 (void *)child));
1719 1718 return (child);
1720 1719 }
1721 1720
1722 1721 /*
1723 1722 * mrsas_name_node -
1724 1723 * @dip:
1725 1724 * @name:
1726 1725 * @len:
1727 1726 */
1728 1727 static int
1729 1728 mrsas_name_node(dev_info_t *dip, char *name, int len)
1730 1729 {
1731 1730 int tgt, lun;
1732 1731
1733 1732 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1734 1733 DDI_PROP_DONTPASS, "target", -1);
1735 1734 con_log(CL_DLEVEL2, (CE_NOTE,
1736 1735 "mrsas_name_node: dip %p tgt %d", (void *)dip, tgt));
1737 1736 if (tgt == -1) {
1738 1737 return (DDI_FAILURE);
1739 1738 }
1740 1739 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1741 1740 "lun", -1);
1742 1741 con_log(CL_DLEVEL2,
1743 1742 (CE_NOTE, "mrsas_name_node: tgt %d lun %d", tgt, lun));
1744 1743 if (lun == -1) {
1745 1744 return (DDI_FAILURE);
1746 1745 }
1747 1746 (void) snprintf(name, len, "%x,%x", tgt, lun);
1748 1747 return (DDI_SUCCESS);
1749 1748 }
1750 1749
1751 1750 /*
1752 1751 * tran_init_pkt - allocate & initialize a scsi_pkt structure
1753 1752 * @ap:
1754 1753 * @pkt:
1755 1754 * @bp:
1756 1755 * @cmdlen:
1757 1756 * @statuslen:
1758 1757 * @tgtlen:
1759 1758 * @flags:
1760 1759 * @callback:
1761 1760 *
1762 1761 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1763 1762 * structure and DMA resources for a target driver request. The
1764 1763 * tran_init_pkt() entry point is called when the target driver calls the
1765 1764 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1766 1765 * is a request to perform one or more of three possible services:
1767 1766 * - allocation and initialization of a scsi_pkt structure
1768 1767 * - allocation of DMA resources for data transfer
1769 1768 * - reallocation of DMA resources for the next portion of the data transfer
1770 1769 */
static struct scsi_pkt *
mrsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
	struct scsa_cmd *acmd;
	struct mrsas_instance *instance;
	struct scsi_pkt *new_pkt;

	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	instance = ADDR2MR(ap);

	/* step #1 : pkt allocation */
	if (pkt == NULL) {
		/*
		 * Fresh request: allocate the pkt with our per-command
		 * private area (struct scsa_cmd) piggybacked on it.
		 */
		pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
		    tgtlen, sizeof (struct scsa_cmd), callback, arg);
		if (pkt == NULL) {
			return (NULL);
		}

		acmd = PKT2CMD(pkt);

		/*
		 * Initialize the new pkt - we redundantly initialize
		 * all the fields for illustrative purposes.
		 */
		acmd->cmd_pkt = pkt;
		acmd->cmd_flags = 0;
		acmd->cmd_scblen = statuslen;
		acmd->cmd_cdblen = cmdlen;
		acmd->cmd_dmahandle = NULL;
		acmd->cmd_ncookies = 0;
		acmd->cmd_cookie = 0;
		acmd->cmd_cookiecnt = 0;
		acmd->cmd_nwin = 0;

		pkt->pkt_address = *ap;
		pkt->pkt_comp = (void (*)())NULL;
		pkt->pkt_flags = 0;
		pkt->pkt_time = 0;
		pkt->pkt_resid = 0;
		pkt->pkt_state = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;
		/* Remember we own this pkt so a later failure can free it. */
		new_pkt = pkt;
	} else {
		/* Re-use of an existing pkt (e.g. partial DMA continuation). */
		acmd = PKT2CMD(pkt);
		new_pkt = NULL;
	}

	/* step #2 : dma allocation/move */
	if (bp && bp->b_bcount != 0) {
		if (acmd->cmd_dmahandle == NULL) {
			/*
			 * First binding of this buf: allocate DMA
			 * resources; on failure free the pkt only if we
			 * allocated it above (never the caller's pkt).
			 */
			if (mrsas_dma_alloc(instance, pkt, bp, flags,
			    callback) == DDI_FAILURE) {
				if (new_pkt) {
					scsi_hba_pkt_free(ap, new_pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		} else {
			/* Move to the next DMA window of an existing binding. */
			if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
				return ((struct scsi_pkt *)NULL);
			}
		}
	}

	return (pkt);
}
1841 1840
1842 1841 /*
1843 1842 * tran_start - transport a SCSI command to the addressed target
1844 1843 * @ap:
1845 1844 * @pkt:
1846 1845 *
1847 1846 * The tran_start() entry point for a SCSI HBA driver is called to transport a
1848 1847 * SCSI command to the addressed target. The SCSI command is described
1849 1848 * entirely within the scsi_pkt structure, which the target driver allocated
1850 1849 * through the HBA driver's tran_init_pkt() entry point. If the command
1851 1850 * involves a data transfer, DMA resources must also have been allocated for
1852 1851 * the scsi_pkt structure.
1853 1852 *
1854 1853 * Return Values :
1855 1854 * TRAN_BUSY - request queue is full, no more free scbs
1856 1855 * TRAN_ACCEPT - pkt has been submitted to the instance
1857 1856 */
static int
mrsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
{
	uchar_t		cmd_done = 0;

	struct mrsas_instance	*instance = ADDR2MR(ap);
	struct mrsas_cmd	*cmd;

	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	/* A dead adapter accepts no further I/O: fail fast with DEV_GONE. */
	if (instance->deadadapter == 1) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_tran_start: return TRAN_FATAL_ERROR "
		    "for IO, as the HBA doesnt take any more IOs"));
		if (pkt) {
			pkt->pkt_reason		= CMD_DEV_GONE;
			pkt->pkt_statistics	= STAT_DISCON;
		}
		return (TRAN_FATAL_ERROR);
	}

	/* During adapter reset, push back on the target with TRAN_BUSY. */
	if (instance->adapterresetinprogress) {
		con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_start: Reset flag set, "
		    "returning mfi_pkt and setting TRAN_BUSY\n"));
		return (TRAN_BUSY);
	}

	con_log(CL_ANN1, (CE_CONT, "chkpnt:%s:%d:SCSI CDB[0]=0x%x time:%x",
	    __func__, __LINE__, pkt->pkt_cdbp[0], pkt->pkt_time));

	pkt->pkt_reason	= CMD_CMPLT;
	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */

	cmd = build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command is already completed by the mrsas_build_cmd()
	 * routine. In which case the busy_flag would be clear and scb will be
	 * NULL and appropriate reason provided in pkt_reason field
	 */
	if (cmd_done) {
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

		return (TRAN_ACCEPT);
	}

	if (cmd == NULL) {
		/* command pool exhausted; target will retry */
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		/* interrupt-driven path */
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			con_log(CL_ANN, (CE_CONT, "mr_sas:Firmware busy"));
			DTRACE_PROBE2(start_tran_err,
			    uint16_t, instance->fw_outstanding,
			    uint16_t, instance->max_fw_cmds);
			mrsas_return_mfi_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		con_log(CL_ANN, (CE_CONT, "issue_cmd_ppc: SCSI CDB[0]=0x%x"
		    "cmd->index:%x\n", pkt->pkt_cdbp[0], cmd->index));
		instance->func_ptr->issue_cmd(cmd, instance);

	} else {
		/*
		 * FLAG_NOINTR: issue in polled mode and complete the packet
		 * synchronously before returning.
		 */
		struct mrsas_header *hdr = &cmd->frame->hdr;

		instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);

		pkt->pkt_reason		= CMD_CMPLT;
		pkt->pkt_statistics	= 0;
		pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;

		/* map firmware completion status onto SCSA pkt fields */
		switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
		    &hdr->cmd_status)) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:
			con_log(CL_ANN, (CE_CONT,
			    "mrsas_tran_start: scsi done with error"));
			pkt->pkt_reason	= CMD_CMPLT;
			pkt->pkt_statistics = 0;

			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
			break;

		case MFI_STAT_DEVICE_NOT_FOUND:
			con_log(CL_ANN, (CE_CONT,
			    "mrsas_tran_start: device not found error"));
			pkt->pkt_reason	= CMD_DEV_GONE;
			pkt->pkt_statistics  = STAT_DISCON;
			break;

		default:
			((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
		}

		(void) mrsas_common_check(instance, cmd);
		DTRACE_PROBE2(start_nointr_done, uint8_t, hdr->cmd,
		    uint8_t, hdr->cmd_status);
		mrsas_return_mfi_pkt(instance, cmd);

		if (pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

	}

	return (TRAN_ACCEPT);
}
1978 1977
1979 1978 /*
1980 1979 * tran_abort - Abort any commands that are currently in transport
1981 1980 * @ap:
1982 1981 * @pkt:
1983 1982 *
1984 1983 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
1985 1984 * commands that are currently in transport for a particular target. This entry
1986 1985 * point is called when a target driver calls scsi_abort(). The tran_abort()
1987 1986 * entry point should attempt to abort the command denoted by the pkt
1988 1987 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
1989 1988 * abort all outstanding commands in the transport layer for the particular
1990 1989 * target or logical unit.
1991 1990 */
1992 1991 /*ARGSUSED*/
1993 1992 static int
1994 1993 mrsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1995 1994 {
1996 1995 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1997 1996
1998 1997 /* abort command not supported by H/W */
1999 1998
2000 1999 return (DDI_FAILURE);
2001 2000 }
2002 2001
2003 2002 /*
2004 2003 * tran_reset - reset either the SCSI bus or target
2005 2004 * @ap:
2006 2005 * @level:
2007 2006 *
2008 2007 * The tran_reset() entry point for a SCSI HBA driver is called to reset either
2009 2008 * the SCSI bus or a particular SCSI target device. This entry point is called
2010 2009 * when a target driver calls scsi_reset(). The tran_reset() entry point must
2011 2010 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
2012 2011 * particular target or logical unit must be reset.
2013 2012 */
2014 2013 /*ARGSUSED*/
2015 2014 static int
2016 2015 mrsas_tran_reset(struct scsi_address *ap, int level)
2017 2016 {
2018 2017 struct mrsas_instance *instance = ADDR2MR(ap);
2019 2018
2020 2019 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2021 2020
2022 2021 if (wait_for_outstanding(instance)) {
2023 2022 con_log(CL_ANN1,
2024 2023 (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
2025 2024 return (DDI_FAILURE);
2026 2025 } else {
2027 2026 return (DDI_SUCCESS);
2028 2027 }
2029 2028 }
2030 2029
2031 2030 /*
2032 2031 * tran_getcap - get one of a set of SCSA-defined capabilities
2033 2032 * @ap:
2034 2033 * @cap:
2035 2034 * @whom:
2036 2035 *
2037 2036 * The target driver can request the current setting of the capability for a
2038 2037 * particular target by setting the whom parameter to nonzero. A whom value of
2039 2038 * zero indicates a request for the current setting of the general capability
2040 2039 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
2041 2040 * for undefined capabilities or the current value of the requested capability.
2042 2041 */
2043 2042 /*ARGSUSED*/
2044 2043 static int
2045 2044 mrsas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
2046 2045 {
2047 2046 int rval = 0;
2048 2047
2049 2048 struct mrsas_instance *instance = ADDR2MR(ap);
2050 2049
2051 2050 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2052 2051
2053 2052 /* we do allow inquiring about capabilities for other targets */
2054 2053 if (cap == NULL) {
2055 2054 return (-1);
2056 2055 }
2057 2056
2058 2057 switch (scsi_hba_lookup_capstr(cap)) {
2059 2058 case SCSI_CAP_DMA_MAX:
2060 2059 if (instance->tbolt) {
2061 2060 /* Limit to 256k max transfer */
2062 2061 rval = mrsas_tbolt_max_cap_maxxfer;
2063 2062 } else {
2064 2063 /* Limit to 16MB max transfer */
2065 2064 rval = mrsas_max_cap_maxxfer;
2066 2065 }
2067 2066 break;
2068 2067 case SCSI_CAP_MSG_OUT:
2069 2068 rval = 1;
2070 2069 break;
2071 2070 case SCSI_CAP_DISCONNECT:
2072 2071 rval = 0;
2073 2072 break;
2074 2073 case SCSI_CAP_SYNCHRONOUS:
2075 2074 rval = 0;
2076 2075 break;
2077 2076 case SCSI_CAP_WIDE_XFER:
2078 2077 rval = 1;
2079 2078 break;
2080 2079 case SCSI_CAP_TAGGED_QING:
2081 2080 rval = 1;
2082 2081 break;
2083 2082 case SCSI_CAP_UNTAGGED_QING:
2084 2083 rval = 1;
2085 2084 break;
2086 2085 case SCSI_CAP_PARITY:
2087 2086 rval = 1;
2088 2087 break;
2089 2088 case SCSI_CAP_INITIATOR_ID:
2090 2089 rval = instance->init_id;
2091 2090 break;
2092 2091 case SCSI_CAP_ARQ:
2093 2092 rval = 1;
2094 2093 break;
2095 2094 case SCSI_CAP_LINKED_CMDS:
2096 2095 rval = 0;
2097 2096 break;
2098 2097 case SCSI_CAP_RESET_NOTIFICATION:
2099 2098 rval = 1;
2100 2099 break;
2101 2100 case SCSI_CAP_GEOMETRY:
2102 2101 rval = -1;
2103 2102
2104 2103 break;
2105 2104 default:
2106 2105 con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
2107 2106 scsi_hba_lookup_capstr(cap)));
2108 2107 rval = -1;
2109 2108 break;
2110 2109 }
2111 2110
2112 2111 return (rval);
2113 2112 }
2114 2113
2115 2114 /*
2116 2115 * tran_setcap - set one of a set of SCSA-defined capabilities
2117 2116 * @ap:
2118 2117 * @cap:
2119 2118 * @value:
2120 2119 * @whom:
2121 2120 *
2122 2121 * The target driver might request that the new value be set for a particular
2123 2122 * target by setting the whom parameter to nonzero. A whom value of zero
2124 2123 * means that request is to set the new value for the SCSI bus or for adapter
2125 2124 * hardware in general.
2126 2125 * The tran_setcap() should return the following values as appropriate:
2127 2126 * - -1 for undefined capabilities
2128 2127 * - 0 if the HBA driver cannot set the capability to the requested value
2129 2128 * - 1 if the HBA driver is able to set the capability to the requested value
2130 2129 */
2131 2130 /*ARGSUSED*/
2132 2131 static int
2133 2132 mrsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
2134 2133 {
2135 2134 int rval = 1;
2136 2135
2137 2136 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2138 2137
2139 2138 /* We don't allow setting capabilities for other targets */
2140 2139 if (cap == NULL || whom == 0) {
2141 2140 return (-1);
2142 2141 }
2143 2142
2144 2143 switch (scsi_hba_lookup_capstr(cap)) {
2145 2144 case SCSI_CAP_DMA_MAX:
2146 2145 case SCSI_CAP_MSG_OUT:
2147 2146 case SCSI_CAP_PARITY:
2148 2147 case SCSI_CAP_LINKED_CMDS:
2149 2148 case SCSI_CAP_RESET_NOTIFICATION:
2150 2149 case SCSI_CAP_DISCONNECT:
2151 2150 case SCSI_CAP_SYNCHRONOUS:
2152 2151 case SCSI_CAP_UNTAGGED_QING:
2153 2152 case SCSI_CAP_WIDE_XFER:
2154 2153 case SCSI_CAP_INITIATOR_ID:
2155 2154 case SCSI_CAP_ARQ:
2156 2155 /*
2157 2156 * None of these are settable via
2158 2157 * the capability interface.
2159 2158 */
2160 2159 break;
2161 2160 case SCSI_CAP_TAGGED_QING:
2162 2161 rval = 1;
2163 2162 break;
2164 2163 case SCSI_CAP_SECTOR_SIZE:
2165 2164 rval = 1;
2166 2165 break;
2167 2166
2168 2167 case SCSI_CAP_TOTAL_SECTORS:
2169 2168 rval = 1;
2170 2169 break;
2171 2170 default:
2172 2171 rval = -1;
2173 2172 break;
2174 2173 }
2175 2174
2176 2175 return (rval);
2177 2176 }
2178 2177
2179 2178 /*
2180 2179 * tran_destroy_pkt - deallocate scsi_pkt structure
2181 2180 * @ap:
2182 2181 * @pkt:
2183 2182 *
2184 2183 * The tran_destroy_pkt() entry point is the HBA driver function that
2185 2184 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
2186 2185 * called when the target driver calls scsi_destroy_pkt(). The
2187 2186 * tran_destroy_pkt() entry point must free any DMA resources that have been
2188 2187 * allocated for the packet. An implicit DMA synchronization occurs if the
2189 2188 * DMA resources are freed and any cached data remains after the completion
2190 2189 * of the transfer.
2191 2190 */
2192 2191 static void
2193 2192 mrsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2194 2193 {
2195 2194 struct scsa_cmd *acmd = PKT2CMD(pkt);
2196 2195
2197 2196 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2198 2197
2199 2198 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2200 2199 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2201 2200
2202 2201 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2203 2202
2204 2203 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2205 2204
2206 2205 acmd->cmd_dmahandle = NULL;
2207 2206 }
2208 2207
2209 2208 /* free the pkt */
2210 2209 scsi_hba_pkt_free(ap, pkt);
2211 2210 }
2212 2211
2213 2212 /*
2214 2213 * tran_dmafree - deallocates DMA resources
2215 2214 * @ap:
2216 2215 * @pkt:
2217 2216 *
2218 2217 * The tran_dmafree() entry point deallocates DMAQ resources that have been
2219 2218 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
2220 2219 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
2221 2220 * free only DMA resources allocated for a scsi_pkt structure, not the
2222 2221 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
2223 2222 * implicitly performed.
2224 2223 */
2225 2224 /*ARGSUSED*/
2226 2225 static void
2227 2226 mrsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2228 2227 {
2229 2228 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2230 2229
2231 2230 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2232 2231
2233 2232 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2234 2233 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2235 2234
2236 2235 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2237 2236
2238 2237 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2239 2238
2240 2239 acmd->cmd_dmahandle = NULL;
2241 2240 }
2242 2241 }
2243 2242
2244 2243 /*
2245 2244 * tran_sync_pkt - synchronize the DMA object allocated
2246 2245 * @ap:
2247 2246 * @pkt:
2248 2247 *
2249 2248 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
2250 2249 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
2251 2250 * entry point is called when the target driver calls scsi_sync_pkt(). If the
2252 2251 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
2253 2252 * must synchronize the CPU's view of the data. If the data transfer direction
2254 2253 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
2255 2254 * device's view of the data.
2256 2255 */
2257 2256 /*ARGSUSED*/
2258 2257 static void
2259 2258 mrsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2260 2259 {
2261 2260 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2262 2261
2263 2262 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2264 2263
2265 2264 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2266 2265 (void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
2267 2266 acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
2268 2267 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
2269 2268 }
2270 2269 }
2271 2270
2272 2271 /*ARGSUSED*/
2273 2272 static int
2274 2273 mrsas_tran_quiesce(dev_info_t *dip)
2275 2274 {
2276 2275 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2277 2276
2278 2277 return (1);
2279 2278 }
2280 2279
2281 2280 /*ARGSUSED*/
2282 2281 static int
2283 2282 mrsas_tran_unquiesce(dev_info_t *dip)
2284 2283 {
2285 2284 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2286 2285
2287 2286 return (1);
2288 2287 }
2289 2288
2290 2289
/*
 * mrsas_isr(caddr_t)
 *
 * The Interrupt Service Routine
 *
 * Collect status for all completed commands and do callback
 *
 * For Thunderbolt (tbolt) adapters the work is delegated entirely to
 * mr_sas_tbolt_process_outstanding_cmd() under chip_mtx.  For MFI
 * adapters the reply ring (producer/consumer indices in DMA-shared
 * memory) is drained here and completed commands are moved to the
 * completed_pool_list for the soft interrupt handler to finish.
 */
static uint_t
mrsas_isr(struct mrsas_instance *instance)
{
	int		need_softintr;
	uint32_t	producer;
	uint32_t	consumer;
	uint32_t	context;
	int		retval;

	struct mrsas_cmd	*cmd;
	struct mrsas_header	*hdr;
	struct scsi_pkt		*pkt;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	ASSERT(instance);
	if (instance->tbolt) {
		mutex_enter(&instance->chip_mtx);
		/*
		 * With fixed (shared) interrupts we must ask the chip whether
		 * the interrupt is really ours before claiming it.
		 */
		if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
		    !(instance->func_ptr->intr_ack(instance))) {
			mutex_exit(&instance->chip_mtx);
			return (DDI_INTR_UNCLAIMED);
		}
		retval = mr_sas_tbolt_process_outstanding_cmd(instance);
		mutex_exit(&instance->chip_mtx);
		return (retval);
	} else {
		if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
		    !instance->func_ptr->intr_ack(instance)) {
			return (DDI_INTR_UNCLAIMED);
		}
	}

	/* Pull the latest producer/consumer indices written by firmware. */
	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);

	if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
	    != DDI_SUCCESS) {
		mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		con_log(CL_ANN1, (CE_WARN,
		    "mr_sas_isr(): FMA check, returning DDI_INTR_UNCLAIMED"));
		/* NOTE(review): claims the interrupt despite the log text */
		return (DDI_INTR_CLAIMED);
	}
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

#ifdef OCRDEBUG
	if (debug_consecutive_timeout_after_ocr_g == 1) {
		con_log(CL_ANN1, (CE_NOTE,
		    "simulating consecutive timeout after ocr"));
		return (DDI_INTR_CLAIMED);
	}
#endif

	/*
	 * Lock order: completed_pool_mtx before cmd_pend_mtx — commands are
	 * removed from the pending list and appended to the completed pool
	 * under both locks.
	 */
	mutex_enter(&instance->completed_pool_mtx);
	mutex_enter(&instance->cmd_pend_mtx);

	producer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->producer);
	consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer);

	con_log(CL_ANN, (CE_CONT, " producer %x consumer %x ",
	    producer, consumer));
	if (producer == consumer) {
		/* ring empty: nothing completed, spurious wakeup */
		con_log(CL_ANN, (CE_WARN, "producer == consumer case"));
		DTRACE_PROBE2(isr_pc_err, uint32_t, producer,
		    uint32_t, consumer);
		mutex_exit(&instance->cmd_pend_mtx);
		mutex_exit(&instance->completed_pool_mtx);
		return (DDI_INTR_CLAIMED);
	}

	while (consumer != producer) {
		/* each ring slot holds the context (index) of a command */
		context = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
		    &instance->reply_queue[consumer]);
		cmd = instance->cmd_list[context];

		if (cmd->sync_cmd == MRSAS_TRUE) {
			hdr = (struct mrsas_header *)&cmd->frame->hdr;
			if (hdr) {
				mlist_del_init(&cmd->list);
			}
		} else {
			pkt = cmd->pkt;
			if (pkt) {
				mlist_del_init(&cmd->list);
			}
		}

		mlist_add_tail(&cmd->list, &instance->completed_pool_list);

		consumer++;
		/* ring has max_fw_cmds + 1 slots; wrap at the end */
		if (consumer == (instance->max_fw_cmds + 1)) {
			consumer = 0;
		}
	}
	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer, consumer);
	mutex_exit(&instance->cmd_pend_mtx);
	mutex_exit(&instance->completed_pool_mtx);

	/* publish the updated consumer index back to the firmware */
	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	/* avoid re-triggering if the soft interrupt is already running */
	if (instance->softint_running) {
		need_softintr = 0;
	} else {
		need_softintr = 1;
	}

	if (instance->isr_level == HIGH_LEVEL_INTR) {
		if (need_softintr) {
			ddi_trigger_softintr(instance->soft_intr_id);
		}
	} else {
		/*
		 * Not a high-level interrupt, therefore call the soft level
		 * interrupt explicitly
		 */
		(void) mrsas_softintr(instance);
	}

	return (DDI_INTR_CLAIMED);
}
2423 2422
2424 2423
2425 2424 /*
2426 2425 * ************************************************************************** *
2427 2426 * *
2428 2427 * libraries *
2429 2428 * *
2430 2429 * ************************************************************************** *
2431 2430 */
2432 2431 /*
2433 2432 * get_mfi_pkt : Get a command from the free pool
2434 2433 * After successful allocation, the caller of this routine
2435 2434 * must clear the frame buffer (memset to zero) before
2436 2435 * using the packet further.
2437 2436 *
2438 2437 * ***** Note *****
2439 2438 * After clearing the frame buffer the context id of the
2440 2439 * frame buffer SHOULD be restored back.
2441 2440 */
2442 2441 struct mrsas_cmd *
2443 2442 mrsas_get_mfi_pkt(struct mrsas_instance *instance)
2444 2443 {
2445 2444 mlist_t *head = &instance->cmd_pool_list;
2446 2445 struct mrsas_cmd *cmd = NULL;
2447 2446
2448 2447 mutex_enter(&instance->cmd_pool_mtx);
2449 2448
2450 2449 if (!mlist_empty(head)) {
2451 2450 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2452 2451 mlist_del_init(head->next);
2453 2452 }
2454 2453 if (cmd != NULL) {
2455 2454 cmd->pkt = NULL;
2456 2455 cmd->retry_count_for_ocr = 0;
2457 2456 cmd->drv_pkt_time = 0;
2458 2457
2459 2458 }
2460 2459 mutex_exit(&instance->cmd_pool_mtx);
2461 2460
2462 2461 return (cmd);
2463 2462 }
2464 2463
2465 2464 static struct mrsas_cmd *
2466 2465 get_mfi_app_pkt(struct mrsas_instance *instance)
2467 2466 {
2468 2467 mlist_t *head = &instance->app_cmd_pool_list;
2469 2468 struct mrsas_cmd *cmd = NULL;
2470 2469
2471 2470 mutex_enter(&instance->app_cmd_pool_mtx);
2472 2471
2473 2472 if (!mlist_empty(head)) {
2474 2473 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2475 2474 mlist_del_init(head->next);
2476 2475 }
2477 2476 if (cmd != NULL) {
2478 2477 cmd->pkt = NULL;
2479 2478 cmd->retry_count_for_ocr = 0;
2480 2479 cmd->drv_pkt_time = 0;
2481 2480 }
2482 2481
2483 2482 mutex_exit(&instance->app_cmd_pool_mtx);
2484 2483
2485 2484 return (cmd);
2486 2485 }
2487 2486 /*
2488 2487 * return_mfi_pkt : Return a cmd to free command pool
2489 2488 */
2490 2489 void
2491 2490 mrsas_return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2492 2491 {
2493 2492 mutex_enter(&instance->cmd_pool_mtx);
2494 2493 /* use mlist_add_tail for debug assistance */
2495 2494 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2496 2495
2497 2496 mutex_exit(&instance->cmd_pool_mtx);
2498 2497 }
2499 2498
2500 2499 static void
2501 2500 return_mfi_app_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2502 2501 {
2503 2502 mutex_enter(&instance->app_cmd_pool_mtx);
2504 2503
2505 2504 mlist_add(&cmd->list, &instance->app_cmd_pool_list);
2506 2505
2507 2506 mutex_exit(&instance->app_cmd_pool_mtx);
2508 2507 }
/*
 * push_pending_mfi_pkt : Move a command onto the pending list so the
 * io_timeout_checker() can watch it, and arm the timeout callback if it
 * is not already running (timeout_id == (timeout_id_t)-1 means idle).
 * The command's drv_pkt_time is primed from the frame header timeout
 * (sync commands) or from debug_timeout_g (regular I/O packets), with
 * debug_timeout_g acting as a floor.
 */
void
push_pending_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	struct scsi_pkt *pkt;
	struct mrsas_header *hdr;
	con_log(CL_DLEVEL2, (CE_NOTE, "push_pending_pkt(): Called\n"));
	mutex_enter(&instance->cmd_pend_mtx);
	/* re-link onto the pending list, removing any previous membership */
	mlist_del_init(&cmd->list);
	mlist_add_tail(&cmd->list, &instance->cmd_pend_list);
	if (cmd->sync_cmd == MRSAS_TRUE) {
		hdr = (struct mrsas_header *)&cmd->frame->hdr;
		if (hdr) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x "
			    "time %llx",
			    (void *)cmd, cmd->index,
			    gethrtime()));
			/* Wait for specified interval */
			cmd->drv_pkt_time = ddi_get16(
			    cmd->frame_dma_obj.acc_handle, &hdr->timeout);
			if (cmd->drv_pkt_time < debug_timeout_g)
				cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_pkt(): "
			    "Called IO Timeout Value %x\n",
			    cmd->drv_pkt_time));
		}
		/* start the 1-second timeout checker if it is not running */
		if (hdr && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	} else {
		pkt = cmd->pkt;
		if (pkt) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x pkt %p, "
			    "time %llx",
			    (void *)cmd, cmd->index, (void *)pkt,
			    gethrtime()));
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
		}
		/* start the 1-second timeout checker if it is not running */
		if (pkt && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	}

	mutex_exit(&instance->cmd_pend_mtx);

}
2561 2560
/*
 * mrsas_print_pending_cmds : Walk the pending-command list and log every
 * entry for diagnostic purposes.  Temporarily forces debug_level_g to
 * CL_ANN1 so the con_log() output is visible, restoring the saved level
 * on exit.  The first command gets a full detail dump (detail 0xDD in
 * mrsas_print_cmd_details), subsequent ones a brief dump.
 *
 * NOTE(review): cmd_pend_mtx is dropped between list steps, so the list
 * may change while a command is being printed — acceptable for a
 * debug-only walk.  Always returns DDI_SUCCESS.
 */
int
mrsas_print_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head;
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_header *hdr;
	unsigned int flag = 1;
	struct scsi_pkt *pkt;
	int saved_level;
	int cmd_count = 0;

	/* force verbose logging for the duration of the dump */
	saved_level = debug_level_g;
	debug_level_g = CL_ANN1;

	dev_err(instance->dip, CE_NOTE,
	    "mrsas_print_pending_cmds(): Called");

	while (flag) {
		mutex_enter(&instance->cmd_pend_mtx);
		tmp = tmp->next;
		if (tmp == head) {
			/* wrapped back to the list head: done */
			mutex_exit(&instance->cmd_pend_mtx);
			flag = 0;
			con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds():"
			    " NO MORE CMDS PENDING....\n"));
			break;
		} else {
			cmd = mlist_entry(tmp, struct mrsas_cmd, list);
			mutex_exit(&instance->cmd_pend_mtx);
			if (cmd) {
				if (cmd->sync_cmd == MRSAS_TRUE) {
					/* DCMD/sync command: no scsi_pkt */
					hdr = (struct mrsas_header *)
					    &cmd->frame->hdr;
					if (hdr) {
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index 0x%x "
						    "drv_pkt_time 0x%x (NO-PKT)"
						    " hdr %p\n", (void *)cmd,
						    cmd->index,
						    cmd->drv_pkt_time,
						    (void *)hdr));
					}
				} else {
					pkt = cmd->pkt;
					if (pkt) {
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index 0x%x "
						    "drv_pkt_time 0x%x pkt %p \n",
						    (void *)cmd, cmd->index,
						    cmd->drv_pkt_time, (void *)pkt));
					}
				}

				/* full detail for the first command only */
				if (++cmd_count == 1) {
					mrsas_print_cmd_details(instance, cmd,
					    0xDD);
				} else {
					mrsas_print_cmd_details(instance, cmd,
					    1);
				}

			}
		}
	}
	con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds(): Done\n"));


	debug_level_g = saved_level;

	return (DDI_SUCCESS);
}
2634 2633
2635 2634
/*
 * mrsas_complete_pending_cmds : Fail every command still on the pending
 * list — used when the adapter is gone or being torn down.  I/O packets
 * are completed back to SCSA with CMD_DEV_GONE/STAT_DISCON; sync DCMDs
 * get MFI_STAT_INVALID_STATUS and are woken via
 * complete_cmd_in_sync_mode().  The entire walk runs under cmd_pend_mtx,
 * so packet completion callbacks are invoked with that mutex held.
 * Always returns DDI_SUCCESS.
 */
int
mrsas_complete_pending_cmds(struct mrsas_instance *instance)
{

	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;
	struct mrsas_header *hdr;

	struct mlist_head *pos, *next;

	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_complete_pending_cmds(): Called"));

	mutex_enter(&instance->cmd_pend_mtx);
	/* safe iteration: entries are unlinked while walking */
	mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);
		if (cmd) {
			pkt = cmd->pkt;
			if (pkt) { /* for IO */
				if (((pkt->pkt_flags & FLAG_NOINTR)
				    == 0) && pkt->pkt_comp) {
					pkt->pkt_reason
					    = CMD_DEV_GONE;
					pkt->pkt_statistics
					    = STAT_DISCON;
					con_log(CL_ANN1, (CE_CONT,
					    "fail and posting to scsa "
					    "cmd %p index %x"
					    " pkt %p "
					    "time : %llx",
					    (void *)cmd, cmd->index,
					    (void *)pkt, gethrtime()));
					(*pkt->pkt_comp)(pkt);
				}
			} else { /* for DCMDS */
				if (cmd->sync_cmd == MRSAS_TRUE) {
					hdr = (struct mrsas_header *)&cmd->frame->hdr;
					con_log(CL_ANN1, (CE_CONT,
					    "posting invalid status to application "
					    "cmd %p index %x"
					    " hdr %p "
					    "time : %llx",
					    (void *)cmd, cmd->index,
					    (void *)hdr, gethrtime()));
					hdr->cmd_status = MFI_STAT_INVALID_STATUS;
					complete_cmd_in_sync_mode(instance, cmd);
				}
			}
			/* drop the command from the pending list */
			mlist_del_init(&cmd->list);
		} else {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_complete_pending_cmds:"
			    "NULL command\n"));
		}
		con_log(CL_ANN1, (CE_CONT,
		    "mrsas_complete_pending_cmds:"
		    "looping for more commands\n"));
	}
	mutex_exit(&instance->cmd_pend_mtx);

	con_log(CL_ANN1, (CE_CONT, "mrsas_complete_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}
2699 2698
/*
 * mrsas_print_cmd_details : Dump a single command for diagnostics.
 * detail == 0xDD requests a verbose dump: debug_level_g is temporarily
 * forced to CL_ANN1 (and restored before returning), and on Thunderbolt
 * adapters the full MPI2 RAID_SCSI_IO_REQUEST frame and RAID context are
 * decoded via ddi_get* from the frame pool's access handle.  Any other
 * detail value prints only the one-line summary.
 */
void
mrsas_print_cmd_details(struct mrsas_instance *instance, struct mrsas_cmd *cmd,
    int detail)
{
	struct scsi_pkt *pkt = cmd->pkt;
	Mpi2RaidSCSIIORequest_t *scsi_io = cmd->scsi_io_request;
	int i;
	int saved_level;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;

	/* saved_level is only set/restored on the verbose (0xDD) path */
	if (detail == 0xDD) {
		saved_level = debug_level_g;
		debug_level_g = CL_ANN1;
	}


	if (instance->tbolt) {
		con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p "
		    "cmd->index 0x%x SMID 0x%x timer 0x%x sec\n",
		    (void *)cmd, cmd->index, cmd->SMID, cmd->drv_pkt_time));
	} else {
		con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p "
		    "cmd->index 0x%x timer 0x%x sec\n",
		    (void *)cmd, cmd->index, cmd->drv_pkt_time));
	}

	if (pkt) {
		con_log(CL_ANN1, (CE_CONT, "scsi_pkt CDB[0]=0x%x",
		    pkt->pkt_cdbp[0]));
	} else {
		con_log(CL_ANN1, (CE_CONT, "NO-PKT"));
	}

	if ((detail == 0xDD) && instance->tbolt) {
		/* decode the MPI2 I/O request frame via the DMA handle */
		con_log(CL_ANN1, (CE_CONT, "RAID_SCSI_IO_REQUEST\n"));
		con_log(CL_ANN1, (CE_CONT, "DevHandle=0x%X Function=0x%X "
		    "IoFlags=0x%X SGLFlags=0x%X DataLength=0x%X\n",
		    ddi_get16(acc_handle, &scsi_io->DevHandle),
		    ddi_get8(acc_handle, &scsi_io->Function),
		    ddi_get16(acc_handle, &scsi_io->IoFlags),
		    ddi_get16(acc_handle, &scsi_io->SGLFlags),
		    ddi_get32(acc_handle, &scsi_io->DataLength)));

		for (i = 0; i < 32; i++) {
			con_log(CL_ANN1, (CE_CONT, "CDB[%d]=0x%x ", i,
			    ddi_get8(acc_handle, &scsi_io->CDB.CDB32[i])));
		}

		con_log(CL_ANN1, (CE_CONT, "RAID-CONTEXT\n"));
		con_log(CL_ANN1, (CE_CONT, "status=0x%X extStatus=0x%X "
		    "ldTargetId=0x%X timeoutValue=0x%X regLockFlags=0x%X "
		    "RAIDFlags=0x%X regLockRowLBA=0x%" PRIu64
		    " regLockLength=0x%X spanArm=0x%X\n",
		    ddi_get8(acc_handle, &scsi_io->RaidContext.status),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.extStatus),
		    ddi_get16(acc_handle, &scsi_io->RaidContext.ldTargetId),
		    ddi_get16(acc_handle, &scsi_io->RaidContext.timeoutValue),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.regLockFlags),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.RAIDFlags),
		    ddi_get64(acc_handle, &scsi_io->RaidContext.regLockRowLBA),
		    ddi_get32(acc_handle, &scsi_io->RaidContext.regLockLength),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.spanArm)));
	}

	/* restore the log level saved at entry */
	if (detail == 0xDD) {
		debug_level_g = saved_level;
	}
}
2769 2768
2770 2769
/*
 * mrsas_issue_pending_cmds - re-issue every command on the pending list,
 * typically after an online controller reset (OCR).
 *
 * For each pending command: the packet timeout is bumped to at least
 * debug_timeout_g, the OCR retry counter is incremented, and the command
 * is re-issued (in sync mode for internal sync commands, fire-and-forget
 * otherwise).  If any command has already been retried more than
 * IO_RETRY_COUNT times the adapter is deemed unrecoverable: it is killed
 * (Thunderbolt or legacy variant) and DDI_FAILURE is returned
 * immediately.  Returns DDI_SUCCESS when the whole list was re-issued.
 *
 * NOTE(review): the initial read of head->next and the advance to
 * tmp->next happen with cmd_pend_mtx held only for part of each
 * iteration -- confirm callers guarantee the list is otherwise stable
 * while this runs.
 */
int
mrsas_issue_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head->next;
	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_pending_cmds(): Called"));
	while (tmp != head) {
		/* Fetch the next entry under the pending-list lock. */
		mutex_enter(&instance->cmd_pend_mtx);
		cmd = mlist_entry(tmp, struct mrsas_cmd, list);
		tmp = tmp->next;
		mutex_exit(&instance->cmd_pend_mtx);
		if (cmd) {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_issue_pending_cmds(): "
			    "Got a cmd: cmd %p index 0x%x drv_pkt_time 0x%x ",
			    (void *)cmd, cmd->index, cmd->drv_pkt_time));

			/* Reset command timeout value */
			if (cmd->drv_pkt_time < debug_timeout_g)
				cmd->drv_pkt_time = (uint16_t)debug_timeout_g;

			cmd->retry_count_for_ocr++;

			dev_err(instance->dip, CE_CONT,
			    "cmd retry count = %d\n",
			    cmd->retry_count_for_ocr);

			/* Too many OCR retries: give up and kill the HBA. */
			if (cmd->retry_count_for_ocr > IO_RETRY_COUNT) {
				dev_err(instance->dip,
				    CE_WARN, "mrsas_issue_pending_cmds(): "
				    "cmd->retry_count exceeded limit >%d\n",
				    IO_RETRY_COUNT);
				mrsas_print_cmd_details(instance, cmd, 0xDD);

				dev_err(instance->dip, CE_WARN,
				    "mrsas_issue_pending_cmds():"
				    "Calling KILL Adapter");
				if (instance->tbolt)
					mrsas_tbolt_kill_adapter(instance);
				else
					(void) mrsas_kill_adapter(instance);
				return (DDI_FAILURE);
			}

			pkt = cmd->pkt;
			if (pkt) {
				con_log(CL_ANN1, (CE_CONT,
				    "PENDING PKT-CMD ISSUE: cmd %p index %x "
				    "pkt %p time %llx",
				    (void *)cmd, cmd->index,
				    (void *)pkt,
				    gethrtime()));

			} else {
				dev_err(instance->dip, CE_CONT,
				    "mrsas_issue_pending_cmds(): NO-PKT, "
				    "cmd %p index 0x%x drv_pkt_time 0x%x",
				    (void *)cmd, cmd->index, cmd->drv_pkt_time);
			}


			/* Internal sync commands must complete in order. */
			if (cmd->sync_cmd == MRSAS_TRUE) {
				dev_err(instance->dip, CE_CONT,
				    "mrsas_issue_pending_cmds(): "
				    "SYNC_CMD == TRUE \n");
				instance->func_ptr->issue_cmd_in_sync_mode(
				    instance, cmd);
			} else {
				instance->func_ptr->issue_cmd(cmd, instance);
			}
		} else {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_issue_pending_cmds: NULL command\n"));
		}
		con_log(CL_ANN1, (CE_CONT,
		    "mrsas_issue_pending_cmds:"
		    "looping for more commands"));
	}
	con_log(CL_ANN1, (CE_CONT, "mrsas_issue_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}
2855 2854
2856 2855
2857 2856
2858 2857 /*
2859 2858 * destroy_mfi_frame_pool
2860 2859 */
2861 2860 void
2862 2861 destroy_mfi_frame_pool(struct mrsas_instance *instance)
2863 2862 {
2864 2863 int i;
2865 2864 uint32_t max_cmd = instance->max_fw_cmds;
2866 2865
2867 2866 struct mrsas_cmd *cmd;
2868 2867
2869 2868 /* return all frames to pool */
2870 2869
2871 2870 for (i = 0; i < max_cmd; i++) {
2872 2871
2873 2872 cmd = instance->cmd_list[i];
2874 2873
2875 2874 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
2876 2875 (void) mrsas_free_dma_obj(instance, cmd->frame_dma_obj);
2877 2876
2878 2877 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
2879 2878 }
2880 2879
2881 2880 }
2882 2881
2883 2882 /*
2884 2883 * create_mfi_frame_pool
2885 2884 */
2886 2885 int
2887 2886 create_mfi_frame_pool(struct mrsas_instance *instance)
2888 2887 {
2889 2888 int i = 0;
2890 2889 int cookie_cnt;
2891 2890 uint16_t max_cmd;
2892 2891 uint16_t sge_sz;
2893 2892 uint32_t sgl_sz;
2894 2893 uint32_t tot_frame_size;
2895 2894 struct mrsas_cmd *cmd;
2896 2895 int retval = DDI_SUCCESS;
2897 2896
2898 2897 max_cmd = instance->max_fw_cmds;
2899 2898 sge_sz = sizeof (struct mrsas_sge_ieee);
2900 2899 /* calculated the number of 64byte frames required for SGL */
2901 2900 sgl_sz = sge_sz * instance->max_num_sge;
2902 2901 tot_frame_size = sgl_sz + MRMFI_FRAME_SIZE + SENSE_LENGTH;
2903 2902
2904 2903 con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
2905 2904 "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));
2906 2905
2907 2906 while (i < max_cmd) {
2908 2907 cmd = instance->cmd_list[i];
2909 2908
2910 2909 cmd->frame_dma_obj.size = tot_frame_size;
2911 2910 cmd->frame_dma_obj.dma_attr = mrsas_generic_dma_attr;
2912 2911 cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2913 2912 cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
2914 2913 cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
2915 2914 cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;
2916 2915
2917 2916 cookie_cnt = mrsas_alloc_dma_obj(instance, &cmd->frame_dma_obj,
2918 2917 (uchar_t)DDI_STRUCTURE_LE_ACC);
2919 2918
2920 2919 if (cookie_cnt == -1 || cookie_cnt > 1) {
2921 2920 dev_err(instance->dip, CE_WARN,
2922 2921 "create_mfi_frame_pool: could not alloc.");
2923 2922 retval = DDI_FAILURE;
2924 2923 goto mrsas_undo_frame_pool;
2925 2924 }
2926 2925
2927 2926 bzero(cmd->frame_dma_obj.buffer, tot_frame_size);
2928 2927
2929 2928 cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
2930 2929 cmd->frame = (union mrsas_frame *)cmd->frame_dma_obj.buffer;
2931 2930 cmd->frame_phys_addr =
2932 2931 cmd->frame_dma_obj.dma_cookie[0].dmac_address;
2933 2932
2934 2933 cmd->sense = (uint8_t *)(((unsigned long)
2935 2934 cmd->frame_dma_obj.buffer) +
2936 2935 tot_frame_size - SENSE_LENGTH);
2937 2936 cmd->sense_phys_addr =
2938 2937 cmd->frame_dma_obj.dma_cookie[0].dmac_address +
2939 2938 tot_frame_size - SENSE_LENGTH;
2940 2939
2941 2940 if (!cmd->frame || !cmd->sense) {
2942 2941 dev_err(instance->dip, CE_WARN,
2943 2942 "pci_pool_alloc failed");
2944 2943 retval = ENOMEM;
2945 2944 goto mrsas_undo_frame_pool;
2946 2945 }
2947 2946
2948 2947 ddi_put32(cmd->frame_dma_obj.acc_handle,
2949 2948 &cmd->frame->io.context, cmd->index);
2950 2949 i++;
2951 2950
2952 2951 con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
2953 2952 cmd->index, cmd->frame_phys_addr));
2954 2953 }
2955 2954
2956 2955 return (DDI_SUCCESS);
2957 2956
2958 2957 mrsas_undo_frame_pool:
2959 2958 if (i > 0)
2960 2959 destroy_mfi_frame_pool(instance);
2961 2960
2962 2961 return (retval);
2963 2962 }
2964 2963
/*
 * free_additional_dma_buffer - release the internal DMA buffer (which
 * holds the producer/consumer indexes, the reply queue and the internal
 * data buffer) and the AEN event-detail buffer, whichever of the two is
 * currently allocated.  The status fields are updated to DMA_OBJ_FREED
 * so a repeat call is a harmless no-op.
 */
static void
free_additional_dma_buffer(struct mrsas_instance *instance)
{
	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}

	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_evt_detail_obj);
		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	}
}
2983 2982
/*
 * alloc_additional_dma_buffer - allocate the two extra DMA buffers the
 * MFI interface needs beyond the frame pool:
 *
 *   1. mfi_internal_dma_obj (2 pages), carved up as:
 *        bytes 0-3          producer index
 *        bytes 4-7          consumer index
 *        bytes 8..          reply queue (max_fw_cmds + 1 + 2 entries)
 *        remainder          internal_buf scratch DMA buffer
 *   2. mfi_evt_detail_obj for asynchronous event (AEN) details.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE with any partial allocation
 * unwound.
 */
static int
alloc_additional_dma_buffer(struct mrsas_instance *instance)
{
	uint32_t reply_q_sz;
	uint32_t internal_buf_size = PAGESIZE*2;

	/* max cmds plus 1 + producer & consumer */
	reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);

	/* Single contiguous DMA segment for the whole internal buffer. */
	instance->mfi_internal_dma_obj.size = internal_buf_size;
	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN,
		    "could not alloc reply queue");
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);

	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* Carve the buffer into its fixed-offset sub-regions (see above). */
	instance->producer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer);
	instance->consumer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 4);
	instance->reply_queue = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 8);
	instance->internal_buf = (caddr_t)(((unsigned long)
	    instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
	instance->internal_buf_dmac_add =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
	    (reply_q_sz + 8);
	instance->internal_buf_size = internal_buf_size -
	    (reply_q_sz + 8);

	/* allocate evt_detail */
	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		dev_err(instance->dip, CE_WARN, "alloc_additional_dma_buffer: "
		    "could not allocate data transfer buffer.");
		goto mrsas_undo_internal_buff;
	}

	bzero(instance->mfi_evt_detail_obj.buffer,
	    sizeof (struct mrsas_evt_detail));

	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;

	return (DDI_SUCCESS);

mrsas_undo_internal_buff:
	/* Unwind the internal buffer allocated above. */
	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}

	return (DDI_FAILURE);
}
3059 3058
3060 3059
3061 3060 void
3062 3061 mrsas_free_cmd_pool(struct mrsas_instance *instance)
3063 3062 {
3064 3063 int i;
3065 3064 uint32_t max_cmd;
3066 3065 size_t sz;
3067 3066
3068 3067 /* already freed */
3069 3068 if (instance->cmd_list == NULL) {
3070 3069 return;
3071 3070 }
3072 3071
3073 3072 max_cmd = instance->max_fw_cmds;
3074 3073
3075 3074 /* size of cmd_list array */
3076 3075 sz = sizeof (struct mrsas_cmd *) * max_cmd;
3077 3076
3078 3077 /* First free each cmd */
3079 3078 for (i = 0; i < max_cmd; i++) {
3080 3079 if (instance->cmd_list[i] != NULL) {
3081 3080 kmem_free(instance->cmd_list[i],
3082 3081 sizeof (struct mrsas_cmd));
3083 3082 }
3084 3083
3085 3084 instance->cmd_list[i] = NULL;
3086 3085 }
3087 3086
3088 3087 /* Now, free cmd_list array */
3089 3088 if (instance->cmd_list != NULL)
3090 3089 kmem_free(instance->cmd_list, sz);
3091 3090
3092 3091 instance->cmd_list = NULL;
3093 3092
3094 3093 INIT_LIST_HEAD(&instance->cmd_pool_list);
3095 3094 INIT_LIST_HEAD(&instance->cmd_pend_list);
3096 3095 if (instance->tbolt) {
3097 3096 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
3098 3097 } else {
3099 3098 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3100 3099 }
3101 3100
3102 3101 }
3103 3102
3104 3103
3105 3104 /*
3106 3105 * mrsas_alloc_cmd_pool
3107 3106 */
3108 3107 int
3109 3108 mrsas_alloc_cmd_pool(struct mrsas_instance *instance)
3110 3109 {
3111 3110 int i;
3112 3111 int count;
3113 3112 uint32_t max_cmd;
3114 3113 uint32_t reserve_cmd;
3115 3114 size_t sz;
3116 3115
3117 3116 struct mrsas_cmd *cmd;
3118 3117
3119 3118 max_cmd = instance->max_fw_cmds;
3120 3119 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
3121 3120 "max_cmd %x", max_cmd));
3122 3121
3123 3122
3124 3123 sz = sizeof (struct mrsas_cmd *) * max_cmd;
3125 3124
3126 3125 /*
3127 3126 * instance->cmd_list is an array of struct mrsas_cmd pointers.
3128 3127 * Allocate the dynamic array first and then allocate individual
3129 3128 * commands.
3130 3129 */
3131 3130 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
3132 3131 ASSERT(instance->cmd_list);
3133 3132
3134 3133 /* create a frame pool and assign one frame to each cmd */
3135 3134 for (count = 0; count < max_cmd; count++) {
3136 3135 instance->cmd_list[count] =
3137 3136 kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
3138 3137 ASSERT(instance->cmd_list[count]);
3139 3138 }
3140 3139
3141 3140 /* add all the commands to command pool */
3142 3141
3143 3142 INIT_LIST_HEAD(&instance->cmd_pool_list);
3144 3143 INIT_LIST_HEAD(&instance->cmd_pend_list);
3145 3144 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3146 3145
3147 3146 /*
3148 3147 * When max_cmd is lower than MRSAS_APP_RESERVED_CMDS, how do I split
3149 3148 * into app_cmd and regular cmd? For now, just take
3150 3149 * max(1/8th of max, 4);
3151 3150 */
3152 3151 reserve_cmd = min(MRSAS_APP_RESERVED_CMDS,
3153 3152 max(max_cmd >> 3, MRSAS_APP_MIN_RESERVED_CMDS));
3154 3153
3155 3154 for (i = 0; i < reserve_cmd; i++) {
3156 3155 cmd = instance->cmd_list[i];
3157 3156 cmd->index = i;
3158 3157 mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list);
3159 3158 }
3160 3159
3161 3160
3162 3161 for (i = reserve_cmd; i < max_cmd; i++) {
3163 3162 cmd = instance->cmd_list[i];
3164 3163 cmd->index = i;
3165 3164 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
3166 3165 }
3167 3166
3168 3167 return (DDI_SUCCESS);
3169 3168
3170 3169 mrsas_undo_cmds:
3171 3170 if (count > 0) {
3172 3171 /* free each cmd */
3173 3172 for (i = 0; i < count; i++) {
3174 3173 if (instance->cmd_list[i] != NULL) {
3175 3174 kmem_free(instance->cmd_list[i],
3176 3175 sizeof (struct mrsas_cmd));
3177 3176 }
3178 3177 instance->cmd_list[i] = NULL;
3179 3178 }
3180 3179 }
3181 3180
3182 3181 mrsas_undo_cmd_list:
3183 3182 if (instance->cmd_list != NULL)
3184 3183 kmem_free(instance->cmd_list, sz);
3185 3184 instance->cmd_list = NULL;
3186 3185
3187 3186 return (DDI_FAILURE);
3188 3187 }
3189 3188
3190 3189
3191 3190 /*
3192 3191 * free_space_for_mfi
3193 3192 */
3194 3193 static void
3195 3194 free_space_for_mfi(struct mrsas_instance *instance)
3196 3195 {
3197 3196
3198 3197 /* already freed */
3199 3198 if (instance->cmd_list == NULL) {
3200 3199 return;
3201 3200 }
3202 3201
3203 3202 /* Free additional dma buffer */
3204 3203 free_additional_dma_buffer(instance);
3205 3204
3206 3205 /* Free the MFI frame pool */
3207 3206 destroy_mfi_frame_pool(instance);
3208 3207
3209 3208 /* Free all the commands in the cmd_list */
3210 3209 /* Free the cmd_list buffer itself */
3211 3210 mrsas_free_cmd_pool(instance);
3212 3211 }
3213 3212
3214 3213 /*
3215 3214 * alloc_space_for_mfi
3216 3215 */
3217 3216 static int
3218 3217 alloc_space_for_mfi(struct mrsas_instance *instance)
3219 3218 {
3220 3219 /* Allocate command pool (memory for cmd_list & individual commands) */
3221 3220 if (mrsas_alloc_cmd_pool(instance)) {
3222 3221 dev_err(instance->dip, CE_WARN, "error creating cmd pool");
3223 3222 return (DDI_FAILURE);
3224 3223 }
3225 3224
3226 3225 /* Allocate MFI Frame pool */
3227 3226 if (create_mfi_frame_pool(instance)) {
3228 3227 dev_err(instance->dip, CE_WARN,
3229 3228 "error creating frame DMA pool");
3230 3229 goto mfi_undo_cmd_pool;
3231 3230 }
3232 3231
3233 3232 /* Allocate additional DMA buffer */
3234 3233 if (alloc_additional_dma_buffer(instance)) {
3235 3234 dev_err(instance->dip, CE_WARN,
3236 3235 "error creating frame DMA pool");
3237 3236 goto mfi_undo_frame_pool;
3238 3237 }
3239 3238
3240 3239 return (DDI_SUCCESS);
3241 3240
3242 3241 mfi_undo_frame_pool:
3243 3242 destroy_mfi_frame_pool(instance);
3244 3243
3245 3244 mfi_undo_cmd_pool:
3246 3245 mrsas_free_cmd_pool(instance);
3247 3246
3248 3247 return (DDI_FAILURE);
3249 3248 }
3250 3249
3251 3250
3252 3251
/*
 * get_ctrl_info - query the controller information page from firmware.
 *
 * Issues the MR_DCMD_CTRL_GET_INFO DCMD in poll mode, directing the
 * firmware to DMA the mrsas_ctrl_info page into the instance's
 * internal_buf, then copies the fields the driver cares about
 * (max_request_size, ld_present_count, on_off_properties, product_name)
 * into the caller-supplied *ctrl_info using endian-aware ddi_get*
 * accessors.
 *
 * Returns 0 on success, -1 on DCMD or access-handle failure, or
 * DDI_FAILURE when no MFI command could be obtained.
 * NOTE(review): the mixed -1/DDI_FAILURE error values work because
 * callers only test for non-zero -- confirm before relying on the
 * exact value.
 */
static int
get_ctrl_info(struct mrsas_instance *instance,
    struct mrsas_ctrl_info *ctrl_info)
{
	int ret = 0;

	struct mrsas_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	struct mrsas_ctrl_info *ci;

	/* Thunderbolt HBAs route MFI frames through the RAID message pool. */
	if (instance->tbolt) {
		cmd = get_raid_msg_mfi_pkt(instance);
	} else {
		cmd = mrsas_get_mfi_pkt(instance);
	}

	if (!cmd) {
		con_log(CL_ANN, (CE_WARN,
		    "Failed to get a cmd for ctrl info"));
		DTRACE_PROBE2(info_mfi_err, uint16_t, instance->fw_outstanding,
		    uint16_t, instance->max_fw_cmds);
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	/* Firmware DMAs the ctrl-info page into the internal buffer. */
	ci = (struct mrsas_ctrl_info *)instance->internal_buf;

	if (!ci) {
		dev_err(instance->dip, CE_WARN,
		    "Failed to alloc mem for ctrl info");
		mrsas_return_mfi_pkt(instance, cmd);
		return (DDI_FAILURE);
	}

	(void) memset(ci, 0, sizeof (struct mrsas_ctrl_info));

	/* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	/* Build the poll-mode DCMD frame with a single read SGE. */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_ctrl_info));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_GET_INFO);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    instance->internal_buf_dmac_add);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_ctrl_info));

	cmd->frame_count = 1;

	if (instance->tbolt) {
		mr_sas_tbolt_build_mfi_cmd(instance, cmd);
	}

	/* Issue and poll; zero return means the DCMD completed OK. */
	if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		ret = 0;

		/* Copy out the fields the driver uses, endian-safely. */
		ctrl_info->max_request_size = ddi_get32(
		    cmd->frame_dma_obj.acc_handle, &ci->max_request_size);

		ctrl_info->ld_present_count = ddi_get16(
		    cmd->frame_dma_obj.acc_handle, &ci->ld_present_count);

		ctrl_info->properties.on_off_properties = ddi_get32(
		    cmd->frame_dma_obj.acc_handle,
		    &ci->properties.on_off_properties);
		ddi_rep_get8(cmd->frame_dma_obj.acc_handle,
		    (uint8_t *)(ctrl_info->product_name),
		    (uint8_t *)(ci->product_name), 80 * sizeof (char),
		    DDI_DEV_AUTOINCR);
		/* should get more members of ci with ddi_get when needed */
	} else {
		dev_err(instance->dip, CE_WARN,
		    "get_ctrl_info: Ctrl info failed");
		ret = -1;
	}

	/* FMA check on the access handles used above. */
	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
		ret = -1;
	}
	if (instance->tbolt) {
		return_raid_msg_mfi_pkt(instance, cmd);
	} else {
		mrsas_return_mfi_pkt(instance, cmd);
	}

	return (ret);
}
3357 3356
3358 3357 /*
3359 3358 * abort_aen_cmd
3360 3359 */
3361 3360 static int
3362 3361 abort_aen_cmd(struct mrsas_instance *instance,
3363 3362 struct mrsas_cmd *cmd_to_abort)
3364 3363 {
3365 3364 int ret = 0;
3366 3365
3367 3366 struct mrsas_cmd *cmd;
3368 3367 struct mrsas_abort_frame *abort_fr;
3369 3368
3370 3369 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_aen:%d", __LINE__));
3371 3370
3372 3371 if (instance->tbolt) {
3373 3372 cmd = get_raid_msg_mfi_pkt(instance);
3374 3373 } else {
3375 3374 cmd = mrsas_get_mfi_pkt(instance);
3376 3375 }
3377 3376
3378 3377 if (!cmd) {
3379 3378 con_log(CL_ANN1, (CE_WARN,
3380 3379 "abort_aen_cmd():Failed to get a cmd for abort_aen_cmd"));
3381 3380 DTRACE_PROBE2(abort_mfi_err, uint16_t, instance->fw_outstanding,
3382 3381 uint16_t, instance->max_fw_cmds);
3383 3382 return (DDI_FAILURE);
3384 3383 }
3385 3384
3386 3385 /* Clear the frame buffer and assign back the context id */
3387 3386 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3388 3387 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3389 3388 cmd->index);
3390 3389
3391 3390 abort_fr = &cmd->frame->abort;
3392 3391
3393 3392 /* prepare and issue the abort frame */
3394 3393 ddi_put8(cmd->frame_dma_obj.acc_handle,
3395 3394 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3396 3395 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3397 3396 MFI_CMD_STATUS_SYNC_MODE);
3398 3397 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3399 3398 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3400 3399 cmd_to_abort->index);
3401 3400 ddi_put32(cmd->frame_dma_obj.acc_handle,
3402 3401 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3403 3402 ddi_put32(cmd->frame_dma_obj.acc_handle,
3404 3403 &abort_fr->abort_mfi_phys_addr_hi, 0);
3405 3404
3406 3405 instance->aen_cmd->abort_aen = 1;
3407 3406
3408 3407 cmd->frame_count = 1;
3409 3408
3410 3409 if (instance->tbolt) {
3411 3410 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3412 3411 }
3413 3412
3414 3413 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3415 3414 con_log(CL_ANN1, (CE_WARN,
3416 3415 "abort_aen_cmd: issue_cmd_in_poll_mode failed"));
3417 3416 ret = -1;
3418 3417 } else {
3419 3418 ret = 0;
3420 3419 }
3421 3420
3422 3421 instance->aen_cmd->abort_aen = 1;
3423 3422 instance->aen_cmd = 0;
3424 3423
3425 3424 if (instance->tbolt) {
3426 3425 return_raid_msg_mfi_pkt(instance, cmd);
3427 3426 } else {
3428 3427 mrsas_return_mfi_pkt(instance, cmd);
3429 3428 }
3430 3429
3431 3430 atomic_add_16(&instance->fw_outstanding, (-1));
3432 3431
3433 3432 return (ret);
3434 3433 }
3435 3434
3436 3435
/*
 * mrsas_build_init_cmd - build the MFI INIT frame for *cmd_ptr.
 *
 * The INIT frame carries a pointer to a queue-info structure describing
 * the reply queue and the producer/consumer index locations inside
 * mfi_internal_dma_obj (offsets 0, 4 and 8 -- the layout established by
 * alloc_additional_dma_buffer()).  Since the INIT frame needs no SGL,
 * the queue-info structure is placed in the SGL area 64 bytes into the
 * command's frame.  A DMA buffer holding the driver version string is
 * also allocated here and its address stored in the frame.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if the driver-version DMA buffer
 * could not be allocated (the caller is then responsible for returning
 * the command and unwinding).
 */
static int
mrsas_build_init_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd **cmd_ptr)
{
	struct mrsas_cmd *cmd;
	struct mrsas_init_frame *init_frame;
	struct mrsas_init_queue_info *initq_info;
	struct mrsas_drv_ver drv_ver_info;


	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes. For
	 * this frame - since we don't need any SGL - we use SGL's space as
	 * queue info structure
	 */
	cmd = *cmd_ptr;


	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	init_frame = (struct mrsas_init_frame *)cmd->frame;
	initq_info = (struct mrsas_init_queue_info *)
	    ((unsigned long)init_frame + 64);

	(void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
	(void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));

	ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);

	/* Reply queue holds one entry more than the max outstanding cmds. */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);

	/* Producer index lives at offset 0 of the internal DMA buffer. */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);

	/* Consumer index at offset 4. */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);

	/* Reply queue itself starts at offset 8. */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);

	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &init_frame->cmd, MFI_CMD_OP_INIT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
	/* Queue info is the SGL area 64 bytes into this frame (see above). */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_lo,
	    cmd->frame_phys_addr + 64);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_hi, 0);


	/* fill driver version information */
	fill_up_drv_ver(&drv_ver_info);

	/* allocate the driver version data transfer buffer */
	instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
	instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		con_log(CL_ANN, (CE_WARN,
		    "init_mfi : Could not allocate driver version buffer."));
		return (DDI_FAILURE);
	}
	/* copy driver version to dma buffer */
	(void) memset(instance->drv_ver_dma_obj.buffer, 0,
	    sizeof (drv_ver_info.drv_ver));
	ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
	    (uint8_t *)drv_ver_info.drv_ver,
	    (uint8_t *)instance->drv_ver_dma_obj.buffer,
	    sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);


	/* copy driver version physical address to init frame */
	ddi_put64(cmd->frame_dma_obj.acc_handle, &init_frame->driverversion,
	    instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);

	ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
	    sizeof (struct mrsas_init_queue_info));

	cmd->frame_count = 1;

	*cmd_ptr = cmd;

	return (DDI_SUCCESS);
}
3542 3541
3543 3542
/*
 * mrsas_init_adapter_ppc - Initialize MFI interface adapter.
 *
 * Allocates all MFI memory (command pool, frame pool, extra DMA
 * buffers), builds the INIT frame via mrsas_build_init_cmd(), and sends
 * it to the firmware in poll mode.  On success the IEEE-SGL capability
 * is probed from the firmware status register and the unroll flags are
 * set so detach can release what was allocated here.
 *
 * Error unwinding: fail_fw_init first frees the driver-version DMA
 * buffer allocated by mrsas_build_init_cmd(), then falls through to
 * fail_undo_alloc_mfi_space, which returns the MFI command and frees
 * all MFI memory.  A build failure jumps straight to the latter (the
 * driver-version buffer was not allocated in that case).
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
mrsas_init_adapter_ppc(struct mrsas_instance *instance)
{
	struct mrsas_cmd *cmd;

	/*
	 * allocate memory for mfi adapter(cmd pool, individual commands, mfi
	 * frames etc
	 */
	if (alloc_space_for_mfi(instance) != DDI_SUCCESS) {
		con_log(CL_ANN, (CE_NOTE,
		    "Error, failed to allocate memory for MFI adapter"));
		return (DDI_FAILURE);
	}

	/* Build INIT command */
	cmd = mrsas_get_mfi_pkt(instance);
	if (cmd == NULL) {
		DTRACE_PROBE2(init_adapter_mfi_err, uint16_t,
		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
		return (DDI_FAILURE);
	}

	if (mrsas_build_init_cmd(instance, &cmd) != DDI_SUCCESS) {
		con_log(CL_ANN,
		    (CE_NOTE, "Error, failed to build INIT command"));

		goto fail_undo_alloc_mfi_space;
	}

	/*
	 * Disable interrupt before sending init frame ( see linux driver code)
	 * send INIT MFI frame in polled mode
	 */
	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
		goto fail_fw_init;
	}

	/* FMA check on the handles touched while issuing the INIT frame. */
	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
		goto fail_fw_init;
	mrsas_return_mfi_pkt(instance, cmd);

	/* Bit 26 of the status register advertises IEEE SGL support. */
	if (ctio_enable &&
	    (instance->func_ptr->read_fw_status_reg(instance) & 0x04000000)) {
		con_log(CL_ANN, (CE_NOTE, "mr_sas: IEEE SGL's supported"));
		instance->flag_ieee = 1;
	} else {
		instance->flag_ieee = 0;
	}

	/* Skinny controllers are expected to always support IEEE SGLs. */
	ASSERT(!instance->skinny || instance->flag_ieee);

	/* Record what detach must undo. */
	instance->unroll.alloc_space_mfi = 1;
	instance->unroll.verBuff = 1;

	return (DDI_SUCCESS);


fail_fw_init:
	(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);

fail_undo_alloc_mfi_space:
	mrsas_return_mfi_pkt(instance, cmd);
	free_space_for_mfi(instance);

	return (DDI_FAILURE);

}
3616 3615
3617 3616 /*
3618 3617 * mrsas_init_adapter - Initialize adapter.
3619 3618 */
3620 3619 int
3621 3620 mrsas_init_adapter(struct mrsas_instance *instance)
3622 3621 {
3623 3622 struct mrsas_ctrl_info ctrl_info;
3624 3623
3625 3624
3626 3625 /* we expect the FW state to be READY */
3627 3626 if (mfi_state_transition_to_ready(instance)) {
3628 3627 con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready"));
3629 3628 return (DDI_FAILURE);
3630 3629 }
3631 3630
3632 3631 /* get various operational parameters from status register */
3633 3632 instance->max_num_sge =
3634 3633 (instance->func_ptr->read_fw_status_reg(instance) &
3635 3634 0xFF0000) >> 0x10;
3636 3635 instance->max_num_sge =
3637 3636 (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ?
3638 3637 MRSAS_MAX_SGE_CNT : instance->max_num_sge;
3639 3638
3640 3639 /*
3641 3640 * Reduce the max supported cmds by 1. This is to ensure that the
3642 3641 * reply_q_sz (1 more than the max cmd that driver may send)
3643 3642 * does not exceed max cmds that the FW can support
3644 3643 */
3645 3644 instance->max_fw_cmds =
3646 3645 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
3647 3646 instance->max_fw_cmds = instance->max_fw_cmds - 1;
3648 3647
3649 3648
3650 3649
3651 3650 /* Initialize adapter */
3652 3651 if (instance->func_ptr->init_adapter(instance) != DDI_SUCCESS) {
3653 3652 con_log(CL_ANN,
3654 3653 (CE_WARN, "mr_sas: could not initialize adapter"));
3655 3654 return (DDI_FAILURE);
3656 3655 }
3657 3656
3658 3657 /* gather misc FW related information */
3659 3658 instance->disable_online_ctrl_reset = 0;
3660 3659
3661 3660 if (!get_ctrl_info(instance, &ctrl_info)) {
3662 3661 instance->max_sectors_per_req = ctrl_info.max_request_size;
3663 3662 con_log(CL_ANN1, (CE_NOTE,
3664 3663 "product name %s ld present %d",
3665 3664 ctrl_info.product_name, ctrl_info.ld_present_count));
3666 3665 } else {
3667 3666 instance->max_sectors_per_req = instance->max_num_sge *
3668 3667 PAGESIZE / 512;
3669 3668 }
3670 3669
3671 3670 if (ctrl_info.properties.on_off_properties & DISABLE_OCR_PROP_FLAG)
3672 3671 instance->disable_online_ctrl_reset = 1;
3673 3672
3674 3673 return (DDI_SUCCESS);
3675 3674
3676 3675 }
3677 3676
3678 3677
3679 3678
/*
 * mrsas_issue_init_mfi - issue an MFI INIT frame to the firmware in
 * polled mode using an application-pool command packet.
 *
 * Builds a queue-info structure describing the reply queue and the
 * producer/consumer index locations inside the driver's internal DMA
 * area, points the INIT frame at it, and polls for completion.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mrsas_issue_init_mfi(struct mrsas_instance *instance)
{
	struct mrsas_cmd		*cmd;
	struct mrsas_init_frame		*init_frame;
	struct mrsas_init_queue_info	*initq_info;

	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes. For
	 * this frame - since we don't need any SGL - we use SGL's space as
	 * queue info structure
	 */
	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_issue_init_mfi: entry\n"));
	cmd = get_mfi_app_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_issue_init_mfi: get_pkt failed\n"));
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	/* queue info lives in the SGL area, 64 bytes past the frame start */
	init_frame = (struct mrsas_init_frame *)cmd->frame;
	initq_info = (struct mrsas_init_queue_info *)
	    ((unsigned long)init_frame + 64);

	(void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
	(void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));

	ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);

	/* reply queue holds one more entry than the max outstanding cmds */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);

	/*
	 * Producer and consumer indexes are the first two 32-bit words of
	 * the internal DMA buffer; the reply queue itself starts at +8.
	 * High halves of the 64-bit addresses are zero (32-bit DMA).
	 */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);

	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);

	/* INIT opcode, polled completion status, queue-info pointer */
	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &init_frame->cmd, MFI_CMD_OP_INIT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_lo,
	    cmd->frame_phys_addr + 64);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_hi, 0);

	ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
	    sizeof (struct mrsas_init_queue_info));

	cmd->frame_count = 1;

	/* issue the init frame in polled mode */
	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_issue_init_mfi():failed to "
		    "init firmware"));
		return_mfi_app_pkt(instance, cmd);
		return (DDI_FAILURE);
	}

	/* FMA check on the DMA/access handles used by this command */
	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
		return_mfi_app_pkt(instance, cmd);
		return (DDI_FAILURE);
	}

	return_mfi_app_pkt(instance, cmd);
	con_log(CL_ANN1, (CE_CONT, "mrsas_issue_init_mfi: Done"));

	return (DDI_SUCCESS);
}
3771 3770 /*
3772 3771 * mfi_state_transition_to_ready : Move the FW to READY state
3773 3772 *
3774 3773 * @reg_set : MFI register set
3775 3774 */
3776 3775 int
3777 3776 mfi_state_transition_to_ready(struct mrsas_instance *instance)
3778 3777 {
3779 3778 int i;
3780 3779 uint8_t max_wait;
3781 3780 uint32_t fw_ctrl = 0;
3782 3781 uint32_t fw_state;
3783 3782 uint32_t cur_state;
3784 3783 uint32_t cur_abs_reg_val;
3785 3784 uint32_t prev_abs_reg_val;
3786 3785 uint32_t status;
3787 3786
3788 3787 cur_abs_reg_val =
3789 3788 instance->func_ptr->read_fw_status_reg(instance);
3790 3789 fw_state =
3791 3790 cur_abs_reg_val & MFI_STATE_MASK;
3792 3791 con_log(CL_ANN1, (CE_CONT,
3793 3792 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));
3794 3793
3795 3794 while (fw_state != MFI_STATE_READY) {
3796 3795 con_log(CL_ANN, (CE_CONT,
3797 3796 "mfi_state_transition_to_ready:FW state%x", fw_state));
3798 3797
3799 3798 switch (fw_state) {
3800 3799 case MFI_STATE_FAULT:
3801 3800 con_log(CL_ANN, (CE_NOTE,
3802 3801 "mr_sas: FW in FAULT state!!"));
3803 3802
3804 3803 return (ENODEV);
3805 3804 case MFI_STATE_WAIT_HANDSHAKE:
3806 3805 /* set the CLR bit in IMR0 */
3807 3806 con_log(CL_ANN1, (CE_NOTE,
3808 3807 "mr_sas: FW waiting for HANDSHAKE"));
3809 3808 /*
3810 3809 * PCI_Hot Plug: MFI F/W requires
3811 3810 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3812 3811 * to be set
3813 3812 */
3814 3813 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
3815 3814 if (!instance->tbolt && !instance->skinny) {
3816 3815 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
3817 3816 MFI_INIT_HOTPLUG, instance);
3818 3817 } else {
3819 3818 WR_RESERVED0_REGISTER(MFI_INIT_CLEAR_HANDSHAKE |
3820 3819 MFI_INIT_HOTPLUG, instance);
3821 3820 }
3822 3821 max_wait = (instance->tbolt == 1) ? 180 : 2;
3823 3822 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3824 3823 break;
3825 3824 case MFI_STATE_BOOT_MESSAGE_PENDING:
3826 3825 /* set the CLR bit in IMR0 */
3827 3826 con_log(CL_ANN1, (CE_NOTE,
3828 3827 "mr_sas: FW state boot message pending"));
3829 3828 /*
3830 3829 * PCI_Hot Plug: MFI F/W requires
3831 3830 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3832 3831 * to be set
3833 3832 */
3834 3833 if (!instance->tbolt && !instance->skinny) {
3835 3834 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);
3836 3835 } else {
3837 3836 WR_RESERVED0_REGISTER(MFI_INIT_HOTPLUG,
3838 3837 instance);
3839 3838 }
3840 3839 max_wait = (instance->tbolt == 1) ? 180 : 10;
3841 3840 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3842 3841 break;
3843 3842 case MFI_STATE_OPERATIONAL:
3844 3843 /* bring it to READY state; assuming max wait 2 secs */
3845 3844 instance->func_ptr->disable_intr(instance);
3846 3845 con_log(CL_ANN1, (CE_NOTE,
3847 3846 "mr_sas: FW in OPERATIONAL state"));
3848 3847 /*
3849 3848 * PCI_Hot Plug: MFI F/W requires
3850 3849 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
3851 3850 * to be set
3852 3851 */
3853 3852 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
3854 3853 if (!instance->tbolt && !instance->skinny) {
3855 3854 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
3856 3855 } else {
3857 3856 WR_RESERVED0_REGISTER(MFI_RESET_FLAGS,
3858 3857 instance);
3859 3858
3860 3859 for (i = 0; i < (10 * 1000); i++) {
3861 3860 status =
3862 3861 RD_RESERVED0_REGISTER(instance);
3863 3862 if (status & 1) {
3864 3863 delay(1 *
3865 3864 drv_usectohz(MILLISEC));
3866 3865 } else {
3867 3866 break;
3868 3867 }
3869 3868 }
3870 3869
3871 3870 }
3872 3871 max_wait = (instance->tbolt == 1) ? 180 : 10;
3873 3872 cur_state = MFI_STATE_OPERATIONAL;
3874 3873 break;
3875 3874 case MFI_STATE_UNDEFINED:
3876 3875 /* this state should not last for more than 2 seconds */
3877 3876 con_log(CL_ANN1, (CE_NOTE, "FW state undefined"));
3878 3877
3879 3878 max_wait = (instance->tbolt == 1) ? 180 : 2;
3880 3879 cur_state = MFI_STATE_UNDEFINED;
3881 3880 break;
3882 3881 case MFI_STATE_BB_INIT:
3883 3882 max_wait = (instance->tbolt == 1) ? 180 : 2;
3884 3883 cur_state = MFI_STATE_BB_INIT;
3885 3884 break;
3886 3885 case MFI_STATE_FW_INIT:
3887 3886 max_wait = (instance->tbolt == 1) ? 180 : 2;
3888 3887 cur_state = MFI_STATE_FW_INIT;
3889 3888 break;
3890 3889 case MFI_STATE_FW_INIT_2:
3891 3890 max_wait = 180;
3892 3891 cur_state = MFI_STATE_FW_INIT_2;
3893 3892 break;
3894 3893 case MFI_STATE_DEVICE_SCAN:
3895 3894 max_wait = 180;
3896 3895 cur_state = MFI_STATE_DEVICE_SCAN;
3897 3896 prev_abs_reg_val = cur_abs_reg_val;
3898 3897 con_log(CL_NONE, (CE_NOTE,
3899 3898 "Device scan in progress ...\n"));
3900 3899 break;
3901 3900 case MFI_STATE_FLUSH_CACHE:
3902 3901 max_wait = 180;
3903 3902 cur_state = MFI_STATE_FLUSH_CACHE;
3904 3903 break;
3905 3904 default:
3906 3905 con_log(CL_ANN1, (CE_NOTE,
3907 3906 "mr_sas: Unknown state 0x%x", fw_state));
3908 3907 return (ENODEV);
3909 3908 }
3910 3909
3911 3910 /* the cur_state should not last for more than max_wait secs */
3912 3911 for (i = 0; i < (max_wait * MILLISEC); i++) {
3913 3912 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
3914 3913 cur_abs_reg_val =
3915 3914 instance->func_ptr->read_fw_status_reg(instance);
3916 3915 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3917 3916
3918 3917 if (fw_state == cur_state) {
3919 3918 delay(1 * drv_usectohz(MILLISEC));
3920 3919 } else {
3921 3920 break;
3922 3921 }
3923 3922 }
3924 3923 if (fw_state == MFI_STATE_DEVICE_SCAN) {
3925 3924 if (prev_abs_reg_val != cur_abs_reg_val) {
3926 3925 continue;
3927 3926 }
3928 3927 }
3929 3928
3930 3929 /* return error if fw_state hasn't changed after max_wait */
3931 3930 if (fw_state == cur_state) {
3932 3931 con_log(CL_ANN1, (CE_WARN,
3933 3932 "FW state hasn't changed in %d secs", max_wait));
3934 3933 return (ENODEV);
3935 3934 }
3936 3935 };
3937 3936
3938 3937 /* This may also need to apply to Skinny, but for now, don't worry. */
3939 3938 if (!instance->tbolt && !instance->skinny) {
3940 3939 fw_ctrl = RD_IB_DOORBELL(instance);
3941 3940 con_log(CL_ANN1, (CE_CONT,
3942 3941 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
3943 3942
3944 3943 /*
3945 3944 * Write 0xF to the doorbell register to do the following.
3946 3945 * - Abort all outstanding commands (bit 0).
3947 3946 * - Transition from OPERATIONAL to READY state (bit 1).
3948 3947 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
3949 3948 * - Set to release FW to continue running (i.e. BIOS handshake
3950 3949 * (bit 3).
3951 3950 */
3952 3951 WR_IB_DOORBELL(0xF, instance);
3953 3952 }
3954 3953
3955 3954 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
3956 3955 return (EIO);
3957 3956 }
3958 3957
3959 3958 return (DDI_SUCCESS);
3960 3959 }
3961 3960
3962 3961 /*
3963 3962 * get_seq_num
3964 3963 */
3965 3964 static int
3966 3965 get_seq_num(struct mrsas_instance *instance,
3967 3966 struct mrsas_evt_log_info *eli)
3968 3967 {
3969 3968 int ret = DDI_SUCCESS;
3970 3969
3971 3970 dma_obj_t dcmd_dma_obj;
3972 3971 struct mrsas_cmd *cmd;
3973 3972 struct mrsas_dcmd_frame *dcmd;
3974 3973 struct mrsas_evt_log_info *eli_tmp;
3975 3974 if (instance->tbolt) {
3976 3975 cmd = get_raid_msg_mfi_pkt(instance);
3977 3976 } else {
3978 3977 cmd = mrsas_get_mfi_pkt(instance);
3979 3978 }
3980 3979
3981 3980 if (!cmd) {
3982 3981 dev_err(instance->dip, CE_WARN, "failed to get a cmd");
3983 3982 DTRACE_PROBE2(seq_num_mfi_err, uint16_t,
3984 3983 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
3985 3984 return (ENOMEM);
3986 3985 }
3987 3986
3988 3987 /* Clear the frame buffer and assign back the context id */
3989 3988 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3990 3989 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3991 3990 cmd->index);
3992 3991
3993 3992 dcmd = &cmd->frame->dcmd;
3994 3993
3995 3994 /* allocate the data transfer buffer */
3996 3995 dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info);
3997 3996 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3998 3997 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3999 3998 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
4000 3999 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
4001 4000 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
4002 4001
4003 4002 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
4004 4003 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
4005 4004 dev_err(instance->dip, CE_WARN,
4006 4005 "get_seq_num: could not allocate data transfer buffer.");
4007 4006 return (DDI_FAILURE);
4008 4007 }
4009 4008
4010 4009 (void) memset(dcmd_dma_obj.buffer, 0,
4011 4010 sizeof (struct mrsas_evt_log_info));
4012 4011
4013 4012 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4014 4013
4015 4014 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
4016 4015 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
4017 4016 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
4018 4017 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
4019 4018 MFI_FRAME_DIR_READ);
4020 4019 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
4021 4020 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
4022 4021 sizeof (struct mrsas_evt_log_info));
4023 4022 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4024 4023 MR_DCMD_CTRL_EVENT_GET_INFO);
4025 4024 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
4026 4025 sizeof (struct mrsas_evt_log_info));
4027 4026 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
4028 4027 dcmd_dma_obj.dma_cookie[0].dmac_address);
4029 4028
4030 4029 cmd->sync_cmd = MRSAS_TRUE;
4031 4030 cmd->frame_count = 1;
4032 4031
4033 4032 if (instance->tbolt) {
4034 4033 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4035 4034 }
4036 4035
4037 4036 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
4038 4037 dev_err(instance->dip, CE_WARN, "get_seq_num: "
4039 4038 "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO");
4040 4039 ret = DDI_FAILURE;
4041 4040 } else {
4042 4041 eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer;
4043 4042 eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle,
4044 4043 &eli_tmp->newest_seq_num);
4045 4044 ret = DDI_SUCCESS;
4046 4045 }
4047 4046
4048 4047 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
4049 4048 ret = DDI_FAILURE;
4050 4049
4051 4050 if (instance->tbolt) {
4052 4051 return_raid_msg_mfi_pkt(instance, cmd);
4053 4052 } else {
4054 4053 mrsas_return_mfi_pkt(instance, cmd);
4055 4054 }
4056 4055
4057 4056 return (ret);
4058 4057 }
4059 4058
4060 4059 /*
4061 4060 * start_mfi_aen
4062 4061 */
4063 4062 static int
4064 4063 start_mfi_aen(struct mrsas_instance *instance)
4065 4064 {
4066 4065 int ret = 0;
4067 4066
4068 4067 struct mrsas_evt_log_info eli;
4069 4068 union mrsas_evt_class_locale class_locale;
4070 4069
4071 4070 /* get the latest sequence number from FW */
4072 4071 (void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info));
4073 4072
4074 4073 if (get_seq_num(instance, &eli)) {
4075 4074 dev_err(instance->dip, CE_WARN,
4076 4075 "start_mfi_aen: failed to get seq num");
4077 4076 return (-1);
4078 4077 }
4079 4078
4080 4079 /* register AEN with FW for latest sequence number plus 1 */
4081 4080 class_locale.members.reserved = 0;
4082 4081 class_locale.members.locale = LE_16(MR_EVT_LOCALE_ALL);
4083 4082 class_locale.members.class = MR_EVT_CLASS_INFO;
4084 4083 class_locale.word = LE_32(class_locale.word);
4085 4084 ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
4086 4085 class_locale.word);
4087 4086
4088 4087 if (ret) {
4089 4088 dev_err(instance->dip, CE_WARN,
4090 4089 "start_mfi_aen: aen registration failed");
4091 4090 return (-1);
4092 4091 }
4093 4092
4094 4093
4095 4094 return (ret);
4096 4095 }
4097 4096
/*
 * flush_cache - ask the firmware to flush controller and disk caches.
 *
 * Issues MR_DCMD_CTRL_CACHE_FLUSH in polled mode with the
 * MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE mailbox flags.  Best-effort:
 * failures are only logged, no status is returned to the caller.
 */
static void
flush_cache(struct mrsas_instance *instance)
{
	struct mrsas_cmd		*cmd = NULL;
	struct mrsas_dcmd_frame		*dcmd;

	if (instance->tbolt) {
		cmd = get_raid_msg_mfi_pkt(instance);
	} else {
		cmd = mrsas_get_mfi_pkt(instance);
	}

	if (!cmd) {
		con_log(CL_ANN1, (CE_WARN,
		    "flush_cache():Failed to get a cmd for flush_cache"));
		DTRACE_PROBE2(flush_cache_err, uint16_t,
		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
		return;
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	/* no data transfer: sge_count 0, direction NONE, xfer_len 0 */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 0);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_NONE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_CACHE_FLUSH);
	/* mailbox byte 0 selects which caches to flush */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0],
	    MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE);

	cmd->frame_count = 1;

	if (instance->tbolt) {
		mr_sas_tbolt_build_mfi_cmd(instance, cmd);
	}

	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN1, (CE_WARN,
		    "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
	}
	con_log(CL_ANN1, (CE_CONT, "flush_cache done"));
	if (instance->tbolt) {
		return_raid_msg_mfi_pkt(instance, cmd);
	} else {
		mrsas_return_mfi_pkt(instance, cmd);
	}

}
4159 4158
/*
 * service_mfi_aen - Completes an AEN command
 * @instance: Adapter soft state
 * @cmd: Command to be completed
 *
 * Posts a sysevent for the AEN, dispatches device (un)configuration
 * work based on the event code (LD create/delete, PD insert/remove/state
 * change, config cleared), then re-arms the AEN command with the next
 * sequence number and re-issues it to the firmware.
 */
void
service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	uint32_t seq_num;
	struct mrsas_evt_detail *evt_detail =
	    (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
	int rval = 0;
	int tgt = 0;
	uint8_t dtype;
	mrsas_pd_address_t *pd_addr;
	ddi_acc_handle_t acc_handle;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	acc_handle = cmd->frame_dma_obj.acc_handle;
	cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status);
	/* ENODATA from the FW just means "no event yet" - not an error */
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}

	/*
	 * log the MFI AEN event to the sysevent queue so that
	 * application will get noticed
	 */
	if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
	    NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
		int instance_no = ddi_get_instance(instance->dip);
		con_log(CL_ANN, (CE_WARN,
		    "mr_sas%d: Failed to log AEN event", instance_no));
	}
	/*
	 * Check for any ld devices that has changed state. i.e. online
	 * or offline.
	 */
	con_log(CL_ANN1, (CE_CONT,
	    "AEN: code = %x class = %x locale = %x args = %x",
	    ddi_get32(acc_handle, &evt_detail->code),
	    evt_detail->cl.members.class,
	    ddi_get16(acc_handle, &evt_detail->cl.members.locale),
	    ddi_get8(acc_handle, &evt_detail->arg_type)));

	switch (ddi_get32(acc_handle, &evt_detail->code)) {
	case MR_EVT_CFG_CLEARED: {
		/* whole config gone: unconfigure every attached LD target */
		for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
			if (instance->mr_ld_list[tgt].dip != NULL) {
				mutex_enter(&instance->config_dev_mtx);
				instance->mr_ld_list[tgt].flag =
				    (uint8_t)~MRDRV_TGT_VALID;
				mutex_exit(&instance->config_dev_mtx);
				rval = mrsas_service_evt(instance, tgt, 0,
				    MRSAS_EVT_UNCONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN,
				    "mr_sas: CFG CLEARED AEN rval = %d "
				    "tgt id = %d", rval, tgt));
			}
		}
		break;
	}

	case MR_EVT_LD_DELETED: {
		/* mark the LD invalid, then queue async unconfigure */
		tgt = ddi_get16(acc_handle, &evt_detail->args.ld.target_id);
		mutex_enter(&instance->config_dev_mtx);
		instance->mr_ld_list[tgt].flag = (uint8_t)~MRDRV_TGT_VALID;
		mutex_exit(&instance->config_dev_mtx);
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_UNCONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_DELETED */

	case MR_EVT_LD_CREATED: {
		/* queue async configuration of the new LD target */
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_CONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_CREATED */

	/* PD events are handled only on tbolt/skinny (JBOD-capable) HBAs */
	case MR_EVT_PD_REMOVED_EXT: {
		if (instance->tbolt || instance->skinny) {
			pd_addr = &evt_detail->args.pd_addr;
			dtype = pd_addr->scsi_dev_type;
			con_log(CL_DLEVEL1, (CE_NOTE,
			    " MR_EVT_PD_REMOVED_EXT: dtype = %x,"
			    " arg_type = %d ", dtype, evt_detail->arg_type));
			tgt = ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id);
			mutex_enter(&instance->config_dev_mtx);
			instance->mr_tbolt_pd_list[tgt].flag =
			    (uint8_t)~MRDRV_TGT_VALID;
			mutex_exit(&instance->config_dev_mtx);
			rval = mrsas_service_evt(instance, ddi_get16(
			    acc_handle, &evt_detail->args.pd.device_id),
			    1, MRSAS_EVT_UNCONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
			    "rval = %d tgt id = %d ", rval,
			    ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id)));
		}
		break;
	} /* End of MR_EVT_PD_REMOVED_EXT */

	case MR_EVT_PD_INSERTED_EXT: {
		if (instance->tbolt || instance->skinny) {
			rval = mrsas_service_evt(instance,
			    ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id),
			    1, MRSAS_EVT_CONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_INSERTEDi_EXT:"
			    "rval = %d tgt id = %d ", rval,
			    ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id)));
		}
		break;
	} /* End of MR_EVT_PD_INSERTED_EXT */

	case MR_EVT_PD_STATE_CHANGE: {
		if (instance->tbolt || instance->skinny) {
			tgt = ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id);
			/* leaving PD_SYSTEM: treat as a removal */
			if ((evt_detail->args.pd_state.prevState ==
			    PD_SYSTEM) &&
			    (evt_detail->args.pd_state.newState != PD_SYSTEM)) {
				mutex_enter(&instance->config_dev_mtx);
				instance->mr_tbolt_pd_list[tgt].flag =
				    (uint8_t)~MRDRV_TGT_VALID;
				mutex_exit(&instance->config_dev_mtx);
				rval = mrsas_service_evt(instance,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id),
				    1, MRSAS_EVT_UNCONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
				    "rval = %d tgt id = %d ", rval,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id)));
				break;
			}
			/* entering PD_SYSTEM: treat as an insertion */
			if ((evt_detail->args.pd_state.prevState
			    == UNCONFIGURED_GOOD) &&
			    (evt_detail->args.pd_state.newState == PD_SYSTEM)) {
				rval = mrsas_service_evt(instance,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id),
				    1, MRSAS_EVT_CONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN,
				    "mr_sas: PD_INSERTED: rval = %d "
				    " tgt id = %d ", rval,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id)));
				break;
			}
		}
		break;
	}

	} /* End of Main Switch */

	/* get copy of seq_num and class/locale for re-registration */
	seq_num = ddi_get32(acc_handle, &evt_detail->seq_num);
	seq_num++;
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	/* re-arm the same command for the next event */
	ddi_put8(acc_handle, &cmd->frame->dcmd.cmd_status, 0x0);
	ddi_put32(acc_handle, &cmd->frame->dcmd.mbox.w[0], seq_num);

	instance->aen_seq_num = seq_num;

	cmd->frame_count = 1;

	cmd->retry_count_for_ocr = 0;
	cmd->drv_pkt_time = 0;

	/* Issue the aen registration frame */
	instance->func_ptr->issue_cmd(cmd, instance);
}
4349 4348
4350 4349 /*
4351 4350 * complete_cmd_in_sync_mode - Completes an internal command
4352 4351 * @instance: Adapter soft state
4353 4352 * @cmd: Command to be completed
4354 4353 *
4355 4354 * The issue_cmd_in_sync_mode() function waits for a command to complete
4356 4355 * after it issues a command. This function wakes up that waiting routine by
4357 4356 * calling wake_up() on the wait queue.
4358 4357 */
4359 4358 static void
4360 4359 complete_cmd_in_sync_mode(struct mrsas_instance *instance,
4361 4360 struct mrsas_cmd *cmd)
4362 4361 {
4363 4362 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
4364 4363 &cmd->frame->io.cmd_status);
4365 4364
4366 4365 cmd->sync_cmd = MRSAS_FALSE;
4367 4366
4368 4367 con_log(CL_ANN1, (CE_NOTE, "complete_cmd_in_sync_mode called %p \n",
4369 4368 (void *)cmd));
4370 4369
4371 4370 mutex_enter(&instance->int_cmd_mtx);
4372 4371 if (cmd->cmd_status == ENODATA) {
4373 4372 cmd->cmd_status = 0;
4374 4373 }
4375 4374 cv_broadcast(&instance->int_cmd_cv);
4376 4375 mutex_exit(&instance->int_cmd_mtx);
4377 4376
4378 4377 }
4379 4378
4380 4379 /*
4381 4380 * Call this function inside mrsas_softintr.
4382 4381 * mrsas_initiate_ocr_if_fw_is_faulty - Initiates OCR if FW status is faulty
4383 4382 * @instance: Adapter soft state
4384 4383 */
4385 4384
4386 4385 static uint32_t
4387 4386 mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *instance)
4388 4387 {
4389 4388 uint32_t cur_abs_reg_val;
4390 4389 uint32_t fw_state;
4391 4390
4392 4391 cur_abs_reg_val = instance->func_ptr->read_fw_status_reg(instance);
4393 4392 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
4394 4393 if (fw_state == MFI_STATE_FAULT) {
4395 4394 if (instance->disable_online_ctrl_reset == 1) {
4396 4395 dev_err(instance->dip, CE_WARN,
4397 4396 "mrsas_initiate_ocr_if_fw_is_faulty: "
4398 4397 "FW in Fault state, detected in ISR: "
4399 4398 "FW doesn't support ocr ");
4400 4399
4401 4400 return (ADAPTER_RESET_NOT_REQUIRED);
4402 4401 } else {
4403 4402 con_log(CL_ANN, (CE_NOTE,
4404 4403 "mrsas_initiate_ocr_if_fw_is_faulty: FW in Fault "
4405 4404 "state, detected in ISR: FW supports ocr "));
4406 4405
4407 4406 return (ADAPTER_RESET_REQUIRED);
4408 4407 }
4409 4408 }
4410 4409
4411 4410 return (ADAPTER_RESET_NOT_REQUIRED);
4412 4411 }
4413 4412
4414 4413 /*
4415 4414 * mrsas_softintr - The Software ISR
4416 4415 * @param arg : HBA soft state
4417 4416 *
4418 4417 * called from high-level interrupt if hi-level interrupt are not there,
4419 4418 * otherwise triggered as a soft interrupt
4420 4419 */
4421 4420 static uint_t
4422 4421 mrsas_softintr(struct mrsas_instance *instance)
4423 4422 {
4424 4423 struct scsi_pkt *pkt;
4425 4424 struct scsa_cmd *acmd;
4426 4425 struct mrsas_cmd *cmd;
4427 4426 struct mlist_head *pos, *next;
4428 4427 mlist_t process_list;
4429 4428 struct mrsas_header *hdr;
4430 4429 struct scsi_arq_status *arqstat;
4431 4430
4432 4431 con_log(CL_ANN1, (CE_NOTE, "mrsas_softintr() called."));
4433 4432
4434 4433 ASSERT(instance);
4435 4434
4436 4435 mutex_enter(&instance->completed_pool_mtx);
4437 4436
4438 4437 if (mlist_empty(&instance->completed_pool_list)) {
4439 4438 mutex_exit(&instance->completed_pool_mtx);
4440 4439 return (DDI_INTR_CLAIMED);
4441 4440 }
4442 4441
4443 4442 instance->softint_running = 1;
4444 4443
4445 4444 INIT_LIST_HEAD(&process_list);
4446 4445 mlist_splice(&instance->completed_pool_list, &process_list);
4447 4446 INIT_LIST_HEAD(&instance->completed_pool_list);
4448 4447
4449 4448 mutex_exit(&instance->completed_pool_mtx);
4450 4449
4451 4450 /* perform all callbacks first, before releasing the SCBs */
4452 4451 mlist_for_each_safe(pos, next, &process_list) {
4453 4452 cmd = mlist_entry(pos, struct mrsas_cmd, list);
4454 4453
4455 4454 /* syncronize the Cmd frame for the controller */
4456 4455 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
4457 4456 0, 0, DDI_DMA_SYNC_FORCPU);
4458 4457
4459 4458 if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
4460 4459 DDI_SUCCESS) {
4461 4460 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
4462 4461 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4463 4462 con_log(CL_ANN1, (CE_WARN,
4464 4463 "mrsas_softintr: "
4465 4464 "FMA check reports DMA handle failure"));
4466 4465 return (DDI_INTR_CLAIMED);
4467 4466 }
4468 4467
4469 4468 hdr = &cmd->frame->hdr;
4470 4469
4471 4470 /* remove the internal command from the process list */
4472 4471 mlist_del_init(&cmd->list);
4473 4472
4474 4473 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
4475 4474 case MFI_CMD_OP_PD_SCSI:
4476 4475 case MFI_CMD_OP_LD_SCSI:
4477 4476 case MFI_CMD_OP_LD_READ:
4478 4477 case MFI_CMD_OP_LD_WRITE:
4479 4478 /*
4480 4479 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
4481 4480 * could have been issued either through an
4482 4481 * IO path or an IOCTL path. If it was via IOCTL,
4483 4482 * we will send it to internal completion.
4484 4483 */
4485 4484 if (cmd->sync_cmd == MRSAS_TRUE) {
4486 4485 complete_cmd_in_sync_mode(instance, cmd);
4487 4486 break;
4488 4487 }
4489 4488
4490 4489 /* regular commands */
4491 4490 acmd = cmd->cmd;
4492 4491 pkt = CMD2PKT(acmd);
4493 4492
4494 4493 if (acmd->cmd_flags & CFLAG_DMAVALID) {
4495 4494 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
4496 4495 (void) ddi_dma_sync(acmd->cmd_dmahandle,
4497 4496 acmd->cmd_dma_offset,
4498 4497 acmd->cmd_dma_len,
4499 4498 DDI_DMA_SYNC_FORCPU);
4500 4499 }
4501 4500 }
4502 4501
4503 4502 pkt->pkt_reason = CMD_CMPLT;
4504 4503 pkt->pkt_statistics = 0;
4505 4504 pkt->pkt_state = STATE_GOT_BUS
4506 4505 | STATE_GOT_TARGET | STATE_SENT_CMD
4507 4506 | STATE_XFERRED_DATA | STATE_GOT_STATUS;
4508 4507
4509 4508 con_log(CL_ANN, (CE_CONT,
4510 4509 "CDB[0] = %x completed for %s: size %lx context %x",
4511 4510 pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
4512 4511 acmd->cmd_dmacount, hdr->context));
4513 4512 DTRACE_PROBE3(softintr_cdb, uint8_t, pkt->pkt_cdbp[0],
4514 4513 uint_t, acmd->cmd_cdblen, ulong_t,
4515 4514 acmd->cmd_dmacount);
4516 4515
4517 4516 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
4518 4517 struct scsi_inquiry *inq;
4519 4518
4520 4519 if (acmd->cmd_dmacount != 0) {
4521 4520 bp_mapin(acmd->cmd_buf);
4522 4521 inq = (struct scsi_inquiry *)
4523 4522 acmd->cmd_buf->b_un.b_addr;
4524 4523
4525 4524 if (hdr->cmd_status == MFI_STAT_OK) {
4526 4525 display_scsi_inquiry(
4527 4526 (caddr_t)inq);
4528 4527 }
4529 4528 }
4530 4529 }
4531 4530
4532 4531 DTRACE_PROBE2(softintr_done, uint8_t, hdr->cmd,
4533 4532 uint8_t, hdr->cmd_status);
4534 4533
4535 4534 switch (hdr->cmd_status) {
4536 4535 case MFI_STAT_OK:
4537 4536 pkt->pkt_scbp[0] = STATUS_GOOD;
4538 4537 break;
4539 4538 case MFI_STAT_LD_CC_IN_PROGRESS:
4540 4539 case MFI_STAT_LD_RECON_IN_PROGRESS:
4541 4540 pkt->pkt_scbp[0] = STATUS_GOOD;
4542 4541 break;
4543 4542 case MFI_STAT_LD_INIT_IN_PROGRESS:
4544 4543 con_log(CL_ANN,
4545 4544 (CE_WARN, "Initialization in Progress"));
4546 4545 pkt->pkt_reason = CMD_TRAN_ERR;
4547 4546
4548 4547 break;
4549 4548 case MFI_STAT_SCSI_DONE_WITH_ERROR:
4550 4549 con_log(CL_ANN, (CE_CONT, "scsi_done error"));
4551 4550
4552 4551 pkt->pkt_reason = CMD_CMPLT;
4553 4552 ((struct scsi_status *)
4554 4553 pkt->pkt_scbp)->sts_chk = 1;
4555 4554
4556 4555 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
4557 4556 con_log(CL_ANN,
4558 4557 (CE_WARN, "TEST_UNIT_READY fail"));
4559 4558 } else {
4560 4559 pkt->pkt_state |= STATE_ARQ_DONE;
4561 4560 arqstat = (void *)(pkt->pkt_scbp);
4562 4561 arqstat->sts_rqpkt_reason = CMD_CMPLT;
4563 4562 arqstat->sts_rqpkt_resid = 0;
4564 4563 arqstat->sts_rqpkt_state |=
4565 4564 STATE_GOT_BUS | STATE_GOT_TARGET
4566 4565 | STATE_SENT_CMD
4567 4566 | STATE_XFERRED_DATA;
4568 4567 *(uint8_t *)&arqstat->sts_rqpkt_status =
4569 4568 STATUS_GOOD;
4570 4569 ddi_rep_get8(
4571 4570 cmd->frame_dma_obj.acc_handle,
4572 4571 (uint8_t *)
4573 4572 &(arqstat->sts_sensedata),
4574 4573 cmd->sense,
4575 4574 sizeof (struct scsi_extended_sense),
4576 4575 DDI_DEV_AUTOINCR);
4577 4576 }
4578 4577 break;
4579 4578 case MFI_STAT_LD_OFFLINE:
4580 4579 case MFI_STAT_DEVICE_NOT_FOUND:
4581 4580 con_log(CL_ANN, (CE_CONT,
4582 4581 "mrsas_softintr:device not found error"));
4583 4582 pkt->pkt_reason = CMD_DEV_GONE;
4584 4583 pkt->pkt_statistics = STAT_DISCON;
4585 4584 break;
4586 4585 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
4587 4586 pkt->pkt_state |= STATE_ARQ_DONE;
4588 4587 pkt->pkt_reason = CMD_CMPLT;
4589 4588 ((struct scsi_status *)
4590 4589 pkt->pkt_scbp)->sts_chk = 1;
4591 4590
4592 4591 arqstat = (void *)(pkt->pkt_scbp);
4593 4592 arqstat->sts_rqpkt_reason = CMD_CMPLT;
4594 4593 arqstat->sts_rqpkt_resid = 0;
4595 4594 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
4596 4595 | STATE_GOT_TARGET | STATE_SENT_CMD
4597 4596 | STATE_XFERRED_DATA;
4598 4597 *(uint8_t *)&arqstat->sts_rqpkt_status =
4599 4598 STATUS_GOOD;
4600 4599
4601 4600 arqstat->sts_sensedata.es_valid = 1;
4602 4601 arqstat->sts_sensedata.es_key =
4603 4602 KEY_ILLEGAL_REQUEST;
4604 4603 arqstat->sts_sensedata.es_class =
4605 4604 CLASS_EXTENDED_SENSE;
4606 4605
4607 4606 /*
4608 4607 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
4609 4608 * ASC: 0x21h; ASCQ: 0x00h;
4610 4609 */
4611 4610 arqstat->sts_sensedata.es_add_code = 0x21;
4612 4611 arqstat->sts_sensedata.es_qual_code = 0x00;
4613 4612
4614 4613 break;
4615 4614
4616 4615 default:
4617 4616 con_log(CL_ANN, (CE_CONT, "Unknown status!"));
4618 4617 pkt->pkt_reason = CMD_TRAN_ERR;
4619 4618
4620 4619 break;
4621 4620 }
4622 4621
4623 4622 atomic_add_16(&instance->fw_outstanding, (-1));
4624 4623
4625 4624 (void) mrsas_common_check(instance, cmd);
4626 4625
4627 4626 if (acmd->cmd_dmahandle) {
4628 4627 if (mrsas_check_dma_handle(
4629 4628 acmd->cmd_dmahandle) != DDI_SUCCESS) {
4630 4629 ddi_fm_service_impact(instance->dip,
4631 4630 DDI_SERVICE_UNAFFECTED);
4632 4631 pkt->pkt_reason = CMD_TRAN_ERR;
4633 4632 pkt->pkt_statistics = 0;
4634 4633 }
4635 4634 }
4636 4635
4637 4636 mrsas_return_mfi_pkt(instance, cmd);
4638 4637
4639 4638 /* Call the callback routine */
4640 4639 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
4641 4640 pkt->pkt_comp) {
4642 4641 (*pkt->pkt_comp)(pkt);
4643 4642 }
4644 4643
4645 4644 break;
4646 4645
4647 4646 case MFI_CMD_OP_SMP:
4648 4647 case MFI_CMD_OP_STP:
4649 4648 complete_cmd_in_sync_mode(instance, cmd);
4650 4649 break;
4651 4650
4652 4651 case MFI_CMD_OP_DCMD:
4653 4652 /* see if got an event notification */
4654 4653 if (ddi_get32(cmd->frame_dma_obj.acc_handle,
4655 4654 &cmd->frame->dcmd.opcode) ==
4656 4655 MR_DCMD_CTRL_EVENT_WAIT) {
4657 4656 if ((instance->aen_cmd == cmd) &&
4658 4657 (instance->aen_cmd->abort_aen)) {
4659 4658 con_log(CL_ANN, (CE_WARN,
4660 4659 "mrsas_softintr: "
4661 4660 "aborted_aen returned"));
4662 4661 } else {
4663 4662 atomic_add_16(&instance->fw_outstanding,
4664 4663 (-1));
4665 4664 service_mfi_aen(instance, cmd);
4666 4665 }
4667 4666 } else {
4668 4667 complete_cmd_in_sync_mode(instance, cmd);
4669 4668 }
4670 4669
4671 4670 break;
4672 4671
4673 4672 case MFI_CMD_OP_ABORT:
4674 4673 con_log(CL_ANN, (CE_NOTE, "MFI_CMD_OP_ABORT complete"));
4675 4674 /*
4676 4675 * MFI_CMD_OP_ABORT successfully completed
4677 4676 * in the synchronous mode
4678 4677 */
4679 4678 complete_cmd_in_sync_mode(instance, cmd);
4680 4679 break;
4681 4680
4682 4681 default:
4683 4682 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
4684 4683 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4685 4684
4686 4685 if (cmd->pkt != NULL) {
4687 4686 pkt = cmd->pkt;
4688 4687 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
4689 4688 pkt->pkt_comp) {
4690 4689
4691 4690 con_log(CL_ANN1, (CE_CONT, "posting to "
4692 4691 "scsa cmd %p index %x pkt %p"
4693 4692 "time %llx, default ", (void *)cmd,
4694 4693 cmd->index, (void *)pkt,
4695 4694 gethrtime()));
4696 4695
4697 4696 (*pkt->pkt_comp)(pkt);
4698 4697
4699 4698 }
4700 4699 }
4701 4700 con_log(CL_ANN, (CE_WARN, "Cmd type unknown !"));
4702 4701 break;
4703 4702 }
4704 4703 }
4705 4704
4706 4705 instance->softint_running = 0;
4707 4706
4708 4707 return (DDI_INTR_CLAIMED);
4709 4708 }
4710 4709
4711 4710 /*
4712 4711 * mrsas_alloc_dma_obj
4713 4712 *
4714 4713 * Allocate the memory and other resources for an dma object.
4715 4714 */
4716 4715 int
4717 4716 mrsas_alloc_dma_obj(struct mrsas_instance *instance, dma_obj_t *obj,
4718 4717 uchar_t endian_flags)
4719 4718 {
4720 4719 int i;
4721 4720 size_t alen = 0;
4722 4721 uint_t cookie_cnt;
4723 4722 struct ddi_device_acc_attr tmp_endian_attr;
4724 4723
4725 4724 tmp_endian_attr = endian_attr;
4726 4725 tmp_endian_attr.devacc_attr_endian_flags = endian_flags;
4727 4726 tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4728 4727
4729 4728 i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
4730 4729 DDI_DMA_SLEEP, NULL, &obj->dma_handle);
4731 4730 if (i != DDI_SUCCESS) {
4732 4731
4733 4732 switch (i) {
4734 4733 case DDI_DMA_BADATTR :
4735 4734 con_log(CL_ANN, (CE_WARN,
4736 4735 "Failed ddi_dma_alloc_handle- Bad attribute"));
4737 4736 break;
4738 4737 case DDI_DMA_NORESOURCES :
4739 4738 con_log(CL_ANN, (CE_WARN,
4740 4739 "Failed ddi_dma_alloc_handle- No Resources"));
4741 4740 break;
4742 4741 default :
4743 4742 con_log(CL_ANN, (CE_WARN,
4744 4743 "Failed ddi_dma_alloc_handle: "
4745 4744 "unknown status %d", i));
4746 4745 break;
4747 4746 }
4748 4747
4749 4748 return (-1);
4750 4749 }
4751 4750
4752 4751 if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
4753 4752 DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
4754 4753 &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
4755 4754 alen < obj->size) {
4756 4755
4757 4756 ddi_dma_free_handle(&obj->dma_handle);
4758 4757
4759 4758 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
4760 4759
4761 4760 return (-1);
4762 4761 }
4763 4762
4764 4763 if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
4765 4764 obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
4766 4765 NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {
4767 4766
4768 4767 ddi_dma_mem_free(&obj->acc_handle);
4769 4768 ddi_dma_free_handle(&obj->dma_handle);
4770 4769
4771 4770 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
4772 4771
4773 4772 return (-1);
4774 4773 }
4775 4774
4776 4775 if (mrsas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
4777 4776 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4778 4777 return (-1);
4779 4778 }
4780 4779
4781 4780 if (mrsas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
4782 4781 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4783 4782 return (-1);
4784 4783 }
4785 4784
4786 4785 return (cookie_cnt);
4787 4786 }
4788 4787
4789 4788 /*
4790 4789 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
4791 4790 *
4792 4791 * De-allocate the memory and other resources for an dma object, which must
4793 4792 * have been alloated by a previous call to mrsas_alloc_dma_obj()
4794 4793 */
4795 4794 int
4796 4795 mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj)
4797 4796 {
4798 4797
4799 4798 if ((obj.dma_handle == NULL) || (obj.acc_handle == NULL)) {
4800 4799 return (DDI_SUCCESS);
4801 4800 }
4802 4801
4803 4802 /*
4804 4803 * NOTE: These check-handle functions fail if *_handle == NULL, but
4805 4804 * this function succeeds because of the previous check.
4806 4805 */
4807 4806 if (mrsas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
4808 4807 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4809 4808 return (DDI_FAILURE);
4810 4809 }
4811 4810
4812 4811 if (mrsas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
4813 4812 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4814 4813 return (DDI_FAILURE);
4815 4814 }
4816 4815
4817 4816 (void) ddi_dma_unbind_handle(obj.dma_handle);
4818 4817 ddi_dma_mem_free(&obj.acc_handle);
4819 4818 ddi_dma_free_handle(&obj.dma_handle);
4820 4819 obj.acc_handle = NULL;
4821 4820 return (DDI_SUCCESS);
4822 4821 }
4823 4822
/*
 * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
 * int, int (*)())
 *
 * Allocate dma resources for a new scsi command:
 * allocate a DMA handle on acmd->cmd_dmahandle, bind the buf(9S) to it,
 * and harvest up to instance->max_num_sge cookies into
 * acmd->cmd_dmacookies[].  On success CFLAG_DMAVALID is set,
 * pkt->pkt_resid reflects any bytes not covered by the harvested
 * cookies, and DDI_SUCCESS is returned.  On failure the handle is
 * freed, bioerror() records the cause on 'bp', and DDI_FAILURE is
 * returned.  'callback' selects sleep vs. no-sleep allocation;
 * 'flags' carries the PKT_CONSISTENT / PKT_DMA_PARTIAL hints.
 */
int
mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)())
{
	int dma_flags;
	int (*cb)(caddr_t);
	int i;

	ddi_dma_attr_t tmp_dma_attr = mrsas_generic_dma_attr;
	struct scsa_cmd *acmd = PKT2CMD(pkt);

	acmd->cmd_buf = bp;

	/* derive the DMA direction from the buf(9S) read/write flag */
	if (bp->b_flags & B_READ) {
		acmd->cmd_flags &= ~CFLAG_DMASEND;
		dma_flags = DDI_DMA_READ;
	} else {
		acmd->cmd_flags |= CFLAG_DMASEND;
		dma_flags = DDI_DMA_WRITE;
	}

	if (flags & PKT_CONSISTENT) {
		acmd->cmd_flags |= CFLAG_CONSISTENT;
		dma_flags |= DDI_DMA_CONSISTENT;
	}

	if (flags & PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	dma_flags |= DDI_DMA_REDZONE;

	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
	tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
	if (instance->tbolt) {
		/* OCR-RESET FIX */
		tmp_dma_attr.dma_attr_count_max =
		    (U64)mrsas_tbolt_max_cap_maxxfer; /* limit to 256K */
		tmp_dma_attr.dma_attr_maxxfer =
		    (U64)mrsas_tbolt_max_cap_maxxfer; /* limit to 256K */
	}

	if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
	    cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
		switch (i) {
		case DDI_DMA_BADATTR:
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);

		case DDI_DMA_NORESOURCES:
			bioerror(bp, 0);
			return (DDI_FAILURE);

		default:
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
			    "impossible result (0x%x)", i));
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);
		}
	}

	i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
	    cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);

	switch (i) {
	case DDI_DMA_PARTIAL_MAP:
		/* multi-window mapping: position on the first window */
		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
			    "DDI_DMA_PARTIAL_MAP impossible"));
			goto no_dma_cookies;
		}

		if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
		    DDI_FAILURE) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed"));
			goto no_dma_cookies;
		}

		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {

			con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed"));
			goto no_dma_cookies;
		}

		/* fall into the shared cookie-harvesting code below */
		goto get_dma_cookies;
	case DDI_DMA_MAPPED:
		/* single window covers the whole buffer */
		acmd->cmd_nwin = 1;
		acmd->cmd_dma_len = 0;
		acmd->cmd_dma_offset = 0;

get_dma_cookies:
		/*
		 * Pull cookies out of the current window, stopping at
		 * either the window's cookie count or the adapter's SGE
		 * limit, and total the mapped byte count as we go.
		 */
		i = 0;
		acmd->cmd_dmacount = 0;
		for (;;) {
			acmd->cmd_dmacount +=
			    acmd->cmd_dmacookies[i++].dmac_size;

			if (i == instance->max_num_sge ||
			    i == acmd->cmd_ncookies)
				break;

			ddi_dma_nextcookie(acmd->cmd_dmahandle,
			    &acmd->cmd_dmacookies[i]);
		}

		acmd->cmd_cookie = i;
		acmd->cmd_cookiecnt = i;

		acmd->cmd_flags |= CFLAG_DMAVALID;

		/* resid is whatever the harvested cookies do not cover */
		if (bp->b_bcount >= acmd->cmd_dmacount) {
			pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
		} else {
			pkt->pkt_resid = 0;
		}

		return (DDI_SUCCESS);
	case DDI_DMA_NORESOURCES:
		bioerror(bp, 0);
		break;
	case DDI_DMA_NOMAPPING:
		bioerror(bp, EFAULT);
		break;
	case DDI_DMA_TOOBIG:
		bioerror(bp, EINVAL);
		break;
	case DDI_DMA_INUSE:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
		    " DDI_DMA_INUSE impossible"));
		break;
	default:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
		    "impossible result (0x%x)", i));
		break;
	}

no_dma_cookies:
	/* common failure exit: release the handle and mark DMA invalid */
	ddi_dma_free_handle(&acmd->cmd_dmahandle);
	acmd->cmd_dmahandle = NULL;
	acmd->cmd_flags &= ~CFLAG_DMAVALID;
	return (DDI_FAILURE);
}
4977 4976
4978 4977 /*
4979 4978 * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *)
4980 4979 *
4981 4980 * move dma resources to next dma window
4982 4981 *
4983 4982 */
4984 4983 int
4985 4984 mrsas_dma_move(struct mrsas_instance *instance, struct scsi_pkt *pkt,
4986 4985 struct buf *bp)
4987 4986 {
4988 4987 int i = 0;
4989 4988
4990 4989 struct scsa_cmd *acmd = PKT2CMD(pkt);
4991 4990
4992 4991 /*
4993 4992 * If there are no more cookies remaining in this window,
4994 4993 * must move to the next window first.
4995 4994 */
4996 4995 if (acmd->cmd_cookie == acmd->cmd_ncookies) {
4997 4996 if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
4998 4997 return (DDI_SUCCESS);
4999 4998 }
5000 4999
5001 5000 /* at last window, cannot move */
5002 5001 if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
5003 5002 return (DDI_FAILURE);
5004 5003 }
5005 5004
5006 5005 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
5007 5006 &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
5008 5007 &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
5009 5008 DDI_FAILURE) {
5010 5009 return (DDI_FAILURE);
5011 5010 }
5012 5011
5013 5012 acmd->cmd_cookie = 0;
5014 5013 } else {
5015 5014 /* still more cookies in this window - get the next one */
5016 5015 ddi_dma_nextcookie(acmd->cmd_dmahandle,
5017 5016 &acmd->cmd_dmacookies[0]);
5018 5017 }
5019 5018
5020 5019 /* get remaining cookies in this window, up to our maximum */
5021 5020 for (;;) {
5022 5021 acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
5023 5022 acmd->cmd_cookie++;
5024 5023
5025 5024 if (i == instance->max_num_sge ||
5026 5025 acmd->cmd_cookie == acmd->cmd_ncookies) {
5027 5026 break;
5028 5027 }
5029 5028
5030 5029 ddi_dma_nextcookie(acmd->cmd_dmahandle,
5031 5030 &acmd->cmd_dmacookies[i]);
5032 5031 }
5033 5032
5034 5033 acmd->cmd_cookiecnt = i;
5035 5034
5036 5035 if (bp->b_bcount >= acmd->cmd_dmacount) {
5037 5036 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
5038 5037 } else {
5039 5038 pkt->pkt_resid = 0;
5040 5039 }
5041 5040
5042 5041 return (DDI_SUCCESS);
5043 5042 }
5044 5043
/*
 * build_cmd
 *
 * Translate a SCSA packet into an MFI command frame ready for the
 * firmware.  Logical-drive reads/writes become MFI_CMD_OP_LD_READ/
 * LD_WRITE I/O frames with the LBA and count decoded from the CDB;
 * everything else becomes a DCDB pass-through frame carrying the raw
 * CDB.  MODE SENSE for pages 0x3/0x4 is emulated locally (no frame is
 * built; *cmd_done is set and NULL returned).  On success returns the
 * mrsas_cmd with its SGL and frame_count filled in; returns NULL with
 * *cmd_done == 0 when no command packet could be obtained.
 */
static struct mrsas_cmd *
build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
    struct scsi_pkt *pkt, uchar_t *cmd_done)
{
	uint16_t flags = 0;
	uint32_t i;
	uint32_t sge_bytes;
	uint32_t tmp_data_xfer_len;
	ddi_acc_handle_t acc_handle;
	struct mrsas_cmd *cmd;
	struct mrsas_sge64 *mfi_sgl;
	struct mrsas_sge_ieee *mfi_sgl_ieee;
	struct scsa_cmd *acmd = PKT2CMD(pkt);
	struct mrsas_pthru_frame *pthru;
	struct mrsas_io_frame *ldio;

	/* find out if this is logical or physical drive command. */
	acmd->islogical = MRDRV_IS_LOGICAL(ap);
	acmd->device_id = MAP_DEVICE_ID(instance, ap);
	*cmd_done = 0;

	/* get the command packet */
	if (!(cmd = mrsas_get_mfi_pkt(instance))) {
		DTRACE_PROBE2(build_cmd_mfi_err, uint16_t,
		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
		return (NULL);
	}

	acc_handle = cmd->frame_dma_obj.acc_handle;

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index);

	cmd->pkt = pkt;
	cmd->cmd = acmd;
	DTRACE_PROBE3(build_cmds, uint8_t, pkt->pkt_cdbp[0],
	    ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len);

	/* lets get the command directions */
	if (acmd->cmd_flags & CFLAG_DMASEND) {
		flags = MFI_FRAME_DIR_WRITE;

		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORDEV);
		}
	} else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
		/*
		 * NOTE(review): this condition is true whenever *any*
		 * flag other than CFLAG_DMASEND is set (e.g.
		 * CFLAG_DMAVALID), not a dedicated "read" flag; commands
		 * with cmd_flags == 0 fall through to DIR_NONE below.
		 * Confirm this matches the intended direction logic.
		 */
		flags = MFI_FRAME_DIR_READ;

		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORCPU);
		}
	} else {
		flags = MFI_FRAME_DIR_NONE;
	}

	if (instance->flag_ieee) {
		flags |= MFI_FRAME_IEEE;
	}
	flags |= MFI_FRAME_SGL64;

	switch (pkt->pkt_cdbp[0]) {

	/*
	 * case SCMD_SYNCHRONIZE_CACHE:
	 * 	flush_cache(instance);
	 * 	mrsas_return_mfi_pkt(instance, cmd);
	 * 	*cmd_done = 1;
	 *
	 * 	return (NULL);
	 */

	case SCMD_READ:
	case SCMD_WRITE:
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
	case SCMD_READ_G4:
	case SCMD_WRITE_G4:
	case SCMD_READ_G5:
	case SCMD_WRITE_G5:
		if (acmd->islogical) {
			ldio = (struct mrsas_io_frame *)cmd->frame;

			/*
			 * preare the Logical IO frame:
			 * 2nd bit is zero for all read cmds
			 */
			ddi_put8(acc_handle, &ldio->cmd,
			    (pkt->pkt_cdbp[0] & 0x02) ? MFI_CMD_OP_LD_WRITE
			    : MFI_CMD_OP_LD_READ);
			ddi_put8(acc_handle, &ldio->cmd_status, 0x0);
			ddi_put8(acc_handle, &ldio->scsi_status, 0x0);
			ddi_put8(acc_handle, &ldio->target_id, acmd->device_id);
			ddi_put16(acc_handle, &ldio->timeout, 0);
			ddi_put8(acc_handle, &ldio->reserved_0, 0);
			ddi_put16(acc_handle, &ldio->pad_0, 0);
			ddi_put16(acc_handle, &ldio->flags, flags);

			/* Initialize sense Information */
			bzero(cmd->sense, SENSE_LENGTH);
			ddi_put8(acc_handle, &ldio->sense_len, SENSE_LENGTH);
			ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_hi, 0);
			ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_lo,
			    cmd->sense_phys_addr);
			ddi_put32(acc_handle, &ldio->start_lba_hi, 0);
			ddi_put8(acc_handle, &ldio->access_byte,
			    (acmd->cmd_cdblen != 6) ? pkt->pkt_cdbp[1] : 0);
			ddi_put8(acc_handle, &ldio->sge_count,
			    acmd->cmd_cookiecnt);
			if (instance->flag_ieee) {
				mfi_sgl_ieee =
				    (struct mrsas_sge_ieee *)&ldio->sgl;
			} else {
				mfi_sgl = (struct mrsas_sge64 *)&ldio->sgl;
			}

			(void) ddi_get32(acc_handle, &ldio->context);

			/*
			 * Decode LBA and transfer length from the CDB;
			 * the layout depends on the CDB group (size).
			 */
			if (acmd->cmd_cdblen == CDB_GROUP0) {
				/* 6-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    (uint16_t)(pkt->pkt_cdbp[4])));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[3])) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
				    ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
				    << 16)));
			} else if (acmd->cmd_cdblen == CDB_GROUP1) {
				/* 10-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    ((uint16_t)(pkt->pkt_cdbp[8])) |
				    ((uint16_t)(pkt->pkt_cdbp[7]) << 8)));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
			} else if (acmd->cmd_cdblen == CDB_GROUP5) {
				/* 12-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
			} else if (acmd->cmd_cdblen == CDB_GROUP4) {
				/* 16-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    ((uint32_t)(pkt->pkt_cdbp[13])) |
				    ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[10]) << 24)));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));

				ddi_put32(acc_handle, &ldio->start_lba_hi, (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
			}

			break;
		}
		/* For all non-rd/wr and physical disk cmds */
		/* FALLTHROUGH */
	default:

		switch (pkt->pkt_cdbp[0]) {
		case SCMD_MODE_SENSE:
		case SCMD_MODE_SENSE_G1: {
			union scsi_cdb *cdbp;
			uint16_t page_code;

			cdbp = (void *)pkt->pkt_cdbp;
			page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
			switch (page_code) {
			case 0x3:
			case 0x4:
				/*
				 * Pages 0x3/0x4 are synthesized locally;
				 * no firmware command is needed.
				 */
				(void) mrsas_mode_sense_build(pkt);
				mrsas_return_mfi_pkt(instance, cmd);
				*cmd_done = 1;
				return (NULL);
			}
			break;
		}
		default:
			break;
		}

		/* everything else goes to the device as a pass-through */
		pthru = (struct mrsas_pthru_frame *)cmd->frame;

		/* prepare the DCDB frame */
		ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ?
		    MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI);
		ddi_put8(acc_handle, &pthru->cmd_status, 0x0);
		ddi_put8(acc_handle, &pthru->scsi_status, 0x0);
		ddi_put8(acc_handle, &pthru->target_id, acmd->device_id);
		ddi_put8(acc_handle, &pthru->lun, 0);
		ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen);
		ddi_put16(acc_handle, &pthru->timeout, 0);
		ddi_put16(acc_handle, &pthru->flags, flags);
		/* total transfer length is the sum of all cookie sizes */
		tmp_data_xfer_len = 0;
		for (i = 0; i < acmd->cmd_cookiecnt; i++) {
			tmp_data_xfer_len += acmd->cmd_dmacookies[i].dmac_size;
		}
		ddi_put32(acc_handle, &pthru->data_xfer_len,
		    tmp_data_xfer_len);
		ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt);
		if (instance->flag_ieee) {
			mfi_sgl_ieee = (struct mrsas_sge_ieee *)&pthru->sgl;
		} else {
			mfi_sgl = (struct mrsas_sge64 *)&pthru->sgl;
		}

		bzero(cmd->sense, SENSE_LENGTH);
		ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
		ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
		ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo,
		    cmd->sense_phys_addr);

		(void) ddi_get32(acc_handle, &pthru->context);
		ddi_rep_put8(acc_handle, (uint8_t *)pkt->pkt_cdbp,
		    (uint8_t *)pthru->cdb, acmd->cmd_cdblen, DDI_DEV_AUTOINCR);

		break;
	}

	/* prepare the scatter-gather list for the firmware */
	if (instance->flag_ieee) {
		for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl_ieee++) {
			ddi_put64(acc_handle, &mfi_sgl_ieee->phys_addr,
			    acmd->cmd_dmacookies[i].dmac_laddress);
			ddi_put32(acc_handle, &mfi_sgl_ieee->length,
			    acmd->cmd_dmacookies[i].dmac_size);
		}
		sge_bytes = sizeof (struct mrsas_sge_ieee)*acmd->cmd_cookiecnt;
	} else {
		for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) {
			ddi_put64(acc_handle, &mfi_sgl->phys_addr,
			    acmd->cmd_dmacookies[i].dmac_laddress);
			ddi_put32(acc_handle, &mfi_sgl->length,
			    acmd->cmd_dmacookies[i].dmac_size);
		}
		sge_bytes = sizeof (struct mrsas_sge64)*acmd->cmd_cookiecnt;
	}

	/*
	 * frame_count = frames needed for the SGL (rounded up) plus one
	 * for the command frame itself, capped at 8.
	 */
	cmd->frame_count = (sge_bytes / MRMFI_FRAME_SIZE) +
	    ((sge_bytes % MRMFI_FRAME_SIZE) ? 1 : 0) + 1;

	if (cmd->frame_count >= 8) {
		cmd->frame_count = 8;
	}

	return (cmd);
}
5319 5318
5320 5319 /*
5321 5320 * wait_for_outstanding - Wait for all outstanding cmds
5322 5321 * @instance: Adapter soft state
5323 5322 *
5324 5323 * This function waits for upto MRDRV_RESET_WAIT_TIME seconds for FW to
5325 5324 * complete all its outstanding commands. Returns error if one or more IOs
5326 5325 * are pending after this time period.
5327 5326 */
5328 5327 static int
5329 5328 wait_for_outstanding(struct mrsas_instance *instance)
5330 5329 {
5331 5330 int i;
5332 5331 uint32_t wait_time = 90;
5333 5332
5334 5333 for (i = 0; i < wait_time; i++) {
5335 5334 if (!instance->fw_outstanding) {
5336 5335 break;
5337 5336 }
5338 5337
5339 5338 drv_usecwait(MILLISEC); /* wait for 1000 usecs */;
5340 5339 }
5341 5340
5342 5341 if (instance->fw_outstanding) {
5343 5342 return (1);
5344 5343 }
5345 5344
5346 5345 return (0);
5347 5346 }
5348 5347
5349 5348 /*
5350 5349 * issue_mfi_pthru
5351 5350 */
5352 5351 static int
5353 5352 issue_mfi_pthru(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5354 5353 struct mrsas_cmd *cmd, int mode)
5355 5354 {
5356 5355 void *ubuf;
5357 5356 uint32_t kphys_addr = 0;
5358 5357 uint32_t xferlen = 0;
5359 5358 uint32_t new_xfer_length = 0;
5360 5359 uint_t model;
5361 5360 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5362 5361 dma_obj_t pthru_dma_obj;
5363 5362 struct mrsas_pthru_frame *kpthru;
5364 5363 struct mrsas_pthru_frame *pthru;
5365 5364 int i;
5366 5365 pthru = &cmd->frame->pthru;
5367 5366 kpthru = (struct mrsas_pthru_frame *)&ioctl->frame[0];
5368 5367
5369 5368 if (instance->adapterresetinprogress) {
5370 5369 con_log(CL_ANN1, (CE_WARN, "issue_mfi_pthru: Reset flag set, "
5371 5370 "returning mfi_pkt and setting TRAN_BUSY\n"));
5372 5371 return (DDI_FAILURE);
5373 5372 }
5374 5373 model = ddi_model_convert_from(mode & FMODELS);
5375 5374 if (model == DDI_MODEL_ILP32) {
5376 5375 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5377 5376
5378 5377 xferlen = kpthru->sgl.sge32[0].length;
5379 5378
5380 5379 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5381 5380 } else {
5382 5381 #ifdef _ILP32
5383 5382 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5384 5383 xferlen = kpthru->sgl.sge32[0].length;
5385 5384 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5386 5385 #else
5387 5386 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP64"));
5388 5387 xferlen = kpthru->sgl.sge64[0].length;
5389 5388 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr;
5390 5389 #endif
5391 5390 }
5392 5391
5393 5392 if (xferlen) {
5394 5393 /* means IOCTL requires DMA */
5395 5394 /* allocate the data transfer buffer */
5396 5395 /* pthru_dma_obj.size = xferlen; */
5397 5396 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length,
5398 5397 PAGESIZE);
5399 5398 pthru_dma_obj.size = new_xfer_length;
5400 5399 pthru_dma_obj.dma_attr = mrsas_generic_dma_attr;
5401 5400 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5402 5401 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5403 5402 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1;
5404 5403 pthru_dma_obj.dma_attr.dma_attr_align = 1;
5405 5404
5406 5405 /* allocate kernel buffer for DMA */
5407 5406 if (mrsas_alloc_dma_obj(instance, &pthru_dma_obj,
5408 5407 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5409 5408 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
5410 5409 "could not allocate data transfer buffer."));
5411 5410 return (DDI_FAILURE);
5412 5411 }
5413 5412 (void) memset(pthru_dma_obj.buffer, 0, xferlen);
5414 5413
5415 5414 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5416 5415 if (kpthru->flags & MFI_FRAME_DIR_WRITE) {
5417 5416 for (i = 0; i < xferlen; i++) {
5418 5417 if (ddi_copyin((uint8_t *)ubuf+i,
5419 5418 (uint8_t *)pthru_dma_obj.buffer+i,
5420 5419 1, mode)) {
5421 5420 con_log(CL_ANN, (CE_WARN,
5422 5421 "issue_mfi_pthru : "
5423 5422 "copy from user space failed"));
5424 5423 return (DDI_FAILURE);
5425 5424 }
5426 5425 }
5427 5426 }
5428 5427
5429 5428 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address;
5430 5429 }
5431 5430
5432 5431 ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd);
5433 5432 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
5434 5433 ddi_put8(acc_handle, &pthru->cmd_status, 0);
5435 5434 ddi_put8(acc_handle, &pthru->scsi_status, 0);
5436 5435 ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id);
5437 5436 ddi_put8(acc_handle, &pthru->lun, kpthru->lun);
5438 5437 ddi_put8(acc_handle, &pthru->cdb_len, kpthru->cdb_len);
5439 5438 ddi_put8(acc_handle, &pthru->sge_count, kpthru->sge_count);
5440 5439 ddi_put16(acc_handle, &pthru->timeout, kpthru->timeout);
5441 5440 ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len);
5442 5441
5443 5442 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
5444 5443 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
5445 5444 /* ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); */
5446 5445
5447 5446 ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb,
5448 5447 pthru->cdb_len, DDI_DEV_AUTOINCR);
5449 5448
5450 5449 ddi_put16(acc_handle, &pthru->flags, kpthru->flags & ~MFI_FRAME_SGL64);
5451 5450 ddi_put32(acc_handle, &pthru->sgl.sge32[0].length, xferlen);
5452 5451 ddi_put32(acc_handle, &pthru->sgl.sge32[0].phys_addr, kphys_addr);
5453 5452
5454 5453 cmd->sync_cmd = MRSAS_TRUE;
5455 5454 cmd->frame_count = 1;
5456 5455
5457 5456 if (instance->tbolt) {
5458 5457 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5459 5458 }
5460 5459
5461 5460 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5462 5461 con_log(CL_ANN, (CE_WARN,
5463 5462 "issue_mfi_pthru: fw_ioctl failed"));
5464 5463 } else {
5465 5464 if (xferlen && kpthru->flags & MFI_FRAME_DIR_READ) {
5466 5465 for (i = 0; i < xferlen; i++) {
5467 5466 if (ddi_copyout(
5468 5467 (uint8_t *)pthru_dma_obj.buffer+i,
5469 5468 (uint8_t *)ubuf+i, 1, mode)) {
5470 5469 con_log(CL_ANN, (CE_WARN,
5471 5470 "issue_mfi_pthru : "
5472 5471 "copy to user space failed"));
5473 5472 return (DDI_FAILURE);
5474 5473 }
5475 5474 }
5476 5475 }
5477 5476 }
5478 5477
5479 5478 kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status);
5480 5479 kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status);
5481 5480
5482 5481 con_log(CL_ANN, (CE_CONT, "issue_mfi_pthru: cmd_status %x, "
5483 5482 "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status));
5484 5483 DTRACE_PROBE3(issue_pthru, uint8_t, kpthru->cmd, uint8_t,
5485 5484 kpthru->cmd_status, uint8_t, kpthru->scsi_status);
5486 5485
5487 5486 if (kpthru->sense_len) {
5488 5487 uint_t sense_len = SENSE_LENGTH;
5489 5488 void *sense_ubuf =
5490 5489 (void *)(ulong_t)kpthru->sense_buf_phys_addr_lo;
5491 5490 if (kpthru->sense_len <= SENSE_LENGTH) {
5492 5491 sense_len = kpthru->sense_len;
5493 5492 }
5494 5493
5495 5494 for (i = 0; i < sense_len; i++) {
5496 5495 if (ddi_copyout(
5497 5496 (uint8_t *)cmd->sense+i,
5498 5497 (uint8_t *)sense_ubuf+i, 1, mode)) {
5499 5498 con_log(CL_ANN, (CE_WARN,
5500 5499 "issue_mfi_pthru : "
5501 5500 "copy to user space failed"));
5502 5501 }
5503 5502 con_log(CL_DLEVEL1, (CE_WARN,
5504 5503 "Copying Sense info sense_buff[%d] = 0x%X",
5505 5504 i, *((uint8_t *)cmd->sense + i)));
5506 5505 }
5507 5506 }
5508 5507 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
5509 5508 DDI_DMA_SYNC_FORDEV);
5510 5509
5511 5510 if (xferlen) {
5512 5511 /* free kernel buffer */
5513 5512 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
5514 5513 return (DDI_FAILURE);
5515 5514 }
5516 5515
5517 5516 return (DDI_SUCCESS);
5518 5517 }
5519 5518
5520 5519 /*
5521 5520 * issue_mfi_dcmd
5522 5521 */
5523 5522 static int
5524 5523 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5525 5524 struct mrsas_cmd *cmd, int mode)
5526 5525 {
5527 5526 void *ubuf;
5528 5527 uint32_t kphys_addr = 0;
5529 5528 uint32_t xferlen = 0;
5530 5529 uint32_t new_xfer_length = 0;
5531 5530 uint32_t model;
5532 5531 dma_obj_t dcmd_dma_obj;
5533 5532 struct mrsas_dcmd_frame *kdcmd;
5534 5533 struct mrsas_dcmd_frame *dcmd;
5535 5534 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5536 5535 int i;
5537 5536 dcmd = &cmd->frame->dcmd;
5538 5537 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
5539 5538
5540 5539 if (instance->adapterresetinprogress) {
5541 5540 con_log(CL_ANN1, (CE_NOTE, "Reset flag set, "
5542 5541 "returning mfi_pkt and setting TRAN_BUSY"));
5543 5542 return (DDI_FAILURE);
5544 5543 }
5545 5544 model = ddi_model_convert_from(mode & FMODELS);
5546 5545 if (model == DDI_MODEL_ILP32) {
5547 5546 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5548 5547
5549 5548 xferlen = kdcmd->sgl.sge32[0].length;
5550 5549
5551 5550 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5552 5551 } else {
5553 5552 #ifdef _ILP32
5554 5553 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5555 5554 xferlen = kdcmd->sgl.sge32[0].length;
5556 5555 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5557 5556 #else
5558 5557 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_LP64"));
5559 5558 xferlen = kdcmd->sgl.sge64[0].length;
5560 5559 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
5561 5560 #endif
5562 5561 }
5563 5562 if (xferlen) {
5564 5563 /* means IOCTL requires DMA */
5565 5564 /* allocate the data transfer buffer */
5566 5565 /* dcmd_dma_obj.size = xferlen; */
5567 5566 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length,
5568 5567 PAGESIZE);
5569 5568 dcmd_dma_obj.size = new_xfer_length;
5570 5569 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
5571 5570 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5572 5571 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5573 5572 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
5574 5573 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
5575 5574
5576 5575 /* allocate kernel buffer for DMA */
5577 5576 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
5578 5577 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5579 5578 con_log(CL_ANN,
5580 5579 (CE_WARN, "issue_mfi_dcmd: could not "
5581 5580 "allocate data transfer buffer."));
5582 5581 return (DDI_FAILURE);
5583 5582 }
5584 5583 (void) memset(dcmd_dma_obj.buffer, 0, xferlen);
5585 5584
5586 5585 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5587 5586 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) {
5588 5587 for (i = 0; i < xferlen; i++) {
5589 5588 if (ddi_copyin((uint8_t *)ubuf + i,
5590 5589 (uint8_t *)dcmd_dma_obj.buffer + i,
5591 5590 1, mode)) {
5592 5591 con_log(CL_ANN, (CE_WARN,
5593 5592 "issue_mfi_dcmd : "
5594 5593 "copy from user space failed"));
5595 5594 return (DDI_FAILURE);
5596 5595 }
5597 5596 }
5598 5597 }
5599 5598
5600 5599 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
5601 5600 }
5602 5601
5603 5602 ddi_put8(acc_handle, &dcmd->cmd, kdcmd->cmd);
5604 5603 ddi_put8(acc_handle, &dcmd->cmd_status, 0);
5605 5604 ddi_put8(acc_handle, &dcmd->sge_count, kdcmd->sge_count);
5606 5605 ddi_put16(acc_handle, &dcmd->timeout, kdcmd->timeout);
5607 5606 ddi_put32(acc_handle, &dcmd->data_xfer_len, kdcmd->data_xfer_len);
5608 5607 ddi_put32(acc_handle, &dcmd->opcode, kdcmd->opcode);
5609 5608
5610 5609 ddi_rep_put8(acc_handle, (uint8_t *)kdcmd->mbox.b,
5611 5610 (uint8_t *)dcmd->mbox.b, DCMD_MBOX_SZ, DDI_DEV_AUTOINCR);
5612 5611
5613 5612 ddi_put16(acc_handle, &dcmd->flags, kdcmd->flags & ~MFI_FRAME_SGL64);
5614 5613 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].length, xferlen);
5615 5614 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].phys_addr, kphys_addr);
5616 5615
5617 5616 cmd->sync_cmd = MRSAS_TRUE;
5618 5617 cmd->frame_count = 1;
5619 5618
5620 5619 if (instance->tbolt) {
5621 5620 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5622 5621 }
5623 5622
5624 5623 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5625 5624 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed"));
5626 5625 } else {
5627 5626 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
5628 5627 for (i = 0; i < xferlen; i++) {
5629 5628 if (ddi_copyout(
5630 5629 (uint8_t *)dcmd_dma_obj.buffer + i,
5631 5630 (uint8_t *)ubuf + i,
5632 5631 1, mode)) {
5633 5632 con_log(CL_ANN, (CE_WARN,
5634 5633 "issue_mfi_dcmd : "
5635 5634 "copy to user space failed"));
5636 5635 return (DDI_FAILURE);
5637 5636 }
5638 5637 }
5639 5638 }
5640 5639 }
5641 5640
5642 5641 kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status);
5643 5642 con_log(CL_ANN,
5644 5643 (CE_CONT, "issue_mfi_dcmd: cmd_status %x", kdcmd->cmd_status));
5645 5644 DTRACE_PROBE3(issue_dcmd, uint32_t, kdcmd->opcode, uint8_t,
5646 5645 kdcmd->cmd, uint8_t, kdcmd->cmd_status);
5647 5646
5648 5647 if (xferlen) {
5649 5648 /* free kernel buffer */
5650 5649 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
5651 5650 return (DDI_FAILURE);
5652 5651 }
5653 5652
5654 5653 return (DDI_SUCCESS);
5655 5654 }
5656 5655
5657 5656 /*
5658 5657 * issue_mfi_smp
5659 5658 */
5660 5659 static int
5661 5660 issue_mfi_smp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5662 5661 struct mrsas_cmd *cmd, int mode)
5663 5662 {
5664 5663 void *request_ubuf;
5665 5664 void *response_ubuf;
5666 5665 uint32_t request_xferlen = 0;
5667 5666 uint32_t response_xferlen = 0;
5668 5667 uint32_t new_xfer_length1 = 0;
5669 5668 uint32_t new_xfer_length2 = 0;
5670 5669 uint_t model;
5671 5670 dma_obj_t request_dma_obj;
5672 5671 dma_obj_t response_dma_obj;
5673 5672 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5674 5673 struct mrsas_smp_frame *ksmp;
5675 5674 struct mrsas_smp_frame *smp;
5676 5675 struct mrsas_sge32 *sge32;
5677 5676 #ifndef _ILP32
5678 5677 struct mrsas_sge64 *sge64;
5679 5678 #endif
5680 5679 int i;
5681 5680 uint64_t tmp_sas_addr;
5682 5681
5683 5682 smp = &cmd->frame->smp;
5684 5683 ksmp = (struct mrsas_smp_frame *)&ioctl->frame[0];
5685 5684
5686 5685 if (instance->adapterresetinprogress) {
5687 5686 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5688 5687 "returning mfi_pkt and setting TRAN_BUSY\n"));
5689 5688 return (DDI_FAILURE);
5690 5689 }
5691 5690 model = ddi_model_convert_from(mode & FMODELS);
5692 5691 if (model == DDI_MODEL_ILP32) {
5693 5692 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5694 5693
5695 5694 sge32 = &ksmp->sgl[0].sge32[0];
5696 5695 response_xferlen = sge32[0].length;
5697 5696 request_xferlen = sge32[1].length;
5698 5697 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5699 5698 "response_xferlen = %x, request_xferlen = %x",
5700 5699 response_xferlen, request_xferlen));
5701 5700
5702 5701 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5703 5702 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5704 5703 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5705 5704 "response_ubuf = %p, request_ubuf = %p",
5706 5705 response_ubuf, request_ubuf));
5707 5706 } else {
5708 5707 #ifdef _ILP32
5709 5708 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5710 5709
5711 5710 sge32 = &ksmp->sgl[0].sge32[0];
5712 5711 response_xferlen = sge32[0].length;
5713 5712 request_xferlen = sge32[1].length;
5714 5713 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5715 5714 "response_xferlen = %x, request_xferlen = %x",
5716 5715 response_xferlen, request_xferlen));
5717 5716
5718 5717 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5719 5718 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5720 5719 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5721 5720 "response_ubuf = %p, request_ubuf = %p",
5722 5721 response_ubuf, request_ubuf));
5723 5722 #else
5724 5723 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_LP64"));
5725 5724
5726 5725 sge64 = &ksmp->sgl[0].sge64[0];
5727 5726 response_xferlen = sge64[0].length;
5728 5727 request_xferlen = sge64[1].length;
5729 5728
5730 5729 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr;
5731 5730 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr;
5732 5731 #endif
5733 5732 }
5734 5733 if (request_xferlen) {
5735 5734 /* means IOCTL requires DMA */
5736 5735 /* allocate the data transfer buffer */
5737 5736 /* request_dma_obj.size = request_xferlen; */
5738 5737 MRSAS_GET_BOUNDARY_ALIGNED_LEN(request_xferlen,
5739 5738 new_xfer_length1, PAGESIZE);
5740 5739 request_dma_obj.size = new_xfer_length1;
5741 5740 request_dma_obj.dma_attr = mrsas_generic_dma_attr;
5742 5741 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5743 5742 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5744 5743 request_dma_obj.dma_attr.dma_attr_sgllen = 1;
5745 5744 request_dma_obj.dma_attr.dma_attr_align = 1;
5746 5745
5747 5746 /* allocate kernel buffer for DMA */
5748 5747 if (mrsas_alloc_dma_obj(instance, &request_dma_obj,
5749 5748 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5750 5749 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5751 5750 "could not allocate data transfer buffer."));
5752 5751 return (DDI_FAILURE);
5753 5752 }
5754 5753 (void) memset(request_dma_obj.buffer, 0, request_xferlen);
5755 5754
5756 5755 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5757 5756 for (i = 0; i < request_xferlen; i++) {
5758 5757 if (ddi_copyin((uint8_t *)request_ubuf + i,
5759 5758 (uint8_t *)request_dma_obj.buffer + i,
5760 5759 1, mode)) {
5761 5760 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5762 5761 "copy from user space failed"));
5763 5762 return (DDI_FAILURE);
5764 5763 }
5765 5764 }
5766 5765 }
5767 5766
5768 5767 if (response_xferlen) {
5769 5768 /* means IOCTL requires DMA */
5770 5769 /* allocate the data transfer buffer */
5771 5770 /* response_dma_obj.size = response_xferlen; */
5772 5771 MRSAS_GET_BOUNDARY_ALIGNED_LEN(response_xferlen,
5773 5772 new_xfer_length2, PAGESIZE);
5774 5773 response_dma_obj.size = new_xfer_length2;
5775 5774 response_dma_obj.dma_attr = mrsas_generic_dma_attr;
5776 5775 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5777 5776 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5778 5777 response_dma_obj.dma_attr.dma_attr_sgllen = 1;
5779 5778 response_dma_obj.dma_attr.dma_attr_align = 1;
5780 5779
5781 5780 /* allocate kernel buffer for DMA */
5782 5781 if (mrsas_alloc_dma_obj(instance, &response_dma_obj,
5783 5782 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5784 5783 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5785 5784 "could not allocate data transfer buffer."));
5786 5785 return (DDI_FAILURE);
5787 5786 }
5788 5787 (void) memset(response_dma_obj.buffer, 0, response_xferlen);
5789 5788
5790 5789 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5791 5790 for (i = 0; i < response_xferlen; i++) {
5792 5791 if (ddi_copyin((uint8_t *)response_ubuf + i,
5793 5792 (uint8_t *)response_dma_obj.buffer + i,
5794 5793 1, mode)) {
5795 5794 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5796 5795 "copy from user space failed"));
5797 5796 return (DDI_FAILURE);
5798 5797 }
5799 5798 }
5800 5799 }
5801 5800
5802 5801 ddi_put8(acc_handle, &smp->cmd, ksmp->cmd);
5803 5802 ddi_put8(acc_handle, &smp->cmd_status, 0);
5804 5803 ddi_put8(acc_handle, &smp->connection_status, 0);
5805 5804 ddi_put8(acc_handle, &smp->sge_count, ksmp->sge_count);
5806 5805 /* smp->context = ksmp->context; */
5807 5806 ddi_put16(acc_handle, &smp->timeout, ksmp->timeout);
5808 5807 ddi_put32(acc_handle, &smp->data_xfer_len, ksmp->data_xfer_len);
5809 5808
5810 5809 bcopy((void *)&ksmp->sas_addr, (void *)&tmp_sas_addr,
5811 5810 sizeof (uint64_t));
5812 5811 ddi_put64(acc_handle, &smp->sas_addr, tmp_sas_addr);
5813 5812
5814 5813 ddi_put16(acc_handle, &smp->flags, ksmp->flags & ~MFI_FRAME_SGL64);
5815 5814
5816 5815 model = ddi_model_convert_from(mode & FMODELS);
5817 5816 if (model == DDI_MODEL_ILP32) {
5818 5817 con_log(CL_ANN1, (CE_CONT,
5819 5818 "issue_mfi_smp: DDI_MODEL_ILP32"));
5820 5819
5821 5820 sge32 = &smp->sgl[0].sge32[0];
5822 5821 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5823 5822 ddi_put32(acc_handle, &sge32[0].phys_addr,
5824 5823 response_dma_obj.dma_cookie[0].dmac_address);
5825 5824 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5826 5825 ddi_put32(acc_handle, &sge32[1].phys_addr,
5827 5826 request_dma_obj.dma_cookie[0].dmac_address);
5828 5827 } else {
5829 5828 #ifdef _ILP32
5830 5829 con_log(CL_ANN1, (CE_CONT,
5831 5830 "issue_mfi_smp: DDI_MODEL_ILP32"));
5832 5831 sge32 = &smp->sgl[0].sge32[0];
5833 5832 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5834 5833 ddi_put32(acc_handle, &sge32[0].phys_addr,
5835 5834 response_dma_obj.dma_cookie[0].dmac_address);
5836 5835 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5837 5836 ddi_put32(acc_handle, &sge32[1].phys_addr,
5838 5837 request_dma_obj.dma_cookie[0].dmac_address);
5839 5838 #else
5840 5839 con_log(CL_ANN1, (CE_CONT,
5841 5840 "issue_mfi_smp: DDI_MODEL_LP64"));
5842 5841 sge64 = &smp->sgl[0].sge64[0];
5843 5842 ddi_put32(acc_handle, &sge64[0].length, response_xferlen);
5844 5843 ddi_put64(acc_handle, &sge64[0].phys_addr,
5845 5844 response_dma_obj.dma_cookie[0].dmac_address);
5846 5845 ddi_put32(acc_handle, &sge64[1].length, request_xferlen);
5847 5846 ddi_put64(acc_handle, &sge64[1].phys_addr,
5848 5847 request_dma_obj.dma_cookie[0].dmac_address);
5849 5848 #endif
5850 5849 }
5851 5850 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp : "
5852 5851 "smp->response_xferlen = %d, smp->request_xferlen = %d "
5853 5852 "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length),
5854 5853 ddi_get32(acc_handle, &sge32[1].length),
5855 5854 ddi_get32(acc_handle, &smp->data_xfer_len)));
5856 5855
5857 5856 cmd->sync_cmd = MRSAS_TRUE;
5858 5857 cmd->frame_count = 1;
5859 5858
5860 5859 if (instance->tbolt) {
5861 5860 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5862 5861 }
5863 5862
5864 5863 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5865 5864 con_log(CL_ANN, (CE_WARN,
5866 5865 "issue_mfi_smp: fw_ioctl failed"));
5867 5866 } else {
5868 5867 con_log(CL_ANN1, (CE_CONT,
5869 5868 "issue_mfi_smp: copy to user space"));
5870 5869
5871 5870 if (request_xferlen) {
5872 5871 for (i = 0; i < request_xferlen; i++) {
5873 5872 if (ddi_copyout(
5874 5873 (uint8_t *)request_dma_obj.buffer +
5875 5874 i, (uint8_t *)request_ubuf + i,
5876 5875 1, mode)) {
5877 5876 con_log(CL_ANN, (CE_WARN,
5878 5877 "issue_mfi_smp : copy to user space"
5879 5878 " failed"));
5880 5879 return (DDI_FAILURE);
5881 5880 }
5882 5881 }
5883 5882 }
5884 5883
5885 5884 if (response_xferlen) {
5886 5885 for (i = 0; i < response_xferlen; i++) {
5887 5886 if (ddi_copyout(
5888 5887 (uint8_t *)response_dma_obj.buffer
5889 5888 + i, (uint8_t *)response_ubuf
5890 5889 + i, 1, mode)) {
5891 5890 con_log(CL_ANN, (CE_WARN,
5892 5891 "issue_mfi_smp : copy to "
5893 5892 "user space failed"));
5894 5893 return (DDI_FAILURE);
5895 5894 }
5896 5895 }
5897 5896 }
5898 5897 }
5899 5898
5900 5899 ksmp->cmd_status = ddi_get8(acc_handle, &smp->cmd_status);
5901 5900 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
5902 5901 ksmp->cmd_status));
5903 5902 DTRACE_PROBE2(issue_smp, uint8_t, ksmp->cmd, uint8_t, ksmp->cmd_status);
5904 5903
5905 5904 if (request_xferlen) {
5906 5905 /* free kernel buffer */
5907 5906 if (mrsas_free_dma_obj(instance, request_dma_obj) !=
5908 5907 DDI_SUCCESS)
5909 5908 return (DDI_FAILURE);
5910 5909 }
5911 5910
5912 5911 if (response_xferlen) {
5913 5912 /* free kernel buffer */
5914 5913 if (mrsas_free_dma_obj(instance, response_dma_obj) !=
5915 5914 DDI_SUCCESS)
5916 5915 return (DDI_FAILURE);
5917 5916 }
5918 5917
5919 5918 return (DDI_SUCCESS);
5920 5919 }
5921 5920
5922 5921 /*
5923 5922 * issue_mfi_stp
5924 5923 */
5925 5924 static int
5926 5925 issue_mfi_stp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5927 5926 struct mrsas_cmd *cmd, int mode)
5928 5927 {
5929 5928 void *fis_ubuf;
5930 5929 void *data_ubuf;
5931 5930 uint32_t fis_xferlen = 0;
5932 5931 uint32_t new_xfer_length1 = 0;
5933 5932 uint32_t new_xfer_length2 = 0;
5934 5933 uint32_t data_xferlen = 0;
5935 5934 uint_t model;
5936 5935 dma_obj_t fis_dma_obj;
5937 5936 dma_obj_t data_dma_obj;
5938 5937 struct mrsas_stp_frame *kstp;
5939 5938 struct mrsas_stp_frame *stp;
5940 5939 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5941 5940 int i;
5942 5941
5943 5942 stp = &cmd->frame->stp;
5944 5943 kstp = (struct mrsas_stp_frame *)&ioctl->frame[0];
5945 5944
5946 5945 if (instance->adapterresetinprogress) {
5947 5946 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5948 5947 "returning mfi_pkt and setting TRAN_BUSY\n"));
5949 5948 return (DDI_FAILURE);
5950 5949 }
5951 5950 model = ddi_model_convert_from(mode & FMODELS);
5952 5951 if (model == DDI_MODEL_ILP32) {
5953 5952 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
5954 5953
5955 5954 fis_xferlen = kstp->sgl.sge32[0].length;
5956 5955 data_xferlen = kstp->sgl.sge32[1].length;
5957 5956
5958 5957 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
5959 5958 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
5960 5959 } else {
5961 5960 #ifdef _ILP32
5962 5961 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
5963 5962
5964 5963 fis_xferlen = kstp->sgl.sge32[0].length;
5965 5964 data_xferlen = kstp->sgl.sge32[1].length;
5966 5965
5967 5966 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
5968 5967 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
5969 5968 #else
5970 5969 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_LP64"));
5971 5970
5972 5971 fis_xferlen = kstp->sgl.sge64[0].length;
5973 5972 data_xferlen = kstp->sgl.sge64[1].length;
5974 5973
5975 5974 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
5976 5975 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
5977 5976 #endif
5978 5977 }
5979 5978
5980 5979
5981 5980 if (fis_xferlen) {
5982 5981 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: "
5983 5982 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));
5984 5983
5985 5984 /* means IOCTL requires DMA */
5986 5985 /* allocate the data transfer buffer */
5987 5986 /* fis_dma_obj.size = fis_xferlen; */
5988 5987 MRSAS_GET_BOUNDARY_ALIGNED_LEN(fis_xferlen,
5989 5988 new_xfer_length1, PAGESIZE);
5990 5989 fis_dma_obj.size = new_xfer_length1;
5991 5990 fis_dma_obj.dma_attr = mrsas_generic_dma_attr;
5992 5991 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5993 5992 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5994 5993 fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
5995 5994 fis_dma_obj.dma_attr.dma_attr_align = 1;
5996 5995
5997 5996 /* allocate kernel buffer for DMA */
5998 5997 if (mrsas_alloc_dma_obj(instance, &fis_dma_obj,
5999 5998 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
6000 5999 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp : "
6001 6000 "could not allocate data transfer buffer."));
6002 6001 return (DDI_FAILURE);
6003 6002 }
6004 6003 (void) memset(fis_dma_obj.buffer, 0, fis_xferlen);
6005 6004
6006 6005 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
6007 6006 for (i = 0; i < fis_xferlen; i++) {
6008 6007 if (ddi_copyin((uint8_t *)fis_ubuf + i,
6009 6008 (uint8_t *)fis_dma_obj.buffer + i, 1, mode)) {
6010 6009 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6011 6010 "copy from user space failed"));
6012 6011 return (DDI_FAILURE);
6013 6012 }
6014 6013 }
6015 6014 }
6016 6015
6017 6016 if (data_xferlen) {
6018 6017 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: data_ubuf = %p "
6019 6018 "data_xferlen = %x", data_ubuf, data_xferlen));
6020 6019
6021 6020 /* means IOCTL requires DMA */
6022 6021 /* allocate the data transfer buffer */
6023 6022 /* data_dma_obj.size = data_xferlen; */
6024 6023 MRSAS_GET_BOUNDARY_ALIGNED_LEN(data_xferlen, new_xfer_length2,
6025 6024 PAGESIZE);
6026 6025 data_dma_obj.size = new_xfer_length2;
6027 6026 data_dma_obj.dma_attr = mrsas_generic_dma_attr;
6028 6027 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
6029 6028 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
6030 6029 data_dma_obj.dma_attr.dma_attr_sgllen = 1;
6031 6030 data_dma_obj.dma_attr.dma_attr_align = 1;
6032 6031
6033 6032 /* allocate kernel buffer for DMA */
6034 6033 if (mrsas_alloc_dma_obj(instance, &data_dma_obj,
6035 6034 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
6036 6035 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6037 6036 "could not allocate data transfer buffer."));
6038 6037 return (DDI_FAILURE);
6039 6038 }
6040 6039 (void) memset(data_dma_obj.buffer, 0, data_xferlen);
6041 6040
6042 6041 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
6043 6042 for (i = 0; i < data_xferlen; i++) {
6044 6043 if (ddi_copyin((uint8_t *)data_ubuf + i,
6045 6044 (uint8_t *)data_dma_obj.buffer + i, 1, mode)) {
6046 6045 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6047 6046 "copy from user space failed"));
6048 6047 return (DDI_FAILURE);
6049 6048 }
6050 6049 }
6051 6050 }
6052 6051
6053 6052 ddi_put8(acc_handle, &stp->cmd, kstp->cmd);
6054 6053 ddi_put8(acc_handle, &stp->cmd_status, 0);
6055 6054 ddi_put8(acc_handle, &stp->connection_status, 0);
6056 6055 ddi_put8(acc_handle, &stp->target_id, kstp->target_id);
6057 6056 ddi_put8(acc_handle, &stp->sge_count, kstp->sge_count);
6058 6057
6059 6058 ddi_put16(acc_handle, &stp->timeout, kstp->timeout);
6060 6059 ddi_put32(acc_handle, &stp->data_xfer_len, kstp->data_xfer_len);
6061 6060
6062 6061 ddi_rep_put8(acc_handle, (uint8_t *)kstp->fis, (uint8_t *)stp->fis, 10,
6063 6062 DDI_DEV_AUTOINCR);
6064 6063
6065 6064 ddi_put16(acc_handle, &stp->flags, kstp->flags & ~MFI_FRAME_SGL64);
6066 6065 ddi_put32(acc_handle, &stp->stp_flags, kstp->stp_flags);
6067 6066 ddi_put32(acc_handle, &stp->sgl.sge32[0].length, fis_xferlen);
6068 6067 ddi_put32(acc_handle, &stp->sgl.sge32[0].phys_addr,
6069 6068 fis_dma_obj.dma_cookie[0].dmac_address);
6070 6069 ddi_put32(acc_handle, &stp->sgl.sge32[1].length, data_xferlen);
6071 6070 ddi_put32(acc_handle, &stp->sgl.sge32[1].phys_addr,
6072 6071 data_dma_obj.dma_cookie[0].dmac_address);
6073 6072
6074 6073 cmd->sync_cmd = MRSAS_TRUE;
6075 6074 cmd->frame_count = 1;
6076 6075
6077 6076 if (instance->tbolt) {
6078 6077 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
6079 6078 }
6080 6079
6081 6080 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
6082 6081 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed"));
6083 6082 } else {
6084 6083
6085 6084 if (fis_xferlen) {
6086 6085 for (i = 0; i < fis_xferlen; i++) {
6087 6086 if (ddi_copyout(
6088 6087 (uint8_t *)fis_dma_obj.buffer + i,
6089 6088 (uint8_t *)fis_ubuf + i, 1, mode)) {
6090 6089 con_log(CL_ANN, (CE_WARN,
6091 6090 "issue_mfi_stp : copy to "
6092 6091 "user space failed"));
6093 6092 return (DDI_FAILURE);
6094 6093 }
6095 6094 }
6096 6095 }
6097 6096 }
6098 6097 if (data_xferlen) {
6099 6098 for (i = 0; i < data_xferlen; i++) {
6100 6099 if (ddi_copyout(
6101 6100 (uint8_t *)data_dma_obj.buffer + i,
6102 6101 (uint8_t *)data_ubuf + i, 1, mode)) {
6103 6102 con_log(CL_ANN, (CE_WARN,
6104 6103 "issue_mfi_stp : copy to"
6105 6104 " user space failed"));
6106 6105 return (DDI_FAILURE);
6107 6106 }
6108 6107 }
6109 6108 }
6110 6109
6111 6110 kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status);
6112 6111 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: stp->cmd_status = %d",
6113 6112 kstp->cmd_status));
6114 6113 DTRACE_PROBE2(issue_stp, uint8_t, kstp->cmd, uint8_t, kstp->cmd_status);
6115 6114
6116 6115 if (fis_xferlen) {
6117 6116 /* free kernel buffer */
6118 6117 if (mrsas_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
6119 6118 return (DDI_FAILURE);
6120 6119 }
6121 6120
6122 6121 if (data_xferlen) {
6123 6122 /* free kernel buffer */
6124 6123 if (mrsas_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
6125 6124 return (DDI_FAILURE);
6126 6125 }
6127 6126
6128 6127 return (DDI_SUCCESS);
6129 6128 }
6130 6129
6131 6130 /*
6132 6131 * fill_up_drv_ver
6133 6132 */
6134 6133 void
6135 6134 fill_up_drv_ver(struct mrsas_drv_ver *dv)
6136 6135 {
6137 6136 (void) memset(dv, 0, sizeof (struct mrsas_drv_ver));
6138 6137
6139 6138 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
6140 6139 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
6141 6140 (void) memcpy(dv->drv_name, "mr_sas", strlen("mr_sas"));
6142 6141 (void) memcpy(dv->drv_ver, MRSAS_VERSION, strlen(MRSAS_VERSION));
6143 6142 (void) memcpy(dv->drv_rel_date, MRSAS_RELDATE,
6144 6143 strlen(MRSAS_RELDATE));
6145 6144
6146 6145 }
6147 6146
6148 6147 /*
6149 6148 * handle_drv_ioctl
6150 6149 */
6151 6150 static int
6152 6151 handle_drv_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6153 6152 int mode)
6154 6153 {
6155 6154 int i;
6156 6155 int rval = DDI_SUCCESS;
6157 6156 int *props = NULL;
6158 6157 void *ubuf;
6159 6158
6160 6159 uint8_t *pci_conf_buf;
6161 6160 uint32_t xferlen;
6162 6161 uint32_t num_props;
6163 6162 uint_t model;
6164 6163 struct mrsas_dcmd_frame *kdcmd;
6165 6164 struct mrsas_drv_ver dv;
6166 6165 struct mrsas_pci_information pi;
6167 6166
6168 6167 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
6169 6168
6170 6169 model = ddi_model_convert_from(mode & FMODELS);
6171 6170 if (model == DDI_MODEL_ILP32) {
6172 6171 con_log(CL_ANN1, (CE_CONT,
6173 6172 "handle_drv_ioctl: DDI_MODEL_ILP32"));
6174 6173
6175 6174 xferlen = kdcmd->sgl.sge32[0].length;
6176 6175
6177 6176 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6178 6177 } else {
6179 6178 #ifdef _ILP32
6180 6179 con_log(CL_ANN1, (CE_CONT,
6181 6180 "handle_drv_ioctl: DDI_MODEL_ILP32"));
6182 6181 xferlen = kdcmd->sgl.sge32[0].length;
6183 6182 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6184 6183 #else
6185 6184 con_log(CL_ANN1, (CE_CONT,
6186 6185 "handle_drv_ioctl: DDI_MODEL_LP64"));
6187 6186 xferlen = kdcmd->sgl.sge64[0].length;
6188 6187 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
6189 6188 #endif
6190 6189 }
6191 6190 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6192 6191 "dataBuf=%p size=%d bytes", ubuf, xferlen));
6193 6192
6194 6193 switch (kdcmd->opcode) {
6195 6194 case MRSAS_DRIVER_IOCTL_DRIVER_VERSION:
6196 6195 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6197 6196 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION"));
6198 6197
6199 6198 fill_up_drv_ver(&dv);
6200 6199
6201 6200 if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
6202 6201 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6203 6202 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION : "
6204 6203 "copy to user space failed"));
6205 6204 kdcmd->cmd_status = 1;
6206 6205 rval = 1;
6207 6206 } else {
6208 6207 kdcmd->cmd_status = 0;
6209 6208 }
6210 6209 break;
6211 6210 case MRSAS_DRIVER_IOCTL_PCI_INFORMATION:
6212 6211 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
6213 6212 "MRSAS_DRIVER_IOCTL_PCI_INFORMAITON"));
6214 6213
6215 6214 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
6216 6215 0, "reg", &props, &num_props)) {
6217 6216 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6218 6217 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6219 6218 "ddi_prop_look_int_array failed"));
6220 6219 rval = DDI_FAILURE;
6221 6220 } else {
6222 6221
6223 6222 pi.busNumber = (props[0] >> 16) & 0xFF;
6224 6223 pi.deviceNumber = (props[0] >> 11) & 0x1f;
6225 6224 pi.functionNumber = (props[0] >> 8) & 0x7;
6226 6225 ddi_prop_free((void *)props);
6227 6226 }
6228 6227
6229 6228 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;
6230 6229
6231 6230 for (i = 0; i < (sizeof (struct mrsas_pci_information) -
6232 6231 offsetof(struct mrsas_pci_information, pciHeaderInfo));
6233 6232 i++) {
6234 6233 pci_conf_buf[i] =
6235 6234 pci_config_get8(instance->pci_handle, i);
6236 6235 }
6237 6236
6238 6237 if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
6239 6238 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6240 6239 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6241 6240 "copy to user space failed"));
6242 6241 kdcmd->cmd_status = 1;
6243 6242 rval = 1;
6244 6243 } else {
6245 6244 kdcmd->cmd_status = 0;
6246 6245 }
6247 6246 break;
6248 6247 default:
6249 6248 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6250 6249 "invalid driver specific IOCTL opcode = 0x%x",
6251 6250 kdcmd->opcode));
6252 6251 kdcmd->cmd_status = 1;
6253 6252 rval = DDI_FAILURE;
6254 6253 break;
6255 6254 }
6256 6255
6257 6256 return (rval);
6258 6257 }
6259 6258
/*
 * handle_mfi_ioctl
 *
 * Dispatch one user-level MFI pass-through ioctl frame to the firmware.
 * A driver command packet is borrowed from the appropriate pool, the
 * MFI opcode embedded in the user frame selects the issue routine, and
 * the packet is returned to its pool before the result is handed back.
 */
static int
handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
    int mode)
{
	int	rval = DDI_SUCCESS;

	struct mrsas_header	*hdr;
	struct mrsas_cmd	*cmd;

	/* Thunderbolt HBAs draw MFI frames from the RAID-message pool. */
	if (instance->tbolt) {
		cmd = get_raid_msg_mfi_pkt(instance);
	} else {
		cmd = mrsas_get_mfi_pkt(instance);
	}
	if (!cmd) {
		con_log(CL_ANN, (CE_WARN, "mr_sas: "
		    "failed to get a cmd packet"));
		DTRACE_PROBE2(mfi_ioctl_err, uint16_t,
		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	hdr = (struct mrsas_header *)&ioctl->frame[0];

	/*
	 * NOTE(review): hdr points into the copied-in ioctl buffer (kernel
	 * heap), yet it is read through the DMA access handle of the
	 * borrowed frame.  This works on the platforms the driver targets,
	 * but is worth confirming against the DDI access rules.
	 */
	switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
	case MFI_CMD_OP_DCMD:
		rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_SMP:
		rval = issue_mfi_smp(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_STP:
		rval = issue_mfi_stp(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_LD_SCSI:
	case MFI_CMD_OP_PD_SCSI:
		rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
		break;
	default:
		con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
		    "invalid mfi ioctl hdr->cmd = %d", hdr->cmd));
		rval = DDI_FAILURE;
		break;
	}

	/* Any FMA-detected handle fault overrides the command result. */
	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
		rval = DDI_FAILURE;

	/* Return the packet to the pool it was borrowed from. */
	if (instance->tbolt) {
		return_raid_msg_mfi_pkt(instance, cmd);
	} else {
		mrsas_return_mfi_pkt(instance, cmd);
	}

	return (rval);
}
6324 6323
6325 6324 /*
6326 6325 * AEN
6327 6326 */
6328 6327 static int
6329 6328 handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen)
6330 6329 {
6331 6330 int rval = 0;
6332 6331
6333 6332 rval = register_mfi_aen(instance, instance->aen_seq_num,
6334 6333 aen->class_locale_word);
6335 6334
6336 6335 aen->cmd_status = (uint8_t)rval;
6337 6336
6338 6337 return (rval);
6339 6338 }
6340 6339
/*
 * register_mfi_aen
 *
 * Build and issue the MR_DCMD_CTRL_EVENT_WAIT DCMD that asks the
 * firmware to complete a frame when an event matching the requested
 * class/locale occurs.  If a registration is already outstanding and
 * already covers the requested events, nothing is done; otherwise the
 * old registration is aborted and re-issued with the merged superset.
 * Returns 0 on success, ENOMEM if no command packet is available, or
 * the abort failure code.
 */
static int
register_mfi_aen(struct mrsas_instance *instance, uint32_t seq_num,
    uint32_t class_locale_word)
{
	int	ret_val;

	struct mrsas_cmd	*cmd, *aen_cmd;
	struct mrsas_dcmd_frame	*dcmd;
	union mrsas_evt_class_locale	curr_aen;
	union mrsas_evt_class_locale	prev_aen;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new
	 * AEN request we currently have. If it is, then we don't have
	 * to do anything. In other words, whichever events the current
	 * AEN request is subscribing to, have already been subscribed
	 * to.
	 *
	 * If the old_cmd is _not_ inclusive, then we have to abort
	 * that command, form a class_locale that is superset of both
	 * old and current and re-issue to the FW
	 */

	/* Convert the caller's word to host order for the comparison. */
	curr_aen.word = LE_32(class_locale_word);
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	aen_cmd = instance->aen_cmd;
	if (aen_cmd) {
		prev_aen.word = ddi_get32(aen_cmd->frame_dma_obj.acc_handle,
		    &aen_cmd->frame->dcmd.mbox.w[1]);
		prev_aen.word = LE_32(prev_aen.word);
		prev_aen.members.locale = LE_16(prev_aen.members.locale);
		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included.
		 *
		 * Locale numbers don't have such hierarchy. They are bitmap
		 * values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */

			return (0);
		} else {
			/* Merge old and new subscriptions into a superset. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			ret_val = abort_aen_cmd(instance, aen_cmd);

			if (ret_val) {
				con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
				    "failed to abort prevous AEN command"));

				return (ret_val);
			}
		}
	} else {
		curr_aen.word = LE_32(class_locale_word);
		curr_aen.members.locale = LE_16(curr_aen.members.locale);
	}

	if (instance->tbolt) {
		cmd = get_raid_msg_mfi_pkt(instance);
	} else {
		cmd = mrsas_get_mfi_pkt(instance);
	}

	if (!cmd) {
		DTRACE_PROBE2(mfi_aen_err, uint16_t, instance->fw_outstanding,
		    uint16_t, instance->max_fw_cmds);
		return (ENOMEM);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	/* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	/* Scrub the buffer the firmware will fill with event detail. */
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	/* Prepare DCMD for aen registration */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_evt_detail));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_EVENT_WAIT);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], seq_num);
	/* Swap the merged class/locale back to wire order for mbox.w[1]. */
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	curr_aen.word = LE_32(curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[1],
	    curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_evt_detail));

	instance->aen_seq_num = seq_num;


	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	instance->aen_cmd = cmd;

	cmd->frame_count = 1;

	/* Issue the aen registration frame */
	/* atomic_add_16 (&instance->fw_outstanding, 1); */
	if (instance->tbolt) {
		mr_sas_tbolt_build_mfi_cmd(instance, cmd);
	}
	instance->func_ptr->issue_cmd(cmd, instance);

	return (0);
}
6480 6479
6481 6480 void
6482 6481 display_scsi_inquiry(caddr_t scsi_inq)
6483 6482 {
6484 6483 #define MAX_SCSI_DEVICE_CODE 14
6485 6484 int i;
6486 6485 char inquiry_buf[256] = {0};
6487 6486 int len;
6488 6487 const char *const scsi_device_types[] = {
6489 6488 "Direct-Access ",
6490 6489 "Sequential-Access",
6491 6490 "Printer ",
6492 6491 "Processor ",
6493 6492 "WORM ",
6494 6493 "CD-ROM ",
6495 6494 "Scanner ",
6496 6495 "Optical Device ",
6497 6496 "Medium Changer ",
6498 6497 "Communications ",
6499 6498 "Unknown ",
6500 6499 "Unknown ",
6501 6500 "Unknown ",
6502 6501 "Enclosure ",
6503 6502 };
6504 6503
6505 6504 len = 0;
6506 6505
6507 6506 len += snprintf(inquiry_buf + len, 265 - len, " Vendor: ");
6508 6507 for (i = 8; i < 16; i++) {
6509 6508 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6510 6509 scsi_inq[i]);
6511 6510 }
6512 6511
6513 6512 len += snprintf(inquiry_buf + len, 265 - len, " Model: ");
6514 6513
6515 6514 for (i = 16; i < 32; i++) {
6516 6515 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6517 6516 scsi_inq[i]);
6518 6517 }
6519 6518
6520 6519 len += snprintf(inquiry_buf + len, 265 - len, " Rev: ");
6521 6520
6522 6521 for (i = 32; i < 36; i++) {
6523 6522 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6524 6523 scsi_inq[i]);
6525 6524 }
6526 6525
6527 6526 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6528 6527
6529 6528
6530 6529 i = scsi_inq[0] & 0x1f;
6531 6530
6532 6531
6533 6532 len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ",
6534 6533 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
6535 6534 "Unknown ");
6536 6535
6537 6536
6538 6537 len += snprintf(inquiry_buf + len, 265 - len,
6539 6538 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
6540 6539
6541 6540 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
6542 6541 len += snprintf(inquiry_buf + len, 265 - len, " CCS\n");
6543 6542 } else {
6544 6543 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6545 6544 }
6546 6545
6547 6546 con_log(CL_DLEVEL2, (CE_CONT, inquiry_buf));
6548 6547 }
6549 6548
/*
 * io_timeout_checker
 *
 * Periodic (one second) watchdog.  Decrements the per-command timer on
 * every pending command; when one has expired, either resets the
 * controller (OCR) or, when OCR is disabled or retries are exhausted,
 * kills the adapter.  Reschedules itself unless the adapter was killed.
 */
static void
io_timeout_checker(void *arg)
{
	struct scsi_pkt *pkt;
	struct mrsas_instance *instance = arg;
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_header *hdr;
	int time = 0;
	int counter = 0;
	struct mlist_head *pos, *next;
	mlist_t process_list;

	/* While an OCR is running, just re-arm the timer and return. */
	if (instance->adapterresetinprogress == 1) {
		con_log(CL_ANN, (CE_NOTE, "io_timeout_checker:"
		    " reset in progress"));

		instance->timeout_id = timeout(io_timeout_checker,
		    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		return;
	}

	/* See if this check needs to be in the beginning or last in ISR */
	if (mrsas_initiate_ocr_if_fw_is_faulty(instance) == 1) {
		dev_err(instance->dip, CE_WARN, "io_timeout_checker: "
		    "FW Fault, calling reset adapter");
		dev_err(instance->dip, CE_CONT, "io_timeout_checker: "
		    "fw_outstanding 0x%X max_fw_cmds 0x%X",
		    instance->fw_outstanding, instance->max_fw_cmds);
		if (instance->adapterresetinprogress == 0) {
			instance->adapterresetinprogress = 1;
			if (instance->tbolt)
				(void) mrsas_tbolt_reset_ppc(instance);
			else
				(void) mrsas_reset_ppc(instance);
			instance->adapterresetinprogress = 0;
		}
		instance->timeout_id = timeout(io_timeout_checker,
		    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		return;
	}

	/* NOTE(review): process_list is initialized but never used. */
	INIT_LIST_HEAD(&process_list);

	/* Age every pending command; stop at the first expired one. */
	mutex_enter(&instance->cmd_pend_mtx);
	mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);

		if (cmd == NULL) {
			continue;
		}

		if (cmd->sync_cmd == MRSAS_TRUE) {
			/*
			 * NOTE(review): hdr is the address of an embedded
			 * member and can never be NULL, so this check (and
			 * hdr itself) is effectively dead code.
			 */
			hdr = (struct mrsas_header *)&cmd->frame->hdr;
			if (hdr == NULL) {
				continue;
			}
			time = --cmd->drv_pkt_time;
		} else {
			pkt = cmd->pkt;
			if (pkt == NULL) {
				continue;
			}
			time = --cmd->drv_pkt_time;
		}
		if (time <= 0) {
			dev_err(instance->dip, CE_WARN, "%llx: "
			    "io_timeout_checker: TIMING OUT: pkt: %p, "
			    "cmd %p fw_outstanding 0x%X max_fw_cmds 0x%X",
			    gethrtime(), (void *)pkt, (void *)cmd,
			    instance->fw_outstanding, instance->max_fw_cmds);

			counter++;
			break;
		}
	}
	mutex_exit(&instance->cmd_pend_mtx);

	/* counter != 0 means cmd points at the command that timed out. */
	if (counter) {
		if (instance->disable_online_ctrl_reset == 1) {
			/* Firmware forbids OCR: the adapter must die. */
			dev_err(instance->dip, CE_WARN, "%s(): OCR is NOT "
			    "supported by Firmware, KILL adapter!!!",
			    __func__);

			if (instance->tbolt)
				mrsas_tbolt_kill_adapter(instance);
			else
				(void) mrsas_kill_adapter(instance);

			return;
		} else {
			if (cmd->retry_count_for_ocr <= IO_RETRY_COUNT) {
				if (instance->adapterresetinprogress == 0) {
					if (instance->tbolt) {
						(void) mrsas_tbolt_reset_ppc(
						    instance);
					} else {
						(void) mrsas_reset_ppc(
						    instance);
					}
				}
			} else {
				/* Reset retries exhausted: give up. */
				dev_err(instance->dip, CE_WARN,
				    "io_timeout_checker: "
				    "cmd %p cmd->index %d "
				    "timed out even after 3 resets: "
				    "so KILL adapter", (void *)cmd, cmd->index);

				mrsas_print_cmd_details(instance, cmd, 0xDD);

				if (instance->tbolt)
					mrsas_tbolt_kill_adapter(instance);
				else
					(void) mrsas_kill_adapter(instance);
				return;
			}
		}
	}
	con_log(CL_ANN, (CE_NOTE, "mrsas: "
	    "schedule next timeout check: "
	    "do timeout \n"));
	instance->timeout_id =
	    timeout(io_timeout_checker, (void *)instance,
	    drv_usectohz(MRSAS_1_SECOND));
}
6674 6673
6675 6674 static uint32_t
6676 6675 read_fw_status_reg_ppc(struct mrsas_instance *instance)
6677 6676 {
6678 6677 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
6679 6678 }
6680 6679
/*
 * issue_cmd_ppc
 *
 * Fire one MFI frame at the controller through the inbound queue port.
 * The outstanding count is bumped before the hardware can complete the
 * frame; commands with a SCSI packet are also pushed onto the pending
 * list (except during reset, when only the timer is refreshed).
 */
static void
issue_cmd_ppc(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
{
	struct scsi_pkt *pkt;

	/* Account for the command before the hardware can complete it. */
	atomic_inc_16(&instance->fw_outstanding);

	pkt = cmd->pkt;
	if (pkt) {
		con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
		    "ISSUED CMD TO FW : called : cmd:"
		    ": %p instance : %p pkt : %p pkt_time : %x\n",
		    gethrtime(), (void *)cmd, (void *)instance,
		    (void *)pkt, cmd->drv_pkt_time));
		if (instance->adapterresetinprogress) {
			/* During OCR just refresh the watchdog timer. */
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
			con_log(CL_ANN1, (CE_NOTE, "Reset the scsi_pkt timer"));
		} else {
			push_pending_mfi_pkt(instance, cmd);
		}

	} else {
		con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
		    "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
		    "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
	}

	/* Serialize inbound queue-port writes with other issuers. */
	mutex_enter(&instance->reg_write_mtx);
	/* Issue the command to the FW */
	WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);
	mutex_exit(&instance->reg_write_mtx);

}
6714 6713
/*
 * issue_cmd_in_sync_mode
 *
 * Issue an MFI frame and block until the interrupt path marks it
 * complete (cmd_status changes away from ENODATA).  During an adapter
 * reset the frame is issued fire-and-forget and the routine returns
 * immediately.
 */
static int
issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	int	i;
	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
	struct mrsas_header *hdr = &cmd->frame->hdr;

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called"));

	if (instance->adapterresetinprogress) {
		/* Refresh the watchdog timer from the frame's own timeout. */
		cmd->drv_pkt_time = ddi_get16(
		    cmd->frame_dma_obj.acc_handle, &hdr->timeout);
		if (cmd->drv_pkt_time < debug_timeout_g)
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;

		con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: "
		    "issue and return in reset case\n"));
		WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
		    (((cmd->frame_count - 1) << 1) | 1), instance);

		return (DDI_SUCCESS);
	} else {
		con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: pushing the pkt\n"));
		push_pending_mfi_pkt(instance, cmd);
	}

	/* Sentinel the completion path will overwrite. */
	cmd->cmd_status = ENODATA;

	mutex_enter(&instance->reg_write_mtx);
	/* Issue the command to the FW */
	WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);
	mutex_exit(&instance->reg_write_mtx);

	/*
	 * NOTE(review): cv_wait() has no timeout, so `i' counts cv wakeups,
	 * not milliseconds; the i < (msecs - 1) test below can only fail if
	 * the cv is signalled ~msecs times without cmd_status changing.
	 * Verify whether cv_timedwait() was intended here.
	 */
	mutex_enter(&instance->int_cmd_mtx);
	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
	}
	mutex_exit(&instance->int_cmd_mtx);

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done"));

	if (i < (msecs -1)) {
		return (DDI_SUCCESS);
	} else {
		return (DDI_FAILURE);
	}
}
6767 6766
/*
 * issue_cmd_in_poll_mode
 *
 * Issue an MFI frame with the completion kept out of the reply queue
 * and busy-wait (1ms steps, up to MFI_POLL_TIMEOUT_SECS) for the
 * firmware to overwrite the sentinel command status.
 */
static int
issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	int	i;
	uint16_t	flags;
	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
	struct mrsas_header *frame_hdr;

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called"));

	frame_hdr = (struct mrsas_header *)cmd->frame;
	/* Seed status with a sentinel the firmware is bound to change. */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
	/* We poll, so keep the completion out of the reply queue. */
	flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);

	/* issue the frame using inbound queue port */
	WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);

	/* wait for cmd_status to change from 0xFF */
	for (i = 0; i < msecs && (
	    ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
	    == MFI_CMD_STATUS_POLL_MODE); i++) {
		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
	}

	if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
	    == MFI_CMD_STATUS_POLL_MODE) {
		con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
		    "cmd polling timed out"));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
6810 6809
/*
 * enable_intr_ppc
 *
 * Unmask controller interrupts: clear any latched doorbell state, open
 * the outbound interrupt mask for reply-message interrupts, then read
 * the mask back to flush the posted PCI writes.
 */
static void
enable_intr_ppc(struct mrsas_instance *instance)
{
	uint32_t	mask;

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called"));

	if (instance->skinny) {
		/* For SKINNY, write ~0x1, from BSD's mfi driver. */
		WR_OB_INTR_MASK(0xfffffffe, instance);
	} else {
		/* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
		WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);

		/* WR_OB_INTR_MASK(~0x80000000, instance); */
		WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance);
	}

	/* dummy read to force PCI flush */
	mask = RD_OB_INTR_MASK(instance);

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
	    "outbound_intr_mask = 0x%x", mask));
}
6835 6834
/*
 * disable_intr_ppc
 *
 * Mask all outbound interrupts from the controller, then read the mask
 * back to flush the posted PCI write.
 */
static void
disable_intr_ppc(struct mrsas_instance *instance)
{
	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called"));

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
	    "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));

	/* For now, assume there are no extras needed for Skinny support. */

	WR_OB_INTR_MASK(OB_INTR_MASK, instance);

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
	    "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));

	/* dummy read to force PCI flush */
	(void) RD_OB_INTR_MASK(instance);
}
6854 6853
/*
 * intr_ack_ppc
 *
 * Interrupt claim/acknowledge.  Returns DDI_INTR_CLAIMED only when the
 * outbound status register shows a reply-message interrupt and the
 * register mapping is healthy; a claimed interrupt is cleared by
 * writing the status value back to the controller.
 */
static int
intr_ack_ppc(struct mrsas_instance *instance)
{
	uint32_t	status;
	int ret = DDI_INTR_CLAIMED;

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called"));

	/* check if it is our interrupt */
	status = RD_OB_INTR_STATUS(instance);

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status));

	/*
	 * NOTE: Some drivers call out SKINNY here, but the return is the same
	 * for SKINNY and 2108.
	 */
	if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) {
		ret = DDI_INTR_UNCLAIMED;
	}

	/* A faulted register mapping also means we cannot claim it. */
	if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		ret = DDI_INTR_UNCLAIMED;
	}

	if (ret == DDI_INTR_UNCLAIMED) {
		return (ret);
	}

	/*
	 * Clear the interrupt by writing back the same value.
	 * Another case where SKINNY is slightly different.
	 */
	if (instance->skinny) {
		WR_OB_INTR_STATUS(status, instance);
	} else {
		WR_OB_DOORBELL_CLEAR(status, instance);
	}

	/* dummy READ */
	status = RD_OB_INTR_STATUS(instance);

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared"));

	return (ret);
}
6902 6901
/*
 * Marks HBA as bad. This will be called either when an
 * IO packet times out even after 3 FW resets
 * or FW is found to be fault even after 3 continuous resets.
 */

static int
mrsas_kill_adapter(struct mrsas_instance *instance)
{
	/* Already dead: nothing more to do. */
	if (instance->deadadapter == 1)
		return (DDI_FAILURE);

	con_log(CL_ANN1, (CE_NOTE, "mrsas_kill_adapter: "
	    "Writing to doorbell with MFI_STOP_ADP "));
	/* Latch the dead flag under the OCR lock so resets observe it. */
	mutex_enter(&instance->ocr_flags_mtx);
	instance->deadadapter = 1;
	mutex_exit(&instance->ocr_flags_mtx);
	instance->func_ptr->disable_intr(instance);
	/* Tell the firmware to stop, then fail back everything pending. */
	WR_IB_DOORBELL(MFI_STOP_ADP, instance);
	(void) mrsas_complete_pending_cmds(instance);
	return (DDI_SUCCESS);
}
6925 6924
6926 6925
/*
 * mrsas_reset_ppc
 *
 * Online controller reset (OCR) for MFI-class adapters: unlock the
 * diagnostic register with the magic write sequence, request the
 * adapter reset, wait for the firmware to come ready again, then
 * rebuild driver state and re-drive pending commands plus the AEN
 * registration.  Kills the adapter after MAX_FW_RESET_COUNT faults.
 */
static int
mrsas_reset_ppc(struct mrsas_instance *instance)
{
	uint32_t status;
	uint32_t retry = 0;
	uint32_t cur_abs_reg_val;
	uint32_t fw_state;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* A dead adapter is never reset again. */
	if (instance->deadadapter == 1) {
		dev_err(instance->dip, CE_WARN, "mrsas_reset_ppc: "
		    "no more resets as HBA has been marked dead ");
		return (DDI_FAILURE);
	}
	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 1;
	mutex_exit(&instance->ocr_flags_mtx);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: adpterresetinprogress "
	    "flag set, time %llx", gethrtime()));

	instance->func_ptr->disable_intr(instance);
retry_reset:
	/* Magic byte sequence that unlocks the diagnostic register. */
	WR_IB_WRITE_SEQ(0, instance);
	WR_IB_WRITE_SEQ(4, instance);
	WR_IB_WRITE_SEQ(0xb, instance);
	WR_IB_WRITE_SEQ(2, instance);
	WR_IB_WRITE_SEQ(7, instance);
	WR_IB_WRITE_SEQ(0xd, instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: magic number written "
	    "to write sequence register\n"));
	delay(100 * drv_usectohz(MILLISEC));
	status = RD_OB_DRWE(instance);

	/* Poll in 100ms steps for diag-write-enable to come up. */
	while (!(status & DIAG_WRITE_ENABLE)) {
		delay(100 * drv_usectohz(MILLISEC));
		status = RD_OB_DRWE(instance);
		if (retry++ == 100) {
			dev_err(instance->dip, CE_WARN,
			    "mrsas_reset_ppc: DRWE bit "
			    "check retry count %d", retry);
			return (DDI_FAILURE);
		}
	}
	/* Request the reset, then wait for the bit to self-clear. */
	WR_IB_DRWE(status | DIAG_RESET_ADAPTER, instance);
	delay(100 * drv_usectohz(MILLISEC));
	status = RD_OB_DRWE(instance);
	/* NOTE(review): `retry' is not reset here; both polls share it. */
	while (status & DIAG_RESET_ADAPTER) {
		delay(100 * drv_usectohz(MILLISEC));
		status = RD_OB_DRWE(instance);
		if (retry++ == 100) {
			dev_err(instance->dip, CE_WARN, "mrsas_reset_ppc: "
			    "RESET FAILED. KILL adapter called.");

			(void) mrsas_kill_adapter(instance);
			return (DDI_FAILURE);
		}
	}
	con_log(CL_ANN, (CE_NOTE, "mrsas_reset_ppc: Adapter reset complete"));
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mfi_state_transition_to_ready"));

	/* Mark HBA as bad, if FW is fault after 3 continuous resets */
	if (mfi_state_transition_to_ready(instance) ||
	    debug_fw_faults_after_ocr_g == 1) {
		cur_abs_reg_val =
		    instance->func_ptr->read_fw_status_reg(instance);
		fw_state = cur_abs_reg_val & MFI_STATE_MASK;

#ifdef OCRDEBUG
		con_log(CL_ANN1, (CE_NOTE,
		    "mrsas_reset_ppc :before fake: FW is not ready "
		    "FW state = 0x%x", fw_state));
		if (debug_fw_faults_after_ocr_g == 1)
			fw_state = MFI_STATE_FAULT;
#endif

		con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc : FW is not ready "
		    "FW state = 0x%x", fw_state));

		if (fw_state == MFI_STATE_FAULT) {
			/* increment the count */
			instance->fw_fault_count_after_ocr++;
			if (instance->fw_fault_count_after_ocr
			    < MAX_FW_RESET_COUNT) {
				dev_err(instance->dip, CE_WARN,
				    "mrsas_reset_ppc: "
				    "FW is in fault after OCR count %d "
				    "Retry Reset",
				    instance->fw_fault_count_after_ocr);
				goto retry_reset;

			} else {
				dev_err(instance->dip, CE_WARN,
				    "mrsas_reset_ppc: "
				    "Max Reset Count exceeded >%d"
				    "Mark HBA as bad, KILL adapter",
				    MAX_FW_RESET_COUNT);

				(void) mrsas_kill_adapter(instance);
				return (DDI_FAILURE);
			}
		}
	}
	/* reset the counter as FW is up after OCR */
	instance->fw_fault_count_after_ocr = 0;


	/* Rewind the reply ring indices to a clean state. */
	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->producer, 0);

	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer, 0);

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    " after resetting produconsumer chck indexs:"
	    "producer %x consumer %x", *instance->producer,
	    *instance->consumer));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mrsas_issue_init_mfi"));
	(void) mrsas_issue_init_mfi(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "mrsas_issue_init_mfi Done"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mrsas_print_pending_cmd\n"));
	(void) mrsas_print_pending_cmds(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "mrsas_print_pending_cmd done\n"));

	instance->func_ptr->enable_intr(instance);
	instance->fw_outstanding = 0;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mrsas_issue_pending_cmds"));
	(void) mrsas_issue_pending_cmds(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "issue_pending_cmds done.\n"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling aen registration"));


	/*
	 * NOTE(review): assumes instance->aen_cmd is non-NULL here; an OCR
	 * before the first AEN registration would dereference NULL.
	 */
	instance->aen_cmd->retry_count_for_ocr = 0;
	instance->aen_cmd->drv_pkt_time = 0;

	instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
	con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag.\n"));

	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 0;
	mutex_exit(&instance->ocr_flags_mtx);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "adpterresetinprogress flag unset"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc done\n"));
	return (DDI_SUCCESS);
}
7086 7085
7087 7086 /*
7088 7087 * FMA functions.
7089 7088 */
7090 7089 int
7091 7090 mrsas_common_check(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
7092 7091 {
7093 7092 int ret = DDI_SUCCESS;
7094 7093
7095 7094 if (cmd != NULL &&
7096 7095 mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
7097 7096 DDI_SUCCESS) {
7098 7097 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7099 7098 if (cmd->pkt != NULL) {
7100 7099 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7101 7100 cmd->pkt->pkt_statistics = 0;
7102 7101 }
7103 7102 ret = DDI_FAILURE;
7104 7103 }
7105 7104 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
7106 7105 != DDI_SUCCESS) {
7107 7106 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7108 7107 if (cmd != NULL && cmd->pkt != NULL) {
7109 7108 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7110 7109 cmd->pkt->pkt_statistics = 0;
7111 7110 }
7112 7111 ret = DDI_FAILURE;
7113 7112 }
7114 7113 if (mrsas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) !=
7115 7114 DDI_SUCCESS) {
7116 7115 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7117 7116 if (cmd != NULL && cmd->pkt != NULL) {
7118 7117 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7119 7118 cmd->pkt->pkt_statistics = 0;
7120 7119 }
7121 7120 ret = DDI_FAILURE;
7122 7121 }
7123 7122 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
7124 7123 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7125 7124
7126 7125 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0);
7127 7126
7128 7127 if (cmd != NULL && cmd->pkt != NULL) {
7129 7128 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7130 7129 cmd->pkt->pkt_statistics = 0;
7131 7130 }
7132 7131 ret = DDI_FAILURE;
7133 7132 }
7134 7133
7135 7134 return (ret);
7136 7135 }
7137 7136
/*
 * mrsas_fm_error_cb: FMA error callback registered through
 * ddi_fm_handler_register() in mrsas_fm_init().  Posts the PCI ereport
 * and reports the framework-supplied status back unchanged.
 */
/*ARGSUSED*/
static int
mrsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
7149 7148
/*
 * mrsas_fm_init: register this instance with the IO Fault Services
 * framework according to instance->fm_capabilities.  NOTE: also mutates
 * the file-global access/DMA attribute templates (endian_attr,
 * mrsas_generic_dma_attr) used for subsequently allocated handles, so it
 * must run before those handles are set up.
 */
static void
mrsas_fm_init(struct mrsas_instance *instance)
{
	/* Need to change iblock to priority for new MSI intr */
	ddi_iblock_cookie_t fm_ibc;

	/* Only register with IO Fault Services if we have some capability */
	if (instance->fm_capabilities) {
		/* Adjust access and dma attributes for FMA */
		endian_attr.devacc_attr_access = DDI_FLAGERR_ACC;
		mrsas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;

		/*
		 * Register capabilities with IO Fault Services.
		 * fm_capabilities will be updated to indicate
		 * capabilities actually supported (not requested.)
		 */

		ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc);

		/*
		 * Initialize pci ereport capabilities if ereport
		 * capable (should always be.)
		 */

		if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
			pci_ereport_setup(instance->dip);
		}

		/*
		 * Register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
			ddi_fm_handler_register(instance->dip,
			    mrsas_fm_error_cb, (void*) instance);
		}
	} else {
		/* No FMA: fall back to the default, non-error-aware attrs. */
		endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
		mrsas_generic_dma_attr.dma_attr_flags = 0;
	}
}
7192 7191
/*
 * mrsas_fm_fini: undo mrsas_fm_init() in reverse order — unregister the
 * error callback, tear down pci ereports, detach from IO Fault Services
 * and restore the default access/DMA attribute templates.
 */
static void
mrsas_fm_fini(struct mrsas_instance *instance)
{
	/* Only unregister FMA capabilities if registered */
	if (instance->fm_capabilities) {
		/*
		 * Un-register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
			ddi_fm_handler_unregister(instance->dip);
		}

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
			pci_ereport_teardown(instance->dip);
		}

		/* Unregister from IO Fault Services */
		ddi_fm_fini(instance->dip);

		/* Adjust access and dma attributes for FMA */
		endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
		mrsas_generic_dma_attr.dma_attr_flags = 0;
	}
}
7221 7220
7222 7221 int
7223 7222 mrsas_check_acc_handle(ddi_acc_handle_t handle)
7224 7223 {
7225 7224 ddi_fm_error_t de;
7226 7225
7227 7226 if (handle == NULL) {
7228 7227 return (DDI_FAILURE);
7229 7228 }
7230 7229
7231 7230 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
7232 7231
7233 7232 return (de.fme_status);
7234 7233 }
7235 7234
7236 7235 int
7237 7236 mrsas_check_dma_handle(ddi_dma_handle_t handle)
7238 7237 {
7239 7238 ddi_fm_error_t de;
7240 7239
7241 7240 if (handle == NULL) {
7242 7241 return (DDI_FAILURE);
7243 7242 }
7244 7243
7245 7244 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
7246 7245
7247 7246 return (de.fme_status);
7248 7247 }
7249 7248
7250 7249 void
7251 7250 mrsas_fm_ereport(struct mrsas_instance *instance, char *detail)
7252 7251 {
7253 7252 uint64_t ena;
7254 7253 char buf[FM_MAX_CLASS];
7255 7254
7256 7255 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
7257 7256 ena = fm_ena_generate(0, FM_ENA_FMT1);
7258 7257 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) {
7259 7258 ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP,
7260 7259 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
7261 7260 }
7262 7261 }
7263 7262
7264 7263 static int
7265 7264 mrsas_add_intrs(struct mrsas_instance *instance, int intr_type)
7266 7265 {
7267 7266
7268 7267 dev_info_t *dip = instance->dip;
7269 7268 int avail, actual, count;
7270 7269 int i, flag, ret;
7271 7270
7272 7271 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_type = %x",
7273 7272 intr_type));
7274 7273
7275 7274 /* Get number of interrupts */
7276 7275 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
7277 7276 if ((ret != DDI_SUCCESS) || (count == 0)) {
7278 7277 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_nintrs() failed:"
7279 7278 "ret %d count %d", ret, count));
7280 7279
7281 7280 return (DDI_FAILURE);
7282 7281 }
7283 7282
7284 7283 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: count = %d ", count));
7285 7284
7286 7285 /* Get number of available interrupts */
7287 7286 ret = ddi_intr_get_navail(dip, intr_type, &avail);
7288 7287 if ((ret != DDI_SUCCESS) || (avail == 0)) {
7289 7288 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed:"
7290 7289 "ret %d avail %d", ret, avail));
7291 7290
7292 7291 return (DDI_FAILURE);
7293 7292 }
7294 7293 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: avail = %d ", avail));
7295 7294
7296 7295 /* Only one interrupt routine. So limit the count to 1 */
7297 7296 if (count > 1) {
7298 7297 count = 1;
7299 7298 }
7300 7299
7301 7300 /*
7302 7301 * Allocate an array of interrupt handlers. Currently we support
7303 7302 * only one interrupt. The framework can be extended later.
7304 7303 */
7305 7304 instance->intr_htable_size = count * sizeof (ddi_intr_handle_t);
7306 7305 instance->intr_htable = kmem_zalloc(instance->intr_htable_size,
7307 7306 KM_SLEEP);
7308 7307 ASSERT(instance->intr_htable);
7309 7308
7310 7309 flag = ((intr_type == DDI_INTR_TYPE_MSI) ||
7311 7310 (intr_type == DDI_INTR_TYPE_MSIX)) ?
7312 7311 DDI_INTR_ALLOC_STRICT : DDI_INTR_ALLOC_NORMAL;
7313 7312
7314 7313 /* Allocate interrupt */
7315 7314 ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0,
7316 7315 count, &actual, flag);
7317 7316
7318 7317 if ((ret != DDI_SUCCESS) || (actual == 0)) {
7319 7318 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7320 7319 "avail = %d", avail));
7321 7320 goto mrsas_free_htable;
7322 7321 }
7323 7322
7324 7323 if (actual < count) {
7325 7324 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7326 7325 "Requested = %d Received = %d", count, actual));
7327 7326 }
7328 7327 instance->intr_cnt = actual;
7329 7328
7330 7329 /*
7331 7330 * Get the priority of the interrupt allocated.
7332 7331 */
7333 7332 if ((ret = ddi_intr_get_pri(instance->intr_htable[0],
7334 7333 &instance->intr_pri)) != DDI_SUCCESS) {
7335 7334 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7336 7335 "get priority call failed"));
7337 7336 goto mrsas_free_handles;
7338 7337 }
7339 7338
7340 7339 /*
7341 7340 * Test for high level mutex. we don't support them.
7342 7341 */
7343 7342 if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) {
7344 7343 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7345 7344 "High level interrupts not supported."));
7346 7345 goto mrsas_free_handles;
7347 7346 }
7348 7347
7349 7348 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_pri = 0x%x ",
7350 7349 instance->intr_pri));
7351 7350
7352 7351 /* Call ddi_intr_add_handler() */
7353 7352 for (i = 0; i < actual; i++) {
7354 7353 ret = ddi_intr_add_handler(instance->intr_htable[i],
7355 7354 (ddi_intr_handler_t *)mrsas_isr, (caddr_t)instance,
7356 7355 (caddr_t)(uintptr_t)i);
7357 7356
7358 7357 if (ret != DDI_SUCCESS) {
7359 7358 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs:"
7360 7359 "failed %d", ret));
7361 7360 goto mrsas_free_handles;
7362 7361 }
7363 7362
7364 7363 }
7365 7364
7366 7365 con_log(CL_DLEVEL1, (CE_NOTE, " ddi_intr_add_handler done"));
7367 7366
7368 7367 if ((ret = ddi_intr_get_cap(instance->intr_htable[0],
7369 7368 &instance->intr_cap)) != DDI_SUCCESS) {
7370 7369 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d",
7371 7370 ret));
7372 7371 goto mrsas_free_handlers;
7373 7372 }
7374 7373
7375 7374 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
7376 7375 con_log(CL_ANN, (CE_WARN, "Calling ddi_intr_block _enable"));
7377 7376
7378 7377 (void) ddi_intr_block_enable(instance->intr_htable,
7379 7378 instance->intr_cnt);
7380 7379 } else {
7381 7380 con_log(CL_ANN, (CE_NOTE, " calling ddi_intr_enable"));
7382 7381
7383 7382 for (i = 0; i < instance->intr_cnt; i++) {
7384 7383 (void) ddi_intr_enable(instance->intr_htable[i]);
7385 7384 con_log(CL_ANN, (CE_NOTE, "ddi intr enable returns "
7386 7385 "%d", i));
7387 7386 }
7388 7387 }
7389 7388
7390 7389 return (DDI_SUCCESS);
7391 7390
7392 7391 mrsas_free_handlers:
7393 7392 for (i = 0; i < actual; i++)
7394 7393 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7395 7394
7396 7395 mrsas_free_handles:
7397 7396 for (i = 0; i < actual; i++)
7398 7397 (void) ddi_intr_free(instance->intr_htable[i]);
7399 7398
7400 7399 mrsas_free_htable:
7401 7400 if (instance->intr_htable != NULL)
7402 7401 kmem_free(instance->intr_htable, instance->intr_htable_size);
7403 7402
7404 7403 instance->intr_htable = NULL;
7405 7404 instance->intr_htable_size = 0;
7406 7405
7407 7406 return (DDI_FAILURE);
7408 7407
7409 7408 }
7410 7409
7411 7410
/*
 * mrsas_rem_intrs: tear down everything mrsas_add_intrs() set up.
 * Interrupts are disabled first (block-disable when the capability
 * allows it), then each handler is removed and its handle freed, and
 * finally the handle table itself is released.  That ordering matters:
 * a handler must not fire after its handle is freed.
 */
static void
mrsas_rem_intrs(struct mrsas_instance *instance)
{
	int i;

	con_log(CL_ANN, (CE_NOTE, "mrsas_rem_intrs called"));

	/* Disable all interrupts first */
	if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_disable(instance->intr_htable,
		    instance->intr_cnt);
	} else {
		for (i = 0; i < instance->intr_cnt; i++) {
			(void) ddi_intr_disable(instance->intr_htable[i]);
		}
	}

	/* Remove all the handlers */

	for (i = 0; i < instance->intr_cnt; i++) {
		(void) ddi_intr_remove_handler(instance->intr_htable[i]);
		(void) ddi_intr_free(instance->intr_htable[i]);
	}

	if (instance->intr_htable != NULL)
		kmem_free(instance->intr_htable, instance->intr_htable_size);

	instance->intr_htable = NULL;
	instance->intr_htable_size = 0;

}
7443 7442
7444 7443 static int
7445 7444 mrsas_tran_bus_config(dev_info_t *parent, uint_t flags,
7446 7445 ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
7447 7446 {
7448 7447 struct mrsas_instance *instance;
7449 7448 int config;
7450 7449 int rval = NDI_SUCCESS;
7451 7450
7452 7451 char *ptr = NULL;
7453 7452 int tgt, lun;
7454 7453
7455 7454 con_log(CL_ANN1, (CE_NOTE, "Bus config called for op = %x", op));
7456 7455
7457 7456 if ((instance = ddi_get_soft_state(mrsas_state,
7458 7457 ddi_get_instance(parent))) == NULL) {
7459 7458 return (NDI_FAILURE);
7460 7459 }
7461 7460
7462 7461 /* Hold nexus during bus_config */
7463 7462 ndi_devi_enter(parent, &config);
7464 7463 switch (op) {
7465 7464 case BUS_CONFIG_ONE: {
7466 7465
7467 7466 /* parse wwid/target name out of name given */
7468 7467 if ((ptr = strchr((char *)arg, '@')) == NULL) {
7469 7468 rval = NDI_FAILURE;
7470 7469 break;
7471 7470 }
7472 7471 ptr++;
7473 7472
7474 7473 if (mrsas_parse_devname(arg, &tgt, &lun) != 0) {
7475 7474 rval = NDI_FAILURE;
7476 7475 break;
7477 7476 }
7478 7477
7479 7478 if (lun == 0) {
7480 7479 rval = mrsas_config_ld(instance, tgt, lun, childp);
7481 7480 } else if ((instance->tbolt || instance->skinny) && lun != 0) {
7482 7481 rval = mrsas_tbolt_config_pd(instance,
7483 7482 tgt, lun, childp);
7484 7483 } else {
7485 7484 rval = NDI_FAILURE;
7486 7485 }
7487 7486
7488 7487 break;
7489 7488 }
7490 7489 case BUS_CONFIG_DRIVER:
7491 7490 case BUS_CONFIG_ALL: {
7492 7491
7493 7492 rval = mrsas_config_all_devices(instance);
7494 7493
7495 7494 rval = NDI_SUCCESS;
7496 7495 break;
7497 7496 }
7498 7497 }
7499 7498
7500 7499 if (rval == NDI_SUCCESS) {
7501 7500 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
7502 7501
7503 7502 }
7504 7503 ndi_devi_exit(parent, config);
7505 7504
7506 7505 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x",
7507 7506 rval));
7508 7507 return (rval);
7509 7508 }
7510 7509
7511 7510 static int
7512 7511 mrsas_config_all_devices(struct mrsas_instance *instance)
7513 7512 {
7514 7513 int rval, tgt;
7515 7514
7516 7515 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
7517 7516 (void) mrsas_config_ld(instance, tgt, 0, NULL);
7518 7517
7519 7518 }
7520 7519
7521 7520 /* Config PD devices connected to the card */
7522 7521 if (instance->tbolt || instance->skinny) {
7523 7522 for (tgt = 0; tgt < instance->mr_tbolt_pd_max; tgt++) {
7524 7523 (void) mrsas_tbolt_config_pd(instance, tgt, 1, NULL);
7525 7524 }
7526 7525 }
7527 7526
7528 7527 rval = NDI_SUCCESS;
7529 7528 return (rval);
7530 7529 }
7531 7530
7532 7531 static int
7533 7532 mrsas_parse_devname(char *devnm, int *tgt, int *lun)
7534 7533 {
7535 7534 char devbuf[SCSI_MAXNAMELEN];
7536 7535 char *addr;
7537 7536 char *p, *tp, *lp;
7538 7537 long num;
7539 7538
7540 7539 /* Parse dev name and address */
7541 7540 (void) strcpy(devbuf, devnm);
7542 7541 addr = "";
7543 7542 for (p = devbuf; *p != '\0'; p++) {
7544 7543 if (*p == '@') {
7545 7544 addr = p + 1;
7546 7545 *p = '\0';
7547 7546 } else if (*p == ':') {
7548 7547 *p = '\0';
7549 7548 break;
7550 7549 }
7551 7550 }
7552 7551
7553 7552 /* Parse target and lun */
7554 7553 for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
7555 7554 if (*p == ',') {
7556 7555 lp = p + 1;
7557 7556 *p = '\0';
7558 7557 break;
7559 7558 }
7560 7559 }
7561 7560 if (tgt && tp) {
7562 7561 if (ddi_strtol(tp, NULL, 0x10, &num)) {
7563 7562 return (DDI_FAILURE); /* Can declare this as constant */
7564 7563 }
7565 7564 *tgt = (int)num;
7566 7565 }
7567 7566 if (lun && lp) {
7568 7567 if (ddi_strtol(lp, NULL, 0x10, &num)) {
7569 7568 return (DDI_FAILURE);
7570 7569 }
7571 7570 *lun = (int)num;
7572 7571 }
7573 7572 return (DDI_SUCCESS); /* Success case */
7574 7573 }
7575 7574
/*
 * mrsas_config_ld: configure (attach a child dip for) the logical drive
 * at the given target/LUN.  If a child already exists it is reused —
 * unless the driver's LD list no longer marks that target valid, in
 * which case an unconfig event is queued to delete the stale entry and
 * NDI_FAILURE is returned.  Otherwise the target is probed via
 * scsi_hba_probe() and, when it exists, a dev node is created through
 * mrsas_config_scsi_device().  On success *ldip (if non-NULL) receives
 * the child dip.  Returns NDI_SUCCESS or NDI_FAILURE.
 */
static int
mrsas_config_ld(struct mrsas_instance *instance, uint16_t tgt,
    uint8_t lun, dev_info_t **ldip)
{
	struct scsi_device *sd;
	dev_info_t *child;
	int rval;

	con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d",
	    tgt, lun));

	if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
		if (ldip) {
			*ldip = child;
		}
		/* Child exists but the LD is gone: remove the stale node. */
		if (instance->mr_ld_list[tgt].flag != MRDRV_TGT_VALID) {
			rval = mrsas_service_evt(instance, tgt, 0,
			    MRSAS_EVT_UNCONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN,
			    "mr_sas: DELETING STALE ENTRY rval = %d "
			    "tgt id = %d ", rval, tgt));
			return (NDI_FAILURE);
		}
		return (NDI_SUCCESS);
	}

	/* Build a temporary scsi_device purely for scsi_hba_probe(). */
	sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
	sd->sd_address.a_hba_tran = instance->tran;
	sd->sd_address.a_target = (uint16_t)tgt;
	sd->sd_address.a_lun = (uint8_t)lun;

	if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS)
		rval = mrsas_config_scsi_device(instance, sd, ldip);
	else
		rval = NDI_FAILURE;

	/* sd_unprobe is blank now. Free buffer manually */
	if (sd->sd_inq) {
		kmem_free(sd->sd_inq, SUN_INQSIZE);
		sd->sd_inq = (struct scsi_inquiry *)NULL;
	}

	kmem_free(sd, sizeof (struct scsi_device));
	con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: return rval = %d",
	    rval));
	return (rval);
}
7623 7622
/*
 * mrsas_config_scsi_device: create and online a child dev node for a
 * probed scsi_device.  Derives the node name and compatible list from
 * the inquiry data (direct-access devices are bound to "sd"), allocates
 * the dip, attaches the "target", "lun" and "compatible" properties and
 * onlines it.  If onlining fails the half-built node is torn down.  On
 * return *dipp (if non-NULL) receives the child dip — which may be NULL
 * on the early failure paths.  Returns NDI_SUCCESS or NDI_FAILURE.
 */
int
mrsas_config_scsi_device(struct mrsas_instance *instance,
    struct scsi_device *sd, dev_info_t **dipp)
{
	char *nodename = NULL;
	char **compatible = NULL;
	int ncompatible = 0;
	char *childname;
	dev_info_t *ldip = NULL;
	int tgt = sd->sd_address.a_target;
	int lun = sd->sd_address.a_lun;
	int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
	int rval;

	con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: scsi_device t%dL%d", tgt, lun));
	scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
	    NULL, &nodename, &compatible, &ncompatible);

	if (nodename == NULL) {
		con_log(CL_ANN1, (CE_WARN, "mr_sas: Found no compatible driver "
		    "for t%dL%d", tgt, lun));
		rval = NDI_FAILURE;
		goto finish;
	}

	/* Direct-access devices get the standard "sd" node name. */
	childname = (dtype == DTYPE_DIRECT) ? "sd" : nodename;
	con_log(CL_DLEVEL1, (CE_NOTE,
	    "mr_sas: Childname = %2s nodename = %s", childname, nodename));

	/* Create a dev node */
	rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip);
	con_log(CL_DLEVEL1, (CE_NOTE,
	    "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval));
	if (rval == NDI_SUCCESS) {
		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) !=
		    DDI_PROP_SUCCESS) {
			con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
			    "property for t%dl%d target", tgt, lun));
			rval = NDI_FAILURE;
			goto finish;
		}
		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) !=
		    DDI_PROP_SUCCESS) {
			con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
			    "property for t%dl%d lun", tgt, lun));
			rval = NDI_FAILURE;
			goto finish;
		}

		if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
		    "compatible", compatible, ncompatible) !=
		    DDI_PROP_SUCCESS) {
			con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
			    "property for t%dl%d compatible", tgt, lun));
			rval = NDI_FAILURE;
			goto finish;
		}

		rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
		if (rval != NDI_SUCCESS) {
			con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to online "
			    "t%dl%d", tgt, lun));
			/* Tear the half-built node down again. */
			ndi_prop_remove_all(ldip);
			(void) ndi_devi_free(ldip);
		} else {
			con_log(CL_ANN1, (CE_CONT, "mr_sas: online Done :"
			    "0 t%dl%d", tgt, lun));
		}

	}
finish:
	if (dipp) {
		*dipp = ldip;
	}

	con_log(CL_DLEVEL1, (CE_NOTE,
	    "mr_sas: config_scsi_device rval = %d t%dL%d",
	    rval, tgt, lun));
	scsi_hba_nodename_compatible_free(nodename, compatible);
	return (rval);
}
7705 7704
7706 7705 /*ARGSUSED*/
7707 7706 int
7708 7707 mrsas_service_evt(struct mrsas_instance *instance, int tgt, int lun, int event,
7709 7708 uint64_t wwn)
7710 7709 {
7711 7710 struct mrsas_eventinfo *mrevt = NULL;
7712 7711
7713 7712 con_log(CL_ANN1, (CE_NOTE,
7714 7713 "mrsas_service_evt called for t%dl%d event = %d",
7715 7714 tgt, lun, event));
7716 7715
7717 7716 if ((instance->taskq == NULL) || (mrevt =
7718 7717 kmem_zalloc(sizeof (struct mrsas_eventinfo), KM_NOSLEEP)) == NULL) {
7719 7718 return (ENOMEM);
7720 7719 }
7721 7720
7722 7721 mrevt->instance = instance;
7723 7722 mrevt->tgt = tgt;
7724 7723 mrevt->lun = lun;
7725 7724 mrevt->event = event;
7726 7725 mrevt->wwn = wwn;
7727 7726
7728 7727 if ((ddi_taskq_dispatch(instance->taskq,
7729 7728 (void (*)(void *))mrsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) !=
7730 7729 DDI_SUCCESS) {
7731 7730 con_log(CL_ANN1, (CE_NOTE,
7732 7731 "mr_sas: Event task failed for t%dl%d event = %d",
7733 7732 tgt, lun, event));
7734 7733 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
↓ open down ↓ |
7651 lines elided |
↑ open up ↑ |
7735 7734 return (DDI_FAILURE);
7736 7735 }
7737 7736 DTRACE_PROBE3(service_evt, int, tgt, int, lun, int, event);
7738 7737 return (DDI_SUCCESS);
7739 7738 }
7740 7739
/*
 * mrsas_issue_evt_taskq: taskq worker that services one event queued by
 * mrsas_service_evt().  Looks up the child dip for the event's target
 * (LUN 0 below MRDRV_MAX_LD = logical-drive list, otherwise the
 * physical-disk list) and, under the HBA nexus lock, either configures
 * a missing target or offlines an existing one.  The offline path
 * passes NDI_DEVFS_CLEAN so the framework performs the devfs_clean()
 * step itself — the driver no longer does it by hand (illumos 9702).
 * Frees the event record before returning.
 */
static void
mrsas_issue_evt_taskq(struct mrsas_eventinfo *mrevt)
{
	struct mrsas_instance *instance = mrevt->instance;
	dev_info_t *dip;
	int circ1 = 0;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_evt_taskq: called for"
	    " tgt %d lun %d event %d",
	    mrevt->tgt, mrevt->lun, mrevt->event));

	/* Snapshot the child dip under config_dev_mtx. */
	if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) {
		mutex_enter(&instance->config_dev_mtx);
		dip = instance->mr_ld_list[mrevt->tgt].dip;
		mutex_exit(&instance->config_dev_mtx);
	} else {
		mutex_enter(&instance->config_dev_mtx);
		dip = instance->mr_tbolt_pd_list[mrevt->tgt].dip;
		mutex_exit(&instance->config_dev_mtx);
	}


	ndi_devi_enter(instance->dip, &circ1);
	switch (mrevt->event) {
	case MRSAS_EVT_CONFIG_TGT:
		if (dip == NULL) {
			if (mrevt->lun == 0) {
				(void) mrsas_config_ld(instance, mrevt->tgt,
				    0, NULL);
			} else if (instance->tbolt || instance->skinny) {
				(void) mrsas_tbolt_config_pd(instance,
				    mrevt->tgt,
				    1, NULL);
			}
			con_log(CL_ANN1, (CE_NOTE,
			    "mr_sas: EVT_CONFIG_TGT called:"
			    " for tgt %d lun %d event %d",
			    mrevt->tgt, mrevt->lun, mrevt->event));
		} else {
			/* Already configured; nothing to do. */
			con_log(CL_ANN1, (CE_NOTE,
			    "mr_sas: EVT_CONFIG_TGT dip != NULL:"
			    " for tgt %d lun %d event %d",
			    mrevt->tgt, mrevt->lun, mrevt->event));
		}
		break;
	case MRSAS_EVT_UNCONFIG_TGT:
		if (dip) {
			(void) ndi_devi_offline(dip,
			    NDI_DEVFS_CLEAN | NDI_DEVI_REMOVE);
			con_log(CL_ANN1, (CE_NOTE,
			    "mr_sas: EVT_UNCONFIG_TGT called:"
			    " for tgt %d lun %d event %d",
			    mrevt->tgt, mrevt->lun, mrevt->event));
		} else {
			con_log(CL_ANN1, (CE_NOTE,
			    "mr_sas: EVT_UNCONFIG_TGT dip == NULL:"
			    " for tgt %d lun %d event %d",
			    mrevt->tgt, mrevt->lun, mrevt->event));
		}
		break;
	}
	kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
	ndi_devi_exit(instance->dip, circ1);
}
7818 7804
7819 7805
7820 7806 int
7821 7807 mrsas_mode_sense_build(struct scsi_pkt *pkt)
7822 7808 {
7823 7809 union scsi_cdb *cdbp;
7824 7810 uint16_t page_code;
7825 7811 struct scsa_cmd *acmd;
7826 7812 struct buf *bp;
7827 7813 struct mode_header *modehdrp;
7828 7814
7829 7815 cdbp = (void *)pkt->pkt_cdbp;
7830 7816 page_code = cdbp->cdb_un.sg.scsi[0];
7831 7817 acmd = PKT2CMD(pkt);
7832 7818 bp = acmd->cmd_buf;
7833 7819 if ((!bp) && bp->b_un.b_addr && bp->b_bcount && acmd->cmd_dmacount) {
7834 7820 con_log(CL_ANN1, (CE_WARN, "Failing MODESENSE Command"));
7835 7821 /* ADD pkt statistics as Command failed. */
7836 7822 return (NULL);
7837 7823 }
7838 7824
7839 7825 bp_mapin(bp);
7840 7826 bzero(bp->b_un.b_addr, bp->b_bcount);
7841 7827
7842 7828 switch (page_code) {
7843 7829 case 0x3: {
7844 7830 struct mode_format *page3p = NULL;
7845 7831 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7846 7832 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7847 7833
7848 7834 page3p = (void *)((caddr_t)modehdrp +
7849 7835 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7850 7836 page3p->mode_page.code = 0x3;
7851 7837 page3p->mode_page.length =
7852 7838 (uchar_t)(sizeof (struct mode_format));
7853 7839 page3p->data_bytes_sect = 512;
7854 7840 page3p->sect_track = 63;
7855 7841 break;
7856 7842 }
7857 7843 case 0x4: {
7858 7844 struct mode_geometry *page4p = NULL;
7859 7845 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7860 7846 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7861 7847
7862 7848 page4p = (void *)((caddr_t)modehdrp +
7863 7849 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7864 7850 page4p->mode_page.code = 0x4;
7865 7851 page4p->mode_page.length =
7866 7852 (uchar_t)(sizeof (struct mode_geometry));
7867 7853 page4p->heads = 255;
7868 7854 page4p->rpm = 10000;
7869 7855 break;
7870 7856 }
7871 7857 default:
7872 7858 break;
7873 7859 }
7874 7860 return (NULL);
7875 7861 }
↓ open down ↓ |
63 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX